diff --git a/.gitignore b/.gitignore index 6024f55..04642c6 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,5 @@ node_modules build .qaspherecli -.DS_Store \ No newline at end of file +.DS_Store +.env diff --git a/README.md b/README.md index bfca5a9..31e5bc8 100644 --- a/README.md +++ b/README.md @@ -8,7 +8,7 @@ The QAS CLI is a command-line tool for submitting your test automation results to [QA Sphere](https://qasphere.com/). It provides the most efficient way to collect and report test results from your test automation workflow, CI/CD pipeline, and build servers. -The tool can upload test case results from JUnit XML files to QA Sphere test runs by matching test case names (mentions of special markers) to QA Sphere's test cases. +The tool can upload test case results from JUnit XML and Playwright JSON files to QA Sphere test runs by matching test case names (mentions of special markers) to QA Sphere's test cases. ## Installation @@ -53,14 +53,14 @@ QAS_TOKEN=your_token QAS_URL=https://qas.eu1.qasphere.com # Example with real values: -# QAS_TOKEN=tst0000001.1CKCEtest_JYyckc3zYtest.dhhjYY3BYEoQH41e62itest +# QAS_TOKEN=qas.1CKCEtest_JYyckc3zYtest.dhhjYY3BYEoQH41e62itest # QAS_URL=https://qas.eu1.qasphere.com ``` -## Command: `junit-upload` +## Commands: `junit-upload`, `playwright-json-upload` -The `junit-upload` command creates a new test run within a QA Sphere project from your JUnit XML files or uploads results to an existing run. +The `junit-upload` and `playwright-json-upload` commands upload test results from JUnit XML and Playwright JSON reports to QA Sphere respectively. Both commands can either create a new test run within a QA Sphere project or upload results to an existing run, and they share the same set of options. ### Options @@ -89,7 +89,9 @@ The `--run-name` option supports the following placeholders: ### Usage Examples -Ensure the required environment variables are defined before running these commands: +Ensure the required environment variables are defined before running these commands. + +**Note:** The following examples use `junit-upload`, but you can replace it with `playwright-json-upload` and adjust the file extension from `.xml` to `.json` to upload Playwright JSON reports instead. 1. Create a new test run with default name template (`Automated test run - {MMM} {DD}, {YYYY}, {hh}:{mm}:{ss} {AMPM}`) and upload results: ```bash @@ -123,36 +125,52 @@ Ensure the required environment variables are defined before running these comma qasphere junit-upload --attachments ./test1.xml ``` -7. Force upload even with missing test cases: +7. Force upload even with missing test cases or attachments: ```bash qasphere junit-upload --force ./test-results.xml ``` -## JUnit XML File Requirements +## Test Report Requirements + +The QAS CLI requires test cases in your reports (JUnit XML or Playwright JSON) to reference corresponding test cases in QA Sphere. These references are used to map test results from your automation to the appropriate test cases in QA Sphere. If a report lacks these references or the referenced test case doesn't exist in QA Sphere, the tool will display an error message. + +### JUnit XML + +Test case names in JUnit XML reports must include a QA Sphere test case marker in the format `PROJECT-SEQUENCE`: + +- **PROJECT** - Your QA Sphere project code +- **SEQUENCE** - Test case sequence number (minimum 3 digits, zero-padded if needed) -The QAS CLI tool requires JUnit XML files to have test case names that match the test case codes on QA Sphere. 
If your XML file doesn't contain any matching test cases, the tool will display an error message. +**Examples:** +- `PRJ-002: Login with valid credentials` +- `Login with invalid credentials: PRJ-1312` -### Test Case Naming Convention +**Note:** The project code in test names must exactly match your QA Sphere project code. -Test case names in the XML report should contain a QA Sphere test case marker (PROJECT-SEQUENCE). +### Playwright JSON -This marker is used to match test cases in the XML report with test cases in QA Sphere: +Playwright JSON reports support two methods for referencing test cases (checked in order): -- **PROJECT** is your QA Sphere project code -- **SEQUENCE** is at least a three-digit test case sequence number +1. **Test Annotations (Recommended)** - Add a [test annotation](https://playwright.dev/docs/test-annotations#annotate-tests) with: + - `type`: `"test case"` (case-insensitive) + - `description`: Full QA Sphere test case URL -Examples: -- **PRJ-312: Login with valid credentials** -- **Login with valid credentials: PRJ-312** + ```typescript + test('user login', { + annotation: { type: 'test case', description: 'https://qas.eu1.qasphere.com/project/PRJ/tcase/123' } + }, async ({ page }) => { + // test code + }); + ``` -The project code in your test names must match the project code in QA Sphere. +2. **Test Case Marker in Name** - Include the `PROJECT-SEQUENCE` marker in the test name (same format as JUnit XML) -### Development (for those who want to contribute to the tool) +## Development (for those who want to contribute to the tool) 1. Install and build: `npm install && npm run build && npm link` 2. Get test account at [qasphere.com](https://qasphere.com/) (includes demo project) 3. Configure `.qaspherecli` with credentials -4. Test with sample XML from [bistro-e2e](https://github.com/Hypersequent/bistro-e2e) +4. 
Test with sample reports from [bistro-e2e](https://github.com/Hypersequent/bistro-e2e) Tests: `npm test` (Vitest) and `cd mnode-test && ./docker-test.sh` (Node.js 18+ compatibility) diff --git a/package-lock.json b/package-lock.json index 2872379..65833fe 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,18 +1,19 @@ { "name": "qas-cli", - "version": "0.2.5", + "version": "0.4.0", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "qas-cli", - "version": "0.2.5", + "version": "0.4.0", "license": "ISC", "dependencies": { "chalk": "^5.4.1", "dotenv": "^16.5.0", "escape-html": "^1.0.3", "semver": "^7.7.1", + "strip-ansi": "^7.1.2", "xml2js": "^0.6.2", "yargs": "^17.7.2", "zod": "^3.24.3" @@ -1522,11 +1523,15 @@ } }, "node_modules/ansi-regex": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", - "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz", + "integrity": "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==", + "license": "MIT", "engines": { - "node": ">=8" + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" } }, "node_modules/ansi-styles": { @@ -1662,6 +1667,27 @@ "node": ">=12" } }, + "node_modules/cliui/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/cliui/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, "node_modules/cliui/node_modules/wrap-ansi": { "version": "7.0.0", "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", @@ -2946,10 +2972,20 @@ "node": ">=8" } }, - "node_modules/strip-ansi": { + "node_modules/string-width/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/string-width/node_modules/strip-ansi": { "version": "6.0.1", "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "license": "MIT", "dependencies": { "ansi-regex": "^5.0.1" }, @@ -2957,6 +2993,21 @@ "node": ">=8" } }, + "node_modules/strip-ansi": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.2.tgz", + "integrity": "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==", + "license": "MIT", + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, "node_modules/strip-json-comments": { "version": "3.1.1", "resolved": 
"https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", @@ -3452,6 +3503,29 @@ "node": ">=8" } }, + "node_modules/wrap-ansi/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/wrap-ansi/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, "node_modules/xml2js": { "version": "0.6.2", "resolved": "https://registry.npmjs.org/xml2js/-/xml2js-0.6.2.tgz", diff --git a/package.json b/package.json index 8cce7e9..296da98 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "qas-cli", - "version": "0.3.4", + "version": "0.4.0", "description": "QAS CLI is a command line tool for submitting your automation test results to QA Sphere at https://qasphere.com/", "type": "module", "main": "./build/bin/qasphere.js", @@ -47,6 +47,7 @@ "dotenv": "^16.5.0", "escape-html": "^1.0.3", "semver": "^7.7.1", + "strip-ansi": "^7.1.2", "xml2js": "^0.6.2", "yargs": "^17.7.2", "zod": "^3.24.3" diff --git a/src/api/run.ts b/src/api/run.ts index 2e9dda1..ad2ee66 100644 --- a/src/api/run.ts +++ b/src/api/run.ts @@ -14,7 +14,7 @@ export interface CreateRunRequest { } export interface CreateRunResponse { - id: string + id: number } export const createRunApi = (fetcher: typeof fetch) => { diff --git a/src/commands/junit-upload.ts b/src/commands/junit-upload.ts deleted file mode 100644 index 098f10d..0000000 --- a/src/commands/junit-upload.ts +++ /dev/null @@ -1,106 +0,0 @@ -import { Arguments, Argv, CommandModule } from 'yargs' -import chalk from 'chalk' -import { JUnitResultUploader } from '../utils/junit/JUnitResultUploader' -import { loadEnvs } from '../utils/env' - -export interface JUnitArgs { - runUrl?: string - runName?: string - files: string[] - force: boolean - attachments: boolean -} - -export class JUnitUploadCommandModule implements CommandModule { - command = 'junit-upload [args..] ' - describe = 'Upload JUnit xml files to a new or existing test run' - - builder = (argv: Argv) => { - argv.options({ - 'run-url': { - alias: 'r', - describe: 'Optional URL of an existing Run for uploading results', - type: 'string', - requiresArg: true, - }, - 'run-name': { - describe: - 'Optional name template for creating new test run when run url is not specified. 
If not specified, "Automated test run - {MMM} {DD}, {YYYY}, {hh}:{mm}:{ss} {AMPM}" is used as default', - type: 'string', - }, - attachments: { - describe: 'Try to detect any attachments and upload it with the test result', - type: 'boolean', - }, - force: { - describe: 'Ignore API request errors, invalid test cases or attachments', - type: 'boolean', - }, - help: { - alias: 'h', - help: true, - }, - }) - - argv.example( - '$0 junit-upload ./test-results.xml', - 'Create a new test run with default name template and upload results (project code detected from test names)' - ) - - argv.example( - '$0 junit-upload -r https://qas.eu1.qasphere.com/project/P1/run/23 ./test-results.xml', - 'Upload results to existing run ID 23 of Project P1' - ) - - argv.example( - '$0 junit-upload --run-name "v1.4.4-rc5" ./test-results.xml', - 'Create a new test run with name template without any placeholders and upload results' - ) - - argv.example( - '$0 junit-upload --run-name "CI Build {env:BUILD_NUMBER} - {YYYY}-{MM}-{DD}" ./test-results.xml', - 'Create a new test run with name template using environment variable and date placeholders and upload results' - ) - - argv.example( - '$0 junit-upload --run-name "Nightly Tests {YYYY}/{MM}/{DD} {HH}:{mm}" ./test-results.xml', - 'Create a new test run with name template using date and time placeholders and upload results' - ) - - argv.epilogue(`Requirements: - Test case names in the XML report should contain QA Sphere test case reference (PROJECT-SEQUENCE). - This reference is used to match test cases in the XML report with test cases in QA Sphere. - - - ${chalk.bold('PROJECT')} is your QASphere project code - - ${chalk.bold('SEQUENCE')} is at least three-digit test case sequence number - - For example, - - ${chalk.bold('PRJ-312')}: Login with valid credentials - - Login with valid credentials: ${chalk.bold('PRJ-312')} - - Required environment variables (in .qaspherecli or exported): - - QAS_TOKEN: Your QASphere API token - - QAS_URL: Your QASphere instance URL (e.g., http://tenant1.localhost:5173) - -Run name template placeholders: - - ${chalk.bold('{env:VAR_NAME}')}: Environment variables - - ${chalk.bold('{YYYY}')}: 4-digit year - - ${chalk.bold('{YY}')}: 2-digit year - - ${chalk.bold('{MMM}')}: 3-letter month (e.g., Jan, Feb, Mar) - - ${chalk.bold('{MM}')}: 2-digit month - - ${chalk.bold('{DD}')}: 2-digit day - - ${chalk.bold('{HH}')}: 2-digit hour (24-hour format) - - ${chalk.bold('{hh}')}: 2-digit hour (12-hour format) - - ${chalk.bold('{mm}')}: 2-digit minute - - ${chalk.bold('{ss}')}: 2-digit second - - ${chalk.bold('{AMPM}')}: AM/PM`) - - return argv as Argv - } - - handler = async (args: Arguments) => { - loadEnvs() - const handler = new JUnitResultUploader(args) - await handler.handle() - } -} diff --git a/src/commands/main.ts b/src/commands/main.ts index ac21990..d8a76b3 100644 --- a/src/commands/main.ts +++ b/src/commands/main.ts @@ -1,5 +1,5 @@ import yargs from 'yargs' -import { JUnitUploadCommandModule } from './junit-upload' +import { ResultUploadCommandModule } from './resultUpload' import { qasEnvs, qasEnvFile } from '../utils/env' import { getVersion } from '../utils/version' @@ -11,7 +11,8 @@ export const run = (args: string | string[]) => Required variables: ${qasEnvs.join(', ')} These should be either exported as env vars or defined in a ${qasEnvFile} file.` ) - .command(new JUnitUploadCommandModule()) + .command(new ResultUploadCommandModule('junit-upload')) + .command(new ResultUploadCommandModule('playwright-json-upload')) .demandCommand(1, 
"") .help('h') .alias('h', 'help') diff --git a/src/commands/resultUpload.ts b/src/commands/resultUpload.ts new file mode 100644 index 0000000..2d171b0 --- /dev/null +++ b/src/commands/resultUpload.ts @@ -0,0 +1,117 @@ +import { Arguments, Argv, CommandModule } from 'yargs' +import chalk from 'chalk' +import { loadEnvs } from '../utils/env' +import { + ResultUploadCommandArgs, + ResultUploadCommandHandler, + UploadCommandType +} from '../utils/result-upload/ResultUploadCommandHandler' + +const commandTypeDisplayStrings: Record = { + 'junit-upload': 'JUnit XML', + 'playwright-json-upload': 'Playwright JSON', +} + +const commandTypeFileExtensions: Record = { + 'junit-upload': 'xml', + 'playwright-json-upload': 'json', +} + +export class ResultUploadCommandModule implements CommandModule { + constructor(private readonly type: UploadCommandType) {} + + get command() { + return `${this.type} [args..] ` + } + + get describe() { + return `Upload ${commandTypeDisplayStrings[this.type]} files to a new or existing test run` + } + + builder = (argv: Argv) => { + argv.options({ + 'run-url': { + alias: 'r', + describe: 'Optional URL of an existing test run for uploading results', + type: 'string', + requiresArg: true, + }, + 'run-name': { + describe: + 'Optional name template for creating new test run when run url is not specified. If not specified, "Automated test run - {MMM} {DD}, {YYYY}, {hh}:{mm}:{ss} {AMPM}" is used as default', + type: 'string', + }, + attachments: { + describe: 'Try to detect any attachments and upload it with the test result', + type: 'boolean', + }, + force: { + describe: 'Ignore API request errors, invalid test cases or attachments', + type: 'boolean', + }, + help: { + alias: 'h', + help: true, + }, + }) + + argv.example( + `$0 ${this.type} ./test-results.${commandTypeFileExtensions[this.type]}`, + 'Create a new test run with default name template and upload results (project code detected from test names)' + ) + + argv.example( + `$0 ${this.type} -r https://qas.eu1.qasphere.com/project/P1/run/23 ./test-results.${commandTypeFileExtensions[this.type]}`, + 'Upload results to existing run ID 23 of project P1' + ) + + argv.example( + `$0 ${this.type} --run-name "v1.4.4-rc5" ./test-results.${commandTypeFileExtensions[this.type]}`, + 'Create a new test run with name template without any placeholders and upload results' + ) + + argv.example( + `$0 ${this.type} --run-name "CI Build {env:BUILD_NUMBER} - {YYYY}-{MM}-{DD}" ./test-results.${commandTypeFileExtensions[this.type]}`, + 'Create a new test run with name template using environment variable and date placeholders and upload results' + ) + + argv.example( + `$0 ${this.type} --run-name "Nightly Tests {YYYY}/{MM}/{DD} {HH}:{mm}" ./test-results.${commandTypeFileExtensions[this.type]}`, + 'Create a new test run with name template using date and time placeholders and upload results' + ) + + argv.epilogue(`Requirements: + Test case names in the report should contain QA Sphere test case reference (PROJECT-SEQUENCE). This reference is used to match test cases in the report with test cases in QA Sphere. 
+ - ${chalk.bold('PROJECT')} is your QASphere project code + - ${chalk.bold('SEQUENCE')} is at least three-digit test case sequence number + + For example, + - ${chalk.bold('PRJ-312')}: Login with valid credentials + - Login with valid credentials: ${chalk.bold('PRJ-312')} + + Required environment variables (in .qaspherecli or exported): + - QAS_TOKEN: Your QASphere API token + - QAS_URL: Your QASphere instance URL (e.g., https://qas.eu1.qasphere.com) + +Run name template placeholders: + - ${chalk.bold('{env:VAR_NAME}')}: Environment variables + - ${chalk.bold('{YYYY}')}: 4-digit year + - ${chalk.bold('{YY}')}: 2-digit year + - ${chalk.bold('{MMM}')}: 3-letter month (e.g., Jan, Feb, Mar) + - ${chalk.bold('{MM}')}: 2-digit month + - ${chalk.bold('{DD}')}: 2-digit day + - ${chalk.bold('{HH}')}: 2-digit hour (24-hour format) + - ${chalk.bold('{hh}')}: 2-digit hour (12-hour format) + - ${chalk.bold('{mm}')}: 2-digit minute + - ${chalk.bold('{ss}')}: 2-digit second + - ${chalk.bold('{AMPM}')}: AM/PM`) + + return argv as Argv + } + + handler = async (args: Arguments) => { + loadEnvs() + const handler = new ResultUploadCommandHandler(this.type, args) + await handler.handle() + } +} diff --git a/src/tests/fixtures/playwright-json/comprehensive-test.json b/src/tests/fixtures/playwright-json/comprehensive-test.json new file mode 100644 index 0000000..37be6cd --- /dev/null +++ b/src/tests/fixtures/playwright-json/comprehensive-test.json @@ -0,0 +1,338 @@ +{ + "suites": [ + { + "title": "failure.scenarios.spec.ts", + "specs": [ + { + "title": "TEST-001 Failure with only type", + "tags": [], + "tests": [ + { + "annotations": [], + "expectedStatus": "passed", + "projectName": "chromium", + "results": [ + { + "status": "failed", + "errors": [ + { + "message": "AssertionError" + } + ], + "stdout": [], + "stderr": [], + "retry": 0, + "attachments": [] + } + ], + "status": "unexpected" + } + ] + }, + { + "title": "TEST-002 Failure with type and message", + "tags": [], + "tests": [ + { + "annotations": [], + "expectedStatus": "passed", + "projectName": "chromium", + "results": [ + { + "status": "failed", + "errors": [ + { + "message": "AssertionError: Expected true but was false" + } + ], + "stdout": [], + "stderr": [], + "retry": 0, + "attachments": [] + } + ], + "status": "unexpected" + } + ] + }, + { + "title": "TEST-003 Failure with type, message and text content", + "tags": [], + "tests": [ + { + "annotations": [], + "expectedStatus": "passed", + "projectName": "chromium", + "results": [ + { + "status": "failed", + "errors": [ + { + "message": "AssertionError: Expected value did not match actual value\n AssertionError: Expected value did not match actual value\n at Object.assert (/path/to/test.js:15:23)\n at TestCase.run (/path/to/test.js:45:12)\n Stack trace continues..." 
+ } + ], + "stdout": [], + "stderr": [], + "retry": 0, + "attachments": [] + } + ], + "status": "unexpected" + } + ] + } + ], + "suites": [] + }, + { + "title": "error.scenarios.spec.ts", + "specs": [ + { + "title": "TEST-004 Error with only type", + "tags": [], + "tests": [ + { + "annotations": [], + "expectedStatus": "passed", + "projectName": "chromium", + "results": [ + { + "status": "failed", + "errors": [ + { + "message": "NullPointerException" + } + ], + "stdout": [], + "stderr": [], + "retry": 0, + "attachments": [] + } + ], + "status": "unexpected" + } + ] + }, + { + "title": "TEST-005 Error with type and text content", + "tags": [], + "tests": [ + { + "annotations": [], + "expectedStatus": "passed", + "projectName": "chromium", + "results": [ + { + "status": "failed", + "errors": [ + { + "message": "RuntimeException: Unexpected error occurred during test execution" + } + ], + "stdout": [], + "stderr": [], + "retry": 0, + "attachments": [] + } + ], + "status": "unexpected" + } + ] + }, + { + "title": "TEST-006 Error with type, message and text content", + "tags": [], + "tests": [ + { + "annotations": [], + "expectedStatus": "passed", + "projectName": "chromium", + "results": [ + { + "status": "failed", + "errors": [ + { + "message": "IOException: File not found\n IOException: File not found\n at FileReader.read (/path/to/file.js:23:45)\n at TestSetup.initialize (/path/to/setup.js:67:89)\n Full stack trace here..." + } + ], + "stdout": [], + "stderr": [], + "retry": 0, + "attachments": [] + } + ], + "status": "unexpected" + } + ] + } + ], + "suites": [] + }, + { + "title": "skipped.scenarios.spec.ts", + "specs": [ + { + "title": "TEST-007 Empty skipped", + "tags": [], + "tests": [ + { + "annotations": [], + "expectedStatus": "skipped", + "projectName": "chromium", + "results": [ + { + "status": "skipped", + "errors": [], + "stdout": [], + "stderr": [], + "retry": 0, + "attachments": [] + } + ], + "status": "skipped" + } + ] + }, + { + "title": "TEST-008 Skipped with only message", + "tags": [], + "tests": [ + { + "annotations": [ + { + "type": "skip", + "description": "Test not applicable for current configuration" + } + ], + "expectedStatus": "skipped", + "projectName": "chromium", + "results": [ + { + "status": "skipped", + "errors": [], + "stdout": [], + "stderr": [], + "retry": 0, + "attachments": [] + } + ], + "status": "skipped" + } + ] + }, + { + "title": "TEST-009 Skipped with only text content", + "tags": [], + "tests": [ + { + "annotations": [], + "expectedStatus": "skipped", + "projectName": "chromium", + "results": [ + { + "status": "skipped", + "errors": [], + "stdout": [ + { + "text": "This test was skipped because the required feature is not enabled" + } + ], + "stderr": [], + "retry": 0, + "attachments": [] + } + ], + "status": "skipped" + } + ] + }, + { + "title": "TEST-010 Skipped with message and text content", + "tags": [], + "tests": [ + { + "annotations": [ + { + "type": "skip", + "description": "Environment not ready" + } + ], + "expectedStatus": "skipped", + "projectName": "chromium", + "results": [ + { + "status": "skipped", + "errors": [], + "stdout": [ + { + "text": "Test skipped due to environment not being ready.\nThe database connection is not available.\nPlease check the configuration and try again." 
+ } + ], + "stderr": [], + "retry": 0, + "attachments": [] + } + ], + "status": "skipped" + } + ] + }, + { + "title": "TEST-011 Successful test", + "tags": [], + "tests": [ + { + "annotations": [], + "expectedStatus": "passed", + "projectName": "chromium", + "results": [ + { + "status": "passed", + "errors": [], + "stdout": [ + { + "text": "Test completed successfully with all assertions passing." + } + ], + "stderr": [], + "retry": 0, + "attachments": [] + } + ], + "status": "expected" + } + ] + }, + { + "title": "TEST-012 Another successful test", + "tags": [], + "tests": [ + { + "annotations": [], + "expectedStatus": "passed", + "projectName": "chromium", + "results": [ + { + "status": "passed", + "errors": [], + "stdout": [], + "stderr": [], + "retry": 0, + "attachments": [ + { + "name": "screenshot", + "contentType": "image/png", + "path": "./test-results/successful-test-screenshot.png" + } + ] + } + ], + "status": "expected" + } + ] + } + ], + "suites": [] + } + ] +} diff --git a/src/tests/fixtures/playwright-json/empty-tsuite.json b/src/tests/fixtures/playwright-json/empty-tsuite.json new file mode 100644 index 0000000..a6ec7c7 --- /dev/null +++ b/src/tests/fixtures/playwright-json/empty-tsuite.json @@ -0,0 +1,37 @@ +{ + "suites": [ + { + "title": "ui.cart.spec.ts", + "specs": [ + { + "title": "Test cart TEST-002", + "tags": [], + "tests": [ + { + "annotations": [], + "expectedStatus": "passed", + "projectName": "chromium", + "results": [ + { + "status": "passed", + "errors": [], + "stdout": [], + "stderr": [], + "retry": 0, + "attachments": [] + } + ], + "status": "expected" + } + ] + } + ], + "suites": [] + }, + { + "title": "ui.contents.spec.ts", + "specs": [], + "suites": [] + } + ] +} diff --git a/src/tests/fixtures/playwright-json/matching-tcases.json b/src/tests/fixtures/playwright-json/matching-tcases.json new file mode 100644 index 0000000..2682541 --- /dev/null +++ b/src/tests/fixtures/playwright-json/matching-tcases.json @@ -0,0 +1,160 @@ +{ + "suites": [ + { + "title": "ui.cart.spec.ts", + "specs": [ + { + "title": "Test cart TEST-002", + "tags": [], + "tests": [ + { + "annotations": [], + "expectedStatus": "passed", + "projectName": "chromium", + "results": [ + { + "status": "passed", + "errors": [], + "stdout": [], + "stderr": [], + "retry": 0, + "attachments": [ + { + "name": "attachment", + "contentType": "application/json", + "path": "./matching-tcases.json" + } + ] + } + ], + "status": "expected" + } + ] + }, + { + "title": "Test checkout TEST-003", + "tags": [], + "tests": [ + { + "annotations": [], + "expectedStatus": "passed", + "projectName": "chromium", + "results": [ + { + "status": "passed", + "errors": [], + "stdout": [], + "stderr": [], + "retry": 0, + "attachments": [ + { + "name": "attachment", + "contentType": "application/json", + "path": "./matching-tcases.json" + } + ] + } + ], + "status": "expected" + } + ] + } + ], + "suites": [] + }, + { + "title": "ui.contents.spec.ts", + "specs": [ + { + "title": "TEST-004 About page content", + "tags": [], + "tests": [ + { + "annotations": [], + "expectedStatus": "passed", + "projectName": "chromium", + "results": [ + { + "status": "passed", + "errors": [], + "stdout": [], + "stderr": [], + "retry": 0, + "attachments": [ + { + "name": "attachment", + "contentType": "application/json", + "path": "./matching-tcases.json" + } + ] + } + ], + "status": "expected" + } + ] + }, + { + "title": "TEST-006 Navigation bar items", + "tags": [], + "tests": [ + { + "annotations": [], + "expectedStatus": "passed", + 
"projectName": "chromium", + "results": [ + { + "status": "failed", + "errors": [ + { + "message": "Error: expect(received).toEqual(expected) // deep equality\n\n - Expected - 1\n + Received + 1\n\n @@ -1,8 +1,8 @@\n Array [\n Object {\n - \"isActive\": true,\n + \"isActive\": false,\n \"text\": \"Welcome\",\n },\n Object {\n \"isActive\": false,\n \"text\": \"Today's Menu\",\n\n 37 | \tawait welcome.goto()\n 38 | \tlet navbarItems = await welcome.getNavbarItems()\n > 39 | \texpect(navbarItems).toEqual([\n | \t ^\n 40 | \t\t{ text: 'Welcome', isActive: true },\n 41 | \t\t{ text: \"Today's Menu\", isActive: false },\n 42 | \t\t{ text: 'About', isActive: false }" + } + ], + "stdout": [], + "stderr": [], + "retry": 0, + "attachments": [ + { + "name": "attachment", + "contentType": "application/json", + "path": "./matching-tcases.json" + } + ] + } + ], + "status": "unexpected" + } + ] + }, + { + "title": "Menu page content TEST-007", + "tags": [], + "tests": [ + { + "annotations": [], + "expectedStatus": "passed", + "projectName": "chromium", + "results": [ + { + "status": "passed", + "errors": [], + "stdout": [], + "stderr": [], + "retry": 0, + "attachments": [ + { + "name": "attachment", + "contentType": "application/json", + "path": "./matching-tcases.json" + } + ] + } + ], + "status": "expected" + } + ] + } + ], + "suites": [] + } + ] +} diff --git a/src/tests/fixtures/playwright-json/missing-attachments.json b/src/tests/fixtures/playwright-json/missing-attachments.json new file mode 100644 index 0000000..78ef351 --- /dev/null +++ b/src/tests/fixtures/playwright-json/missing-attachments.json @@ -0,0 +1,160 @@ +{ + "suites": [ + { + "title": "ui.cart.spec.ts", + "specs": [ + { + "title": "Test cart TEST-002", + "tags": [], + "tests": [ + { + "annotations": [], + "expectedStatus": "passed", + "projectName": "chromium", + "results": [ + { + "status": "passed", + "errors": [], + "stdout": [], + "stderr": [], + "retry": 0, + "attachments": [ + { + "name": "attachment", + "contentType": "application/json", + "path": "./matching-tcases.json" + } + ] + } + ], + "status": "expected" + } + ] + }, + { + "title": "Test checkout TEST-003", + "tags": [], + "tests": [ + { + "annotations": [], + "expectedStatus": "passed", + "projectName": "chromium", + "results": [ + { + "status": "passed", + "errors": [], + "stdout": [], + "stderr": [], + "retry": 0, + "attachments": [ + { + "name": "attachment", + "contentType": "application/json", + "path": "./matching-tcases.json" + } + ] + } + ], + "status": "expected" + } + ] + } + ], + "suites": [] + }, + { + "title": "ui.contents.spec.ts", + "specs": [ + { + "title": "TEST-004 About page content", + "tags": [], + "tests": [ + { + "annotations": [], + "expectedStatus": "passed", + "projectName": "chromium", + "results": [ + { + "status": "passed", + "errors": [], + "stdout": [], + "stderr": [], + "retry": 0, + "attachments": [ + { + "name": "attachment", + "contentType": "application/json", + "path": "./__missing-file.json" + } + ] + } + ], + "status": "expected" + } + ] + }, + { + "title": "TEST-006 Navigation bar items", + "tags": [], + "tests": [ + { + "annotations": [], + "expectedStatus": "passed", + "projectName": "chromium", + "results": [ + { + "status": "failed", + "errors": [ + { + "message": "Error: expect(received).toEqual(expected) // deep equality\n\n - Expected - 1\n + Received + 1\n\n @@ -1,8 +1,8 @@\n Array [\n Object {\n - \"isActive\": true,\n + \"isActive\": false,\n \"text\": \"Welcome\",\n },\n Object {\n \"isActive\": false,\n \"text\": 
\"Today's Menu\",\n\n 37 | \tawait welcome.goto()\n 38 | \tlet navbarItems = await welcome.getNavbarItems()\n > 39 | \texpect(navbarItems).toEqual([\n | \t ^\n 40 | \t\t{ text: 'Welcome', isActive: true },\n 41 | \t\t{ text: \"Today's Menu\", isActive: false },\n 42 | \t\t{ text: 'About', isActive: false }" + } + ], + "stdout": [], + "stderr": [], + "retry": 0, + "attachments": [ + { + "name": "attachment", + "contentType": "application/json", + "path": "./matching-tcases.json" + } + ] + } + ], + "status": "unexpected" + } + ] + }, + { + "title": "Menu page content TEST-007", + "tags": [], + "tests": [ + { + "annotations": [], + "expectedStatus": "passed", + "projectName": "chromium", + "results": [ + { + "status": "passed", + "errors": [], + "stdout": [], + "stderr": [], + "retry": 0, + "attachments": [ + { + "name": "attachment", + "contentType": "application/json", + "path": "./matching-tcases.json" + } + ] + } + ], + "status": "expected" + } + ] + } + ], + "suites": [] + } + ] +} diff --git a/src/tests/fixtures/playwright-json/missing-tcases.json b/src/tests/fixtures/playwright-json/missing-tcases.json new file mode 100644 index 0000000..ba5c462 --- /dev/null +++ b/src/tests/fixtures/playwright-json/missing-tcases.json @@ -0,0 +1,188 @@ +{ + "suites": [ + { + "title": "ui.cart.spec.ts", + "specs": [ + { + "title": "Test cart TEST-002", + "tags": [], + "tests": [ + { + "annotations": [], + "expectedStatus": "passed", + "projectName": "chromium", + "results": [ + { + "status": "passed", + "errors": [], + "stdout": [], + "stderr": [], + "retry": 0, + "attachments": [ + { + "name": "attachment", + "contentType": "application/json", + "path": "./with-missing-tcases.json" + } + ] + } + ], + "status": "expected" + } + ] + }, + { + "title": "Test checkout TEST-003", + "tags": [], + "tests": [ + { + "annotations": [], + "expectedStatus": "passed", + "projectName": "chromium", + "results": [ + { + "status": "passed", + "errors": [], + "stdout": [], + "stderr": [], + "retry": 0, + "attachments": [ + { + "name": "attachment", + "contentType": "application/json", + "path": "./missing.json" + } + ] + } + ], + "status": "expected" + } + ] + } + ], + "suites": [] + }, + { + "title": "ui.contents.spec.ts", + "specs": [ + { + "title": "TEST-000 Missing content", + "tags": [], + "tests": [ + { + "annotations": [], + "expectedStatus": "passed", + "projectName": "chromium", + "results": [ + { + "status": "passed", + "errors": [], + "stdout": [], + "stderr": [], + "retry": 0, + "attachments": [ + { + "name": "attachment", + "contentType": "application/json", + "path": "./with-missing-tcases.json" + } + ] + } + ], + "status": "expected" + } + ] + }, + { + "title": "About page content", + "tags": [], + "tests": [ + { + "annotations": [], + "expectedStatus": "passed", + "projectName": "chromium", + "results": [ + { + "status": "passed", + "errors": [], + "stdout": [], + "stderr": [], + "retry": 0, + "attachments": [ + { + "name": "attachment", + "contentType": "application/json", + "path": "./with-missing-tcases.json" + } + ] + } + ], + "status": "expected" + } + ] + }, + { + "title": "TEST-006 Navigation bar items", + "tags": [], + "tests": [ + { + "annotations": [], + "expectedStatus": "passed", + "projectName": "chromium", + "results": [ + { + "status": "failed", + "errors": [ + { + "message": "Error: expect(received).toEqual(expected) // deep equality\n\n - Expected - 1\n + Received + 1\n\n @@ -1,8 +1,8 @@\n Array [\n Object {\n - \"isActive\": true,\n + \"isActive\": false,\n \"text\": \"Welcome\",\n },\n 
Object {\n \"isActive\": false,\n \"text\": \"Today's Menu\",\n\n 37 | \tawait welcome.goto()\n 38 | \tlet navbarItems = await welcome.getNavbarItems()\n > 39 | \texpect(navbarItems).toEqual([\n | \t ^\n 40 | \t\t{ text: 'Welcome', isActive: true },\n 41 | \t\t{ text: \"Today's Menu\", isActive: false },\n 42 | \t\t{ text: 'About', isActive: false }" + } + ], + "stdout": [], + "stderr": [], + "retry": 0, + "attachments": [ + { + "name": "attachment", + "contentType": "application/json", + "path": "./with-missing-tcases.json" + } + ] + } + ], + "status": "unexpected" + } + ] + }, + { + "title": "Menu page content TEST-007", + "tags": [], + "tests": [ + { + "annotations": [], + "expectedStatus": "passed", + "projectName": "chromium", + "results": [ + { + "status": "passed", + "errors": [], + "stdout": [], + "stderr": [], + "retry": 0, + "attachments": [ + { + "name": "attachment", + "contentType": "image/png", + "path": "../test-results/ui.cart-Test-cart-chromium/test-finished-1.png" + } + ] + } + ], + "status": "expected" + } + ] + } + ], + "suites": [] + } + ] +} diff --git a/src/tests/junit-upload.spec.ts b/src/tests/junit-upload.spec.ts deleted file mode 100644 index e4c0e66..0000000 --- a/src/tests/junit-upload.spec.ts +++ /dev/null @@ -1,305 +0,0 @@ -import { afterAll, beforeAll, expect, test, describe, afterEach } from 'vitest' -import { run } from '../commands/main' -import { setupServer } from 'msw/node' -import { HttpResponse, http } from 'msw' -import { runTestCases } from './fixtures/testcases' -import { countMockedApiCalls } from './utils' - -const projectCode = 'TEST' -const runId = '1' -const qasHost = 'qas.eu1.qasphere.com' -const baseURL = `https://${qasHost}` -const runURL = `${baseURL}/project/${projectCode}/run/${runId}` -const xmlBasePath = './src/tests/fixtures/junit-xml' - -process.env['QAS_TOKEN'] = 'QAS_TOKEN' -process.env['QAS_URL'] = baseURL - -let lastCreatedRunTitle = '' -let createRunTitleConflict = false - -const server = setupServer( - http.get(`${baseURL}/api/public/v0/project/${projectCode}`, ({ request }) => { - expect(request.headers.get('Authorization')).toEqual('ApiKey QAS_TOKEN') - return HttpResponse.json({ exists: true }) - }), - http.post(`${baseURL}/api/public/v0/project/${projectCode}/tcase/seq`, ({ request }) => { - expect(request.headers.get('Authorization')).toEqual('ApiKey QAS_TOKEN') - return HttpResponse.json({ - data: runTestCases, - total: runTestCases.length, - }) - }), - http.post(`${baseURL}/api/public/v0/project/${projectCode}/run`, async ({ request }) => { - expect(request.headers.get('Authorization')).toEqual('ApiKey QAS_TOKEN') - const body = (await request.json()) as { title: string } - lastCreatedRunTitle = body.title - - if (createRunTitleConflict) { - return HttpResponse.json( - { - message: 'run title must be unique within the milestone, conflicting run id: 1', - }, - { - status: 403, - } - ) - } - - return HttpResponse.json({ - id: parseInt(runId), - }) - }), - http.get(`${baseURL}/api/public/v0/project/${projectCode}/run/${runId}/tcase`, ({ request }) => { - expect(request.headers.get('Authorization')).toEqual('ApiKey QAS_TOKEN') - return HttpResponse.json({ - tcases: runTestCases, - }) - }), - http.post( - new RegExp(`${baseURL}/api/public/v0/project/${projectCode}/run/${runId}/tcase/.+/result`), - ({ request }) => { - expect(request.headers.get('Authorization')).toEqual('ApiKey QAS_TOKEN') - return HttpResponse.json({ - id: 0, - }) - } - ), - http.post(`${baseURL}/api/public/v0/file`, async ({ request }) => { - 
expect(request.headers.get('Authorization')).toEqual('ApiKey QAS_TOKEN') - expect(request.headers.get('Content-Type')).includes('multipart/form-data') - return HttpResponse.json({ - id: 'TEST', - url: 'http://example.com', - }) - }) -) - -beforeAll(() => { - server.listen({ onUnhandledRequest: 'error' }) -}) -afterAll(() => { - server.close() -}) -afterEach(() => { - server.resetHandlers() - server.events.removeAllListeners() -}) - -const countFileUploadApiCalls = () => - countMockedApiCalls(server, (req) => req.url.endsWith('/file')) -const countResultUploadApiCalls = () => - countMockedApiCalls(server, (req) => new URL(req.url).pathname.endsWith('/result')) - -describe('Uploading JUnit xml files', () => { - describe('Argument parsing', () => { - test('Passing correct Run URL pattern should result in success', async () => { - const patterns = [ - `junit-upload --run-url ${runURL} ${xmlBasePath}/matching-tcases.xml`, - `junit-upload -r ${runURL}/ ${xmlBasePath}/matching-tcases.xml`, - `junit-upload -r ${runURL}/tcase/1 ${xmlBasePath}/matching-tcases.xml`, - ] - - for (const pattern of patterns) { - const fileUploadCount = countFileUploadApiCalls() - const tcaseUploadCount = countResultUploadApiCalls() - await run(pattern) - expect(fileUploadCount()).toBe(0) - expect(tcaseUploadCount()).toBe(5) - } - }) - - test('Passing correct Run URL pattern without https, should result in success', async () => { - const fileUploadCount = countFileUploadApiCalls() - const tcaseUploadCount = countResultUploadApiCalls() - await run( - `junit-upload -r ${qasHost}/project/${projectCode}/run/${runId} ${xmlBasePath}/matching-tcases.xml` - ) - expect(fileUploadCount()).toBe(0) - expect(tcaseUploadCount()).toBe(5) - }) - - test('Passing incorrect Run URL pattern should result in failure', async () => { - const patterns = [ - `junit-upload -r ${qasHost}/projects/${projectCode}/runs/${runId} ${xmlBasePath}/matching-tcases.xml`, - `junit-upload -r ${runURL}abc/tcase/1 ${xmlBasePath}/matching-tcases.xml`, - ] - - for (const pattern of patterns) { - const fileUploadCount = countFileUploadApiCalls() - const tcaseUploadCount = countResultUploadApiCalls() - let isError = false - - try { - await run(pattern) - } catch { - isError = true - } - expect(isError).toBeTruthy() - expect(fileUploadCount()).toBe(0) - expect(tcaseUploadCount()).toBe(0) - } - }) - }) - - describe('Uploading test results', () => { - test('Test cases on xml file with all matching test cases on QAS should be successful', async () => { - const fileUploadCount = countFileUploadApiCalls() - const tcaseUploadCount = countResultUploadApiCalls() - await run(`junit-upload -r ${runURL} ${xmlBasePath}/matching-tcases.xml`) - expect(fileUploadCount()).toBe(0) - expect(tcaseUploadCount()).toBe(5) - }) - - test('Test cases on xml file with a missing test case on QAS should throw an error', async () => { - const fileUploadCount = countFileUploadApiCalls() - const tcaseUploadCount = countResultUploadApiCalls() - await expect( - run(`junit-upload -r ${runURL} ${xmlBasePath}/missing-tcases.xml`) - ).rejects.toThrowError() - expect(fileUploadCount()).toBe(0) - expect(tcaseUploadCount()).toBe(0) - }) - - test('Test cases on xml file with a missing test case on QAS should be successful when forced', async () => { - const fileUploadCount = countFileUploadApiCalls() - const tcaseUploadCount = countResultUploadApiCalls() - await run(`junit-upload -r ${runURL} --force ${xmlBasePath}/missing-tcases.xml`) - expect(fileUploadCount()).toBe(0) - 
expect(tcaseUploadCount()).toBe(4) - }) - - test('Test cases from muliple xml files should be processed successfully', async () => { - const fileUploadCount = countFileUploadApiCalls() - const tcaseUploadCount = countResultUploadApiCalls() - await run( - `junit-upload -r ${runURL} --force ${xmlBasePath}/missing-tcases.xml ${xmlBasePath}/missing-tcases.xml` - ) - expect(fileUploadCount()).toBe(0) - expect(tcaseUploadCount()).toBe(8) - }) - - test('Test suite with empty tcases should not result in error and be skipped', async () => { - const fileUploadCount = countFileUploadApiCalls() - const tcaseUploadCount = countResultUploadApiCalls() - await run(`junit-upload -r ${runURL} --force ${xmlBasePath}/empty-tsuite.xml`) - expect(fileUploadCount()).toBe(0) - expect(tcaseUploadCount()).toBe(1) - }) - }) - - describe('Uploading with attachments', () => { - test('Attachments should be uploaded', async () => { - const fileUploadCount = countFileUploadApiCalls() - const tcaseUploadCount = countResultUploadApiCalls() - await run(`junit-upload -r ${runURL} --attachments ${xmlBasePath}/matching-tcases.xml`) - expect(fileUploadCount()).toBe(5) - expect(tcaseUploadCount()).toBe(5) - }) - test('Missing attachments should throw an error', async () => { - const fileUploadCount = countFileUploadApiCalls() - const tcaseUploadCount = countResultUploadApiCalls() - await expect( - run(`junit-upload -r ${runURL} --attachments ${xmlBasePath}/missing-attachments.xml`) - ).rejects.toThrow() - expect(fileUploadCount()).toBe(0) - expect(tcaseUploadCount()).toBe(0) - }) - test('Missing attachments should be successful when forced', async () => { - const fileUploadCount = countFileUploadApiCalls() - const tcaseUploadCount = countResultUploadApiCalls() - await run( - `junit-upload -r ${runURL} --attachments --force ${xmlBasePath}/missing-attachments.xml` - ) - expect(fileUploadCount()).toBe(4) - expect(tcaseUploadCount()).toBe(5) - }) - }) - - describe('Run name template processing', () => { - afterEach(() => { - lastCreatedRunTitle = '' - createRunTitleConflict = false - }) - - test('Should create new run with name template using environment variables', async () => { - // Set up test environment variable - const oldEnv = process.env.TEST_BUILD_NUMBER - process.env.TEST_BUILD_NUMBER = '456' - - try { - // This should create a new run since no --run-url is specified - await run( - `junit-upload --run-name "CI Build {env:TEST_BUILD_NUMBER}" ${xmlBasePath}/matching-tcases.xml` - ) - - expect(lastCreatedRunTitle).toBe('CI Build 456') - } finally { - // Restore original environment - if (oldEnv !== undefined) { - process.env.TEST_BUILD_NUMBER = oldEnv - } else { - delete process.env.TEST_BUILD_NUMBER - } - } - }) - - test('Should create new run with name template using date placeholders', async () => { - const now = new Date() - const expectedYear = now.getFullYear().toString() - const expectedMonth = String(now.getMonth() + 1).padStart(2, '0') - const expectedDay = String(now.getDate()).padStart(2, '0') - - await run( - `junit-upload --run-name "Test Run {YYYY}-{MM}-{DD}" ${xmlBasePath}/matching-tcases.xml` - ) - - expect(lastCreatedRunTitle).toBe(`Test Run ${expectedYear}-${expectedMonth}-${expectedDay}`) - }) - - test('Should create new run with name template using mixed placeholders', async () => { - const oldEnv = process.env.TEST_PROJECT - process.env.TEST_PROJECT = 'MyProject' - - try { - await run( - `junit-upload --run-name "{env:TEST_PROJECT} - {YYYY}/{MM}" ${xmlBasePath}/matching-tcases.xml` - ) - - const now = new 
Date()
-				const expectedYear = now.getFullYear().toString()
-				const expectedMonth = String(now.getMonth() + 1).padStart(2, '0')
-
-				expect(lastCreatedRunTitle).toBe(`MyProject - ${expectedYear}/${expectedMonth}`)
-			} finally {
-				if (oldEnv !== undefined) {
-					process.env.TEST_PROJECT = oldEnv
-				} else {
-					delete process.env.TEST_PROJECT
-				}
-			}
-		})
-
-		test('Should reuse existing run when run title is already used', async () => {
-			const fileUploadCount = countFileUploadApiCalls()
-			const tcaseUploadCount = countResultUploadApiCalls()
-
-			createRunTitleConflict = true
-			await run(`junit-upload --run-name "duplicate run title" ${xmlBasePath}/matching-tcases.xml`)
-
-			expect(lastCreatedRunTitle).toBe('duplicate run title')
-			expect(fileUploadCount()).toBe(0)
-			expect(tcaseUploadCount()).toBe(5)
-		})
-
-		test('Should use default name template when --run-name is not specified', async () => {
-			await run(`junit-upload ${xmlBasePath}/matching-tcases.xml`)
-
-			// Should use default format: "Automated test run - {MMM} {DD}, {YYYY}, {hh}:{mm}:{ss} {AMPM}"
-			expect(lastCreatedRunTitle).toContain('Automated test run - ')
-			expect(lastCreatedRunTitle).toMatch(
-				/Automated test run - \w{3} \d{2}, \d{4}, \d{1,2}:\d{2}:\d{2} (AM|PM)/
-			)
-		})
-	})
-})
diff --git a/src/tests/junit-xml-parsing.spec.ts b/src/tests/junit-xml-parsing.spec.ts
index dd6dc8d..2b1b934 100644
--- a/src/tests/junit-xml-parsing.spec.ts
+++ b/src/tests/junit-xml-parsing.spec.ts
@@ -1,109 +1,113 @@
 import { expect, test, describe } from 'vitest'
-import { parseJUnitXml } from '../utils/junit/junitXmlParser'
+import { parseJUnitXml } from '../utils/result-upload/junitXmlParser'
 import { readFile } from 'fs/promises'
 
 const xmlBasePath = './src/tests/fixtures/junit-xml'
 
 describe('Junit XML parsing', () => {
-	test('Should parse comprehensive test XML without exceptions', async () => {
-		const xmlPath = `${xmlBasePath}/comprehensive-test.xml`
-		const xmlContent = await readFile(xmlPath, 'utf8')
-
-		// This should not throw any exceptions
-		const result = await parseJUnitXml(xmlContent, xmlBasePath)
-
-		// Verify that we got the expected number of test cases
-		expect(result.testcases).toHaveLength(12)
-
-		// Verify we have all the different test result types
-		const testTypes = result.testcases.map(tc => tc.type)
-		expect(testTypes).toContain('failure')
-		expect(testTypes).toContain('error')
-		expect(testTypes).toContain('skipped')
-		expect(testTypes).toContain('success')
-
-		// Verify specific counts by type
-		const typeCounts = result.testcases.reduce((acc, tc) => {
-			acc[tc.type] = (acc[tc.type] || 0) + 1
-			return acc
-		}, {} as Record<string, number>)
-
-		expect(typeCounts.failure).toBe(3)
-		expect(typeCounts.error).toBe(3)
-		expect(typeCounts.skipped).toBe(4)
-		expect(typeCounts.success).toBe(2)
-
-		// Verify that test cases have expected properties
-		result.testcases.forEach(tc => {
-			expect(tc).toHaveProperty('name')
-			expect(tc).toHaveProperty('type')
-			expect(tc).toHaveProperty('attachments')
-			expect(Array.isArray(tc.attachments)).toBe(true)
-		})
-	})
-
-	test('Should handle all failure/error/skipped element variations', async () => {
-		const xmlPath = `${xmlBasePath}/comprehensive-test.xml`
-		const xmlContent = await readFile(xmlPath, 'utf8')
-
-		const result = await parseJUnitXml(xmlContent, xmlBasePath)
-
-		// Test specific scenarios from our comprehensive test
-		const failureTests = result.testcases.filter(tc => tc.type === 'failure')
-		const errorTests = result.testcases.filter(tc => tc.type === 'error')
-		const skippedTests = result.testcases.filter(tc => tc.type === 'skipped')
-
-		// Verify we have the expected failure scenarios
-		expect(failureTests.some(tc => tc.name?.includes('only type'))).toBe(true)
-		expect(failureTests.some(tc => tc.name?.includes('type and message'))).toBe(true)
-		expect(failureTests.some(tc => tc.name?.includes('type, message and text content'))).toBe(true)
-
-		// Verify we have the expected error scenarios
-		expect(errorTests.some(tc => tc.name?.includes('only type'))).toBe(true)
-		expect(errorTests.some(tc => tc.name?.includes('type and text content'))).toBe(true)
-		expect(errorTests.some(tc => tc.name?.includes('type, message and text content'))).toBe(true)
-
-		// Verify we have the expected skipped scenarios
-		expect(skippedTests.some(tc => tc.name?.includes('Empty skipped'))).toBe(true)
-		expect(skippedTests.some(tc => tc.name?.includes('only message'))).toBe(true)
-		expect(skippedTests.some(tc => tc.name?.includes('only text content'))).toBe(true)
-		expect(skippedTests.some(tc => tc.name?.includes('message and text content'))).toBe(true)
-	})
-
-	test('Should handle empty and similar empty tags', async () => {
-		const xmlPath = `${xmlBasePath}/empty-system-err.xml`
-		const xmlContent = await readFile(xmlPath, 'utf8')
-
-		const result = await parseJUnitXml(xmlContent, xmlBasePath)
-		expect(result.testcases).toHaveLength(1)
-
-		// Should parse as success (no failure/error/skipped present)
-		expect(result.testcases[0].type).toBe('success')
-
-		// Message should include system-out content but not fail on empty system-err
-		expect(result.testcases[0].message).toContain('ViewManager initialized')
-	})
-
-	test('Should handle Jest failure without type attribute', async () => {
-		const xmlPath = `${xmlBasePath}/jest-failure-type-missing.xml`
-		const xmlContent = await readFile(xmlPath, 'utf8')
-
-		const result = await parseJUnitXml(xmlContent, xmlBasePath)
-		expect(result.testcases).toHaveLength(3)
-
-		// Verify test result types
-		const typeCounts = result.testcases.reduce((acc, tc) => {
-			acc[tc.type] = (acc[tc.type] || 0) + 1
-			return acc
-		}, {} as Record<string, number>)
-
-		expect(typeCounts.success).toBe(2)
-		expect(typeCounts.failure).toBe(1)
-
-		// Find the failure test case
-		const failedTest = result.testcases.find(tc => tc.type === 'failure')
-		expect(failedTest).toBeDefined()
-		expect(failedTest?.name).toContain('subtracts two numbers correctly')
-		expect(failedTest?.message).toContain('expect(received).toBe(expected)')
+	test('Should parse comprehensive test XML without exceptions', async () => {
+		const xmlPath = `${xmlBasePath}/comprehensive-test.xml`
+		const xmlContent = await readFile(xmlPath, 'utf8')
+
+		// This should not throw any exceptions
+		const testcases = await parseJUnitXml(xmlContent, xmlBasePath)
+
+		// Verify that we got the expected number of test cases
+		expect(testcases).toHaveLength(12)
+
+		// Verify we have all the different test result types
+		const testTypes = testcases.map((tc) => tc.status)
+		expect(testTypes).toContain('failed')
+		expect(testTypes).toContain('blocked')
+		expect(testTypes).toContain('skipped')
+		expect(testTypes).toContain('passed')
+
+		// Verify specific counts by type
+		const typeCounts = testcases.reduce((acc, tc) => {
+			acc[tc.status] = (acc[tc.status] || 0) + 1
+			return acc
+		}, {} as Record<string, number>)
+
+		expect(typeCounts.failed).toBe(3)
+		expect(typeCounts.blocked).toBe(3)
+		expect(typeCounts.skipped).toBe(4)
+		expect(typeCounts.passed).toBe(2)
+
+		// Verify that test cases have expected properties
+		testcases.forEach((tc) => {
+			expect(tc).toHaveProperty('name')
+			expect(tc).toHaveProperty('folder')
+			expect(tc).toHaveProperty('status')
+			expect(tc).toHaveProperty('message')
+			expect(tc).toHaveProperty('attachments')
+			expect(Array.isArray(tc.attachments)).toBe(true)
+		})
+	})
+
+	test('Should handle all failure/error/skipped element variations', async () => {
+		const xmlPath = `${xmlBasePath}/comprehensive-test.xml`
+		const xmlContent = await readFile(xmlPath, 'utf8')
+
+		const testcases = await parseJUnitXml(xmlContent, xmlBasePath)
+
+		// Test specific scenarios from our comprehensive test
+		const failureTests = testcases.filter((tc) => tc.status === 'failed')
+		const errorTests = testcases.filter((tc) => tc.status === 'blocked')
+		const skippedTests = testcases.filter((tc) => tc.status === 'skipped')
+
+		// Verify we have the expected failure scenarios
+		expect(failureTests.some((tc) => tc.name.includes('only type'))).toBe(true)
+		expect(failureTests.some((tc) => tc.name.includes('type and message'))).toBe(true)
+		expect(failureTests.some((tc) => tc.name.includes('type, message and text content'))).toBe(
+			true
+		)
+
+		// Verify we have the expected error scenarios
+		expect(errorTests.some((tc) => tc.name.includes('only type'))).toBe(true)
+		expect(errorTests.some((tc) => tc.name.includes('type and text content'))).toBe(true)
+		expect(errorTests.some((tc) => tc.name.includes('type, message and text content'))).toBe(true)
+
+		// Verify we have the expected skipped scenarios
+		expect(skippedTests.some((tc) => tc.name.includes('Empty skipped'))).toBe(true)
+		expect(skippedTests.some((tc) => tc.name.includes('only message'))).toBe(true)
+		expect(skippedTests.some((tc) => tc.name.includes('only text content'))).toBe(true)
+		expect(skippedTests.some((tc) => tc.name.includes('message and text content'))).toBe(true)
+	})
+
+	test('Should handle empty and similar empty tags', async () => {
+		const xmlPath = `${xmlBasePath}/empty-system-err.xml`
+		const xmlContent = await readFile(xmlPath, 'utf8')
+
+		const testcases = await parseJUnitXml(xmlContent, xmlBasePath)
+		expect(testcases).toHaveLength(1)
+
+		// Should parse as success (no failure/error/skipped present)
+		expect(testcases[0].status).toBe('passed')
+
+		// Message should include system-out content but not fail on empty system-err
+		expect(testcases[0].message).toContain('ViewManager initialized')
+	})
+
+	test('Should handle Jest failure without type attribute', async () => {
+		const xmlPath = `${xmlBasePath}/jest-failure-type-missing.xml`
+		const xmlContent = await readFile(xmlPath, 'utf8')
+
+		const testcases = await parseJUnitXml(xmlContent, xmlBasePath)
+		expect(testcases).toHaveLength(3)
+
+		// Verify test result types
+		const typeCounts = testcases.reduce((acc, tc) => {
+			acc[tc.status] = (acc[tc.status] || 0) + 1
+			return acc
+		}, {} as Record<string, number>)
+
+		expect(typeCounts.passed).toBe(2)
+		expect(typeCounts.failed).toBe(1)
+
+		// Find the failure test case
+		const failedTest = testcases.find((tc) => tc.status === 'failed')
+		expect(failedTest).toBeDefined()
+		expect(failedTest?.name).toContain('subtracts two numbers correctly')
+		expect(failedTest?.message).toContain('expect(received).toBe(expected)')
+	})
 })
diff --git a/src/tests/playwright-json-parsing.spec.ts b/src/tests/playwright-json-parsing.spec.ts
new file mode 100644
index 0000000..a42911c
--- /dev/null
+++ b/src/tests/playwright-json-parsing.spec.ts
@@ -0,0 +1,462 @@
+import { expect, test, describe } from 'vitest'
+import { parsePlaywrightJson } from '../utils/result-upload/playwrightJsonParser'
+import { readFile } from 'fs/promises'
+
+const playwrightJsonBasePath = './src/tests/fixtures/playwright-json'
+
+describe('Playwright JSON parsing', () => {
+	test('Should parse comprehensive test JSON without exceptions', async () => {
+		const jsonPath = `${playwrightJsonBasePath}/comprehensive-test.json`
+		const jsonContent = await readFile(jsonPath, 'utf8')
+
+		// This should not throw any exceptions
+		const testcases = await parsePlaywrightJson(jsonContent, '')
+
+		// Verify that we got the expected number of test cases
+		expect(testcases).toHaveLength(12)
+
+		// Verify we have all the different test result types
+		const testStatuses = testcases.map((tc) => tc.status)
+		expect(testStatuses).toContain('failed')
+		expect(testStatuses).toContain('skipped')
+		expect(testStatuses).toContain('passed')
+
+		// Verify specific counts by status
+		const statusCounts = testcases.reduce((acc, tc) => {
+			acc[tc.status] = (acc[tc.status] || 0) + 1
+			return acc
+		}, {} as Record<string, number>)
+
+		expect(statusCounts.failed).toBe(6) // 3 failures + 3 errors
+		expect(statusCounts.skipped).toBe(4)
+		expect(statusCounts.passed).toBe(2)
+
+		// Verify that test cases have expected properties
+		testcases.forEach((tc) => {
+			expect(tc).toHaveProperty('name')
+			expect(tc).toHaveProperty('folder')
+			expect(tc).toHaveProperty('status')
+			expect(tc).toHaveProperty('message')
+			expect(tc).toHaveProperty('attachments')
+			expect(Array.isArray(tc.attachments)).toBe(true)
+		})
+	})
+
+	test('Should handle empty test suite', async () => {
+		const jsonPath = `${playwrightJsonBasePath}/empty-tsuite.json`
+		const jsonContent = await readFile(jsonPath, 'utf8')
+
+		const testcases = await parsePlaywrightJson(jsonContent, '')
+
+		// Should only have the one test from ui.cart.spec.ts, not the empty ui.contents.spec.ts
+		expect(testcases).toHaveLength(1)
+		expect(testcases[0].name).toContain('Test cart TEST-002')
+	})
+
+	test('Should use last result when there are retries', async () => {
+		const jsonContent = JSON.stringify({
+			suites: [
+				{
+					title: 'retry.spec.ts',
+					specs: [
+						{
+							title: 'Flaky test',
+							tags: [],
+							tests: [
+								{
+									annotations: [],
+									expectedStatus: 'passed',
+									projectName: 'chromium',
+									results: [
+										{
+											status: 'failed',
+											errors: [{ message: 'First attempt failed' }],
+											stdout: [],
+											stderr: [],
+											retry: 0,
+											attachments: [],
+										},
+										{
+											status: 'passed',
+											errors: [],
+											stdout: [],
+											stderr: [],
+											retry: 1,
+											attachments: [],
+										},
+									],
+									status: 'flaky',
+								},
+							],
+						},
+					],
+					suites: [],
+				},
+			],
+		})
+
+		const testcases = await parsePlaywrightJson(jsonContent, '')
+		expect(testcases).toHaveLength(1)
+
+		// Should use the last result (passed on retry)
+		expect(testcases[0].status).toBe('passed')
+		expect(testcases[0].message).toContain('Test passed in 2 attempts')
+	})
+
+	test('Should handle nested suites correctly', async () => {
+		const jsonContent = JSON.stringify({
+			suites: [
+				{
+					title: 'parent.spec.ts',
+					specs: [
+						{
+							title: 'Parent test',
+							tags: [],
+							tests: [
+								{
+									annotations: [],
+									expectedStatus: 'passed',
+									projectName: 'chromium',
+									results: [
+										{
+											status: 'passed',
+											errors: [],
+											stdout: [],
+											stderr: [],
+											retry: 0,
+											attachments: [],
+										},
+									],
+									status: 'expected',
+								},
+							],
+						},
+					],
+					suites: [
+						{
+							title: 'Nested Suite',
+							specs: [
+								{
+									title: 'Nested test',
+									tags: [],
+									tests: [
+										{
+											annotations: [],
+											expectedStatus: 'passed',
+											projectName: 'chromium',
+											results: [
+												{
+													status: 'passed',
+													errors: [],
+													stdout: [],
+													stderr: [],
+													retry: 0,
+													attachments: [],
+												},
+											],
+											status: 
'expected', + }, + ], + }, + ], + suites: [], + }, + ], + }, + ], + }) + + const testcases = await parsePlaywrightJson(jsonContent, '') + expect(testcases).toHaveLength(2) + + // Verify folder is set to top-level suite title + expect(testcases[0].folder).toBe('parent.spec.ts') + expect(testcases[1].folder).toBe('parent.spec.ts') + + // Verify nested test has suite title as prefix + expect(testcases[1].name).toContain('Nested Suite') + expect(testcases[1].name).toContain('Nested test') + }) + + test('Should strip ANSI escape codes from errors and output', async () => { + const jsonContent = JSON.stringify({ + suites: [ + { + title: 'ansi.spec.ts', + specs: [ + { + title: 'Test with ANSI colors in error', + tags: [], + tests: [ + { + annotations: [], + expectedStatus: 'passed', + projectName: 'chromium', + results: [ + { + status: 'failed', + errors: [ + { + message: + '\x1b[31mError: Test failed\x1b[0m\n\x1b[90m at Object.test\x1b[0m', + }, + ], + stdout: [ + { + text: '\x1b[32m✓\x1b[0m Test started\n\x1b[33mWarning:\x1b[0m Something happened', + }, + ], + stderr: [ + { + text: '\x1b[31mError output\x1b[0m\n\x1b[90mStack trace\x1b[0m', + }, + ], + retry: 0, + attachments: [], + }, + ], + status: 'unexpected', + }, + ], + }, + ], + suites: [], + }, + ], + }) + + const testcases = await parsePlaywrightJson(jsonContent, '') + expect(testcases).toHaveLength(1) + + // Verify ANSI codes are stripped from message + const message = testcases[0].message + expect(message).not.toContain('\x1b[') + expect(message).not.toContain('\x1b[31m') + expect(message).not.toContain('\x1b[0m') + + // Verify actual content is preserved + expect(message).toContain('Error: Test failed') + expect(message).toContain('Test started') + expect(message).toContain('Warning:') + expect(message).toContain('Error output') + expect(message).toContain('Stack trace') + }) + + test('Should prefix test case marker from annotations to test name', async () => { + const jsonContent = JSON.stringify({ + suites: [ + { + title: 'annotation.spec.ts', + specs: [ + { + title: 'User login test', + tags: [], + tests: [ + { + annotations: [ + { + type: 'test case', + description: 'https://qas.eu1.qasphere.com/project/PRJ/tcase/123', + }, + ], + expectedStatus: 'passed', + projectName: 'chromium', + results: [ + { + status: 'passed', + errors: [], + stdout: [], + stderr: [], + retry: 0, + attachments: [], + }, + ], + status: 'expected', + }, + ], + }, + { + title: 'Test without annotation', + tags: [], + tests: [ + { + annotations: [], + expectedStatus: 'passed', + projectName: 'chromium', + results: [ + { + status: 'passed', + errors: [], + stdout: [], + stderr: [], + retry: 0, + attachments: [], + }, + ], + status: 'expected', + }, + ], + }, + { + title: 'PRJ-456: Test with marker in name and annotation', + tags: [], + tests: [ + { + annotations: [ + { + type: 'Test Case', + description: 'https://qas.eu1.qasphere.com/project/PRJ/tcase/789', + }, + ], + expectedStatus: 'passed', + projectName: 'chromium', + results: [ + { + status: 'passed', + errors: [], + stdout: [], + stderr: [], + retry: 0, + attachments: [], + }, + ], + status: 'expected', + }, + ], + }, + ], + suites: [], + }, + ], + }) + + const testcases = await parsePlaywrightJson(jsonContent, '') + expect(testcases).toHaveLength(3) + + // Test with annotation should have marker prefixed + expect(testcases[0].name).toBe('PRJ-123: User login test') + + // Test without annotation should use original name + expect(testcases[1].name).toBe('Test without annotation') + + // Test with both 
annotation and marker in name - annotation takes precedence + expect(testcases[2].name).toBe('PRJ-789: PRJ-456: Test with marker in name and annotation') + }) + + test('Should map test status correctly', async () => { + const jsonContent = JSON.stringify({ + suites: [ + { + title: 'status.spec.ts', + specs: [ + { + title: 'Expected test', + tags: [], + tests: [ + { + annotations: [], + expectedStatus: 'passed', + projectName: 'chromium', + results: [ + { + status: 'passed', + errors: [], + stdout: [], + stderr: [], + retry: 0, + attachments: [], + }, + ], + status: 'expected', + }, + ], + }, + { + title: 'Unexpected test', + tags: [], + tests: [ + { + annotations: [], + expectedStatus: 'passed', + projectName: 'chromium', + results: [ + { + status: 'failed', + errors: [{ message: 'Test failed' }], + stdout: [], + stderr: [], + retry: 0, + attachments: [], + }, + ], + status: 'unexpected', + }, + ], + }, + { + title: 'Flaky test', + tags: [], + tests: [ + { + annotations: [], + expectedStatus: 'passed', + projectName: 'chromium', + results: [ + { + status: 'failed', + errors: [], + stdout: [], + stderr: [], + retry: 0, + attachments: [], + }, + { + status: 'passed', + errors: [], + stdout: [], + stderr: [], + retry: 1, + attachments: [], + }, + ], + status: 'flaky', + }, + ], + }, + { + title: 'Skipped test', + tags: [], + tests: [ + { + annotations: [], + expectedStatus: 'skipped', + projectName: 'chromium', + results: [ + { + status: 'skipped', + errors: [], + stdout: [], + stderr: [], + retry: 0, + attachments: [], + }, + ], + status: 'skipped', + }, + ], + }, + ], + suites: [], + }, + ], + }) + + const testcases = await parsePlaywrightJson(jsonContent, '') + expect(testcases).toHaveLength(4) + + expect(testcases[0].status).toBe('passed') // expected + expect(testcases[1].status).toBe('failed') // unexpected + expect(testcases[2].status).toBe('passed') // flaky (passed on retry) + expect(testcases[3].status).toBe('skipped') // skipped + }) +}) diff --git a/src/tests/result-upload.spec.ts b/src/tests/result-upload.spec.ts new file mode 100644 index 0000000..ee43840 --- /dev/null +++ b/src/tests/result-upload.spec.ts @@ -0,0 +1,337 @@ +import { afterAll, beforeAll, expect, test, describe, afterEach } from 'vitest' +import { run } from '../commands/main' +import { setupServer } from 'msw/node' +import { HttpResponse, http } from 'msw' +import { runTestCases } from './fixtures/testcases' +import { countMockedApiCalls } from './utils' + +const projectCode = 'TEST' +const runId = '1' +const qasHost = 'qas.eu1.qasphere.com' +const baseURL = `https://${qasHost}` +const runURL = `${baseURL}/project/${projectCode}/run/${runId}` + +process.env['QAS_TOKEN'] = 'QAS_TOKEN' +process.env['QAS_URL'] = baseURL + +let lastCreatedRunTitle = '' +let createRunTitleConflict = false + +const server = setupServer( + http.get(`${baseURL}/api/public/v0/project/${projectCode}`, ({ request }) => { + expect(request.headers.get('Authorization')).toEqual('ApiKey QAS_TOKEN') + return HttpResponse.json({ exists: true }) + }), + http.post(`${baseURL}/api/public/v0/project/${projectCode}/tcase/seq`, ({ request }) => { + expect(request.headers.get('Authorization')).toEqual('ApiKey QAS_TOKEN') + return HttpResponse.json({ + data: runTestCases, + total: runTestCases.length, + }) + }), + http.post(`${baseURL}/api/public/v0/project/${projectCode}/run`, async ({ request }) => { + expect(request.headers.get('Authorization')).toEqual('ApiKey QAS_TOKEN') + const body = (await request.json()) as { title: string } + 
lastCreatedRunTitle = body.title + + if (createRunTitleConflict) { + return HttpResponse.json( + { + message: 'run title must be unique within the milestone, conflicting run id: 1', + }, + { + status: 403, + } + ) + } + + return HttpResponse.json({ + id: parseInt(runId), + }) + }), + http.get(`${baseURL}/api/public/v0/project/${projectCode}/run/${runId}/tcase`, ({ request }) => { + expect(request.headers.get('Authorization')).toEqual('ApiKey QAS_TOKEN') + return HttpResponse.json({ + tcases: runTestCases, + }) + }), + http.post( + new RegExp(`${baseURL}/api/public/v0/project/${projectCode}/run/${runId}/tcase/.+/result`), + ({ request }) => { + expect(request.headers.get('Authorization')).toEqual('ApiKey QAS_TOKEN') + return HttpResponse.json({ + id: 0, + }) + } + ), + http.post(`${baseURL}/api/public/v0/file`, async ({ request }) => { + expect(request.headers.get('Authorization')).toEqual('ApiKey QAS_TOKEN') + expect(request.headers.get('Content-Type')).includes('multipart/form-data') + return HttpResponse.json({ + id: 'TEST', + url: 'http://example.com', + }) + }) +) + +beforeAll(() => { + server.listen({ onUnhandledRequest: 'error' }) +}) +afterAll(() => { + server.close() +}) +afterEach(() => { + server.resetHandlers() + server.events.removeAllListeners() +}) + +const countFileUploadApiCalls = () => + countMockedApiCalls(server, (req) => req.url.endsWith('/file')) +const countResultUploadApiCalls = () => + countMockedApiCalls(server, (req) => new URL(req.url).pathname.endsWith('/result')) + +const fileTypes = [ + { + name: 'JUnit XML', + command: 'junit-upload', + dataBasePath: './src/tests/fixtures/junit-xml', + fileExtension: 'xml', + }, + { + name: 'Playwright JSON', + command: 'playwright-json-upload', + dataBasePath: './src/tests/fixtures/playwright-json', + fileExtension: 'json', + }, +] + +fileTypes.forEach((fileType) => { + describe(`Uploading ${fileType.name} files`, () => { + describe('Argument parsing', () => { + test('Passing correct Run URL pattern should result in success', async () => { + const patterns = [ + `${fileType.command} --run-url ${runURL} ${fileType.dataBasePath}/matching-tcases.${fileType.fileExtension}`, + `${fileType.command} -r ${runURL}/ ${fileType.dataBasePath}/matching-tcases.${fileType.fileExtension}`, + `${fileType.command} -r ${runURL}/tcase/1 ${fileType.dataBasePath}/matching-tcases.${fileType.fileExtension}`, + ] + + for (const pattern of patterns) { + const fileUploadCount = countFileUploadApiCalls() + const tcaseUploadCount = countResultUploadApiCalls() + await run(pattern) + expect(fileUploadCount()).toBe(0) + expect(tcaseUploadCount()).toBe(5) + } + }) + + test('Passing correct Run URL pattern without https, should result in success', async () => { + const fileUploadCount = countFileUploadApiCalls() + const tcaseUploadCount = countResultUploadApiCalls() + await run( + `${fileType.command} -r ${qasHost}/project/${projectCode}/run/${runId} ${fileType.dataBasePath}/matching-tcases.${fileType.fileExtension}` + ) + expect(fileUploadCount()).toBe(0) + expect(tcaseUploadCount()).toBe(5) + }) + + test('Passing incorrect Run URL pattern should result in failure', async () => { + const patterns = [ + `${fileType.command} -r ${qasHost}/projects/${projectCode}/runs/${runId} ${fileType.dataBasePath}/matching-tcases.${fileType.fileExtension}`, + `${fileType.command} -r ${runURL}abc/tcase/1 ${fileType.dataBasePath}/matching-tcases.${fileType.fileExtension}`, + ] + + for (const pattern of patterns) { + const fileUploadCount = countFileUploadApiCalls() + const 
tcaseUploadCount = countResultUploadApiCalls() + let isError = false + + try { + await run(pattern) + } catch { + isError = true + } + expect(isError).toBeTruthy() + expect(fileUploadCount()).toBe(0) + expect(tcaseUploadCount()).toBe(0) + } + }) + }) + + describe('Uploading test results', () => { + test('Test cases on reports with all matching test cases on QAS should be successful', async () => { + const fileUploadCount = countFileUploadApiCalls() + const tcaseUploadCount = countResultUploadApiCalls() + await run( + `${fileType.command} -r ${runURL} ${fileType.dataBasePath}/matching-tcases.${fileType.fileExtension}` + ) + expect(fileUploadCount()).toBe(0) + expect(tcaseUploadCount()).toBe(5) + }) + + test('Test cases on reports with a missing test case on QAS should throw an error', async () => { + const fileUploadCount = countFileUploadApiCalls() + const tcaseUploadCount = countResultUploadApiCalls() + await expect( + run( + `${fileType.command} -r ${runURL} ${fileType.dataBasePath}/missing-tcases.${fileType.fileExtension}` + ) + ).rejects.toThrowError() + expect(fileUploadCount()).toBe(0) + expect(tcaseUploadCount()).toBe(0) + }) + + test('Test cases on reports with a missing test case on QAS should be successful when forced', async () => { + const fileUploadCount = countFileUploadApiCalls() + const tcaseUploadCount = countResultUploadApiCalls() + await run( + `${fileType.command} -r ${runURL} --force ${fileType.dataBasePath}/missing-tcases.${fileType.fileExtension}` + ) + expect(fileUploadCount()).toBe(0) + expect(tcaseUploadCount()).toBe(4) + }) + + test('Test cases from multiple reports should be processed successfully', async () => { + const fileUploadCount = countFileUploadApiCalls() + const tcaseUploadCount = countResultUploadApiCalls() + await run( + `${fileType.command} -r ${runURL} --force ${fileType.dataBasePath}/missing-tcases.${fileType.fileExtension} ${fileType.dataBasePath}/missing-tcases.${fileType.fileExtension}` + ) + expect(fileUploadCount()).toBe(0) + expect(tcaseUploadCount()).toBe(8) + }) + + test('Test suite with empty tcases should not result in error and be skipped', async () => { + const fileUploadCount = countFileUploadApiCalls() + const tcaseUploadCount = countResultUploadApiCalls() + await run( + `${fileType.command} -r ${runURL} --force ${fileType.dataBasePath}/empty-tsuite.${fileType.fileExtension}` + ) + expect(fileUploadCount()).toBe(0) + expect(tcaseUploadCount()).toBe(1) + }) + }) + + describe('Uploading with attachments', () => { + test('Attachments should be uploaded', async () => { + const fileUploadCount = countFileUploadApiCalls() + const tcaseUploadCount = countResultUploadApiCalls() + await run( + `${fileType.command} -r ${runURL} --attachments ${fileType.dataBasePath}/matching-tcases.${fileType.fileExtension}` + ) + expect(fileUploadCount()).toBe(5) + expect(tcaseUploadCount()).toBe(5) + }) + test('Missing attachments should throw an error', async () => { + const fileUploadCount = countFileUploadApiCalls() + const tcaseUploadCount = countResultUploadApiCalls() + await expect( + run( + `${fileType.command} -r ${runURL} --attachments ${fileType.dataBasePath}/missing-attachments.${fileType.fileExtension}` + ) + ).rejects.toThrow() + expect(fileUploadCount()).toBe(0) + expect(tcaseUploadCount()).toBe(0) + }) + test('Missing attachments should be successful when forced', async () => { + const fileUploadCount = countFileUploadApiCalls() + const tcaseUploadCount = countResultUploadApiCalls() + await run( + `${fileType.command} -r ${runURL} 
--attachments --force ${fileType.dataBasePath}/missing-attachments.${fileType.fileExtension}` + ) + expect(fileUploadCount()).toBe(4) + expect(tcaseUploadCount()).toBe(5) + }) + }) + + describe('Run name template processing', () => { + afterEach(() => { + lastCreatedRunTitle = '' + createRunTitleConflict = false + }) + + test('Should create new run with name template using environment variables', async () => { + // Set up test environment variable + const oldEnv = process.env.TEST_BUILD_NUMBER + process.env.TEST_BUILD_NUMBER = '456' + + try { + // This should create a new run since no --run-url is specified + await run( + `${fileType.command} --run-name "CI Build {env:TEST_BUILD_NUMBER}" ${fileType.dataBasePath}/matching-tcases.${fileType.fileExtension}` + ) + + expect(lastCreatedRunTitle).toBe('CI Build 456') + } finally { + // Restore original environment + if (oldEnv !== undefined) { + process.env.TEST_BUILD_NUMBER = oldEnv + } else { + delete process.env.TEST_BUILD_NUMBER + } + } + }) + + test('Should create new run with name template using date placeholders', async () => { + const now = new Date() + const expectedYear = now.getFullYear().toString() + const expectedMonth = String(now.getMonth() + 1).padStart(2, '0') + const expectedDay = String(now.getDate()).padStart(2, '0') + + await run( + `${fileType.command} --run-name "Test Run {YYYY}-{MM}-{DD}" ${fileType.dataBasePath}/matching-tcases.${fileType.fileExtension}` + ) + + expect(lastCreatedRunTitle).toBe(`Test Run ${expectedYear}-${expectedMonth}-${expectedDay}`) + }) + + test('Should create new run with name template using mixed placeholders', async () => { + const oldEnv = process.env.TEST_PROJECT + process.env.TEST_PROJECT = 'MyProject' + + try { + await run( + `${fileType.command} --run-name "{env:TEST_PROJECT} - {YYYY}/{MM}" ${fileType.dataBasePath}/matching-tcases.${fileType.fileExtension}` + ) + + const now = new Date() + const expectedYear = now.getFullYear().toString() + const expectedMonth = String(now.getMonth() + 1).padStart(2, '0') + + expect(lastCreatedRunTitle).toBe(`MyProject - ${expectedYear}/${expectedMonth}`) + } finally { + if (oldEnv !== undefined) { + process.env.TEST_PROJECT = oldEnv + } else { + delete process.env.TEST_PROJECT + } + } + }) + + test('Should reuse existing run when run title is already used', async () => { + const fileUploadCount = countFileUploadApiCalls() + const tcaseUploadCount = countResultUploadApiCalls() + + createRunTitleConflict = true + await run( + `${fileType.command} --run-name "duplicate run title" ${fileType.dataBasePath}/matching-tcases.${fileType.fileExtension}` + ) + + expect(lastCreatedRunTitle).toBe('duplicate run title') + expect(fileUploadCount()).toBe(0) + expect(tcaseUploadCount()).toBe(5) + }) + + test('Should use default name template when --run-name is not specified', async () => { + await run( + `${fileType.command} ${fileType.dataBasePath}/matching-tcases.${fileType.fileExtension}` + ) + + // Should use default format: "Automated test run - {MMM} {DD}, {YYYY}, {hh}:{mm}:{ss} {AMPM}" + expect(lastCreatedRunTitle).toContain('Automated test run - ') + expect(lastCreatedRunTitle).toMatch( + /Automated test run - \w{3} \d{2}, \d{4}, \d{1,2}:\d{2}:\d{2} (AM|PM)/ + ) + }) + }) + }) +}) diff --git a/src/utils/env.ts b/src/utils/env.ts index 5bb6070..3410477 100644 --- a/src/utils/env.ts +++ b/src/utils/env.ts @@ -7,7 +7,7 @@ export const qasEnvFile = '.qaspherecli' export const qasEnvs = ['QAS_TOKEN', 'QAS_URL'] export function hasRequiredKeys(env: NodeJS.ProcessEnv | 
DotenvPopulateInput): boolean { - return qasEnvs.every(key => key in env && env[key] !== 'undefined') + return qasEnvs.every((key) => key in env && env[key] !== 'undefined') } export function loadEnvs(): void { @@ -47,8 +47,7 @@ export function loadEnvs(): void { const fileEnvValue = fileEnvs[env] if (fileEnvValue && fileEnvValue !== 'undefined') { process.env[env] = fileEnvValue - } - else { + } else { missingEnvs.push(env) } } diff --git a/src/utils/junit/JUnitCommandHandler.ts b/src/utils/junit/JUnitCommandHandler.ts deleted file mode 100644 index 6afec61..0000000 --- a/src/utils/junit/JUnitCommandHandler.ts +++ /dev/null @@ -1,171 +0,0 @@ -import { Arguments } from 'yargs' -import { JUnitArgs } from '../../commands/junit-upload' -import { parseJUnitXml, type JUnitResultType, type JUnitTestCase } from './junitXmlParser' -import chalk from 'chalk' -import { ResultStatus, RunTCase } from '../../api/schemas' -import { parseRunUrl, printError, printErrorThenExit, twirlLoader } from '../misc' -import { Api, createApi } from '../../api' -import { readFileSync } from 'node:fs' -import { dirname } from 'node:path' - -export class JUnitCommandHandler { - private api: Api - - private project: string - - private run: number - - constructor(private args: Arguments) { - const apiToken = process.env.QAS_TOKEN! - const {url, project, run} = parseRunUrl(args) - - this.project = project - this.run = run - this.api = createApi(url, apiToken) - } - - async handle() { - const junitResults : JUnitTestCase[] = [] - - console.log(`Uploading files [${this.args.files.map((f) => chalk.green(f)).join(", ")}]`+ - ` to run [${chalk.green(this.run)}] of project [${chalk.green(this.project)}]`) - - for (const file of this.args.files) { - const xmlString = readFileSync(file).toString() - const { testcases: results } = await parseJUnitXml(xmlString, dirname(file)) - junitResults.push(...results) - } - - const tcases = await this.api.runs - .getRunTCases(this.project, this.run) - .catch(printErrorThenExit) - - const { results, missing } = this.mapTestCaseResults(junitResults, tcases) - this.validateAndPrintMissingTestCases(missing) - this.validateAndPrintMissingAttachments(results) - await this.uploadTestCases(results) - - console.log(`Uploaded ${results.length} test cases`) - } - - private validateAndPrintMissingTestCases(missing: JUnitTestCase[]) { - missing.forEach((item) => { - const folderMessage = item.folder ? ` "${item.folder}" ->` : '' - const header = this.args.force ? 
chalk.yellow('Warning:') : chalk.red('Error:') - console.error( - `${header}${chalk.blue(`${folderMessage} "${item.name}"`)} does not match any test cases` - ) - }) - - if (missing.length) { - console.error(chalk.yellow('\nTo fix this issue, please rename your test cases in the JUnit file to match the expected format:')) - console.error(` Expected format: ${chalk.green(`${this.project}-: Your test name`)}`) - console.error(` Where is the test case sequence number (can be 3 or more digits).`) - console.error(` Example: ${chalk.green(`${this.project}-1024: Login with valid credentials`)}\n`) - console.error(chalk.yellow("Also ensure that the test cases are part of the the run.\n")) - } - - if (missing.length && !this.args.force) { - process.exit(1) - } - } - - private validateAndPrintMissingAttachments = (results: TCaseWithResult[]) => { - if (this.args.attachments) { - let hasAttachmentErrors = false - results.forEach(({ result }) => { - result.attachments.forEach((attachment) => { - if (attachment.error) { - printError(attachment.error) - hasAttachmentErrors = true - } - }) - }) - if (hasAttachmentErrors && !this.args.force) { - process.exit(1) - } - } - } - - private uploadTestCases = async (results: TCaseWithResult[]) => { - const loader = twirlLoader() - loader.start() - try { - for (let i = 0; i < results.length; i++) { - const { tcase, result } = results[i] - let comment = result.message - loader.setText(`Uploading test case ${i + 1} of ${results.length}`) - if (this.args.attachments) { - const attachmentUrls: Array<{ name: string; url: string }> = [] - for (const attachment of result.attachments) { - if (attachment.buffer) { - const { url } = await this.api.file.uploadFile( - new Blob([attachment.buffer]), - attachment.filename - ) - attachmentUrls.push({ url, name: attachment.filename }) - } - } - comment += `\n

<b>Attachments:</b><br>
\n${makeListHtml(attachmentUrls)}` - } - - await this.api.runs.createResultStatus(this.project, this.run, tcase.id, { - status: getResult(result.type), - comment, - }) - } - loader.stop() - } catch (e) { - loader.stop() - printErrorThenExit(e) - } - } - - private mapTestCaseResults = (junitTCases: JUnitTestCase[], testcases: RunTCase[]) => { - const results: TCaseWithResult[] = [] - const missing: JUnitTestCase[] = [] - - junitTCases.forEach((result) => { - const tcase = testcases.find((tcase) => { - if (!result.name) return false - - const tcaseCode = `${this.project}-${tcase.seq.toString().padStart(3, '0')}` - return result.name.includes(tcaseCode) - }) - if (tcase) { - results.push({ - result, - tcase, - }) - return - } - missing.push(result) - }) - - return { results, missing } - } -} - -interface TCaseWithResult { - tcase: RunTCase - result: JUnitTestCase -} - -const makeListHtml = (list: { name: string; url: string }[]) => { - return `` -} - -const getResult = (result: JUnitResultType): ResultStatus => { - switch (result) { - case 'error': - return 'blocked' - case 'failure': - return 'failed' - case 'skipped': - return 'skipped' - case 'success': - return 'passed' - } -} diff --git a/src/utils/junit/JUnitResultUploader.ts b/src/utils/junit/JUnitResultUploader.ts deleted file mode 100644 index 6a16451..0000000 --- a/src/utils/junit/JUnitResultUploader.ts +++ /dev/null @@ -1,165 +0,0 @@ -import { Arguments } from 'yargs' -import { JUnitArgs } from '../../commands/junit-upload' -import { parseJUnitXml } from './junitXmlParser' -import chalk from 'chalk' -import { parseRunUrl, printErrorThenExit, processTemplate } from '../misc' -import { Api, createApi } from '../../api' -import { readFileSync } from 'node:fs' -import { dirname } from 'node:path' -import { JUnitCommandHandler } from './JUnitCommandHandler' -import { extractProjectCode } from '../projectExtractor' -import { CreateRunResponse } from '../../api/run' -import { PaginatedResponse, TCaseBySeq } from '../../api/tcases' - -export class JUnitResultUploader { - private api: Api - private apiToken: string - private baseUrl: string - private project: string - private run?: number - - constructor(private args: Arguments) { - // Get required environment variables - this.apiToken = process.env.QAS_TOKEN! - this.baseUrl = process.env.QAS_URL!.replace(/\/+$/, '') - - if (args.runUrl) { - // Handle existing run URL - const { url, project, run } = parseRunUrl(args) - if (url !== this.baseUrl) { - printErrorThenExit( - `Invalid --run-url specified. 
Must be in the format: ${this.baseUrl}/project/PROJECT/run/RUN` - ) - } - - this.project = project - this.run = run - } else { - // Auto-detect project from XML files - this.project = extractProjectCode(args.files) - console.log(chalk.blue(`Detected project code: ${this.project}`)) - } - - this.api = createApi(this.baseUrl, this.apiToken) - } - - async handle() { - if (!this.args.files || this.args.files.length === 0) { - return printErrorThenExit('No files specified') - } - - if (this.run) { - // Handle existing test run - console.log(chalk.blue(`Using existing test run: ${this.args.runUrl}`)) - const handler = new JUnitCommandHandler({ - ...this.args, - }) - await handler.handle() - return - } - - if (!(await this.api.projects.checkProjectExists(this.project))) { - return printErrorThenExit(`Project ${this.project} does not exist`) - } - - // Create a new test run - console.log(chalk.blue(`Creating a new test run for project: ${this.project}`)) - const tcaseRefs = await this.extractTestCaseRefs() - const tcases = await this.getTestCases(tcaseRefs) - const runId = await this.createNewRun(tcases) - console.log( - chalk.blue(`Test run URL: ${this.baseUrl}/project/${this.project}/run/${runId.id}`) - ) - await this.uploadResults(runId) - } - - private async extractTestCaseRefs(): Promise> { - const tcaseRefs = new Set() - - for (const file of this.args.files) { - const xmlContent = readFileSync(file).toString() - const { testcases } = await parseJUnitXml(xmlContent, dirname(file)) - - for (const testcase of testcases) { - if (!testcase.name) { - if (!this.args.force) { - return printErrorThenExit(`Test case in ${file} has no name`) - } - continue - } - const match = /(\d{3,})/.exec(testcase.name) - if (match) { - tcaseRefs.add(`${this.project}-${match[1]}`) - } else if (!this.args.force) { - return printErrorThenExit( - `Test case name "${testcase.name}" in ${file} does not contain valid sequence number (e.g., 123)` - ) - } - } - } - - if (tcaseRefs.size === 0) { - return printErrorThenExit('No valid test case references found in files') - } - - return tcaseRefs - } - - private async getTestCases(tcaseRefs: Set) { - const response = await this.api.testcases.getTCasesBySeq(this.project, { - seqIds: Array.from(tcaseRefs), - page: 1, - limit: tcaseRefs.size, - }) - - if (response.total === 0 || response.data.length === 0) { - return printErrorThenExit('No matching test cases found in the project') - } - - return response - } - - private async createNewRun(tcases: PaginatedResponse) { - const title = this.args.runName - ? processTemplate(this.args.runName) - : processTemplate('Automated test run - {MMM} {DD}, {YYYY}, {hh}:{mm}:{ss} {AMPM}') - - try { - const runId = await this.api.runs.createRun(this.project, { - title, - description: 'Test run created through automation pipeline', - type: 'static_struct', - queryPlans: [ - { - tcaseIds: tcases.data.map((t: TCaseBySeq) => t.id), - }, - ], - }) - - console.log(chalk.green(`Created new test run "${title}" with ID: ${runId.id}`)) - return runId - } catch (error) { - // Check if the error is about conflicting run ID - const errorMessage = error instanceof Error ? 
error.message : String(error) - const conflictMatch = errorMessage.match(/conflicting run id: (\d+)$/) - - if (conflictMatch) { - const existingRunId = conflictMatch[1] - console.log(chalk.yellow(`Reusing existing test run "${title}" with ID: ${existingRunId}`)) - return { id: existingRunId } - } - - // If it's not a conflicting run ID error, re-throw the original error - throw error - } - } - - private async uploadResults(runId: CreateRunResponse) { - const newRunUrl = `${this.baseUrl}/project/${this.project}/run/${runId.id}` - const newHandler = new JUnitCommandHandler({ - ...this.args, - runUrl: newRunUrl, - }) - await newHandler.handle() - } -} diff --git a/src/utils/misc.ts b/src/utils/misc.ts index 5ce68f1..e485b0e 100644 --- a/src/utils/misc.ts +++ b/src/utils/misc.ts @@ -79,6 +79,21 @@ export const parseRunUrl = (args: Record) => { throw new Error('--run-url is required but not provided.') } +export const parseTCaseUrl = (url: string) => { + if (!url.includes('://')) { + url = `https://${url}` + } + + const matches = url.match(/^(\S+)\/project\/(\w+)\/tcase\/(\d+)(\/|\?|$)/) + if (matches) { + return { + url: matches[1], + project: matches[2], + tcaseSeq: Number(matches[3]), + } + } +} + export const printErrorThenExit = (e: unknown): never => { printError(e) process.exit(1) diff --git a/src/utils/projectExtractor.ts b/src/utils/projectExtractor.ts deleted file mode 100644 index 0f4918b..0000000 --- a/src/utils/projectExtractor.ts +++ /dev/null @@ -1,28 +0,0 @@ -import { readFileSync } from 'node:fs'; -import chalk from 'chalk'; -import { printErrorThenExit } from './misc'; - -export function extractProjectCode(files: string[]): string { - for (const file of files) { - try { - const xmlString = readFileSync(file).toString(); - // Look for pattern like PRJ-123 or TEST-456 - const match = xmlString.match(/([A-Za-z0-9]{1,5})-\d{3,}/); - if (match) { - const [, projectCode] = match; - return projectCode; - } - } catch (error) { - if (error instanceof Error && error.message.startsWith('ENOENT:')) { - if (files.length > 1) { - console.error(chalk.yellow(`Warning: File ${file} does not exist`)); - } else { - return printErrorThenExit(`File ${file} does not exist`); - } - } else { - return printErrorThenExit(`Could not read file ${file}`); - } - } - } - return printErrorThenExit('Could not detect project code from test case names in XML files. 
Please make sure that test case names contain a valid project code (e.g., PRJ-123)'); -} \ No newline at end of file diff --git a/src/utils/result-upload/ResultUploadCommandHandler.ts b/src/utils/result-upload/ResultUploadCommandHandler.ts new file mode 100644 index 0000000..d80dec9 --- /dev/null +++ b/src/utils/result-upload/ResultUploadCommandHandler.ts @@ -0,0 +1,202 @@ +import { Arguments } from 'yargs' +import chalk from 'chalk' +import { readFileSync } from 'node:fs' +import { dirname } from 'node:path' +import { parseRunUrl, printErrorThenExit, processTemplate } from '../misc' +import { Api, createApi } from '../../api' +import { PaginatedResponse, TCaseBySeq } from '../../api/tcases' +import { TestCaseResult } from './types' +import { ResultUploader } from './ResultUploader' +import { parseJUnitXml } from './junitXmlParser' +import { parsePlaywrightJson } from './playwrightJsonParser' + +export type UploadCommandType = 'junit-upload' | 'playwright-json-upload' + +export type Parser = (data: string, attachmentBaseDirectory: string) => Promise<TestCaseResult[]> + +export interface ResultUploadCommandArgs { + type: UploadCommandType + runUrl?: string + runName?: string + files: string[] + force: boolean + attachments: boolean +} + +interface FileResults { + file: string + results: TestCaseResult[] +} + +const commandTypeParsers: Record<UploadCommandType, Parser> = { + 'junit-upload': parseJUnitXml, + 'playwright-json-upload': parsePlaywrightJson, +} + +export class ResultUploadCommandHandler { + private api: Api + private baseUrl: string + + constructor(private type: UploadCommandType, private args: Arguments<ResultUploadCommandArgs>) { + const apiToken = process.env.QAS_TOKEN! + + this.baseUrl = process.env.QAS_URL!.replace(/\/+$/, '') + this.api = createApi(this.baseUrl, apiToken) + } + + async handle() { + if (!this.args.files || this.args.files.length === 0) { + return printErrorThenExit('No files specified') + } + + const fileResults = await this.parseFiles() + const results = fileResults.flatMap((fileResult) => fileResult.results) + + let projectCode = '' + let runId = 0 + if (this.args.runUrl) { + // Handle existing run URL + console.log(chalk.blue(`Using existing test run: ${this.args.runUrl}`)) + + const urlParsed = parseRunUrl(this.args) + if (urlParsed.url !== this.baseUrl) { + printErrorThenExit( + `Invalid --run-url specified.
Must be in the format: ${this.baseUrl}/project/PROJECT/run/RUN` + ) + } + + runId = urlParsed.run + projectCode = urlParsed.project + } else { + // Auto-detect project from results + projectCode = this.detectProjectCode(results) + console.log(chalk.blue(`Detected project code: ${projectCode}`)) + + // Create a new test run + if (!(await this.api.projects.checkProjectExists(projectCode))) { + return printErrorThenExit(`Project ${projectCode} does not exist`) + } + + console.log(chalk.blue(`Creating a new test run for project: ${projectCode}`)) + const tcaseRefs = this.extractTestCaseRefs(projectCode, fileResults) + const tcases = await this.getTestCases(projectCode, tcaseRefs) + runId = await this.createNewRun(projectCode, tcases) + console.log(chalk.blue(`Test run URL: ${this.baseUrl}/project/${projectCode}/run/${runId}`)) + } + + await this.uploadResults(projectCode, runId, results) + } + + protected async parseFiles(): Promise<FileResults[]> { + const results: FileResults[] = [] + + for (const file of this.args.files) { + const fileData = readFileSync(file).toString() + const fileResults = await commandTypeParsers[this.type](fileData, dirname(file)) + results.push({ file, results: fileResults }) + } + + return results + } + + protected detectProjectCode(results: TestCaseResult[]) { + for (const result of results) { + if (result.name) { + // Look for pattern like PRJ-123 or TEST-456 + const match = result.name.match(/([A-Za-z0-9]{1,5})-\d{3,}/) + if (match) { + return match[1] + } + } + } + + return printErrorThenExit( + 'Could not detect project code from test case names. Please make sure they contain a valid project code (e.g., PRJ-123)' + ) + } + + protected extractTestCaseRefs(projectCode: string, fileResults: FileResults[]): Set<string> { + const tcaseRefs = new Set<string>() + + for (const { file, results } of fileResults) { + for (const result of results) { + if (!result.name) { + if (!this.args.force) { + return printErrorThenExit(`Test case in ${file} has no name`) + } + continue + } + + const match = new RegExp(`${projectCode}-(\\d{3,})`).exec(result.name) + if (match) { + tcaseRefs.add(`${projectCode}-${match[1]}`) + } else if (!this.args.force) { + return printErrorThenExit( + `Test case name "${result.name}" in ${file} does not contain a valid sequence number with project code (e.g., ${projectCode}-123)` + ) + } + } + } + + if (tcaseRefs.size === 0) { + return printErrorThenExit('No valid test case references found in any of the files') + } + + return tcaseRefs + } + + private async getTestCases(projectCode: string, tcaseRefs: Set<string>) { + const response = await this.api.testcases.getTCasesBySeq(projectCode, { + seqIds: Array.from(tcaseRefs), + page: 1, + limit: tcaseRefs.size, + }) + + if (response.total === 0 || response.data.length === 0) { + return printErrorThenExit('No matching test cases found in the project') + } + + return response + } + + private async createNewRun(projectCode: string, tcases: PaginatedResponse<TCaseBySeq>) { + const title = processTemplate( + this.args.runName ??
'Automated test run - {MMM} {DD}, {YYYY}, {hh}:{mm}:{ss} {AMPM}' + ) + + try { + const response = await this.api.runs.createRun(projectCode, { + title, + description: 'Test run created through automation pipeline', + type: 'static_struct', + queryPlans: [ + { + tcaseIds: tcases.data.map((t: TCaseBySeq) => t.id), + }, + ], + }) + + console.log(chalk.green(`Created new test run "${title}" with ID: ${response.id}`)) + return response.id + } catch (error) { + // Check if the error is about conflicting run ID + const errorMessage = error instanceof Error ? error.message : String(error) + const conflictMatch = errorMessage.match(/conflicting run id: (\d+)$/) + + if (conflictMatch) { + const existingRunId = Number(conflictMatch[1]) + console.log(chalk.yellow(`Reusing existing test run "${title}" with ID: ${existingRunId}`)) + return existingRunId + } + + // If it's not a conflicting run ID error, re-throw the original error + throw error + } + } + + private async uploadResults(projectCode: string, runId: number, results: TestCaseResult[]) { + const runUrl = `${this.baseUrl}/project/${projectCode}/run/${runId}` + const uploader = new ResultUploader(this.type, { ...this.args, runUrl }) + await uploader.handle(results) + } +} diff --git a/src/utils/result-upload/ResultUploader.ts b/src/utils/result-upload/ResultUploader.ts new file mode 100644 index 0000000..fa0abbb --- /dev/null +++ b/src/utils/result-upload/ResultUploader.ts @@ -0,0 +1,179 @@ +import { Arguments } from 'yargs' +import chalk from 'chalk' +import { RunTCase } from '../../api/schemas' +import { parseRunUrl, printError, printErrorThenExit, twirlLoader } from '../misc' +import { Api, createApi } from '../../api' +import { TestCaseResult } from './types' +import { ResultUploadCommandArgs, UploadCommandType } from './ResultUploadCommandHandler' + +export class ResultUploader { + private api: Api + private project: string + private run: number + + constructor(private type: UploadCommandType, private args: Arguments) { + const apiToken = process.env.QAS_TOKEN! + const { url, project, run } = parseRunUrl(args) + + this.project = project + this.run = run + this.api = createApi(url, apiToken) + } + + async handle(results: TestCaseResult[]) { + const tcases = await this.api.runs + .getRunTCases(this.project, this.run) + .catch(printErrorThenExit) + + const { results: mappedResults, missing } = this.mapTestCaseResults(results, tcases) + this.validateAndPrintMissingTestCases(missing) + this.validateAndPrintMissingAttachments(mappedResults) + + console.log( + `Uploading files [${this.args.files + .map((f) => chalk.green(f)) + .join(', ')}] to run [${chalk.green(this.run)}] of project [${chalk.green(this.project)}]` + ) + await this.uploadTestCases(mappedResults) + console.log(`Uploaded ${mappedResults.length} test cases`) + } + + private validateAndPrintMissingTestCases(missing: TestCaseResult[]) { + missing.forEach((item) => { + const folderMessage = item.folder ? ` "${item.folder}" ->` : '' + const header = this.args.force ? 
chalk.yellow('Warning:') : chalk.red('Error:') + console.error( + `${header}${chalk.blue(`${folderMessage} "${item.name}"`)} does not match any test cases` + ) + }) + + if (missing.length) { + if (this.type === 'junit-upload') { + console.error(` +${chalk.yellow('To fix this issue, include the test case marker in your test names:')} + + Format: ${chalk.green(`${this.project}-<SEQUENCE>: Your test name`)} + Example: ${chalk.green(`${this.project}-002: Login with valid credentials`)} + ${chalk.green(`Login with invalid credentials: ${this.project}-1312`)} + + ${chalk.dim('Where <SEQUENCE> is the test case number (minimum 3 digits, zero-padded if needed)')} +`) + } else { + console.error(` +${chalk.yellow('To fix this issue, choose one of the following options:')} + + ${chalk.bold('Option 1: Use Test Annotations (Recommended)')} + Add a test annotation to your Playwright test: + + ${chalk.green(`test('${missing[0]?.name || 'your test name'}', { + annotation: { + type: 'test case', + description: 'https://your-qas-instance.com/project/${this.project}/tcase/123' + } + }, async ({ page }) => { + // your test code + });`)} + + ${chalk.dim('Note: The "type" field is case-insensitive')} + + ${chalk.bold('Option 2: Include Test Case Marker in Name')} + Rename your test to include the marker ${chalk.green(`${this.project}-<SEQUENCE>`)}: + + Format: ${chalk.green(`${this.project}-<SEQUENCE>: Your test name`)} + Example: ${chalk.green(`${this.project}-1024: Login with valid credentials`)} + ${chalk.dim('Where <SEQUENCE> is the test case number (minimum 3 digits, zero-padded if needed)')} +`) + } + + console.error(chalk.yellow('Also ensure that the test cases exist in the QA Sphere project and the test run (if run URL is provided).')) + } + + if (missing.length && !this.args.force) { + process.exit(1) + } + } + + private validateAndPrintMissingAttachments = (results: TCaseWithResult[]) => { + if (this.args.attachments) { + let hasAttachmentErrors = false + results.forEach(({ result }) => { + result.attachments.forEach((attachment) => { + if (attachment.error) { + printError(attachment.error) + hasAttachmentErrors = true + } + }) + }) + if (hasAttachmentErrors && !this.args.force) { + process.exit(1) + } + } + } + + private uploadTestCases = async (results: TCaseWithResult[]) => { + const loader = twirlLoader() + loader.start() + try { + for (let i = 0; i < results.length; i++) { + const { tcase, result } = results[i] + let comment = result.message + loader.setText(`Uploading test case ${i + 1} of ${results.length}`) + if (this.args.attachments) { + const attachmentUrls: Array<{ name: string; url: string }> = [] + for (const attachment of result.attachments) { + if (attachment.buffer) { + const { url } = await this.api.file.uploadFile( + new Blob([attachment.buffer]), + attachment.filename + ) + attachmentUrls.push({ url, name: attachment.filename }) + } + } + comment += `\n

<b>Attachments:</b><br>
\n${makeListHtml(attachmentUrls)}` + } + + await this.api.runs.createResultStatus(this.project, this.run, tcase.id, { + status: result.status, + comment, + }) + } + loader.stop() + } catch (e) { + loader.stop() + printErrorThenExit(e) + } + } + + private mapTestCaseResults = (testcaseResults: TestCaseResult[], testcases: RunTCase[]) => { + const results: TCaseWithResult[] = [] + const missing: TestCaseResult[] = [] + + testcaseResults.forEach((result) => { + if (result.name) { + const tcase = testcases.find((tcase) => { + const tcaseCode = `${this.project}-${tcase.seq.toString().padStart(3, '0')}` + return result.name.includes(tcaseCode) + }) + + if (tcase) { + results.push({ result, tcase }) + return + } + } + missing.push(result) + }) + + return { results, missing } + } +} + +interface TCaseWithResult { + tcase: RunTCase + result: TestCaseResult +} + +const makeListHtml = (list: { name: string; url: string }[]) => { + return `` +} diff --git a/src/utils/junit/junitXmlParser.ts b/src/utils/result-upload/junitXmlParser.ts similarity index 58% rename from src/utils/junit/junitXmlParser.ts rename to src/utils/result-upload/junitXmlParser.ts index a40a856..a2d75d9 100644 --- a/src/utils/junit/junitXmlParser.ts +++ b/src/utils/result-upload/junitXmlParser.ts @@ -1,25 +1,27 @@ -import path, { basename } from 'node:path' import escapeHtml from 'escape-html' -import { readFile } from 'fs/promises' import xml from 'xml2js' import z from 'zod' +import { Attachment, TestCaseResult } from './types' +import { Parser } from './ResultUploadCommandHandler' +import { ResultStatus } from '../../api/schemas' +import { getAttachments } from './utils' // Note about junit xml schema: // there are multiple schemas on the internet, and apparently some are more strict than others // we have to use LESS strict schema (see one from Jest, based on Jenkins JUnit schema) // see https://github.com/jest-community/jest-junit/blob/master/__tests__/lib/junit.xsd#L42 - - const stringContent = z.object({ _: z.string().optional(), }) const failureErrorSchema = stringContent.extend({ - $: z.object({ - message: z.string().optional(), - type: z.string().optional(), // type attribute is optional (some test runners like Jest don't include it) - }).optional(), + $: z + .object({ + message: z.string().optional(), + type: z.string().optional(), // type attribute is optional (some test runners like Jest don't include it) + }) + .optional(), }) // As per https://github.com/windyroad/JUnit-Schema/blob/master/JUnit.xsd, only message attribute @@ -29,10 +31,12 @@ const failureErrorSchema = stringContent.extend({ const skippedSchema = z.union([ z.string(), stringContent.extend({ - $: z.object({ - message: z.string().optional(), - }).optional(), - }) + $: z + .object({ + message: z.string().optional(), + }) + .optional(), + }), ]) const testCaseSchema = z.object({ @@ -49,7 +53,7 @@ const testCaseSchema = z.object({ error: z.array(failureErrorSchema).optional(), }) -const xmlSchema = z.object({ +const junitXmlSchema = z.object({ testsuites: z.object({ $: z.object({ name: z.string().optional(), @@ -69,49 +73,43 @@ const xmlSchema = z.object({ }), }) -export type JUnitResultType = 'failure' | 'error' | 'skipped' | 'success' - -export interface JUnitAttachment { - path: string - buffer: Buffer | null - error: Error | null - filename: string -} - -export interface JUnitTestCase extends JUnitResult { - name?: string - folder?: string - logs?: string - attachments: JUnitAttachment[] -} - -export interface ParseResult { - testcases: JUnitTestCase[] 
-} - -export const parseJUnitXml = async (xmlString: string, basePath: string): Promise => { +export const parseJUnitXml: Parser = async ( + xmlString: string, + attachmentBaseDirectory: string +): Promise => { const xmlData = await xml.parseStringPromise(xmlString, { explicitCharkey: true, includeWhiteChars: true, }) - const validated = xmlSchema.parse(xmlData) - const testcases: JUnitTestCase[] = [] - const attachmentsPromises: Array<{ index: number; promise: Promise }> = [] + const validated = junitXmlSchema.parse(xmlData) + const testcases: TestCaseResult[] = [] + const attachmentsPromises: Array<{ + index: number + promise: Promise + }> = [] for (const suite of validated.testsuites.testsuite) { - for (const tcase of suite.testcase??[]) { + for (const tcase of suite.testcase ?? []) { const result = getResult(tcase) const index = testcases.push({ - folder: suite.$.name, - name: tcase.$.name, + folder: suite.$.name ?? '', + name: tcase.$.name ?? '', ...result, attachments: [], }) - 1 - const attachments = getAttachments(tcase, basePath) + + const attachmentPaths = [] + for (const out of tcase['system-out'] || []) { + const text = typeof out === 'string' ? out : out._ ?? '' + if (text) { + attachmentPaths.push(...extractAttachmentPaths(text)) + } + } + attachmentsPromises.push({ index, - promise: attachments, + promise: getAttachments(attachmentPaths, attachmentBaseDirectory), }) } } @@ -122,20 +120,17 @@ export const parseJUnitXml = async (xmlString: string, basePath: string): Promis testcases[tcaseIndex].attachments = tcaseAttachment }) - return { testcases } -} - -interface JUnitResult { - type: JUnitResultType - message?: string + return testcases } -const getResult = (tcase: z.infer): JUnitResult => { +const getResult = ( + tcase: z.infer +): { status: ResultStatus; message: string } => { const err = tcase['system-err'] || [] const out = tcase['system-out'] || [] if (tcase.error) return { - type: 'error', + status: 'blocked', message: getResultMessage( { result: tcase.error, type: 'code' }, { result: out, type: 'code' }, @@ -144,7 +139,7 @@ const getResult = (tcase: z.infer): JUnitResult => { } if (tcase.failure) return { - type: 'failure', + status: 'failed', message: getResultMessage( { result: tcase.failure, type: 'code' }, { result: out, type: 'code' }, @@ -153,7 +148,7 @@ const getResult = (tcase: z.infer): JUnitResult => { } if (tcase.skipped) return { - type: 'skipped', + status: 'skipped', message: getResultMessage( { result: tcase.skipped, type: 'code' }, { result: out, type: 'code' }, @@ -161,21 +156,21 @@ const getResult = (tcase: z.infer): JUnitResult => { ), } return { - type: 'success', + status: 'passed', message: getResultMessage({ result: out, type: 'code' }, { result: err, type: 'code' }), } } interface GetResultMessageOption { result?: ( - string | - Partial> | - Partial> + | string + | Partial> + | Partial> )[] type?: 'paragraph' | 'code' } -const getResultMessage = (...options: GetResultMessageOption[]): string | undefined => { +const getResultMessage = (...options: GetResultMessageOption[]): string => { let message = '' options.forEach((option) => { option.result?.forEach((r) => { @@ -195,52 +190,6 @@ const getResultMessage = (...options: GetResultMessageOption[]): string | undefi return message } -const getAttachments = async ( - tcase: z.infer, - basePath: string -): Promise => { - const out = tcase['system-out'] || [] - const attachments: JUnitAttachment[] = [] - const promises: Array<{ file: Promise; path: string; filename: string }> = [] - - for (const 
contents of out) { - const text = typeof contents === 'string' ? contents : contents._ ?? '' - if (text) { - const paths = extractAttachmentPaths(text) - paths.forEach((p) => - promises.push({ - file: getFile(p, basePath), - path: p, - filename: basename(p), - }) - ) - } - } - - const files = await Promise.allSettled(promises.map((p) => p.file)) - files.forEach((p, i) => { - const path = promises[i].path - const filename = promises[i].filename - if (p.status === 'fulfilled') { - attachments.push({ - buffer: p.value, - path, - error: null, - filename: filename, - }) - } else { - attachments.push({ - buffer: null, - path, - error: p.reason, - filename: filename, - }) - } - }) - - return attachments -} - const extractAttachmentPaths = (content: string) => { const regex = /^\[\[ATTACHMENT\|(.+)\]\]$/gm const matches = content.matchAll(regex) @@ -250,21 +199,3 @@ const extractAttachmentPaths = (content: string) => { }) return paths } - -const getFile = async (filePath: string, basePath: string): Promise => { - try { - const file = readFile(path.join(basePath, filePath)) - return file - } catch (e) { - if ( - e && - typeof e === 'object' && - 'code' in e && - typeof e.code === 'string' && - e.code === 'ENOENT' - ) { - throw new Error(`Attachment not found: "${filePath}"`) - } - throw e - } -} diff --git a/src/utils/result-upload/playwrightJsonParser.ts b/src/utils/result-upload/playwrightJsonParser.ts new file mode 100644 index 0000000..44c6351 --- /dev/null +++ b/src/utils/result-upload/playwrightJsonParser.ts @@ -0,0 +1,220 @@ +import z from 'zod' +import escapeHtml from 'escape-html' +import stripAnsi from 'strip-ansi' +import { Attachment, TestCaseResult } from './types' +import { Parser } from './ResultUploadCommandHandler' +import { ResultStatus } from '../../api/schemas' +import { parseTCaseUrl } from '../misc' +import { getAttachments } from './utils' + +// Schema definition as per https://github.com/microsoft/playwright/blob/main/packages/playwright/types/testReporter.d.ts + +const expectedStatusSchema = z.enum(['passed', 'failed', 'interrupted', 'skipped', 'timedOut']) + +const statusSchema = z.enum(['expected', 'unexpected', 'flaky', 'skipped']) +type Status = z.infer + +const reportErrorSchema = z.object({ + message: z.string(), +}) + +const stdioEntrySchema = z.union([z.object({ text: z.string() }), z.object({ buffer: z.string() })]) + +const annotationSchema = z.object({ + type: z.string(), + description: z.string().optional(), +}) +type Annotation = z.infer + +const attachmentSchema = z.object({ + name: z.string(), + contentType: z.string(), + path: z.string().optional(), + body: z.string().optional(), +}) + +const resultSchema = z.object({ + status: expectedStatusSchema.optional(), + errors: reportErrorSchema.array(), + stdout: stdioEntrySchema.array(), + stderr: stdioEntrySchema.array(), + retry: z.number(), + attachments: attachmentSchema.array().optional(), + annotations: annotationSchema.array().optional(), +}) +type Result = z.infer + +const testSchema = z.object({ + annotations: annotationSchema.array(), + expectedStatus: expectedStatusSchema, + projectName: z.string(), + results: resultSchema.array(), + status: statusSchema, +}) + +const specSchema = z.object({ + title: z.string(), + tags: z.string().array(), + tests: testSchema.array(), +}) +type Spec = z.infer + +interface Suite { + title: string + specs: Spec[] + suites?: Suite[] +} + +const suiteSchema: z.ZodType = z.object({ + title: z.string(), + specs: specSchema.array(), + suites: z + .lazy(() => suiteSchema) + 
.array() + .optional(), +}) + +const playwrightJsonSchema = z.object({ + suites: suiteSchema.array(), +}) + +export const parsePlaywrightJson: Parser = async ( + jsonString: string, + attachmentBaseDirectory: string +): Promise => { + const jsonData = JSON.parse(jsonString) + const validated = playwrightJsonSchema.parse(jsonData) + const testcases: TestCaseResult[] = [] + const attachmentsPromises: Array<{ + index: number + promise: Promise + }> = [] + + const processSuite = async (suite: Suite, topLevelSuite: string, titlePrefix: string) => { + // Process specs in this suite + for (const spec of suite.specs || []) { + const test = spec.tests[0] // Why is tests an array? + const result = test.results.at(-1) // There can be multiple results due to retries, use the last one + + if (!result) { + return // Can this happen? + } + + const markerFromAnnotations = getTCaseMarkerFromAnnotations(test.annotations) // What about result.annotations? + const numTestcases = testcases.push({ + // Use markerFromAnnotations as name prefix, so that it takes precedence over any + // other marker present. Prefixing it to name also helps in detectProjectCode + name: markerFromAnnotations + ? `${markerFromAnnotations}: ${titlePrefix}${spec.title}` + : `${titlePrefix}${spec.title}`, + folder: topLevelSuite, + status: mapPlaywrightStatus(test.status), + message: buildMessage(result), + attachments: [], + }) + + const attachmentPaths = [] + for (const out of result.attachments || []) { + if (out.path) { + attachmentPaths.push(out.path) + } + } + attachmentsPromises.push({ + index: numTestcases - 1, + // Attachment paths are absolute, but in tests we are using relative paths + promise: getAttachments( + attachmentPaths, + attachmentPaths[0]?.startsWith('/') ? undefined : attachmentBaseDirectory + ), + }) + } + + // Recursively process nested suites + for (const nestedSuite of suite.suites || []) { + await processSuite(nestedSuite, topLevelSuite, `${titlePrefix}${nestedSuite.title} › `) + } + } + + for (const suite of validated.suites) { + // Top level suites in Playwright JSON are equivalent to test suites in JUnit XML, which are used + // to populate TestCaseResult.folder property. The title of nested suites are used as prefix for + // TestCaseResult.name for nested specs (similar to JUnit XML) + await processSuite({ ...suite, title: '' }, suite.title, '') + } + + const attachments = await Promise.all(attachmentsPromises.map((p) => p.promise)) + attachments.forEach((tcaseAttachment, i) => { + const tcaseIndex = attachmentsPromises[i].index + testcases[tcaseIndex].attachments = tcaseAttachment + }) + + return testcases +} + +const getTCaseMarkerFromAnnotations = (annotations: Annotation[]) => { + for (const annotation of annotations) { + if (annotation.type.toLowerCase().includes('test case') && annotation.description) { + const res = parseTCaseUrl(annotation.description) + if (res) { + return `${res.project}-${res.tcaseSeq.toString().padStart(3, '0')}` + } + } + } +} + +const mapPlaywrightStatus = (status: Status): ResultStatus => { + switch (status) { + case 'expected': + return 'passed' + case 'unexpected': + return 'failed' + case 'flaky': + return 'passed' // Flaky means test passed but on retries + case 'skipped': + return 'skipped' + default: + return 'passed' // Default to passed + } +} + +const buildMessage = (result: Result) => { + let message = '' + + if (result.retry) { + message += `

<b>Test passed in ${result.retry + 1} attempts</b>` + } + + if (result.errors.length > 0) { + message += '<br><b>Errors:</b><br>' + result.errors.forEach((error) => { + if (error.message) { + const cleanMessage = stripAnsi(error.message) + message += `<pre>${escapeHtml(cleanMessage)}</pre>` + } + }) + } + + if (result.stdout.length > 0) { + message += '<br><b>Output:</b><br>' + result.stdout.forEach((out) => { + const content = 'text' in out ? out.text : out.buffer + if (content) { + const cleanContent = stripAnsi(content) + message += `<pre>${escapeHtml(cleanContent)}</pre>` + } + }) + } + + if (result.stderr.length > 0) { + message += '<br><b>Errors (stderr):</b><br>' + result.stderr.forEach((err) => { + const content = 'text' in err ? err.text : err.buffer + if (content) { + const cleanContent = stripAnsi(content) + message += `<pre>${escapeHtml(cleanContent)}</pre>
` + } + }) + } + + return message +} diff --git a/src/utils/result-upload/types.ts b/src/utils/result-upload/types.ts new file mode 100644 index 0000000..0a51eb6 --- /dev/null +++ b/src/utils/result-upload/types.ts @@ -0,0 +1,18 @@ +import { ResultStatus } from '../../api/schemas' + +export interface Attachment { + filename: string + buffer: Buffer | null + error: Error | null +} + +export interface TestCaseResult { + // Name of the test case extracted from the report. In case of nested suites, it might contain name of + // parent suites as well. Useful for logging and extracting QA Sphere sequence number for the test case + name: string + // Name of the test file (or the suite) to which the test belongs, useful for logging purposes + folder: string + status: ResultStatus + message: string + attachments: Attachment[] +} diff --git a/src/utils/result-upload/utils.ts b/src/utils/result-upload/utils.ts new file mode 100644 index 0000000..db4f748 --- /dev/null +++ b/src/utils/result-upload/utils.ts @@ -0,0 +1,33 @@ +import { readFile } from 'fs/promises' +import path, { basename } from 'path' +import { Attachment } from './types' + +const getFile = async (filePath: string, basePath?: string): Promise => { + try { + return readFile(basePath ? path.join(basePath, filePath) : filePath) + } catch (e) { + if ( + e && + typeof e === 'object' && + 'code' in e && + typeof e.code === 'string' && + e.code === 'ENOENT' + ) { + throw new Error(`Attachment not found: "${filePath}"`) + } + throw e + } +} + +export const getAttachments = async ( + filePaths: string[], + basePath?: string +): Promise => { + return Promise.allSettled(filePaths.map((p) => getFile(p, basePath))).then((results) => { + return results.map((p, i) => ({ + filename: basename(filePaths[i]), + buffer: p.status === 'fulfilled' ? p.value : null, + error: p.status === 'fulfilled' ? 
null : p.reason, + })) + }) +} diff --git a/src/utils/version.ts b/src/utils/version.ts index c033d22..6b6a8d4 100644 --- a/src/utils/version.ts +++ b/src/utils/version.ts @@ -5,37 +5,37 @@ import { existsSync, readFileSync } from 'fs' const FALLBACK_VERSION = '0-hpsq-unknown' function tryReadPackageJson(path: string): string | null { - try { - if (existsSync(path)) { - const content = readFileSync(path, 'utf8') - const pkg = JSON.parse(content) - if (pkg.version && typeof pkg.version === 'string') { - return pkg.version - } - } - } catch { - // Silently fail - } - return null + try { + if (existsSync(path)) { + const content = readFileSync(path, 'utf8') + const pkg = JSON.parse(content) + if (pkg.version && typeof pkg.version === 'string') { + return pkg.version + } + } + } catch { + // Silently fail + } + return null } export function getVersion(): string { - try { - const __filename = fileURLToPath(import.meta.url) - let currentDir = dirname(__filename) - - for (let i = 0; i < 5; i++) { - const packagePath = join(currentDir, 'package.json') - const version = tryReadPackageJson(packagePath) - if (version) return version - - const parentDir = dirname(currentDir) - if (parentDir === currentDir) break // Reached root - currentDir = parentDir - } - } catch { - // fileURLToPath might fail in some environments - } + try { + const __filename = fileURLToPath(import.meta.url) + let currentDir = dirname(__filename) - return FALLBACK_VERSION -} \ No newline at end of file + for (let i = 0; i < 5; i++) { + const packagePath = join(currentDir, 'package.json') + const version = tryReadPackageJson(packagePath) + if (version) return version + + const parentDir = dirname(currentDir) + if (parentDir === currentDir) break // Reached root + currentDir = parentDir + } + } catch { + // fileURLToPath might fail in some environments + } + + return FALLBACK_VERSION +}
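For reference, a minimal sketch of how a test suite might produce the Playwright JSON report that the new `playwright-json-upload` command parses. The reporter entry follows Playwright's documented JSON reporter; the output file name is an illustrative assumption.

```typescript
// playwright.config.ts — emit the machine-readable report consumed by the QAS CLI (sketch)
import { defineConfig } from '@playwright/test'

export default defineConfig({
  // 'json' is Playwright's built-in JSON reporter; outputFile is where the report is written
  reporter: [['json', { outputFile: 'results.json' }]],
})
```

The resulting file can then be uploaded with, for example, `qasphere playwright-json-upload --attachments ./results.json`.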