diff --git a/.github/linters/.jscpd.json b/.github/linters/.jscpd.json
index 23970e85..3bbc16c5 100644
--- a/.github/linters/.jscpd.json
+++ b/.github/linters/.jscpd.json
@@ -4,7 +4,8 @@
     "consoleFull"
   ],
   "ignore": [
-    "**/tests/**"
+    "**/tests/**",
+    "**/.github/workflows/Action-Test**"
   ],
   "absolute": true
 }
diff --git a/.github/workflows/Action-Test.yml b/.github/workflows/Action-Test.yml
index 66a407e8..f352202b 100644
--- a/.github/workflows/Action-Test.yml
+++ b/.github/workflows/Action-Test.yml
@@ -21,8 +21,11 @@ jobs:
     name: Action-Test - [1-Simple]
     runs-on: ubuntu-latest
     outputs:
-      outcome: ${{ steps.action-test.outcome }}
-      conclusion: ${{ steps.action-test.conclusion }}
+      Outcome: ${{ steps.action-test.outcome }}
+      Conclusion: ${{ steps.action-test.conclusion }}
+      Executed: ${{ steps.action-test.outputs.Executed }}
+      Result: ${{ steps.action-test.outputs.Result }}
+
     steps:
       # Need to check out as part of the test, as its a local action
       - name: Checkout repo
@@ -44,8 +47,11 @@
     name: Action-Test - [1-Simple-Failure]
     runs-on: ubuntu-latest
     outputs:
-      outcome: ${{ steps.action-test.outcome }}
-      conclusion: ${{ steps.action-test.conclusion }}
+      Outcome: ${{ steps.action-test.outcome }}
+      Conclusion: ${{ steps.action-test.conclusion }}
+      Executed: ${{ steps.action-test.outputs.Executed }}
+      Result: ${{ steps.action-test.outputs.Result }}
+
     steps:
       # Need to check out as part of the test, as its a local action
      - name: Checkout repo
@@ -64,12 +70,42 @@
           Write-Host "Outcome: ${{ steps.action-test.outcome }}"
           Write-Host "Conclusion: ${{ steps.action-test.conclusion }}"
 
+  ActionTest1SimpleExecutionFailure:
+    name: Action-Test - [1-Simple-ExecutionFailure]
+    runs-on: ubuntu-latest
+    outputs:
+      Outcome: ${{ steps.action-test.outcome }}
+      Conclusion: ${{ steps.action-test.conclusion }}
+      Executed: ${{ steps.action-test.outputs.Executed }}
+      Result: ${{ steps.action-test.outputs.Result }}
+
+    steps:
+      # Need to check out as part of the test, as it's a local action
+      - name: Checkout repo
+        uses: actions/checkout@v4
+
+      - name: Action-Test [1-Simple-ExecutionFailure]
+        uses: ./
+        id: action-test
+        continue-on-error: true
+        with:
+          WorkingDirectory: tests/1-Simple-ExecutionFailure
+
+      - name: Status
+        shell: pwsh
+        run: |
+          Write-Host "Outcome: ${{ steps.action-test.outcome }}"
+          Write-Host "Conclusion: ${{ steps.action-test.conclusion }}"
+
   ActionTest2Standard:
     name: Action-Test - [2-Standard]
     runs-on: ubuntu-latest
     outputs:
-      outcome: ${{ steps.action-test.outcome }}
-      conclusion: ${{ steps.action-test.conclusion }}
+      Outcome: ${{ steps.action-test.outcome }}
+      Conclusion: ${{ steps.action-test.conclusion }}
+      Executed: ${{ steps.action-test.outputs.Executed }}
+      Result: ${{ steps.action-test.outputs.Result }}
+
     steps:
       # Need to check out as part of the test, as its a local action
       - name: Checkout repo
@@ -92,8 +128,11 @@
     name: Action-Test - [3-Advanced]
     runs-on: ubuntu-latest
     outputs:
-      outcome: ${{ steps.action-test.outcome }}
-      conclusion: ${{ steps.action-test.conclusion }}
+      Outcome: ${{ steps.action-test.outcome }}
+      Conclusion: ${{ steps.action-test.conclusion }}
+      Executed: ${{ steps.action-test.outputs.Executed }}
+      Result: ${{ steps.action-test.outputs.Result }}
+
     steps:
       # Need to check out as part of the test, as its a local action
       - name: Checkout repo
@@ -117,6 +156,7 @@
     needs:
       - ActionTest1Simple
      - ActionTest1SimpleFailure
+      - ActionTest1SimpleExecutionFailure
       - ActionTest2Standard
       - ActionTest3Advanced
     if: always()
@@ -129,82 +169,171 @@ jobs:
           Install-PSResource -Name Markdown -Repository PSGallery -TrustRepository
 
           # Build an array of objects for each job
-          $ActionTest1SimpleOutcome = "${{ needs.ActionTest1Simple.outputs.outcome }}"
-          $ActionTest1SimpleExpectedOutcome = "success"
-          $ActionTest1SimpleOutcomeResult = $ActionTest1SimpleOutcome -eq $ActionTest1SimpleExpectedOutcome
-          $ActionTest1SimpleConclusion = "${{ needs.ActionTest1Simple.outputs.conclusion }}"
-          $ActionTest1SimpleExpectedConclusion = "success"
-          $ActionTest1SimpleConclusionResult = $ActionTest1SimpleConclusion -eq $ActionTest1SimpleExpectedConclusion
-          $ActionTest1SimpleFailureOutcome = "${{ needs.ActionTest1SimpleFailure.outputs.outcome }}"
-          $ActionTest1SimpleFailureExpectedOutcome = "failure"
-          $ActionTest1SimpleFailureOutcomeResult = $ActionTest1SimpleFailureOutcome -eq $ActionTest1SimpleFailureExpectedOutcome
-          $ActionTest1SimpleFailureConclusion = "${{ needs.ActionTest1SimpleFailure.outputs.conclusion }}"
-          $ActionTest1SimpleFailureExpectedConclusion = "success"
-          $ActionTest1SimpleFailureConclusionResult = $ActionTest1SimpleFailureConclusion -eq $ActionTest1SimpleFailureExpectedConclusion
-          $ActionTest2StandardOutcome = "${{ needs.ActionTest2Standard.outputs.outcome }}"
-          $ActionTest2StandardExpectedOutcome = "success"
-          $ActionTest2StandardOutcomeResult = $ActionTest2StandardOutcome -eq $ActionTest2StandardExpectedOutcome
-          $ActionTest2StandardConclusion = "${{ needs.ActionTest2Standard.outputs.conclusion }}"
-          $ActionTest2StandardExpectedConclusion = "success"
-          $ActionTest2StandardConclusionResult = $ActionTest2StandardConclusion -eq $ActionTest2StandardExpectedConclusion
-          $ActionTest3AdvancedOutcome = "${{ needs.ActionTest3Advanced.outputs.outcome }}"
-          $ActionTest3AdvancedExpectedOutcome = "success"
-          $ActionTest3AdvancedOutcomeResult = $ActionTest3AdvancedOutcome -eq $ActionTest3AdvancedExpectedOutcome
-          $ActionTest3AdvancedConclusion = "${{ needs.ActionTest3Advanced.outputs.conclusion }}"
-          $ActionTest3AdvancedExpectedConclusion = "success"
-          $ActionTest3AdvancedConclusionResult = $ActionTest3AdvancedConclusion -eq $ActionTest3AdvancedExpectedConclusion
+          $ActionTest1SimpleOutcome = '${{ needs.ActionTest1Simple.outputs.outcome }}'
+          $ActionTest1SimpleOutcomeExpected = 'success'
+          $ActionTest1SimpleOutcomeResult = $ActionTest1SimpleOutcome -eq $ActionTest1SimpleOutcomeExpected
+          $ActionTest1SimpleConclusion = '${{ needs.ActionTest1Simple.outputs.conclusion }}'
+          $ActionTest1SimpleConclusionExpected = 'success'
+          $ActionTest1SimpleConclusionResult = $ActionTest1SimpleConclusion -eq $ActionTest1SimpleConclusionExpected
+          $ActionTest1SimpleExecuted = '${{ needs.ActionTest1Simple.outputs.Executed }}'
+          $ActionTest1SimpleExecutedExpected = 'True'
+          $ActionTest1SimpleExecutedResult = $ActionTest1SimpleExecuted -eq $ActionTest1SimpleExecutedExpected
+          $ActionTest1SimpleResult = '${{ needs.ActionTest1Simple.outputs.Result }}'
+          $ActionTest1SimpleResultExpected = 'Passed'
+          $ActionTest1SimpleResultResult = $ActionTest1SimpleResult -eq $ActionTest1SimpleResultExpected
+
+          $ActionTest1SimpleFailureOutcome = '${{ needs.ActionTest1SimpleFailure.outputs.outcome }}'
+          $ActionTest1SimpleFailureOutcomeExpected = 'failure'
+          $ActionTest1SimpleFailureOutcomeResult = $ActionTest1SimpleFailureOutcome -eq $ActionTest1SimpleFailureOutcomeExpected
+          $ActionTest1SimpleFailureConclusion = '${{ needs.ActionTest1SimpleFailure.outputs.conclusion }}'
+          $ActionTest1SimpleFailureConclusionExpected = 'success'
+          $ActionTest1SimpleFailureConclusionResult = $ActionTest1SimpleFailureConclusion -eq $ActionTest1SimpleFailureConclusionExpected
+          $ActionTest1SimpleFailureExecuted = '${{ needs.ActionTest1SimpleFailure.outputs.Executed }}'
+          $ActionTest1SimpleFailureExecutedExpected = 'True'
+          $ActionTest1SimpleFailureExecutedResult = $ActionTest1SimpleFailureExecuted -eq $ActionTest1SimpleFailureExecutedExpected
+          $ActionTest1SimpleFailureResult = '${{ needs.ActionTest1SimpleFailure.outputs.Result }}'
+          $ActionTest1SimpleFailureResultExpected = 'Failed'
+          $ActionTest1SimpleFailureResultResult = $ActionTest1SimpleFailureResult -eq $ActionTest1SimpleFailureResultExpected
+
+          $ActionTest1SimpleExecutionFailureOutcome = '${{ needs.ActionTest1SimpleExecutionFailure.outputs.outcome }}'
+          $ActionTest1SimpleExecutionFailureOutcomeExpected = 'failure'
+          $ActionTest1SimpleExecutionFailureOutcomeResult = $ActionTest1SimpleExecutionFailureOutcome -eq $ActionTest1SimpleExecutionFailureOutcomeExpected
+          $ActionTest1SimpleExecutionFailureConclusion = '${{ needs.ActionTest1SimpleExecutionFailure.outputs.conclusion }}'
+          $ActionTest1SimpleExecutionFailureConclusionExpected = 'success'
+          $ActionTest1SimpleExecutionFailureConclusionResult = $ActionTest1SimpleExecutionFailureConclusion -eq $ActionTest1SimpleExecutionFailureConclusionExpected
+          $ActionTest1SimpleExecutionFailureExecuted = '${{ needs.ActionTest1SimpleExecutionFailure.outputs.Executed }}'
+          $ActionTest1SimpleExecutionFailureExecutedExpected = 'False'
+          $ActionTest1SimpleExecutionFailureExecutedResult = $ActionTest1SimpleExecutionFailureExecuted -eq $ActionTest1SimpleExecutionFailureExecutedExpected
+          $ActionTest1SimpleExecutionFailureResult = '${{ needs.ActionTest1SimpleExecutionFailure.outputs.Result }}'
+          $ActionTest1SimpleExecutionFailureResultExpected = ''
+          $ActionTest1SimpleExecutionFailureResultResult = $ActionTest1SimpleExecutionFailureResult -eq $ActionTest1SimpleExecutionFailureResultExpected
+
+          $ActionTest2StandardOutcome = '${{ needs.ActionTest2Standard.outputs.outcome }}'
+          $ActionTest2StandardOutcomeExpected = 'success'
+          $ActionTest2StandardOutcomeResult = $ActionTest2StandardOutcome -eq $ActionTest2StandardOutcomeExpected
+          $ActionTest2StandardConclusion = '${{ needs.ActionTest2Standard.outputs.conclusion }}'
+          $ActionTest2StandardConclusionExpected = 'success'
+          $ActionTest2StandardConclusionResult = $ActionTest2StandardConclusion -eq $ActionTest2StandardConclusionExpected
+          $ActionTest2StandardExecuted = '${{ needs.ActionTest2Standard.outputs.Executed }}'
+          $ActionTest2StandardExecutedExpected = 'True'
+          $ActionTest2StandardExecutedResult = $ActionTest2StandardExecuted -eq $ActionTest2StandardExecutedExpected
+          $ActionTest2StandardResult = '${{ needs.ActionTest2Standard.outputs.Result }}'
+          $ActionTest2StandardResultExpected = 'Passed'
+          $ActionTest2StandardResultResult = $ActionTest2StandardResult -eq $ActionTest2StandardResultExpected
+
+          $ActionTest3AdvancedOutcome = '${{ needs.ActionTest3Advanced.outputs.outcome }}'
+          $ActionTest3AdvancedOutcomeExpected = 'success'
+          $ActionTest3AdvancedOutcomeResult = $ActionTest3AdvancedOutcome -eq $ActionTest3AdvancedOutcomeExpected
+          $ActionTest3AdvancedConclusion = '${{ needs.ActionTest3Advanced.outputs.conclusion }}'
+          $ActionTest3AdvancedConclusionExpected = 'success'
+          $ActionTest3AdvancedConclusionResult = $ActionTest3AdvancedConclusion -eq $ActionTest3AdvancedConclusionExpected
+          $ActionTest3AdvancedExecuted = '${{ needs.ActionTest3Advanced.outputs.Executed }}'
+          $ActionTest3AdvancedExecutedExpected = 'True'
+          $ActionTest3AdvancedExecutedResult = $ActionTest3AdvancedExecuted -eq $ActionTest3AdvancedExecutedExpected
+          $ActionTest3AdvancedResult = '${{ needs.ActionTest3Advanced.outputs.Result }}'
+          $ActionTest3AdvancedResultExpected = 'Passed'
+          $ActionTest3AdvancedResultResult = $ActionTest3AdvancedResult -eq $ActionTest3AdvancedResultExpected
 
           $jobs = @(
               [PSCustomObject]@{
                   Name = "Action-Test - [1-Simple]"
                   Outcome = $ActionTest1SimpleOutcome
-                  ExpectedOutcome = $ActionTest1SimpleExpectedOutcome
-                  PassedOutcome = $ActionTest1SimpleOutcomeResult
+                  OutcomeExpected = $ActionTest1SimpleOutcomeExpected
+                  OutcomeResult = $ActionTest1SimpleOutcomeResult
                   Conclusion = $ActionTest1SimpleConclusion
-                  ExpectedConclusion = $ActionTest1SimpleExpectedConclusion
-                  PassedConclusion = $ActionTest1SimpleConclusionResult
+                  ConclusionExpected = $ActionTest1SimpleConclusionExpected
+                  ConclusionResult = $ActionTest1SimpleConclusionResult
+                  Executed = $ActionTest1SimpleExecuted
+                  ExecutedExpected = $ActionTest1SimpleExecutedExpected
+                  ExecutedResult = $ActionTest1SimpleExecutedResult
+                  Result = $ActionTest1SimpleResult
+                  ResultExpected = $ActionTest1SimpleResultExpected
+                  ResultResult = $ActionTest1SimpleResultResult
               },
               [PSCustomObject]@{
                   Name = "Action-Test - [1-Simple-Failure]"
                   Outcome = $ActionTest1SimpleFailureOutcome
-                  ExpectedOutcome = $ActionTest1SimpleFailureExpectedOutcome
-                  PassedOutcome = $ActionTest1SimpleFailureOutcomeResult
+                  OutcomeExpected = $ActionTest1SimpleFailureOutcomeExpected
+                  OutcomeResult = $ActionTest1SimpleFailureOutcomeResult
                   Conclusion = $ActionTest1SimpleFailureConclusion
-                  ExpectedConclusion = $ActionTest1SimpleFailureExpectedConclusion
-                  PassedConclusion = $ActionTest1SimpleFailureConclusionResult
+                  ConclusionExpected = $ActionTest1SimpleFailureConclusionExpected
+                  ConclusionResult = $ActionTest1SimpleFailureConclusionResult
+                  Executed = $ActionTest1SimpleFailureExecuted
+                  ExecutedExpected = $ActionTest1SimpleFailureExecutedExpected
+                  ExecutedResult = $ActionTest1SimpleFailureExecutedResult
+                  Result = $ActionTest1SimpleFailureResult
+                  ResultExpected = $ActionTest1SimpleFailureResultExpected
+                  ResultResult = $ActionTest1SimpleFailureResultResult
+              },
+              [PSCustomObject]@{
+                  Name = "Action-Test - [1-Simple-ExecutionFailure]"
+                  Outcome = $ActionTest1SimpleExecutionFailureOutcome
+                  OutcomeExpected = $ActionTest1SimpleExecutionFailureOutcomeExpected
+                  OutcomeResult = $ActionTest1SimpleExecutionFailureOutcomeResult
+                  Conclusion = $ActionTest1SimpleExecutionFailureConclusion
+                  ConclusionExpected = $ActionTest1SimpleExecutionFailureConclusionExpected
+                  ConclusionResult = $ActionTest1SimpleExecutionFailureConclusionResult
+                  Executed = $ActionTest1SimpleExecutionFailureExecuted
+                  ExecutedExpected = $ActionTest1SimpleExecutionFailureExecutedExpected
+                  ExecutedResult = $ActionTest1SimpleExecutionFailureExecutedResult
+                  Result = $ActionTest1SimpleExecutionFailureResult
+                  ResultExpected = $ActionTest1SimpleExecutionFailureResultExpected
+                  ResultResult = $ActionTest1SimpleExecutionFailureResultResult
              },
               [PSCustomObject]@{
                   Name = "Action-Test - [2-Standard]"
                   Outcome = $ActionTest2StandardOutcome
-                  ExpectedOutcome = $ActionTest2StandardExpectedOutcome
-                  PassedOutcome = $ActionTest2StandardOutcomeResult
+                  OutcomeExpected = $ActionTest2StandardOutcomeExpected
+                  OutcomeResult = $ActionTest2StandardOutcomeResult
                   Conclusion = $ActionTest2StandardConclusion
-                  ExpectedConclusion = $ActionTest2StandardExpectedConclusion
-                  PassedConclusion = $ActionTest2StandardConclusionResult
+                  ConclusionExpected = $ActionTest2StandardConclusionExpected
+                  ConclusionResult = $ActionTest2StandardConclusionResult
+                  Executed = $ActionTest2StandardExecuted
+                  ExecutedExpected = $ActionTest2StandardExecutedExpected
+                  ExecutedResult = $ActionTest2StandardExecutedResult
+                  Result = $ActionTest2StandardResult
+                  ResultExpected = $ActionTest2StandardResultExpected
+                  ResultResult = $ActionTest2StandardResultResult
               },
               [PSCustomObject]@{
                   Name = "Action-Test - [3-Advanced]"
                   Outcome = $ActionTest3AdvancedOutcome
-                  ExpectedOutcome = $ActionTest3AdvancedExpectedOutcome
-                  PassedOutcome = $ActionTest3AdvancedOutcomeResult
+                  OutcomeExpected = $ActionTest3AdvancedOutcomeExpected
+                  OutcomeResult = $ActionTest3AdvancedOutcomeResult
                   Conclusion = $ActionTest3AdvancedConclusion
-                  ExpectedConclusion = $ActionTest3AdvancedExpectedConclusion
-                  PassedConclusion = $ActionTest3AdvancedConclusionResult
+                  ConclusionExpected = $ActionTest3AdvancedConclusionExpected
+                  ConclusionResult = $ActionTest3AdvancedConclusionResult
+                  Executed = $ActionTest3AdvancedExecuted
+                  ExecutedExpected = $ActionTest3AdvancedExecutedExpected
+                  ExecutedResult = $ActionTest3AdvancedExecutedResult
+                  Result = $ActionTest3AdvancedResult
+                  ResultExpected = $ActionTest3AdvancedResultExpected
+                  ResultResult = $ActionTest3AdvancedResultResult
               }
           )
 
           # Display the table in the workflow logs
-          $jobs | Format-List
+          $jobs | Format-List | Out-String
 
           $passed = $true
           $jobs | ForEach-Object {
-              if (-not $_.PassedOutcome) {
-                  Write-Error "Job $($_.Name) failed with Outcome $($_.Outcome) and Expected Outcome $($_.ExpectedOutcome)"
+              if (-not $_.OutcomeResult) {
+                  Write-Error "Job $($_.Name) failed with Outcome $($_.Outcome) and Expected Outcome $($_.OutcomeExpected)"
+                  $passed = $false
+              }
+
+              if (-not $_.ConclusionResult) {
+                  Write-Error "Job $($_.Name) failed with Conclusion $($_.Conclusion) and Expected Conclusion $($_.ConclusionExpected)"
+                  $passed = $false
+              }
+
+              if (-not $_.ExecutedResult) {
+                  Write-Error "Job $($_.Name) not executed as expected. (Actual: $($_.Executed), Expected: $($_.ExecutedExpected))"
                   $passed = $false
               }
 
-              if (-not $_.PassedConclusion) {
-                  Write-Error "Job $($_.Name) failed with Conclusion $($_.Conclusion) and Expected Conclusion $($_.ExpectedConclusion)"
+              if (-not $_.ResultResult) {
                  Write-Error "Job $($_.Name) tests did not pass as expected. (Actual: $($_.Result), Expected: $($_.ResultExpected))"
                   $passed = $false
               }
           }
diff --git a/.github/workflows/Auto-Release.yml b/.github/workflows/Auto-Release.yml
index 1a580b87..db872f59 100644
--- a/.github/workflows/Auto-Release.yml
+++ b/.github/workflows/Auto-Release.yml
@@ -30,5 +30,3 @@ jobs:
 
       - name: Auto-Release
         uses: PSModule/Auto-Release@v1
-        env:
-          GITHUB_TOKEN: ${{ github.token }}
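An editorial aside, not part of the patch above: the Summary job spells out three near-identical comparison triplets for every test job by hand. Under the assumption that each job exports the same four outputs (as they now all do), the whole `needs` context could instead be deserialized once via `toJson(needs)` and checked against a small expectations table. A minimal sketch — the table values mirror the workflow above, but the loop itself is hypothetical:

```powershell
# Hypothetical alternative to the hand-written comparisons in the Summary job.
# The here-string is filled in by the workflow expression ${{ toJson(needs) }}.
$needs = ConvertFrom-Json -InputObject @'
${{ toJson(needs) }}
'@
$expected = @{
    ActionTest1Simple                 = @{ Outcome = 'success'; Conclusion = 'success'; Executed = 'True'; Result = 'Passed' }
    ActionTest1SimpleFailure          = @{ Outcome = 'failure'; Conclusion = 'success'; Executed = 'True'; Result = 'Failed' }
    ActionTest1SimpleExecutionFailure = @{ Outcome = 'failure'; Conclusion = 'success'; Executed = 'False'; Result = '' }
    ActionTest2Standard               = @{ Outcome = 'success'; Conclusion = 'success'; Executed = 'True'; Result = 'Passed' }
    ActionTest3Advanced               = @{ Outcome = 'success'; Conclusion = 'success'; Executed = 'True'; Result = 'Passed' }
}
$passed = $true
foreach ($job in $needs.PSObject.Properties) {
    foreach ($key in $expected[$job.Name].Keys) {
        # Cast to [string] so a missing output compares equal to ''.
        $actual = [string]$job.Value.outputs.$key
        if ($actual -ne $expected[$job.Name][$key]) {
            Write-Error "Job $($job.Name): $key was [$actual], expected [$($expected[$job.Name][$key])]"
            $passed = $false
        }
    }
}
```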
diff --git a/README.md b/README.md
index cda9dea9..2d2aebf0 100644
--- a/README.md
+++ b/README.md
@@ -158,26 +158,33 @@ If you specify `CodeCoverage_Enabled: true` here, it will enable coverage even i
 
 ## How to Determine a Test's Outcome
 
-After running your tests, you can assess the overall result by checking:
-
-- **Outcome:**
-  The step's outcome will be `success` if all tests passed or `failure` if one or more tests failed.
-
-- **Conclusion:**
-  This value provides an overall summary (typically `success` or `failure`) of the test run.
-  Use this with the `continue-on-error` flag to run a separate step to gather results of parallel tests.
+After running your tests, you can assess the overall result by checking the following outputs provided by the action:
+
+- **Outcome**: Indicates the GitHub Action step outcome (`success` or `failure`).
+- **Conclusion**: Provides an overall summary (`success` or `failure`) of the test run.
+- **Executed**: Indicates whether tests were executed (`True` or `False`).
+- **Result**: Overall result of the Pester test run (`Passed` or `Failed`).
+- **PassedCount**: Number of passed tests.
+- **FailedCount**: Number of failed tests.
+- **SkippedCount**: Number of skipped tests.
+- **InconclusiveCount**: Number of inconclusive tests.
+- **NotRunCount**: Number of tests not run.
+- **TotalCount**: Total number of tests executed.
 
 These values are accessible in your workflow using the step's outputs, for example:
 
 ```yaml
 - name: Status
   shell: pwsh
-  env:
-    OUTCOME: ${{ steps.action-test.outcome }}
-    CONCLUSION: ${{ steps.action-test.conclusion }}
   run: |
-    Write-Host "Outcome: [$env:OUTCOME]"
-    Write-Host "Conclusion: [$env:CONCLUSION]"
+    Write-Host "Outcome: [${{ steps.action-test.outputs.Outcome }}]"
+    Write-Host "Conclusion: [${{ steps.action-test.outputs.Conclusion }}]"
+    Write-Host "Executed: [${{ steps.action-test.outputs.Executed }}]"
+    Write-Host "Result: [${{ steps.action-test.outputs.Result }}]"
+    Write-Host "Passed tests: [${{ steps.action-test.outputs.PassedCount }}]"
+    Write-Host "Failed tests: [${{ steps.action-test.outputs.FailedCount }}]"
+    Write-Host "Skipped tests: [${{ steps.action-test.outputs.SkippedCount }}]"
+    Write-Host "Total tests: [${{ steps.action-test.outputs.TotalCount }}]"
 ```
 
 ## Controlling Workflow Execution Based on Test Outcome/Conclusion
@@ -229,7 +236,7 @@ jobs:
         uses: actions/checkout@v4
 
       - name: Run Pester Tests
-        uses: PSModule/Invoke-Pester@v2
+        uses: PSModule/Invoke-Pester@v3
         id: action-test
         continue-on-error: true
         with:
@@ -252,62 +259,148 @@
 *All inputs are optional unless noted otherwise. For more details, refer to the [Pester Configuration documentation](https://pester.dev/docs/usage/configuration).*
 
 `Run.PassThru` is forced to `$true` to ensure the action can capture test results.
 
-| **Input** | **Description** | **Default** |
-|--------------------------------------|----------------------------------------------------------------------------------------------------------|---------------------------------|
-| `Path` | Path to where tests are located or a configuration file. | *(none)* |
-| `ReportAsJson` | Output generated reports in JSON format in addition to the configured format through Pester. | `true` |
-| `Run_Path` | Directories/files to be searched for tests. | *(none)* |
-| `Run_ExcludePath` | Directories/files to exclude from the run. | *(none)* |
-| `Run_ScriptBlock` | ScriptBlocks containing tests to be executed. | *(none)* |
-| `Run_Container` | ContainerInfo objects containing tests to be executed. | *(none)* |
-| `Run_TestExtension` | Filter used to identify test files (e.g. `.Tests.ps1`). | *(none)* |
-| `Run_Exit` | Whether to exit with a non-zero exit code on failure. | *(none)* |
-| `Run_Throw` | Whether to throw an exception on test failure. | *(none)* |
-| `Run_SkipRun` | Discovery only, skip actual test run. | *(none)* |
-| `Run_SkipRemainingOnFailure` | Skips remaining tests after the first failure. Options: `None`, `Run`, `Container`, `Block`. | *(none)* |
-| `Filter_Tag` | Tags of Describe/Context/It blocks to run. | *(none)* |
-| `Filter_ExcludeTag` | Tags of Describe/Context/It blocks to exclude. | *(none)* |
-| `Filter_Line` | Filter by file + scriptblock start line (e.g. `C:\tests\file1.Tests.ps1:37`). | *(none)* |
-| `Filter_ExcludeLine` | Exclude by file + scriptblock start line. Precedence over `Filter_Line`. | *(none)* |
-| `Filter_FullName` | Full name of a test with wildcards, joined by dot. E.g. `*.describe Get-Item.test1` | *(none)* |
-| `CodeCoverage_Enabled` | Enable code coverage. | *(none)* |
-| `CodeCoverage_OutputFormat` | Format for the coverage report. Possible values: `JaCoCo`, `CoverageGutters`, `Cobertura`. | *(none)* |
-| `CodeCoverage_OutputPath` | Where to save the code coverage report (relative to the current dir). | *(none)* |
-| `CodeCoverage_OutputEncoding` | Encoding of the coverage file. | *(none)* |
-| `CodeCoverage_Path` | Files/directories to measure coverage on (by default, reuses `Path` from the general settings). | *(none)* |
-| `CodeCoverage_ExcludeTests` | Exclude tests themselves from coverage. | *(none)* |
-| `CodeCoverage_RecursePaths` | Recurse through coverage directories. | *(none)* |
-| `CodeCoverage_CoveragePercentTarget` | Desired minimum coverage percentage. | *(none)* |
-| `CodeCoverage_UseBreakpoints` | **Experimental**: When `false`, use a Profiler-based tracer instead of breakpoints. | *(none)* |
-| `CodeCoverage_SingleHitBreakpoints` | Remove breakpoints after first hit. | *(none)* |
-| `TestResult_Enabled` | Enable test-result output (e.g. NUnitXml, JUnitXml). | *(none)* |
-| `TestResult_OutputFormat` | Possible values: `NUnitXml`, `NUnit2.5`, `NUnit3`, `JUnitXml`. | *(none)* |
-| `TestResult_OutputPath` | Where to save the test-result report (relative path). | *(none)* |
-| `TestResult_OutputEncoding` | Encoding of the test-result file. | *(none)* |
-| `TestResult_TestSuiteName` | Name used for the root `test-suite` element in the result file. | *(none)* |
-| `Should_ErrorAction` | Controls if `Should` throws on error. Use `Stop` to throw, or `Continue` to fail at the end. | *(none)* |
-| `Debug_ShowFullErrors` | Show Pester internal stack on errors. (Deprecated – overrides `Output.StackTraceVerbosity` to `Full`). | *(none)* |
-| `Debug_WriteDebugMessages` | Write debug messages to screen. | *(none)* |
-| `Debug_WriteDebugMessagesFrom` | Filter debug messages by source. Wildcards allowed. | *(none)* |
-| `Debug_ShowNavigationMarkers` | Write paths after every block/test for easy navigation in Visual Studio Code. | *(none)* |
-| `Debug_ReturnRawResultObject` | Returns an unfiltered result object, for development only. | *(none)* |
-| `Output_Verbosity` | Verbosity: `None`, `Normal`, `Detailed`, `Diagnostic`. | *(none)* |
-| `Output_StackTraceVerbosity` | Stacktrace detail: `None`, `FirstLine`, `Filtered`, `Full`. | *(none)* |
-| `Output_CIFormat` | CI format of error output: `None`, `Auto`, `AzureDevops`, `GithubActions`. | *(none)* |
-| `Output_CILogLevel` | CI log level: `Error` or `Warning`. | *(none)* |
-| `Output_RenderMode` | How to render console output: `Auto`, `Ansi`, `ConsoleColor`, `Plaintext`. | *(none)* |
-| `TestDrive_Enabled` | Enable `TestDrive`. | *(none)* |
-| `TestRegistry_Enabled` | Enable `TestRegistry`. | *(none)* |
-| `Debug` | Enable debug output. | `'false'` |
-| `Verbose` | Enable verbose output. | `'false'` |
-| `Version` | Specifies the exact version of the GitHub module to install. | *(none)* |
-| `Prerelease` | Allow prerelease versions if available. | `'false'` |
-| `WorkingDirectory` | The working directory where the script runs. | `${{ github.workspace }}` |
+| **Input** | **Description** | **Default** |
+|--------------------------------------|----------------------------------------------------------------------------------------------------------|-------------|
+| `Path` | Path to where tests are located or a configuration file. | *(none)* |
+| `ReportAsJson` | Output generated reports in JSON format in addition to the configured format through Pester. | `true` |
+| `Run_Path` | Directories/files to be searched for tests. | *(none)* |
+| `Run_ExcludePath` | Directories/files to exclude from the run. | *(none)* |
+| `Run_ScriptBlock` | ScriptBlocks containing tests to be executed. | *(none)* |
+| `Run_Container` | ContainerInfo objects containing tests to be executed. | *(none)* |
+| `Run_TestExtension` | Filter used to identify test files (e.g. `.Tests.ps1`). | *(none)* |
+| `Run_Exit` | Whether to exit with a non-zero exit code on failure. | *(none)* |
+| `Run_Throw` | Whether to throw an exception on test failure. | *(none)* |
+| `Run_SkipRun` | Discovery only, skip actual test run. | *(none)* |
+| `Run_SkipRemainingOnFailure` | Skips remaining tests after the first failure. Options: `None`, `Run`, `Container`, `Block`. | *(none)* |
+| `Filter_Tag` | Tags of Describe/Context/It blocks to run. | *(none)* |
+| `Filter_ExcludeTag` | Tags of Describe/Context/It blocks to exclude. | *(none)* |
+| `Filter_Line` | Filter by file + scriptblock start line (e.g. `C:\tests\file1.Tests.ps1:37`). | *(none)* |
+| `Filter_ExcludeLine` | Exclude by file + scriptblock start line. Precedence over `Filter_Line`. | *(none)* |
+| `Filter_FullName` | Full name of a test with wildcards, joined by dot. E.g. `*.describe Get-Item.test1` | *(none)* |
+| `CodeCoverage_Enabled` | Enable code coverage. | *(none)* |
+| `CodeCoverage_OutputFormat` | Format for the coverage report. Possible values: `JaCoCo`, `CoverageGutters`, `Cobertura`. | *(none)* |
+| `CodeCoverage_OutputPath` | Where to save the code coverage report (relative to the current dir). | *(none)* |
+| `CodeCoverage_OutputEncoding` | Encoding of the coverage file. | *(none)* |
+| `CodeCoverage_Path` | Files/directories to measure coverage on (by default, reuses `Path` from the general settings). | *(none)* |
+| `CodeCoverage_ExcludeTests` | Exclude tests themselves from coverage. | *(none)* |
+| `CodeCoverage_RecursePaths` | Recurse through coverage directories. | *(none)* |
+| `CodeCoverage_CoveragePercentTarget` | Desired minimum coverage percentage. | *(none)* |
+| `CodeCoverage_UseBreakpoints` | **Experimental**: When `false`, use a Profiler-based tracer instead of breakpoints. | *(none)* |
+| `CodeCoverage_SingleHitBreakpoints` | Remove breakpoints after first hit. | *(none)* |
+| `TestResult_Enabled` | Enable test-result output (e.g. NUnitXml, JUnitXml). | *(none)* |
+| `TestResult_OutputFormat` | Possible values: `NUnitXml`, `NUnit2.5`, `NUnit3`, `JUnitXml`. | *(none)* |
+| `TestResult_OutputPath` | Where to save the test-result report (relative path). | *(none)* |
+| `TestResult_OutputEncoding` | Encoding of the test-result file. | *(none)* |
+| `TestResult_TestSuiteName` | Name used for the root `test-suite` element in the result file. | *(none)* |
+| `Should_ErrorAction` | Controls if `Should` throws on error. Use `Stop` to throw, or `Continue` to fail at the end. | *(none)* |
+| `Debug_ShowFullErrors` | Show Pester internal stack on errors. (Deprecated – overrides `Output.StackTraceVerbosity` to `Full`). | *(none)* |
+| `Debug_WriteDebugMessages` | Write debug messages to screen. | *(none)* |
+| `Debug_WriteDebugMessagesFrom` | Filter debug messages by source. Wildcards allowed. | *(none)* |
+| `Debug_ShowNavigationMarkers` | Write paths after every block/test for easy navigation in Visual Studio Code. | *(none)* |
+| `Debug_ReturnRawResultObject` | Returns an unfiltered result object, for development only. | *(none)* |
+| `Output_Verbosity` | Verbosity: `None`, `Normal`, `Detailed`, `Diagnostic`. | *(none)* |
+| `Output_StackTraceVerbosity` | Stacktrace detail: `None`, `FirstLine`, `Filtered`, `Full`. | *(none)* |
+| `Output_CIFormat` | CI format of error output: `None`, `Auto`, `AzureDevops`, `GithubActions`. | *(none)* |
+| `Output_CILogLevel` | CI log level: `Error` or `Warning`. | *(none)* |
+| `Output_RenderMode` | How to render console output: `Auto`, `Ansi`, `ConsoleColor`, `Plaintext`. | *(none)* |
+| `TestDrive_Enabled` | Enable `TestDrive`. | *(none)* |
+| `TestRegistry_Enabled` | Enable `TestRegistry`. | *(none)* |
+| `Debug` | Enable debug output. | `'false'` |
+| `Verbose` | Enable verbose output. | `'false'` |
+| `Version` | Specifies the exact version of the GitHub module to install. | *(none)* |
+| `Prerelease` | Allow prerelease versions if available. | `'false'` |
+| `WorkingDirectory` | The working directory where the script runs. | `'.'` |
 
 ### Outputs
 
-No outputs are provided directly by this action. Instead, use the step's **outcome** and **conclusion** properties along with the published outputs
-listed above to control the subsequent flow of your workflow.
+The action provides the following outputs:
+
+| Output | Description |
+|--------|-------------|
+| `Outcome` | The outcome of the test run (success/failure) |
+| `Conclusion` | The conclusion of the test run (success/failure) |
+| `Executed` | Whether tests were executed (True/False) |
+| `Result` | Overall result of the Pester test run (Passed/Failed) |
+| `FailedCount` | Number of failed tests |
+| `FailedBlocksCount` | Number of failed blocks |
+| `FailedContainersCount` | Number of failed containers |
+| `PassedCount` | Number of passed tests |
+| `SkippedCount` | Number of skipped tests |
+| `InconclusiveCount` | Number of inconclusive tests |
+| `NotRunCount` | Number of tests not run |
+| `TotalCount` | Total count of tests |
+
+## Examples
+
+### Basic Usage
+
+```yaml
+jobs:
+  test:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Run Pester tests
+        uses: PSModule/Invoke-Pester@v3
+        with:
+          Path: './tests'
+```
+
+### Using Test Results in Subsequent Steps
+
+```yaml
+jobs:
+  test:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Run Pester tests
+        uses: PSModule/Invoke-Pester@v3
+        id: action-test
+        with:
+          Path: './tests'
+          TestResult_Enabled: 'true'
+          TestResult_OutputPath: './test-results.xml'
+
+      - name: Process test results
+        if: always()
+        run: |
+          Write-Output "Total tests: ${{ steps.action-test.outputs.TotalCount }}"
+          Write-Output "Passed tests: ${{ steps.action-test.outputs.PassedCount }}"
+          Write-Output "Failed tests: ${{ steps.action-test.outputs.FailedCount }}"
+          Write-Output "Failed blocks: ${{ steps.action-test.outputs.FailedBlocksCount }}"
+          Write-Output "Failed containers: ${{ steps.action-test.outputs.FailedContainersCount }}"
+          Write-Output "Test outcome: ${{ steps.action-test.outputs.Result }}"
+        shell: pwsh
+
+      - name: Take action based on test outcome
+        if: steps.action-test.outputs.Result == 'Passed'
+        run: echo "All tests passed! Ready to proceed with deployment."
+```
+
+### With Code Coverage
+
+```yaml
+jobs:
+  test:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Run Pester tests with code coverage
+        id: pester
+        uses: PSModule/Invoke-Pester@v3
+        with:
+          Path: './tests'
+          CodeCoverage_Enabled: 'true'
+          CodeCoverage_Path: './src'
+          CodeCoverage_OutputPath: './coverage.xml'
+          CodeCoverage_OutputFormat: 'JaCoCo'
+```
+
+## See Also
 
-The provided example workflows demonstrate how you can use these outputs to control the flow. For instance, the **Status** steps in the test
-workflows print the `outcome` and `conclusion` values, and a later job aggregates these values to decide whether to continue or abort the workflow.
+- [Pester Documentation](https://pester.dev/)
+- [PowerShell Documentation](https://docs.microsoft.com/en-us/powershell/)
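An editorial aside, not part of the patch: the updated README status example interpolates `${{ }}` expressions directly into the `run:` script, whereas the removed version routed them through `env:`. Both work; the `env:` indirection keeps the script body literal, which is safer should an output ever contain quotes or `$`. A sketch of the same step body in that style — the `RESULT` and `TOTAL` names are hypothetical `env:` mappings of the outputs documented above:

```powershell
# Assumes the step maps the outputs into the environment, e.g.:
#   env:
#     RESULT: ${{ steps.action-test.outputs.Result }}
#     TOTAL: ${{ steps.action-test.outputs.TotalCount }}
Write-Host "Result: [$env:RESULT]"
Write-Host "Total tests: [$env:TOTAL]"
```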
diff --git a/action.yml b/action.yml
index 546ac0ac..e6fe3e26 100644
--- a/action.yml
+++ b/action.yml
@@ -205,7 +205,57 @@ inputs:
   WorkingDirectory:
     description: The working directory where the script will run from.
     required: false
-    default: ${{ github.workspace }}
+    default: '.'
+
+outputs:
+  Outcome:
+    description: |
+      The outcome of the test run.
+    value: ${{ steps.test.outcome }}
+  Conclusion:
+    description: |
+      The conclusion of the test run.
+    value: ${{ steps.test.conclusion }}
+  Executed:
+    description: |
+      Whether tests were executed.
+    value: ${{ steps.status.outputs.Executed }}
+  Result:
+    description: |
+      Overall result of the Pester test run (e.g., Passed, Failed).
+    value: ${{ steps.test.outputs.Result }}
+  FailedCount:
+    description: |
+      Number of failed tests.
+    value: ${{ steps.test.outputs.FailedCount }}
+  FailedBlocksCount:
+    description: |
+      Number of failed blocks.
+    value: ${{ steps.test.outputs.FailedBlocksCount }}
+  FailedContainersCount:
+    description: |
+      Number of failed containers.
+    value: ${{ steps.test.outputs.FailedContainersCount }}
+  PassedCount:
+    description: |
+      Number of passed tests.
+    value: ${{ steps.test.outputs.PassedCount }}
+  SkippedCount:
+    description: |
+      Number of skipped tests.
+    value: ${{ steps.test.outputs.SkippedCount }}
+  InconclusiveCount:
+    description: |
+      Number of inconclusive tests.
+    value: ${{ steps.test.outputs.InconclusiveCount }}
+  NotRunCount:
+    description: |
+      Number of tests not run.
+    value: ${{ steps.test.outputs.NotRunCount }}
+  TotalCount:
+    description: |
+      Total count of tests.
+    value: ${{ steps.test.outputs.TotalCount }}
 
 runs:
   using: composite
@@ -268,14 +318,16 @@ runs:
         # Invoke-Pester (init)
         ${{ github.action_path }}/scripts/init.ps1
 
-    - name: Invoke-Pester
+    - name: Invoke-Pester (exec)
       shell: pwsh
      continue-on-error: true
       working-directory: ${{ inputs.WorkingDirectory }}
       env:
         PSMODULE_INVOKE_PESTER_INPUT_ReportAsJson: ${{ inputs.ReportAsJson }}
       id: test
-      run: ${{ github.action_path }}/scripts/main.ps1
+      run: |
+        # Invoke-Pester (exec)
+        ${{ github.action_path }}/scripts/exec.ps1
 
     - name: Upload test results - [${{ steps.test.outputs.TestSuiteName }}-TestResults]
       uses: actions/upload-artifact@v4
@@ -297,10 +349,39 @@ runs:
 
     - name: Status
       shell: pwsh
+      id: status
       working-directory: ${{ inputs.WorkingDirectory }}
+      env:
+        PSMODULE_INVOKE_PESTER_INTERNAL_Executed: ${{ steps.test.outputs.Executed }}
+        PSMODULE_INVOKE_PESTER_INTERNAL_Outcome: ${{ steps.test.outcome }}
+        PSMODULE_INVOKE_PESTER_INTERNAL_Conclusion: ${{ steps.test.conclusion }}
       run: |
-        $outcome = '${{ steps.test.outcome }}'
-        Write-Host "outcome: [$outcome]"
+        # Status
+        $PSStyle.OutputRendering = 'Ansi'
+        $executed = $env:PSMODULE_INVOKE_PESTER_INTERNAL_Executed -eq 'true'
+        $outcome = $env:PSMODULE_INVOKE_PESTER_INTERNAL_Outcome
+        $conclusion = $env:PSMODULE_INVOKE_PESTER_INTERNAL_Conclusion
+
+        "Executed=$executed" >> $env:GITHUB_OUTPUT
+
+        [PSCustomObject]@{
+            Executed   = $executed
+            Outcome    = $outcome
+            Conclusion = $conclusion
+        } | Format-List | Out-String
+
+        # If the tests did not execute, exit with a failure code
+        if ($executed -ne 'true') {
+            Write-Error "Tests did not execute."
+            exit 1
+        }
+        # If the outcome is not success, exit with a failure code
         if ($outcome -ne 'success') {
+            Write-Error "Tests did not pass."
+            exit 1
+        }
+        # If the conclusion is not success
+        if ($conclusion -ne 'success') {
+            Write-Error "Tests did not pass."
           exit 1
         }
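An editorial aside, not part of the patch: in the Status step above, `$executed` is already a `[bool]` after the `-eq 'true'` comparison, so the later `if ($executed -ne 'true')` passes only because PowerShell coerces the non-empty string `'true'` on the right-hand side to `$true`. The guard behaves correctly as written, but testing the boolean directly expresses the same check without relying on coercion; a minimal sketch:

```powershell
# $executed is already a [bool]; no second string comparison is needed.
if (-not $executed) {
    Write-Error 'Tests did not execute.'
    exit 1
}
```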
diff --git a/scripts/main.ps1 b/scripts/exec.ps1
similarity index 86%
rename from scripts/main.ps1
rename to scripts/exec.ps1
index 51d32736..bfc03052 100644
--- a/scripts/main.ps1
+++ b/scripts/exec.ps1
@@ -33,6 +33,9 @@ if (-not (Test-Path -Path $configPath)) {
     exit 1
 }
 Get-Content -Path $configPath -Raw
+'::endgroup::'
+
+'::group::Exec - PesterConfiguration'
 $configuration = . $configPath
 $configuration.Run.Container = @()
 $containerFiles = Get-ChildItem -Path $path -Filter *.Container.* -Recurse | Sort-Object FullName
@@ -99,6 +102,16 @@ LogGroup 'Eval - Set outputs' {
     Set-GitHubOutput -Name 'TestResultOutputPath' -Value $testResultOutputFolderPath
     Set-GitHubOutput -Name 'CodeCoverageEnabled' -Value $testResults.Configuration.CodeCoverage.Enabled.Value
     Set-GitHubOutput -Name 'CodeCoverageOutputPath' -Value $codeCoverageOutputFolderPath
+    Set-GitHubOutput -Name 'Executed' -Value $testResults.Executed
+    Set-GitHubOutput -Name 'Result' -Value $testResults.Result
+    Set-GitHubOutput -Name 'FailedCount' -Value $testResults.FailedCount
+    Set-GitHubOutput -Name 'FailedBlocksCount' -Value $testResults.FailedBlocksCount
+    Set-GitHubOutput -Name 'FailedContainersCount' -Value $testResults.FailedContainersCount
+    Set-GitHubOutput -Name 'PassedCount' -Value $testResults.PassedCount
+    Set-GitHubOutput -Name 'SkippedCount' -Value $testResults.SkippedCount
+    Set-GitHubOutput -Name 'InconclusiveCount' -Value $testResults.InconclusiveCount
+    Set-GitHubOutput -Name 'NotRunCount' -Value $testResults.NotRunCount
+    Set-GitHubOutput -Name 'TotalCount' -Value $testResults.TotalCount
 
     if ($env:PSMODULE_INVOKE_PESTER_INPUT_ReportAsJson -eq 'true' -and $testResults.Configuration.TestResult.Enabled.Value) {
         $jsonOutputPath = $testResults.Configuration.TestResult.OutputPath.Value -Replace '\.xml$', '.json'
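An editorial aside, not part of the patch: the new `Set-GitHubOutput` calls forward properties of the Pester run object, which is available here because the action forces `Run.PassThru` to `$true`. The same properties can be inspected locally outside the action; a minimal sketch, with a hypothetical test path:

```powershell
# With -PassThru, Invoke-Pester (v5) returns the run object whose properties
# exec.ps1 publishes as step outputs.
$run = Invoke-Pester -Path ./tests -PassThru
$run | Select-Object Executed, Result, PassedCount, FailedCount, SkippedCount,
    InconclusiveCount, NotRunCount, TotalCount, FailedBlocksCount, FailedContainersCount
```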
diff --git a/scripts/init.ps1 b/scripts/init.ps1
index 2a6d41b8..b1f76f89 100644
--- a/scripts/init.ps1
+++ b/scripts/init.ps1
@@ -194,20 +194,37 @@ LogGroup 'Init - Export containers' {
     $containerFiles = Get-ChildItem -Path $testDir -Filter *.Container.* -Recurse
     Write-Output "Containers found in [$testDir]: [$($containerFiles.Count)]"
     if ($containerFiles.Count -eq 0) {
-        # Look for test files and make a container for each test file.
-        $testFiles = Get-ChildItem -Path $testDir -Filter *.Tests.ps1 -Recurse
-        Write-Output "Test files found in [$testDir]: [$($testFiles.Count)]"
+        # First, look for test files directly in the test directory (non-recursive)
+        $rootTestFiles = Get-ChildItem -Path $testDir -Filter *.Tests.ps1 -File
+        Write-Output "Root level test files found in [$testDir]: [$($rootTestFiles.Count)]"
+
+        # Then, look for test files in subdirectories
+        $subfolderTestFiles = Get-ChildItem -Path $testDir -Filter *.Tests.ps1 -Recurse -File |
+            Where-Object { $_.DirectoryName -ne $testDir }
+        Write-Output "Subfolder test files found in [$testDir]: [$($subfolderTestFiles.Count)]"
+
+        # Combine all test files using a generic List
+        $testFiles = [System.Collections.Generic.List[System.IO.FileInfo]]::new()
+        if ($rootTestFiles) {
+            $rootTestFiles | ForEach-Object { $testFiles.Add($_) }
+        }
+        if ($subfolderTestFiles) {
+            $subfolderTestFiles | ForEach-Object { $testFiles.Add($_) }
+        }
+        Write-Output "Total test files found in [$testDir]: [$($testFiles.Count)]"
+
         foreach ($testFile in $testFiles) {
             $container = @{
                 Path = $testFile.FullName
             }
+            $containerFileName = ($testFile | Split-Path -Leaf).Replace('.Tests.ps1', '.Container.ps1')
             LogGroup "Init - Export containers - Generated - $containerFileName" {
-                $containerFileName = ($testFile | Split-Path -Leaf).Replace('.Tests.ps1', '.Container.ps1')
                 Write-Output "Exporting container [$path/$containerFileName]"
                 Export-Hashtable -Hashtable $container -Path "$path/$containerFileName"
             }
-            Write-Output "Containers created from test files: [$($containers.Count)]"
+            $containers += $container
         }
+        Write-Output "Containers created from test files: [$($containers.Count)]"
     }
     foreach ($containerFile in $containerFiles) {
         $container = Import-Hashtable $containerFile
@@ -217,6 +234,7 @@ LogGroup 'Init - Export containers' {
             Write-Output "Exporting container [$path/$containerFileName]"
             Export-Hashtable -Hashtable $container -Path "$path/$containerFileName"
         }
+        $containers += $container
     }
 }
 $configuration.Run.Container = @()
diff --git a/tests/1-Simple-ExecutionFailure/ExecutionFailure.ps1 b/tests/1-Simple-ExecutionFailure/ExecutionFailure.ps1
new file mode 100644
index 00000000..f6c246b2
--- /dev/null
+++ b/tests/1-Simple-ExecutionFailure/ExecutionFailure.ps1
@@ -0,0 +1 @@
+"Not a testfile"
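A closing editorial note, not part of the patch: init.ps1 generates one `*.Container.ps1` per `*.Tests.ps1` when no container files exist, and imports hand-written ones otherwise. For context, a container file is simply a hashtable describing one Pester container. A minimal hand-written example — the file name and `Data` values are hypothetical; only `Path` is produced by the generator above:

```powershell
# MyModule.Container.ps1 — imported by init.ps1 via Import-Hashtable and
# turned into a Pester container for the run.
@{
    Path = "$PSScriptRoot/MyModule.Tests.ps1"
    Data = @{
        Setting = 'Value'
    }
}
```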