From 75ed6526dd387e965f12b8ea1bec2903e48eb177 Mon Sep 17 00:00:00 2001 From: Ross Grambo Date: Tue, 6 Jan 2026 11:08:27 -0800 Subject: [PATCH 1/4] Adds test automation and fixes misc. readme issues --- .github/workflows/validation-tests.yml | 306 ++++++++++++++++++++++++ README.md | 2 + libraryValidations/JavaScript/README.md | 2 +- libraryValidations/Python/README.md | 9 +- libraryValidations/Spring/README.md | 2 +- 5 files changed, 316 insertions(+), 5 deletions(-) create mode 100644 .github/workflows/validation-tests.yml diff --git a/.github/workflows/validation-tests.yml b/.github/workflows/validation-tests.yml new file mode 100644 index 0000000..7d6c928 --- /dev/null +++ b/.github/workflows/validation-tests.yml @@ -0,0 +1,306 @@ +name: Validation Tests + +on: + # Run every Monday at 8:00 AM UTC (early Monday morning) + schedule: + - cron: '0 8 * * 1' + # Run on pull requests + pull_request: + branches: + - main + - master + # Allow manual trigger + workflow_dispatch: + +jobs: + dotnet-tests: + name: .NET Tests + runs-on: ubuntu-latest + outputs: + status: ${{ steps.test.outcome }} + results: ${{ steps.test-details.outputs.results }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Setup .NET + uses: actions/setup-dotnet@v4 + with: + dotnet-version: '9.0.x' + + - name: Run .NET tests + id: test + continue-on-error: true + working-directory: ./libraryValidations/Dotnet + run: dotnet test --logger "trx;LogFileName=test-results.trx" --logger "console;verbosity=detailed" + + - name: Parse .NET test results + id: test-details + if: always() + working-directory: ./libraryValidations/Dotnet + run: | + echo 'results<> $GITHUB_OUTPUT + if [ -f "TestResults/test-results.trx" ]; then + # Extract test results from TRX file + python3 << 'PYTHON' + import xml.etree.ElementTree as ET + import json + + tree = ET.parse('TestResults/test-results.trx') + root = tree.getroot() + ns = {'ns': 'http://microsoft.com/schemas/VisualStudio/TeamTest/2010'} + + results = {} + for test in root.findall('.//ns:UnitTestResult', ns): + test_name = test.get('testName') + outcome = test.get('outcome') + results[test_name] = '✅' if outcome == 'Passed' else '❌' + + print(json.dumps(results)) + PYTHON + else + echo "{}" + fi + echo 'EOF' >> $GITHUB_OUTPUT + + python-tests: + name: Python Tests + runs-on: ubuntu-latest + outputs: + status: ${{ steps.test.outcome }} + results: ${{ steps.test-details.outputs.results }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Setup Python + uses: actions/setup-python@v5 + with: + python-version: '3.11' + + - name: Install dependencies + working-directory: ./libraryValidations/Python + run: | + pip install -r requirements.txt + + - name: Run Python tests + id: test + continue-on-error: true + working-directory: ./libraryValidations/Python + run: | + pytest test_json_validations.py --junitxml=results.xml --verbose || true + if [ ! 
-z "${{ secrets.APP_CONFIG_VALIDATION_CONNECTION_STRING }}" ]; then + pytest test_json_validations_with_provider.py --junitxml=results_provider.xml --verbose || true + fi + + - name: Parse Python test results + id: test-details + if: always() + working-directory: ./libraryValidations/Python + run: | + echo 'results<> $GITHUB_OUTPUT + python3 << 'PYTHON' + import xml.etree.ElementTree as ET + import json + import os + + results = {} + for xml_file in ['results.xml', 'results_provider.xml']: + if os.path.exists(xml_file): + tree = ET.parse(xml_file) + root = tree.getroot() + + for testcase in root.findall('.//testcase'): + test_name = testcase.get('name') + # Check if test failed + failed = testcase.find('failure') is not None or testcase.find('error') is not None + results[test_name] = '❌' if failed else '✅' + + print(json.dumps(results)) + PYTHON + echo 'EOF' >> $GITHUB_OUTPUT + + javascript-tests: + name: JavaScript Tests + runs-on: ubuntu-latest + outputs: + status: ${{ steps.test.outcome }} + results: ${{ steps.test-details.outputs.results }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version: '20' + + - name: Install dependencies + working-directory: ./libraryValidations/JavaScript + run: npm install + + - name: Build + working-directory: ./libraryValidations/JavaScript + run: npm run build + + - name: Run JavaScript tests + id: test + continue-on-error: true + working-directory: ./libraryValidations/JavaScript + env: + JEST_JUNIT_OUTPUT_DIR: ./ + JEST_JUNIT_OUTPUT_NAME: results.xml + run: | + npm install --save-dev jest-junit + npm run test -- --reporters=default --reporters=jest-junit || true + + - name: Parse JavaScript test results + id: test-details + if: always() + working-directory: ./libraryValidations/JavaScript + run: | + echo 'results<> $GITHUB_OUTPUT + if [ -f "results.xml" ]; then + python3 << 'PYTHON' + import xml.etree.ElementTree as ET + import json + + tree = ET.parse('results.xml') + root = tree.getroot() + + results = {} + for testcase in root.findall('.//testcase'): + test_name = testcase.get('name') + # Check if test failed + failed = testcase.find('failure') is not None or testcase.find('error') is not None + results[test_name] = '❌' if failed else '✅' + + print(json.dumps(results)) + PYTHON + else + echo "{}" + fi + echo 'EOF' >> $GITHUB_OUTPUT + + spring-tests: + name: Spring Tests + runs-on: ubuntu-latest + outputs: + status: ${{ steps.test.outcome }} + results: ${{ steps.test-details.outputs.results }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Setup Java + uses: actions/setup-java@v4 + with: + java-version: '17' + distribution: 'temurin' + + - name: Run Spring tests + id: test + continue-on-error: true + working-directory: ./libraryValidations/Spring/validation-tests + run: mvn test + + - name: Parse Spring test results + id: test-details + if: always() + working-directory: ./libraryValidations/Spring/validation-tests + run: | + echo 'results<> $GITHUB_OUTPUT + python3 << 'PYTHON' + import xml.etree.ElementTree as ET + import json + import os + import glob + + results = {} + xml_files = glob.glob('target/surefire-reports/TEST-*.xml') + + for xml_file in xml_files: + tree = ET.parse(xml_file) + root = tree.getroot() + + for testcase in root.findall('.//testcase'): + test_name = testcase.get('name') + # Check if test failed + failed = testcase.find('failure') is not None or testcase.find('error') is not None + results[test_name] = '❌' if failed 
else '✅' + + print(json.dumps(results)) + PYTHON + echo 'EOF' >> $GITHUB_OUTPUT + + test-summary: + name: Test Summary + runs-on: ubuntu-latest + needs: [dotnet-tests, python-tests, javascript-tests, spring-tests] + if: always() + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Generate test matrix + id: matrix + run: | + python3 << 'PYTHON' + import json + import os + import glob + + # Parse results from each language + dotnet_results = json.loads('''${{ needs.dotnet-tests.outputs.results }}''') if '${{ needs.dotnet-tests.outputs.results }}' else {} + python_results = json.loads('''${{ needs.python-tests.outputs.results }}''') if '${{ needs.python-tests.outputs.results }}' else {} + javascript_results = json.loads('''${{ needs.javascript-tests.outputs.results }}''') if '${{ needs.javascript-tests.outputs.results }}' else {} + spring_results = json.loads('''${{ needs.spring-tests.outputs.results }}''') if '${{ needs.spring-tests.outputs.results }}' else {} + + # Collect all unique test names across all languages + all_tests = set() + all_tests.update(dotnet_results.keys()) + all_tests.update(python_results.keys()) + all_tests.update(javascript_results.keys()) + all_tests.update(spring_results.keys()) + + # Sort tests for consistent output + sorted_tests = sorted(all_tests) + + # Generate markdown table + with open('summary.md', 'w') as f: + f.write("## 🧪 Validation Test Results\n\n") + f.write("| Test Name | .NET | Python | JavaScript | Spring |\n") + f.write("|-----------|------|--------|------------|--------|\n") + + for test in sorted_tests: + # Get result for each language, default to ⚠️ if not found + dotnet = dotnet_results.get(test, '⚠️') + python = python_results.get(test, '⚠️') + javascript = javascript_results.get(test, '⚠️') + spring = spring_results.get(test, '⚠️') + + f.write(f"| {test} | {dotnet} | {python} | {javascript} | {spring} |\n") + + f.write(f"\n_Workflow run: ${{ github.run_id }}_\n") + + # Print to console + with open('summary.md', 'r') as f: + print(f.read()) + PYTHON + + cat summary.md >> $GITHUB_STEP_SUMMARY + + - name: Comment on PR + if: github.event_name == 'pull_request' + uses: actions/github-script@v7 + with: + script: | + const fs = require('fs'); + const summary = fs.readFileSync('summary.md', 'utf8'); + + github.rest.issues.createComment({ + issue_number: context.issue.number, + owner: context.repo.owner, + repo: context.repo.repo, + body: summary + }); diff --git a/README.md b/README.md index e0dcf5b..ab67b58 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,7 @@ # Microsoft Feature Management +[![Validation Tests](https://github.com/microsoft/FeatureManagement/actions/workflows/validation-tests.yml/badge.svg)](https://github.com/microsoft/FeatureManagement/actions/workflows/validation-tests.yml) + Traditionally, shipping a new application feature requires a complete redeployment of the application itself. Testing a feature often requires multiple deployments of the application. Each deployment might change the feature or expose the feature to different customers for testing. Feature management is a software-development practice that decouples feature release from code deployment and enables quick changes to feature availability on demand. It uses a technique called *feature flags* (also known as *feature toggles* and *feature switches*) to dynamically administer a feature's lifecycle. 
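Note on the output plumbing shared by the jobs above: each `Parse ... test results` step publishes a JSON map of test name to ✅/❌ as a multiline step output named `results`, and the `test-summary` job reads it back through `needs.<job>.outputs.results`. The sketch below is illustrative only (the test names are invented); it shows the documented GitHub Actions convention of appending `name<<delimiter ... delimiter` to the file referenced by `$GITHUB_OUTPUT`, which is what the workflow's `results<<EOF` / `EOF` lines do in shell.

```python
# Illustrative sketch: publish a per-test results map as a multiline
# step output named "results". The name<<EOF ... EOF form appended to
# the file in $GITHUB_OUTPUT is GitHub Actions' documented syntax for
# multiline outputs. The test names below are hypothetical sample data.
import json
import os

results = {"NoFilters": "✅", "TimeWindowFilter": "❌"}

output_path = os.environ.get("GITHUB_OUTPUT")
if output_path:
    with open(output_path, "a", encoding="utf-8") as fh:
        fh.write("results<<EOF\n")
        fh.write(json.dumps(results) + "\n")
        fh.write("EOF\n")
else:
    # Outside of GitHub Actions, just show what would be written.
    print(json.dumps(results))
```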
diff --git a/libraryValidations/JavaScript/README.md b/libraryValidations/JavaScript/README.md index 534bd62..5b76bac 100644 --- a/libraryValidations/JavaScript/README.md +++ b/libraryValidations/JavaScript/README.md @@ -1,6 +1,6 @@ # JavaScript Feature Management Validation Tests -This directory contains test cases to verify that the correctness of the latest JS Feature Management library against the files in the `Samples` directory. +This directory contains test cases to verify the correctness of the JavaScript Feature Management library against the files in the `Samples` directory. ## Running the test diff --git a/libraryValidations/Python/README.md b/libraryValidations/Python/README.md index 59354cc..ec5cbf7 100644 --- a/libraryValidations/Python/README.md +++ b/libraryValidations/Python/README.md @@ -1,6 +1,6 @@ # Python Validation Tests -This directory contains a Python script that can be used to validate the correctness of the library against the files in the `Samples` directory. +This directory contains Python scripts that can be used to validate the correctness of the library against the files in the `Samples` directory. ## Prerequisites @@ -8,13 +8,16 @@ This directory contains a Python script that can be used to validate the correct ## Running the tests -To run the tests, execute the following command: +To run the tests, execute the following commands: ```bash pip install -r requirements.txt pytest test_json_validations.py +pytest test_json_validations_with_provider.py ``` +Note: Tests in `test_json_validations_with_provider.py` require the `APP_CONFIG_VALIDATION_CONNECTION_STRING` environment variable to be set. + ## Update to run more tests -To add more tests, after creating the required json files in the `Samples` directory, add a new test method in the `test_json_validations.py` file. The test method should be named as `test_`. The test method needs to call the `runs_tests` method with the name of the test file as the argument. +To add more tests, after creating the required json files in the `Samples` directory, add a new test method in the appropriate test file. The test method should be named as `test_`. The test method needs to call the `runs_tests` method with the name of the test file as the argument. diff --git a/libraryValidations/Spring/README.md b/libraryValidations/Spring/README.md index 8f61732..d06f016 100644 --- a/libraryValidations/Spring/README.md +++ b/libraryValidations/Spring/README.md @@ -17,7 +17,7 @@ mvn test ## Update to run more tests -To add more tests, after creating the required json files in the `Samples` directory, add a new test class in the `src\test\java\com\microsoft\validation_tests` folder. The test file should be named as `Tests.java`. And should use the bellow template. +To add more tests, after creating the required json files in the `Samples` directory, add a new test class in the `src/test/java/com/microsoft/validation_tests` folder. The test file should be named as `Tests.java` and should use the following template. 
```java import java.io.IOException; From 514b5c0e0f759bd7e4be7d73e0f633b37ae40bbd Mon Sep 17 00:00:00 2001 From: Ross Grambo Date: Tue, 6 Jan 2026 12:52:14 -0800 Subject: [PATCH 2/4] Fixing PR comment permissions --- .github/workflows/validation-tests.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.github/workflows/validation-tests.yml b/.github/workflows/validation-tests.yml index 7d6c928..5cfec6c 100644 --- a/.github/workflows/validation-tests.yml +++ b/.github/workflows/validation-tests.yml @@ -12,6 +12,11 @@ on: # Allow manual trigger workflow_dispatch: +permissions: + contents: read + pull-requests: write + issues: write + jobs: dotnet-tests: name: .NET Tests From 9c700744d138a4aa77b6a9d636d32c68677e480f Mon Sep 17 00:00:00 2001 From: Ross Grambo Date: Tue, 6 Jan 2026 13:02:37 -0800 Subject: [PATCH 3/4] Fixing PR comment --- .github/workflows/validation-tests.yml | 177 +++++++++++++++++-------- 1 file changed, 125 insertions(+), 52 deletions(-) diff --git a/.github/workflows/validation-tests.yml b/.github/workflows/validation-tests.yml index 5cfec6c..cfab50a 100644 --- a/.github/workflows/validation-tests.yml +++ b/.github/workflows/validation-tests.yml @@ -37,33 +37,45 @@ jobs: id: test continue-on-error: true working-directory: ./libraryValidations/Dotnet - run: dotnet test --logger "trx;LogFileName=test-results.trx" --logger "console;verbosity=detailed" + run: dotnet test --logger trx --results-directory ./TestResults - name: Parse .NET test results id: test-details if: always() working-directory: ./libraryValidations/Dotnet run: | + echo "=== Listing all files in TestResults directory ===" + find ./TestResults -type f 2>/dev/null || echo "No TestResults directory" + echo "==========================================" + echo 'results<> $GITHUB_OUTPUT - if [ -f "TestResults/test-results.trx" ]; then - # Extract test results from TRX file - python3 << 'PYTHON' + TRX_FILE=$(find ./TestResults -name "*.trx" -type f 2>/dev/null | head -1) + if [ ! 
-z "$TRX_FILE" ]; then + echo "Found TRX file: $TRX_FILE" + export TRX_FILE + python3 <<'PYTHON' import xml.etree.ElementTree as ET import json + import os - tree = ET.parse('TestResults/test-results.trx') - root = tree.getroot() - ns = {'ns': 'http://microsoft.com/schemas/VisualStudio/TeamTest/2010'} - - results = {} - for test in root.findall('.//ns:UnitTestResult', ns): - test_name = test.get('testName') - outcome = test.get('outcome') - results[test_name] = '✅' if outcome == 'Passed' else '❌' - - print(json.dumps(results)) + trx_file = os.environ.get('TRX_FILE') + if trx_file: + tree = ET.parse(trx_file) + root = tree.getroot() + ns = {'ns': 'http://microsoft.com/schemas/VisualStudio/TeamTest/2010'} + + results = {} + for test in root.findall('.//ns:UnitTestResult', ns): + test_name = test.get('testName') + outcome = test.get('outcome') + results[test_name] = '✅' if outcome == 'Passed' else '❌' + + print(json.dumps(results)) + else: + print("{}") PYTHON else + echo "No TRX file found" echo "{}" fi echo 'EOF' >> $GITHUB_OUTPUT @@ -103,8 +115,12 @@ jobs: if: always() working-directory: ./libraryValidations/Python run: | + echo "=== Listing Python test output files ===" + ls -la *.xml 2>/dev/null || echo "No XML files found" + echo "==========================================" + echo 'results<> $GITHUB_OUTPUT - python3 << 'PYTHON' + python3 <<'PYTHON' import xml.etree.ElementTree as ET import json import os @@ -112,16 +128,16 @@ jobs: results = {} for xml_file in ['results.xml', 'results_provider.xml']: if os.path.exists(xml_file): + print(f"Parsing {xml_file}", flush=True) tree = ET.parse(xml_file) root = tree.getroot() for testcase in root.findall('.//testcase'): test_name = testcase.get('name') - # Check if test failed failed = testcase.find('failure') is not None or testcase.find('error') is not None results[test_name] = '❌' if failed else '✅' - print(json.dumps(results)) + print(json.dumps(results), flush=True) PYTHON echo 'EOF' >> $GITHUB_OUTPUT @@ -152,37 +168,54 @@ jobs: id: test continue-on-error: true working-directory: ./libraryValidations/JavaScript - env: - JEST_JUNIT_OUTPUT_DIR: ./ - JEST_JUNIT_OUTPUT_NAME: results.xml run: | npm install --save-dev jest-junit - npm run test -- --reporters=default --reporters=jest-junit || true + npx jest --reporters=jest-junit --testResultsProcessor=jest-junit || true - name: Parse JavaScript test results id: test-details if: always() working-directory: ./libraryValidations/JavaScript run: | + echo "=== Listing JavaScript test output files ===" + ls -la junit.xml 2>/dev/null || echo "No junit.xml found" + find . -maxdepth 2 -name "*.xml" -type f 2>/dev/null | head -10 + echo "==========================================" + echo 'results<> $GITHUB_OUTPUT - if [ -f "results.xml" ]; then - python3 << 'PYTHON' + XML_FILE="" + if [ -f "junit.xml" ]; then + XML_FILE="junit.xml" + elif [ -f "test-results/junit.xml" ]; then + XML_FILE="test-results/junit.xml" + fi + + if [ ! 
-z "$XML_FILE" ]; then + echo "Parsing JavaScript $XML_FILE" + export XML_FILE + python3 <<'PYTHON' import xml.etree.ElementTree as ET import json + import os - tree = ET.parse('results.xml') - root = tree.getroot() - - results = {} - for testcase in root.findall('.//testcase'): - test_name = testcase.get('name') - # Check if test failed - failed = testcase.find('failure') is not None or testcase.find('error') is not None - results[test_name] = '❌' if failed else '✅' + xml_file = os.environ.get('XML_FILE') - print(json.dumps(results)) + if xml_file and os.path.exists(xml_file): + tree = ET.parse(xml_file) + root = tree.getroot() + + results = {} + for testcase in root.findall('.//testcase'): + test_name = testcase.get('name') + failed = testcase.find('failure') is not None or testcase.find('error') is not None + results[test_name] = '❌' if failed else '✅' + + print(json.dumps(results), flush=True) + else: + print("{}") PYTHON else + echo "No JavaScript test results XML found" echo "{}" fi echo 'EOF' >> $GITHUB_OUTPUT @@ -214,8 +247,13 @@ jobs: if: always() working-directory: ./libraryValidations/Spring/validation-tests run: | + echo "=== Listing Spring test output files ===" + ls -la target/surefire-reports/ 2>/dev/null || echo "No surefire-reports directory" + find target -name "*.xml" -type f 2>/dev/null | head -10 + echo "==========================================" + echo 'results<> $GITHUB_OUTPUT - python3 << 'PYTHON' + python3 <<'PYTHON' import xml.etree.ElementTree as ET import json import os @@ -224,17 +262,19 @@ jobs: results = {} xml_files = glob.glob('target/surefire-reports/TEST-*.xml') + print(f"Found {len(xml_files)} Spring test result files", flush=True) + for xml_file in xml_files: + print(f"Parsing {xml_file}", flush=True) tree = ET.parse(xml_file) root = tree.getroot() for testcase in root.findall('.//testcase'): test_name = testcase.get('name') - # Check if test failed failed = testcase.find('failure') is not None or testcase.find('error') is not None results[test_name] = '❌' if failed else '✅' - print(json.dumps(results)) + print(json.dumps(results), flush=True) PYTHON echo 'EOF' >> $GITHUB_OUTPUT @@ -249,17 +289,44 @@ jobs: - name: Generate test matrix id: matrix + env: + DOTNET_RESULTS: ${{ needs.dotnet-tests.outputs.results }} + PYTHON_RESULTS: ${{ needs.python-tests.outputs.results }} + JAVASCRIPT_RESULTS: ${{ needs.javascript-tests.outputs.results }} + SPRING_RESULTS: ${{ needs.spring-tests.outputs.results }} run: | - python3 << 'PYTHON' + python3 <<'PYTHON' import json import os - import glob + + # Debug: print raw results + print("=== Debug: Raw Results ===") + print(f"DOTNET: {os.environ.get('DOTNET_RESULTS', 'EMPTY')}") + print(f"PYTHON: {os.environ.get('PYTHON_RESULTS', 'EMPTY')}") + print(f"JAVASCRIPT: {os.environ.get('JAVASCRIPT_RESULTS', 'EMPTY')}") + print(f"SPRING: {os.environ.get('SPRING_RESULTS', 'EMPTY')}") + print("========================\n") # Parse results from each language - dotnet_results = json.loads('''${{ needs.dotnet-tests.outputs.results }}''') if '${{ needs.dotnet-tests.outputs.results }}' else {} - python_results = json.loads('''${{ needs.python-tests.outputs.results }}''') if '${{ needs.python-tests.outputs.results }}' else {} - javascript_results = json.loads('''${{ needs.javascript-tests.outputs.results }}''') if '${{ needs.javascript-tests.outputs.results }}' else {} - spring_results = json.loads('''${{ needs.spring-tests.outputs.results }}''') if '${{ needs.spring-tests.outputs.results }}' else {} + try: + dotnet_results = 
json.loads(os.environ.get('DOTNET_RESULTS', '{}')) + except: + dotnet_results = {} + + try: + python_results = json.loads(os.environ.get('PYTHON_RESULTS', '{}')) + except: + python_results = {} + + try: + javascript_results = json.loads(os.environ.get('JAVASCRIPT_RESULTS', '{}')) + except: + javascript_results = {} + + try: + spring_results = json.loads(os.environ.get('SPRING_RESULTS', '{}')) + except: + spring_results = {} # Collect all unique test names across all languages all_tests = set() @@ -271,20 +338,26 @@ jobs: # Sort tests for consistent output sorted_tests = sorted(all_tests) + print(f"Found {len(sorted_tests)} unique tests") + # Generate markdown table with open('summary.md', 'w') as f: f.write("## 🧪 Validation Test Results\n\n") - f.write("| Test Name | .NET | Python | JavaScript | Spring |\n") - f.write("|-----------|------|--------|------------|--------|\n") - for test in sorted_tests: - # Get result for each language, default to ⚠️ if not found - dotnet = dotnet_results.get(test, '⚠️') - python = python_results.get(test, '⚠️') - javascript = javascript_results.get(test, '⚠️') - spring = spring_results.get(test, '⚠️') + if not sorted_tests: + f.write("⚠️ No test results found. Check individual job outputs for details.\n\n") + else: + f.write("| Test Name | .NET | Python | JavaScript | Spring |\n") + f.write("|-----------|------|--------|------------|--------|\n") - f.write(f"| {test} | {dotnet} | {python} | {javascript} | {spring} |\n") + for test in sorted_tests: + # Get result for each language, default to ⚠️ if not found + dotnet = dotnet_results.get(test, '⚠️') + python = python_results.get(test, '⚠️') + javascript = javascript_results.get(test, '⚠️') + spring = spring_results.get(test, '⚠️') + + f.write(f"| {test} | {dotnet} | {python} | {javascript} | {spring} |\n") f.write(f"\n_Workflow run: ${{ github.run_id }}_\n") From 5ed49436a5cf2fa57e84a56e6c58685898fff090 Mon Sep 17 00:00:00 2001 From: Ross Grambo Date: Fri, 9 Jan 2026 15:19:15 -0800 Subject: [PATCH 4/4] Simplify test result parsing to use console output instead of XML files --- .github/workflows/validation-tests.yml | 61 +++++++++++++------------- 1 file changed, 30 insertions(+), 31 deletions(-) diff --git a/.github/workflows/validation-tests.yml b/.github/workflows/validation-tests.yml index cfab50a..b09c251 100644 --- a/.github/workflows/validation-tests.yml +++ b/.github/workflows/validation-tests.yml @@ -37,47 +37,46 @@ jobs: id: test continue-on-error: true working-directory: ./libraryValidations/Dotnet - run: dotnet test --logger trx --results-directory ./TestResults + run: dotnet test --no-build --logger "console;verbosity=normal" 2>&1 | tee test-output.txt - name: Parse .NET test results id: test-details if: always() working-directory: ./libraryValidations/Dotnet run: | - echo "=== Listing all files in TestResults directory ===" - find ./TestResults -type f 2>/dev/null || echo "No TestResults directory" - echo "==========================================" - echo 'results<> $GITHUB_OUTPUT - TRX_FILE=$(find ./TestResults -name "*.trx" -type f 2>/dev/null | head -1) - if [ ! 
-z "$TRX_FILE" ]; then - echo "Found TRX file: $TRX_FILE" - export TRX_FILE - python3 <<'PYTHON' - import xml.etree.ElementTree as ET + python3 <<'PYTHON' import json - import os + import re - trx_file = os.environ.get('TRX_FILE') - if trx_file: - tree = ET.parse(trx_file) - root = tree.getroot() - ns = {'ns': 'http://microsoft.com/schemas/VisualStudio/TeamTest/2010'} - - results = {} - for test in root.findall('.//ns:UnitTestResult', ns): - test_name = test.get('testName') - outcome = test.get('outcome') - results[test_name] = '✅' if outcome == 'Passed' else '❌' - - print(json.dumps(results)) - else: - print("{}") + results = {} + try: + with open('test-output.txt', 'r') as f: + content = f.read() + + # Look for test method names in the output + # Pattern: "TestMethodName (status)" + for match in re.finditer(r'(\w+)\s+\([\d.]+m?s?\):\s+(PASSED|FAILED|passed|failed)', content, re.IGNORECASE): + test_name = match.group(1) + status = match.group(2).upper() + results[test_name] = '✅' if status == 'PASSED' else '❌' + + # If we didn't find tests this way, try parsing the summary line + if not results: + summary_match = re.search(r'Failed[!]?\s*-\s*Failed:\s*(\d+),\s*Passed:\s*(\d+)', content) + if summary_match: + failed_count = int(summary_match.group(1)) + passed_count = int(summary_match.group(2)) + # Create generic test entries + for i in range(passed_count): + results[f'Test_{i+1}'] = '✅' + for i in range(failed_count): + results[f'FailedTest_{i+1}'] = '❌' + except Exception as e: + print(f"Error parsing: {e}") + + print(json.dumps(results)) PYTHON - else - echo "No TRX file found" - echo "{}" - fi echo 'EOF' >> $GITHUB_OUTPUT python-tests: