name: SuperClaude Memory System Tests

on:
  push:
    branches: [ master, main, develop ]
    paths:
      - 'SuperClaude/Core/**'
      - 'tests/**'
      - '.github/workflows/memory-system-tests.yml'
  pull_request:
    branches: [ master, main ]
    paths:
      - 'SuperClaude/Core/**'
      - 'tests/**'
  schedule:
    # Run daily at 2 AM UTC
    - cron: '0 2 * * *'
  workflow_dispatch:
    inputs:
      test_suite:
        description: 'Test suite to run'
        required: false
        default: 'all'
        type: choice
        options:
          - all
          - unit
          - comprehensive
          - performance
          - integration
          - stress

env:
  PYTHON_VERSION: '3.11'
  SUPERCLAUDE_TEST_MODE: '1'
  SUPERCLAUDE_LOG_LEVEL: 'INFO'

jobs:
  test-matrix:
    name: Test Matrix
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        python-version: ['3.8', '3.9', '3.10', '3.11', '3.12']
        test-suite: ['unit', 'comprehensive']

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}
          cache: 'pip'

      - name: Install system dependencies
        run: |
          sudo apt-get update
          sudo apt-get install -y sqlite3 libsqlite3-dev

      - name: Install Python dependencies
        run: |
          python -m pip install --upgrade pip setuptools wheel
          pip install -r tests/requirements-test.txt
          pip install -e .

      - name: Verify installation
        run: |
          python -c "import SuperClaude; print('SuperClaude imported successfully')"
          python -c "from SuperClaude.Core import serena_integration; print('Core modules available')"

      - name: Run ${{ matrix.test-suite }} tests
        run: |
          cd tests
          python run_test_suite.py --suites ${{ matrix.test-suite }} --ci --parallel
        timeout-minutes: 30

      - name: Upload test results
        uses: actions/upload-artifact@v3
        if: always()
        with:
          name: test-results-${{ matrix.python-version }}-${{ matrix.test-suite }}
          path: |
            tests/results/
            tests/htmlcov/
          retention-days: 7

      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v3
        if: matrix.python-version == '3.11' && matrix.test-suite == 'comprehensive'
        with:
          file: tests/results/coverage/comprehensive.json
          flags: memory-system
          name: memory-system-coverage
          fail_ci_if_error: false

  performance-tests:
    name: Performance Benchmarks
    runs-on: ubuntu-latest
    needs: test-matrix
    if: github.event_name == 'push' || github.event_name == 'schedule' || github.event.inputs.test_suite == 'performance' || github.event.inputs.test_suite == 'all'

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: ${{ env.PYTHON_VERSION }}
          cache: 'pip'

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r tests/requirements-test.txt
          pip install -e .
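      # Note on the two steps below: the analysis step assumes run_test_suite.py
      # writes a JSON report to tests/results/reports/performance_report.json with
      # a summary.passed count. That report path and schema are an assumption about
      # the test runner; if it differs, adjust both the benchmark and analysis steps.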
      - name: Run performance benchmarks
        run: |
          cd tests
          python run_test_suite.py --suites performance --ci
        timeout-minutes: 45

      - name: Analyze performance results
        run: |
          python -c "
          import json
          from pathlib import Path

          results_file = Path('tests/results/reports/performance_report.json')
          if results_file.exists():
              with open(results_file) as f:
                  data = json.load(f)
              print('🎯 Performance Analysis:')
              if data.get('summary', {}).get('passed', 0) > 0:
                  print('✅ Performance targets met (<200ms)')
              else:
                  print('❌ Performance targets not met')
                  exit(1)
          else:
              print('⚠️ Performance results not found')
          "

      - name: Upload performance results
        uses: actions/upload-artifact@v3
        if: always()
        with:
          name: performance-benchmarks
          path: |
            tests/results/performance/
            tests/results/reports/performance*
          retention-days: 30

  integration-tests:
    name: Integration & E2E Tests
    runs-on: ubuntu-latest
    needs: test-matrix
    if: github.event_name == 'push' || github.event.inputs.test_suite == 'integration' || github.event.inputs.test_suite == 'all'

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: ${{ env.PYTHON_VERSION }}
          cache: 'pip'

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r tests/requirements-test.txt
          pip install -e .

      - name: Run integration tests
        run: |
          cd tests
          python run_test_suite.py --suites integration --ci
        timeout-minutes: 60

      - name: Upload integration test results
        uses: actions/upload-artifact@v3
        if: always()
        with:
          name: integration-test-results
          path: |
            tests/results/
          retention-days: 14

  stress-tests:
    name: Stress & Concurrent Tests
    runs-on: ubuntu-latest
    needs: [test-matrix, performance-tests]
    if: github.event_name == 'schedule' || github.event.inputs.test_suite == 'stress' || github.event.inputs.test_suite == 'all'

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: ${{ env.PYTHON_VERSION }}
          cache: 'pip'

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r tests/requirements-test.txt
          pip install -e .
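      # Stress tests are the most expensive suite; per the job-level if: above they
      # only run on the nightly schedule or when workflow_dispatch is invoked with
      # test_suite set to 'stress' or 'all'.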
      - name: Run stress tests
        run: |
          cd tests
          python run_test_suite.py --suites stress --ci
        timeout-minutes: 45

      - name: Upload stress test results
        uses: actions/upload-artifact@v3
        if: always()
        with:
          name: stress-test-results
          path: |
            tests/results/
          retention-days: 7

  comprehensive-report:
    name: Generate Comprehensive Report
    runs-on: ubuntu-latest
    needs: [test-matrix, performance-tests, integration-tests]
    if: always()

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: ${{ env.PYTHON_VERSION }}

      - name: Download all test artifacts
        uses: actions/download-artifact@v3
        with:
          path: artifacts/

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r tests/requirements-test.txt

      - name: Generate comprehensive report
        run: |
          python -c "
          import json
          from pathlib import Path
          from datetime import datetime

          # Collect all test results from the downloaded artifacts
          artifacts_dir = Path('artifacts')
          all_results = {}
          for artifact_dir in artifacts_dir.iterdir():
              if artifact_dir.is_dir():
                  for result_file in artifact_dir.rglob('*_report.json'):
                      try:
                          with open(result_file) as f:
                              data = json.load(f)
                          all_results[artifact_dir.name] = data
                      except (OSError, ValueError):
                          # Skip unreadable or malformed report files
                          pass

          # Generate summary report
          report = f'''# SuperClaude Memory System CI/CD Test Report

          Generated: {datetime.utcnow().isoformat()}Z

          ## Summary

          Total Test Artifacts: {len(list(artifacts_dir.iterdir()))}
          Test Results Collected: {len(all_results)}

          ## Test Status by Category

          '''

          for artifact_name, results in all_results.items():
              status = '✅ PASSED' if results.get('exitcode', 1) == 0 else '❌ FAILED'
              report += f'- **{artifact_name}**: {status}\n'

          report += f'''

          ## Performance Validation

          Performance Target: <200ms for individual operations

          '''

          # Check for performance results
          perf_passed = any('performance' in name for name in all_results.keys())
          if perf_passed:
              report += '✅ Performance benchmarks completed successfully\n'
          else:
              report += '⚠️ Performance benchmarks not completed\n'

          report += '''

          ## Recommendations

          '''

          # Only declare success if at least one result was collected and all passed
          if all_results and all(results.get('exitcode', 1) == 0 for results in all_results.values()):
              report += '''✅ All tests passed - system ready for deployment
          🚀 Memory system migration validated
          📊 Performance targets met
          '''
          else:
              report += '''❌ Some tests failed - review before deployment
          🔧 Check individual test results for details
          ⚠️ System may not meet requirements
          '''

          # Save report
          with open('comprehensive_test_report.md', 'w') as f:
              f.write(report)

          print('📊 Comprehensive report generated')
          "

      - name: Upload comprehensive report
        uses: actions/upload-artifact@v3
        with:
          name: comprehensive-test-report
          path: comprehensive_test_report.md
          retention-days: 30

      - name: Comment on PR (if applicable)
        uses: actions/github-script@v6
        if: github.event_name == 'pull_request'
        with:
          script: |
            const fs = require('fs');
            if (fs.existsSync('comprehensive_test_report.md')) {
              const report = fs.readFileSync('comprehensive_test_report.md', 'utf8');
              github.rest.issues.createComment({
                issue_number: context.issue.number,
                owner: context.repo.owner,
                repo: context.repo.repo,
                body: `## 🧪 SuperClaude Memory System Test Results\n\n${report}`
              });
            }

  security-scan:
    name: Security Scan
    runs-on: ubuntu-latest
    if: github.event_name == 'push' || github.event_name == 'pull_request'

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Run Trivy vulnerability scanner
        uses: aquasecurity/trivy-action@master
        with:
          scan-type: 'fs'
          scan-ref: '.'
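          # SARIF output is what the upload-sarif step below consumes; keep this
          # output filename in sync with the sarif_file value in that step.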
          format: 'sarif'
          output: 'trivy-results.sarif'

      - name: Upload Trivy scan results to GitHub Security tab
        uses: github/codeql-action/upload-sarif@v2
        if: always()
        with:
          sarif_file: 'trivy-results.sarif'

  notify-status:
    name: Notify Status
    runs-on: ubuntu-latest
    needs: [test-matrix, performance-tests, integration-tests, comprehensive-report]
    if: always() && github.event_name == 'push'

    steps:
      - name: Determine overall status
        id: status
        run: |
          if [[ "${{ needs.test-matrix.result }}" == "success" && "${{ needs.performance-tests.result }}" == "success" && "${{ needs.integration-tests.result }}" == "success" ]]; then
            echo "status=success" >> $GITHUB_OUTPUT
            echo "message=All SuperClaude memory system tests passed! ✅" >> $GITHUB_OUTPUT
          else
            echo "status=failure" >> $GITHUB_OUTPUT
            echo "message=Some SuperClaude memory system tests failed! ❌" >> $GITHUB_OUTPUT
          fi

      - name: Create status check
        uses: actions/github-script@v6
        with:
          script: |
            github.rest.repos.createCommitStatus({
              owner: context.repo.owner,
              repo: context.repo.repo,
              sha: context.sha,
              state: '${{ steps.status.outputs.status }}',
              description: '${{ steps.status.outputs.message }}',
              context: 'SuperClaude Memory System Tests'
            });
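
# Local reproduction (a minimal sketch, assuming the same test runner interface
# used by the jobs above):
#   pip install -r tests/requirements-test.txt && pip install -e .
#   cd tests && python run_test_suite.py --suites unit --ci --parallel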