refactor: remove obsolete tests and scripts for old architecture

Remove tests/core/:
- test_intelligent_execution.py (old superclaude.core tests)
- pm_init/test_init_hook.py (old context initialization)

Remove obsolete scripts:
- validate_pypi_ready.py (old structure validation)
- build_and_upload.py (old package paths)
- migrate_to_skills.py (migration already complete)
- demo_intelligent_execution.py (old core demo)
- verify_research_integration.sh (old structure verification)

New architecture (src/superclaude/) has its own tests in tests/pm_agent/.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
kazuki
2025-10-21 12:00:36 +09:00
parent eb37591922
commit b241ff80fe
5 changed files with 0 additions and 1182 deletions

View File

@@ -1,282 +0,0 @@
#!/usr/bin/env python3
"""
PyPI Build and Upload Script for SuperClaude Framework
Handles building, validation, and uploading to PyPI with proper error handling
"""
import os
import sys
import shutil
import subprocess
import argparse
from pathlib import Path
from typing import Tuple, List, Optional
# Paths resolved relative to this script's location (assumed scripts/ -> repo root).
PROJECT_ROOT = Path(__file__).parent.parent
DIST_DIR = PROJECT_ROOT / "dist"    # sdist/wheel output from `python -m build`
BUILD_DIR = PROJECT_ROOT / "build"  # intermediate build artifacts
def run_command(cmd: List[str], description: str) -> Tuple[bool, str]:
    """Execute *cmd* from the project root and return ``(success, output)``.

    Progress is printed before and after the command; on failure the exit
    code and stderr are echoed and ``(False, stderr)`` is returned.
    """
    print(f"🔄 {description}...")
    try:
        proc = subprocess.run(
            cmd,
            capture_output=True,
            text=True,
            cwd=PROJECT_ROOT,
            check=True,
        )
    except subprocess.CalledProcessError as err:
        print(f"{description} failed:")
        print(f" Exit code: {err.returncode}")
        print(f" Error: {err.stderr}")
        return False, err.stderr
    except Exception as err:  # best-effort: report anything else (e.g. missing binary)
        print(f"{description} failed with exception: {err}")
        return False, str(err)
    print(f"{description} completed successfully")
    return True, proc.stdout
def clean_build_artifacts():
    """Delete dist/, build/ and the egg-info directory left by earlier builds."""
    for stale in (DIST_DIR, BUILD_DIR, PROJECT_ROOT / "superclaude.egg-info"):
        if not stale.exists():
            continue
        print(f"🧹 Removing {stale}")
        # egg-info and build outputs are directories; fall back for plain files.
        if stale.is_dir():
            shutil.rmtree(stale)
        else:
            stale.unlink()
def install_build_tools() -> bool:
    """Install/upgrade ``build`` and ``twine`` via pip; False on first failure."""
    # all() short-circuits, matching the original early-return on failure.
    return all(
        run_command(
            [sys.executable, "-m", "pip", "install", "--upgrade", tool],
            f"Installing {tool}",
        )[0]
        for tool in ("build", "twine")
    )
def validate_project_structure() -> bool:
    """Validate project structure before building.

    Checks that the packaging metadata and package entry files exist under
    PROJECT_ROOT and that the package version can be imported.
    Returns True when everything is present, False on the first problem.
    """
    required_files = [
        "pyproject.toml",
        "README.md",
        "LICENSE",
        "superclaude/__init__.py",
        "superclaude/__main__.py",
        "setup/__init__.py",
    ]
    print("🔍 Validating project structure...")
    for file_path in required_files:
        full_path = PROJECT_ROOT / file_path
        if not full_path.exists():
            print(f"❌ Missing required file: {file_path}")
            return False
    # Check if version is consistent
    try:
        # NOTE(review): this imports whatever `superclaude` Python resolves first,
        # which may be an installed copy rather than the checkout — confirm.
        from superclaude import __version__
        print(f"📦 Package version: {__version__}")
    except ImportError as e:
        print(f"❌ Could not import version from SuperClaude: {e}")
        return False
    print("✅ Project structure validation passed")
    return True
def build_package() -> bool:
    """Run ``python -m build`` to produce sdist/wheel; True on success."""
    ok, _ = run_command(
        [sys.executable, "-m", "build"], "Building package distributions"
    )
    return ok
def validate_distribution() -> bool:
    """Validate the built distribution.

    Verifies dist/ exists and is non-empty, lists its contents, then runs
    ``twine check`` over every file. Returns the twine check result.
    """
    if not DIST_DIR.exists():
        print("❌ Distribution directory does not exist")
        return False
    dist_files = list(DIST_DIR.glob("*"))
    if not dist_files:
        print("❌ No distribution files found")
        return False
    print(f"📦 Found distribution files:")
    for file in dist_files:
        print(f" - {file.name}")
    # Check with twine (validates metadata/rendering of each artifact)
    return run_command(
        [sys.executable, "-m", "twine", "check"] + [str(f) for f in dist_files],
        "Validating distributions with twine",
    )[0]
def upload_to_testpypi() -> bool:
    """Upload every file in dist/ to TestPyPI via twine; True on success."""
    files = [str(path) for path in DIST_DIR.glob("*")]
    cmd = [sys.executable, "-m", "twine", "upload", "--repository", "testpypi", *files]
    return run_command(cmd, "Uploading to TestPyPI")[0]
def upload_to_pypi() -> bool:
    """Upload to production PyPI.

    Prefers a token from the PYPI_API_TOKEN environment variable; otherwise
    relies on the user's ~/.pypirc twine configuration.
    """
    dist_files = list(DIST_DIR.glob("*"))
    # Check if we have API token in environment
    if os.getenv("PYPI_API_TOKEN"):
        # NOTE(review): passing the token via --password exposes it in the
        # process list; consider TWINE_PASSWORD env var instead — confirm.
        cmd = [
            sys.executable,
            "-m",
            "twine",
            "upload",
            "--username",
            "__token__",
            "--password",
            os.getenv("PYPI_API_TOKEN"),
        ] + [str(f) for f in dist_files]
    else:
        # Fall back to .pypirc configuration
        cmd = [sys.executable, "-m", "twine", "upload"] + [str(f) for f in dist_files]
    return run_command(cmd, "Uploading to PyPI")[0]
def test_installation_from_testpypi() -> bool:
    """Test installation from TestPyPI.

    Installs the package from test.pypi.org (with pypi.org as fallback index
    for dependencies), then tries to import it and read its version.
    Returns True only when both install and import succeed.
    """
    print("🧪 Testing installation from TestPyPI...")
    print(" Note: This will install in a separate process")
    success, output = run_command(
        [
            sys.executable,
            "-m",
            "pip",
            "install",
            "--index-url",
            "https://test.pypi.org/simple/",
            "--extra-index-url",
            "https://pypi.org/simple/",
            "SuperClaude",
            "--force-reinstall",
            "--no-deps",
        ],
        "Installing from TestPyPI",
    )
    if success:
        print("✅ Test installation successful")
        # Try to import the package
        # NOTE(review): this import happens in the *current* process, so a
        # previously-imported or local `superclaude` may shadow the fresh
        # install — confirm against intended behavior.
        try:
            import superclaude
            print(f"✅ Package import successful, version: {superclaude.__version__}")
            return True
        except ImportError as e:
            print(f"❌ Package import failed: {e}")
            return False
    return False
def main():
    """CLI entry point: clean, build, validate and upload the package.

    Pipeline: clean artifacts -> install build tools -> validate structure ->
    build -> validate dists -> upload (TestPyPI with --testpypi, otherwise
    production PyPI after an interactive confirmation). Any failing step
    exits with status 1.
    """
    parser = argparse.ArgumentParser(description="Build and upload SuperClaude to PyPI")
    parser.add_argument(
        "--testpypi", action="store_true", help="Upload to TestPyPI instead of PyPI"
    )
    parser.add_argument(
        "--test-install", action="store_true", help="Test installation from TestPyPI"
    )
    parser.add_argument(
        "--skip-build", action="store_true", help="Skip build step (use existing dist)"
    )
    parser.add_argument(
        "--skip-validation", action="store_true", help="Skip validation steps"
    )
    parser.add_argument(
        "--clean", action="store_true", help="Only clean build artifacts"
    )
    args = parser.parse_args()
    # Change to project root
    os.chdir(PROJECT_ROOT)
    if args.clean:
        clean_build_artifacts()
        return
    print("🚀 SuperClaude PyPI Build and Upload Script")
    print(f"📁 Working directory: {PROJECT_ROOT}")
    # Step 1: Clean previous builds
    clean_build_artifacts()
    # Step 2: Install build tools
    if not install_build_tools():
        print("❌ Failed to install build tools")
        sys.exit(1)
    # Step 3: Validate project structure
    if not args.skip_validation and not validate_project_structure():
        print("❌ Project structure validation failed")
        sys.exit(1)
    # Step 4: Build package
    if not args.skip_build:
        if not build_package():
            print("❌ Package build failed")
            sys.exit(1)
    # Step 5: Validate distribution
    if not args.skip_validation and not validate_distribution():
        print("❌ Distribution validation failed")
        sys.exit(1)
    # Step 6: Upload
    if args.testpypi:
        if not upload_to_testpypi():
            print("❌ Upload to TestPyPI failed")
            sys.exit(1)
        # Test installation if requested
        # (note: --test-install only takes effect together with --testpypi)
        if args.test_install:
            if not test_installation_from_testpypi():
                print("❌ Test installation failed")
                sys.exit(1)
    else:
        # Confirm production upload — interactive; requires typing exactly "yes"
        response = input(
            "🚨 Upload to production PyPI? This cannot be undone! (yes/no): "
        )
        if response.lower() != "yes":
            print("❌ Upload cancelled")
            sys.exit(1)
        if not upload_to_pypi():
            print("❌ Upload to PyPI failed")
            sys.exit(1)
    print("✅ All operations completed successfully!")
if __name__ == "__main__":
    main()

View File

@@ -1,216 +0,0 @@
#!/usr/bin/env python3
"""
Demo: Intelligent Execution Engine
Demonstrates:
1. Reflection × 3 before execution
2. Parallel execution planning
3. Automatic self-correction
Usage:
python scripts/demo_intelligent_execution.py
"""
import sys
from pathlib import Path
# Add src to path so the demo can run from a source checkout without installing.
sys.path.insert(0, str(Path(__file__).parent.parent / "src"))
# NOTE(review): safe_execute is imported but never used in this demo.
from superclaude.core import intelligent_execute, quick_execute, safe_execute
import time
def demo_high_confidence_execution():
    """Demo 1: a well-specified task with full context executes with high confidence."""
    print("\n" + "=" * 80)
    print("DEMO 1: High Confidence Execution")
    print("=" * 80)
    # Define operations — sleeps simulate I/O-bound work to make parallel
    # speedup observable.
    def read_file_1():
        time.sleep(0.1)
        return "Content of file1.py"
    def read_file_2():
        time.sleep(0.1)
        return "Content of file2.py"
    def read_file_3():
        time.sleep(0.1)
        return "Content of file3.py"
    def analyze_files():
        time.sleep(0.2)
        return "Analysis complete"
    # Execute with high confidence: a concrete task description plus project
    # context is what the engine's reflection step scores.
    result = intelligent_execute(
        task="Read and analyze three validation files: file1.py, file2.py, file3.py",
        operations=[read_file_1, read_file_2, read_file_3, analyze_files],
        context={
            "project_index": "Loaded project structure",
            "current_branch": "main",
            "git_status": "clean"
        }
    )
    # Result is assumed to be a dict with 'status', 'confidence' and optional
    # 'speedup' keys — defined by superclaude.core, not visible here.
    print(f"\nResult: {result['status']}")
    print(f"Confidence: {result['confidence']:.0%}")
    print(f"Speedup: {result.get('speedup', 0):.1f}x")
def demo_low_confidence_blocked():
    """Demo 2: a vague task with no context should be blocked before execution."""
    banner = "=" * 80
    print("\n" + banner)
    print("DEMO 2: Low Confidence Blocked")
    print(banner)
    # Deliberately underspecified: vague task text and no context dict.
    outcome = intelligent_execute(
        task="Do something",
        operations=[lambda: "result"],
        context=None,
    )
    print(f"\nResult: {outcome['status']}")
    print(f"Confidence: {outcome['confidence']:.0%}")
    if outcome['status'] == 'blocked':
        print("\nBlockers:")
        for item in outcome['blockers']:
            print(f"{item}")
        print("\nRecommendations:")
        for hint in outcome['recommendations']:
            print(f" 💡 {hint}")
def demo_self_correction():
    """Demo 3: a failing operation triggers self-correction and leaves a
    learning record in docs/memory/reflexion.json (written by the engine)."""
    print("\n" + "=" * 80)
    print("DEMO 3: Self-Correction Learning")
    print("=" * 80)
    # Operation that fails — raises to exercise the auto-correct path.
    def validate_form():
        raise ValueError("Missing required field: email")
    result = intelligent_execute(
        task="Validate user registration form with email field check",
        operations=[validate_form],
        context={"project_index": "Loaded"},
        auto_correct=True
    )
    print(f"\nResult: {result['status']}")
    print(f"Error: {result.get('error', 'N/A')}")
    # Check reflexion memory — assumes the engine writes its memory file
    # relative to the current working directory; confirm against core impl.
    reflexion_file = Path.cwd() / "docs" / "memory" / "reflexion.json"
    if reflexion_file.exists():
        import json
        with open(reflexion_file) as f:
            data = json.load(f)
        print(f"\nLearning captured:")
        print(f" Mistakes recorded: {len(data.get('mistakes', []))}")
        print(f" Prevention rules: {len(data.get('prevention_rules', []))}")
        if data.get('prevention_rules'):
            print("\n Latest prevention rule:")
            print(f" 📝 {data['prevention_rules'][-1]}")
def demo_quick_execution():
    """Demo 4: quick_execute runs operations directly, skipping reflection."""
    banner = "=" * 80
    print("\n" + banner)
    print("DEMO 4: Quick Execution (No Reflection)")
    print(banner)
    operations = [
        lambda: "Task 1 complete",
        lambda: "Task 2 complete",
        lambda: "Task 3 complete",
    ]
    started = time.time()
    outcome = quick_execute(operations)
    duration = time.time() - started
    print(f"\nResults: {outcome}")
    print(f"Time: {duration:.3f}s")
    print("✅ No reflection overhead - fastest execution")
def demo_parallel_speedup():
    """Demo 5: compare wall-clock time of 10 sleep-bound ops run through the
    engine against the 1.0s a sequential run would take."""
    print("\n" + "=" * 80)
    print("DEMO 5: Parallel Speedup Demonstration")
    print("=" * 80)
    # Create 10 slow operations (0.1s sleep each simulates I/O)
    def slow_op(i):
        time.sleep(0.1)
        return f"Operation {i} complete"
    # `i=i` default binds each index now, avoiding the late-binding-closure trap.
    ops = [lambda i=i: slow_op(i) for i in range(10)]
    # Sequential time estimate
    sequential_time = 10 * 0.1  # 1.0s
    print(f"Sequential time (estimated): {sequential_time:.1f}s")
    print(f"Operations: {len(ops)}")
    # Execute in parallel
    start = time.time()
    result = intelligent_execute(
        task="Process 10 files in parallel for validation and security checks",
        operations=ops,
        context={"project_index": "Loaded"}
    )
    elapsed = time.time() - start
    print(f"\nParallel execution time: {elapsed:.2f}s")
    # NOTE(review): would raise ZeroDivisionError if elapsed were exactly 0;
    # practically impossible with 0.1s sleeps but worth guarding.
    print(f"Theoretical speedup: {sequential_time / elapsed:.1f}x")
    print(f"Reported speedup: {result.get('speedup', 0):.1f}x")
def main():
    """Run all five demos in sequence and print a closing summary."""
    print("\n" + "=" * 80)
    print("🧠 INTELLIGENT EXECUTION ENGINE - DEMONSTRATION")
    print("=" * 80)
    print("\nThis demo showcases:")
    print(" 1. Reflection × 3 for confidence checking")
    print(" 2. Automatic parallel execution planning")
    print(" 3. Self-correction and learning from failures")
    print(" 4. Quick execution mode for simple tasks")
    print(" 5. Parallel speedup measurements")
    print("=" * 80)
    # Run demos — order matters only for readability of the console output.
    demo_high_confidence_execution()
    demo_low_confidence_blocked()
    demo_self_correction()
    demo_quick_execution()
    demo_parallel_speedup()
    print("\n" + "=" * 80)
    print("✅ DEMONSTRATION COMPLETE")
    print("=" * 80)
    print("\nKey Takeaways:")
    print(" ✅ Reflection prevents wrong-direction execution")
    print(" ✅ Parallel execution achieves significant speedup")
    print(" ✅ Self-correction learns from failures automatically")
    print(" ✅ Flexible modes for different use cases")
    print("=" * 80 + "\n")
if __name__ == "__main__":
    main()

View File

@@ -1,285 +0,0 @@
#!/usr/bin/env python3
"""
Migrate SuperClaude components to Skills-based architecture
Converts always-loaded Markdown files to on-demand Skills loading
for 97-98% token savings at Claude Code startup.
Usage:
python scripts/migrate_to_skills.py --dry-run # Preview changes
python scripts/migrate_to_skills.py # Execute migration
python scripts/migrate_to_skills.py --rollback # Undo migration
"""
import argparse
import shutil
from pathlib import Path
import sys
# Configuration — all paths live under the user's ~/.claude directory.
CLAUDE_DIR = Path.home() / ".claude"
SUPERCLAUDE_DIR = CLAUDE_DIR / "superclaude"   # legacy always-loaded layout
SKILLS_DIR = CLAUDE_DIR / "skills"             # new on-demand Skills layout
BACKUP_DIR = SUPERCLAUDE_DIR.parent / "superclaude.backup"
# Component mapping: superclaude-relative source path → target skill name
COMPONENTS = {
    # Agents
    "agents/pm-agent.md": "pm",
    "agents/task-agent.md": "task",
    "agents/research-agent.md": "research",
    "agents/brainstorm-agent.md": "brainstorm",
    "agents/analyzer.md": "analyze",
    # Modes
    "modes/MODE_Orchestration.md": "orchestration-mode",
    "modes/MODE_Brainstorming.md": "brainstorming-mode",
    "modes/MODE_Introspection.md": "introspection-mode",
    "modes/MODE_Task_Management.md": "task-management-mode",
    "modes/MODE_Token_Efficiency.md": "token-efficiency-mode",
    "modes/MODE_DeepResearch.md": "deep-research-mode",
    "modes/MODE_Business_Panel.md": "business-panel-mode",
}
# Shared modules (copied into each *agent* skill's modules/ directory —
# see migrate_component)
SHARED_MODULES = [
    "modules/git-status.md",
    "modules/token-counter.md",
    "modules/pm-formatter.md",
]
def create_skill_md(skill_name: str, original_file: Path) -> str:
    """Render the SKILL.md text for *skill_name* from *original_file*.

    The description is pulled from the source file's YAML frontmatter when
    one is present; otherwise a generic description is derived from the
    skill name.
    """
    text = original_file.read_text()
    rows = text.split("\n")
    title = skill_name.replace('-', ' ').title()
    description = f"{title} - Skills-based implementation"
    # Only scan the first few lines of an opening frontmatter block.
    if rows[0].strip() == "---":
        for row in rows[1:10]:
            if row.startswith("description:"):
                description = row.split(":", 1)[1].strip().strip('"')
                break
    return f"""---
name: {skill_name}
description: {description}
version: 1.0.0
author: SuperClaude
migrated: true
---
# {title}
Skills-based on-demand loading implementation.
**Token Efficiency**:
- Startup: 0 tokens (not loaded)
- Description: ~50-100 tokens
- Full load: ~2,500 tokens (when used)
**Activation**: `/sc:{skill_name}` or auto-triggered by context
**Implementation**: See `implementation.md` for full protocol
**Modules**: Additional support files in `modules/` directory
"""
def migrate_component(source_path: Path, skill_name: str, dry_run: bool = False) -> dict:
    """Migrate a single component file into the Skills directory layout.

    Creates skills/<skill_name>/ with SKILL.md, implementation.md and (for
    agents) a modules/ directory. Returns a result dict with 'skill',
    'source', 'status' ('migrated' | 'would_migrate' | 'source_missing' |
    'skipped'), 'token_savings' and, when applicable, 'target'.
    """
    result = {
        "skill": skill_name,
        "source": str(source_path),
        "status": "skipped",
        "token_savings": 0,
    }
    if not source_path.exists():
        result["status"] = "source_missing"
        return result
    # Calculate token savings — 1.3 tokens/word is a rough heuristic estimate.
    word_count = len(source_path.read_text().split())
    original_tokens = int(word_count * 1.3)
    skill_tokens = 70 # SKILL.md description only
    result["token_savings"] = original_tokens - skill_tokens
    skill_dir = SKILLS_DIR / skill_name
    if dry_run:
        result["status"] = "would_migrate"
        result["target"] = str(skill_dir)
        return result
    # Create skill directory (idempotent)
    skill_dir.mkdir(parents=True, exist_ok=True)
    # Create SKILL.md
    skill_md = skill_dir / "SKILL.md"
    skill_md.write_text(create_skill_md(skill_name, source_path))
    # Copy implementation (full original content, loaded on demand)
    impl_md = skill_dir / "implementation.md"
    shutil.copy2(source_path, impl_md)
    # Copy modules if this is an agent — detected from the source path string.
    if "agents" in str(source_path):
        modules_dir = skill_dir / "modules"
        modules_dir.mkdir(exist_ok=True)
        for module_path in SHARED_MODULES:
            module_file = SUPERCLAUDE_DIR / module_path
            if module_file.exists():
                shutil.copy2(module_file, modules_dir / module_file.name)
    result["status"] = "migrated"
    result["target"] = str(skill_dir)
    return result
def backup_superclaude(dry_run: bool = False) -> bool:
    """Create a backup copy of the current SuperClaude directory.

    Returns True when a backup already exists or is (or would be, in dry-run
    mode) created; False when the source directory is missing.
    """
    if not SUPERCLAUDE_DIR.exists():
        print(f"❌ SuperClaude directory not found: {SUPERCLAUDE_DIR}")
        return False
    if BACKUP_DIR.exists():
        print(f"⚠️ Backup already exists: {BACKUP_DIR}")
        # NOTE(review): --force is mentioned here but never defined in main().
        print(" Skipping backup (use --force to overwrite)")
        return True
    if dry_run:
        # Fix: source and target were printed with no separator between them,
        # producing one unreadable concatenated path.
        print(f"Would create backup: {SUPERCLAUDE_DIR} -> {BACKUP_DIR}")
        return True
    print(f"Creating backup: {BACKUP_DIR}")
    shutil.copytree(SUPERCLAUDE_DIR, BACKUP_DIR)
    print("✅ Backup created")
    return True
def rollback_migration() -> bool:
    """Restore the pre-migration state from the backup directory.

    Deletes skills/ and the current superclaude/ directory, then copies the
    backup back into place. Returns False when no backup exists.
    """
    if not BACKUP_DIR.exists():
        print(f"❌ No backup found: {BACKUP_DIR}")
        return False
    print(f"Rolling back to backup...")
    # Remove skills directory (created by the migration)
    if SKILLS_DIR.exists():
        print(f"Removing skills: {SKILLS_DIR}")
        shutil.rmtree(SKILLS_DIR)
    # Restore superclaude — remove current contents first so copytree succeeds
    if SUPERCLAUDE_DIR.exists():
        print(f"Removing current: {SUPERCLAUDE_DIR}")
        shutil.rmtree(SUPERCLAUDE_DIR)
    print(f"Restoring from backup...")
    # Note: the backup itself is kept, so rollback can be repeated.
    shutil.copytree(BACKUP_DIR, SUPERCLAUDE_DIR)
    print("✅ Rollback complete")
    return True
def main():
    """CLI entry point: back up, migrate all COMPONENTS, and print a summary.

    Modes: --rollback restores the backup and exits; --dry-run previews
    without touching the filesystem; --no-backup skips the safety copy.
    Always returns 0 (failures inside individual components are reported in
    the summary, not via the exit code).
    """
    parser = argparse.ArgumentParser(
        description="Migrate SuperClaude to Skills-based architecture"
    )
    parser.add_argument(
        "--dry-run",
        action="store_true",
        help="Preview changes without executing"
    )
    parser.add_argument(
        "--rollback",
        action="store_true",
        help="Restore from backup"
    )
    parser.add_argument(
        "--no-backup",
        action="store_true",
        help="Skip backup creation (dangerous)"
    )
    args = parser.parse_args()
    # Rollback mode
    if args.rollback:
        success = rollback_migration()
        sys.exit(0 if success else 1)
    # Migration mode
    print("=" * 60)
    print("SuperClaude → Skills Migration")
    print("=" * 60)
    if args.dry_run:
        print("🔍 DRY RUN MODE - No changes will be made\n")
    # Backup
    if not args.no_backup:
        if not backup_superclaude(args.dry_run):
            sys.exit(1)
    print(f"\nMigrating {len(COMPONENTS)} components...\n")
    # Migrate components
    results = []
    total_savings = 0
    for source_rel, skill_name in COMPONENTS.items():
        source_path = SUPERCLAUDE_DIR / source_rel
        result = migrate_component(source_path, skill_name, args.dry_run)
        results.append(result)
        status_icon = {
            "migrated": "",
            "would_migrate": "📋",
            "source_missing": "⚠️",
            "skipped": "⏭️",
        }.get(result["status"], "")
        print(f"{status_icon} {skill_name:25} {result['status']:15} "
        f"(saves {result['token_savings']:,} tokens)")
        total_savings += result["token_savings"]
    # Summary
    print("\n" + "=" * 60)
    print("SUMMARY")
    print("=" * 60)
    migrated = sum(1 for r in results if r["status"] in ["migrated", "would_migrate"])
    skipped = sum(1 for r in results if r["status"] in ["source_missing", "skipped"])
    print(f"Migrated: {migrated}/{len(COMPONENTS)}")
    print(f"Skipped: {skipped}/{len(COMPONENTS)}")
    print(f"Total token savings: {total_savings:,} tokens")
    # NOTE(review): this mixes integer floor division with a float format and
    # hard-codes 500 in the denominator (presumably the post-migration startup
    # cost in tokens) — confirm the intended formula.
    print(f"Savings percentage: {total_savings * 100 // (total_savings + 500):.0f}%")
    if args.dry_run:
        print("\n💡 Run without --dry-run to execute migration")
    else:
        print(f"\n✅ Migration complete!")
        print(f" Backup: {BACKUP_DIR}")
        print(f" Skills: {SKILLS_DIR}")
        print(f"\n Use --rollback to undo changes")
    return 0
if __name__ == "__main__":
    sys.exit(main())

View File

@@ -1,231 +0,0 @@
#!/usr/bin/env python3
"""
PyPI Readiness Validation Script
Checks if SuperClaude project is ready for PyPI publication
"""
import sys
import toml
from pathlib import Path
from typing import List, Tuple
# Repository root, assuming this script lives in a scripts/ subdirectory.
PROJECT_ROOT = Path(__file__).parent.parent
def check_file_exists(file_path: Path, description: str) -> bool:
    """Report whether *file_path* exists, printing a pass/fail line."""
    present = file_path.exists()
    if present:
        print(f"{description}: {file_path}")
    else:
        print(f"❌ Missing {description}: {file_path}")
    return present
def check_version_consistency() -> bool:
    """Check that the version agrees across pyproject.toml, the package
    __init__ and setup/__init__.

    Returns True only when all three versions can be read and are identical.
    """
    print("\n🔍 Checking version consistency...")
    versions = {}
    # Check pyproject.toml
    # NOTE(review): `toml` is a third-party package (stdlib tomllib exists
    # from 3.11) — confirm it is a declared dev dependency.
    try:
        pyproject_path = PROJECT_ROOT / "pyproject.toml"
        with open(pyproject_path, "r") as f:
            pyproject = toml.load(f)
        versions["pyproject.toml"] = pyproject["project"]["version"]
        print(f"📋 pyproject.toml version: {versions['pyproject.toml']}")
    except Exception as e:
        print(f"❌ Error reading pyproject.toml: {e}")
        return False
    # Check superclaude/__init__.py — sys.path tweak makes the checkout importable
    try:
        sys.path.insert(0, str(PROJECT_ROOT))
        from superclaude import __version__
        versions["superclaude/__init__.py"] = __version__
        print(f"📦 Package version: {versions['superclaude/__init__.py']}")
    except Exception as e:
        print(f"❌ Error importing SuperClaude version: {e}")
        return False
    # Check setup/__init__.py
    try:
        from setup import __version__ as setup_version
        versions["setup/__init__.py"] = setup_version
        print(f"🔧 Setup version: {versions['setup/__init__.py']}")
    except Exception as e:
        print(f"❌ Error importing setup version: {e}")
        return False
    # Check consistency — a single distinct value means all sources agree
    all_versions = list(versions.values())
    if len(set(all_versions)) == 1:
        print(f"✅ All versions consistent: {all_versions[0]}")
        return True
    else:
        print(f"❌ Version mismatch: {versions}")
        return False
def check_package_structure() -> bool:
    """Verify that every expected package __init__/entry file is present.

    All entries are checked (and printed) even after a failure; returns True
    only when every file exists.
    """
    print("\n🏗️ Checking package structure...")
    expected = [
        ("superclaude/__init__.py", "Main package __init__.py"),
        ("superclaude/__main__.py", "Main entry point"),
        ("superclaude/Core/__init__.py", "Core module __init__.py"),
        ("superclaude/Commands/__init__.py", "Commands module __init__.py"),
        ("superclaude/Agents/__init__.py", "Agents module __init__.py"),
        ("superclaude/Modes/__init__.py", "Modes module __init__.py"),
        ("superclaude/MCP/__init__.py", "MCP module __init__.py"),
        ("setup/__init__.py", "Setup package __init__.py"),
    ]
    # List comprehension (not a generator) so every check runs and prints.
    outcomes = [check_file_exists(PROJECT_ROOT / rel, label) for rel, label in expected]
    return all(outcomes)
def check_required_files() -> bool:
    """Verify the packaging metadata files exist at the repository root.

    All entries are checked (and printed) even after a failure; returns True
    only when every file exists.
    """
    print("\n📄 Checking required files...")
    expected = [
        ("pyproject.toml", "Package configuration"),
        ("README.md", "Project README"),
        ("LICENSE", "License file"),
        ("MANIFEST.in", "Package manifest"),
        ("setup.py", "Setup script"),
    ]
    # List comprehension (not a generator) so every check runs and prints.
    outcomes = [check_file_exists(PROJECT_ROOT / rel, label) for rel, label in expected]
    return all(outcomes)
def check_pyproject_config() -> bool:
    """Check pyproject.toml for required [project] fields, the CLI entry
    point, and (non-fatally) PyPI classifiers."""
    print("\n⚙️ Checking pyproject.toml configuration...")
    try:
        pyproject_path = PROJECT_ROOT / "pyproject.toml"
        with open(pyproject_path, "r") as f:
            pyproject = toml.load(f)
        project = pyproject.get("project", {})
        # Required fields
        required_fields = ["name", "version", "description", "authors"]
        for field in required_fields:
            if field in project:
                print(f"{field}: {project[field]}")
            else:
                print(f"❌ Missing required field: {field}")
                return False
        # Check entry points — `superclaude` must be a declared console script
        scripts = project.get("scripts", {})
        if "superclaude" in scripts:
            print(f"✅ CLI entry point: {scripts['superclaude']}")
        else:
            print("❌ Missing CLI entry point")
            return False
        # Check classifiers — absence is only a warning, not a failure
        classifiers = project.get("classifiers", [])
        if len(classifiers) > 0:
            print(f"{len(classifiers)} PyPI classifiers defined")
        else:
            print("⚠️ No PyPI classifiers defined")
        return True
    except Exception as e:
        print(f"❌ Error reading pyproject.toml: {e}")
        return False
def check_import_test() -> bool:
    """Smoke-test that the package imports and exposes version/author.

    A missing __version__ or __author__ raises AttributeError, which the
    broad except also converts into a failed check.
    """
    print("\n🧪 Testing package import...")
    try:
        # Prepend the checkout so the local package wins over any installed copy
        sys.path.insert(0, str(PROJECT_ROOT))
        import superclaude
        print(f"✅ SuperClaude import successful")
        print(f"📦 Version: {superclaude.__version__}")
        print(f"👤 Author: {superclaude.__author__}")
        return True
    except Exception as e:
        print(f"❌ Import failed: {e}")
        return False
def main():
    """Run all readiness checks, print a summary table, and return True only
    when every check passed. Exceptions inside a check count as a failure."""
    print("🔍 SuperClaude PyPI Readiness Validation")
    print(f"📁 Project root: {PROJECT_ROOT}")
    print("=" * 50)
    checks = [
        ("Required Files", check_required_files),
        ("Package Structure", check_package_structure),
        ("Version Consistency", check_version_consistency),
        ("PyProject Configuration", check_pyproject_config),
        ("Import Test", check_import_test),
    ]
    results = []
    for name, check_func in checks:
        try:
            result = check_func()
            results.append((name, result))
        except Exception as e:
            # A crashing check is recorded as a failure, not a crash of main.
            print(f"{name} check failed with exception: {e}")
            results.append((name, False))
    # Summary
    print("\n" + "=" * 50)
    print("📊 VALIDATION SUMMARY")
    print("=" * 50)
    passed = 0
    total = len(results)
    for name, result in results:
        status = "✅ PASS" if result else "❌ FAIL"
        print(f"{status} {name}")
        if result:
            passed += 1
    print(f"\n📈 Overall: {passed}/{total} checks passed")
    if passed == total:
        print("🎉 Project is ready for PyPI publication!")
        print("\nNext steps:")
        print("1. ./scripts/publish.sh test # Test on TestPyPI")
        print("2. ./scripts/publish.sh prod # Publish to PyPI")
        return True
    else:
        print("❌ Project needs fixes before PyPI publication")
        return False
if __name__ == "__main__":
    success = main()
    sys.exit(0 if success else 1)

View File

@@ -1,168 +0,0 @@
#!/bin/bash
# Deep Research Integration Verification Script
# Tests that all components are properly integrated
# NOTE: this script deliberately does NOT use `set -e`. It counts failures and
# reports them in the final summary, but under errexit the first failing
# check_file/grep — and even `((ERRORS++))`, which exits with status 1 when
# the old value is 0 — would abort the script before the summary is printed.
echo "========================================"
echo "Deep Research Integration Verification"
echo "========================================"
echo ""
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Track errors
ERRORS=0
WARNINGS=0
check_file() {
local file=$1
local description=$2
if [ -f "$file" ]; then
echo -e "${GREEN}${NC} $description exists: $file"
return 0
else
echo -e "${RED}${NC} $description missing: $file"
((ERRORS++))
return 1
fi
}
# check_string_in_file <path> <string> <description>
# Greps quietly for <string>; on a miss, bumps ERRORS and returns 1.
check_string_in_file() {
    local file=$1
    local string=$2
    local description=$3
    if grep -q "$string" "$file" 2>/dev/null; then
        echo -e "${GREEN}✓${NC} $description found in $file"
        return 0
    else
        echo -e "${RED}✗${NC} $description not found in $file"
        # Plain assignment: ((ERRORS++)) returns status 1 when ERRORS is 0,
        # which is fatal under errexit.
        ERRORS=$((ERRORS + 1))
        return 1
    fi
}
echo "1. Checking Research Files..."
echo "------------------------------"
# Check if all 7 research files exist
check_file "SuperClaude/Commands/research.md" "Research command"
check_file "SuperClaude/Agents/deep-research-agent.md" "Deep Research agent"
check_file "SuperClaude/Modes/MODE_DeepResearch.md" "Deep Research mode"
check_file "SuperClaude/MCP/MCP_Tavily.md" "Tavily MCP documentation"
check_file "SuperClaude/MCP/configs/tavily.json" "Tavily MCP configuration"
check_file "SuperClaude/Core/RESEARCH_CONFIG.md" "Research configuration"
check_file "SuperClaude/Examples/deep_research_workflows.md" "Research workflow examples"
echo ""
echo "2. Checking Setup Component Updates..."
echo "---------------------------------------"
# Each check greps a literal marker string in the setup sources; these fail
# (and count an error) if the expected edits were not applied.
# Check mcp_docs.py has Tavily in server_docs_map
echo -e "${BLUE}Checking mcp_docs.py...${NC}"
check_string_in_file "setup/components/mcp_docs.py" '"tavily": "MCP_Tavily.md"' "Tavily in server_docs_map"
# Check mcp.py has Tavily configuration
echo -e "${BLUE}Checking mcp.py...${NC}"
check_string_in_file "setup/components/mcp.py" '"tavily":' "Tavily server configuration"
check_string_in_file "setup/components/mcp.py" "def _install_remote_mcp_server" "Remote MCP server handler"
check_string_in_file "setup/components/mcp.py" "TAVILY_API_KEY" "Tavily API key reference"
# Check agents.py has count updated
echo -e "${BLUE}Checking agents.py...${NC}"
check_string_in_file "setup/components/agents.py" "15 specialized AI agents" "15 agents count"
# Check modes.py has count updated
echo -e "${BLUE}Checking modes.py...${NC}"
check_string_in_file "setup/components/modes.py" "7 behavioral modes" "7 modes count"
# Check environment.py has research prerequisites check
echo -e "${BLUE}Checking environment.py...${NC}"
check_string_in_file "setup/utils/environment.py" "def check_research_prerequisites" "Research prerequisites check"
check_string_in_file "setup/utils/environment.py" "TAVILY_API_KEY" "Tavily API key check"
echo ""
echo "3. Checking Environment..."
echo "---------------------------"
# Check for Node.js (runs the Tavily MCP server)
if command -v node &> /dev/null; then
    NODE_VERSION=$(node --version)
    echo -e "${GREEN}✓${NC} Node.js installed: $NODE_VERSION"
else
    echo -e "${YELLOW}⚠${NC} Node.js not installed (required for Tavily MCP)"
    # Plain assignment: ((WARNINGS++)) returns status 1 when WARNINGS is 0,
    # which is fatal under errexit.
    WARNINGS=$((WARNINGS + 1))
fi
# Check for npm
if command -v npm &> /dev/null; then
    NPM_VERSION=$(npm --version)
    echo -e "${GREEN}✓${NC} npm installed: $NPM_VERSION"
else
    echo -e "${YELLOW}⚠${NC} npm not installed (required for MCP servers)"
    WARNINGS=$((WARNINGS + 1))
fi
# Check for TAVILY_API_KEY
if [ -n "$TAVILY_API_KEY" ]; then
    echo -e "${GREEN}✓${NC} TAVILY_API_KEY is set"
else
    echo -e "${YELLOW}⚠${NC} TAVILY_API_KEY not set - get from https://app.tavily.com"
    WARNINGS=$((WARNINGS + 1))
fi
echo ""
echo "4. Checking Auto-Discovery Components..."
echo "-----------------------------------------"
# These components should auto-discover the new files (informational only;
# nothing is actually executed or verified here).
echo -e "${BLUE}Components that will auto-discover files:${NC}"
echo -e "${GREEN}✓${NC} commands.py will find research.md"
echo -e "${GREEN}✓${NC} agents.py will find deep-research-agent.md"
echo -e "${GREEN}✓${NC} modes.py will find MODE_DeepResearch.md"
echo -e "${GREEN}✓${NC} core.py will find RESEARCH_CONFIG.md"
echo ""
echo "5. Checking Python Syntax..."
echo "-----------------------------"
# Byte-compile each modified setup file to catch syntax errors
for file in setup/components/mcp_docs.py setup/components/mcp.py setup/components/agents.py setup/components/modes.py setup/utils/environment.py; do
    if python3 -m py_compile "$file" 2>/dev/null; then
        echo -e "${GREEN}✓${NC} $file syntax is valid"
    else
        echo -e "${RED}✗${NC} $file has syntax errors"
        # Plain assignment: ((ERRORS++)) returns status 1 when ERRORS is 0,
        # which is fatal under errexit.
        ERRORS=$((ERRORS + 1))
    fi
done
echo ""
echo "========================================"
echo "Verification Summary"
echo "========================================"
if [ $ERRORS -eq 0 ]; then
    echo -e "${GREEN}✓ All critical checks passed!${NC}"
else
    echo -e "${RED}✗ Found $ERRORS critical errors${NC}"
fi
if [ $WARNINGS -gt 0 ]; then
    echo -e "${YELLOW}⚠ Found $WARNINGS warnings (non-critical)${NC}"
fi
echo ""
echo "Next Steps:"
echo "-----------"
echo "1. Set TAVILY_API_KEY: export TAVILY_API_KEY='your-key-here'"
echo "2. Run installation: SuperClaude install"
echo "3. Test in Claude Code: /sc:research 'test query'"
# Exit status encodes the error count (0 = success). Shell statuses wrap at
# 256, but the number of checks here is far below that.
exit $ERRORS