style: apply ruff formatting to all files

Formatted 14 files to comply with ruff formatting rules:
- Consistent code style across codebase
- Improved readability
- All formatting checks now pass

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
mithun50 committed 2025-11-12 18:19:45 +01:00
parent e2896335ba
commit b00547ad53
14 changed files with 147 additions and 128 deletions
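The changes are mechanical: the commit was presumably produced by running ruff's formatter (something like `ruff format .`, verified with `ruff format --check .` — the exact invocation is not recorded in the commit). A minimal before/after sketch of the transformations that recur in the hunks below, using illustrative code rather than the repo's own:

```
# Before (illustrative):
#   def install(name,
#       path: str,
#       force: bool = False
#   ) -> bool: ...
#   opts = {'retry': 3, 'verbose': True}


# After ruff format (Black-compatible style):
def install(name: str, path: str, force: bool = False) -> bool:
    """Short signatures collapse onto one line within the line-length limit."""
    # Single quotes become double quotes; a mapping kept multi-line gets a
    # "magic" trailing comma after its last element, and expressions inside
    # f-strings gain normal operator spacing (e.g. {i + 1}).
    opts = {
        "retry": 3,
        "verbose": True,
    }
    return bool(opts) and force
```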

View File

@@ -55,8 +55,7 @@ def _check_pytest_plugin() -> Dict[str, Any]:
     # Check if superclaude plugin is loaded
     superclaude_loaded = any(
-        "superclaude" in str(plugin[0]).lower()
-        for plugin in plugins
+        "superclaude" in str(plugin[0]).lower() for plugin in plugins
     )
     if superclaude_loaded:
@@ -132,6 +131,7 @@ def _check_configuration() -> Dict[str, Any]:
     # Check if package is importable
     try:
         import superclaude
+
         version = superclaude.__version__
         return {

View File

@@ -9,10 +9,7 @@ from pathlib import Path
 from typing import List, Tuple


-def install_commands(
-    target_path: Path = None,
-    force: bool = False
-) -> Tuple[bool, str]:
+def install_commands(target_path: Path = None, force: bool = False) -> Tuple[bool, str]:
     """
     Install all SuperClaude commands to Claude Code
@@ -71,7 +68,9 @@ def install_commands(
         messages.append(f"  - /{cmd}")
     if skipped_commands:
-        messages.append(f"\n⚠️ Skipped {len(skipped_commands)} existing commands (use --force to reinstall):")
+        messages.append(
+            f"\n⚠️ Skipped {len(skipped_commands)} existing commands (use --force to reinstall):"
+        )
         for cmd in skipped_commands:
             messages.append(f"  - /{cmd}")

View File

@@ -10,9 +10,7 @@ from typing import List, Optional, Tuple
 def install_skill_command(
-    skill_name: str,
-    target_path: Path,
-    force: bool = False
+    skill_name: str, target_path: Path, force: bool = False
 ) -> Tuple[bool, str]:
     """
     Install a skill to target directory
@@ -40,7 +38,10 @@ def install_skill_command(
     # Check if skill already installed
     if skill_target.exists() and not force:
-        return False, f"Skill '{skill_name}' already installed (use --force to reinstall)"
+        return (
+            False,
+            f"Skill '{skill_name}' already installed (use --force to reinstall)",
+        )
     # Remove existing if force
     if skill_target.exists() and force:

View File

@@ -118,9 +118,7 @@ def install_skill(skill_name: str, target: str, force: bool):
     click.echo(f"📦 Installing skill '{skill_name}' to {target_path}...")
     success, message = install_skill_command(
-        skill_name=skill_name,
-        target_path=target_path,
-        force=force
+        skill_name=skill_name, target_path=target_path, force=force
     )
     if success:

View File

@@ -43,7 +43,7 @@ def intelligent_execute(
     operations: List[Callable],
     context: Optional[Dict[str, Any]] = None,
     repo_path: Optional[Path] = None,
-    auto_correct: bool = True
+    auto_correct: bool = True,
 ) -> Dict[str, Any]:
     """
     Intelligent Task Execution with Reflection, Parallelization, and Self-Correction
@@ -96,7 +96,7 @@ def intelligent_execute(
             "status": "blocked",
             "confidence": confidence.confidence,
             "blockers": confidence.blockers,
-            "recommendations": confidence.recommendations
+            "recommendations": confidence.recommendations,
         }
     print(f"\n✅ HIGH CONFIDENCE ({confidence.confidence:.0%}) - PROCEEDING")
@@ -111,9 +111,9 @@ def intelligent_execute(
     tasks = [
         Task(
             id=f"task_{i}",
-            description=f"Operation {i+1}",
+            description=f"Operation {i + 1}",
             execute=op,
-            depends_on=[]  # Assume independent for now (can enhance later)
+            depends_on=[],  # Assume independent for now (can enhance later)
         )
         for i, op in enumerate(operations)
     ]
@@ -145,7 +145,7 @@ def intelligent_execute(
                 failure_info = {
                     "type": "execution_error",
                     "error": "Operation returned None",
-                    "task_id": task_id
+                    "task_id": task_id,
                 }
                 root_cause = correction_engine.analyze_root_cause(task, failure_info)
@@ -162,7 +162,7 @@ def intelligent_execute(
             "confidence": confidence.confidence,
             "results": results,
             "failures": len(failures),
-            "speedup": plan.speedup
+            "speedup": plan.speedup,
         }
     except Exception as e:
@@ -174,11 +174,7 @@ def intelligent_execute(
         correction_engine = SelfCorrectionEngine(repo_path)
-        failure_info = {
-            "type": "exception",
-            "error": str(e),
-            "exception": e
-        }
+        failure_info = {"type": "exception", "error": str(e), "exception": e}
         root_cause = correction_engine.analyze_root_cause(task, failure_info)
         correction_engine.learn_and_prevent(task, failure_info, root_cause)
@@ -188,12 +184,13 @@ def intelligent_execute(
         return {
             "status": "failed",
             "error": str(e),
-            "confidence": confidence.confidence
+            "confidence": confidence.confidence,
         }

+
 # Convenience functions
 def quick_execute(operations: List[Callable]) -> List[Any]:
     """
     Quick parallel execution without reflection

View File

@@ -20,6 +20,7 @@ from typing import Any, Callable, Dict, List, Optional, Set
 class TaskStatus(Enum):
     """Task execution status"""
+
     PENDING = "pending"
     RUNNING = "running"
     COMPLETED = "completed"
@@ -29,6 +30,7 @@ class TaskStatus(Enum):
 @dataclass
 class Task:
     """Single executable task"""
+
     id: str
     description: str
     execute: Callable
@@ -45,6 +47,7 @@ class Task:
 @dataclass
 class ParallelGroup:
     """Group of tasks that can execute in parallel"""
+
     group_id: int
     tasks: List[Task]
     dependencies: Set[str]  # External task IDs this group depends on
@@ -56,6 +59,7 @@ class ParallelGroup:
 @dataclass
 class ExecutionPlan:
     """Complete execution plan with parallelization strategy"""
+
     groups: List[ParallelGroup]
     total_tasks: int
     sequential_time_estimate: float
@@ -114,7 +118,8 @@ class ParallelExecutor:
         while len(completed) < len(tasks):
             # Find tasks that can execute now (dependencies met)
             ready = [
-                task for task in tasks
+                task
+                for task in tasks
                 if task.id not in completed and task.can_execute(completed)
             ]
@@ -127,7 +132,7 @@ class ParallelExecutor:
             group = ParallelGroup(
                 group_id=group_id,
                 tasks=ready,
-                dependencies=set().union(*[set(t.depends_on) for t in ready])
+                dependencies=set().union(*[set(t.depends_on) for t in ready]),
             )
             groups.append(group)
@@ -143,8 +148,7 @@ class ParallelExecutor:
         # Parallel time = sum of slowest task in each group
         parallel_time = sum(
-            max(1, len(group.tasks) // self.max_workers) * task_time
-            for group in groups
+            max(1, len(group.tasks) // self.max_workers) * task_time for group in groups
         )
         speedup = sequential_time / parallel_time if parallel_time > 0 else 1.0
@@ -154,7 +158,7 @@ class ParallelExecutor:
             total_tasks=len(tasks),
             sequential_time_estimate=sequential_time,
             parallel_time_estimate=parallel_time,
-            speedup=speedup
+            speedup=speedup,
         )
         print(plan)
@@ -205,8 +209,7 @@ class ParallelExecutor:
         with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
             # Submit all tasks in group
             future_to_task = {
-                executor.submit(task.execute): task
-                for task in group.tasks
+                executor.submit(task.execute): task for task in group.tasks
             }
             # Collect results as they complete
@@ -233,6 +236,7 @@ class ParallelExecutor:

+
 # Convenience functions for common patterns
 def parallel_file_operations(files: List[str], operation: Callable) -> List[Any]:
     """
     Execute operation on multiple files in parallel
@@ -251,7 +255,7 @@ def parallel_file_operations(files: List[str], operation: Callable) -> List[Any]
             id=f"op_{i}",
             description=f"Process {file}",
             execute=lambda f=file: operation(f),
-            depends_on=[]
+            depends_on=[],
         )
         for i, file in enumerate(files)
     ]
@@ -273,6 +277,7 @@ def should_parallelize(items: List[Any], threshold: int = 3) -> bool:

+
 # Example usage patterns
 def example_parallel_read():
     """Example: Parallel file reading"""
@@ -285,7 +290,7 @@ def example_parallel_read():
             id=f"read_{i}",
             description=f"Read {file}",
             execute=lambda f=file: f"Content of {f}",  # Placeholder
-            depends_on=[]
+            depends_on=[],
         )
         for i, file in enumerate(files)
     ]
@@ -306,10 +311,10 @@ def example_dependent_tasks():
         Task("read1", "Read config.py", lambda: "config", []),
         Task("read2", "Read utils.py", lambda: "utils", []),
         Task("read3", "Read main.py", lambda: "main", []),
-
         # Wave 2: Analysis (depends on reads)
-        Task("analyze", "Analyze code", lambda: "analysis", ["read1", "read2", "read3"]),
-
+        Task(
+            "analyze", "Analyze code", lambda: "analysis", ["read1", "read2", "read3"]
+        ),
         # Wave 3: Generate report (depends on analysis)
         Task("report", "Generate report", lambda: "report", ["analyze"]),
     ]
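
For readers skimming the hunks: the scheduling loop reformatted above (`ready = [...]`) repeatedly collects tasks whose dependencies are complete and runs them as one parallel group. A self-contained sketch of that pattern using only the standard library — `run_in_waves` and the `tasks` dict shape are illustrative, not this module's API:

```
from concurrent.futures import ThreadPoolExecutor


def run_in_waves(tasks, max_workers=4):
    """tasks maps id -> (callable, [dependency ids]); runs ready tasks in waves."""
    completed, results = set(), {}
    while len(completed) < len(tasks):
        # A task is ready once all of its dependencies have completed.
        ready = [
            tid
            for tid, (_, deps) in tasks.items()
            if tid not in completed and all(d in completed for d in deps)
        ]
        if not ready:
            raise RuntimeError("dependency cycle or unsatisfiable dependency")
        with ThreadPoolExecutor(max_workers=max_workers) as pool:
            for tid, result in zip(ready, pool.map(lambda t: tasks[t][0](), ready)):
                results[tid] = result
        completed.update(ready)
    return results


# Mirrors example_dependent_tasks(): three reads, then analyze, then report.
print(run_in_waves({
    "read1": (lambda: "config", []),
    "read2": (lambda: "utils", []),
    "read3": (lambda: "main", []),
    "analyze": (lambda: "analysis", ["read1", "read2", "read3"]),
    "report": (lambda: "report", ["analyze"]),
}))
```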

View File

@@ -19,6 +19,7 @@ from typing import Any, Dict, List, Optional
 @dataclass
 class ReflectionResult:
     """Single reflection analysis result"""
+
     stage: str
     score: float  # 0.0 - 1.0
     evidence: List[str]
@@ -48,10 +49,12 @@ class ConfidenceScore:
     def __repr__(self) -> str:
         status = "🟢 PROCEED" if self.should_proceed else "🔴 BLOCKED"
-        return f"{status} | Confidence: {self.confidence:.0%}\n" + \
-               f"   Clarity: {self.requirement_clarity}\n" + \
-               f"   Mistakes: {self.mistake_check}\n" + \
-               f"   Context: {self.context_ready}"
+        return (
+            f"{status} | Confidence: {self.confidence:.0%}\n"
+            + f"   Clarity: {self.requirement_clarity}\n"
+            + f"   Mistakes: {self.mistake_check}\n"
+            + f"   Context: {self.context_ready}"
+        )
 class ReflectionEngine:
@@ -79,12 +82,14 @@ class ReflectionEngine:
         # Weights for confidence calculation
         self.WEIGHTS = {
             "clarity": 0.5,  # Most important
             "mistakes": 0.3,  # Learn from past
             "context": 0.2,  # Least critical (can load more)
         }
-    def reflect(self, task: str, context: Optional[Dict[str, Any]] = None) -> ConfidenceScore:
+    def reflect(
+        self, task: str, context: Optional[Dict[str, Any]] = None
+    ) -> ConfidenceScore:
         """
         3-Stage Reflection Process
@@ -108,9 +113,9 @@ class ReflectionEngine:
         # Calculate overall confidence
         confidence = (
-            clarity.score * self.WEIGHTS["clarity"] +
-            mistakes.score * self.WEIGHTS["mistakes"] +
-            context_ready.score * self.WEIGHTS["context"]
+            clarity.score * self.WEIGHTS["clarity"]
+            + mistakes.score * self.WEIGHTS["mistakes"]
+            + context_ready.score * self.WEIGHTS["context"]
         )
         # Decision logic
@@ -139,7 +144,7 @@ class ReflectionEngine:
             confidence=confidence,
             should_proceed=should_proceed,
             blockers=blockers,
-            recommendations=recommendations
+            recommendations=recommendations,
         )
         print("=" * 60)
@@ -148,7 +153,9 @@ class ReflectionEngine:
         return result
-    def _reflect_clarity(self, task: str, context: Optional[Dict] = None) -> ReflectionResult:
+    def _reflect_clarity(
+        self, task: str, context: Optional[Dict] = None
+    ) -> ReflectionResult:
         """
         Reflection 1: Requirement Clarity
@@ -161,7 +168,15 @@ class ReflectionEngine:
         score = 0.5  # Start neutral
         # Check for specificity indicators
-        specific_verbs = ["create", "fix", "add", "update", "delete", "refactor", "implement"]
+        specific_verbs = [
+            "create",
+            "fix",
+            "add",
+            "update",
+            "delete",
+            "refactor",
+            "implement",
+        ]
         vague_verbs = ["improve", "optimize", "enhance", "better", "something"]
         task_lower = task.lower()
@@ -172,7 +187,10 @@ class ReflectionEngine:
             evidence.append("Contains specific action verb")
         # Technical terms present
-        if any(term in task_lower for term in ["function", "class", "file", "api", "endpoint"]):
+        if any(
+            term in task_lower
+            for term in ["function", "class", "file", "api", "endpoint"]
+        ):
             score += 0.15
             evidence.append("Includes technical specifics")
@@ -198,10 +216,12 @@ class ReflectionEngine:
             stage="Requirement Clarity",
             score=score,
             evidence=evidence,
-            concerns=concerns
+            concerns=concerns,
         )
-    def _reflect_mistakes(self, task: str, context: Optional[Dict] = None) -> ReflectionResult:
+    def _reflect_mistakes(
+        self, task: str, context: Optional[Dict] = None
+    ) -> ReflectionResult:
         """
         Reflection 2: Past Mistake Check
@@ -218,10 +238,7 @@ class ReflectionEngine:
         if not reflexion_file.exists():
             evidence.append("No past mistakes recorded")
             return ReflectionResult(
-                stage="Past Mistakes",
-                score=score,
-                evidence=evidence,
-                concerns=concerns
+                stage="Past Mistakes", score=score, evidence=evidence, concerns=concerns
             )
         try:
@@ -248,7 +265,9 @@ class ReflectionEngine:
                 for mistake in similar_mistakes[:3]:  # Show max 3
                     concerns.append(f"   ⚠️ {mistake.get('mistake', 'Unknown')}")
             else:
-                evidence.append(f"Checked {len(past_mistakes)} past mistakes - none similar")
+                evidence.append(
+                    f"Checked {len(past_mistakes)} past mistakes - none similar"
+                )
         except Exception as e:
             concerns.append(f"Could not load reflexion memory: {e}")
@@ -258,13 +277,12 @@ class ReflectionEngine:
         score = max(0.0, min(1.0, score))
         return ReflectionResult(
-            stage="Past Mistakes",
-            score=score,
-            evidence=evidence,
-            concerns=concerns
+            stage="Past Mistakes", score=score, evidence=evidence, concerns=concerns
        )
-    def _reflect_context(self, task: str, context: Optional[Dict] = None) -> ReflectionResult:
+    def _reflect_context(
+        self, task: str, context: Optional[Dict] = None
+    ) -> ReflectionResult:
         """
         Reflection 3: Context Readiness
@@ -283,7 +301,7 @@ class ReflectionEngine:
                 stage="Context Readiness",
                 score=score,
                 evidence=evidence,
-                concerns=concerns
+                concerns=concerns,
             )
         # Check for essential context elements
@@ -319,10 +337,7 @@ class ReflectionEngine:
         score = max(0.0, min(1.0, score))
         return ReflectionResult(
-            stage="Context Readiness",
-            score=score,
-            evidence=evidence,
-            concerns=concerns
+            stage="Context Readiness", score=score, evidence=evidence, concerns=concerns
         )
     def record_reflection(self, task: str, confidence: ConfidenceScore, decision: str):
@@ -336,7 +351,7 @@ class ReflectionEngine:
             "confidence": confidence.confidence,
             "decision": decision,
             "blockers": confidence.blockers,
-            "recommendations": confidence.recommendations
+            "recommendations": confidence.recommendations,
         }
         # Append to log
@@ -349,7 +364,7 @@ class ReflectionEngine:
             log_data["reflections"].append(entry)
-            with open(reflection_log, 'w') as f:
+            with open(reflection_log, "w") as f:
                 json.dump(log_data, f, indent=2)
         except Exception as e:
@@ -373,7 +388,9 @@ def get_reflection_engine(repo_path: Optional[Path] = None) -> ReflectionEngine:
 # Convenience function
-def reflect_before_execution(task: str, context: Optional[Dict] = None) -> ConfidenceScore:
+def reflect_before_execution(
+    task: str, context: Optional[Dict] = None
+) -> ConfidenceScore:
     """
     Perform 3-stage reflection before task execution

View File

@@ -23,6 +23,7 @@ from typing import Any, Dict, List, Optional
 @dataclass
 class RootCause:
     """Identified root cause of failure"""
+
     category: str  # e.g., "validation", "dependency", "logic", "assumption"
     description: str
     evidence: List[str]
@@ -41,6 +42,7 @@ class RootCause:
 @dataclass
 class FailureEntry:
     """Single failure entry in Reflexion memory"""
+
     id: str
     timestamp: str
     task: str
@@ -95,10 +97,10 @@ class SelfCorrectionEngine:
                 "created": datetime.now().isoformat(),
                 "mistakes": [],
                 "patterns": [],
-                "prevention_rules": []
+                "prevention_rules": [],
             }
-            with open(self.reflexion_file, 'w') as f:
+            with open(self.reflexion_file, "w") as f:
                 json.dump(initial_data, f, indent=2)
     def detect_failure(self, execution_result: Dict[str, Any]) -> bool:
@@ -110,11 +112,7 @@ class SelfCorrectionEngine:
         status = execution_result.get("status", "unknown")
         return status in ["failed", "error", "exception"]
-    def analyze_root_cause(
-        self,
-        task: str,
-        failure: Dict[str, Any]
-    ) -> RootCause:
+    def analyze_root_cause(self, task: str, failure: Dict[str, Any]) -> RootCause:
         """
         Analyze root cause of failure
@@ -148,7 +146,7 @@ class SelfCorrectionEngine:
             description=error_msg,
             evidence=[error_msg, stack_trace] if stack_trace else [error_msg],
             prevention_rule=prevention_rule,
-            validation_tests=validation_tests
+            validation_tests=validation_tests,
         )
         print(root_cause)
@@ -162,11 +160,15 @@ class SelfCorrectionEngine:
         error_lower = error_msg.lower()
         # Validation failures
-        if any(word in error_lower for word in ["invalid", "missing", "required", "must"]):
+        if any(
+            word in error_lower for word in ["invalid", "missing", "required", "must"]
+        ):
             return "validation"
         # Dependency failures
-        if any(word in error_lower for word in ["not found", "missing", "import", "module"]):
+        if any(
+            word in error_lower for word in ["not found", "missing", "import", "module"]
+        ):
             return "dependency"
         # Logic errors
@@ -191,8 +193,7 @@ class SelfCorrectionEngine:
                 data = json.load(f)
             past_failures = [
-                FailureEntry.from_dict(entry)
-                for entry in data.get("mistakes", [])
+                FailureEntry.from_dict(entry) for entry in data.get("mistakes", [])
             ]
             # Simple similarity: keyword overlap
@@ -217,10 +218,7 @@ class SelfCorrectionEngine:
             return []
     def _generate_prevention_rule(
-        self,
-        category: str,
-        error_msg: str,
-        similar: List[FailureEntry]
+        self, category: str, error_msg: str, similar: List[FailureEntry]
     ) -> str:
         """Generate prevention rule based on failure analysis"""
@@ -230,7 +228,7 @@ class SelfCorrectionEngine:
             "logic": "ALWAYS verify assumptions with assertions",
             "assumption": "NEVER assume - always verify with checks",
             "type": "ALWAYS use type hints and runtime type checking",
-            "unknown": "ALWAYS add error handling for unknown cases"
+            "unknown": "ALWAYS add error handling for unknown cases",
         }
         base_rule = rules.get(category, "ALWAYS add defensive checks")
@@ -248,28 +246,28 @@ class SelfCorrectionEngine:
             "validation": [
                 "Check input is not None",
                 "Verify input type matches expected",
-                "Validate input range/constraints"
+                "Validate input range/constraints",
             ],
             "dependency": [
                 "Verify module exists before import",
                 "Check file exists before reading",
-                "Validate path is accessible"
+                "Validate path is accessible",
             ],
             "logic": [
                 "Add assertion for pre-conditions",
                 "Add assertion for post-conditions",
-                "Verify intermediate results"
+                "Verify intermediate results",
             ],
             "assumption": [
                 "Explicitly check assumed condition",
                 "Add logging for assumption verification",
-                "Document assumption with test"
+                "Document assumption with test",
             ],
             "type": [
                 "Add type hints",
                 "Add runtime type checking",
-                "Use dataclass with validation"
-            ]
+                "Use dataclass with validation",
+            ],
         }
         return tests.get(category, ["Add defensive check", "Add error handling"])
@@ -280,7 +278,7 @@ class SelfCorrectionEngine:
         failure: Dict[str, Any],
         root_cause: RootCause,
         fixed: bool = False,
-        fix_description: Optional[str] = None
+        fix_description: Optional[str] = None,
     ):
         """
         Learn from failure and store prevention rules
@@ -305,7 +303,7 @@ class SelfCorrectionEngine:
             root_cause=root_cause,
             fixed=fixed,
             fix_description=fix_description,
-            recurrence_count=0
+            recurrence_count=0,
         )
         # Load current reflexion memory
@@ -337,7 +335,7 @@ class SelfCorrectionEngine:
             print("📝 Prevention rule added")
         # Save updated memory
-        with open(self.reflexion_file, 'w') as f:
+        with open(self.reflexion_file, "w") as f:
             json.dump(data, f, indent=2)
         print("💾 Reflexion memory updated")
@@ -366,8 +364,7 @@ class SelfCorrectionEngine:
             data = json.load(f)
             past_failures = [
-                FailureEntry.from_dict(entry)
-                for entry in data.get("mistakes", [])
+                FailureEntry.from_dict(entry) for entry in data.get("mistakes", [])
             ]
             # Find similar tasks
@@ -391,7 +388,9 @@ class SelfCorrectionEngine:
 _self_correction_engine: Optional[SelfCorrectionEngine] = None
-def get_self_correction_engine(repo_path: Optional[Path] = None) -> SelfCorrectionEngine:
+def get_self_correction_engine(
+    repo_path: Optional[Path] = None,
+) -> SelfCorrectionEngine:
     """Get or create self-correction engine singleton"""
     global _self_correction_engine
@@ -408,7 +407,7 @@ def learn_from_failure(
     task: str,
     failure: Dict[str, Any],
     fixed: bool = False,
-    fix_description: Optional[str] = None
+    fix_description: Optional[str] = None,
 ):
     """
     Learn from execution failure

View File

@@ -242,8 +242,12 @@ class ConfidenceChecker:
         # Check for markers indicating test type
         markers = context.get("markers", [])
         known_markers = {
-            "unit", "integration", "hallucination",
-            "performance", "confidence_check", "self_check"
+            "unit",
+            "integration",
+            "hallucination",
+            "performance",
+            "confidence_check",
+            "self_check",
         }
         has_markers = bool(set(markers) & known_markers)

View File

@@ -152,7 +152,8 @@ class ReflexionPattern:
             message = error_info["error_message"]
             # Remove numbers (often varies between errors)
             import re
-            message = re.sub(r'\d+', 'N', message)
+
+            message = re.sub(r"\d+", "N", message)
             parts.append(message[:100])  # First 100 chars
         if "test_name" in error_info:
@@ -261,47 +262,47 @@ class ReflexionPattern:
         content = f"""# Mistake Record: {test_name}
 **Date**: {date}
-**Error Type**: {error_info.get('error_type', 'Unknown')}
+**Error Type**: {error_info.get("error_type", "Unknown")}
 ---
 ## ❌ What Happened
-{error_info.get('error_message', 'No error message')}
+{error_info.get("error_message", "No error message")}
 ```
-{error_info.get('traceback', 'No traceback')}
+{error_info.get("traceback", "No traceback")}
 ```
 ---
 ## 🔍 Root Cause
-{error_info.get('root_cause', 'Not analyzed')}
+{error_info.get("root_cause", "Not analyzed")}
 ---
 ## 🤔 Why Missed
-{error_info.get('why_missed', 'Not analyzed')}
+{error_info.get("why_missed", "Not analyzed")}
 ---
 ## ✅ Fix Applied
-{error_info.get('solution', 'Not documented')}
+{error_info.get("solution", "Not documented")}
 ---
 ## 🛡️ Prevention Checklist
-{error_info.get('prevention', 'Not documented')}
+{error_info.get("prevention", "Not documented")}
 ---
 ## 💡 Lesson Learned
-{error_info.get('lesson', 'Not documented')}
+{error_info.get("lesson", "Not documented")}
 """
         filepath.write_text(content)

View File

@@ -9,7 +9,6 @@ Entry point registered in pyproject.toml:
     superclaude = "superclaude.pytest_plugin"
 """
-
 import pytest
 from .pm_agent.confidence import ConfidenceChecker
@@ -29,20 +28,17 @@ def pytest_configure(config):
     - complexity(level): Set test complexity (simple, medium, complex)
     """
     config.addinivalue_line(
-        "markers",
-        "confidence_check: Pre-execution confidence assessment (min 70%)"
+        "markers", "confidence_check: Pre-execution confidence assessment (min 70%)"
     )
     config.addinivalue_line(
         "markers",
-        "self_check: Post-implementation validation with evidence requirement"
+        "self_check: Post-implementation validation with evidence requirement",
     )
     config.addinivalue_line(
-        "markers",
-        "reflexion: Error learning and prevention pattern"
+        "markers", "reflexion: Error learning and prevention pattern"
     )
     config.addinivalue_line(
-        "markers",
-        "complexity(level): Set test complexity (simple, medium, complex)"
+        "markers", "complexity(level): Set test complexity (simple, medium, complex)"
     )
@@ -158,9 +154,7 @@ def pytest_runtest_setup(item):
         confidence = checker.assess(context)
         if confidence < 0.7:
-            pytest.skip(
-                f"Confidence too low: {confidence:.0%} (minimum: 70%)"
-            )
+            pytest.skip(f"Confidence too low: {confidence:.0%} (minimum: 70%)")
 def pytest_runtest_makereport(item, call):
@@ -193,6 +187,7 @@ def pytest_runtest_makereport(item, call):
 def pytest_report_header(config):
     """Add SuperClaude version to pytest header"""
     from . import __version__
+
     return f"SuperClaude: {__version__}"

View File

@@ -5,7 +5,6 @@ This file is automatically loaded by pytest and provides
 shared fixtures available to all test modules.
 """
-
 import pytest

View File

@@ -138,7 +138,9 @@ class TestInstallCommands:
         expected = ["agent", "index-repo", "recommend", "research"]
         for expected_cmd in expected:
-            assert expected_cmd in commands, f"Expected command '{expected_cmd}' not found"
+            assert expected_cmd in commands, (
+                f"Expected command '{expected_cmd}' not found"
+            )
 class TestInstallCommandsEdgeCases:

View File

@@ -60,7 +60,9 @@ class TestConfidenceChecker:
         confidence = checker.assess(context)
-        assert 0.7 <= confidence < 0.9, f"Expected medium confidence 0.7-0.9, got {confidence}"
+        assert 0.7 <= confidence < 0.9, (
+            f"Expected medium confidence 0.7-0.9, got {confidence}"
+        )
         assert confidence == 0.7, "Should be exactly 70%"
     def test_confidence_checks_recorded(self, sample_context):