mirror of
https://github.com/SuperClaude-Org/SuperClaude_Framework.git
synced 2025-12-29 16:16:08 +00:00
refactor: Complete V4 Beta framework restructuring
Major reorganization of SuperClaude V4 Beta directories:
- Moved SuperClaude-Lite content to Framework-Hooks/
- Renamed SuperClaude/ directories to Framework/ for clarity
- Created separate Framework-Lite/ for lightweight variant
- Consolidated hooks system under Framework-Hooks/

This restructuring aligns with the V4 Beta architecture:
- Framework/: Full framework with all features
- Framework-Lite/: Lightweight variant
- Framework-Hooks/: Hooks system implementation

Part of SuperClaude V4 Beta development roadmap.

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
602
Framework-Hooks/hooks/notification.py
Normal file
602
Framework-Hooks/hooks/notification.py
Normal file
@@ -0,0 +1,602 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
SuperClaude-Lite Notification Hook
|
||||
|
||||
Implements just-in-time MCP documentation loading and pattern updates.
|
||||
Performance target: <100ms execution time.
|
||||
|
||||
This hook runs when Claude Code sends notifications and provides:
|
||||
- Just-in-time loading of MCP server documentation
|
||||
- Dynamic pattern updates based on operation context
|
||||
- Framework intelligence updates and adaptations
|
||||
- Real-time learning from notification patterns
|
||||
- Performance optimization through intelligent caching
|
||||
"""
|
||||
|
||||
import sys
|
||||
import json
|
||||
import time
|
||||
import os
|
||||
from pathlib import Path
|
||||
from typing import Dict, Any, List, Optional
|
||||
|
||||
# Add shared modules to path
|
||||
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "shared"))
|
||||
|
||||
from framework_logic import FrameworkLogic
|
||||
from pattern_detection import PatternDetector
|
||||
from mcp_intelligence import MCPIntelligence
|
||||
from compression_engine import CompressionEngine
|
||||
from learning_engine import LearningEngine, LearningType, AdaptationScope
|
||||
from yaml_loader import config_loader
|
||||
from logger import log_hook_start, log_hook_end, log_decision, log_error
|
||||
|
||||
|
||||
class NotificationHook:
|
||||
"""
|
||||
Notification hook implementing just-in-time intelligence loading.
|
||||
|
||||
Responsibilities:
|
||||
- Process Claude Code notifications for intelligence opportunities
|
||||
- Load relevant MCP documentation on-demand
|
||||
- Update pattern detection based on real-time context
|
||||
- Provide framework intelligence updates
|
||||
- Cache and optimize frequently accessed information
|
||||
- Learn from notification patterns for future optimization
|
||||
"""
|
||||
|
||||
    def __init__(self) -> None:
        """Wire up framework components, caches, and performance tracking.

        Initialization time is recorded so callers can compare it against the
        configured per-hook performance target.
        """
        start_time = time.time()

        # Initialize core components
        self.framework_logic = FrameworkLogic()
        self.pattern_detector = PatternDetector()
        self.mcp_intelligence = MCPIntelligence()
        self.compression_engine = CompressionEngine()

        # Initialize learning engine
        # NOTE(review): relative path — resolves against the process CWD,
        # not this hook's directory; confirm that is intended.
        cache_dir = Path("cache")
        self.learning_engine = LearningEngine(cache_dir)

        # Load notification configuration
        self.notification_config = config_loader.get_section('session', 'notifications', {})

        # Initialize notification cache (doc bundles keyed by doc type) and
        # a pattern cache reserved for pattern-detection results.
        self.notification_cache = {}
        self.pattern_cache = {}

        # Load hook-specific configuration from SuperClaude config
        self.hook_config = config_loader.get_hook_config('notification')

        # Performance tracking using configuration (milliseconds).
        self.initialization_time = (time.time() - start_time) * 1000
        self.performance_target_ms = config_loader.get_hook_config('notification', 'performance_target_ms', 100)
|
||||
|
||||
    def process_notification(self, notification: dict) -> dict:
        """
        Process notification with just-in-time intelligence loading.

        Pipeline: extract context -> analyze opportunities/needs -> load JIT
        documentation -> update patterns -> generate framework updates ->
        record learning -> assemble response with performance metrics.
        Any exception is logged and converted into a fallback response.

        Args:
            notification: Notification from Claude Code

        Returns:
            Enhanced notification response with intelligence updates
        """
        start_time = time.time()

        # Log hook start
        log_hook_start("notification", {
            "notification_type": notification.get('type', 'unknown'),
            "has_context": bool(notification.get('context')),
            "priority": notification.get('priority', 'normal')
        })

        try:
            # Extract notification context
            context = self._extract_notification_context(notification)

            # Analyze notification for intelligence opportunities
            intelligence_analysis = self._analyze_intelligence_opportunities(context)

            # Determine intelligence needs
            # NOTE(review): `intelligence_needs` drives the logging and the
            # pattern-update gate below, while `intelligence_analysis` drives
            # the actual documentation loading — the two analyses can
            # disagree; confirm this split is intentional.
            intelligence_needs = self._analyze_intelligence_needs(context)

            # Log intelligence loading decision
            if intelligence_needs.get('mcp_docs_needed'):
                log_decision(
                    "notification",
                    "mcp_docs_loading",
                    ",".join(intelligence_needs.get('mcp_servers', [])),
                    f"Documentation needed for: {intelligence_needs.get('reason', 'notification context')}"
                )

            # Load just-in-time documentation if needed
            documentation_updates = self._load_jit_documentation(context, intelligence_analysis)

            # Update patterns if needed
            pattern_updates = self._update_patterns_if_needed(context, intelligence_needs)

            # Log pattern update decision
            # NOTE(review): _update_patterns_if_needed only produces an
            # 'updated_patterns' list; the 'patterns_updated', 'pattern_type'
            # and 'update_count' keys read here are never set, so this branch
            # currently cannot fire — confirm which key is canonical.
            if pattern_updates.get('patterns_updated'):
                log_decision(
                    "notification",
                    "pattern_update",
                    pattern_updates.get('pattern_type', 'unknown'),
                    f"Updated {pattern_updates.get('update_count', 0)} patterns"
                )

            # Generate framework intelligence updates
            framework_updates = self._generate_framework_updates(context, intelligence_analysis)

            # Record learning events
            self._record_notification_learning(context, intelligence_analysis)

            # Create intelligence response
            intelligence_response = self._create_intelligence_response(
                context, documentation_updates, pattern_updates, framework_updates
            )

            # Performance validation (milliseconds against the configured target).
            execution_time = (time.time() - start_time) * 1000
            intelligence_response['performance_metrics'] = {
                'processing_time_ms': execution_time,
                'target_met': execution_time < self.performance_target_ms,
                'cache_hit_rate': self._calculate_cache_hit_rate()
            }

            # Log successful completion
            log_hook_end(
                "notification",
                int(execution_time),
                True,
                {
                    "notification_type": context['notification_type'],
                    "intelligence_loaded": bool(intelligence_needs.get('mcp_docs_needed')),
                    "patterns_updated": pattern_updates.get('patterns_updated', False)
                }
            )

            return intelligence_response

        except Exception as e:
            # Log error
            execution_time = (time.time() - start_time) * 1000
            log_error(
                "notification",
                str(e),
                {"notification_type": notification.get('type', 'unknown')}
            )
            log_hook_end("notification", int(execution_time), False)

            # Graceful fallback on error
            return self._create_fallback_response(notification, str(e))
|
||||
|
||||
def _extract_notification_context(self, notification: dict) -> dict:
|
||||
"""Extract and enrich notification context."""
|
||||
context = {
|
||||
'notification_type': notification.get('type', 'unknown'),
|
||||
'notification_data': notification.get('data', {}),
|
||||
'session_context': notification.get('session_context', {}),
|
||||
'user_context': notification.get('user_context', {}),
|
||||
'operation_context': notification.get('operation_context', {}),
|
||||
'trigger_event': notification.get('trigger', ''),
|
||||
'timestamp': time.time()
|
||||
}
|
||||
|
||||
# Analyze notification importance
|
||||
context['priority'] = self._assess_notification_priority(context)
|
||||
|
||||
# Extract operation characteristics
|
||||
context.update(self._extract_operation_characteristics(context))
|
||||
|
||||
return context
|
||||
|
||||
def _assess_notification_priority(self, context: dict) -> str:
|
||||
"""Assess notification priority for processing."""
|
||||
notification_type = context['notification_type']
|
||||
|
||||
# High priority notifications
|
||||
if notification_type in ['error', 'failure', 'security_alert']:
|
||||
return 'high'
|
||||
elif notification_type in ['performance_issue', 'validation_failure']:
|
||||
return 'high'
|
||||
|
||||
# Medium priority notifications
|
||||
elif notification_type in ['tool_request', 'context_change', 'resource_constraint']:
|
||||
return 'medium'
|
||||
|
||||
# Low priority notifications
|
||||
elif notification_type in ['info', 'debug', 'status_update']:
|
||||
return 'low'
|
||||
|
||||
return 'medium'
|
||||
|
||||
def _extract_operation_characteristics(self, context: dict) -> dict:
|
||||
"""Extract operation characteristics from notification."""
|
||||
operation_context = context.get('operation_context', {})
|
||||
|
||||
return {
|
||||
'operation_type': operation_context.get('type', 'unknown'),
|
||||
'complexity_indicators': operation_context.get('complexity', 0.0),
|
||||
'tool_requests': operation_context.get('tools_requested', []),
|
||||
'mcp_server_hints': operation_context.get('mcp_hints', []),
|
||||
'performance_requirements': operation_context.get('performance', {}),
|
||||
'intelligence_requirements': operation_context.get('intelligence_needed', False)
|
||||
}
|
||||
|
||||
def _analyze_intelligence_opportunities(self, context: dict) -> dict:
|
||||
"""Analyze notification for intelligence loading opportunities."""
|
||||
analysis = {
|
||||
'documentation_needed': [],
|
||||
'pattern_updates_needed': [],
|
||||
'framework_updates_needed': [],
|
||||
'learning_opportunities': [],
|
||||
'optimization_opportunities': []
|
||||
}
|
||||
|
||||
notification_type = context['notification_type']
|
||||
operation_type = context.get('operation_type', 'unknown')
|
||||
|
||||
# Documentation loading opportunities
|
||||
if notification_type == 'tool_request':
|
||||
requested_tools = context.get('tool_requests', [])
|
||||
for tool in requested_tools:
|
||||
if tool in ['ui_component', 'component_generation']:
|
||||
analysis['documentation_needed'].append('magic_patterns')
|
||||
elif tool in ['library_integration', 'framework_usage']:
|
||||
analysis['documentation_needed'].append('context7_patterns')
|
||||
elif tool in ['complex_analysis', 'debugging']:
|
||||
analysis['documentation_needed'].append('sequential_patterns')
|
||||
elif tool in ['testing', 'validation']:
|
||||
analysis['documentation_needed'].append('playwright_patterns')
|
||||
|
||||
# Pattern update opportunities
|
||||
if notification_type in ['context_change', 'operation_start']:
|
||||
analysis['pattern_updates_needed'].extend([
|
||||
'operation_patterns',
|
||||
'context_patterns'
|
||||
])
|
||||
|
||||
# Framework update opportunities
|
||||
if notification_type in ['performance_issue', 'optimization_request']:
|
||||
analysis['framework_updates_needed'].extend([
|
||||
'performance_optimization',
|
||||
'resource_management'
|
||||
])
|
||||
|
||||
# Learning opportunities
|
||||
if notification_type in ['error', 'failure']:
|
||||
analysis['learning_opportunities'].append('error_pattern_learning')
|
||||
elif notification_type in ['success', 'completion']:
|
||||
analysis['learning_opportunities'].append('success_pattern_learning')
|
||||
|
||||
# Optimization opportunities
|
||||
if context.get('performance_requirements'):
|
||||
analysis['optimization_opportunities'].append('performance_optimization')
|
||||
|
||||
return analysis
|
||||
|
||||
def _analyze_intelligence_needs(self, context: dict) -> dict:
|
||||
"""Determine intelligence needs based on context."""
|
||||
needs = {
|
||||
'mcp_docs_needed': False,
|
||||
'mcp_servers': [],
|
||||
'reason': ''
|
||||
}
|
||||
|
||||
# Check for MCP server hints
|
||||
mcp_hints = context.get('mcp_server_hints', [])
|
||||
if mcp_hints:
|
||||
needs['mcp_docs_needed'] = True
|
||||
needs['mcp_servers'] = mcp_hints
|
||||
needs['reason'] = 'MCP server hints'
|
||||
|
||||
# Check for tool requests
|
||||
tool_requests = context.get('tool_requests', [])
|
||||
if tool_requests:
|
||||
needs['mcp_docs_needed'] = True
|
||||
needs['mcp_servers'] = [tool for tool in tool_requests if tool in ['ui_component', 'component_generation', 'library_integration', 'framework_usage', 'complex_analysis', 'debugging', 'testing', 'validation']]
|
||||
needs['reason'] = 'Tool requests'
|
||||
|
||||
# Check for performance requirements
|
||||
performance_requirements = context.get('performance_requirements', {})
|
||||
if performance_requirements:
|
||||
needs['mcp_docs_needed'] = True
|
||||
needs['mcp_servers'] = ['performance_optimization', 'resource_management']
|
||||
needs['reason'] = 'Performance requirements'
|
||||
|
||||
return needs
|
||||
|
||||
def _load_jit_documentation(self, context: dict, intelligence_analysis: dict) -> dict:
|
||||
"""Load just-in-time documentation based on analysis."""
|
||||
documentation_updates = {
|
||||
'loaded_patterns': [],
|
||||
'cached_content': {},
|
||||
'documentation_summaries': {}
|
||||
}
|
||||
|
||||
needed_docs = intelligence_analysis.get('documentation_needed', [])
|
||||
|
||||
for doc_type in needed_docs:
|
||||
# Check cache first
|
||||
if doc_type in self.notification_cache:
|
||||
documentation_updates['cached_content'][doc_type] = self.notification_cache[doc_type]
|
||||
documentation_updates['loaded_patterns'].append(f"{doc_type}_cached")
|
||||
continue
|
||||
|
||||
# Load documentation on-demand
|
||||
doc_content = self._load_documentation_content(doc_type, context)
|
||||
if doc_content:
|
||||
# Cache for future use
|
||||
self.notification_cache[doc_type] = doc_content
|
||||
documentation_updates['cached_content'][doc_type] = doc_content
|
||||
documentation_updates['loaded_patterns'].append(f"{doc_type}_loaded")
|
||||
|
||||
# Create summary for quick access
|
||||
summary = self._create_documentation_summary(doc_content)
|
||||
documentation_updates['documentation_summaries'][doc_type] = summary
|
||||
|
||||
return documentation_updates
|
||||
|
||||
def _load_documentation_content(self, doc_type: str, context: dict) -> Optional[dict]:
|
||||
"""Load specific documentation content."""
|
||||
# Simulated documentation loading - real implementation would fetch from MCP servers
|
||||
documentation_patterns = {
|
||||
'magic_patterns': {
|
||||
'ui_components': ['button', 'form', 'modal', 'card'],
|
||||
'design_systems': ['theme', 'tokens', 'spacing'],
|
||||
'accessibility': ['aria-labels', 'keyboard-navigation', 'screen-readers']
|
||||
},
|
||||
'context7_patterns': {
|
||||
'library_integration': ['import_patterns', 'configuration', 'best_practices'],
|
||||
'framework_usage': ['react_patterns', 'vue_patterns', 'angular_patterns'],
|
||||
'documentation_access': ['api_docs', 'examples', 'tutorials']
|
||||
},
|
||||
'sequential_patterns': {
|
||||
'analysis_workflows': ['step_by_step', 'hypothesis_testing', 'validation'],
|
||||
'debugging_strategies': ['systematic_approach', 'root_cause', 'verification'],
|
||||
'complex_reasoning': ['decomposition', 'synthesis', 'optimization']
|
||||
},
|
||||
'playwright_patterns': {
|
||||
'testing_strategies': ['e2e_tests', 'unit_tests', 'integration_tests'],
|
||||
'automation_patterns': ['page_objects', 'test_data', 'assertions'],
|
||||
'performance_testing': ['load_testing', 'stress_testing', 'monitoring']
|
||||
}
|
||||
}
|
||||
|
||||
return documentation_patterns.get(doc_type, {})
|
||||
|
||||
def _create_documentation_summary(self, doc_content: dict) -> dict:
|
||||
"""Create summary of documentation content for quick access."""
|
||||
summary = {
|
||||
'categories': list(doc_content.keys()),
|
||||
'total_patterns': sum(len(patterns) if isinstance(patterns, list) else 1
|
||||
for patterns in doc_content.values()),
|
||||
'quick_access_items': []
|
||||
}
|
||||
|
||||
# Extract most commonly used patterns
|
||||
for category, patterns in doc_content.items():
|
||||
if isinstance(patterns, list) and patterns:
|
||||
summary['quick_access_items'].append({
|
||||
'category': category,
|
||||
'top_pattern': patterns[0],
|
||||
'pattern_count': len(patterns)
|
||||
})
|
||||
|
||||
return summary
|
||||
|
||||
def _update_patterns_if_needed(self, context: dict, intelligence_needs: dict) -> dict:
|
||||
"""Update pattern detection based on context."""
|
||||
pattern_updates = {
|
||||
'updated_patterns': [],
|
||||
'new_patterns_detected': [],
|
||||
'pattern_effectiveness': {}
|
||||
}
|
||||
|
||||
if intelligence_needs.get('mcp_docs_needed'):
|
||||
# Update operation-specific patterns
|
||||
operation_type = context.get('operation_type', 'unknown')
|
||||
self._update_operation_patterns(operation_type, pattern_updates)
|
||||
|
||||
# Update context-specific patterns
|
||||
session_context = context.get('session_context', {})
|
||||
self._update_context_patterns(session_context, pattern_updates)
|
||||
|
||||
return pattern_updates
|
||||
|
||||
def _update_operation_patterns(self, operation_type: str, pattern_updates: dict):
|
||||
"""Update operation-specific patterns."""
|
||||
if operation_type in ['build', 'implement']:
|
||||
pattern_updates['updated_patterns'].append('build_operation_patterns')
|
||||
# Update pattern detection for build operations
|
||||
elif operation_type in ['analyze', 'debug']:
|
||||
pattern_updates['updated_patterns'].append('analysis_operation_patterns')
|
||||
# Update pattern detection for analysis operations
|
||||
elif operation_type in ['test', 'validate']:
|
||||
pattern_updates['updated_patterns'].append('testing_operation_patterns')
|
||||
# Update pattern detection for testing operations
|
||||
|
||||
def _update_context_patterns(self, session_context: dict, pattern_updates: dict):
|
||||
"""Update context-specific patterns."""
|
||||
if session_context.get('project_type') == 'frontend':
|
||||
pattern_updates['updated_patterns'].append('frontend_context_patterns')
|
||||
elif session_context.get('project_type') == 'backend':
|
||||
pattern_updates['updated_patterns'].append('backend_context_patterns')
|
||||
elif session_context.get('project_type') == 'fullstack':
|
||||
pattern_updates['updated_patterns'].append('fullstack_context_patterns')
|
||||
|
||||
def _generate_framework_updates(self, context: dict, intelligence_analysis: dict) -> dict:
|
||||
"""Generate framework intelligence updates."""
|
||||
framework_updates = {
|
||||
'configuration_updates': {},
|
||||
'optimization_recommendations': [],
|
||||
'intelligence_enhancements': []
|
||||
}
|
||||
|
||||
needed_updates = intelligence_analysis.get('framework_updates_needed', [])
|
||||
|
||||
for update_type in needed_updates:
|
||||
if update_type == 'performance_optimization':
|
||||
framework_updates['optimization_recommendations'].extend([
|
||||
'Enable parallel processing for multi-file operations',
|
||||
'Activate compression for resource-constrained scenarios',
|
||||
'Use intelligent caching for repeated operations'
|
||||
])
|
||||
|
||||
elif update_type == 'resource_management':
|
||||
resource_usage = context.get('session_context', {}).get('resource_usage', 0)
|
||||
if resource_usage > 75:
|
||||
framework_updates['configuration_updates']['compression'] = 'enable_aggressive'
|
||||
framework_updates['optimization_recommendations'].append(
|
||||
'Resource usage high - enabling aggressive compression'
|
||||
)
|
||||
|
||||
return framework_updates
|
||||
|
||||
    def _record_notification_learning(self, context: dict, intelligence_analysis: dict) -> None:
        """Record notification learning for optimization.

        Feeds error/success learning events into the LearningEngine based on
        the opportunities identified by _analyze_intelligence_opportunities.
        """
        learning_opportunities = intelligence_analysis.get('learning_opportunities', [])

        for opportunity in learning_opportunities:
            if opportunity == 'error_pattern_learning':
                # NOTE(review): the trailing positional values (0.7, 0.8)
                # appear to be learning-value/confidence weights — confirm
                # against LearningEngine.record_learning_event's signature.
                self.learning_engine.record_learning_event(
                    LearningType.ERROR_RECOVERY,
                    AdaptationScope.USER,
                    context,
                    {
                        'notification_type': context['notification_type'],
                        'error_context': context.get('notification_data', {}),
                        'intelligence_loaded': len(intelligence_analysis.get('documentation_needed', []))
                    },
                    0.7,  # Learning value from errors
                    0.8,
                    {'hook': 'notification', 'learning_type': 'error'}
                )

            elif opportunity == 'success_pattern_learning':
                self.learning_engine.record_learning_event(
                    LearningType.OPERATION_PATTERN,
                    AdaptationScope.USER,
                    context,
                    {
                        'notification_type': context['notification_type'],
                        'success_context': context.get('notification_data', {}),
                        'patterns_updated': len(intelligence_analysis.get('pattern_updates_needed', []))
                    },
                    0.9,  # High learning value from success
                    0.9,
                    {'hook': 'notification', 'learning_type': 'success'}
                )
|
||||
|
||||
def _calculate_cache_hit_rate(self) -> float:
|
||||
"""Calculate cache hit ratio for performance metrics."""
|
||||
if not hasattr(self, '_cache_requests'):
|
||||
self._cache_requests = 0
|
||||
self._cache_hits = 0
|
||||
|
||||
if self._cache_requests == 0:
|
||||
return 0.0
|
||||
|
||||
return self._cache_hits / self._cache_requests
|
||||
|
||||
    def _create_intelligence_response(self, context: dict, documentation_updates: dict,
                                      pattern_updates: dict, framework_updates: dict) -> dict:
        """Create comprehensive intelligence response.

        Assembles the hook's output schema from the intermediate results of
        the processing pipeline; the caller adds 'performance_metrics'
        afterwards.
        """
        return {
            'notification_type': context['notification_type'],
            'priority': context['priority'],
            'timestamp': context['timestamp'],

            # Top-level booleans summarizing whether each subsystem did work.
            'intelligence_updates': {
                'documentation_loaded': len(documentation_updates.get('loaded_patterns', [])) > 0,
                'patterns_updated': len(pattern_updates.get('updated_patterns', [])) > 0,
                'framework_enhanced': len(framework_updates.get('optimization_recommendations', [])) > 0
            },

            'documentation': {
                'patterns_loaded': documentation_updates.get('loaded_patterns', []),
                'summaries': documentation_updates.get('documentation_summaries', {}),
                'cache_status': 'active'
            },

            'patterns': {
                'updated_patterns': pattern_updates.get('updated_patterns', []),
                'new_patterns': pattern_updates.get('new_patterns_detected', []),
                'effectiveness': pattern_updates.get('pattern_effectiveness', {})
            },

            'framework': {
                'configuration_updates': framework_updates.get('configuration_updates', {}),
                'optimization_recommendations': framework_updates.get('optimization_recommendations', []),
                'intelligence_enhancements': framework_updates.get('intelligence_enhancements', [])
            },

            # Static capability flags advertised to the caller.
            'optimization': {
                'just_in_time_loading': True,
                'intelligent_caching': True,
                'performance_optimized': True,
                'learning_enabled': True
            },

            'metadata': {
                'hook_version': 'notification_1.0',
                'processing_timestamp': time.time(),
                'intelligence_level': 'adaptive'
            }
        }
|
||||
|
||||
def _create_fallback_response(self, notification: dict, error: str) -> dict:
|
||||
"""Create fallback response on error."""
|
||||
return {
|
||||
'notification_type': notification.get('type', 'unknown'),
|
||||
'priority': 'low',
|
||||
'error': error,
|
||||
'fallback_mode': True,
|
||||
|
||||
'intelligence_updates': {
|
||||
'documentation_loaded': False,
|
||||
'patterns_updated': False,
|
||||
'framework_enhanced': False
|
||||
},
|
||||
|
||||
'documentation': {
|
||||
'patterns_loaded': [],
|
||||
'summaries': {},
|
||||
'cache_status': 'error'
|
||||
},
|
||||
|
||||
'performance_metrics': {
|
||||
'processing_time_ms': 0,
|
||||
'target_met': False,
|
||||
'error_occurred': True
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
def main():
    """Read a notification from stdin, process it, and emit JSON on stdout.

    On any failure a structured JSON error object is printed and the
    process exits with status 1, so callers always receive valid JSON.
    """
    try:
        payload = json.loads(sys.stdin.read())
        hook = NotificationHook()
        print(json.dumps(hook.process_notification(payload), indent=2))
    except Exception as exc:
        print(json.dumps({
            'intelligence_updates_enabled': False,
            'error': str(exc),
            'fallback_mode': True
        }, indent=2))
        sys.exit(1)


if __name__ == "__main__":
    main()
|
||||
764
Framework-Hooks/hooks/post_tool_use.py
Normal file
764
Framework-Hooks/hooks/post_tool_use.py
Normal file
@@ -0,0 +1,764 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
SuperClaude-Lite Post-Tool-Use Hook
|
||||
|
||||
Implements RULES.md + PRINCIPLES.md validation and learning system.
|
||||
Performance target: <100ms execution time.
|
||||
|
||||
This hook runs after every tool usage and provides:
|
||||
- Quality validation against SuperClaude principles
|
||||
- Effectiveness measurement and learning
|
||||
- Error pattern detection and prevention
|
||||
- Performance optimization feedback
|
||||
- Adaptation and improvement recommendations
|
||||
"""
|
||||
|
||||
import sys
|
||||
import json
|
||||
import time
|
||||
import os
|
||||
from pathlib import Path
|
||||
from typing import Dict, Any, List, Optional, Tuple
|
||||
|
||||
# Add shared modules to path
|
||||
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "shared"))
|
||||
|
||||
from framework_logic import FrameworkLogic, ValidationResult, OperationContext, OperationType, RiskLevel
|
||||
from pattern_detection import PatternDetector
|
||||
from mcp_intelligence import MCPIntelligence
|
||||
from compression_engine import CompressionEngine
|
||||
from learning_engine import LearningEngine, LearningType, AdaptationScope
|
||||
from yaml_loader import config_loader
|
||||
from logger import log_hook_start, log_hook_end, log_decision, log_error
|
||||
|
||||
|
||||
class PostToolUseHook:
|
||||
"""
|
||||
Post-tool-use hook implementing SuperClaude validation and learning.
|
||||
|
||||
Responsibilities:
|
||||
- Validate tool execution against RULES.md and PRINCIPLES.md
|
||||
- Measure operation effectiveness and quality
|
||||
- Learn from successful and failed patterns
|
||||
- Detect error patterns and suggest improvements
|
||||
- Record performance metrics for optimization
|
||||
- Generate adaptation recommendations
|
||||
"""
|
||||
|
||||
    def __init__(self) -> None:
        """Wire up validation components, configuration, and perf tracking.

        Validation and quality-standard configs prefer dedicated YAML files
        and fall back to hook/global configuration when those are missing.
        """
        start_time = time.time()

        # Initialize core components
        self.framework_logic = FrameworkLogic()
        self.pattern_detector = PatternDetector()
        self.mcp_intelligence = MCPIntelligence()
        self.compression_engine = CompressionEngine()

        # Initialize learning engine
        # NOTE(review): relative path — resolves against the process CWD,
        # not this hook's directory; confirm that is intended.
        cache_dir = Path("cache")
        self.learning_engine = LearningEngine(cache_dir)

        # Load hook-specific configuration from SuperClaude config
        self.hook_config = config_loader.get_hook_config('post_tool_use')

        # Load validation configuration (from YAML if exists, otherwise use hook config)
        try:
            self.validation_config = config_loader.load_config('validation')
        except FileNotFoundError:
            # Fall back to hook configuration if YAML file not found
            self.validation_config = self.hook_config.get('configuration', {})

        # Load quality standards (from YAML if exists, otherwise use hook config)
        try:
            self.quality_standards = config_loader.load_config('performance')
        except FileNotFoundError:
            # Fall back to performance targets from global configuration
            self.quality_standards = config_loader.get_performance_targets()

        # Performance tracking using configuration (milliseconds).
        self.initialization_time = (time.time() - start_time) * 1000
        self.performance_target_ms = config_loader.get_hook_config('post_tool_use', 'performance_target_ms', 100)
|
||||
|
||||
    def process_tool_result(self, tool_result: dict) -> dict:
        """
        Process tool execution result with validation and learning.

        Pipeline: extract context -> validate -> measure effectiveness ->
        analyze/record learning -> generate recommendations -> build report
        -> analyze execution patterns -> attach performance metrics.  Any
        exception is logged and converted into a fallback result.

        NOTE(review): the helper methods called here (_validate_tool_result,
        _measure_effectiveness, _analyze_learning_opportunities, etc.) are
        defined later in this file, outside the reviewed chunk.

        Args:
            tool_result: Tool execution result from Claude Code

        Returns:
            Enhanced result with SuperClaude validation and insights
        """
        start_time = time.time()

        # Log hook start
        log_hook_start("post_tool_use", {
            "tool_name": tool_result.get('tool_name', 'unknown'),
            "success": tool_result.get('success', False),
            "has_error": bool(tool_result.get('error'))
        })

        try:
            # Extract execution context
            context = self._extract_execution_context(tool_result)

            # Validate against SuperClaude principles
            validation_result = self._validate_tool_result(context)

            # Log validation decision (first failed check only, for brevity)
            if not validation_result.is_valid:
                log_decision(
                    "post_tool_use",
                    "validation_failure",
                    validation_result.failed_checks[0] if validation_result.failed_checks else "unknown",
                    f"Tool '{context['tool_name']}' failed validation: {validation_result.message}"
                )

            # Measure effectiveness and quality
            effectiveness_metrics = self._measure_effectiveness(context, validation_result)

            # Detect patterns and learning opportunities
            learning_analysis = self._analyze_learning_opportunities(context, effectiveness_metrics)

            # Record learning events
            self._record_learning_events(context, effectiveness_metrics, learning_analysis)

            # Generate recommendations
            recommendations = self._generate_recommendations(context, validation_result, learning_analysis)

            # Create validation report
            validation_report = self._create_validation_report(
                context, validation_result, effectiveness_metrics,
                learning_analysis, recommendations
            )

            # Detect patterns in tool execution
            pattern_analysis = self._analyze_execution_patterns(context, validation_result)

            # Log pattern detection
            if pattern_analysis.get('error_pattern_detected'):
                log_decision(
                    "post_tool_use",
                    "error_pattern_detected",
                    pattern_analysis.get('pattern_type', 'unknown'),
                    pattern_analysis.get('description', 'Error pattern identified')
                )

            # Performance tracking (milliseconds against the configured target).
            execution_time = (time.time() - start_time) * 1000
            validation_report['performance_metrics'] = {
                'processing_time_ms': execution_time,
                'target_met': execution_time < self.performance_target_ms,
                'quality_score': self._calculate_quality_score(context, validation_result)
            }

            # Log successful completion
            log_hook_end(
                "post_tool_use",
                int(execution_time),
                True,
                {
                    "tool_name": context['tool_name'],
                    "validation_passed": validation_result.is_valid,
                    "quality_score": validation_report['performance_metrics']['quality_score']
                }
            )

            return validation_report

        except Exception as e:
            # Log error
            execution_time = (time.time() - start_time) * 1000
            log_error(
                "post_tool_use",
                str(e),
                {"tool_name": tool_result.get('tool_name', 'unknown')}
            )
            log_hook_end("post_tool_use", int(execution_time), False)

            # Graceful fallback on error
            return self._create_fallback_result(tool_result, str(e))
|
||||
|
||||
def _extract_execution_context(self, tool_result: dict) -> dict:
|
||||
"""Extract and enrich tool execution context."""
|
||||
context = {
|
||||
'tool_name': tool_result.get('tool_name', ''),
|
||||
'execution_status': tool_result.get('status', 'unknown'),
|
||||
'execution_time_ms': tool_result.get('execution_time_ms', 0),
|
||||
'parameters_used': tool_result.get('parameters', {}),
|
||||
'result_data': tool_result.get('result', {}),
|
||||
'error_info': tool_result.get('error', {}),
|
||||
'mcp_servers_used': tool_result.get('mcp_servers', []),
|
||||
'performance_data': tool_result.get('performance', {}),
|
||||
'user_intent': tool_result.get('user_intent', ''),
|
||||
'session_context': tool_result.get('session_context', {}),
|
||||
'timestamp': time.time()
|
||||
}
|
||||
|
||||
# Analyze operation characteristics
|
||||
context.update(self._analyze_operation_outcome(context))
|
||||
|
||||
# Extract quality indicators
|
||||
context.update(self._extract_quality_indicators(context))
|
||||
|
||||
return context
|
||||
|
||||
def _analyze_operation_outcome(self, context: dict) -> dict:
|
||||
"""Analyze the outcome of the tool operation."""
|
||||
outcome_analysis = {
|
||||
'success': context['execution_status'] == 'success',
|
||||
'partial_success': False,
|
||||
'error_occurred': context['execution_status'] == 'error',
|
||||
'performance_acceptable': True,
|
||||
'quality_indicators': [],
|
||||
'risk_factors': []
|
||||
}
|
||||
|
||||
# Analyze execution status
|
||||
if context['execution_status'] in ['partial', 'warning']:
|
||||
outcome_analysis['partial_success'] = True
|
||||
|
||||
# Performance analysis
|
||||
execution_time = context.get('execution_time_ms', 0)
|
||||
if execution_time > 5000: # 5 second threshold
|
||||
outcome_analysis['performance_acceptable'] = False
|
||||
outcome_analysis['risk_factors'].append('slow_execution')
|
||||
|
||||
# Error analysis
|
||||
if context.get('error_info'):
|
||||
error_type = context['error_info'].get('type', 'unknown')
|
||||
outcome_analysis['error_type'] = error_type
|
||||
outcome_analysis['error_recoverable'] = error_type not in ['fatal', 'security', 'corruption']
|
||||
|
||||
# Quality indicators from result data
|
||||
result_data = context.get('result_data', {})
|
||||
if result_data:
|
||||
if result_data.get('validation_passed'):
|
||||
outcome_analysis['quality_indicators'].append('validation_passed')
|
||||
if result_data.get('tests_passed'):
|
||||
outcome_analysis['quality_indicators'].append('tests_passed')
|
||||
if result_data.get('linting_clean'):
|
||||
outcome_analysis['quality_indicators'].append('linting_clean')
|
||||
|
||||
return outcome_analysis
|
||||
|
||||
def _extract_quality_indicators(self, context: dict) -> dict:
|
||||
"""Extract quality indicators from execution context."""
|
||||
quality_indicators = {
|
||||
'code_quality_score': 0.0,
|
||||
'security_compliance': True,
|
||||
'performance_efficiency': 1.0,
|
||||
'error_handling_present': False,
|
||||
'documentation_adequate': False,
|
||||
'test_coverage_acceptable': False
|
||||
}
|
||||
|
||||
# Analyze tool output for quality indicators
|
||||
tool_name = context['tool_name']
|
||||
result_data = context.get('result_data', {})
|
||||
|
||||
# Code quality analysis
|
||||
if tool_name in ['Write', 'Edit', 'Generate']:
|
||||
# Check for quality indicators in the result
|
||||
if 'quality_score' in result_data:
|
||||
quality_indicators['code_quality_score'] = result_data['quality_score']
|
||||
|
||||
# Infer quality from operation success and performance
|
||||
if context.get('success') and context.get('performance_acceptable'):
|
||||
quality_indicators['code_quality_score'] = max(
|
||||
quality_indicators['code_quality_score'], 0.7
|
||||
)
|
||||
|
||||
# Security compliance
|
||||
if context.get('error_type') in ['security', 'vulnerability']:
|
||||
quality_indicators['security_compliance'] = False
|
||||
|
||||
# Performance efficiency
|
||||
execution_time = context.get('execution_time_ms', 0)
|
||||
expected_time = context.get('performance_data', {}).get('expected_time_ms', 1000)
|
||||
if execution_time > 0 and expected_time > 0:
|
||||
quality_indicators['performance_efficiency'] = min(expected_time / execution_time, 2.0)
|
||||
|
||||
# Error handling detection
|
||||
if tool_name in ['Write', 'Edit'] and 'try' in str(result_data).lower():
|
||||
quality_indicators['error_handling_present'] = True
|
||||
|
||||
# Documentation assessment
|
||||
if tool_name in ['Document', 'Generate'] or 'doc' in context.get('user_intent', '').lower():
|
||||
quality_indicators['documentation_adequate'] = context.get('success', False)
|
||||
|
||||
return quality_indicators
|
||||
|
||||
def _validate_tool_result(self, context: dict) -> ValidationResult:
    """Validate execution against SuperClaude principles.

    Flattens the enriched execution context into the generic operation
    schema expected by FrameworkLogic.validate_operation, runs the base
    validation, then layers SuperClaude-specific RULES/PRINCIPLES checks
    on top of the base result.

    Args:
        context: Enriched execution context from _extract_execution_context.

    Returns:
        ValidationResult covering both base and SuperClaude validations.
    """
    # Create operation data for validation.  Several flags are heuristics
    # inferred from the user-intent text rather than hard evidence.
    operation_data = {
        'operation_type': context['tool_name'],
        'has_error_handling': context.get('error_handling_present', False),
        # Only code-producing tools are treated as affecting program logic.
        'affects_logic': context['tool_name'] in ['Write', 'Edit', 'Generate'],
        'has_tests': context.get('test_coverage_acceptable', False),
        'is_public_api': 'api' in context.get('user_intent', '').lower(),
        'has_documentation': context.get('documentation_adequate', False),
        'handles_user_input': 'input' in context.get('user_intent', '').lower(),
        'has_input_validation': context.get('security_compliance', True),
        'evidence': context.get('success', False)
    }

    # Run framework validation
    validation_result = self.framework_logic.validate_operation(operation_data)

    # Enhance with SuperClaude-specific validations
    validation_result = self._enhance_validation_with_superclaude_rules(
        validation_result, context
    )

    return validation_result
|
||||
|
||||
def _enhance_validation_with_superclaude_rules(self,
                                               base_validation: ValidationResult,
                                               context: dict) -> ValidationResult:
    """Enhance validation with SuperClaude-specific rules.

    Applies RULES.md checks (Read-before-Write, absolute paths,
    pre-validation of risky operations) and PRINCIPLES.md checks
    (evidence over assumptions, code before docs, token efficiency)
    on top of the base framework validation, then recomputes overall
    validity.

    Args:
        base_validation: Result from FrameworkLogic.validate_operation.
        context: Enriched execution context.

    Returns:
        A new ValidationResult with the extra issues/warnings/suggestions
        and an adjusted quality score; the input object is not mutated.
    """
    # Work on a copy so the base validation remains untouched.
    enhanced_validation = ValidationResult(
        is_valid=base_validation.is_valid,
        issues=base_validation.issues.copy(),
        warnings=base_validation.warnings.copy(),
        suggestions=base_validation.suggestions.copy(),
        quality_score=base_validation.quality_score
    )

    # RULES.md validation

    # Rule: Always use Read tool before Write or Edit operations
    if context['tool_name'] in ['Write', 'Edit']:
        session_context = context.get('session_context', {})
        recent_tools = session_context.get('recent_tools', [])
        # Only the last three tool invocations are considered "recent".
        if not any('Read' in tool for tool in recent_tools[-3:]):
            enhanced_validation.warnings.append(
                "RULES violation: No Read operation detected before Write/Edit"
            )
            enhanced_validation.quality_score -= 0.1

    # Rule: Use absolute paths only
    params = context.get('parameters_used', {})
    for param_name, param_value in params.items():
        # Any string parameter whose name mentions "path" is checked;
        # URLs are exempt from the absolute-path rule.
        if 'path' in param_name.lower() and isinstance(param_value, str):
            if not os.path.isabs(param_value) and not param_value.startswith(('http', 'https')):
                enhanced_validation.issues.append(
                    f"RULES violation: Relative path used in {param_name}: {param_value}"
                )
                enhanced_validation.quality_score -= 0.2

    # Rule: Validate before execution for high-risk operations
    if context.get('risk_factors'):
        if not context.get('validation_performed', False):
            enhanced_validation.warnings.append(
                "RULES recommendation: High-risk operation should include validation"
            )

    # PRINCIPLES.md validation

    # Principle: Evidence > assumptions
    if not context.get('evidence_provided', False) and context.get('assumptions_made', False):
        enhanced_validation.suggestions.append(
            "PRINCIPLES: Provide evidence to support assumptions"
        )

    # Principle: Code > documentation
    if context['tool_name'] == 'Document' and not context.get('working_code_exists', True):
        enhanced_validation.warnings.append(
            "PRINCIPLES: Documentation should follow working code, not precede it"
        )

    # Principle: Efficiency > verbosity
    # 5000 characters is the heuristic threshold for a "large" output.
    result_size = len(str(context.get('result_data', '')))
    if result_size > 5000 and not context.get('complexity_justifies_length', False):
        enhanced_validation.suggestions.append(
            "PRINCIPLES: Consider token efficiency techniques for large outputs"
        )

    # Recalculate overall validity: any hard issue or a quality score
    # below 0.7 invalidates the operation.
    enhanced_validation.is_valid = (
        len(enhanced_validation.issues) == 0 and
        enhanced_validation.quality_score >= 0.7
    )

    return enhanced_validation
|
||||
|
||||
def _measure_effectiveness(self, context: dict, validation_result: ValidationResult) -> dict:
    """Measure operation effectiveness and quality.

    Combines validation quality, runtime performance, an estimated user
    satisfaction, and the learning value of the run into a weighted
    overall effectiveness score.

    Args:
        context: Enriched execution context.
        validation_result: Validation outcome for this execution.

    Returns:
        Dict with quality/performance/satisfaction/learning components
        plus the weighted 'overall_effectiveness'.
    """
    effectiveness_metrics = {
        'overall_effectiveness': 0.0,
        'quality_score': validation_result.quality_score,
        'performance_score': 0.0,
        'user_satisfaction_estimate': 0.0,
        'learning_value': 0.0,
        'improvement_potential': 0.0
    }

    # Performance scoring: expected/actual runtime ratio, capped at 1.0.
    execution_time = context.get('execution_time_ms', 0)
    expected_time = context.get('performance_data', {}).get('expected_time_ms', 1000)
    if execution_time > 0:
        time_ratio = expected_time / max(execution_time, 1)
        effectiveness_metrics['performance_score'] = min(time_ratio, 1.0)
    else:
        # No measured runtime is treated as optimal.
        effectiveness_metrics['performance_score'] = 1.0

    # User satisfaction estimation: 0.8 baseline on success, boosted by
    # high quality (+0.15) and good performance (+0.05), capped at 1.0.
    if context.get('success'):
        base_satisfaction = 0.8
        if validation_result.quality_score > 0.8:
            base_satisfaction += 0.15
        if effectiveness_metrics['performance_score'] > 0.8:
            base_satisfaction += 0.05
        effectiveness_metrics['user_satisfaction_estimate'] = min(base_satisfaction, 1.0)
    else:
        # Reduce satisfaction based on error severity
        error_severity = self._assess_error_severity(context)
        effectiveness_metrics['user_satisfaction_estimate'] = max(0.3 - error_severity * 0.3, 0.0)

    # Learning value assessment (each contributing signal is additive,
    # capped at 1.0 below).
    if context.get('mcp_servers_used'):
        effectiveness_metrics['learning_value'] += 0.2  # MCP usage provides learning
    if context.get('error_occurred'):
        effectiveness_metrics['learning_value'] += 0.3  # Errors provide valuable learning
    if context.get('complexity_score', 0) > 0.6:
        effectiveness_metrics['learning_value'] += 0.2  # Complex operations provide insights

    effectiveness_metrics['learning_value'] = min(effectiveness_metrics['learning_value'], 1.0)

    # Improvement potential scales with the number of validator suggestions.
    if len(validation_result.suggestions) > 0:
        effectiveness_metrics['improvement_potential'] = min(len(validation_result.suggestions) * 0.2, 1.0)

    # Overall effectiveness calculation: weighted blend of the components
    # (weights sum to 1.0).
    weights = {
        'quality': 0.3,
        'performance': 0.25,
        'satisfaction': 0.35,
        'learning': 0.1
    }

    effectiveness_metrics['overall_effectiveness'] = (
        effectiveness_metrics['quality_score'] * weights['quality'] +
        effectiveness_metrics['performance_score'] * weights['performance'] +
        effectiveness_metrics['user_satisfaction_estimate'] * weights['satisfaction'] +
        effectiveness_metrics['learning_value'] * weights['learning']
    )

    return effectiveness_metrics
|
||||
|
||||
def _assess_error_severity(self, context: dict) -> float:
|
||||
"""Assess error severity on a scale of 0.0 to 1.0."""
|
||||
if not context.get('error_occurred'):
|
||||
return 0.0
|
||||
|
||||
error_type = context.get('error_type', 'unknown')
|
||||
|
||||
severity_map = {
|
||||
'fatal': 1.0,
|
||||
'security': 0.9,
|
||||
'corruption': 0.8,
|
||||
'timeout': 0.6,
|
||||
'validation': 0.4,
|
||||
'warning': 0.2,
|
||||
'unknown': 0.5
|
||||
}
|
||||
|
||||
return severity_map.get(error_type, 0.5)
|
||||
|
||||
def _analyze_learning_opportunities(self, context: dict, effectiveness_metrics: dict) -> dict:
    """Analyze learning opportunities from the execution.

    Derives detected patterns, success/failure factors, optimization
    opportunities, and adaptation recommendations from the execution
    context and its effectiveness metrics.  "Highly effective" means
    overall_effectiveness > 0.8; "ineffective" means < 0.5.
    """
    learning_analysis = {
        'patterns_detected': [],
        'success_factors': [],
        'failure_factors': [],
        'optimization_opportunities': [],
        'adaptation_recommendations': []
    }

    # Pattern detection: classify each MCP server used as effective or
    # ineffective based on the overall effectiveness of this run.
    if context.get('mcp_servers_used'):
        for server in context['mcp_servers_used']:
            if effectiveness_metrics['overall_effectiveness'] > 0.8:
                learning_analysis['patterns_detected'].append(f"effective_{server}_usage")
            elif effectiveness_metrics['overall_effectiveness'] < 0.5:
                learning_analysis['patterns_detected'].append(f"ineffective_{server}_usage")

    # Success factor analysis (only for highly effective runs).
    if effectiveness_metrics['overall_effectiveness'] > 0.8:
        if effectiveness_metrics['performance_score'] > 0.8:
            learning_analysis['success_factors'].append('optimal_performance')
        if effectiveness_metrics['quality_score'] > 0.8:
            learning_analysis['success_factors'].append('high_quality_output')
        if context.get('mcp_servers_used'):
            learning_analysis['success_factors'].append('effective_mcp_coordination')

    # Failure factor analysis (only for ineffective runs).
    if effectiveness_metrics['overall_effectiveness'] < 0.5:
        if effectiveness_metrics['performance_score'] < 0.5:
            learning_analysis['failure_factors'].append('poor_performance')
        if effectiveness_metrics['quality_score'] < 0.5:
            learning_analysis['failure_factors'].append('quality_issues')
        if context.get('error_occurred'):
            learning_analysis['failure_factors'].append(f"error_{context.get('error_type', 'unknown')}")

    # Optimization opportunities
    if effectiveness_metrics['improvement_potential'] > 0.3:
        learning_analysis['optimization_opportunities'].append('validation_improvements_available')

    # Runs slower than 2 seconds are flagged for performance work.
    if context.get('execution_time_ms', 0) > 2000:
        learning_analysis['optimization_opportunities'].append('performance_optimization_needed')

    # Adaptation recommendations summarize the factors found above.
    if len(learning_analysis['success_factors']) > 0:
        learning_analysis['adaptation_recommendations'].append(
            f"Reinforce patterns: {', '.join(learning_analysis['success_factors'])}"
        )

    if len(learning_analysis['failure_factors']) > 0:
        learning_analysis['adaptation_recommendations'].append(
            f"Address failure patterns: {', '.join(learning_analysis['failure_factors'])}"
        )

    return learning_analysis
|
||||
|
||||
def _record_learning_events(self, context: dict, effectiveness_metrics: dict, learning_analysis: dict):
    """Record learning events for future adaptation.

    Persists up to three kinds of events through the learning engine:
    the overall operation pattern (user scope), per-MCP-server
    effectiveness feedback (user scope), and — when an error occurred —
    an error-recovery event (project scope).  Side effects only; returns
    nothing.
    """
    overall_effectiveness = effectiveness_metrics['overall_effectiveness']

    # Record general operation learning
    self.learning_engine.record_learning_event(
        LearningType.OPERATION_PATTERN,
        AdaptationScope.USER,
        context,
        {
            'tool_name': context['tool_name'],
            'mcp_servers': context.get('mcp_servers_used', []),
            'success_factors': learning_analysis['success_factors'],
            'failure_factors': learning_analysis['failure_factors']
        },
        overall_effectiveness,
        0.8,  # High confidence in post-execution analysis
        {'hook': 'post_tool_use', 'effectiveness': overall_effectiveness}
    )

    # Record MCP server effectiveness (one event per server used)
    for server in context.get('mcp_servers_used', []):
        self.learning_engine.record_learning_event(
            LearningType.EFFECTIVENESS_FEEDBACK,
            AdaptationScope.USER,
            context,
            {'mcp_server': server},
            overall_effectiveness,
            0.9,  # Very high confidence in direct feedback
            {'server_performance': effectiveness_metrics['performance_score']}
        )

    # Record error patterns if applicable
    if context.get('error_occurred'):
        self.learning_engine.record_learning_event(
            LearningType.ERROR_RECOVERY,
            AdaptationScope.PROJECT,
            context,
            {
                'error_type': context.get('error_type'),
                'recovery_successful': context.get('error_recoverable', False),
                'context_factors': learning_analysis['failure_factors']
            },
            1.0 - self._assess_error_severity(context),  # Inverse of severity
            1.0,  # Full confidence in error data
            {'error_learning': True}
        )
|
||||
|
||||
def _generate_recommendations(self, context: dict, validation_result: ValidationResult,
|
||||
learning_analysis: dict) -> dict:
|
||||
"""Generate recommendations for improvement."""
|
||||
recommendations = {
|
||||
'immediate_actions': [],
|
||||
'optimization_suggestions': [],
|
||||
'learning_adaptations': [],
|
||||
'prevention_measures': []
|
||||
}
|
||||
|
||||
# Immediate actions from validation issues
|
||||
for issue in validation_result.issues:
|
||||
recommendations['immediate_actions'].append(f"Fix: {issue}")
|
||||
|
||||
for warning in validation_result.warnings:
|
||||
recommendations['immediate_actions'].append(f"Address: {warning}")
|
||||
|
||||
# Optimization suggestions
|
||||
for suggestion in validation_result.suggestions:
|
||||
recommendations['optimization_suggestions'].append(suggestion)
|
||||
|
||||
for opportunity in learning_analysis['optimization_opportunities']:
|
||||
recommendations['optimization_suggestions'].append(f"Optimize: {opportunity}")
|
||||
|
||||
# Learning adaptations
|
||||
for adaptation in learning_analysis['adaptation_recommendations']:
|
||||
recommendations['learning_adaptations'].append(adaptation)
|
||||
|
||||
# Prevention measures for errors
|
||||
if context.get('error_occurred'):
|
||||
error_type = context.get('error_type', 'unknown')
|
||||
if error_type == 'timeout':
|
||||
recommendations['prevention_measures'].append("Consider parallel execution for large operations")
|
||||
elif error_type == 'validation':
|
||||
recommendations['prevention_measures'].append("Enable pre-validation for similar operations")
|
||||
elif error_type == 'security':
|
||||
recommendations['prevention_measures'].append("Implement security validation checks")
|
||||
|
||||
return recommendations
|
||||
|
||||
def _calculate_quality_score(self, context: dict, validation_result: ValidationResult) -> float:
|
||||
"""Calculate quality score based on validation and execution."""
|
||||
base_score = validation_result.quality_score
|
||||
|
||||
# Adjust for execution time
|
||||
execution_time = context.get('execution_time_ms', 0)
|
||||
time_ratio = execution_time / max(self.performance_target_ms, 1)
|
||||
time_penalty = min(time_ratio, 1.0)
|
||||
|
||||
# Adjust for error occurrence
|
||||
if context.get('error_occurred'):
|
||||
error_severity = self._assess_error_severity(context)
|
||||
error_penalty = 1.0 - error_severity
|
||||
|
||||
# Combine adjustments
|
||||
quality_score = base_score * time_penalty * error_penalty
|
||||
|
||||
return quality_score
|
||||
|
||||
def _create_validation_report(self, context: dict, validation_result: ValidationResult,
                              effectiveness_metrics: dict, learning_analysis: dict,
                              recommendations: dict) -> dict:
    """Create comprehensive validation report.

    Assembles the final hook output: validation outcome, effectiveness
    metrics, learning summary, recommendations, RULES/PRINCIPLES
    compliance flags, and report metadata.  The caller later attaches
    'performance_metrics' to the returned dict.
    """
    return {
        'tool_name': context['tool_name'],
        'execution_status': context['execution_status'],
        'timestamp': context['timestamp'],

        'validation': {
            'is_valid': validation_result.is_valid,
            'quality_score': validation_result.quality_score,
            'issues': validation_result.issues,
            'warnings': validation_result.warnings,
            'suggestions': validation_result.suggestions
        },

        'effectiveness': effectiveness_metrics,

        'learning': {
            'patterns_detected': learning_analysis['patterns_detected'],
            'success_factors': learning_analysis['success_factors'],
            'failure_factors': learning_analysis['failure_factors'],
            'learning_value': effectiveness_metrics['learning_value']
        },

        'recommendations': recommendations,

        # Compliance flags key off the 'RULES'/'PRINCIPLES' markers added by
        # _enhance_validation_with_superclaude_rules.
        'compliance': {
            'rules_compliance': len([i for i in validation_result.issues if 'RULES' in i]) == 0,
            'principles_alignment': len([w for w in validation_result.warnings if 'PRINCIPLES' in w]) == 0,
            'superclaude_score': self._calculate_superclaude_compliance_score(validation_result)
        },

        'metadata': {
            'hook_version': 'post_tool_use_1.0',
            'validation_timestamp': time.time(),
            # +1 accounts for the general operation event always recorded.
            'learning_events_recorded': len(learning_analysis['patterns_detected']) + 1
        }
    }
|
||||
|
||||
def _calculate_superclaude_compliance_score(self, validation_result: ValidationResult) -> float:
|
||||
"""Calculate overall SuperClaude compliance score."""
|
||||
base_score = validation_result.quality_score
|
||||
|
||||
# Penalties for specific violations
|
||||
rules_violations = len([i for i in validation_result.issues if 'RULES' in i])
|
||||
principles_violations = len([w for w in validation_result.warnings if 'PRINCIPLES' in w])
|
||||
|
||||
penalty = (rules_violations * 0.2) + (principles_violations * 0.1)
|
||||
|
||||
return max(base_score - penalty, 0.0)
|
||||
|
||||
def _create_fallback_result(self, tool_result: dict, error: str) -> dict:
    """Create fallback validation report on error.

    Produces a minimal, schema-compatible report (fallback_mode=True,
    all scores zeroed, is_valid=False) so downstream consumers still
    receive the expected structure when the hook itself fails.

    Args:
        tool_result: The raw tool result that was being processed.
        error: Stringified exception that aborted normal processing.
    """
    return {
        'tool_name': tool_result.get('tool_name', 'unknown'),
        'execution_status': 'validation_error',
        'timestamp': time.time(),
        'error': error,
        'fallback_mode': True,

        'validation': {
            'is_valid': False,
            'quality_score': 0.0,
            'issues': [f"Validation hook error: {error}"],
            'warnings': [],
            'suggestions': ['Fix validation hook error']
        },

        'effectiveness': {
            'overall_effectiveness': 0.0,
            'quality_score': 0.0,
            'performance_score': 0.0,
            'user_satisfaction_estimate': 0.0,
            'learning_value': 0.0
        },

        'performance_metrics': {
            'processing_time_ms': 0,
            'target_met': False,
            'error_occurred': True
        }
    }
|
||||
|
||||
def _analyze_execution_patterns(self, context: dict, validation_result: ValidationResult) -> dict:
|
||||
"""Analyze patterns in tool execution."""
|
||||
pattern_analysis = {
|
||||
'error_pattern_detected': False,
|
||||
'pattern_type': 'unknown',
|
||||
'description': 'No error pattern detected'
|
||||
}
|
||||
|
||||
# Check for error occurrence
|
||||
if context.get('error_occurred'):
|
||||
error_type = context.get('error_type', 'unknown')
|
||||
|
||||
# Check for specific error types
|
||||
if error_type in ['fatal', 'security', 'corruption']:
|
||||
pattern_analysis['error_pattern_detected'] = True
|
||||
pattern_analysis['pattern_type'] = error_type
|
||||
pattern_analysis['description'] = f"Error pattern detected: {error_type}"
|
||||
|
||||
return pattern_analysis
|
||||
|
||||
|
||||
def main():
    """Main hook execution function.

    Reads a JSON tool result from stdin, runs the post-tool-use hook over
    it, and prints the resulting validation report as JSON on stdout.
    On any failure a minimal fallback-error JSON is printed instead and
    the process exits with status 1 so the caller can detect the failure.
    """
    try:
        # Read tool result from stdin
        tool_result = json.loads(sys.stdin.read())

        # Initialize and run hook
        hook = PostToolUseHook()
        result = hook.process_tool_result(tool_result)

        # Output result as JSON
        print(json.dumps(result, indent=2))

    except Exception as e:
        # Output error as JSON
        error_result = {
            'validation_error': True,
            'error': str(e),
            'fallback_mode': True
        }
        print(json.dumps(error_result, indent=2))
        sys.exit(1)


if __name__ == "__main__":
    main()
|
||||
771
Framework-Hooks/hooks/pre_compact.py
Executable file
771
Framework-Hooks/hooks/pre_compact.py
Executable file
@@ -0,0 +1,771 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
SuperClaude-Lite Pre-Compact Hook
|
||||
|
||||
Implements MODE_Token_Efficiency.md compression algorithms for intelligent context optimization.
|
||||
Performance target: <150ms execution time.
|
||||
|
||||
This hook runs before context compaction and provides:
|
||||
- Intelligent compression strategy selection
|
||||
- Selective content preservation with framework exclusion
|
||||
- Symbol systems and abbreviation optimization
|
||||
- Quality-gated compression with ≥95% information preservation
|
||||
- Adaptive compression based on resource constraints
|
||||
"""
|
||||
|
||||
import sys
|
||||
import json
|
||||
import time
|
||||
import os
|
||||
from pathlib import Path
|
||||
from typing import Dict, Any, List, Optional, Tuple
|
||||
|
||||
# Add shared modules to path
|
||||
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "shared"))
|
||||
|
||||
from framework_logic import FrameworkLogic
|
||||
from pattern_detection import PatternDetector
|
||||
from mcp_intelligence import MCPIntelligence
|
||||
from compression_engine import (
|
||||
CompressionEngine, CompressionLevel, ContentType, CompressionResult, CompressionStrategy
|
||||
)
|
||||
from learning_engine import LearningEngine, LearningType, AdaptationScope
|
||||
from yaml_loader import config_loader
|
||||
from logger import log_hook_start, log_hook_end, log_decision, log_error
|
||||
|
||||
|
||||
class PreCompactHook:
|
||||
"""
|
||||
Pre-compact hook implementing SuperClaude token efficiency intelligence.
|
||||
|
||||
Responsibilities:
|
||||
- Analyze context for compression opportunities
|
||||
- Apply selective compression with framework protection
|
||||
- Implement symbol systems and abbreviation optimization
|
||||
- Maintain ≥95% information preservation quality
|
||||
- Adapt compression strategy based on resource constraints
|
||||
- Learn from compression effectiveness and user preferences
|
||||
"""
|
||||
|
||||
def __init__(self):
    """Initialize the hook's engines and load its configuration.

    Sets up the framework/pattern/MCP/compression components and the
    learning engine, loads the pre_compact hook configuration, and
    records initialization time plus the performance target (150ms
    default) for later tracking.
    """
    start_time = time.time()

    # Initialize core components
    self.framework_logic = FrameworkLogic()
    self.pattern_detector = PatternDetector()
    self.mcp_intelligence = MCPIntelligence()
    self.compression_engine = CompressionEngine()

    # Initialize learning engine
    # NOTE(review): relative path — resolves against the current working
    # directory, not the hook's install location; confirm this is intended.
    cache_dir = Path("cache")
    self.learning_engine = LearningEngine(cache_dir)

    # Load hook-specific configuration from SuperClaude config
    self.hook_config = config_loader.get_hook_config('pre_compact')

    # Load compression configuration (from YAML if exists, otherwise use hook config)
    try:
        self.compression_config = config_loader.load_config('compression')
    except FileNotFoundError:
        # Fall back to hook configuration if YAML file not found
        self.compression_config = self.hook_config.get('configuration', {})

    # Performance tracking using configuration
    self.initialization_time = (time.time() - start_time) * 1000
    self.performance_target_ms = config_loader.get_hook_config('pre_compact', 'performance_target_ms', 150)
|
||||
|
||||
def process_pre_compact(self, compact_request: dict) -> dict:
    """
    Process pre-compact request with intelligent compression.

    Pipeline: extract context -> analyze content -> choose strategy ->
    apply selective compression -> validate quality -> record learning ->
    emit a compression configuration (with performance metrics attached).
    Every stage is logged; any exception yields a safe fallback
    configuration instead of propagating.

    Args:
        compact_request: Context compaction request from Claude Code

    Returns:
        Compression configuration and optimized content strategy
    """
    start_time = time.time()

    # Log hook start
    log_hook_start("pre_compact", {
        "session_id": compact_request.get('session_id', ''),
        "content_size": len(compact_request.get('content', '')),
        "resource_state": compact_request.get('resource_state', {}),
        "triggers": compact_request.get('triggers', [])
    })

    try:
        # Extract compression context
        context = self._extract_compression_context(compact_request)

        # Analyze content for compression strategy
        content_analysis = self._analyze_content_for_compression(context)

        # Determine optimal compression strategy
        compression_strategy = self._determine_compression_strategy(context, content_analysis)

        # Log compression strategy decision
        log_decision(
            "pre_compact",
            "compression_strategy",
            compression_strategy.level.value,
            f"Based on resource usage: {context.get('token_usage_percent', 0)}%, content type: {content_analysis['content_type'].value}"
        )

        # Apply selective compression with framework protection
        compression_results = self._apply_selective_compression(
            context, compression_strategy, content_analysis
        )

        # Validate compression quality
        quality_validation = self._validate_compression_quality(
            compression_results, compression_strategy
        )

        # Log quality validation results (failures only)
        if not quality_validation['overall_quality_met']:
            log_decision(
                "pre_compact",
                "quality_validation",
                "failed",
                f"Preservation score: {quality_validation['preservation_score']:.2f}, Issues: {', '.join(quality_validation['quality_issues'])}"
            )

        # Record learning events
        self._record_compression_learning(context, compression_results, quality_validation)

        # Generate compression configuration
        compression_config = self._generate_compression_config(
            context, compression_strategy, compression_results, quality_validation
        )

        # Performance tracking
        execution_time = (time.time() - start_time) * 1000
        compression_config['performance_metrics'] = {
            'compression_time_ms': execution_time,
            'target_met': execution_time < self.performance_target_ms,
            'efficiency_score': self._calculate_compression_efficiency(context, execution_time)
        }

        # Log compression results
        log_decision(
            "pre_compact",
            "compression_results",
            f"{compression_config['results']['compression_ratio']:.1%}",
            f"Saved {compression_config['optimization']['estimated_token_savings']} tokens"
        )

        # Log hook end
        log_hook_end(
            "pre_compact",
            int(execution_time),
            True,
            {
                "compression_ratio": compression_config['results']['compression_ratio'],
                "preservation_score": compression_config['quality']['preservation_score'],
                "token_savings": compression_config['optimization']['estimated_token_savings'],
                "performance_target_met": execution_time < self.performance_target_ms
            }
        )

        return compression_config

    except Exception as e:
        # Log error
        log_error("pre_compact", str(e), {"request": compact_request})

        # Log hook end with failure
        log_hook_end("pre_compact", int((time.time() - start_time) * 1000), False)

        # Graceful fallback on error
        return self._create_fallback_compression_config(compact_request, str(e))
|
||||
|
||||
def _extract_compression_context(self, compact_request: dict) -> dict:
|
||||
"""Extract and enrich compression context."""
|
||||
context = {
|
||||
'session_id': compact_request.get('session_id', ''),
|
||||
'content_to_compress': compact_request.get('content', ''),
|
||||
'content_metadata': compact_request.get('metadata', {}),
|
||||
'resource_constraints': compact_request.get('resource_state', {}),
|
||||
'user_preferences': compact_request.get('user_preferences', {}),
|
||||
'compression_triggers': compact_request.get('triggers', []),
|
||||
'previous_compressions': compact_request.get('compression_history', []),
|
||||
'session_context': compact_request.get('session_context', {}),
|
||||
'timestamp': time.time()
|
||||
}
|
||||
|
||||
# Analyze content characteristics
|
||||
context.update(self._analyze_content_characteristics(context))
|
||||
|
||||
# Extract resource state
|
||||
context.update(self._extract_resource_state(context))
|
||||
|
||||
return context
|
||||
|
||||
def _analyze_content_characteristics(self, context: dict) -> dict:
|
||||
"""Analyze content characteristics for compression decisions."""
|
||||
content = context.get('content_to_compress', '')
|
||||
metadata = context.get('content_metadata', {})
|
||||
|
||||
characteristics = {
|
||||
'content_length': len(content),
|
||||
'content_complexity': 0.0,
|
||||
'repetition_factor': 0.0,
|
||||
'technical_density': 0.0,
|
||||
'framework_content_ratio': 0.0,
|
||||
'user_content_ratio': 0.0,
|
||||
'compressibility_score': 0.0
|
||||
}
|
||||
|
||||
if not content:
|
||||
return characteristics
|
||||
|
||||
# Content complexity analysis
|
||||
lines = content.split('\n')
|
||||
characteristics['content_complexity'] = self._calculate_content_complexity(content, lines)
|
||||
|
||||
# Repetition analysis
|
||||
characteristics['repetition_factor'] = self._calculate_repetition_factor(content, lines)
|
||||
|
||||
# Technical density
|
||||
characteristics['technical_density'] = self._calculate_technical_density(content)
|
||||
|
||||
# Framework vs user content ratio
|
||||
framework_ratio, user_ratio = self._analyze_content_sources(content, metadata)
|
||||
characteristics['framework_content_ratio'] = framework_ratio
|
||||
characteristics['user_content_ratio'] = user_ratio
|
||||
|
||||
# Overall compressibility score
|
||||
characteristics['compressibility_score'] = self._calculate_compressibility_score(characteristics)
|
||||
|
||||
return characteristics
|
||||
|
||||
def _calculate_content_complexity(self, content: str, lines: List[str]) -> float:
|
||||
"""Calculate content complexity score (0.0 to 1.0)."""
|
||||
complexity_indicators = [
|
||||
len([line for line in lines if len(line) > 100]) / max(len(lines), 1), # Long lines
|
||||
len([char for char in content if char in '{}[]()']) / max(len(content), 1), # Structural chars
|
||||
len(set(content.split())) / max(len(content.split()), 1), # Vocabulary richness
|
||||
]
|
||||
|
||||
return min(sum(complexity_indicators) / len(complexity_indicators), 1.0)
|
||||
|
||||
def _calculate_repetition_factor(self, content: str, lines: List[str]) -> float:
|
||||
"""Calculate repetition factor for compression potential."""
|
||||
if not lines:
|
||||
return 0.0
|
||||
|
||||
# Line repetition
|
||||
unique_lines = len(set(lines))
|
||||
line_repetition = 1.0 - (unique_lines / len(lines))
|
||||
|
||||
# Word repetition
|
||||
words = content.split()
|
||||
if words:
|
||||
unique_words = len(set(words))
|
||||
word_repetition = 1.0 - (unique_words / len(words))
|
||||
else:
|
||||
word_repetition = 0.0
|
||||
|
||||
return (line_repetition + word_repetition) / 2
|
||||
|
||||
def _calculate_technical_density(self, content: str) -> float:
|
||||
"""Calculate technical density for compression strategy."""
|
||||
technical_patterns = [
|
||||
r'\b[A-Z][a-zA-Z]*\b', # CamelCase
|
||||
r'\b\w+\.\w+\b', # Dotted notation
|
||||
r'\b\d+\.\d+\.\d+\b', # Version numbers
|
||||
r'\b[a-z]+_[a-z]+\b', # Snake_case
|
||||
r'\b[A-Z]{2,}\b', # CONSTANTS
|
||||
]
|
||||
|
||||
import re
|
||||
technical_matches = 0
|
||||
for pattern in technical_patterns:
|
||||
technical_matches += len(re.findall(pattern, content))
|
||||
|
||||
total_words = len(content.split())
|
||||
return min(technical_matches / max(total_words, 1), 1.0)
|
||||
|
||||
def _analyze_content_sources(self, content: str, metadata: dict) -> Tuple[float, float]:
|
||||
"""Analyze ratio of framework vs user content."""
|
||||
# Framework content indicators
|
||||
framework_indicators = [
|
||||
'SuperClaude', 'CLAUDE.md', 'FLAGS.md', 'PRINCIPLES.md',
|
||||
'ORCHESTRATOR.md', 'MCP_', 'MODE_', 'SESSION_LIFECYCLE'
|
||||
]
|
||||
|
||||
# User content indicators
|
||||
user_indicators = [
|
||||
'project_files', 'user_documentation', 'source_code',
|
||||
'configuration_files', 'custom_content'
|
||||
]
|
||||
|
||||
framework_score = 0
|
||||
user_score = 0
|
||||
|
||||
# Check content text
|
||||
content_lower = content.lower()
|
||||
for indicator in framework_indicators:
|
||||
if indicator.lower() in content_lower:
|
||||
framework_score += 1
|
||||
|
||||
for indicator in user_indicators:
|
||||
if indicator.lower() in content_lower:
|
||||
user_score += 1
|
||||
|
||||
# Check metadata
|
||||
content_type = metadata.get('content_type', '')
|
||||
file_path = metadata.get('file_path', '')
|
||||
|
||||
if any(pattern in file_path for pattern in ['/SuperClaude/', '/.claude/', 'framework']):
|
||||
framework_score += 3
|
||||
|
||||
if any(pattern in content_type for pattern in user_indicators):
|
||||
user_score += 3
|
||||
|
||||
total_score = framework_score + user_score
|
||||
if total_score == 0:
|
||||
return 0.5, 0.5 # Unknown, assume mixed
|
||||
|
||||
return framework_score / total_score, user_score / total_score
|
||||
|
||||
def _calculate_compressibility_score(self, characteristics: dict) -> float:
|
||||
"""Calculate overall compressibility score."""
|
||||
# Higher repetition = higher compressibility
|
||||
repetition_contribution = characteristics['repetition_factor'] * 0.4
|
||||
|
||||
# Higher technical density = better compression with abbreviations
|
||||
technical_contribution = characteristics['technical_density'] * 0.3
|
||||
|
||||
# Framework content is not compressed (exclusion)
|
||||
framework_penalty = characteristics['framework_content_ratio'] * 0.5
|
||||
|
||||
# Content complexity affects compression effectiveness
|
||||
complexity_factor = 1.0 - (characteristics['content_complexity'] * 0.2)
|
||||
|
||||
score = (repetition_contribution + technical_contribution) * complexity_factor - framework_penalty
|
||||
|
||||
return max(min(score, 1.0), 0.0)
|
||||
|
||||
def _extract_resource_state(self, context: dict) -> dict:
|
||||
"""Extract resource state for compression decisions."""
|
||||
resource_constraints = context.get('resource_constraints', {})
|
||||
|
||||
return {
|
||||
'memory_usage_percent': resource_constraints.get('memory_usage', 0),
|
||||
'token_usage_percent': resource_constraints.get('token_usage', 0),
|
||||
'conversation_length': resource_constraints.get('conversation_length', 0),
|
||||
'resource_pressure': resource_constraints.get('pressure_level', 'normal'),
|
||||
'user_requests_compression': resource_constraints.get('user_compression_request', False)
|
||||
}
|
||||
|
||||
    def _analyze_content_for_compression(self, context: dict) -> dict:
        """Analyze content to determine compression approach.

        Classifies the content via the compression engine, then returns an
        analysis dict with the detected type, the compression opportunities
        that may be applied, hard preservation requirements, and optional
        optimization techniques. Framework and user content short-circuit
        with protective settings; everything else (session/working data) is
        graded by its precomputed compressibility score.
        """
        content = context.get('content_to_compress', '')
        metadata = context.get('content_metadata', {})

        # Classify content type (delegated to the compression engine).
        content_type = self.compression_engine.classify_content(content, metadata)

        # Analyze compression opportunities — starts empty; branches below fill it in.
        analysis = {
            'content_type': content_type,
            'compression_opportunities': [],
            'preservation_requirements': [],
            'optimization_techniques': []
        }

        # Framework content - complete exclusion: never compressed at all.
        if content_type == ContentType.FRAMEWORK_CONTENT:
            analysis['preservation_requirements'].append('complete_exclusion')
            analysis['compression_opportunities'] = []
            # The decision is logged so the audit trail shows why nothing was compressed.
            log_decision(
                "pre_compact",
                "content_classification",
                "framework_content",
                "Complete exclusion from compression - framework protection"
            )
            return analysis

        # User content - minimal compression only (whitespace tweaks at most).
        if content_type == ContentType.USER_CONTENT:
            analysis['preservation_requirements'].extend([
                'high_fidelity_preservation',
                'minimal_compression_only'
            ])
            analysis['compression_opportunities'].append('whitespace_optimization')
            log_decision(
                "pre_compact",
                "content_classification",
                "user_content",
                "Minimal compression only - user content preservation"
            )
            return analysis

        # Session/working data - full compression applicable; the technique
        # set scales with how compressible the content looks.
        compressibility = context.get('compressibility_score', 0.0)

        if compressibility > 0.7:
            analysis['compression_opportunities'].extend([
                'symbol_systems',
                'abbreviation_systems',
                'structural_optimization',
                'redundancy_removal'
            ])
        elif compressibility > 0.4:
            analysis['compression_opportunities'].extend([
                'symbol_systems',
                'structural_optimization'
            ])
        else:
            analysis['compression_opportunities'].append('minimal_optimization')

        # Technical content optimization: dense jargon abbreviates well.
        if context.get('technical_density', 0) > 0.6:
            analysis['optimization_techniques'].append('technical_abbreviations')

        # Repetitive content optimization: repeated runs compress as patterns.
        if context.get('repetition_factor', 0) > 0.5:
            analysis['optimization_techniques'].append('pattern_compression')

        return analysis
|
||||
|
||||
def _determine_compression_strategy(self, context: dict, content_analysis: dict) -> CompressionStrategy:
|
||||
"""Determine optimal compression strategy."""
|
||||
# Determine compression level based on resource state
|
||||
compression_level = self.compression_engine.determine_compression_level({
|
||||
'resource_usage_percent': context.get('token_usage_percent', 0),
|
||||
'conversation_length': context.get('conversation_length', 0),
|
||||
'user_requests_brevity': context.get('user_requests_compression', False),
|
||||
'complexity_score': context.get('content_complexity', 0.0)
|
||||
})
|
||||
|
||||
# Adjust for content type
|
||||
content_type = content_analysis['content_type']
|
||||
if content_type == ContentType.FRAMEWORK_CONTENT:
|
||||
compression_level = CompressionLevel.MINIMAL # Actually no compression
|
||||
elif content_type == ContentType.USER_CONTENT:
|
||||
compression_level = CompressionLevel.MINIMAL
|
||||
|
||||
# Create strategy
|
||||
strategy = self.compression_engine._create_compression_strategy(compression_level, content_type)
|
||||
|
||||
# Customize based on content analysis
|
||||
opportunities = content_analysis.get('compression_opportunities', [])
|
||||
|
||||
if 'symbol_systems' not in opportunities:
|
||||
strategy.symbol_systems_enabled = False
|
||||
if 'abbreviation_systems' not in opportunities:
|
||||
strategy.abbreviation_systems_enabled = False
|
||||
if 'structural_optimization' not in opportunities:
|
||||
strategy.structural_optimization = False
|
||||
|
||||
return strategy
|
||||
|
||||
def _apply_selective_compression(self, context: dict, strategy: CompressionStrategy,
|
||||
content_analysis: dict) -> Dict[str, CompressionResult]:
|
||||
"""Apply selective compression with framework protection."""
|
||||
content = context.get('content_to_compress', '')
|
||||
metadata = context.get('content_metadata', {})
|
||||
|
||||
# Split content into sections for selective processing
|
||||
content_sections = self._split_content_into_sections(content, metadata)
|
||||
|
||||
compression_results = {}
|
||||
|
||||
for section_name, section_data in content_sections.items():
|
||||
section_content = section_data['content']
|
||||
section_metadata = section_data['metadata']
|
||||
|
||||
# Apply compression to each section
|
||||
result = self.compression_engine.compress_content(
|
||||
section_content,
|
||||
context,
|
||||
section_metadata
|
||||
)
|
||||
|
||||
compression_results[section_name] = result
|
||||
|
||||
return compression_results
|
||||
|
||||
def _split_content_into_sections(self, content: str, metadata: dict) -> dict:
|
||||
"""Split content into sections for selective compression."""
|
||||
sections = {}
|
||||
|
||||
# Simple splitting strategy - can be enhanced
|
||||
lines = content.split('\n')
|
||||
|
||||
# Detect different content types within the text
|
||||
current_section = 'default'
|
||||
current_content = []
|
||||
|
||||
for line in lines:
|
||||
# Framework content detection
|
||||
if any(indicator in line for indicator in ['SuperClaude', 'CLAUDE.md', 'FLAGS.md']):
|
||||
if current_content and current_section != 'framework':
|
||||
sections[current_section] = {
|
||||
'content': '\n'.join(current_content),
|
||||
'metadata': {**metadata, 'content_type': current_section}
|
||||
}
|
||||
current_content = []
|
||||
current_section = 'framework'
|
||||
|
||||
# User code detection
|
||||
elif any(indicator in line for indicator in ['def ', 'class ', 'function', 'import ']):
|
||||
if current_content and current_section != 'user_code':
|
||||
sections[current_section] = {
|
||||
'content': '\n'.join(current_content),
|
||||
'metadata': {**metadata, 'content_type': current_section}
|
||||
}
|
||||
current_content = []
|
||||
current_section = 'user_code'
|
||||
|
||||
# Session data detection
|
||||
elif any(indicator in line for indicator in ['session_', 'checkpoint_', 'cache_']):
|
||||
if current_content and current_section != 'session_data':
|
||||
sections[current_section] = {
|
||||
'content': '\n'.join(current_content),
|
||||
'metadata': {**metadata, 'content_type': current_section}
|
||||
}
|
||||
current_content = []
|
||||
current_section = 'session_data'
|
||||
|
||||
current_content.append(line)
|
||||
|
||||
# Add final section
|
||||
if current_content:
|
||||
sections[current_section] = {
|
||||
'content': '\n'.join(current_content),
|
||||
'metadata': {**metadata, 'content_type': current_section}
|
||||
}
|
||||
|
||||
# If no sections detected, treat as single section
|
||||
if not sections:
|
||||
sections['default'] = {
|
||||
'content': content,
|
||||
'metadata': metadata
|
||||
}
|
||||
|
||||
return sections
|
||||
|
||||
def _validate_compression_quality(self, compression_results: Dict[str, CompressionResult],
|
||||
strategy: CompressionStrategy) -> dict:
|
||||
"""Validate compression quality against standards."""
|
||||
validation = {
|
||||
'overall_quality_met': True,
|
||||
'preservation_score': 0.0,
|
||||
'compression_efficiency': 0.0,
|
||||
'quality_issues': [],
|
||||
'quality_warnings': []
|
||||
}
|
||||
|
||||
if not compression_results:
|
||||
return validation
|
||||
|
||||
# Calculate overall metrics
|
||||
total_original = sum(result.original_length for result in compression_results.values())
|
||||
total_compressed = sum(result.compressed_length for result in compression_results.values())
|
||||
total_preservation = sum(result.preservation_score for result in compression_results.values())
|
||||
|
||||
if total_original > 0:
|
||||
validation['compression_efficiency'] = (total_original - total_compressed) / total_original
|
||||
|
||||
validation['preservation_score'] = total_preservation / len(compression_results)
|
||||
|
||||
# Quality threshold validation
|
||||
if validation['preservation_score'] < strategy.quality_threshold:
|
||||
validation['overall_quality_met'] = False
|
||||
validation['quality_issues'].append(
|
||||
f"Preservation score {validation['preservation_score']:.2f} below threshold {strategy.quality_threshold}"
|
||||
)
|
||||
|
||||
# Individual section validation
|
||||
for section_name, result in compression_results.items():
|
||||
if result.quality_score < 0.8:
|
||||
validation['quality_warnings'].append(
|
||||
f"Section '{section_name}' quality score low: {result.quality_score:.2f}"
|
||||
)
|
||||
|
||||
if result.compression_ratio > 0.9: # Over 90% compression might be too aggressive
|
||||
validation['quality_warnings'].append(
|
||||
f"Section '{section_name}' compression ratio very high: {result.compression_ratio:.2f}"
|
||||
)
|
||||
|
||||
return validation
|
||||
|
||||
    def _record_compression_learning(self, context: dict, compression_results: Dict[str, CompressionResult],
                                     quality_validation: dict):
        """Record compression learning for future optimization.

        Feeds two signals into the learning engine: (1) how effective this
        compression run was (preservation x efficiency), and (2) an explicit
        user-preference event when the user asked for compression. Both are
        user-scoped so adaptation follows the individual user.
        """
        # Combined effectiveness metric: perfect preservation with zero
        # savings (or vice versa) both score 0.
        overall_effectiveness = quality_validation['preservation_score'] * quality_validation['compression_efficiency']

        # Record compression effectiveness
        # NOTE(review): determine_compression_level() is called here with the
        # full context dict, while _determine_compression_strategy passes it a
        # differently-shaped dict (resource_usage_percent etc.) — confirm both
        # key shapes are accepted by the engine.
        self.learning_engine.record_learning_event(
            LearningType.PERFORMANCE_OPTIMIZATION,
            AdaptationScope.USER,
            context,
            {
                'compression_level': self.compression_engine.determine_compression_level(context).value,
                # Union of every technique applied across all sections.
                'techniques_used': list(set().union(*[result.techniques_used for result in compression_results.values()])),
                'preservation_score': quality_validation['preservation_score'],
                'compression_efficiency': quality_validation['compression_efficiency']
            },
            overall_effectiveness,
            0.9,  # High confidence in compression metrics
            {'hook': 'pre_compact', 'compression_learning': True}
        )

        # Record user preference if compression was requested
        if context.get('user_requests_compression'):
            self.learning_engine.record_learning_event(
                LearningType.USER_PREFERENCE,
                AdaptationScope.USER,
                context,
                {'compression_preference': 'enabled', 'user_satisfaction': overall_effectiveness},
                overall_effectiveness,
                0.8,
                {'user_initiated_compression': True}
            )
|
||||
|
||||
def _calculate_compression_efficiency(self, context: dict, execution_time_ms: float) -> float:
|
||||
"""Calculate compression processing efficiency."""
|
||||
content_length = context.get('content_length', 1)
|
||||
|
||||
# Efficiency based on processing speed per character
|
||||
chars_per_ms = content_length / max(execution_time_ms, 1)
|
||||
|
||||
# Target: 100 chars per ms for good efficiency
|
||||
target_chars_per_ms = 100
|
||||
efficiency = min(chars_per_ms / target_chars_per_ms, 1.0)
|
||||
|
||||
return efficiency
|
||||
|
||||
    def _generate_compression_config(self, context: dict, strategy: CompressionStrategy,
                                     compression_results: Dict[str, CompressionResult],
                                     quality_validation: dict) -> dict:
        """Generate comprehensive compression configuration.

        Assembles the hook's final output document: the strategy that was
        applied, aggregate results across all sections, the quality
        validation outcome, the framework-protection guarantees, and
        optimization metadata (including an estimated token saving).
        """
        # Aggregate byte counts across every compressed section.
        total_original = sum(result.original_length for result in compression_results.values())
        total_compressed = sum(result.compressed_length for result in compression_results.values())

        config = {
            'compression_enabled': True,
            'compression_level': strategy.level.value,
            'selective_compression': True,

            # Which strategy features were active for this run.
            'strategy': {
                'symbol_systems_enabled': strategy.symbol_systems_enabled,
                'abbreviation_systems_enabled': strategy.abbreviation_systems_enabled,
                'structural_optimization': strategy.structural_optimization,
                'quality_threshold': strategy.quality_threshold
            },

            # Aggregate compression outcome; max(total_original, 1) guards
            # against division by zero for empty input.
            'results': {
                'original_length': total_original,
                'compressed_length': total_compressed,
                'compression_ratio': (total_original - total_compressed) / max(total_original, 1),
                'sections_processed': len(compression_results),
                # Union of techniques used by any section.
                'techniques_used': list(set().union(*[result.techniques_used for result in compression_results.values()]))
            },

            # Quality validation summary produced by _validate_compression_quality.
            'quality': {
                'preservation_score': quality_validation['preservation_score'],
                'quality_met': quality_validation['overall_quality_met'],
                'issues': quality_validation['quality_issues'],
                'warnings': quality_validation['quality_warnings']
            },

            # Guarantees the selective pipeline provides by construction.
            'framework_protection': {
                'framework_content_excluded': True,
                'user_content_preserved': True,
                'selective_processing_enabled': True
            },

            'optimization': {
                # 0.7 chars-per-token heuristic, not an exact tokenizer count.
                'estimated_token_savings': int((total_original - total_compressed) * 0.7),  # Rough estimate
                'processing_efficiency': quality_validation['compression_efficiency'],
                'recommendation': self._get_compression_recommendation(context, quality_validation)
            },

            'metadata': {
                'hook_version': 'pre_compact_1.0',
                'compression_timestamp': context['timestamp'],
                'content_classification': 'selective_compression_applied'
            }
        }

        return config
|
||||
|
||||
def _get_compression_recommendation(self, context: dict, quality_validation: dict) -> str:
|
||||
"""Get compression recommendation based on results."""
|
||||
if not quality_validation['overall_quality_met']:
|
||||
return "Reduce compression level to maintain quality"
|
||||
elif quality_validation['compression_efficiency'] > 0.7:
|
||||
return "Excellent compression efficiency achieved"
|
||||
elif quality_validation['compression_efficiency'] > 0.4:
|
||||
return "Good compression efficiency, consider slight optimization"
|
||||
else:
|
||||
return "Low compression efficiency, consider alternative strategies"
|
||||
|
||||
def _create_fallback_compression_config(self, compact_request: dict, error: str) -> dict:
|
||||
"""Create fallback compression configuration on error."""
|
||||
return {
|
||||
'compression_enabled': False,
|
||||
'fallback_mode': True,
|
||||
'error': error,
|
||||
|
||||
'strategy': {
|
||||
'symbol_systems_enabled': False,
|
||||
'abbreviation_systems_enabled': False,
|
||||
'structural_optimization': False,
|
||||
'quality_threshold': 1.0
|
||||
},
|
||||
|
||||
'results': {
|
||||
'original_length': len(compact_request.get('content', '')),
|
||||
'compressed_length': len(compact_request.get('content', '')),
|
||||
'compression_ratio': 0.0,
|
||||
'sections_processed': 0,
|
||||
'techniques_used': []
|
||||
},
|
||||
|
||||
'quality': {
|
||||
'preservation_score': 1.0,
|
||||
'quality_met': False,
|
||||
'issues': [f"Compression hook error: {error}"],
|
||||
'warnings': []
|
||||
},
|
||||
|
||||
'performance_metrics': {
|
||||
'compression_time_ms': 0,
|
||||
'target_met': False,
|
||||
'error_occurred': True
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
def main():
    """Hook entry point: read a compact request from stdin, emit JSON config.

    On any failure a minimal fallback document is printed instead and the
    process exits with status 1 so Claude Code can detect the error.
    """
    try:
        request = json.loads(sys.stdin.read())
        result = PreCompactHook().process_pre_compact(request)
        print(json.dumps(result, indent=2))
    except Exception as e:
        fallback = {
            'compression_enabled': False,
            'error': str(e),
            'fallback_mode': True
        }
        print(json.dumps(fallback, indent=2))
        sys.exit(1)
|
||||
|
||||
|
||||
# Allow the hook to be executed directly as a script by Claude Code.
if __name__ == "__main__":
    main()
|
||||
646
Framework-Hooks/hooks/pre_tool_use.py
Normal file
646
Framework-Hooks/hooks/pre_tool_use.py
Normal file
@@ -0,0 +1,646 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
SuperClaude-Lite Pre-Tool-Use Hook
|
||||
|
||||
Implements ORCHESTRATOR.md + MCP routing intelligence for optimal tool selection.
|
||||
Performance target: <200ms execution time.
|
||||
|
||||
This hook runs before every tool usage and provides:
|
||||
- Intelligent tool routing and MCP server selection
|
||||
- Performance optimization and parallel execution planning
|
||||
- Context-aware tool configuration
|
||||
- Fallback strategy implementation
|
||||
- Real-time adaptation based on effectiveness
|
||||
"""
|
||||
|
||||
import sys
|
||||
import json
|
||||
import time
|
||||
import os
|
||||
from pathlib import Path
|
||||
from typing import Dict, Any, List, Optional
|
||||
|
||||
# Add shared modules to path
|
||||
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "shared"))
|
||||
|
||||
from framework_logic import FrameworkLogic, OperationContext, OperationType, RiskLevel
|
||||
from pattern_detection import PatternDetector, PatternMatch
|
||||
from mcp_intelligence import MCPIntelligence, MCPActivationPlan
|
||||
from compression_engine import CompressionEngine
|
||||
from learning_engine import LearningEngine, LearningType, AdaptationScope
|
||||
from yaml_loader import config_loader
|
||||
from logger import log_hook_start, log_hook_end, log_decision, log_error
|
||||
|
||||
|
||||
class PreToolUseHook:
|
||||
"""
|
||||
Pre-tool-use hook implementing SuperClaude orchestration intelligence.
|
||||
|
||||
Responsibilities:
|
||||
- Analyze tool usage context and requirements
|
||||
- Route to optimal MCP servers based on capability matching
|
||||
- Configure parallel execution and performance optimization
|
||||
- Apply learned adaptations for tool selection
|
||||
- Implement fallback strategies for server failures
|
||||
- Track tool effectiveness and performance metrics
|
||||
"""
|
||||
|
||||
    def __init__(self):
        # Track construction time so initialization overhead can be reported
        # against the hook's performance budget.
        start_time = time.time()

        # Initialize core components
        self.framework_logic = FrameworkLogic()
        self.pattern_detector = PatternDetector()
        self.mcp_intelligence = MCPIntelligence()
        self.compression_engine = CompressionEngine()

        # Initialize learning engine
        # NOTE(review): relative path — resolves against the current working
        # directory, not the hook's own location; confirm that is intended.
        cache_dir = Path("cache")
        self.learning_engine = LearningEngine(cache_dir)

        # Load hook-specific configuration from SuperClaude config
        self.hook_config = config_loader.get_hook_config('pre_tool_use')

        # Load orchestrator configuration (from YAML if exists, otherwise use hook config)
        try:
            self.orchestrator_config = config_loader.load_config('orchestrator')
        except FileNotFoundError:
            # Fall back to hook configuration if YAML file not found
            self.orchestrator_config = self.hook_config.get('configuration', {})

        # Load performance configuration (from YAML if exists, otherwise use hook config)
        try:
            self.performance_config = config_loader.load_config('performance')
        except FileNotFoundError:
            # Fall back to performance targets from global configuration
            self.performance_config = config_loader.get_performance_targets()

        # Performance tracking using configuration: initialization time in ms
        # plus the configured routing budget (default 200 ms).
        self.initialization_time = (time.time() - start_time) * 1000
        self.performance_target_ms = config_loader.get_hook_config('pre_tool_use', 'performance_target_ms', 200)
|
||||
|
||||
    def process_tool_use(self, tool_request: dict) -> dict:
        """
        Process tool use request with intelligent routing.

        Pipeline: extract context -> analyze requirements -> detect routing
        patterns -> apply learned adaptations -> build an execution plan ->
        configure the tool -> record the learning event. Every stage decision
        is logged; any exception falls back to a safe default configuration.

        Args:
            tool_request: Tool usage request from Claude Code

        Returns:
            Enhanced tool configuration with SuperClaude intelligence
        """
        start_time = time.time()

        # Log hook start
        log_hook_start("pre_tool_use", {
            "tool_name": tool_request.get('tool_name', 'unknown'),
            "has_parameters": bool(tool_request.get('parameters'))
        })

        try:
            # Extract tool context
            context = self._extract_tool_context(tool_request)

            # Analyze tool requirements and capabilities
            requirements = self._analyze_tool_requirements(context)

            # Log routing decision — only when MCP servers were suggested.
            if requirements.get('mcp_server_hints'):
                log_decision(
                    "pre_tool_use",
                    "mcp_server_selection",
                    ",".join(requirements['mcp_server_hints']),
                    f"Tool '{context['tool_name']}' requires capabilities: {', '.join(requirements.get('capabilities_needed', []))}"
                )

            # Detect patterns for intelligent routing
            routing_analysis = self._analyze_routing_patterns(context, requirements)

            # Apply learned adaptations
            enhanced_routing = self._apply_routing_adaptations(context, routing_analysis)

            # Create optimal execution plan
            execution_plan = self._create_execution_plan(context, enhanced_routing)

            # Log execution strategy decision
            log_decision(
                "pre_tool_use",
                "execution_strategy",
                execution_plan['execution_strategy'],
                f"Complexity: {context.get('complexity_score', 0):.2f}, Files: {context.get('file_count', 1)}"
            )

            # Configure tool enhancement
            tool_config = self._configure_tool_enhancement(context, execution_plan)

            # Record learning event
            self._record_tool_learning(context, tool_config)

            # Performance validation: measure routing time (ms) against the
            # configured budget and attach an efficiency score.
            execution_time = (time.time() - start_time) * 1000
            tool_config['performance_metrics'] = {
                'routing_time_ms': execution_time,
                'target_met': execution_time < self.performance_target_ms,
                'efficiency_score': self._calculate_efficiency_score(context, execution_time)
            }

            # Log successful completion
            log_hook_end(
                "pre_tool_use",
                int(execution_time),
                True,
                {
                    "tool_name": context['tool_name'],
                    "mcp_servers": tool_config.get('mcp_integration', {}).get('servers', []),
                    "enhanced_mode": tool_config.get('enhanced_mode', False)
                }
            )

            return tool_config

        except Exception as e:
            # Broad catch is deliberate: the hook must never block a tool
            # call, so any failure is logged and a fallback config returned.
            execution_time = (time.time() - start_time) * 1000
            log_error(
                "pre_tool_use",
                str(e),
                {"tool_name": tool_request.get('tool_name', 'unknown')}
            )
            log_hook_end("pre_tool_use", int(execution_time), False)

            # Graceful fallback on error
            return self._create_fallback_tool_config(tool_request, str(e))
|
||||
|
||||
def _extract_tool_context(self, tool_request: dict) -> dict:
|
||||
"""Extract and enrich tool usage context."""
|
||||
context = {
|
||||
'tool_name': tool_request.get('tool_name', ''),
|
||||
'tool_parameters': tool_request.get('parameters', {}),
|
||||
'user_intent': tool_request.get('user_intent', ''),
|
||||
'session_context': tool_request.get('session_context', {}),
|
||||
'previous_tools': tool_request.get('previous_tools', []),
|
||||
'operation_sequence': tool_request.get('operation_sequence', []),
|
||||
'resource_state': tool_request.get('resource_state', {}),
|
||||
'timestamp': time.time()
|
||||
}
|
||||
|
||||
# Extract operation characteristics
|
||||
context.update(self._analyze_operation_characteristics(context))
|
||||
|
||||
# Analyze tool chain context
|
||||
context.update(self._analyze_tool_chain_context(context))
|
||||
|
||||
return context
|
||||
|
||||
def _analyze_operation_characteristics(self, context: dict) -> dict:
|
||||
"""Analyze operation characteristics for routing decisions."""
|
||||
characteristics = {
|
||||
'operation_type': OperationType.READ,
|
||||
'complexity_score': 0.0,
|
||||
'file_count': 1,
|
||||
'directory_count': 1,
|
||||
'parallelizable': False,
|
||||
'resource_intensive': False,
|
||||
'requires_intelligence': False
|
||||
}
|
||||
|
||||
tool_name = context['tool_name']
|
||||
tool_params = context['tool_parameters']
|
||||
|
||||
# Determine operation type from tool
|
||||
if tool_name in ['Write', 'Edit', 'MultiEdit']:
|
||||
characteristics['operation_type'] = OperationType.WRITE
|
||||
characteristics['complexity_score'] += 0.2
|
||||
elif tool_name in ['Build', 'Implement']:
|
||||
characteristics['operation_type'] = OperationType.BUILD
|
||||
characteristics['complexity_score'] += 0.4
|
||||
elif tool_name in ['Test', 'Validate']:
|
||||
characteristics['operation_type'] = OperationType.TEST
|
||||
characteristics['complexity_score'] += 0.1
|
||||
elif tool_name in ['Analyze', 'Debug']:
|
||||
characteristics['operation_type'] = OperationType.ANALYZE
|
||||
characteristics['complexity_score'] += 0.3
|
||||
characteristics['requires_intelligence'] = True
|
||||
|
||||
# Analyze file/directory scope
|
||||
if 'file_path' in tool_params:
|
||||
characteristics['file_count'] = 1
|
||||
elif 'files' in tool_params:
|
||||
file_list = tool_params['files']
|
||||
characteristics['file_count'] = len(file_list) if isinstance(file_list, list) else 1
|
||||
if characteristics['file_count'] > 3:
|
||||
characteristics['parallelizable'] = True
|
||||
characteristics['complexity_score'] += 0.3
|
||||
|
||||
if 'directory' in tool_params or 'path' in tool_params:
|
||||
path_param = tool_params.get('directory') or tool_params.get('path', '')
|
||||
if '*' in str(path_param) or '**' in str(path_param):
|
||||
characteristics['directory_count'] = 5 # Estimate for glob patterns
|
||||
characteristics['complexity_score'] += 0.2
|
||||
characteristics['parallelizable'] = True
|
||||
|
||||
# Resource intensity analysis
|
||||
if characteristics['file_count'] > 10 or characteristics['complexity_score'] > 0.6:
|
||||
characteristics['resource_intensive'] = True
|
||||
|
||||
# Intelligence requirements
|
||||
intelligence_tools = ['Analyze', 'Debug', 'Optimize', 'Refactor', 'Generate']
|
||||
if any(tool in tool_name for tool in intelligence_tools):
|
||||
characteristics['requires_intelligence'] = True
|
||||
|
||||
return characteristics
|
||||
|
||||
def _analyze_tool_chain_context(self, context: dict) -> dict:
|
||||
"""Analyze tool chain context for optimization opportunities."""
|
||||
chain_analysis = {
|
||||
'chain_length': len(context['previous_tools']),
|
||||
'pattern_detected': None,
|
||||
'optimization_opportunity': False,
|
||||
'cache_opportunity': False
|
||||
}
|
||||
|
||||
previous_tools = context['previous_tools']
|
||||
|
||||
if len(previous_tools) >= 2:
|
||||
# Detect common patterns
|
||||
tool_names = [tool.get('name', '') for tool in previous_tools[-3:]]
|
||||
|
||||
# Read-Edit pattern
|
||||
if any('Read' in name for name in tool_names) and any('Edit' in name for name in tool_names):
|
||||
chain_analysis['pattern_detected'] = 'read_edit_pattern'
|
||||
chain_analysis['optimization_opportunity'] = True
|
||||
|
||||
# Multiple file operations
|
||||
if sum(1 for name in tool_names if 'file' in name.lower()) >= 2:
|
||||
chain_analysis['pattern_detected'] = 'multi_file_pattern'
|
||||
chain_analysis['optimization_opportunity'] = True
|
||||
|
||||
# Analysis chain
|
||||
if sum(1 for name in tool_names if any(word in name for word in ['Analyze', 'Search', 'Find'])) >= 2:
|
||||
chain_analysis['pattern_detected'] = 'analysis_chain'
|
||||
chain_analysis['cache_opportunity'] = True
|
||||
|
||||
return chain_analysis
|
||||
|
||||
def _analyze_tool_requirements(self, context: dict) -> dict:
|
||||
"""Analyze tool requirements for capability matching."""
|
||||
requirements = {
|
||||
'capabilities_needed': [],
|
||||
'performance_requirements': {},
|
||||
'quality_requirements': {},
|
||||
'mcp_server_hints': [],
|
||||
'native_tool_sufficient': True
|
||||
}
|
||||
|
||||
tool_name = context['tool_name']
|
||||
characteristics = context
|
||||
|
||||
# Determine required capabilities
|
||||
if characteristics.get('requires_intelligence'):
|
||||
requirements['capabilities_needed'].extend(['analysis', 'reasoning', 'context_understanding'])
|
||||
requirements['native_tool_sufficient'] = False
|
||||
|
||||
if characteristics.get('complexity_score', 0) > 0.6:
|
||||
requirements['capabilities_needed'].extend(['complex_reasoning', 'systematic_analysis'])
|
||||
requirements['mcp_server_hints'].append('sequential')
|
||||
|
||||
if characteristics.get('file_count', 1) > 5:
|
||||
requirements['capabilities_needed'].extend(['multi_file_coordination', 'semantic_understanding'])
|
||||
requirements['mcp_server_hints'].append('serena')
|
||||
|
||||
# UI/component operations
|
||||
if any(word in context.get('user_intent', '').lower() for word in ['component', 'ui', 'frontend', 'design']):
|
||||
requirements['capabilities_needed'].append('ui_generation')
|
||||
requirements['mcp_server_hints'].append('magic')
|
||||
|
||||
# Documentation/library operations
|
||||
if any(word in context.get('user_intent', '').lower() for word in ['library', 'documentation', 'framework', 'api']):
|
||||
requirements['capabilities_needed'].append('documentation_access')
|
||||
requirements['mcp_server_hints'].append('context7')
|
||||
|
||||
# Testing operations
|
||||
if tool_name in ['Test'] or 'test' in context.get('user_intent', '').lower():
|
||||
requirements['capabilities_needed'].append('testing_automation')
|
||||
requirements['mcp_server_hints'].append('playwright')
|
||||
|
||||
# Performance requirements
|
||||
if characteristics.get('resource_intensive'):
|
||||
requirements['performance_requirements'] = {
|
||||
'max_execution_time_ms': 5000,
|
||||
'memory_efficiency_required': True,
|
||||
'parallel_execution_preferred': True
|
||||
}
|
||||
else:
|
||||
requirements['performance_requirements'] = {
|
||||
'max_execution_time_ms': 2000,
|
||||
'response_time_critical': True
|
||||
}
|
||||
|
||||
# Quality requirements
|
||||
if context.get('session_context', {}).get('is_production', False):
|
||||
requirements['quality_requirements'] = {
|
||||
'validation_required': True,
|
||||
'error_handling_critical': True,
|
||||
'rollback_capability_needed': True
|
||||
}
|
||||
|
||||
return requirements
|
||||
|
||||
def _analyze_routing_patterns(self, context: dict, requirements: dict) -> dict:
    """Combine pattern detection and MCP planning into a routing analysis.

    Builds a compact operation descriptor from *context*, feeds it to
    both the pattern detector and the MCP intelligence planner, and
    returns their combined output plus identified optimization
    opportunities.
    """
    intent = context.get('user_intent', '')
    operation_data = {
        'operation_type': context.get('operation_type', OperationType.READ).value,
        'file_count': context.get('file_count', 1),
        'complexity_score': context.get('complexity_score', 0.0),
        'tool_name': context['tool_name'],
    }

    detection = self.pattern_detector.detect_patterns(intent, context, operation_data)
    activation_plan = self.mcp_intelligence.create_activation_plan(
        intent, context, operation_data
    )

    return {
        'pattern_matches': detection.matches,
        'recommended_mcp_servers': detection.recommended_mcp_servers,
        'mcp_activation_plan': activation_plan,
        'routing_confidence': detection.confidence_score,
        'optimization_opportunities': self._identify_optimization_opportunities(
            context, requirements
        ),
    }
|
||||
|
||||
def _identify_optimization_opportunities(self, context: dict, requirements: dict) -> list:
|
||||
"""Identify optimization opportunities for tool execution."""
|
||||
opportunities = []
|
||||
|
||||
# Parallel execution opportunity
|
||||
if context.get('parallelizable') and context.get('file_count', 1) > 3:
|
||||
opportunities.append({
|
||||
'type': 'parallel_execution',
|
||||
'description': 'Multi-file operation suitable for parallel processing',
|
||||
'estimated_speedup': min(context.get('file_count', 1) * 0.3, 2.0)
|
||||
})
|
||||
|
||||
# Caching opportunity
|
||||
if context.get('cache_opportunity'):
|
||||
opportunities.append({
|
||||
'type': 'result_caching',
|
||||
'description': 'Analysis results can be cached for reuse',
|
||||
'estimated_speedup': 1.5
|
||||
})
|
||||
|
||||
# MCP server coordination
|
||||
if len(requirements.get('mcp_server_hints', [])) > 1:
|
||||
opportunities.append({
|
||||
'type': 'mcp_coordination',
|
||||
'description': 'Multiple MCP servers can work together',
|
||||
'quality_improvement': 0.2
|
||||
})
|
||||
|
||||
# Intelligence routing
|
||||
if not requirements.get('native_tool_sufficient'):
|
||||
opportunities.append({
|
||||
'type': 'intelligence_routing',
|
||||
'description': 'Operation benefits from MCP server intelligence',
|
||||
'quality_improvement': 0.3
|
||||
})
|
||||
|
||||
return opportunities
|
||||
|
||||
def _apply_routing_adaptations(self, context: dict, routing_analysis: dict) -> dict:
    """Enhance the base routing decision with learned adaptations.

    Extracts the routing essentials from *routing_analysis* and lets the
    learning engine adjust them based on accumulated experience.
    """
    baseline = {
        key: routing_analysis[key]
        for key in (
            'recommended_mcp_servers',
            'mcp_activation_plan',
            'optimization_opportunities',
        )
    }
    return self.learning_engine.apply_adaptations(context, baseline)
|
||||
|
||||
def _create_execution_plan(self, context: dict, enhanced_routing: dict) -> dict:
|
||||
"""Create optimal execution plan for tool usage."""
|
||||
plan = {
|
||||
'execution_strategy': 'direct',
|
||||
'mcp_servers_required': enhanced_routing.get('recommended_mcp_servers', []),
|
||||
'parallel_execution': False,
|
||||
'caching_enabled': False,
|
||||
'fallback_strategy': 'native_tools',
|
||||
'performance_optimizations': [],
|
||||
'estimated_execution_time_ms': 500
|
||||
}
|
||||
|
||||
# Determine execution strategy
|
||||
if context.get('complexity_score', 0) > 0.6:
|
||||
plan['execution_strategy'] = 'intelligent_routing'
|
||||
elif context.get('file_count', 1) > 5:
|
||||
plan['execution_strategy'] = 'parallel_coordination'
|
||||
|
||||
# Configure parallel execution
|
||||
if context.get('parallelizable') and context.get('file_count', 1) > 3:
|
||||
plan['parallel_execution'] = True
|
||||
plan['performance_optimizations'].append('parallel_file_processing')
|
||||
plan['estimated_execution_time_ms'] = int(plan['estimated_execution_time_ms'] * 0.6)
|
||||
|
||||
# Configure caching
|
||||
if context.get('cache_opportunity'):
|
||||
plan['caching_enabled'] = True
|
||||
plan['performance_optimizations'].append('result_caching')
|
||||
|
||||
# Configure MCP coordination
|
||||
mcp_servers = plan['mcp_servers_required']
|
||||
if len(mcp_servers) > 1:
|
||||
plan['coordination_strategy'] = enhanced_routing.get('mcp_activation_plan', {}).get('coordination_strategy', 'collaborative')
|
||||
|
||||
# Estimate execution time based on complexity
|
||||
base_time = 200
|
||||
complexity_multiplier = 1 + context.get('complexity_score', 0.0)
|
||||
file_multiplier = 1 + (context.get('file_count', 1) - 1) * 0.1
|
||||
|
||||
plan['estimated_execution_time_ms'] = int(base_time * complexity_multiplier * file_multiplier)
|
||||
|
||||
return plan
|
||||
|
||||
def _configure_tool_enhancement(self, context: dict, execution_plan: dict) -> dict:
    """Translate the execution plan into a concrete tool configuration."""
    servers = execution_plan['mcp_servers_required']
    production = context.get('session_context', {}).get('is_production', False)

    config = {
        'tool_name': context['tool_name'],
        # Anything beyond plain 'direct' execution counts as enhanced.
        'enhanced_mode': execution_plan['execution_strategy'] != 'direct',
        'mcp_integration': {
            'enabled': len(servers) > 0,
            'servers': servers,
            'coordination_strategy': execution_plan.get(
                'coordination_strategy', 'single_server'
            ),
        },
        'performance_optimization': {
            'parallel_execution': execution_plan['parallel_execution'],
            'caching_enabled': execution_plan['caching_enabled'],
            'optimizations': execution_plan['performance_optimizations'],
        },
        'quality_enhancement': {
            # Extra validation only in production sessions.
            'validation_enabled': production,
            'error_recovery': True,
            'context_preservation': True,
        },
        'execution_metadata': {
            'estimated_time_ms': execution_plan['estimated_execution_time_ms'],
            'complexity_score': context.get('complexity_score', 0.0),
            'intelligence_level': self._determine_intelligence_level(context),
        },
    }

    # Merge in enhancements specific to the tool being invoked.
    config.update(self._get_tool_specific_enhancements(context, execution_plan))
    return config
|
||||
|
||||
def _determine_intelligence_level(self, context: dict) -> str:
|
||||
"""Determine required intelligence level for operation."""
|
||||
complexity = context.get('complexity_score', 0.0)
|
||||
|
||||
if complexity >= 0.8:
|
||||
return 'high'
|
||||
elif complexity >= 0.5:
|
||||
return 'medium'
|
||||
elif context.get('requires_intelligence'):
|
||||
return 'medium'
|
||||
else:
|
||||
return 'low'
|
||||
|
||||
def _get_tool_specific_enhancements(self, context: dict, execution_plan: dict) -> dict:
|
||||
"""Get tool-specific enhancement configurations."""
|
||||
tool_name = context['tool_name']
|
||||
enhancements = {}
|
||||
|
||||
# File operation enhancements
|
||||
if tool_name in ['Read', 'Write', 'Edit']:
|
||||
enhancements['file_operations'] = {
|
||||
'integrity_check': True,
|
||||
'backup_on_write': context.get('session_context', {}).get('is_production', False),
|
||||
'encoding_detection': True
|
||||
}
|
||||
|
||||
# Multi-file operation enhancements
|
||||
if tool_name in ['MultiEdit', 'Batch'] or context.get('file_count', 1) > 3:
|
||||
enhancements['multi_file_operations'] = {
|
||||
'transaction_mode': True,
|
||||
'rollback_capability': True,
|
||||
'progress_tracking': True
|
||||
}
|
||||
|
||||
# Analysis operation enhancements
|
||||
if tool_name in ['Analyze', 'Debug', 'Search']:
|
||||
enhancements['analysis_operations'] = {
|
||||
'deep_context_analysis': context.get('complexity_score', 0.0) > 0.5,
|
||||
'semantic_understanding': 'serena' in execution_plan['mcp_servers_required'],
|
||||
'pattern_recognition': True
|
||||
}
|
||||
|
||||
# Build/Implementation enhancements
|
||||
if tool_name in ['Build', 'Implement', 'Generate']:
|
||||
enhancements['build_operations'] = {
|
||||
'framework_integration': 'context7' in execution_plan['mcp_servers_required'],
|
||||
'component_generation': 'magic' in execution_plan['mcp_servers_required'],
|
||||
'quality_validation': True
|
||||
}
|
||||
|
||||
return enhancements
|
||||
|
||||
def _calculate_efficiency_score(self, context: dict, execution_time_ms: float) -> float:
    """Score routing efficiency in [0, 1].

    Weighted blend of time efficiency against the hook's performance
    target (40%), complexity handling (30%) and resource headroom (30%).
    """
    # Faster-than-target executions saturate at 1.0; guard against /0.
    time_eff = min(self.performance_target_ms / max(execution_time_ms, 1), 1.0)

    # Some complexity is expected, so it only costs up to 30%.
    complexity_eff = 1.0 - context.get('complexity_score', 0.0) * 0.3

    # Resource usage above 70% starts eating into the score.
    usage = context.get('resource_state', {}).get('usage_percent', 0)
    resource_eff = 1.0 - max(usage - 70, 0) / 100.0

    blended = 0.4 * time_eff + 0.3 * complexity_eff + 0.3 * resource_eff
    return max(min(blended, 1.0), 0.0)
|
||||
|
||||
def _record_tool_learning(self, context: dict, tool_config: dict):
    """Persist this routing decision so the learning engine can adapt.

    Effectiveness/confidence are provisional estimates; they are refined
    once post-execution feedback is available.
    """
    outcome = {
        'tool_name': context['tool_name'],
        'mcp_servers_used': tool_config.get('mcp_integration', {}).get('servers', []),
        'execution_strategy': tool_config.get('execution_metadata', {}).get(
            'intelligence_level', 'low'
        ),
        'optimizations_applied': tool_config.get('performance_optimization', {}).get(
            'optimizations', []
        ),
    }
    self.learning_engine.record_learning_event(
        LearningType.OPERATION_PATTERN,
        AdaptationScope.USER,
        context,
        outcome,
        0.8,  # provisional effectiveness, updated after execution
        0.7,  # medium confidence until validated
        {'hook': 'pre_tool_use', 'version': '1.0'},
    )
|
||||
|
||||
def _create_fallback_tool_config(self, tool_request: dict, error: str) -> dict:
|
||||
"""Create fallback tool configuration on error."""
|
||||
return {
|
||||
'tool_name': tool_request.get('tool_name', 'unknown'),
|
||||
'enhanced_mode': False,
|
||||
'fallback_mode': True,
|
||||
'error': error,
|
||||
'mcp_integration': {
|
||||
'enabled': False,
|
||||
'servers': [],
|
||||
'coordination_strategy': 'none'
|
||||
},
|
||||
'performance_optimization': {
|
||||
'parallel_execution': False,
|
||||
'caching_enabled': False,
|
||||
'optimizations': []
|
||||
},
|
||||
'performance_metrics': {
|
||||
'routing_time_ms': 0,
|
||||
'target_met': False,
|
||||
'error_occurred': True
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
def main():
    """Hook entry point: read a tool request from stdin, emit JSON result.

    On any failure a fallback JSON payload is printed and the process
    exits with status 1 so Claude Code can detect the error.
    """
    try:
        request = json.loads(sys.stdin.read())
        result = PreToolUseHook().process_tool_use(request)
        print(json.dumps(result, indent=2))
    except Exception as exc:
        fallback = {
            'enhanced_mode': False,
            'error': str(exc),
            'fallback_mode': True,
        }
        print(json.dumps(fallback, indent=2))
        sys.exit(1)


if __name__ == "__main__":
    main()
|
||||
612
Framework-Hooks/hooks/session_start.py
Normal file
612
Framework-Hooks/hooks/session_start.py
Normal file
@@ -0,0 +1,612 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
SuperClaude-Lite Session Start Hook
|
||||
|
||||
Implements SESSION_LIFECYCLE.md + FLAGS.md logic for intelligent session bootstrap.
|
||||
Performance target: <50ms execution time.
|
||||
|
||||
This hook runs at the start of every Claude Code session and provides:
|
||||
- Smart project context loading with framework exclusion
|
||||
- Automatic mode detection and activation
|
||||
- MCP server intelligence routing
|
||||
- User preference adaptation
|
||||
- Performance-optimized initialization
|
||||
"""
|
||||
|
||||
import sys
|
||||
import json
|
||||
import time
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
# Add shared modules to path
|
||||
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "shared"))
|
||||
|
||||
from framework_logic import FrameworkLogic, OperationContext, OperationType, RiskLevel
|
||||
from pattern_detection import PatternDetector, PatternType
|
||||
from mcp_intelligence import MCPIntelligence
|
||||
from compression_engine import CompressionEngine, CompressionLevel, ContentType
|
||||
from learning_engine import LearningEngine, LearningType, AdaptationScope
|
||||
from yaml_loader import config_loader
|
||||
from logger import log_hook_start, log_hook_end, log_decision, log_error
|
||||
|
||||
|
||||
class SessionStartHook:
|
||||
"""
|
||||
Session start hook implementing SuperClaude intelligence.
|
||||
|
||||
Responsibilities:
|
||||
- Initialize session with project context
|
||||
- Apply user preferences and learned adaptations
|
||||
- Activate appropriate modes and MCP servers
|
||||
- Set up compression and performance optimization
|
||||
- Track session metrics and performance
|
||||
"""
|
||||
|
||||
def __init__(self):
    start_time = time.time()

    # Core intelligence components shared by all hook phases.
    self.framework_logic = FrameworkLogic()
    self.pattern_detector = PatternDetector()
    self.mcp_intelligence = MCPIntelligence()
    self.compression_engine = CompressionEngine()

    # Learning engine persists adaptations under a local cache directory.
    self.learning_engine = LearningEngine(Path("cache"))

    # Hook-specific configuration from the SuperClaude config.
    self.hook_config = config_loader.get_hook_config('session_start')

    # Session configuration: the YAML file wins when present; otherwise
    # fall back to the 'configuration' section of the hook config.
    try:
        self.session_config = config_loader.load_config('session')
    except FileNotFoundError:
        self.session_config = self.hook_config.get('configuration', {})

    # Track our own startup cost against the configured target (ms).
    self.initialization_time = (time.time() - start_time) * 1000
    self.performance_target_ms = config_loader.get_hook_config(
        'session_start', 'performance_target_ms', 50
    )
|
||||
|
||||
def initialize_session(self, session_context: dict) -> dict:
    """
    Initialize session with SuperClaude intelligence.

    Pipeline: extract context -> detect patterns -> apply learned
    adaptations -> plan MCP activation -> configure compression ->
    assemble session configuration, then activate modes/servers and
    attach performance metrics.  Never raises: any failure is logged
    and a minimal fallback configuration is returned instead.

    Args:
        session_context: Session initialization context from Claude Code
            (keys like 'project_path', 'user_id', 'user_input',
            'previous_session_id'; all optional).

    Returns:
        Enhanced session configuration dict, including a
        'performance_metrics' entry recording whether the
        initialization-time target was met.
    """
    start_time = time.time()

    # Log hook start
    log_hook_start("session_start", {
        "project_path": session_context.get('project_path', 'unknown'),
        "user_id": session_context.get('user_id', 'anonymous'),
        "has_previous_session": bool(session_context.get('previous_session_id'))
    })

    try:
        # Extract session context
        context = self._extract_session_context(session_context)

        # Detect patterns and operation intent
        detection_result = self._detect_session_patterns(context)

        # Apply learned adaptations
        enhanced_recommendations = self._apply_learning_adaptations(
            context, detection_result
        )

        # Create MCP activation plan
        mcp_plan = self._create_mcp_activation_plan(
            context, enhanced_recommendations
        )

        # Configure compression strategy
        compression_config = self._configure_compression(context)

        # Generate session configuration
        session_config = self._generate_session_config(
            context, enhanced_recommendations, mcp_plan, compression_config
        )

        # Record learning event
        self._record_session_learning(context, session_config)

        # Detect and activate modes
        activated_modes = self._activate_intelligent_modes(context, enhanced_recommendations)

        # Log mode activation decisions (one entry per activated mode)
        for mode in activated_modes:
            log_decision(
                "session_start",
                "mode_activation",
                mode['name'],
                f"Activated based on: {mode.get('trigger', 'automatic detection')}"
            )

        # Configure MCP server activation
        mcp_configuration = self._configure_mcp_servers(context, activated_modes)

        # Log MCP server decisions (skipped when no servers enabled)
        if mcp_configuration.get('enabled_servers'):
            log_decision(
                "session_start",
                "mcp_server_activation",
                ",".join(mcp_configuration['enabled_servers']),
                f"Project type: {context.get('project_type', 'unknown')}"
            )

        # Performance validation: record elapsed time and whether it
        # stayed under the configured target for this hook.
        execution_time = (time.time() - start_time) * 1000
        session_config['performance_metrics'] = {
            'initialization_time_ms': execution_time,
            'target_met': execution_time < self.performance_target_ms,
            'efficiency_score': self._calculate_initialization_efficiency(execution_time)
        }

        # Log successful completion
        log_hook_end(
            "session_start",
            int(execution_time),
            True,
            {
                "project_type": context.get('project_type', 'unknown'),
                "modes_activated": [m['name'] for m in activated_modes],
                "mcp_servers": mcp_configuration.get('enabled_servers', [])
            }
        )

        return session_config

    except Exception as e:
        # Log error
        execution_time = (time.time() - start_time) * 1000
        log_error(
            "session_start",
            str(e),
            {"project_path": session_context.get('project_path', 'unknown')}
        )
        log_hook_end("session_start", int(execution_time), False)

        # Graceful fallback on error: the session proceeds without
        # SuperClaude enhancements rather than blocking Claude Code.
        return self._create_fallback_session_config(session_context, str(e))
|
||||
|
||||
def _extract_session_context(self, session_data: dict) -> dict:
|
||||
"""Extract and enrich session context."""
|
||||
context = {
|
||||
'session_id': session_data.get('session_id', 'unknown'),
|
||||
'project_path': session_data.get('project_path', ''),
|
||||
'user_input': session_data.get('user_input', ''),
|
||||
'conversation_length': session_data.get('conversation_length', 0),
|
||||
'resource_usage_percent': session_data.get('resource_usage_percent', 0),
|
||||
'is_continuation': session_data.get('is_continuation', False),
|
||||
'previous_session_id': session_data.get('previous_session_id'),
|
||||
'timestamp': time.time()
|
||||
}
|
||||
|
||||
# Detect project characteristics
|
||||
if context['project_path']:
|
||||
project_path = Path(context['project_path'])
|
||||
context.update(self._analyze_project_structure(project_path))
|
||||
|
||||
# Analyze user input for intent
|
||||
if context['user_input']:
|
||||
context.update(self._analyze_user_intent(context['user_input']))
|
||||
|
||||
return context
|
||||
|
||||
def _analyze_project_structure(self, project_path: Path) -> dict:
|
||||
"""Analyze project structure for intelligent configuration."""
|
||||
analysis = {
|
||||
'project_type': 'unknown',
|
||||
'has_tests': False,
|
||||
'has_frontend': False,
|
||||
'has_backend': False,
|
||||
'framework_detected': None,
|
||||
'file_count_estimate': 0,
|
||||
'directory_count_estimate': 0,
|
||||
'is_production': False
|
||||
}
|
||||
|
||||
try:
|
||||
if not project_path.exists():
|
||||
return analysis
|
||||
|
||||
# Quick file/directory count (limited for performance)
|
||||
files = list(project_path.rglob('*'))[:100] # Limit for performance
|
||||
analysis['file_count_estimate'] = len([f for f in files if f.is_file()])
|
||||
analysis['directory_count_estimate'] = len([f for f in files if f.is_dir()])
|
||||
|
||||
# Detect project type
|
||||
if (project_path / 'package.json').exists():
|
||||
analysis['project_type'] = 'nodejs'
|
||||
analysis['has_frontend'] = True
|
||||
elif (project_path / 'pyproject.toml').exists() or (project_path / 'setup.py').exists():
|
||||
analysis['project_type'] = 'python'
|
||||
elif (project_path / 'Cargo.toml').exists():
|
||||
analysis['project_type'] = 'rust'
|
||||
elif (project_path / 'go.mod').exists():
|
||||
analysis['project_type'] = 'go'
|
||||
|
||||
# Check for tests
|
||||
test_patterns = ['test', 'tests', '__tests__', 'spec']
|
||||
analysis['has_tests'] = any(
|
||||
(project_path / pattern).exists() or
|
||||
any(pattern in str(f) for f in files[:20])
|
||||
for pattern in test_patterns
|
||||
)
|
||||
|
||||
# Check for production indicators
|
||||
prod_indicators = ['.env.production', 'docker-compose.yml', 'Dockerfile', '.github']
|
||||
analysis['is_production'] = any(
|
||||
(project_path / indicator).exists() for indicator in prod_indicators
|
||||
)
|
||||
|
||||
# Framework detection (quick check)
|
||||
if analysis['project_type'] == 'nodejs':
|
||||
package_json = project_path / 'package.json'
|
||||
if package_json.exists():
|
||||
try:
|
||||
with open(package_json) as f:
|
||||
pkg_data = json.load(f)
|
||||
deps = {**pkg_data.get('dependencies', {}), **pkg_data.get('devDependencies', {})}
|
||||
|
||||
if 'react' in deps:
|
||||
analysis['framework_detected'] = 'react'
|
||||
elif 'vue' in deps:
|
||||
analysis['framework_detected'] = 'vue'
|
||||
elif 'angular' in deps:
|
||||
analysis['framework_detected'] = 'angular'
|
||||
elif 'express' in deps:
|
||||
analysis['has_backend'] = True
|
||||
except:
|
||||
pass
|
||||
|
||||
except Exception:
|
||||
# Return partial analysis on error
|
||||
pass
|
||||
|
||||
return analysis
|
||||
|
||||
def _analyze_user_intent(self, user_input: str) -> dict:
    """Classify the user's opening message.

    Derives operation type, a complexity score in [0, 1], brainstorming
    likelihood and urgency from keyword matches.

    NOTE(review): matching is substring-based, so keywords also trigger
    inside longer words (e.g. 'check' in 'checkout'); preserved as-is.
    """
    text = user_input.lower()

    result = {
        'operation_type': OperationType.READ,
        'complexity_score': 0.0,
        'brainstorming_likely': False,
        'user_expertise': 'intermediate',
        'urgency': 'normal'
    }

    # Operation type: the first matching keyword family wins, mirroring
    # the original if/elif precedence.
    operation_rules = (
        (OperationType.BUILD, 0.3, ('build', 'create', 'implement', 'develop')),
        (OperationType.ANALYZE, 0.2, ('fix', 'debug', 'troubleshoot', 'solve')),
        (OperationType.REFACTOR, 0.4, ('refactor', 'restructure', 'reorganize')),
        (OperationType.TEST, 0.1, ('test', 'validate', 'check')),
    )
    for op_type, weight, keywords in operation_rules:
        if any(word in text for word in keywords):
            result['operation_type'] = op_type
            result['complexity_score'] += weight
            break

    # Hesitant/exploratory phrasing suggests a brainstorming session.
    brainstorm_hints = (
        'not sure', 'thinking about', 'maybe', 'possibly', 'could we',
        'brainstorm', 'explore', 'figure out', 'new project', 'startup idea'
    )
    result['brainstorming_likely'] = any(hint in text for hint in brainstorm_hints)

    # Each scope/complexity keyword adds 0.2; total is capped at 1.0.
    scope_hints = (
        'complex', 'complicated', 'comprehensive', 'entire', 'whole', 'system-wide',
        'architecture', 'multiple', 'many', 'several'
    )
    for hint in scope_hints:
        if hint in text:
            result['complexity_score'] += 0.2

    result['complexity_score'] = min(result['complexity_score'], 1.0)

    # Urgency cues: explicit speed requests vs. relaxed phrasing.
    if any(word in text for word in ('urgent', 'asap', 'quickly', 'fast')):
        result['urgency'] = 'high'
    elif any(word in text for word in ('when you can', 'no rush', 'eventually')):
        result['urgency'] = 'low'

    return result
|
||||
|
||||
def _detect_session_patterns(self, context: dict) -> dict:
    """Run pattern detection over the session context.

    Returns the detector's recommendations (modes, MCP servers, flags)
    plus an overall confidence score.
    """
    operation_data = {
        'operation_type': context.get('operation_type', OperationType.READ).value,
        'file_count': context.get('file_count_estimate', 1),
        'directory_count': context.get('directory_count_estimate', 1),
        'complexity_score': context.get('complexity_score', 0.0),
        'has_external_dependencies': context.get('framework_detected') is not None,
        'project_type': context.get('project_type', 'unknown'),
    }

    detection = self.pattern_detector.detect_patterns(
        context.get('user_input', ''), context, operation_data
    )

    return {
        'pattern_matches': detection.matches,
        'recommended_modes': detection.recommended_modes,
        'recommended_mcp_servers': detection.recommended_mcp_servers,
        'suggested_flags': detection.suggested_flags,
        'confidence_score': detection.confidence_score,
    }
|
||||
|
||||
def _apply_learning_adaptations(self, context: dict, detection_result: dict) -> dict:
    """Let the learning engine refine the detector's recommendations."""
    baseline = {
        key: detection_result[key]
        for key in ('recommended_modes', 'recommended_mcp_servers', 'suggested_flags')
    }
    return self.learning_engine.apply_adaptations(context, baseline)
|
||||
|
||||
def _create_mcp_activation_plan(self, context: dict, recommendations: dict) -> dict:
    """Ask MCP intelligence for a server activation plan and flatten it."""
    operation_data = {
        'file_count': context.get('file_count_estimate', 1),
        'complexity_score': context.get('complexity_score', 0.0),
        'operation_type': context.get('operation_type', OperationType.READ).value,
    }

    plan = self.mcp_intelligence.create_activation_plan(
        context.get('user_input', ''), context, operation_data
    )

    # Flatten the plan object into the dict shape downstream code expects.
    return {
        'servers_to_activate': plan.servers_to_activate,
        'activation_order': plan.activation_order,
        'estimated_cost_ms': plan.estimated_cost_ms,
        'coordination_strategy': plan.coordination_strategy,
        'fallback_strategy': plan.fallback_strategy,
    }
|
||||
|
||||
def _configure_compression(self, context: dict) -> dict:
    """Choose a compression level for the session and estimate its impact.

    NOTE(review): reaches into the compression engine's private
    `_estimate_*` helpers; consider promoting those to a public API.
    """
    level = self.compression_engine.determine_compression_level(context)
    return {
        'compression_level': level.value,
        'estimated_savings': self.compression_engine._estimate_compression_savings(level),
        'quality_impact': self.compression_engine._estimate_quality_impact(level),
        'selective_compression_enabled': True,
    }
|
||||
|
||||
def _generate_session_config(self, context: dict, recommendations: dict,
|
||||
mcp_plan: dict, compression_config: dict) -> dict:
|
||||
"""Generate comprehensive session configuration."""
|
||||
config = {
|
||||
'session_id': context['session_id'],
|
||||
'superclaude_enabled': True,
|
||||
'initialization_timestamp': context['timestamp'],
|
||||
|
||||
# Mode configuration
|
||||
'active_modes': recommendations.get('recommended_modes', []),
|
||||
'mode_configurations': self._get_mode_configurations(recommendations),
|
||||
|
||||
# MCP server configuration
|
||||
'mcp_servers': {
|
||||
'enabled_servers': mcp_plan['servers_to_activate'],
|
||||
'activation_order': mcp_plan['activation_order'],
|
||||
'coordination_strategy': mcp_plan['coordination_strategy']
|
||||
},
|
||||
|
||||
# Compression configuration
|
||||
'compression': compression_config,
|
||||
|
||||
# Performance configuration
|
||||
'performance': {
|
||||
'resource_monitoring_enabled': True,
|
||||
'optimization_targets': self.framework_logic.performance_targets,
|
||||
'delegation_threshold': 0.4 if context.get('complexity_score', 0) > 0.4 else 0.6
|
||||
},
|
||||
|
||||
# Learning configuration
|
||||
'learning': {
|
||||
'adaptation_enabled': True,
|
||||
'effectiveness_tracking': True,
|
||||
'applied_adaptations': recommendations.get('applied_adaptations', [])
|
||||
},
|
||||
|
||||
# Context preservation
|
||||
'context': {
|
||||
'project_type': context.get('project_type', 'unknown'),
|
||||
'complexity_score': context.get('complexity_score', 0.0),
|
||||
'brainstorming_mode': context.get('brainstorming_likely', False),
|
||||
'user_expertise': context.get('user_expertise', 'intermediate')
|
||||
},
|
||||
|
||||
# Quality gates
|
||||
'quality_gates': self._configure_quality_gates(context),
|
||||
|
||||
# Session metadata
|
||||
'metadata': {
|
||||
'framework_version': '1.0.0',
|
||||
'hook_version': 'session_start_1.0',
|
||||
'configuration_source': 'superclaude_intelligence'
|
||||
}
|
||||
}
|
||||
|
||||
return config
|
||||
|
||||
def _get_mode_configurations(self, recommendations: dict) -> dict:
|
||||
"""Get specific configuration for activated modes."""
|
||||
mode_configs = {}
|
||||
|
||||
for mode in recommendations.get('recommended_modes', []):
|
||||
if mode == 'brainstorming':
|
||||
mode_configs[mode] = {
|
||||
'max_rounds': 15,
|
||||
'convergence_threshold': 0.85,
|
||||
'auto_handoff_enabled': True
|
||||
}
|
||||
elif mode == 'task_management':
|
||||
mode_configs[mode] = {
|
||||
'delegation_enabled': True,
|
||||
'wave_orchestration': True,
|
||||
'auto_checkpoints': True
|
||||
}
|
||||
elif mode == 'token_efficiency':
|
||||
mode_configs[mode] = {
|
||||
'compression_level': 'adaptive',
|
||||
'symbol_systems_enabled': True,
|
||||
'selective_preservation': True
|
||||
}
|
||||
|
||||
return mode_configs
|
||||
|
||||
def _configure_quality_gates(self, context: dict) -> list:
    """Configure quality gates based on context.

    Builds an OperationContext snapshot from the session context — using
    conservative defaults for anything the context does not provide — and
    delegates gate selection to the framework logic.
    """
    # Risk is pinned to LOW: at session start there is no signal yet to
    # rate real operation risk.  NOTE(review): confirm this is intentional
    # rather than an omitted risk-estimation step.
    operation_context = OperationContext(
        operation_type=context.get('operation_type', OperationType.READ),
        file_count=context.get('file_count_estimate', 1),
        directory_count=context.get('directory_count_estimate', 1),
        has_tests=context.get('has_tests', False),
        is_production=context.get('is_production', False),
        user_expertise=context.get('user_expertise', 'intermediate'),
        project_type=context.get('project_type', 'unknown'),
        complexity_score=context.get('complexity_score', 0.0),
        risk_level=RiskLevel.LOW
    )

    return self.framework_logic.get_quality_gates(operation_context)
|
||||
|
||||
def _record_session_learning(self, context: dict, session_config: dict):
    """Record session initialization as a learning event.

    Feeds the finished configuration back into the learning engine so
    future sessions with similar contexts can reuse the adaptations.
    """
    self.learning_engine.record_learning_event(
        LearningType.OPERATION_PATTERN,
        AdaptationScope.USER,
        context,
        {
            'session_config': session_config,
            'modes_activated': session_config.get('active_modes', []),
            'mcp_servers': session_config.get('mcp_servers', {}).get('enabled_servers', [])
        },
        1.0,  # Assume successful initialization
        0.8,  # High confidence in pattern
        {'hook': 'session_start', 'version': '1.0'}
    )
|
||||
|
||||
def _create_fallback_session_config(self, session_context: dict, error: str) -> dict:
|
||||
"""Create fallback configuration on error."""
|
||||
return {
|
||||
'session_id': session_context.get('session_id', 'unknown'),
|
||||
'superclaude_enabled': False,
|
||||
'fallback_mode': True,
|
||||
'error': error,
|
||||
'basic_config': {
|
||||
'compression_level': 'minimal',
|
||||
'mcp_servers_enabled': False,
|
||||
'learning_disabled': True
|
||||
},
|
||||
'performance_metrics': {
|
||||
'execution_time_ms': 0,
|
||||
'target_met': False,
|
||||
'error_occurred': True
|
||||
}
|
||||
}
|
||||
|
||||
def _activate_intelligent_modes(self, context: dict, recommendations: dict) -> list:
|
||||
"""Activate intelligent modes based on context and recommendations."""
|
||||
activated_modes = []
|
||||
|
||||
# Add brainstorming mode if likely
|
||||
if context.get('brainstorming_likely', False):
|
||||
activated_modes.append({'name': 'brainstorming', 'trigger': 'user input'})
|
||||
|
||||
# Add task management mode if recommended
|
||||
if 'task_management' in recommendations.get('recommended_modes', []):
|
||||
activated_modes.append({'name': 'task_management', 'trigger': 'pattern detection'})
|
||||
|
||||
# Add token efficiency mode if recommended
|
||||
if 'token_efficiency' in recommendations.get('recommended_modes', []):
|
||||
activated_modes.append({'name': 'token_efficiency', 'trigger': 'pattern detection'})
|
||||
|
||||
return activated_modes
|
||||
|
||||
def _configure_mcp_servers(self, context: dict, activated_modes: list) -> dict:
    """Configure MCP servers based on context.

    Previously this duplicated `_create_mcp_activation_plan` line-for-line;
    it now delegates to it and projects the plan onto the session-config
    key names.  `activated_modes` is kept for interface compatibility but
    (as before) is not consulted.
    """
    plan = self._create_mcp_activation_plan(context, {})
    return {
        'enabled_servers': plan['servers_to_activate'],
        'activation_order': plan['activation_order'],
        'coordination_strategy': plan['coordination_strategy']
    }
|
||||
|
||||
def _calculate_initialization_efficiency(self, execution_time: float) -> float:
|
||||
"""Calculate initialization efficiency score."""
|
||||
return 1.0 - (execution_time / self.performance_target_ms) if execution_time < self.performance_target_ms else 0.0
|
||||
|
||||
|
||||
def main():
    """Hook entry point: read session JSON from stdin, print config JSON.

    On any failure, emits a fallback-mode JSON payload and exits non-zero
    so the host can detect the degraded start.
    """
    try:
        payload = json.loads(sys.stdin.read())
        hook = SessionStartHook()
        print(json.dumps(hook.initialize_session(payload), indent=2))
    except Exception as exc:
        fallback = {
            'superclaude_enabled': False,
            'error': str(exc),
            'fallback_mode': True
        }
        print(json.dumps(fallback, indent=2))
        sys.exit(1)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
25
Framework-Hooks/hooks/shared/__init__.py
Normal file
25
Framework-Hooks/hooks/shared/__init__.py
Normal file
@@ -0,0 +1,25 @@
|
||||
"""
|
||||
SuperClaude-Lite Shared Infrastructure
|
||||
|
||||
Core components for the executable SuperClaude intelligence framework.
|
||||
Provides shared functionality across all 7 Claude Code hooks.
|
||||
"""
|
||||
|
||||
__version__ = "1.0.0"
|
||||
__author__ = "SuperClaude Framework"
|
||||
|
||||
from .yaml_loader import UnifiedConfigLoader
|
||||
from .framework_logic import FrameworkLogic
|
||||
from .pattern_detection import PatternDetector
|
||||
from .mcp_intelligence import MCPIntelligence
|
||||
from .compression_engine import CompressionEngine
|
||||
from .learning_engine import LearningEngine
|
||||
|
||||
__all__ = [
|
||||
'UnifiedConfigLoader',
|
||||
'FrameworkLogic',
|
||||
'PatternDetector',
|
||||
'MCPIntelligence',
|
||||
'CompressionEngine',
|
||||
'LearningEngine'
|
||||
]
|
||||
567
Framework-Hooks/hooks/shared/compression_engine.py
Normal file
567
Framework-Hooks/hooks/shared/compression_engine.py
Normal file
@@ -0,0 +1,567 @@
|
||||
"""
|
||||
Compression Engine for SuperClaude-Lite
|
||||
|
||||
Intelligent token optimization implementing MODE_Token_Efficiency.md algorithms
|
||||
with adaptive compression, symbol systems, and quality-gated validation.
|
||||
"""
|
||||
|
||||
import re
|
||||
import json
|
||||
import hashlib
|
||||
from typing import Dict, Any, List, Optional, Tuple, Set
|
||||
from dataclasses import dataclass
|
||||
from enum import Enum
|
||||
|
||||
from yaml_loader import config_loader
|
||||
|
||||
|
||||
class CompressionLevel(Enum):
    """Compression levels from MODE_Token_Efficiency.md.

    Ordered from lightest to most aggressive; the percentages are the
    target token-reduction bands documented for each tier.
    """
    MINIMAL = "minimal"        # 0-40% compression
    EFFICIENT = "efficient"    # 40-70% compression
    COMPRESSED = "compressed"  # 70-85% compression
    CRITICAL = "critical"      # 85-95% compression
    EMERGENCY = "emergency"    # 95%+ compression
|
||||
|
||||
|
||||
class ContentType(Enum):
    """Types of content for selective compression.

    The trailing comment on each member states the compression policy
    applied by CompressionEngine.compress_content.
    """
    FRAMEWORK_CONTENT = "framework"  # SuperClaude framework - EXCLUDE
    SESSION_DATA = "session"         # Session metadata - COMPRESS
    USER_CONTENT = "user"            # User project files - PRESERVE
    WORKING_ARTIFACTS = "artifacts"  # Analysis results - COMPRESS
||||
|
||||
|
||||
@dataclass
class CompressionResult:
    """Result of compression operation."""
    original_length: int        # character count before compression
    compressed_length: int      # character count after compression
    compression_ratio: float    # (original - compressed) / original
    quality_score: float        # 0.0 to 1.0
    techniques_used: List[str]  # names of the techniques actually applied
    preservation_score: float   # Information preservation (0.0 to 1.0)
    processing_time_ms: float   # wall-clock time spent compressing
|
||||
|
||||
|
||||
@dataclass
class CompressionStrategy:
    """Strategy configuration for compression."""
    level: CompressionLevel                   # tier driving technique selection
    symbol_systems_enabled: bool              # phrase -> symbol substitution
    abbreviation_systems_enabled: bool        # phrase -> abbreviation substitution
    structural_optimization: bool             # whitespace/phrase restructuring
    selective_preservation: Dict[str, bool]   # per-category preservation flags
    quality_threshold: float                  # minimum acceptable quality score
|
||||
|
||||
|
||||
class CompressionEngine:
|
||||
"""
|
||||
Intelligent token optimization engine implementing MODE_Token_Efficiency.md.
|
||||
|
||||
Features:
|
||||
- 5-level adaptive compression (minimal to emergency)
|
||||
- Symbol systems for mathematical and logical relationships
|
||||
- Abbreviation systems for technical domains
|
||||
- Selective compression with framework/user content protection
|
||||
- Quality-gated validation with ≥95% information preservation
|
||||
- Real-time compression effectiveness monitoring
|
||||
"""
|
||||
|
||||
def __init__(self):
    # Compression settings from the unified YAML configuration.
    self.config = config_loader.load_config('compression')
    # Phrase -> symbol and phrase -> abbreviation replacement tables.
    self.symbol_mappings = self._load_symbol_mappings()
    self.abbreviation_mappings = self._load_abbreviation_mappings()
    # content-hash -> compressed text, written by compress_content.
    # NOTE(review): this cache is never read back and grows without
    # bound — confirm whether a lookup (or eviction) was intended.
    self.compression_cache = {}
    self.performance_metrics = {}
|
||||
|
||||
def _load_symbol_mappings(self) -> Dict[str, str]:
    """Load symbol system mappings from configuration.

    Despite the name, the table is currently hard-coded rather than read
    from `self.config`.  NOTE(review): very short keys such as 'and',
    'or', 'sync', 'data' will match aggressively in ordinary prose —
    confirm that is acceptable for the compression tiers that enable
    symbol substitution.
    """
    return {
        # Core Logic & Flow
        'leads to': '→',
        'implies': '→',
        'transforms to': '⇒',
        'converts to': '⇒',
        'rollback': '←',
        'reverse': '←',
        'bidirectional': '⇄',
        'sync': '⇄',
        'and': '&',
        'combine': '&',
        'separator': '|',
        'or': '|',
        'define': ':',
        'specify': ':',
        'sequence': '»',
        'then': '»',
        'therefore': '∴',
        'because': '∵',
        'equivalent': '≡',
        'approximately': '≈',
        'not equal': '≠',

        # Status & Progress
        'completed': '✅',
        'passed': '✅',
        'failed': '❌',
        'error': '❌',
        'warning': '⚠️',
        'information': 'ℹ️',
        'in progress': '🔄',
        'processing': '🔄',
        'waiting': '⏳',
        'pending': '⏳',
        'critical': '🚨',
        'urgent': '🚨',
        'target': '🎯',
        'goal': '🎯',
        'metrics': '📊',
        'data': '📊',
        'insight': '💡',
        'learning': '💡',

        # Technical Domains
        'performance': '⚡',
        'optimization': '⚡',
        'analysis': '🔍',
        'investigation': '🔍',
        'configuration': '🔧',
        'setup': '🔧',
        'security': '🛡️',
        'protection': '🛡️',
        'deployment': '📦',
        'package': '📦',
        'design': '🎨',
        'frontend': '🎨',
        'network': '🌐',
        'connectivity': '🌐',
        'mobile': '📱',
        'responsive': '📱',
        'architecture': '🏗️',
        'system structure': '🏗️',
        'components': '🧩',
        'modular': '🧩'
    }
|
||||
|
||||
def _load_abbreviation_mappings(self) -> Dict[str, str]:
|
||||
"""Load abbreviation system mappings from configuration."""
|
||||
return {
|
||||
# System & Architecture
|
||||
'configuration': 'cfg',
|
||||
'settings': 'cfg',
|
||||
'implementation': 'impl',
|
||||
'code structure': 'impl',
|
||||
'architecture': 'arch',
|
||||
'system design': 'arch',
|
||||
'performance': 'perf',
|
||||
'optimization': 'perf',
|
||||
'operations': 'ops',
|
||||
'deployment': 'ops',
|
||||
'environment': 'env',
|
||||
'runtime context': 'env',
|
||||
|
||||
# Development Process
|
||||
'requirements': 'req',
|
||||
'dependencies': 'deps',
|
||||
'packages': 'deps',
|
||||
'validation': 'val',
|
||||
'verification': 'val',
|
||||
'testing': 'test',
|
||||
'quality assurance': 'test',
|
||||
'documentation': 'docs',
|
||||
'guides': 'docs',
|
||||
'standards': 'std',
|
||||
'conventions': 'std',
|
||||
|
||||
# Quality & Analysis
|
||||
'quality': 'qual',
|
||||
'maintainability': 'qual',
|
||||
'security': 'sec',
|
||||
'safety measures': 'sec',
|
||||
'error': 'err',
|
||||
'exception handling': 'err',
|
||||
'recovery': 'rec',
|
||||
'resilience': 'rec',
|
||||
'severity': 'sev',
|
||||
'priority level': 'sev',
|
||||
'optimization': 'opt',
|
||||
'improvement': 'opt'
|
||||
}
|
||||
|
||||
def determine_compression_level(self, context: Dict[str, Any]) -> CompressionLevel:
    """Pick the compression tier for the current situation.

    Args:
        context: Session context (resource usage %, conversation length,
            explicit brevity requests, complexity score).

    Returns:
        The CompressionLevel appropriate for the constraints, checked
        from most to least severe.
    """
    usage = context.get('resource_usage_percent', 0)
    turns = context.get('conversation_length', 0)
    wants_brevity = context.get('user_requests_brevity', False)
    complexity = context.get('complexity_score', 0.0)

    # Severity ladder: first matching rung wins.
    if usage >= 95:
        return CompressionLevel.EMERGENCY
    if usage >= 85 or turns > 200:
        return CompressionLevel.CRITICAL
    if usage >= 70 or turns > 100 or wants_brevity:
        return CompressionLevel.COMPRESSED
    if usage >= 40 or complexity > 0.6:
        return CompressionLevel.EFFICIENT
    return CompressionLevel.MINIMAL
|
||||
|
||||
def classify_content(self, content: str, metadata: Dict[str, Any]) -> ContentType:
    """
    Classify content type for selective compression.

    Args:
        content: Content to classify
        metadata: Metadata about the content (file paths, context, etc.)

    Returns:
        ContentType for compression decision making
    """
    file_path = metadata.get('file_path', '')
    context_type = metadata.get('context_type', '')

    # Framework content - complete exclusion
    framework_patterns = [
        '/SuperClaude/SuperClaude/',
        '~/.claude/',
        '.claude/',
        'SuperClaude/',
        'CLAUDE.md',
        'FLAGS.md',
        'PRINCIPLES.md',
        'ORCHESTRATOR.md',
        'MCP_',
        'MODE_',
        'SESSION_LIFECYCLE.md'
    ]

    # NOTE(review): the `pattern in content` test means ANY text that
    # merely mentions e.g. 'MCP_' or 'CLAUDE.md' is treated as framework
    # content and never compressed — confirm that substring matching on
    # the body (not just the path) is intended.
    for pattern in framework_patterns:
        if pattern in file_path or pattern in content:
            return ContentType.FRAMEWORK_CONTENT

    # Session data - apply compression
    if context_type in ['session_metadata', 'checkpoint_data', 'cache_content']:
        return ContentType.SESSION_DATA

    # Working artifacts - apply compression
    if context_type in ['analysis_results', 'processing_data', 'working_artifacts']:
        return ContentType.WORKING_ARTIFACTS

    # User content - preserve with minimal compression only
    user_patterns = [
        'project_files',
        'user_documentation',
        'source_code',
        'configuration_files',
        'custom_content'
    ]

    for pattern in user_patterns:
        if pattern in context_type or pattern in file_path:
            return ContentType.USER_CONTENT

    # Default to user content preservation (safest choice for unknowns).
    return ContentType.USER_CONTENT
|
||||
|
||||
def compress_content(self,
                     content: str,
                     context: Dict[str, Any],
                     metadata: Optional[Dict[str, Any]] = None) -> CompressionResult:
    """
    Compress content with intelligent optimization.

    Pipeline: classify the content, choose a compression level, apply the
    enabled techniques (symbols, abbreviations, structure), then score
    quality and information preservation on the result.

    Args:
        content: Content to compress
        context: Session context for compression level determination
        metadata: Content metadata for selective compression

    Returns:
        CompressionResult with metrics and compressed content
    """
    # Local import: the module does not import `time` at the top level.
    import time
    start_time = time.time()

    if metadata is None:
        metadata = {}

    # Classify content type
    content_type = self.classify_content(content, metadata)

    # Framework content - returned untouched with perfect scores.
    if content_type == ContentType.FRAMEWORK_CONTENT:
        return CompressionResult(
            original_length=len(content),
            compressed_length=len(content),
            compression_ratio=0.0,
            quality_score=1.0,
            techniques_used=['framework_exclusion'],
            preservation_score=1.0,
            processing_time_ms=(time.time() - start_time) * 1000
        )

    # User content is always forced to the lightest tier; everything else
    # gets the context-driven level.
    if content_type == ContentType.USER_CONTENT:
        compression_level = CompressionLevel.MINIMAL
    else:
        compression_level = self.determine_compression_level(context)

    # Create compression strategy
    strategy = self._create_compression_strategy(compression_level, content_type)

    # Apply compression techniques in a fixed order:
    # symbols -> abbreviations -> structural optimization.
    compressed_content = content
    techniques_used = []

    if strategy.symbol_systems_enabled:
        compressed_content, symbol_techniques = self._apply_symbol_systems(compressed_content)
        techniques_used.extend(symbol_techniques)

    if strategy.abbreviation_systems_enabled:
        compressed_content, abbrev_techniques = self._apply_abbreviation_systems(compressed_content)
        techniques_used.extend(abbrev_techniques)

    if strategy.structural_optimization:
        compressed_content, struct_techniques = self._apply_structural_optimization(
            compressed_content, compression_level
        )
        techniques_used.extend(struct_techniques)

    # Calculate metrics
    original_length = len(content)
    compressed_length = len(compressed_content)
    compression_ratio = (original_length - compressed_length) / original_length if original_length > 0 else 0.0

    # Quality validation (heuristic scores; see the helper docstrings).
    quality_score = self._validate_compression_quality(content, compressed_content, strategy)
    preservation_score = self._calculate_information_preservation(content, compressed_content)

    processing_time = (time.time() - start_time) * 1000

    # Cache the compressed text keyed by the original's MD5.
    # NOTE(review): the cache is written here but never consulted before
    # compressing, and is unbounded — confirm intended.
    cache_key = hashlib.md5(content.encode()).hexdigest()
    self.compression_cache[cache_key] = compressed_content

    return CompressionResult(
        original_length=original_length,
        compressed_length=compressed_length,
        compression_ratio=compression_ratio,
        quality_score=quality_score,
        techniques_used=techniques_used,
        preservation_score=preservation_score,
        processing_time_ms=processing_time
    )
|
||||
|
||||
def _create_compression_strategy(self, level: CompressionLevel, content_type: ContentType) -> CompressionStrategy:
    """Build the technique/threshold strategy for a level and content type.

    The table maps each level to
    (symbols_enabled, abbreviations_enabled, structural_enabled, quality_threshold);
    user content gets its quality threshold raised by 0.1 (capped at 1.0).
    """
    technique_table = {
        CompressionLevel.MINIMAL: (False, False, False, 0.98),
        CompressionLevel.EFFICIENT: (True, False, True, 0.95),
        CompressionLevel.COMPRESSED: (True, True, True, 0.90),
        CompressionLevel.CRITICAL: (True, True, True, 0.85),
        CompressionLevel.EMERGENCY: (True, True, True, 0.80),
    }
    symbols, abbreviations, structural, threshold = technique_table[level]

    if content_type == ContentType.USER_CONTENT:
        # Be more conservative with user-owned content.
        threshold = min(threshold + 0.1, 1.0)

    return CompressionStrategy(
        level=level,
        symbol_systems_enabled=symbols,
        abbreviation_systems_enabled=abbreviations,
        structural_optimization=structural,
        selective_preservation={},
        quality_threshold=threshold
    )
|
||||
|
||||
def _apply_symbol_systems(self, content: str) -> Tuple[str, List[str]]:
|
||||
"""Apply symbol system replacements."""
|
||||
compressed = content
|
||||
techniques = []
|
||||
|
||||
# Apply symbol mappings with word boundary protection
|
||||
for phrase, symbol in self.symbol_mappings.items():
|
||||
pattern = r'\b' + re.escape(phrase) + r'\b'
|
||||
if re.search(pattern, compressed, re.IGNORECASE):
|
||||
compressed = re.sub(pattern, symbol, compressed, flags=re.IGNORECASE)
|
||||
techniques.append(f"symbol_{phrase.replace(' ', '_')}")
|
||||
|
||||
return compressed, techniques
|
||||
|
||||
def _apply_abbreviation_systems(self, content: str) -> Tuple[str, List[str]]:
|
||||
"""Apply abbreviation system replacements."""
|
||||
compressed = content
|
||||
techniques = []
|
||||
|
||||
# Apply abbreviation mappings with context awareness
|
||||
for phrase, abbrev in self.abbreviation_mappings.items():
|
||||
pattern = r'\b' + re.escape(phrase) + r'\b'
|
||||
if re.search(pattern, compressed, re.IGNORECASE):
|
||||
compressed = re.sub(pattern, abbrev, compressed, flags=re.IGNORECASE)
|
||||
techniques.append(f"abbrev_{phrase.replace(' ', '_')}")
|
||||
|
||||
return compressed, techniques
|
||||
|
||||
def _apply_structural_optimization(self, content: str, level: CompressionLevel) -> Tuple[str, List[str]]:
    """Apply structural optimizations for token efficiency.

    Fix: the original collapsed ALL whitespace (including newlines) to
    single spaces first, which made its follow-up blank-line pass
    (`\\n\\s*\\n` -> `\\n`) dead code.  Horizontal runs are now collapsed
    with `[ \\t]+` so line structure survives and the blank-line collapse
    is effective.

    Returns the optimized text and the list of technique tags applied.
    """
    # Collapse runs of spaces/tabs, then collapse blank lines.
    compressed = re.sub(r'[ \t]+', ' ', content)
    compressed = re.sub(r'\n\s*\n', '\n', compressed)
    techniques = ['whitespace_optimization']

    # Aggressive optimizations for higher compression levels
    if level in [CompressionLevel.COMPRESSED, CompressionLevel.CRITICAL, CompressionLevel.EMERGENCY]:
        # Remove articles ("the", "a", "an").
        compressed = re.sub(r'\b(the|a|an)\s+', '', compressed, flags=re.IGNORECASE)
        techniques.append('article_removal')

        # Simplify common verbose phrases.
        phrase_simplifications = {
            r'in order to': 'to',
            r'it is important to note that': 'note:',
            r'please be aware that': 'note:',
            r'it should be noted that': 'note:',
            r'for the purpose of': 'for',
            r'with regard to': 'regarding',
            r'in relation to': 'regarding'
        }

        for pattern, replacement in phrase_simplifications.items():
            if re.search(pattern, compressed, re.IGNORECASE):
                compressed = re.sub(pattern, replacement, compressed, flags=re.IGNORECASE)
                techniques.append(f'phrase_simplification_{replacement}')

    return compressed, techniques
|
||||
|
||||
def _validate_compression_quality(self, original: str, compressed: str, strategy: CompressionStrategy) -> float:
|
||||
"""Validate compression quality against thresholds."""
|
||||
# Simple quality heuristics (real implementation would be more sophisticated)
|
||||
|
||||
# Check if key information is preserved
|
||||
original_words = set(re.findall(r'\b\w+\b', original.lower()))
|
||||
compressed_words = set(re.findall(r'\b\w+\b', compressed.lower()))
|
||||
|
||||
# Word preservation ratio
|
||||
word_preservation = len(compressed_words & original_words) / len(original_words) if original_words else 1.0
|
||||
|
||||
# Length efficiency (not too aggressive)
|
||||
length_ratio = len(compressed) / len(original) if original else 1.0
|
||||
|
||||
# Penalize over-compression
|
||||
if length_ratio < 0.3:
|
||||
word_preservation *= 0.8
|
||||
|
||||
quality_score = (word_preservation * 0.7) + (min(length_ratio * 2, 1.0) * 0.3)
|
||||
|
||||
return min(quality_score, 1.0)
|
||||
|
||||
def _calculate_information_preservation(self, original: str, compressed: str) -> float:
|
||||
"""Calculate information preservation score."""
|
||||
# Simple preservation metric based on key information retention
|
||||
|
||||
# Extract key concepts (capitalized words, technical terms)
|
||||
original_concepts = set(re.findall(r'\b[A-Z][a-z]+\b|\b\w+\.(js|py|md|yaml|json)\b', original))
|
||||
compressed_concepts = set(re.findall(r'\b[A-Z][a-z]+\b|\b\w+\.(js|py|md|yaml|json)\b', compressed))
|
||||
|
||||
if not original_concepts:
|
||||
return 1.0
|
||||
|
||||
preservation_ratio = len(compressed_concepts & original_concepts) / len(original_concepts)
|
||||
return preservation_ratio
|
||||
|
||||
def get_compression_recommendations(self, context: Dict[str, Any]) -> Dict[str, Any]:
    """Summarize the current compression posture with tuning advice.

    Advice is driven by resource usage (one tier-specific message) plus
    an extra note when compression processing time is high.
    """
    level = self.determine_compression_level(context)
    usage = context.get('resource_usage_percent', 0)

    advice = []
    if usage > 85:
        advice.append("Enable emergency compression mode for critical resource constraints")
    elif usage > 70:
        advice.append("Consider compressed mode for better resource efficiency")
    elif usage < 40:
        advice.append("Resource usage low - minimal compression sufficient")

    if context.get('processing_time_ms', 0) > 500:
        advice.append("Compression processing time high - consider caching strategies")

    return {
        'current_level': level.value,
        'recommendations': advice,
        'estimated_savings': self._estimate_compression_savings(level),
        'quality_impact': self._estimate_quality_impact(level),
        'performance_metrics': self.performance_metrics
    }
|
||||
|
||||
def _estimate_compression_savings(self, level: CompressionLevel) -> Dict[str, float]:
    """Return estimated token-reduction and time-savings fractions per level."""
    # (token_reduction, time_savings) per tier; unknown tiers save nothing.
    token_reduction, time_savings = {
        CompressionLevel.MINIMAL: (0.15, 0.05),
        CompressionLevel.EFFICIENT: (0.40, 0.15),
        CompressionLevel.COMPRESSED: (0.60, 0.25),
        CompressionLevel.CRITICAL: (0.75, 0.35),
        CompressionLevel.EMERGENCY: (0.85, 0.45),
    }.get(level, (0.0, 0.0))
    return {'token_reduction': token_reduction, 'time_savings': time_savings}
|
||||
|
||||
def _estimate_quality_impact(self, level: CompressionLevel) -> float:
    """Return the expected information-preservation score for a level."""
    estimates = {
        CompressionLevel.MINIMAL: 0.98,
        CompressionLevel.EFFICIENT: 0.95,
        CompressionLevel.COMPRESSED: 0.90,
        CompressionLevel.CRITICAL: 0.85,
        CompressionLevel.EMERGENCY: 0.80,
    }
    # Unknown tiers fall back to a mid-range conservative estimate.
    return estimates.get(level, 0.95)
|
||||
343
Framework-Hooks/hooks/shared/framework_logic.py
Normal file
343
Framework-Hooks/hooks/shared/framework_logic.py
Normal file
@@ -0,0 +1,343 @@
|
||||
"""
|
||||
Core SuperClaude Framework Logic
|
||||
|
||||
Implements the core decision-making algorithms from the SuperClaude framework,
|
||||
including RULES.md, PRINCIPLES.md, and ORCHESTRATOR.md patterns.
|
||||
"""
|
||||
|
||||
import json
|
||||
import time
|
||||
from typing import Dict, Any, List, Optional, Tuple, Union
|
||||
from dataclasses import dataclass
|
||||
from enum import Enum
|
||||
|
||||
from yaml_loader import config_loader
|
||||
|
||||
|
||||
class OperationType(Enum):
    """Types of operations SuperClaude can perform."""
    READ = "read"
    WRITE = "write"
    EDIT = "edit"
    ANALYZE = "analyze"
    BUILD = "build"
    TEST = "test"
    DEPLOY = "deploy"
    REFACTOR = "refactor"
|
||||
|
||||
class RiskLevel(Enum):
    """Risk levels for operations, ordered from safest to most severe."""
    LOW = "low"
    MEDIUM = "medium"
    HIGH = "high"
    CRITICAL = "critical"
|
||||
|
||||
|
||||
@dataclass
class OperationContext:
    """Context information for an operation."""
    operation_type: OperationType  # what kind of work is being done
    file_count: int                # number of files in scope
    directory_count: int           # number of directories in scope
    has_tests: bool                # whether the project has a test suite
    is_production: bool            # whether the target is production code
    user_expertise: str            # beginner, intermediate, expert
    project_type: str              # web, api, cli, library, etc.
    complexity_score: float        # 0.0 to 1.0
    risk_level: RiskLevel          # assessed operation risk
|
||||
|
||||
|
||||
@dataclass
class ValidationResult:
    """Result of validation checks."""
    is_valid: bool          # overall pass/fail
    issues: List[str]       # blocking problems found
    warnings: List[str]     # non-blocking concerns
    suggestions: List[str]  # optional improvements
    quality_score: float    # 0.0 to 1.0
|
||||
|
||||
|
||||
class FrameworkLogic:
    """
    Core SuperClaude framework logic implementation.

    Encapsulates decision-making algorithms from:
    - RULES.md: Operational rules and security patterns
    - PRINCIPLES.md: Development principles and quality standards
    - ORCHESTRATOR.md: Intelligent routing and coordination
    """

    def __init__(self):
        """Load per-hook and global performance budgets from configuration."""
        # (target key, hook name, default budget in ms)
        hook_budgets = (
            ('session_start_ms', 'session_start', 50),
            ('tool_routing_ms', 'pre_tool_use', 200),
            ('validation_ms', 'post_tool_use', 100),
            ('compression_ms', 'pre_compact', 150),
        )
        self.performance_targets = {
            key: config_loader.get_hook_config(hook, 'performance_target_ms', fallback)
            for key, hook, fallback in hook_budgets
        }

        # Global configuration values extend/override the per-hook defaults.
        global_targets = config_loader.get_performance_targets()
        if global_targets:
            self.performance_targets.update(global_targets)

    def should_use_read_before_write(self, context: OperationContext) -> bool:
        """
        RULES.md: Always use Read tool before Write or Edit operations.
        """
        return context.operation_type in (OperationType.WRITE, OperationType.EDIT)

    def calculate_complexity_score(self, operation_data: Dict[str, Any]) -> float:
        """
        Calculate operation complexity score (0.0 to 1.0).

        Factors: file count (up to 0.3), directory spread (up to 0.2),
        operation type (up to 0.3), and language/framework scope (up to 0.2).
        """
        total = 0.0

        # File count contribution: 1 file is free; >10 files is the maximum.
        files = operation_data.get('file_count', 1)
        if files > 10:
            total += 0.3
        elif files > 3:
            total += 0.2
        elif files > 1:
            total += 0.1

        # Directory spread contribution.
        dirs = operation_data.get('directory_count', 1)
        if dirs > 2:
            total += 0.2
        elif dirs > 1:
            total += 0.1

        # Operation type contribution, from most to least structural.
        op_kind = operation_data.get('operation_type', '')
        if op_kind in {'refactor', 'architecture', 'system-wide'}:
            total += 0.3
        elif op_kind in {'build', 'implement', 'migrate'}:
            total += 0.2
        elif op_kind in {'fix', 'update', 'improve'}:
            total += 0.1

        # Language/framework contribution.
        if operation_data.get('multi_language', False):
            total += 0.2
        elif operation_data.get('framework_changes', False):
            total += 0.1

        return min(total, 1.0)

    def assess_risk_level(self, context: OperationContext) -> RiskLevel:
        """
        Assess risk level based on operation context.
        """
        # Production work and highly complex changes are always high risk.
        if context.is_production or context.complexity_score > 0.7:
            return RiskLevel.HIGH
        if context.complexity_score > 0.4 or context.file_count > 10:
            return RiskLevel.MEDIUM
        return RiskLevel.LOW

    def should_enable_validation(self, context: OperationContext) -> bool:
        """
        ORCHESTRATOR.md: Enable validation for production code or high-risk operations.
        """
        if context.is_production:
            return True
        if context.risk_level in (RiskLevel.HIGH, RiskLevel.CRITICAL):
            return True
        return context.operation_type in (OperationType.DEPLOY, OperationType.REFACTOR)

    def should_enable_delegation(self, context: OperationContext) -> Tuple[bool, str]:
        """
        ORCHESTRATOR.md: Enable delegation for multi-file operations.

        Returns:
            (should_delegate, delegation_strategy)
        """
        if context.file_count > 3:
            return True, "files"
        if context.directory_count > 2:
            return True, "folders"
        if context.complexity_score > 0.4:
            return True, "auto"
        return False, "none"

    def validate_operation(self, operation_data: Dict[str, Any]) -> ValidationResult:
        """
        PRINCIPLES.md: Validate operation against core principles.

        Deducts from a starting quality score of 1.0 for each violated
        principle; blocking problems also invalidate the operation.
        """
        problems: List[str] = []
        cautions: List[str] = []
        tips: List[str] = []
        score = 1.0

        # Evidence-based decision making.
        if 'evidence' not in operation_data:
            cautions.append("No evidence provided for decision")
            score -= 0.1

        # Mutating operations must handle errors.
        op_kind = operation_data.get('operation_type')
        if op_kind in ('write', 'edit', 'deploy') and not operation_data.get('has_error_handling', False):
            problems.append("Error handling not implemented")
            score -= 0.2

        # Logic changes should be covered by tests.
        if operation_data.get('affects_logic', False) and not operation_data.get('has_tests', False):
            cautions.append("No tests found for logic changes")
            score -= 0.1
            tips.append("Add unit tests for new logic")

        # Public APIs should be documented.
        if operation_data.get('is_public_api', False) and not operation_data.get('has_documentation', False):
            cautions.append("Public API lacks documentation")
            score -= 0.1
            tips.append("Add API documentation")

        # Security: untrusted input must be validated (blocking).
        if operation_data.get('handles_user_input', False) and not operation_data.get('has_input_validation', False):
            problems.append("User input handling without validation")
            score -= 0.3

        return ValidationResult(
            is_valid=not problems and score >= 0.7,
            issues=problems,
            warnings=cautions,
            suggestions=tips,
            quality_score=max(score, 0.0),
        )

    def determine_thinking_mode(self, context: OperationContext) -> Optional[str]:
        """
        FLAGS.md: Determine appropriate thinking mode based on complexity.

        Returns the flag string for the highest matching complexity tier,
        or None when no deep-thinking mode is warranted.
        """
        tiers = (
            (0.8, "--ultrathink"),
            (0.6, "--think-hard"),
            (0.3, "--think"),
        )
        for threshold, flag in tiers:
            if context.complexity_score >= threshold:
                return flag
        return None

    def should_enable_efficiency_mode(self, session_data: Dict[str, Any]) -> bool:
        """
        MODE_Token_Efficiency.md: Enable efficiency mode based on resource usage.
        """
        if session_data.get('resource_usage_percent', 0) > 75:
            return True
        if session_data.get('conversation_length', 0) > 100:
            return True
        return session_data.get('user_requests_brevity', False)

    def get_quality_gates(self, context: OperationContext) -> List[str]:
        """
        ORCHESTRATOR.md: Get appropriate quality gates for operation.
        """
        # Syntax validation always applies; the rest depend on context.
        gates = ['syntax_validation']

        if context.operation_type in (OperationType.WRITE, OperationType.EDIT):
            gates += ['type_analysis', 'code_quality']

        if self.should_enable_validation(context):
            gates += ['security_assessment', 'performance_analysis']

        if context.has_tests:
            gates.append('test_validation')

        if context.operation_type == OperationType.DEPLOY:
            gates += ['integration_testing', 'deployment_validation']

        return gates

    def estimate_performance_impact(self, context: OperationContext) -> Dict[str, Any]:
        """
        Estimate performance impact and suggested optimizations.
        """
        baseline_ms = 100

        # Complexity scales the baseline up to 4x; large file sets add 50%.
        projected_ms = baseline_ms * (1 + context.complexity_score * 3)
        if context.file_count > 5:
            projected_ms *= 1.5

        hints = []
        if context.file_count > 3:
            hints.append("Consider parallel processing")
        if context.complexity_score > 0.6:
            hints.append("Enable delegation mode")
        if context.directory_count > 2:
            hints.append("Use folder-based delegation")

        return {
            'estimated_time_ms': int(projected_ms),
            'performance_risk': 'high' if projected_ms > 1000 else 'low',
            'suggested_optimizations': hints,
            'efficiency_gains_possible': len(hints) > 0,
        }

    def apply_superclaude_principles(self, operation_data: Dict[str, Any]) -> Dict[str, Any]:
        """
        Apply SuperClaude core principles to operation planning.

        Returns enhanced operation data with principle-based recommendations.
        """
        enhanced = operation_data.copy()

        # Evidence > assumptions
        if 'assumptions' in enhanced and not enhanced.get('evidence'):
            enhanced.setdefault('recommendations', []).append(
                "Gather evidence to validate assumptions"
            )

        # Code > documentation
        if enhanced.get('operation_type') == 'document' and not enhanced.get('has_working_code'):
            enhanced.setdefault('warnings', []).append(
                "Ensure working code exists before extensive documentation"
            )

        # Efficiency > verbosity
        if enhanced.get('output_length', 0) > 1000 and not enhanced.get('justification_for_length'):
            enhanced.setdefault('efficiency_suggestions', []).append(
                "Consider token efficiency techniques for long outputs"
            )

        return enhanced
|
||||
615
Framework-Hooks/hooks/shared/learning_engine.py
Normal file
615
Framework-Hooks/hooks/shared/learning_engine.py
Normal file
@@ -0,0 +1,615 @@
|
||||
"""
|
||||
Learning Engine for SuperClaude-Lite
|
||||
|
||||
Cross-hook adaptation system that learns from user patterns, operation effectiveness,
|
||||
and system performance to continuously improve SuperClaude intelligence.
|
||||
"""
|
||||
|
||||
import json
|
||||
import time
|
||||
import statistics
|
||||
from typing import Dict, Any, List, Optional, Tuple, Set
|
||||
from dataclasses import dataclass, asdict
|
||||
from enum import Enum
|
||||
from pathlib import Path
|
||||
|
||||
from yaml_loader import config_loader
|
||||
|
||||
|
||||
class LearningType(Enum):
    """Types of learning patterns recorded by the LearningEngine.

    Values are the strings used when records are serialized to the cache.
    """
    USER_PREFERENCE = "user_preference"
    OPERATION_PATTERN = "operation_pattern"
    PERFORMANCE_OPTIMIZATION = "performance_optimization"
    ERROR_RECOVERY = "error_recovery"
    EFFECTIVENESS_FEEDBACK = "effectiveness_feedback"
|
||||
|
||||
|
||||
class AdaptationScope(Enum):
    """Scope of learning adaptations, from narrowest to widest."""
    SESSION = "session"      # Apply only to current session
    PROJECT = "project"      # Apply to current project
    USER = "user"            # Apply across all user sessions
    GLOBAL = "global"        # Apply to all users (anonymized)
|
||||
|
||||
|
||||
@dataclass
class LearningRecord:
    """Record of a single learning event observed by a hook."""
    timestamp: float               # epoch seconds when the event occurred
    learning_type: LearningType
    scope: AdaptationScope
    context: Dict[str, Any]        # situation in which the pattern was observed
    pattern: Dict[str, Any]        # the observed pattern or behavior itself
    effectiveness_score: float  # 0.0 to 1.0
    confidence: float  # 0.0 to 1.0
    metadata: Dict[str, Any]       # free-form additional details
|
||||
|
||||
|
||||
@dataclass
class Adaptation:
    """An adaptation learned from recurring patterns."""
    adaptation_id: str                   # unique id, derived from creation time
    pattern_signature: str               # see LearningEngine._generate_pattern_signature
    trigger_conditions: Dict[str, Any]   # context values that activate this adaptation
    modifications: Dict[str, Any]        # recommendation changes to apply when triggered
    effectiveness_history: List[float]   # per-use effectiveness scores (0.0 to 1.0)
    usage_count: int
    last_used: float                     # epoch seconds of most recent use
    confidence_score: float              # 0.0 to 1.0
|
||||
|
||||
|
||||
@dataclass
class LearningInsight:
    """Insight derived from learning patterns."""
    insight_type: str          # e.g. "user_preference", "performance_optimization"
    description: str           # human-readable summary of the insight
    evidence: List[str]        # supporting observations
    recommendations: List[str] # suggested actions
    confidence: float          # 0.0 to 1.0
    impact_score: float        # 0.0 to 1.0, relative importance
|
||||
|
||||
|
||||
class LearningEngine:
|
||||
"""
|
||||
Cross-hook adaptation system for continuous improvement.
|
||||
|
||||
Features:
|
||||
- User preference learning and adaptation
|
||||
- Operation pattern recognition and optimization
|
||||
- Performance feedback integration
|
||||
- Cross-hook coordination and knowledge sharing
|
||||
- Effectiveness measurement and validation
|
||||
- Personalization and project-specific adaptations
|
||||
"""
|
||||
|
||||
def __init__(self, cache_dir: Path):
|
||||
self.cache_dir = Path(cache_dir)
|
||||
self.cache_dir.mkdir(exist_ok=True)
|
||||
|
||||
self.learning_records: List[LearningRecord] = []
|
||||
self.adaptations: Dict[str, Adaptation] = {}
|
||||
self.user_preferences: Dict[str, Any] = {}
|
||||
self.project_patterns: Dict[str, Dict[str, Any]] = {}
|
||||
|
||||
self._load_learning_data()
|
||||
|
||||
def _load_learning_data(self):
|
||||
"""Load existing learning data from cache."""
|
||||
try:
|
||||
# Load learning records
|
||||
records_file = self.cache_dir / "learning_records.json"
|
||||
if records_file.exists():
|
||||
with open(records_file, 'r') as f:
|
||||
data = json.load(f)
|
||||
self.learning_records = [
|
||||
LearningRecord(**record) for record in data
|
||||
]
|
||||
|
||||
# Load adaptations
|
||||
adaptations_file = self.cache_dir / "adaptations.json"
|
||||
if adaptations_file.exists():
|
||||
with open(adaptations_file, 'r') as f:
|
||||
data = json.load(f)
|
||||
self.adaptations = {
|
||||
k: Adaptation(**v) for k, v in data.items()
|
||||
}
|
||||
|
||||
# Load user preferences
|
||||
preferences_file = self.cache_dir / "user_preferences.json"
|
||||
if preferences_file.exists():
|
||||
with open(preferences_file, 'r') as f:
|
||||
self.user_preferences = json.load(f)
|
||||
|
||||
# Load project patterns
|
||||
patterns_file = self.cache_dir / "project_patterns.json"
|
||||
if patterns_file.exists():
|
||||
with open(patterns_file, 'r') as f:
|
||||
self.project_patterns = json.load(f)
|
||||
|
||||
except Exception as e:
|
||||
# Initialize empty data on error
|
||||
self.learning_records = []
|
||||
self.adaptations = {}
|
||||
self.user_preferences = {}
|
||||
self.project_patterns = {}
|
||||
|
||||
def record_learning_event(self,
|
||||
learning_type: LearningType,
|
||||
scope: AdaptationScope,
|
||||
context: Dict[str, Any],
|
||||
pattern: Dict[str, Any],
|
||||
effectiveness_score: float,
|
||||
confidence: float = 1.0,
|
||||
metadata: Dict[str, Any] = None) -> str:
|
||||
"""
|
||||
Record a learning event for future adaptation.
|
||||
|
||||
Args:
|
||||
learning_type: Type of learning event
|
||||
scope: Scope of the learning (session, project, user, global)
|
||||
context: Context in which the learning occurred
|
||||
pattern: Pattern or behavior that was observed
|
||||
effectiveness_score: How effective the pattern was (0.0 to 1.0)
|
||||
confidence: Confidence in the learning (0.0 to 1.0)
|
||||
metadata: Additional metadata about the learning event
|
||||
|
||||
Returns:
|
||||
Learning record ID
|
||||
"""
|
||||
if metadata is None:
|
||||
metadata = {}
|
||||
|
||||
record = LearningRecord(
|
||||
timestamp=time.time(),
|
||||
learning_type=learning_type,
|
||||
scope=scope,
|
||||
context=context,
|
||||
pattern=pattern,
|
||||
effectiveness_score=effectiveness_score,
|
||||
confidence=confidence,
|
||||
metadata=metadata
|
||||
)
|
||||
|
||||
self.learning_records.append(record)
|
||||
|
||||
# Trigger adaptation creation if pattern is significant
|
||||
if effectiveness_score > 0.7 and confidence > 0.6:
|
||||
self._create_adaptation_from_record(record)
|
||||
|
||||
# Save to cache
|
||||
self._save_learning_data()
|
||||
|
||||
return f"learning_{int(record.timestamp)}"
|
||||
|
||||
def _create_adaptation_from_record(self, record: LearningRecord):
|
||||
"""Create an adaptation from a significant learning record."""
|
||||
pattern_signature = self._generate_pattern_signature(record.pattern, record.context)
|
||||
|
||||
# Check if adaptation already exists
|
||||
if pattern_signature in self.adaptations:
|
||||
adaptation = self.adaptations[pattern_signature]
|
||||
adaptation.effectiveness_history.append(record.effectiveness_score)
|
||||
adaptation.usage_count += 1
|
||||
adaptation.last_used = record.timestamp
|
||||
|
||||
# Update confidence based on consistency
|
||||
if len(adaptation.effectiveness_history) > 1:
|
||||
consistency = 1.0 - statistics.stdev(adaptation.effectiveness_history[-5:]) / max(statistics.mean(adaptation.effectiveness_history[-5:]), 0.1)
|
||||
adaptation.confidence_score = min(consistency * record.confidence, 1.0)
|
||||
else:
|
||||
# Create new adaptation
|
||||
adaptation_id = f"adapt_{int(record.timestamp)}_{len(self.adaptations)}"
|
||||
|
||||
adaptation = Adaptation(
|
||||
adaptation_id=adaptation_id,
|
||||
pattern_signature=pattern_signature,
|
||||
trigger_conditions=self._extract_trigger_conditions(record.context),
|
||||
modifications=self._extract_modifications(record.pattern),
|
||||
effectiveness_history=[record.effectiveness_score],
|
||||
usage_count=1,
|
||||
last_used=record.timestamp,
|
||||
confidence_score=record.confidence
|
||||
)
|
||||
|
||||
self.adaptations[pattern_signature] = adaptation
|
||||
|
||||
def _generate_pattern_signature(self, pattern: Dict[str, Any], context: Dict[str, Any]) -> str:
|
||||
"""Generate a unique signature for a pattern."""
|
||||
# Create a simplified signature based on key pattern elements
|
||||
key_elements = []
|
||||
|
||||
# Pattern type
|
||||
if 'type' in pattern:
|
||||
key_elements.append(f"type:{pattern['type']}")
|
||||
|
||||
# Context elements
|
||||
if 'operation_type' in context:
|
||||
key_elements.append(f"op:{context['operation_type']}")
|
||||
|
||||
if 'complexity_score' in context:
|
||||
complexity_bucket = int(context['complexity_score'] * 10) / 10 # Round to 0.1
|
||||
key_elements.append(f"complexity:{complexity_bucket}")
|
||||
|
||||
if 'file_count' in context:
|
||||
file_bucket = min(context['file_count'], 10) # Cap at 10 for grouping
|
||||
key_elements.append(f"files:{file_bucket}")
|
||||
|
||||
# Pattern-specific elements
|
||||
for key in ['mcp_server', 'mode', 'compression_level', 'delegation_strategy']:
|
||||
if key in pattern:
|
||||
key_elements.append(f"{key}:{pattern[key]}")
|
||||
|
||||
return "_".join(sorted(key_elements))
|
||||
|
||||
def _extract_trigger_conditions(self, context: Dict[str, Any]) -> Dict[str, Any]:
|
||||
"""Extract trigger conditions from context."""
|
||||
conditions = {}
|
||||
|
||||
# Operational conditions
|
||||
for key in ['operation_type', 'complexity_score', 'file_count', 'directory_count']:
|
||||
if key in context:
|
||||
conditions[key] = context[key]
|
||||
|
||||
# Environmental conditions
|
||||
for key in ['resource_usage_percent', 'conversation_length', 'user_expertise']:
|
||||
if key in context:
|
||||
conditions[key] = context[key]
|
||||
|
||||
# Project conditions
|
||||
for key in ['project_type', 'has_tests', 'is_production']:
|
||||
if key in context:
|
||||
conditions[key] = context[key]
|
||||
|
||||
return conditions
|
||||
|
||||
def _extract_modifications(self, pattern: Dict[str, Any]) -> Dict[str, Any]:
|
||||
"""Extract modifications to apply from pattern."""
|
||||
modifications = {}
|
||||
|
||||
# MCP server preferences
|
||||
if 'mcp_server' in pattern:
|
||||
modifications['preferred_mcp_server'] = pattern['mcp_server']
|
||||
|
||||
# Mode preferences
|
||||
if 'mode' in pattern:
|
||||
modifications['preferred_mode'] = pattern['mode']
|
||||
|
||||
# Flag preferences
|
||||
if 'flags' in pattern:
|
||||
modifications['suggested_flags'] = pattern['flags']
|
||||
|
||||
# Performance optimizations
|
||||
if 'optimization' in pattern:
|
||||
modifications['optimization'] = pattern['optimization']
|
||||
|
||||
return modifications
|
||||
|
||||
def get_adaptations_for_context(self, context: Dict[str, Any]) -> List[Adaptation]:
|
||||
"""Get relevant adaptations for the current context."""
|
||||
relevant_adaptations = []
|
||||
|
||||
for adaptation in self.adaptations.values():
|
||||
if self._matches_trigger_conditions(adaptation.trigger_conditions, context):
|
||||
# Check effectiveness threshold
|
||||
if adaptation.confidence_score > 0.5 and len(adaptation.effectiveness_history) > 0:
|
||||
avg_effectiveness = statistics.mean(adaptation.effectiveness_history)
|
||||
if avg_effectiveness > 0.6:
|
||||
relevant_adaptations.append(adaptation)
|
||||
|
||||
# Sort by effectiveness and confidence
|
||||
relevant_adaptations.sort(
|
||||
key=lambda a: statistics.mean(a.effectiveness_history) * a.confidence_score,
|
||||
reverse=True
|
||||
)
|
||||
|
||||
return relevant_adaptations
|
||||
|
||||
def _matches_trigger_conditions(self, conditions: Dict[str, Any], context: Dict[str, Any]) -> bool:
|
||||
"""Check if context matches adaptation trigger conditions."""
|
||||
for key, expected_value in conditions.items():
|
||||
if key not in context:
|
||||
continue
|
||||
|
||||
context_value = context[key]
|
||||
|
||||
# Exact match for strings and booleans
|
||||
if isinstance(expected_value, (str, bool)):
|
||||
if context_value != expected_value:
|
||||
return False
|
||||
|
||||
# Range match for numbers
|
||||
elif isinstance(expected_value, (int, float)):
|
||||
tolerance = 0.1 if isinstance(expected_value, float) else 1
|
||||
if abs(context_value - expected_value) > tolerance:
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
def apply_adaptations(self,
|
||||
context: Dict[str, Any],
|
||||
base_recommendations: Dict[str, Any]) -> Dict[str, Any]:
|
||||
"""
|
||||
Apply learned adaptations to enhance recommendations.
|
||||
|
||||
Args:
|
||||
context: Current operation context
|
||||
base_recommendations: Base recommendations before adaptation
|
||||
|
||||
Returns:
|
||||
Enhanced recommendations with learned adaptations applied
|
||||
"""
|
||||
relevant_adaptations = self.get_adaptations_for_context(context)
|
||||
enhanced_recommendations = base_recommendations.copy()
|
||||
|
||||
for adaptation in relevant_adaptations:
|
||||
# Apply modifications from adaptation
|
||||
for modification_type, modification_value in adaptation.modifications.items():
|
||||
if modification_type == 'preferred_mcp_server':
|
||||
# Enhance MCP server selection
|
||||
if 'recommended_mcp_servers' not in enhanced_recommendations:
|
||||
enhanced_recommendations['recommended_mcp_servers'] = []
|
||||
|
||||
servers = enhanced_recommendations['recommended_mcp_servers']
|
||||
if modification_value not in servers:
|
||||
servers.insert(0, modification_value) # Prioritize learned preference
|
||||
|
||||
elif modification_type == 'preferred_mode':
|
||||
# Enhance mode selection
|
||||
if 'recommended_modes' not in enhanced_recommendations:
|
||||
enhanced_recommendations['recommended_modes'] = []
|
||||
|
||||
modes = enhanced_recommendations['recommended_modes']
|
||||
if modification_value not in modes:
|
||||
modes.insert(0, modification_value)
|
||||
|
||||
elif modification_type == 'suggested_flags':
|
||||
# Enhance flag suggestions
|
||||
if 'suggested_flags' not in enhanced_recommendations:
|
||||
enhanced_recommendations['suggested_flags'] = []
|
||||
|
||||
for flag in modification_value:
|
||||
if flag not in enhanced_recommendations['suggested_flags']:
|
||||
enhanced_recommendations['suggested_flags'].append(flag)
|
||||
|
||||
elif modification_type == 'optimization':
|
||||
# Apply performance optimizations
|
||||
if 'optimizations' not in enhanced_recommendations:
|
||||
enhanced_recommendations['optimizations'] = []
|
||||
enhanced_recommendations['optimizations'].append(modification_value)
|
||||
|
||||
# Update usage tracking
|
||||
adaptation.usage_count += 1
|
||||
adaptation.last_used = time.time()
|
||||
|
||||
# Add learning metadata
|
||||
enhanced_recommendations['applied_adaptations'] = [
|
||||
{
|
||||
'id': adaptation.adaptation_id,
|
||||
'confidence': adaptation.confidence_score,
|
||||
'effectiveness': statistics.mean(adaptation.effectiveness_history)
|
||||
}
|
||||
for adaptation in relevant_adaptations
|
||||
]
|
||||
|
||||
return enhanced_recommendations
|
||||
|
||||
def record_effectiveness_feedback(self,
|
||||
adaptation_ids: List[str],
|
||||
effectiveness_score: float,
|
||||
context: Dict[str, Any]):
|
||||
"""Record feedback on adaptation effectiveness."""
|
||||
for adaptation_id in adaptation_ids:
|
||||
# Find adaptation by ID
|
||||
adaptation = None
|
||||
for adapt in self.adaptations.values():
|
||||
if adapt.adaptation_id == adaptation_id:
|
||||
adaptation = adapt
|
||||
break
|
||||
|
||||
if adaptation:
|
||||
adaptation.effectiveness_history.append(effectiveness_score)
|
||||
|
||||
# Update confidence based on consistency
|
||||
if len(adaptation.effectiveness_history) > 2:
|
||||
recent_scores = adaptation.effectiveness_history[-5:]
|
||||
consistency = 1.0 - statistics.stdev(recent_scores) / max(statistics.mean(recent_scores), 0.1)
|
||||
adaptation.confidence_score = min(consistency, 1.0)
|
||||
|
||||
# Record learning event
|
||||
self.record_learning_event(
|
||||
LearningType.EFFECTIVENESS_FEEDBACK,
|
||||
AdaptationScope.USER,
|
||||
context,
|
||||
{'adaptation_id': adaptation_id},
|
||||
effectiveness_score,
|
||||
adaptation.confidence_score
|
||||
)
|
||||
|
||||
def generate_learning_insights(self) -> List[LearningInsight]:
|
||||
"""Generate insights from learning patterns."""
|
||||
insights = []
|
||||
|
||||
# User preference insights
|
||||
insights.extend(self._analyze_user_preferences())
|
||||
|
||||
# Performance pattern insights
|
||||
insights.extend(self._analyze_performance_patterns())
|
||||
|
||||
# Error pattern insights
|
||||
insights.extend(self._analyze_error_patterns())
|
||||
|
||||
# Effectiveness insights
|
||||
insights.extend(self._analyze_effectiveness_patterns())
|
||||
|
||||
return insights
|
||||
|
||||
def _analyze_user_preferences(self) -> List[LearningInsight]:
|
||||
"""Analyze user preference patterns."""
|
||||
insights = []
|
||||
|
||||
# Analyze MCP server preferences
|
||||
mcp_usage = {}
|
||||
for record in self.learning_records:
|
||||
if record.learning_type == LearningType.USER_PREFERENCE:
|
||||
server = record.pattern.get('mcp_server')
|
||||
if server:
|
||||
if server not in mcp_usage:
|
||||
mcp_usage[server] = []
|
||||
mcp_usage[server].append(record.effectiveness_score)
|
||||
|
||||
if mcp_usage:
|
||||
# Find most effective server
|
||||
server_effectiveness = {
|
||||
server: statistics.mean(scores)
|
||||
for server, scores in mcp_usage.items()
|
||||
if len(scores) >= 3
|
||||
}
|
||||
|
||||
if server_effectiveness:
|
||||
best_server = max(server_effectiveness, key=server_effectiveness.get)
|
||||
best_score = server_effectiveness[best_server]
|
||||
|
||||
if best_score > 0.8:
|
||||
insights.append(LearningInsight(
|
||||
insight_type="user_preference",
|
||||
description=f"User consistently prefers {best_server} MCP server",
|
||||
evidence=[f"Effectiveness score: {best_score:.2f}", f"Usage count: {len(mcp_usage[best_server])}"],
|
||||
recommendations=[f"Auto-suggest {best_server} for similar operations"],
|
||||
confidence=min(best_score, 1.0),
|
||||
impact_score=0.7
|
||||
))
|
||||
|
||||
return insights
|
||||
|
||||
def _analyze_performance_patterns(self) -> List[LearningInsight]:
|
||||
"""Analyze performance optimization patterns."""
|
||||
insights = []
|
||||
|
||||
# Analyze delegation effectiveness
|
||||
delegation_records = [
|
||||
r for r in self.learning_records
|
||||
if r.learning_type == LearningType.PERFORMANCE_OPTIMIZATION
|
||||
and 'delegation' in r.pattern
|
||||
]
|
||||
|
||||
if len(delegation_records) >= 5:
|
||||
avg_effectiveness = statistics.mean([r.effectiveness_score for r in delegation_records])
|
||||
|
||||
if avg_effectiveness > 0.75:
|
||||
insights.append(LearningInsight(
|
||||
insight_type="performance_optimization",
|
||||
description="Delegation consistently improves performance",
|
||||
evidence=[f"Average effectiveness: {avg_effectiveness:.2f}", f"Sample size: {len(delegation_records)}"],
|
||||
recommendations=["Enable delegation for multi-file operations", "Lower delegation threshold"],
|
||||
confidence=avg_effectiveness,
|
||||
impact_score=0.8
|
||||
))
|
||||
|
||||
return insights
|
||||
|
||||
def _analyze_error_patterns(self) -> List[LearningInsight]:
|
||||
"""Analyze error recovery patterns."""
|
||||
insights = []
|
||||
|
||||
error_records = [
|
||||
r for r in self.learning_records
|
||||
if r.learning_type == LearningType.ERROR_RECOVERY
|
||||
]
|
||||
|
||||
if len(error_records) >= 3:
|
||||
# Analyze common error contexts
|
||||
error_contexts = {}
|
||||
for record in error_records:
|
||||
context_key = record.context.get('operation_type', 'unknown')
|
||||
if context_key not in error_contexts:
|
||||
error_contexts[context_key] = []
|
||||
error_contexts[context_key].append(record)
|
||||
|
||||
for context, records in error_contexts.items():
|
||||
if len(records) >= 2:
|
||||
avg_recovery_effectiveness = statistics.mean([r.effectiveness_score for r in records])
|
||||
|
||||
insights.append(LearningInsight(
|
||||
insight_type="error_recovery",
|
||||
description=f"Error patterns identified for {context} operations",
|
||||
evidence=[f"Occurrence count: {len(records)}", f"Recovery effectiveness: {avg_recovery_effectiveness:.2f}"],
|
||||
recommendations=[f"Add proactive validation for {context} operations"],
|
||||
confidence=min(len(records) / 5, 1.0),
|
||||
impact_score=0.6
|
||||
))
|
||||
|
||||
return insights
|
||||
|
||||
def _analyze_effectiveness_patterns(self) -> List[LearningInsight]:
|
||||
"""Analyze overall effectiveness patterns."""
|
||||
insights = []
|
||||
|
||||
if len(self.learning_records) >= 10:
|
||||
recent_records = sorted(self.learning_records, key=lambda r: r.timestamp)[-10:]
|
||||
avg_effectiveness = statistics.mean([r.effectiveness_score for r in recent_records])
|
||||
|
||||
if avg_effectiveness > 0.8:
|
||||
insights.append(LearningInsight(
|
||||
insight_type="effectiveness_trend",
|
||||
description="SuperClaude effectiveness is high and improving",
|
||||
evidence=[f"Recent average effectiveness: {avg_effectiveness:.2f}"],
|
||||
recommendations=["Continue current learning patterns", "Consider expanding adaptation scope"],
|
||||
confidence=avg_effectiveness,
|
||||
impact_score=0.9
|
||||
))
|
||||
elif avg_effectiveness < 0.6:
|
||||
insights.append(LearningInsight(
|
||||
insight_type="effectiveness_concern",
|
||||
description="SuperClaude effectiveness below optimal",
|
||||
evidence=[f"Recent average effectiveness: {avg_effectiveness:.2f}"],
|
||||
recommendations=["Review recent adaptations", "Gather more user feedback", "Adjust learning thresholds"],
|
||||
confidence=1.0 - avg_effectiveness,
|
||||
impact_score=0.8
|
||||
))
|
||||
|
||||
return insights
|
||||
|
||||
def _save_learning_data(self):
|
||||
"""Save learning data to cache files."""
|
||||
try:
|
||||
# Save learning records
|
||||
records_file = self.cache_dir / "learning_records.json"
|
||||
with open(records_file, 'w') as f:
|
||||
json.dump([asdict(record) for record in self.learning_records], f, indent=2)
|
||||
|
||||
# Save adaptations
|
||||
adaptations_file = self.cache_dir / "adaptations.json"
|
||||
with open(adaptations_file, 'w') as f:
|
||||
json.dump({k: asdict(v) for k, v in self.adaptations.items()}, f, indent=2)
|
||||
|
||||
# Save user preferences
|
||||
preferences_file = self.cache_dir / "user_preferences.json"
|
||||
with open(preferences_file, 'w') as f:
|
||||
json.dump(self.user_preferences, f, indent=2)
|
||||
|
||||
# Save project patterns
|
||||
patterns_file = self.cache_dir / "project_patterns.json"
|
||||
with open(patterns_file, 'w') as f:
|
||||
json.dump(self.project_patterns, f, indent=2)
|
||||
|
||||
except Exception as e:
|
||||
pass # Silent fail for cache operations
|
||||
|
||||
def cleanup_old_data(self, max_age_days: int = 30):
    """Drop learning data older than *max_age_days* to prevent cache bloat.

    Learning records past the cutoff are discarded outright. Adaptations
    survive the cutoff only if they were used recently or have a meaningful
    usage history (more than 5 uses). The pruned state is then persisted.
    """
    seconds_per_day = 24 * 60 * 60
    cutoff = time.time() - max_age_days * seconds_per_day

    # Keep only learning records newer than the cutoff.
    fresh_records = []
    for record in self.learning_records:
        if record.timestamp > cutoff:
            fresh_records.append(record)
    self.learning_records = fresh_records

    # An adaptation is worth keeping if recently used OR well established.
    def _worth_keeping(adaptation):
        return adaptation.last_used > cutoff or adaptation.usage_count > 5

    self.adaptations = {
        name: adaptation
        for name, adaptation in self.adaptations.items()
        if _worth_keeping(adaptation)
    }

    # Persist the pruned state back to the cache files.
    self._save_learning_data()
|
||||
275
Framework-Hooks/hooks/shared/logger.py
Normal file
275
Framework-Hooks/hooks/shared/logger.py
Normal file
@@ -0,0 +1,275 @@
|
||||
"""
|
||||
Simple logger for SuperClaude-Lite hooks.
|
||||
|
||||
Provides structured logging of hook events for later analysis.
|
||||
Focuses on capturing hook lifecycle, decisions, and errors in a
|
||||
structured format without any analysis or complex features.
|
||||
"""
|
||||
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import time
|
||||
from datetime import datetime, timezone, timedelta
|
||||
from pathlib import Path
|
||||
from typing import Optional, Dict, Any
|
||||
import uuid
|
||||
import glob
|
||||
|
||||
# Import configuration loader
|
||||
try:
|
||||
from .yaml_loader import UnifiedConfigLoader
|
||||
except ImportError:
|
||||
# Fallback if yaml_loader is not available
|
||||
UnifiedConfigLoader = None
|
||||
|
||||
|
||||
class HookLogger:
    """Structured JSON-lines logger for SuperClaude-Lite hooks.

    Writes one JSON object per line to a daily log file so hook lifecycle
    events, decisions, and errors can be analysed later. Behavior (log
    directory, retention, per-hook and per-event filtering) is driven by
    the 'logging' YAML configuration; built-in defaults apply when no
    configuration is available.
    """

    def __init__(self, log_dir: str = None, retention_days: int = None):
        """
        Initialize the logger.

        Args:
            log_dir: Directory to store log files. Defaults to cache/logs/
                under the SuperClaude-Lite root.
            retention_days: Number of days to keep log files. Defaults to 30
                (or the configured value).
        """
        # Load configuration first; it decides whether logging is on at all.
        self.config = self._load_config()

        # Global kill switch: a disabled logger does no file I/O and skips
        # the rest of initialization entirely.
        if not self.config.get('logging', {}).get('enabled', True):
            self.enabled = False
            return

        self.enabled = True

        # Set up log directory (explicit argument wins over configuration).
        if log_dir is None:
            # SuperClaude-Lite root directory is 2 levels up from shared/.
            root_dir = Path(__file__).parent.parent.parent
            log_dir_config = self.config.get('logging', {}).get('file_settings', {}).get('log_directory', 'cache/logs')
            log_dir = root_dir / log_dir_config

        self.log_dir = Path(log_dir)
        self.log_dir.mkdir(parents=True, exist_ok=True)

        # Log retention settings (explicit argument wins over configuration).
        if retention_days is None:
            retention_days = self.config.get('logging', {}).get('file_settings', {}).get('retention_days', 30)
        self.retention_days = retention_days

        # Short session ID for correlating events emitted within one process.
        self.session_id = str(uuid.uuid4())[:8]

        # Set up the underlying Python logger.
        self._setup_logger()

        # Clean up old logs on initialization.
        self._cleanup_old_logs()

    @staticmethod
    def _default_config() -> Dict[str, Any]:
        """Built-in fallback configuration used when YAML config is unavailable.

        Centralized here so the two fallback paths in _load_config cannot
        drift apart (previously the same dict was duplicated inline twice).
        """
        return {
            'logging': {
                'enabled': True,
                'level': 'INFO',
                'file_settings': {
                    'log_directory': 'cache/logs',
                    'retention_days': 30
                }
            }
        }

    def _load_config(self) -> Dict[str, Any]:
        """Load logging configuration from YAML file, falling back to defaults."""
        if UnifiedConfigLoader is None:
            # yaml_loader could not be imported; use built-in defaults.
            return self._default_config()

        try:
            # Get project root
            root_dir = Path(__file__).parent.parent.parent
            loader = UnifiedConfigLoader(root_dir)

            # Load logging configuration; an empty file yields {} (not the
            # defaults) so explicit-but-empty config is respected as-is.
            config = loader.load_yaml('logging')
            return config or {}
        except Exception:
            # Any config problem degrades to defaults rather than breaking hooks.
            return self._default_config()

    def _setup_logger(self):
        """Set up the Python logger that emits one JSON event per line."""
        self.logger = logging.getLogger("superclaude_lite_hooks")

        # Set log level from configuration; unknown names fall back to INFO.
        log_level_str = self.config.get('logging', {}).get('level', 'INFO').upper()
        log_level = getattr(logging, log_level_str, logging.INFO)
        self.logger.setLevel(log_level)

        # Remove existing handlers to avoid duplicates on re-initialization.
        self.logger.handlers.clear()

        # Create one log file per day.
        today = datetime.now().strftime("%Y-%m-%d")
        log_file = self.log_dir / f"superclaude-lite-{today}.log"

        # File handler
        handler = logging.FileHandler(log_file, mode='a', encoding='utf-8')
        # Fix: honour the configured level. Previously this was hard-coded
        # to logging.INFO, so a configured DEBUG level was silently dropped
        # at the handler even though the logger accepted it.
        handler.setLevel(log_level)

        # Events are already serialized JSON strings; emit them verbatim.
        formatter = logging.Formatter('%(message)s')
        handler.setFormatter(formatter)

        self.logger.addHandler(handler)

    def _create_event(self, event_type: str, hook_name: str, data: Dict[str, Any] = None) -> Dict[str, Any]:
        """Create a structured event dict with timestamp/session/hook fields."""
        event = {
            "timestamp": datetime.now(timezone.utc).isoformat(),
            "session": self.session_id,
            "hook": hook_name,
            "event": event_type
        }

        if data:
            event["data"] = data

        return event

    def _should_log_event(self, hook_name: str, event_type: str) -> bool:
        """Check if this event should be logged based on configuration."""
        if not self.enabled:
            return False

        # Per-hook kill switch (hook_configuration.<hook>.enabled).
        hook_config = self.config.get('hook_configuration', {}).get(hook_name, {})
        if not hook_config.get('enabled', True):
            return False

        # Map event types onto the hook_logging config toggles; unknown
        # event types are treated as lifecycle events.
        hook_logging = self.config.get('logging', {}).get('hook_logging', {})
        event_mapping = {
            'start': 'log_lifecycle',
            'end': 'log_lifecycle',
            'decision': 'log_decisions',
            'error': 'log_errors'
        }

        config_key = event_mapping.get(event_type, 'log_lifecycle')
        return hook_logging.get(config_key, True)

    def log_hook_start(self, hook_name: str, context: Optional[Dict[str, Any]] = None):
        """Log the start of a hook execution."""
        if not self._should_log_event(hook_name, 'start'):
            return

        event = self._create_event("start", hook_name, context)
        self.logger.info(json.dumps(event))

    def log_hook_end(self, hook_name: str, duration_ms: int, success: bool, result: Optional[Dict[str, Any]] = None):
        """Log the end of a hook execution, with duration and outcome."""
        if not self._should_log_event(hook_name, 'end'):
            return

        data = {
            "duration_ms": duration_ms,
            "success": success
        }
        if result:
            data["result"] = result

        event = self._create_event("end", hook_name, data)
        self.logger.info(json.dumps(event))

    def log_decision(self, hook_name: str, decision_type: str, choice: str, reason: str):
        """Log a decision made by a hook (what was chosen and why)."""
        if not self._should_log_event(hook_name, 'decision'):
            return

        data = {
            "type": decision_type,
            "choice": choice,
            "reason": reason
        }
        event = self._create_event("decision", hook_name, data)
        self.logger.info(json.dumps(event))

    def log_error(self, hook_name: str, error: str, context: Optional[Dict[str, Any]] = None):
        """Log an error that occurred in a hook."""
        if not self._should_log_event(hook_name, 'error'):
            return

        data = {
            "error": error
        }
        if context:
            data["context"] = context

        event = self._create_event("error", hook_name, data)
        self.logger.info(json.dumps(event))

    def _cleanup_old_logs(self):
        """Remove log files older than retention_days (<= 0 disables cleanup)."""
        if self.retention_days <= 0:
            return

        cutoff_date = datetime.now() - timedelta(days=self.retention_days)

        # Find all daily log files written by this logger.
        log_pattern = self.log_dir / "superclaude-lite-*.log"
        for log_file in glob.glob(str(log_pattern)):
            try:
                # The date is embedded in the filename: superclaude-lite-YYYY-MM-DD.log
                filename = os.path.basename(log_file)
                date_str = filename.replace("superclaude-lite-", "").replace(".log", "")
                file_date = datetime.strptime(date_str, "%Y-%m-%d")

                # Remove if older than cutoff
                if file_date < cutoff_date:
                    os.remove(log_file)

            except (ValueError, OSError):
                # Skip files that don't match expected format or can't be removed
                continue
|
||||
|
||||
|
||||
# Global logger instance, created lazily on first use via get_logger().
_logger = None


def get_logger() -> HookLogger:
    """Get the global logger instance, creating it on first call."""
    global _logger
    if _logger is None:
        _logger = HookLogger()
    return _logger


# Convenience functions for easy hook integration.
# Each one simply forwards to the shared global HookLogger instance so
# hook scripts can log without managing a logger object themselves.
def log_hook_start(hook_name: str, context: Optional[Dict[str, Any]] = None):
    """Log the start of a hook execution."""
    get_logger().log_hook_start(hook_name, context)


def log_hook_end(hook_name: str, duration_ms: int, success: bool, result: Optional[Dict[str, Any]] = None):
    """Log the end of a hook execution."""
    get_logger().log_hook_end(hook_name, duration_ms, success, result)


def log_decision(hook_name: str, decision_type: str, choice: str, reason: str):
    """Log a decision made by a hook."""
    get_logger().log_decision(hook_name, decision_type, choice, reason)


def log_error(hook_name: str, error: str, context: Optional[Dict[str, Any]] = None):
    """Log an error that occurred in a hook."""
    get_logger().log_error(hook_name, error, context)
|
||||
478
Framework-Hooks/hooks/shared/mcp_intelligence.py
Normal file
478
Framework-Hooks/hooks/shared/mcp_intelligence.py
Normal file
@@ -0,0 +1,478 @@
|
||||
"""
|
||||
MCP Intelligence Engine for SuperClaude-Lite
|
||||
|
||||
Intelligent MCP server activation, coordination, and optimization based on
|
||||
ORCHESTRATOR.md patterns and real-time context analysis.
|
||||
"""
|
||||
|
||||
import json
|
||||
import time
|
||||
from typing import Dict, Any, List, Optional, Set, Tuple
|
||||
from dataclasses import dataclass
|
||||
from enum import Enum
|
||||
|
||||
from yaml_loader import config_loader
|
||||
from pattern_detection import PatternDetector, PatternMatch
|
||||
|
||||
|
||||
class MCPServerState(Enum):
    """States of MCP server availability."""
    AVAILABLE = "available"      # server can be activated / is active
    UNAVAILABLE = "unavailable"  # server is known to be offline
    LOADING = "loading"          # activation currently in progress
    ERROR = "error"              # last activation attempt raised an error
|
||||
|
||||
|
||||
@dataclass
class MCPServerCapability:
    """Capability definition for an MCP server."""
    server_name: str              # canonical server identifier
    primary_functions: List[str]  # what this server is best suited for
    performance_profile: str  # lightweight, standard, intensive
    activation_cost_ms: int       # expected activation latency in milliseconds
    token_efficiency: float  # 0.0 to 1.0
    quality_impact: float  # 0.0 to 1.0
|
||||
|
||||
|
||||
@dataclass
class MCPActivationPlan:
    """Plan for MCP server activation."""
    servers_to_activate: List[str]      # final optimized server set
    activation_order: List[str]         # sequence in which to activate them
    estimated_cost_ms: int              # summed expected activation latency
    efficiency_gains: Dict[str, float]  # per-server expected efficiency gain
    fallback_strategy: Dict[str, str]   # failed server -> fallback server or 'native_tools'
    coordination_strategy: str          # how the activated servers cooperate
|
||||
|
||||
|
||||
class MCPIntelligence:
    """
    Intelligent MCP server management and coordination.

    Implements ORCHESTRATOR.md patterns for:
    - Smart server selection based on context
    - Performance-optimized activation sequences
    - Fallback strategies for server failures
    - Cross-server coordination and caching
    - Real-time adaptation based on effectiveness
    """

    # Capability profile per known server:
    # (primary_functions, performance_profile, activation_cost_ms,
    #  token_efficiency, quality_impact)
    _SERVER_PROFILES = {
        'context7': (['library_docs', 'framework_patterns', 'best_practices'],
                     'standard', 150, 0.8, 0.9),
        'sequential': (['complex_analysis', 'multi_step_reasoning', 'debugging'],
                       'intensive', 200, 0.6, 0.95),
        'magic': (['ui_components', 'design_systems', 'frontend_generation'],
                  'standard', 120, 0.85, 0.9),
        'playwright': (['e2e_testing', 'browser_automation', 'performance_testing'],
                       'intensive', 300, 0.7, 0.85),
        'morphllm': (['intelligent_editing', 'pattern_application', 'fast_apply'],
                     'lightweight', 80, 0.9, 0.8),
        'serena': (['semantic_analysis', 'project_context', 'memory_management'],
                   'standard', 100, 0.75, 0.95),
    }

    def __init__(self):
        self.pattern_detector = PatternDetector()
        self.server_capabilities = self._load_server_capabilities()
        self.server_states = self._initialize_server_states()
        self.activation_history = []   # one entry appended per executed plan
        self.performance_metrics = {}  # per-server activation timing stats

    def _load_server_capabilities(self) -> Dict[str, MCPServerCapability]:
        """Load MCP server capabilities.

        Capability values are built from the _SERVER_PROFILES table.
        NOTE(review): the orchestrator config is loaded but its
        'mcp_servers' section is not consulted yet (the original code read
        it into an unused variable); per-server overrides could be merged
        here in the future.
        """
        config_loader.load_config('orchestrator')

        return {
            name: MCPServerCapability(
                server_name=name,
                primary_functions=functions,
                performance_profile=profile,
                activation_cost_ms=cost_ms,
                token_efficiency=token_eff,
                quality_impact=quality,
            )
            for name, (functions, profile, cost_ms, token_eff, quality)
            in self._SERVER_PROFILES.items()
        }

    def _initialize_server_states(self) -> Dict[str, MCPServerState]:
        """Initialize server state tracking (every known server starts AVAILABLE)."""
        return {
            server: MCPServerState.AVAILABLE
            for server in self.server_capabilities.keys()
        }

    def create_activation_plan(self,
                               user_input: str,
                               context: Dict[str, Any],
                               operation_data: Dict[str, Any]) -> MCPActivationPlan:
        """
        Create intelligent MCP server activation plan.

        Args:
            user_input: User's request or command
            context: Session and environment context
            operation_data: Information about the planned operation

        Returns:
            MCPActivationPlan with optimized server selection and coordination
        """
        # Detect patterns to determine which servers the request needs.
        detection_result = self.pattern_detector.detect_patterns(
            user_input, context, operation_data
        )

        # Extract recommended servers from pattern detection.
        recommended_servers = detection_result.recommended_mcp_servers

        # Apply intelligent selection based on context (complexity, resources).
        optimized_servers = self._optimize_server_selection(
            recommended_servers, context, operation_data
        )

        # Determine activation order for optimal performance.
        activation_order = self._calculate_activation_order(optimized_servers, context)

        # Calculate estimated costs and gains.
        estimated_cost = self._calculate_activation_cost(optimized_servers)
        efficiency_gains = self._calculate_efficiency_gains(optimized_servers, operation_data)

        # Create fallback strategy for server failures.
        fallback_strategy = self._create_fallback_strategy(optimized_servers)

        # Determine how the selected servers should coordinate.
        coordination_strategy = self._determine_coordination_strategy(
            optimized_servers, operation_data
        )

        return MCPActivationPlan(
            servers_to_activate=optimized_servers,
            activation_order=activation_order,
            estimated_cost_ms=estimated_cost,
            efficiency_gains=efficiency_gains,
            fallback_strategy=fallback_strategy,
            coordination_strategy=coordination_strategy
        )

    def _optimize_server_selection(self,
                                   recommended_servers: List[str],
                                   context: Dict[str, Any],
                                   operation_data: Dict[str, Any]) -> List[str]:
        """Apply intelligent optimization to server selection.

        Resolves the Morphllm-vs-Serena choice by operation complexity,
        drops intensive servers under resource pressure, and adds
        Sequential/Context7 when the operation type calls for them.
        """
        optimized = set(recommended_servers)

        # Morphllm vs Serena intelligence selection: Serena for large or
        # complex operations, Morphllm for small/simple ones.
        file_count = operation_data.get('file_count', 1)
        complexity_score = operation_data.get('complexity_score', 0.0)

        if 'morphllm' in optimized and 'serena' in optimized:
            # Both recommended: keep only the more appropriate one.
            if file_count > 10 or complexity_score > 0.6:
                optimized.remove('morphllm')  # Use Serena for complex operations
            else:
                optimized.remove('serena')  # Use Morphllm for efficient operations
        elif file_count > 10 or complexity_score > 0.6:
            # Auto-add Serena for complex operations
            optimized.add('serena')
            optimized.discard('morphllm')
        elif file_count <= 10 and complexity_score <= 0.6:
            # Auto-add Morphllm for simple operations
            optimized.add('morphllm')
            optimized.discard('serena')

        # Resource constraint optimization: shed intensive servers when
        # the session is already near resource limits.
        resource_usage = context.get('resource_usage_percent', 0)
        if resource_usage > 85:
            intensive_servers = {
                name for name, cap in self.server_capabilities.items()
                if cap.performance_profile == 'intensive'
            }
            optimized -= intensive_servers

        # Performance optimization based on operation type.
        operation_type = operation_data.get('operation_type', '')
        if operation_type in ['read', 'analyze'] and 'sequential' not in optimized:
            # Add Sequential for analysis operations
            optimized.add('sequential')

        # Auto-add Context7 if external libraries detected
        if operation_data.get('has_external_dependencies', False):
            optimized.add('context7')

        return list(optimized)

    def _calculate_activation_order(self, servers: List[str], context: Dict[str, Any]) -> List[str]:
        """Calculate optimal activation order for performance.

        Serena goes first if present (it provides project context for the
        others), Context7 second (documentation context), and the remaining
        servers follow cheapest-first by activation cost.

        (The original version also built and sorted an unused cost list
        before this ordering; that dead code has been removed.)
        """
        if not servers:
            return []

        ordered = []

        # 1. Serena first if present (provides context for others)
        if 'serena' in servers:
            ordered.append('serena')
            servers = [s for s in servers if s != 'serena']

        # 2. Context7 early for documentation context
        if 'context7' in servers:
            ordered.append('context7')
            servers = [s for s in servers if s != 'context7']

        # 3. Remaining servers by ascending activation cost (stable sort).
        ordered.extend(sorted(
            servers,
            key=lambda server: self.server_capabilities[server].activation_cost_ms
        ))

        return ordered

    def _calculate_activation_cost(self, servers: List[str]) -> int:
        """Calculate total activation cost in milliseconds (unknown servers ignored)."""
        return sum(
            self.server_capabilities[server].activation_cost_ms
            for server in servers
            if server in self.server_capabilities
        )

    def _calculate_efficiency_gains(self, servers: List[str], operation_data: Dict[str, Any]) -> Dict[str, float]:
        """Calculate expected efficiency gains from server activation.

        Base gain is token_efficiency * quality_impact, boosted for
        server/operation combinations where the server is known to excel.
        """
        gains = {}

        for server in servers:
            if server not in self.server_capabilities:
                continue

            capability = self.server_capabilities[server]

            # Base efficiency gain
            base_gain = capability.token_efficiency * capability.quality_impact

            # Context-specific adjustments
            if server == 'morphllm' and operation_data.get('file_count', 1) <= 5:
                gains[server] = base_gain * 1.2  # Extra efficient for small operations
            elif server == 'serena' and operation_data.get('complexity_score', 0) > 0.6:
                gains[server] = base_gain * 1.3  # Extra valuable for complex operations
            elif server == 'sequential' and 'debug' in operation_data.get('operation_type', ''):
                gains[server] = base_gain * 1.4  # Extra valuable for debugging
            else:
                gains[server] = base_gain

        return gains

    def _create_fallback_strategy(self, servers: List[str]) -> Dict[str, str]:
        """Create fallback strategy for server failures.

        Each selected server maps either to a substitute server (only when
        that substitute is not already part of the plan) or to
        'native_tools', meaning fall back to Claude's built-in tooling.
        """
        fallbacks = {}

        # Define fallback mappings
        fallback_map = {
            'morphllm': 'serena',      # Serena can handle editing
            'serena': 'morphllm',      # Morphllm can handle simple edits
            'sequential': 'context7',  # Context7 for documentation-based analysis
            'context7': 'sequential',  # Sequential for complex analysis
            'magic': 'morphllm',       # Morphllm for component generation
            'playwright': 'sequential' # Sequential for test planning
        }

        for server in servers:
            fallback = fallback_map.get(server)
            if fallback and fallback not in servers:
                fallbacks[server] = fallback
            else:
                fallbacks[server] = 'native_tools'  # Fall back to native Claude tools

        return fallbacks

    def _determine_coordination_strategy(self, servers: List[str], operation_data: Dict[str, Any]) -> str:
        """Determine how the selected servers should coordinate.

        Precedence: single server < sequential-led complex analysis <
        serena-led multi-file work < parallel (3+ servers) < collaborative.
        """
        if len(servers) <= 1:
            return 'single_server'

        # Sequential coordination for complex analysis
        if 'sequential' in servers and operation_data.get('complexity_score', 0) > 0.6:
            return 'sequential_lead'

        # Serena coordination for multi-file operations
        if 'serena' in servers and operation_data.get('file_count', 1) > 5:
            return 'serena_lead'

        # Parallel coordination for independent operations
        if len(servers) >= 3:
            return 'parallel_with_sync'

        return 'collaborative'

    def execute_activation_plan(self, plan: MCPActivationPlan, context: Dict[str, Any]) -> Dict[str, Any]:
        """
        Execute MCP server activation plan with error handling and performance tracking.

        Args:
            plan: MCPActivationPlan to execute
            context: Current session context

        Returns:
            Execution results with performance metrics and activated servers
        """
        start_time = time.time()
        activated_servers = []
        failed_servers = []
        fallback_activations = []

        for server in plan.activation_order:
            try:
                # Servers already known to be offline go straight to fallback.
                if self.server_states.get(server) == MCPServerState.UNAVAILABLE:
                    failed_servers.append(server)
                    self._handle_server_fallback(server, plan, fallback_activations)
                    continue

                # Activate server (simulated - real implementation would call MCP).
                # The original also computed a never-used "actual_cost" from a
                # hash-based variance formula; that dead code has been removed.
                self.server_states[server] = MCPServerState.LOADING
                activation_start = time.time()

                expected_cost = self.server_capabilities[server].activation_cost_ms

                self.server_states[server] = MCPServerState.AVAILABLE
                activated_servers.append(server)

                # Track wall-clock performance per server.
                activation_time = (time.time() - activation_start) * 1000
                self.performance_metrics[server] = {
                    'last_activation_ms': activation_time,
                    'expected_ms': expected_cost,
                    'efficiency_ratio': expected_cost / max(activation_time, 1)
                }

            except Exception:
                # Broad catch is intentional: one server failing must not
                # abort the rest of the plan; fall back instead.
                failed_servers.append(server)
                self.server_states[server] = MCPServerState.ERROR
                self._handle_server_fallback(server, plan, fallback_activations)

        total_time = (time.time() - start_time) * 1000

        # Update activation history for later optimization analysis.
        self.activation_history.append({
            'timestamp': time.time(),
            'plan': plan,
            'activated': activated_servers,
            'failed': failed_servers,
            'fallbacks': fallback_activations,
            'total_time_ms': total_time
        })

        return {
            'activated_servers': activated_servers,
            'failed_servers': failed_servers,
            'fallback_activations': fallback_activations,
            'total_activation_time_ms': total_time,
            'coordination_strategy': plan.coordination_strategy,
            'performance_metrics': self.performance_metrics
        }

    def _handle_server_fallback(self, failed_server: str, plan: MCPActivationPlan, fallback_activations: List[str]):
        """Handle server activation failure with the plan's fallback strategy.

        Records a "failed->fallback" marker when a viable fallback server is
        available; actual fallback activation is left to the real MCP layer.
        """
        fallback = plan.fallback_strategy.get(failed_server)

        if fallback and fallback != 'native_tools' and fallback not in plan.servers_to_activate:
            # Try to activate fallback server
            if self.server_states.get(fallback) == MCPServerState.AVAILABLE:
                fallback_activations.append(f"{failed_server}->{fallback}")
                # In real implementation, would activate fallback server

    def get_optimization_recommendations(self, context: Dict[str, Any]) -> Dict[str, Any]:
        """Get recommendations for optimizing MCP server usage.

        Analyses the last few activation runs for repeated failures and
        slow activations, plus current resource pressure.
        """
        recommendations = []

        # Analyze activation history for patterns (needs enough samples).
        if len(self.activation_history) >= 5:
            recent_activations = self.activation_history[-5:]

            # Check for frequently failing servers
            failed_counts = {}
            for activation in recent_activations:
                for failed in activation['failed']:
                    failed_counts[failed] = failed_counts.get(failed, 0) + 1

            for server, count in failed_counts.items():
                if count >= 3:
                    recommendations.append(f"Server {server} failing frequently - consider fallback strategy")

            # Check for performance issues.
            # NOTE(review): this keys the averages by activated-server count,
            # so runs with the same count overwrite each other — preserved
            # as-is to keep behavior identical; verify intent with the author.
            avg_times = {}
            for activation in recent_activations:
                total_time = activation['total_time_ms']
                server_count = len(activation['activated'])
                if server_count > 0:
                    avg_time_per_server = total_time / server_count
                    avg_times[len(activation['activated'])] = avg_time_per_server

            if avg_times and max(avg_times.values()) > 500:
                recommendations.append("Consider reducing concurrent server activations for better performance")

        # Resource usage recommendations
        resource_usage = context.get('resource_usage_percent', 0)
        if resource_usage > 80:
            recommendations.append("High resource usage - consider lightweight servers only")

        return {
            'recommendations': recommendations,
            'performance_metrics': self.performance_metrics,
            'server_states': {k: v.value for k, v in self.server_states.items()},
            'efficiency_score': self._calculate_overall_efficiency()
        }

    def _calculate_overall_efficiency(self) -> float:
        """Calculate overall MCP system efficiency (1.0 when no data yet)."""
        if not self.performance_metrics:
            return 1.0

        efficiency_scores = []
        for server, metrics in self.performance_metrics.items():
            efficiency_ratio = metrics.get('efficiency_ratio', 1.0)
            efficiency_scores.append(min(efficiency_ratio, 2.0))  # Cap at 200% efficiency

        return sum(efficiency_scores) / len(efficiency_scores) if efficiency_scores else 1.0
|
||||
459
Framework-Hooks/hooks/shared/pattern_detection.py
Normal file
459
Framework-Hooks/hooks/shared/pattern_detection.py
Normal file
@@ -0,0 +1,459 @@
|
||||
"""
|
||||
Pattern Detection Engine for SuperClaude-Lite
|
||||
|
||||
Intelligent pattern detection for automatic mode activation,
|
||||
MCP server selection, and operational optimization.
|
||||
"""
|
||||
|
||||
import re
|
||||
import json
|
||||
from typing import Dict, Any, List, Set, Optional, Tuple
|
||||
from dataclasses import dataclass
|
||||
from enum import Enum
|
||||
|
||||
from yaml_loader import config_loader
|
||||
|
||||
|
||||
class PatternType(Enum):
    """Types of patterns we can detect."""
    MODE_TRIGGER = "mode_trigger"                   # activates a SuperClaude mode
    MCP_SERVER = "mcp_server"                       # indicates an MCP server is needed
    OPERATION_TYPE = "operation_type"               # classifies the kind of operation
    COMPLEXITY_INDICATOR = "complexity_indicator"   # signals operation complexity
    PERSONA_HINT = "persona_hint"                   # suggests a persona to adopt
    PERFORMANCE_HINT = "performance_hint"           # suggests a performance concern
|
||||
|
||||
|
||||
@dataclass
class PatternMatch:
    """A detected pattern match."""
    pattern_type: PatternType   # which category of pattern matched
    pattern_name: str           # identifier of the specific pattern
    confidence: float  # 0.0 to 1.0
    matched_text: str           # the input fragment that triggered the match
    suggestions: List[str]      # follow-up suggestions derived from this match
    metadata: Dict[str, Any]    # pattern-specific extra data
|
||||
|
||||
|
||||
@dataclass
class DetectionResult:
    """Result of pattern detection analysis."""
    matches: List[PatternMatch]         # all individual pattern matches found
    recommended_modes: List[str]        # SuperClaude modes to activate
    recommended_mcp_servers: List[str]  # MCP servers to activate
    suggested_flags: List[str]          # optimization flags to apply
    complexity_score: float             # aggregate operation complexity
    confidence_score: float             # overall confidence in the detection
|
||||
|
||||
|
||||
class PatternDetector:
    """
    Intelligent pattern detection system.

    Analyzes user input, context, and operation patterns to determine:
    - Which SuperClaude modes should be activated
    - Which MCP servers are needed
    - What optimization flags to apply
    - Complexity and performance considerations
    """

    def __init__(self):
        # Mode trigger patterns come from the 'modes' config; MCP routing
        # patterns come from the 'orchestrator' config.
        self.patterns = config_loader.load_config('modes')
        self.mcp_patterns = config_loader.load_config('orchestrator')
        # Pre-compile all regexes once so per-request detection stays fast.
        self._compile_patterns()
|
||||
|
||||
    def _compile_patterns(self):
        """Compile regex patterns for efficient matching.

        Populates ``self.compiled_patterns`` with keys ``"mode_<name>"`` and
        ``"mcp_<server>"``, each mapping to a list of case-insensitive
        compiled regexes taken from the loaded configuration.
        """
        self.compiled_patterns = {}

        # Mode detection patterns (modes config: mode_detection.<mode>.trigger_patterns)
        for mode_name, mode_config in self.patterns.get('mode_detection', {}).items():
            patterns = mode_config.get('trigger_patterns', [])
            self.compiled_patterns[f"mode_{mode_name}"] = [
                re.compile(pattern, re.IGNORECASE) for pattern in patterns
            ]

        # MCP server patterns (orchestrator config: routing_patterns.<server>.triggers)
        for server_name, server_config in self.mcp_patterns.get('routing_patterns', {}).items():
            triggers = server_config.get('triggers', [])
            self.compiled_patterns[f"mcp_{server_name}"] = [
                re.compile(trigger, re.IGNORECASE) for trigger in triggers
            ]
|
||||
|
||||
def detect_patterns(self,
|
||||
user_input: str,
|
||||
context: Dict[str, Any],
|
||||
operation_data: Dict[str, Any]) -> DetectionResult:
|
||||
"""
|
||||
Perform comprehensive pattern detection.
|
||||
|
||||
Args:
|
||||
user_input: User's request or command
|
||||
context: Session and environment context
|
||||
operation_data: Information about the planned operation
|
||||
|
||||
Returns:
|
||||
DetectionResult with all detected patterns and recommendations
|
||||
"""
|
||||
matches = []
|
||||
|
||||
# Detect mode triggers
|
||||
mode_matches = self._detect_mode_patterns(user_input, context)
|
||||
matches.extend(mode_matches)
|
||||
|
||||
# Detect MCP server needs
|
||||
mcp_matches = self._detect_mcp_patterns(user_input, context, operation_data)
|
||||
matches.extend(mcp_matches)
|
||||
|
||||
# Detect complexity indicators
|
||||
complexity_matches = self._detect_complexity_patterns(user_input, operation_data)
|
||||
matches.extend(complexity_matches)
|
||||
|
||||
# Detect persona hints
|
||||
persona_matches = self._detect_persona_patterns(user_input, context)
|
||||
matches.extend(persona_matches)
|
||||
|
||||
# Calculate overall scores
|
||||
complexity_score = self._calculate_complexity_score(matches, operation_data)
|
||||
confidence_score = self._calculate_confidence_score(matches)
|
||||
|
||||
# Generate recommendations
|
||||
recommended_modes = self._get_recommended_modes(matches, complexity_score)
|
||||
recommended_mcp_servers = self._get_recommended_mcp_servers(matches, context)
|
||||
suggested_flags = self._get_suggested_flags(matches, complexity_score, context)
|
||||
|
||||
return DetectionResult(
|
||||
matches=matches,
|
||||
recommended_modes=recommended_modes,
|
||||
recommended_mcp_servers=recommended_mcp_servers,
|
||||
suggested_flags=suggested_flags,
|
||||
complexity_score=complexity_score,
|
||||
confidence_score=confidence_score
|
||||
)
|
||||
|
||||
def _detect_mode_patterns(self, user_input: str, context: Dict[str, Any]) -> List[PatternMatch]:
|
||||
"""Detect which SuperClaude modes should be activated."""
|
||||
matches = []
|
||||
|
||||
# Brainstorming mode detection
|
||||
brainstorm_indicators = [
|
||||
r"(?:i want to|thinking about|not sure|maybe|could we)\s+(?:build|create|make)",
|
||||
r"(?:brainstorm|explore|figure out|discuss)",
|
||||
r"(?:new project|startup idea|feature concept)",
|
||||
r"(?:ambiguous|uncertain|unclear)\s+(?:requirements|needs)"
|
||||
]
|
||||
|
||||
for pattern in brainstorm_indicators:
|
||||
if re.search(pattern, user_input, re.IGNORECASE):
|
||||
matches.append(PatternMatch(
|
||||
pattern_type=PatternType.MODE_TRIGGER,
|
||||
pattern_name="brainstorming",
|
||||
confidence=0.8,
|
||||
matched_text=re.search(pattern, user_input, re.IGNORECASE).group(),
|
||||
suggestions=["Enable brainstorming mode for requirements discovery"],
|
||||
metadata={"mode": "brainstorming", "auto_activate": True}
|
||||
))
|
||||
break
|
||||
|
||||
# Task management mode detection
|
||||
task_management_indicators = [
|
||||
r"(?:multiple|many|several)\s+(?:tasks|files|components)",
|
||||
r"(?:build|implement|create)\s+(?:system|feature|application)",
|
||||
r"(?:complex|comprehensive|large-scale)",
|
||||
r"(?:manage|coordinate|orchestrate)\s+(?:work|tasks|operations)"
|
||||
]
|
||||
|
||||
for pattern in task_management_indicators:
|
||||
if re.search(pattern, user_input, re.IGNORECASE):
|
||||
matches.append(PatternMatch(
|
||||
pattern_type=PatternType.MODE_TRIGGER,
|
||||
pattern_name="task_management",
|
||||
confidence=0.7,
|
||||
matched_text=re.search(pattern, user_input, re.IGNORECASE).group(),
|
||||
suggestions=["Enable task management for complex operations"],
|
||||
metadata={"mode": "task_management", "delegation_likely": True}
|
||||
))
|
||||
break
|
||||
|
||||
# Token efficiency mode detection
|
||||
efficiency_indicators = [
|
||||
r"(?:brief|concise|compressed|short)",
|
||||
r"(?:token|resource|memory)\s+(?:limit|constraint|optimization)",
|
||||
r"(?:efficient|optimized|minimal)\s+(?:output|response)"
|
||||
]
|
||||
|
||||
for pattern in efficiency_indicators:
|
||||
if re.search(pattern, user_input, re.IGNORECASE):
|
||||
matches.append(PatternMatch(
|
||||
pattern_type=PatternType.MODE_TRIGGER,
|
||||
pattern_name="token_efficiency",
|
||||
confidence=0.9,
|
||||
matched_text=re.search(pattern, user_input, re.IGNORECASE).group(),
|
||||
suggestions=["Enable token efficiency mode"],
|
||||
metadata={"mode": "token_efficiency", "compression_needed": True}
|
||||
))
|
||||
break
|
||||
|
||||
# Check resource usage for automatic efficiency mode
|
||||
resource_usage = context.get('resource_usage_percent', 0)
|
||||
if resource_usage > 75:
|
||||
matches.append(PatternMatch(
|
||||
pattern_type=PatternType.MODE_TRIGGER,
|
||||
pattern_name="token_efficiency",
|
||||
confidence=0.85,
|
||||
matched_text="high_resource_usage",
|
||||
suggestions=["Auto-enable token efficiency due to resource constraints"],
|
||||
metadata={"mode": "token_efficiency", "trigger": "resource_constraint"}
|
||||
))
|
||||
|
||||
return matches
|
||||
|
||||
def _detect_mcp_patterns(self, user_input: str, context: Dict[str, Any], operation_data: Dict[str, Any]) -> List[PatternMatch]:
|
||||
"""Detect which MCP servers should be activated."""
|
||||
matches = []
|
||||
|
||||
# Context7 (library documentation)
|
||||
context7_patterns = [
|
||||
r"(?:library|framework|package)\s+(?:documentation|docs|patterns)",
|
||||
r"(?:react|vue|angular|express|django|flask)",
|
||||
r"(?:import|require|install|dependency)",
|
||||
r"(?:official|standard|best practice)\s+(?:way|pattern|approach)"
|
||||
]
|
||||
|
||||
for pattern in context7_patterns:
|
||||
if re.search(pattern, user_input, re.IGNORECASE):
|
||||
matches.append(PatternMatch(
|
||||
pattern_type=PatternType.MCP_SERVER,
|
||||
pattern_name="context7",
|
||||
confidence=0.8,
|
||||
matched_text=re.search(pattern, user_input, re.IGNORECASE).group(),
|
||||
suggestions=["Enable Context7 for library documentation"],
|
||||
metadata={"mcp_server": "context7", "focus": "documentation"}
|
||||
))
|
||||
break
|
||||
|
||||
# Sequential (complex analysis)
|
||||
sequential_patterns = [
|
||||
r"(?:analyze|debug|troubleshoot|investigate)",
|
||||
r"(?:complex|complicated|multi-step|systematic)",
|
||||
r"(?:architecture|system|design)\s+(?:review|analysis)",
|
||||
r"(?:root cause|performance|bottleneck)"
|
||||
]
|
||||
|
||||
for pattern in sequential_patterns:
|
||||
if re.search(pattern, user_input, re.IGNORECASE):
|
||||
matches.append(PatternMatch(
|
||||
pattern_type=PatternType.MCP_SERVER,
|
||||
pattern_name="sequential",
|
||||
confidence=0.75,
|
||||
matched_text=re.search(pattern, user_input, re.IGNORECASE).group(),
|
||||
suggestions=["Enable Sequential for multi-step analysis"],
|
||||
metadata={"mcp_server": "sequential", "analysis_type": "complex"}
|
||||
))
|
||||
break
|
||||
|
||||
# Magic (UI components)
|
||||
magic_patterns = [
|
||||
r"(?:component|button|form|modal|dialog)",
|
||||
r"(?:ui|frontend|interface|design)",
|
||||
r"(?:react|vue|angular)\s+(?:component|element)",
|
||||
r"(?:responsive|mobile|accessibility)"
|
||||
]
|
||||
|
||||
for pattern in magic_patterns:
|
||||
if re.search(pattern, user_input, re.IGNORECASE):
|
||||
matches.append(PatternMatch(
|
||||
pattern_type=PatternType.MCP_SERVER,
|
||||
pattern_name="magic",
|
||||
confidence=0.85,
|
||||
matched_text=re.search(pattern, user_input, re.IGNORECASE).group(),
|
||||
suggestions=["Enable Magic for UI component generation"],
|
||||
metadata={"mcp_server": "magic", "component_type": "ui"}
|
||||
))
|
||||
break
|
||||
|
||||
# Playwright (testing)
|
||||
playwright_patterns = [
|
||||
r"(?:test|testing|e2e|end-to-end)",
|
||||
r"(?:browser|cross-browser|automation)",
|
||||
r"(?:performance|visual|regression)\s+(?:test|testing)",
|
||||
r"(?:validate|verify|check)\s+(?:functionality|behavior)"
|
||||
]
|
||||
|
||||
for pattern in playwright_patterns:
|
||||
if re.search(pattern, user_input, re.IGNORECASE):
|
||||
matches.append(PatternMatch(
|
||||
pattern_type=PatternType.MCP_SERVER,
|
||||
pattern_name="playwright",
|
||||
confidence=0.8,
|
||||
matched_text=re.search(pattern, user_input, re.IGNORECASE).group(),
|
||||
suggestions=["Enable Playwright for testing operations"],
|
||||
metadata={"mcp_server": "playwright", "test_type": "e2e"}
|
||||
))
|
||||
break
|
||||
|
||||
# Morphllm vs Serena intelligence selection
|
||||
file_count = operation_data.get('file_count', 1)
|
||||
complexity = operation_data.get('complexity_score', 0.0)
|
||||
|
||||
if file_count > 10 or complexity > 0.6:
|
||||
matches.append(PatternMatch(
|
||||
pattern_type=PatternType.MCP_SERVER,
|
||||
pattern_name="serena",
|
||||
confidence=0.9,
|
||||
matched_text="high_complexity_operation",
|
||||
suggestions=["Use Serena for complex multi-file operations"],
|
||||
metadata={"mcp_server": "serena", "reason": "complexity_threshold"}
|
||||
))
|
||||
elif file_count <= 10 and complexity <= 0.6:
|
||||
matches.append(PatternMatch(
|
||||
pattern_type=PatternType.MCP_SERVER,
|
||||
pattern_name="morphllm",
|
||||
confidence=0.8,
|
||||
matched_text="moderate_complexity_operation",
|
||||
suggestions=["Use Morphllm for efficient editing operations"],
|
||||
metadata={"mcp_server": "morphllm", "reason": "efficiency_optimized"}
|
||||
))
|
||||
|
||||
return matches
|
||||
|
||||
def _detect_complexity_patterns(self, user_input: str, operation_data: Dict[str, Any]) -> List[PatternMatch]:
|
||||
"""Detect complexity indicators in the request."""
|
||||
matches = []
|
||||
|
||||
# High complexity indicators
|
||||
high_complexity_patterns = [
|
||||
r"(?:entire|whole|complete)\s+(?:codebase|system|application)",
|
||||
r"(?:refactor|migrate|restructure)\s+(?:all|everything|entire)",
|
||||
r"(?:architecture|system-wide|comprehensive)\s+(?:change|update|redesign)",
|
||||
r"(?:complex|complicated|sophisticated)\s+(?:logic|algorithm|system)"
|
||||
]
|
||||
|
||||
for pattern in high_complexity_patterns:
|
||||
if re.search(pattern, user_input, re.IGNORECASE):
|
||||
matches.append(PatternMatch(
|
||||
pattern_type=PatternType.COMPLEXITY_INDICATOR,
|
||||
pattern_name="high_complexity",
|
||||
confidence=0.8,
|
||||
matched_text=re.search(pattern, user_input, re.IGNORECASE).group(),
|
||||
suggestions=["Consider delegation and thinking modes"],
|
||||
metadata={"complexity_level": "high", "score_boost": 0.3}
|
||||
))
|
||||
break
|
||||
|
||||
# File count indicators
|
||||
file_count = operation_data.get('file_count', 1)
|
||||
if file_count > 5:
|
||||
matches.append(PatternMatch(
|
||||
pattern_type=PatternType.COMPLEXITY_INDICATOR,
|
||||
pattern_name="multi_file_operation",
|
||||
confidence=0.9,
|
||||
matched_text=f"{file_count}_files",
|
||||
suggestions=["Enable delegation for multi-file operations"],
|
||||
metadata={"file_count": file_count, "delegation_recommended": True}
|
||||
))
|
||||
|
||||
return matches
|
||||
|
||||
def _detect_persona_patterns(self, user_input: str, context: Dict[str, Any]) -> List[PatternMatch]:
|
||||
"""Detect hints about which persona should be active."""
|
||||
matches = []
|
||||
|
||||
persona_patterns = {
|
||||
"architect": [r"(?:architecture|design|structure|system)\s+(?:review|analysis|planning)"],
|
||||
"performance": [r"(?:performance|optimization|speed|efficiency|bottleneck)"],
|
||||
"security": [r"(?:security|vulnerability|audit|secure|safety)"],
|
||||
"frontend": [r"(?:ui|frontend|interface|component|design|responsive)"],
|
||||
"backend": [r"(?:api|server|database|backend|service)"],
|
||||
"devops": [r"(?:deploy|deployment|ci|cd|infrastructure|docker|kubernetes)"],
|
||||
"testing": [r"(?:test|testing|qa|quality|coverage|validation)"]
|
||||
}
|
||||
|
||||
for persona, patterns in persona_patterns.items():
|
||||
for pattern in patterns:
|
||||
if re.search(pattern, user_input, re.IGNORECASE):
|
||||
matches.append(PatternMatch(
|
||||
pattern_type=PatternType.PERSONA_HINT,
|
||||
pattern_name=persona,
|
||||
confidence=0.7,
|
||||
matched_text=re.search(pattern, user_input, re.IGNORECASE).group(),
|
||||
suggestions=[f"Consider {persona} persona for specialized expertise"],
|
||||
metadata={"persona": persona, "domain_specific": True}
|
||||
))
|
||||
break
|
||||
|
||||
return matches
|
||||
|
||||
def _calculate_complexity_score(self, matches: List[PatternMatch], operation_data: Dict[str, Any]) -> float:
|
||||
"""Calculate overall complexity score from detected patterns."""
|
||||
base_score = operation_data.get('complexity_score', 0.0)
|
||||
|
||||
# Add complexity from pattern matches
|
||||
for match in matches:
|
||||
if match.pattern_type == PatternType.COMPLEXITY_INDICATOR:
|
||||
score_boost = match.metadata.get('score_boost', 0.1)
|
||||
base_score += score_boost
|
||||
|
||||
return min(base_score, 1.0)
|
||||
|
||||
def _calculate_confidence_score(self, matches: List[PatternMatch]) -> float:
|
||||
"""Calculate overall confidence in pattern detection."""
|
||||
if not matches:
|
||||
return 0.0
|
||||
|
||||
total_confidence = sum(match.confidence for match in matches)
|
||||
return min(total_confidence / len(matches), 1.0)
|
||||
|
||||
def _get_recommended_modes(self, matches: List[PatternMatch], complexity_score: float) -> List[str]:
|
||||
"""Get recommended modes based on detected patterns."""
|
||||
modes = set()
|
||||
|
||||
for match in matches:
|
||||
if match.pattern_type == PatternType.MODE_TRIGGER:
|
||||
modes.add(match.pattern_name)
|
||||
|
||||
# Auto-activate based on complexity
|
||||
if complexity_score > 0.6:
|
||||
modes.add("task_management")
|
||||
|
||||
return list(modes)
|
||||
|
||||
def _get_recommended_mcp_servers(self, matches: List[PatternMatch], context: Dict[str, Any]) -> List[str]:
|
||||
"""Get recommended MCP servers based on detected patterns."""
|
||||
servers = set()
|
||||
|
||||
for match in matches:
|
||||
if match.pattern_type == PatternType.MCP_SERVER:
|
||||
servers.add(match.pattern_name)
|
||||
|
||||
return list(servers)
|
||||
|
||||
def _get_suggested_flags(self, matches: List[PatternMatch], complexity_score: float, context: Dict[str, Any]) -> List[str]:
|
||||
"""Get suggested flags based on patterns and complexity."""
|
||||
flags = []
|
||||
|
||||
# Thinking flags based on complexity
|
||||
if complexity_score >= 0.8:
|
||||
flags.append("--ultrathink")
|
||||
elif complexity_score >= 0.6:
|
||||
flags.append("--think-hard")
|
||||
elif complexity_score >= 0.3:
|
||||
flags.append("--think")
|
||||
|
||||
# Delegation flags
|
||||
for match in matches:
|
||||
if match.metadata.get("delegation_recommended"):
|
||||
flags.append("--delegate auto")
|
||||
break
|
||||
|
||||
# Efficiency flags
|
||||
for match in matches:
|
||||
if match.metadata.get("compression_needed") or context.get('resource_usage_percent', 0) > 75:
|
||||
flags.append("--uc")
|
||||
break
|
||||
|
||||
# Validation flags for high-risk operations
|
||||
if complexity_score > 0.7 or context.get('is_production', False):
|
||||
flags.append("--validate")
|
||||
|
||||
return flags
|
||||
295
Framework-Hooks/hooks/shared/yaml_loader.py
Normal file
295
Framework-Hooks/hooks/shared/yaml_loader.py
Normal file
@@ -0,0 +1,295 @@
|
||||
"""
|
||||
Unified Configuration Loader for SuperClaude-Lite
|
||||
|
||||
High-performance configuration loading with support for both JSON and YAML formats,
|
||||
caching, hot-reload capabilities, and comprehensive error handling.
|
||||
|
||||
Supports:
|
||||
- Claude Code settings.json (JSON format)
|
||||
- SuperClaude superclaude-config.json (JSON format)
|
||||
- YAML configuration files
|
||||
- Unified configuration interface for hooks
|
||||
"""
|
||||
|
||||
import os
|
||||
import json
|
||||
import yaml
|
||||
import time
|
||||
import hashlib
|
||||
from typing import Dict, Any, Optional, Union
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
class UnifiedConfigLoader:
    """
    Intelligent configuration loader with support for JSON and YAML formats.

    Features:
    - Dual-configuration support (Claude Code + SuperClaude)
    - File modification detection for hot-reload
    - In-memory caching for performance (<10ms access)
    - Comprehensive error handling and validation
    - Environment variable interpolation
    - Include/merge support for modular configs
    - Unified configuration interface
    """

    def __init__(self, project_root: Union[str, Path]):
        """Initialize the loader rooted at *project_root* (YAML configs live in <root>/config)."""
        self.project_root = Path(project_root)
        self.config_dir = self.project_root / "config"

        # JSON configuration file paths at the project root.
        self.claude_settings_path = self.project_root / "settings.json"
        self.superclaude_config_path = self.project_root / "superclaude-config.json"

        # Cache plus freshness metadata for all configuration sources.
        self._cache: Dict[str, Dict[str, Any]] = {}
        self._file_hashes: Dict[str, str] = {}
        self._last_check: Dict[str, float] = {}
        self.check_interval = 1.0  # Check files every 1 second max

        # Registry mapping special JSON config identifiers to their paths.
        self._config_sources = {
            'claude_settings': self.claude_settings_path,
            'superclaude_config': self.superclaude_config_path
        }

    def load_config(self, config_name: str, force_reload: bool = False) -> Dict[str, Any]:
        """
        Load configuration with intelligent caching (supports JSON and YAML).

        Args:
            config_name: Name of config file or special config identifier
                - For YAML: config file name without .yaml extension
                - For JSON: 'claude_settings' or 'superclaude_config'
            force_reload: Force reload even if cached

        Returns:
            Parsed configuration dictionary

        Raises:
            FileNotFoundError: If config file doesn't exist
            ValueError: If config parsing fails
            RuntimeError: For any other load failure
        """
        # Registered JSON sources are delegated to the JSON loader.
        if config_name in self._config_sources:
            return self._load_json_config(config_name, force_reload)

        # Everything else resolves to <config_dir>/<name>.yaml.
        config_path = self.config_dir / f"{config_name}.yaml"

        if not config_path.exists():
            raise FileNotFoundError(f"Configuration file not found: {config_path}")

        if not force_reload and self._should_use_cache(config_name, config_path):
            return self._cache[config_name]

        try:
            with open(config_path, 'r', encoding='utf-8') as f:
                content = f.read()

            # Environment variable interpolation before parsing.
            content = self._interpolate_env_vars(content)

            config = yaml.safe_load(content)

            # Handle __include__ directives relative to the config's directory.
            config = self._process_includes(config, config_path.parent)

            self._store_cache(config_name, config_path, config)
            return config

        except yaml.YAMLError as e:
            raise ValueError(f"YAML parsing error in {config_path}: {e}")
        except Exception as e:
            raise RuntimeError(f"Error loading config {config_name}: {e}")

    def _load_json_config(self, config_name: str, force_reload: bool = False) -> Dict[str, Any]:
        """Load a registered JSON configuration file (claude_settings / superclaude_config)."""
        config_path = self._config_sources[config_name]

        if not config_path.exists():
            raise FileNotFoundError(f"Configuration file not found: {config_path}")

        if not force_reload and self._should_use_cache(config_name, config_path):
            return self._cache[config_name]

        try:
            with open(config_path, 'r', encoding='utf-8') as f:
                content = f.read()

            # Environment variable interpolation before parsing.
            content = self._interpolate_env_vars(content)

            config = json.loads(content)

            self._store_cache(config_name, config_path, config)
            return config

        except json.JSONDecodeError as e:
            raise ValueError(f"JSON parsing error in {config_path}: {e}")
        except Exception as e:
            raise RuntimeError(f"Error loading JSON config {config_name}: {e}")

    def _store_cache(self, config_name: str, config_path: Path, config: Dict[str, Any]) -> None:
        """Record *config* in the in-memory cache with freshness metadata.

        Factored out of the YAML and JSON load paths, which previously
        duplicated these three assignments.
        """
        self._cache[config_name] = config
        self._file_hashes[config_name] = self._compute_hash(config_path)
        self._last_check[config_name] = time.time()

    def get_section(self, config_name: str, section_path: str, default: Any = None) -> Any:
        """
        Get specific section from configuration using dot notation.

        Args:
            config_name: Configuration file name or identifier
            section_path: Dot-separated path (e.g., 'routing.ui_components')
            default: Default value if section not found

        Returns:
            Configuration section value or default
        """
        config = self.load_config(config_name)

        try:
            result = config
            for key in section_path.split('.'):
                result = result[key]
            return result
        except (KeyError, TypeError):
            # Missing key, or traversal hit a non-dict node.
            return default

    def get_hook_config(self, hook_name: str, section_path: str = None, default: Any = None) -> Any:
        """
        Get hook-specific configuration from SuperClaude config.

        Args:
            hook_name: Hook name (e.g., 'session_start', 'pre_tool_use')
            section_path: Optional dot-separated path within hook config
            default: Default value if not found

        Returns:
            Hook configuration or specific section
        """
        base_path = f"hook_configurations.{hook_name}"
        full_path = f"{base_path}.{section_path}" if section_path else base_path
        return self.get_section('superclaude_config', full_path, default)

    def get_claude_hooks(self) -> Dict[str, Any]:
        """Get Claude Code hook definitions from settings.json."""
        return self.get_section('claude_settings', 'hooks', {})

    def get_superclaude_config(self, section_path: str = None, default: Any = None) -> Any:
        """
        Get SuperClaude framework configuration.

        Args:
            section_path: Optional dot-separated path (e.g., 'global_configuration.performance_monitoring')
            default: Default value if not found

        Returns:
            Configuration section or full config if no path specified
        """
        if section_path:
            return self.get_section('superclaude_config', section_path, default)
        return self.load_config('superclaude_config')

    def get_mcp_server_config(self, server_name: str = None) -> Dict[str, Any]:
        """
        Get MCP server configuration.

        Args:
            server_name: Optional specific server name

        Returns:
            MCP server configuration (one server's block, or the whole section)
        """
        if server_name:
            return self.get_section('superclaude_config', f'mcp_server_integration.servers.{server_name}', {})
        return self.get_section('superclaude_config', 'mcp_server_integration', {})

    def get_performance_targets(self) -> Dict[str, Any]:
        """Get performance targets for all components."""
        return self.get_section('superclaude_config', 'global_configuration.performance_monitoring', {})

    def is_hook_enabled(self, hook_name: str) -> bool:
        """Check if a specific hook is enabled (defaults to False when unset)."""
        return self.get_hook_config(hook_name, 'enabled', False)

    def reload_all(self) -> None:
        """Force reload of all cached configurations."""
        for config_name in list(self._cache.keys()):
            self.load_config(config_name, force_reload=True)

    def _should_use_cache(self, config_name: str, config_path: Path) -> bool:
        """Check if the cached version of *config_name* is still valid."""
        if config_name not in self._cache:
            return False

        # Rate limit file checks: within the interval, trust the cache.
        now = time.time()
        if now - self._last_check.get(config_name, 0) < self.check_interval:
            return True

        # Fix: refresh the check timestamp here. Previously it was only set
        # on a full load, so once the interval elapsed the hash was
        # recomputed on every single call, defeating the rate limit.
        self._last_check[config_name] = now

        current_hash = self._compute_hash(config_path)
        return current_hash == self._file_hashes.get(config_name)

    def _compute_hash(self, file_path: Path) -> str:
        """Compute a cheap change-detection hash from mtime and size (not contents)."""
        stat = file_path.stat()
        return hashlib.md5(f"{stat.st_mtime}:{stat.st_size}".encode()).hexdigest()

    def _interpolate_env_vars(self, content: str) -> str:
        """Replace ${VAR} / ${VAR:default} occurrences in config text with env values."""
        import re

        def replace_env_var(match):
            var_name = match.group(1)
            default_value = match.group(2) if match.group(2) else ""
            return os.getenv(var_name, default_value)

        pattern = r'\$\{([^}:]+)(?::([^}]*))?\}'
        return re.sub(pattern, replace_env_var, content)

    def _process_includes(self, config: Dict[str, Any], base_dir: Path) -> Dict[str, Any]:
        """Process '__include__' directives: merge included YAML files underneath *config*."""
        if not isinstance(config, dict):
            return config

        if '__include__' in config:
            includes = config.pop('__include__')
            if isinstance(includes, str):
                includes = [includes]

            for include_file in includes:
                include_path = base_dir / include_file
                if include_path.exists():
                    with open(include_path, 'r', encoding='utf-8') as f:
                        included_config = yaml.safe_load(f.read())
                    if isinstance(included_config, dict):
                        # Merge included config (current config takes precedence)
                        included_config.update(config)
                        config = included_config

        return config
|
||||
|
||||
|
||||
# Global instance for shared use across hooks.
# NOTE(review): the project root defaults to the process CWD (".") — hooks
# must therefore be launched from the project root for settings.json,
# superclaude-config.json and config/*.yaml paths to resolve. TODO confirm.
config_loader = UnifiedConfigLoader(".")
|
||||
711
Framework-Hooks/hooks/stop.py
Executable file
711
Framework-Hooks/hooks/stop.py
Executable file
@@ -0,0 +1,711 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
SuperClaude-Lite Stop Hook
|
||||
|
||||
Implements session analytics + /sc:save logic with performance tracking.
|
||||
Performance target: <200ms execution time.
|
||||
|
||||
This hook runs at session end and provides:
|
||||
- Comprehensive session analytics and performance metrics
|
||||
- Learning consolidation and adaptation updates
|
||||
- Session persistence with intelligent compression
|
||||
- Performance optimization recommendations
|
||||
- Quality assessment and improvement suggestions
|
||||
"""
|
||||
|
||||
import sys
|
||||
import json
|
||||
import time
|
||||
import os
|
||||
from pathlib import Path
|
||||
from typing import Dict, Any, List, Optional
|
||||
import statistics
|
||||
|
||||
# Add shared modules to path
|
||||
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "shared"))
|
||||
|
||||
from framework_logic import FrameworkLogic
|
||||
from pattern_detection import PatternDetector
|
||||
from mcp_intelligence import MCPIntelligence
|
||||
from compression_engine import CompressionEngine
|
||||
from learning_engine import LearningEngine, LearningType, AdaptationScope
|
||||
from yaml_loader import config_loader
|
||||
from logger import log_hook_start, log_hook_end, log_decision, log_error
|
||||
|
||||
|
||||
class StopHook:
|
||||
"""
|
||||
Stop hook implementing session analytics and persistence.
|
||||
|
||||
Responsibilities:
|
||||
- Analyze session performance and effectiveness
|
||||
- Consolidate learning events and adaptations
|
||||
- Generate comprehensive session analytics
|
||||
- Implement intelligent session persistence
|
||||
- Provide optimization recommendations for future sessions
|
||||
- Track SuperClaude framework effectiveness metrics
|
||||
"""
|
||||
|
||||
    def __init__(self):
        """Initialize the stop hook and its supporting engines.

        Records its own initialization latency and reads the hook's
        performance target from the SuperClaude configuration.
        """
        start_time = time.time()

        # Initialize core components
        self.framework_logic = FrameworkLogic()
        self.pattern_detector = PatternDetector()
        self.mcp_intelligence = MCPIntelligence()
        self.compression_engine = CompressionEngine()

        # Initialize learning engine
        # NOTE(review): "cache" is relative to the CWD, so the learning cache
        # location depends on where the hook process is launched — TODO confirm.
        cache_dir = Path("cache")
        self.learning_engine = LearningEngine(cache_dir)

        # Load hook-specific configuration from SuperClaude config
        self.hook_config = config_loader.get_hook_config('stop')

        # Load session configuration (from YAML if exists, otherwise use hook config)
        try:
            self.session_config = config_loader.load_config('session')
        except FileNotFoundError:
            # Fall back to hook configuration if YAML file not found
            self.session_config = self.hook_config.get('configuration', {})

        # Performance tracking using configuration
        self.initialization_time = (time.time() - start_time) * 1000  # milliseconds
        self.performance_target_ms = config_loader.get_hook_config('stop', 'performance_target_ms', 200)
|
||||
|
||||
    def process_session_stop(self, session_data: dict) -> dict:
        """
        Process session stop with analytics and persistence.

        Orchestrates the full end-of-session pipeline: context extraction,
        performance analysis, learning consolidation, analytics generation,
        persistence, recommendations, and report assembly — logging each
        decision along the way. Any exception is caught and converted into
        a fallback report so session shutdown never hard-fails.

        Args:
            session_data: Session termination data from Claude Code

        Returns:
            Session analytics report with learning insights and persistence status
        """
        start_time = time.time()

        # Log hook start with a summary of what this session contained.
        log_hook_start("stop", {
            "session_id": session_data.get('session_id', ''),
            "session_duration_ms": session_data.get('duration_ms', 0),
            "operations_count": len(session_data.get('operations', [])),
            "errors_count": len(session_data.get('errors', [])),
            "superclaude_enabled": session_data.get('superclaude_enabled', False)
        })

        try:
            # Extract session context
            context = self._extract_session_context(session_data)

            # Analyze session performance
            performance_analysis = self._analyze_session_performance(context)

            # Log performance analysis results
            log_decision(
                "stop",
                "performance_analysis",
                f"{performance_analysis['overall_score']:.2f}",
                f"Productivity: {context.get('session_productivity', 0):.2f}, Errors: {context.get('error_rate', 0):.2f}, Bottlenecks: {', '.join(performance_analysis['bottlenecks_identified'])}"
            )

            # Consolidate learning events
            learning_consolidation = self._consolidate_learning_events(context)

            # Generate session analytics from the three analysis stages above.
            session_analytics = self._generate_session_analytics(
                context, performance_analysis, learning_consolidation
            )

            # Perform session persistence
            persistence_result = self._perform_session_persistence(context, session_analytics)

            # Log persistence results (only when persistence is enabled).
            if persistence_result['persistence_enabled']:
                log_decision(
                    "stop",
                    "session_persistence",
                    "saved",
                    f"Analytics saved: {persistence_result['analytics_saved']}, Compression: {persistence_result['compression_applied']}"
                )

            # Generate recommendations
            recommendations = self._generate_recommendations(
                context, performance_analysis, learning_consolidation
            )

            # Log recommendations generated (count across all categories).
            total_recommendations = sum(len(recs) for recs in recommendations.values())
            if total_recommendations > 0:
                log_decision(
                    "stop",
                    "recommendations_generated",
                    str(total_recommendations),
                    f"Categories: {', '.join(k for k, v in recommendations.items() if v)}"
                )

            # Create final learning events
            self._create_final_learning_events(context, session_analytics)

            # Generate session report
            session_report = self._generate_session_report(
                context, session_analytics, persistence_result, recommendations
            )

            # Performance tracking for the stop hook itself (milliseconds).
            execution_time = (time.time() - start_time) * 1000
            session_report['performance_metrics'] = {
                'stop_processing_time_ms': execution_time,
                'target_met': execution_time < self.performance_target_ms,
                'total_session_efficiency': self._calculate_session_efficiency(session_analytics)
            }

            # Log hook end with success
            log_hook_end(
                "stop",
                int(execution_time),
                True,
                {
                    "session_score": session_analytics['performance_metrics']['overall_score'],
                    "superclaude_effectiveness": session_analytics['superclaude_effectiveness']['effectiveness_score'],
                    "learning_insights": session_analytics['learning_summary']['insights_generated'],
                    "recommendations": total_recommendations,
                    "performance_target_met": execution_time < self.performance_target_ms
                }
            )

            return session_report

        except Exception as e:
            # Broad catch is deliberate: the stop hook must never crash
            # session shutdown; it logs and degrades to a fallback report.
            log_error("stop", str(e), {"session_data": session_data})

            # Log hook end with failure
            log_hook_end("stop", int((time.time() - start_time) * 1000), False)

            # Graceful fallback on error
            return self._create_fallback_report(session_data, str(e))
|
||||
|
||||
def _extract_session_context(self, session_data: dict) -> dict:
    """Build an enriched context dict from raw session data.

    Copies the fields this hook consumes out of *session_data* (with
    safe empty/zero defaults), stamps the session end time, and merges
    in derived metrics (counts, rates, productivity scores).
    """
    # (destination key, source key, default) — source names come from
    # the Claude Code session payload, destination names are internal.
    field_map = (
        ('session_id', 'session_id', ''),
        ('session_duration_ms', 'duration_ms', 0),
        ('session_start_time', 'start_time', 0),
        ('operations_performed', 'operations', []),
        ('tools_used', 'tools_used', []),
        ('mcp_servers_activated', 'mcp_servers', []),
        ('errors_encountered', 'errors', []),
        ('user_interactions', 'user_interactions', []),
        ('resource_usage', 'resource_usage', {}),
        ('quality_metrics', 'quality_metrics', {}),
        ('superclaude_enabled', 'superclaude_enabled', False),
    )
    context = {dest: session_data.get(src, default)
               for dest, src, default in field_map}
    context['session_end_time'] = time.time()

    # Augment with derived metrics (operation counts, error rate, ...).
    context.update(self._calculate_derived_metrics(context))

    return context
|
||||
|
||||
def _calculate_derived_metrics(self, context: dict) -> dict:
    """Derive summary metrics (counts, rates, scores) from the raw context."""
    ops = context.get('operations_performed', [])
    op_count = len(ops)
    divisor = max(op_count, 1)  # guard against division by zero on idle sessions

    return {
        'operation_count': op_count,
        'unique_tools_count': len(set(context.get('tools_used', []))),
        'error_rate': len(context.get('errors_encountered', [])) / divisor,
        'mcp_usage_ratio': len(context.get('mcp_servers_activated', [])) / divisor,
        'session_productivity': self._calculate_productivity_score(context),
        'superclaude_effectiveness': self._calculate_superclaude_effectiveness(context),
    }
|
||||
|
||||
def _calculate_productivity_score(self, context: dict) -> float:
|
||||
"""Calculate session productivity score (0.0 to 1.0)."""
|
||||
operations = context.get('operations_performed', [])
|
||||
errors = context.get('errors_encountered', [])
|
||||
duration_ms = context.get('session_duration_ms', 1)
|
||||
|
||||
if not operations:
|
||||
return 0.0
|
||||
|
||||
# Base productivity from operation completion
|
||||
completion_rate = (len(operations) - len(errors)) / len(operations)
|
||||
|
||||
# Time efficiency (operations per minute)
|
||||
duration_minutes = duration_ms / (1000 * 60)
|
||||
operations_per_minute = len(operations) / max(duration_minutes, 0.1)
|
||||
|
||||
# Normalize operations per minute (assume 5 ops/min is very productive)
|
||||
time_efficiency = min(operations_per_minute / 5.0, 1.0)
|
||||
|
||||
# Combined productivity score
|
||||
productivity = (completion_rate * 0.7) + (time_efficiency * 0.3)
|
||||
|
||||
return min(productivity, 1.0)
|
||||
|
||||
def _calculate_superclaude_effectiveness(self, context: dict) -> float:
|
||||
"""Calculate SuperClaude framework effectiveness score."""
|
||||
if not context.get('superclaude_enabled'):
|
||||
return 0.0
|
||||
|
||||
# Factors that indicate SuperClaude effectiveness
|
||||
factors = []
|
||||
|
||||
# MCP server utilization
|
||||
mcp_ratio = context.get('mcp_usage_ratio', 0)
|
||||
factors.append(min(mcp_ratio * 2, 1.0)) # More MCP usage = better intelligence
|
||||
|
||||
# Error reduction (assume SuperClaude reduces errors)
|
||||
error_rate = context.get('error_rate', 0)
|
||||
error_effectiveness = max(1.0 - (error_rate * 2), 0.0)
|
||||
factors.append(error_effectiveness)
|
||||
|
||||
# Productivity enhancement
|
||||
productivity = context.get('session_productivity', 0)
|
||||
factors.append(productivity)
|
||||
|
||||
# Quality metrics if available
|
||||
quality_metrics = context.get('quality_metrics', {})
|
||||
if quality_metrics:
|
||||
avg_quality = statistics.mean(quality_metrics.values()) if quality_metrics.values() else 0.5
|
||||
factors.append(avg_quality)
|
||||
|
||||
return statistics.mean(factors) if factors else 0.5
|
||||
|
||||
def _analyze_session_performance(self, context: dict) -> dict:
    """Analyze overall session performance.

    Args:
        context: Enriched session context from ``_extract_session_context``
            (expects the derived keys ``session_productivity``,
            ``superclaude_effectiveness``, ``error_rate``, ``mcp_usage_ratio``).

    Returns:
        Dict with ``overall_score`` (weighted 0.4/0.4/0.2 blend of
        productivity, effectiveness and quality), per-category scores,
        identified bottlenecks and optimization opportunities.
        ``performance_trends`` is initialized but never populated here.
    """
    performance_analysis = {
        'overall_score': 0.0,
        'performance_categories': {},
        'bottlenecks_identified': [],
        'optimization_opportunities': [],
        'performance_trends': {}
    }

    # Overall performance scoring
    productivity = context.get('session_productivity', 0)
    effectiveness = context.get('superclaude_effectiveness', 0)
    error_rate = context.get('error_rate', 0)

    # Weighted blend: productivity and framework effectiveness dominate;
    # quality (1 - error rate) contributes the remaining 20%.
    performance_analysis['overall_score'] = (
        productivity * 0.4 +
        effectiveness * 0.4 +
        (1.0 - error_rate) * 0.2
    )

    # Category-specific performance
    performance_analysis['performance_categories'] = {
        'productivity': productivity,
        'quality': 1.0 - error_rate,
        'intelligence_utilization': context.get('mcp_usage_ratio', 0),
        'resource_efficiency': self._calculate_resource_efficiency(context),
        'user_satisfaction_estimate': self._estimate_user_satisfaction(context)
    }

    # Identify bottlenecks (thresholds are heuristic tuning constants)
    if error_rate > 0.2:
        performance_analysis['bottlenecks_identified'].append('high_error_rate')

    if productivity < 0.5:
        performance_analysis['bottlenecks_identified'].append('low_productivity')

    # SuperClaude is on but MCP servers were barely used — flag and log it.
    if context.get('mcp_usage_ratio', 0) < 0.3 and context.get('superclaude_enabled'):
        performance_analysis['bottlenecks_identified'].append('underutilized_intelligence')
        log_decision(
            "stop",
            "intelligence_utilization",
            "low",
            f"MCP usage ratio: {context.get('mcp_usage_ratio', 0):.2f}, SuperClaude enabled but underutilized"
        )

    # Optimization opportunities
    if context.get('unique_tools_count', 0) > 10:
        performance_analysis['optimization_opportunities'].append('tool_usage_optimization')

    if len(context.get('mcp_servers_activated', [])) < 2 and context.get('operation_count', 0) > 5:
        performance_analysis['optimization_opportunities'].append('mcp_server_coordination')

    return performance_analysis
|
||||
|
||||
def _calculate_resource_efficiency(self, context: dict) -> float:
|
||||
"""Calculate resource usage efficiency."""
|
||||
resource_usage = context.get('resource_usage', {})
|
||||
|
||||
if not resource_usage:
|
||||
return 0.8 # Assume good efficiency if no data
|
||||
|
||||
# Extract resource metrics
|
||||
memory_usage = resource_usage.get('memory_percent', 50)
|
||||
cpu_usage = resource_usage.get('cpu_percent', 50)
|
||||
token_usage = resource_usage.get('token_percent', 50)
|
||||
|
||||
# Efficiency is inversely related to usage (but some usage is good)
|
||||
memory_efficiency = 1.0 - max((memory_usage - 60) / 40, 0) # Penalty above 60%
|
||||
cpu_efficiency = 1.0 - max((cpu_usage - 70) / 30, 0) # Penalty above 70%
|
||||
token_efficiency = 1.0 - max((token_usage - 75) / 25, 0) # Penalty above 75%
|
||||
|
||||
return (memory_efficiency + cpu_efficiency + token_efficiency) / 3
|
||||
|
||||
def _estimate_user_satisfaction(self, context: dict) -> float:
|
||||
"""Estimate user satisfaction based on session metrics."""
|
||||
satisfaction_factors = []
|
||||
|
||||
# Low error rate increases satisfaction
|
||||
error_rate = context.get('error_rate', 0)
|
||||
satisfaction_factors.append(1.0 - error_rate)
|
||||
|
||||
# High productivity increases satisfaction
|
||||
productivity = context.get('session_productivity', 0)
|
||||
satisfaction_factors.append(productivity)
|
||||
|
||||
# SuperClaude effectiveness increases satisfaction
|
||||
if context.get('superclaude_enabled'):
|
||||
effectiveness = context.get('superclaude_effectiveness', 0)
|
||||
satisfaction_factors.append(effectiveness)
|
||||
|
||||
# Session duration factor (not too short, not too long)
|
||||
duration_minutes = context.get('session_duration_ms', 0) / (1000 * 60)
|
||||
if duration_minutes > 0:
|
||||
# Optimal session length is 15-60 minutes
|
||||
if 15 <= duration_minutes <= 60:
|
||||
duration_satisfaction = 1.0
|
||||
elif duration_minutes < 15:
|
||||
duration_satisfaction = duration_minutes / 15
|
||||
else:
|
||||
duration_satisfaction = max(1.0 - (duration_minutes - 60) / 120, 0.3)
|
||||
satisfaction_factors.append(duration_satisfaction)
|
||||
|
||||
return statistics.mean(satisfaction_factors) if satisfaction_factors else 0.5
|
||||
|
||||
def _consolidate_learning_events(self, context: dict) -> dict:
    """Consolidate learning events from the session.

    Pulls insights from the learning engine, records one session-level
    effectiveness event, and returns a summary dict.

    Note: ``learning_categories``, ``adaptations_created`` and
    ``effectiveness_feedback`` are initialized but never populated here;
    downstream readers see their defaults.
    """
    learning_consolidation = {
        'total_learning_events': 0,
        'learning_categories': {},
        'adaptations_created': 0,
        'effectiveness_feedback': [],
        'learning_insights': []
    }

    # Generate learning insights from session.
    # NOTE(review): assumes each insight exposes insight_type/description/
    # confidence/impact_score attributes — defined in learning_engine.
    insights = self.learning_engine.generate_learning_insights()
    learning_consolidation['learning_insights'] = [
        {
            'insight_type': insight.insight_type,
            'description': insight.description,
            'confidence': insight.confidence,
            'impact_score': insight.impact_score
        }
        for insight in insights
    ]

    # Session-specific learning signals derived from context metrics.
    session_learning = {
        'session_effectiveness': context.get('superclaude_effectiveness', 0),
        'performance_score': context.get('session_productivity', 0),
        'mcp_coordination_effectiveness': min(context.get('mcp_usage_ratio', 0) * 2, 1.0),
        'error_recovery_success': 1.0 - context.get('error_rate', 0)
    }

    # Record session learning (0.9 = confidence assigned to this event).
    self.learning_engine.record_learning_event(
        LearningType.EFFECTIVENESS_FEEDBACK,
        AdaptationScope.SESSION,
        context,
        session_learning,
        context.get('superclaude_effectiveness', 0),
        0.9,
        {'hook': 'stop', 'session_end': True}
    )

    # The one event recorded above plus every generated insight.
    learning_consolidation['total_learning_events'] = 1 + len(insights)

    return learning_consolidation
|
||||
|
||||
def _generate_session_analytics(self, context: dict, performance_analysis: dict,
                                learning_consolidation: dict) -> dict:
    """Generate comprehensive session analytics.

    Args:
        context: Enriched session context.
        performance_analysis: Output of ``_analyze_session_performance``.
        learning_consolidation: Output of ``_consolidate_learning_events``.

    Returns:
        A nested analytics dict; this structure is what gets serialized
        to the session cache file, so key order is part of the output.
    """
    analytics = {
        'session_summary': {
            'session_id': context['session_id'],
            'duration_minutes': context.get('session_duration_ms', 0) / (1000 * 60),
            'operations_completed': context.get('operation_count', 0),
            'tools_utilized': context.get('unique_tools_count', 0),
            'mcp_servers_used': len(context.get('mcp_servers_activated', [])),
            'superclaude_enabled': context.get('superclaude_enabled', False)
        },

        'performance_metrics': {
            'overall_score': performance_analysis['overall_score'],
            'productivity_score': context.get('session_productivity', 0),
            'quality_score': 1.0 - context.get('error_rate', 0),
            'efficiency_score': performance_analysis['performance_categories'].get('resource_efficiency', 0),
            'satisfaction_estimate': performance_analysis['performance_categories'].get('user_satisfaction_estimate', 0)
        },

        'superclaude_effectiveness': {
            'framework_enabled': context.get('superclaude_enabled', False),
            'effectiveness_score': context.get('superclaude_effectiveness', 0),
            'intelligence_utilization': context.get('mcp_usage_ratio', 0),
            'learning_events_generated': learning_consolidation['total_learning_events'],
            'adaptations_created': learning_consolidation['adaptations_created']
        },

        'quality_analysis': {
            'error_rate': context.get('error_rate', 0),
            'operation_success_rate': 1.0 - context.get('error_rate', 0),
            'bottlenecks': performance_analysis['bottlenecks_identified'],
            'optimization_opportunities': performance_analysis['optimization_opportunities']
        },

        'learning_summary': {
            'insights_generated': len(learning_consolidation['learning_insights']),
            'key_insights': learning_consolidation['learning_insights'][:3],  # Top 3 insights
            # Mean of confidence * impact over all insights, 0.0 when none.
            'learning_effectiveness': statistics.mean([
                insight['confidence'] * insight['impact_score']
                for insight in learning_consolidation['learning_insights']
            ]) if learning_consolidation['learning_insights'] else 0.0
        },

        'resource_utilization': context.get('resource_usage', {}),

        'session_metadata': {
            'start_time': context.get('session_start_time', 0),
            'end_time': context.get('session_end_time', 0),
            'framework_version': '1.0.0',
            'analytics_version': 'stop_1.0'
        }
    }

    return analytics
|
||||
|
||||
def _perform_session_persistence(self, context: dict, session_analytics: dict) -> dict:
    """Persist session analytics to the local ``cache/`` directory.

    Args:
        context: Enriched session context (``session_id`` is used for the
            cache file name).
        session_analytics: Analytics dict to serialize as JSON.

    Returns:
        Status dict describing which persistence steps succeeded. Never
        raises: any failure is captured in the returned dict instead, so
        session shutdown cannot crash here.
    """
    persistence_result = {
        'persistence_enabled': True,
        'session_data_saved': False,
        'analytics_saved': False,
        'learning_data_saved': False,
        'compression_applied': False,
        'storage_optimized': False
    }

    try:
        # Save session analytics
        analytics_data = json.dumps(session_analytics, indent=2)

        # Apply compression analysis if session data is large (>10KB).
        # NOTE(review): only the compression *metrics* are recorded here;
        # the uncompressed JSON is what gets written below. Confirm
        # whether the compressed payload was ever meant to be stored.
        if len(analytics_data) > 10000:  # 10KB threshold
            compression_result = self.compression_engine.compress_content(
                analytics_data,
                context,
                {'content_type': 'session_data'}
            )
            persistence_result['compression_applied'] = True
            persistence_result['compression_ratio'] = compression_result.compression_ratio

        # Fix: ensure the cache directory exists before writing; a fresh
        # environment previously failed here with FileNotFoundError.
        cache_dir = Path("cache")
        cache_dir.mkdir(parents=True, exist_ok=True)
        session_file = cache_dir / f"session_{context['session_id']}.json"

        with open(session_file, 'w') as f:
            f.write(analytics_data)

        persistence_result['session_data_saved'] = True
        persistence_result['analytics_saved'] = True

        # Learning data is automatically saved by the learning engine.
        persistence_result['learning_data_saved'] = True

        # Optimize storage by cleaning old sessions.
        self._cleanup_old_sessions(cache_dir)
        persistence_result['storage_optimized'] = True

    except Exception as e:
        # Boundary handler: record the failure instead of propagating it.
        persistence_result['error'] = str(e)
        persistence_result['persistence_enabled'] = False

    return persistence_result
|
||||
|
||||
def _cleanup_old_sessions(self, cache_dir: Path):
|
||||
"""Clean up old session files to optimize storage."""
|
||||
session_files = list(cache_dir.glob("session_*.json"))
|
||||
|
||||
# Keep only the most recent 50 sessions
|
||||
if len(session_files) > 50:
|
||||
session_files.sort(key=lambda f: f.stat().st_mtime, reverse=True)
|
||||
for old_file in session_files[50:]:
|
||||
try:
|
||||
old_file.unlink()
|
||||
except:
|
||||
pass # Ignore cleanup errors
|
||||
|
||||
def _generate_recommendations(self, context: dict, performance_analysis: dict,
|
||||
learning_consolidation: dict) -> dict:
|
||||
"""Generate recommendations for future sessions."""
|
||||
recommendations = {
|
||||
'performance_improvements': [],
|
||||
'superclaude_optimizations': [],
|
||||
'learning_suggestions': [],
|
||||
'workflow_enhancements': []
|
||||
}
|
||||
|
||||
# Performance recommendations
|
||||
if performance_analysis['overall_score'] < 0.7:
|
||||
recommendations['performance_improvements'].extend([
|
||||
'Focus on reducing error rate through validation',
|
||||
'Consider enabling more SuperClaude intelligence features',
|
||||
'Optimize tool selection and usage patterns'
|
||||
])
|
||||
|
||||
# SuperClaude optimization recommendations
|
||||
if context.get('superclaude_enabled') and context.get('superclaude_effectiveness', 0) < 0.6:
|
||||
recommendations['superclaude_optimizations'].extend([
|
||||
'Enable more MCP servers for better intelligence',
|
||||
'Use delegation features for complex operations',
|
||||
'Activate compression for resource optimization'
|
||||
])
|
||||
elif not context.get('superclaude_enabled'):
|
||||
recommendations['superclaude_optimizations'].append(
|
||||
'Consider enabling SuperClaude framework for enhanced productivity'
|
||||
)
|
||||
|
||||
# Learning suggestions
|
||||
if learning_consolidation['total_learning_events'] < 3:
|
||||
recommendations['learning_suggestions'].append(
|
||||
'Engage with more complex operations to improve system learning'
|
||||
)
|
||||
|
||||
# Workflow enhancements
|
||||
if context.get('error_rate', 0) > 0.1:
|
||||
recommendations['workflow_enhancements'].extend([
|
||||
'Use validation hooks to catch errors early',
|
||||
'Enable pre-tool-use intelligence for better routing'
|
||||
])
|
||||
|
||||
return recommendations
|
||||
|
||||
def _create_final_learning_events(self, context: dict, session_analytics: dict):
    """Create final learning events for the session.

    Records one user-scope preference event with the session's final
    scores so the learning engine can adapt future sessions.

    Args:
        context: Enriched session context.
        session_analytics: Output of ``_generate_session_analytics``
            (reads ``performance_metrics`` values).
    """
    # Record overall session effectiveness as a user-preference signal.
    self.learning_engine.record_learning_event(
        LearningType.USER_PREFERENCE,
        AdaptationScope.USER,
        context,
        {
            'session_pattern': 'completion',
            'satisfaction_score': session_analytics['performance_metrics']['satisfaction_estimate'],
            'productivity_achieved': session_analytics['performance_metrics']['productivity_score'],
            'superclaude_usage': context.get('superclaude_enabled', False)
        },
        session_analytics['performance_metrics']['overall_score'],
        1.0,  # High confidence in final session metrics
        {'hook': 'stop', 'final_learning': True}
    )
|
||||
|
||||
def _calculate_session_efficiency(self, session_analytics: dict) -> float:
|
||||
"""Calculate overall session efficiency score."""
|
||||
performance_metrics = session_analytics.get('performance_metrics', {})
|
||||
|
||||
efficiency_components = [
|
||||
performance_metrics.get('productivity_score', 0),
|
||||
performance_metrics.get('quality_score', 0),
|
||||
performance_metrics.get('efficiency_score', 0),
|
||||
session_analytics.get('superclaude_effectiveness', {}).get('effectiveness_score', 0)
|
||||
]
|
||||
|
||||
return statistics.mean([comp for comp in efficiency_components if comp > 0])
|
||||
|
||||
def _generate_session_report(self, context: dict, session_analytics: dict,
|
||||
persistence_result: dict, recommendations: dict) -> dict:
|
||||
"""Generate final session report."""
|
||||
return {
|
||||
'session_id': context['session_id'],
|
||||
'session_completed': True,
|
||||
'completion_timestamp': context.get('session_end_time', time.time()),
|
||||
|
||||
'analytics': session_analytics,
|
||||
'persistence': persistence_result,
|
||||
'recommendations': recommendations,
|
||||
|
||||
'summary': {
|
||||
'session_success': session_analytics['performance_metrics']['overall_score'] > 0.6,
|
||||
'superclaude_effective': session_analytics['superclaude_effectiveness']['effectiveness_score'] > 0.6,
|
||||
'learning_achieved': session_analytics['learning_summary']['insights_generated'] > 0,
|
||||
'recommendations_generated': sum(len(recs) for recs in recommendations.values()) > 0
|
||||
},
|
||||
|
||||
'next_session_preparation': {
|
||||
'enable_superclaude': True,
|
||||
'suggested_optimizations': recommendations.get('superclaude_optimizations', [])[:2],
|
||||
'learning_focus_areas': [insight['insight_type'] for insight in
|
||||
session_analytics['learning_summary']['key_insights']]
|
||||
},
|
||||
|
||||
'metadata': {
|
||||
'hook_version': 'stop_1.0',
|
||||
'report_timestamp': time.time(),
|
||||
'analytics_comprehensive': True
|
||||
}
|
||||
}
|
||||
|
||||
def _create_fallback_report(self, session_data: dict, error: str) -> dict:
|
||||
"""Create fallback session report on error."""
|
||||
return {
|
||||
'session_id': session_data.get('session_id', 'unknown'),
|
||||
'session_completed': False,
|
||||
'error': error,
|
||||
'fallback_mode': True,
|
||||
|
||||
'analytics': {
|
||||
'session_summary': {
|
||||
'session_id': session_data.get('session_id', 'unknown'),
|
||||
'error_occurred': True
|
||||
},
|
||||
'performance_metrics': {
|
||||
'overall_score': 0.0
|
||||
}
|
||||
},
|
||||
|
||||
'persistence': {
|
||||
'persistence_enabled': False,
|
||||
'error': error
|
||||
},
|
||||
|
||||
'performance_metrics': {
|
||||
'stop_processing_time_ms': 0,
|
||||
'target_met': False,
|
||||
'error_occurred': True
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
def main():
    """Hook entry point: read session JSON from stdin, print the report as JSON."""
    try:
        payload = json.loads(sys.stdin.read())
        report = StopHook().process_session_stop(payload)
        print(json.dumps(report, indent=2))
    except Exception as exc:
        # Emit a machine-readable failure so Claude Code can continue,
        # and signal the error via the exit code.
        print(json.dumps({
            'session_completed': False,
            'error': str(exc),
            'fallback_mode': True
        }, indent=2))
        sys.exit(1)
|
||||
|
||||
|
||||
# Allow the hook to run as a standalone executable invoked by Claude Code.
if __name__ == "__main__":
    main()
|
||||
769
Framework-Hooks/hooks/subagent_stop.py
Executable file
769
Framework-Hooks/hooks/subagent_stop.py
Executable file
@@ -0,0 +1,769 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
SuperClaude-Lite Subagent Stop Hook
|
||||
|
||||
Implements MODE_Task_Management delegation coordination and analytics.
|
||||
Performance target: <150ms execution time.
|
||||
|
||||
This hook runs when subagents complete tasks and provides:
|
||||
- Subagent performance analytics and coordination metrics
|
||||
- Task delegation effectiveness measurement
|
||||
- Cross-agent learning and adaptation
|
||||
- Wave orchestration optimization
|
||||
- Parallel execution performance tracking
|
||||
"""
|
||||
|
||||
import sys
|
||||
import json
|
||||
import time
|
||||
import os
|
||||
from pathlib import Path
|
||||
from typing import Dict, Any, List, Optional
|
||||
import statistics
|
||||
|
||||
# Add shared modules to path
|
||||
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "shared"))
|
||||
|
||||
from framework_logic import FrameworkLogic
|
||||
from pattern_detection import PatternDetector
|
||||
from mcp_intelligence import MCPIntelligence
|
||||
from compression_engine import CompressionEngine
|
||||
from learning_engine import LearningEngine, LearningType, AdaptationScope
|
||||
from yaml_loader import config_loader
|
||||
from logger import log_hook_start, log_hook_end, log_decision, log_error
|
||||
|
||||
|
||||
class SubagentStopHook:
|
||||
"""
|
||||
Subagent stop hook implementing task management coordination.
|
||||
|
||||
Responsibilities:
|
||||
- Analyze subagent task completion and performance
|
||||
- Measure delegation effectiveness and coordination success
|
||||
- Learn from parallel execution patterns
|
||||
- Optimize wave orchestration strategies
|
||||
- Coordinate cross-agent knowledge sharing
|
||||
- Track task management framework effectiveness
|
||||
"""
|
||||
|
||||
def __init__(self):
    """Initialize analysis components, learning engine and configuration."""
    t0 = time.time()

    # Core intelligence components.
    self.framework_logic = FrameworkLogic()
    self.pattern_detector = PatternDetector()
    self.mcp_intelligence = MCPIntelligence()
    self.compression_engine = CompressionEngine()

    # Learning engine persists its state under the local cache directory.
    self.learning_engine = LearningEngine(Path("cache"))

    # Configuration: task-management section plus hook-specific settings
    # from the SuperClaude config.
    self.task_config = config_loader.get_section('session', 'task_management', {})
    self.hook_config = config_loader.get_hook_config('subagent_stop')

    # Track our own startup cost against the configured budget (ms).
    self.initialization_time = (time.time() - t0) * 1000
    self.performance_target_ms = config_loader.get_hook_config(
        'subagent_stop', 'performance_target_ms', 150)
|
||||
|
||||
def process_subagent_stop(self, subagent_data: dict) -> dict:
    """
    Process subagent completion with coordination analytics.

    Pipeline: extract context -> analyze task completion -> measure
    delegation effectiveness -> analyze coordination patterns ->
    generate optimization insights -> record learning -> update wave
    metrics -> assemble the coordination report. Every stage is logged
    via the shared logger; any exception falls back to a minimal report.

    Args:
        subagent_data: Subagent completion data from Claude Code

    Returns:
        Coordination analytics with delegation effectiveness and optimization insights
    """
    start_time = time.time()

    # Log hook start with the key identifying fields of this completion.
    log_hook_start("subagent_stop", {
        "subagent_id": subagent_data.get('subagent_id', ''),
        "task_id": subagent_data.get('task_id', ''),
        "task_type": subagent_data.get('task_type', 'unknown'),
        "delegation_strategy": subagent_data.get('delegation_strategy', 'unknown'),
        "parallel_tasks": len(subagent_data.get('parallel_tasks', [])),
        "wave_context": subagent_data.get('wave_context', {})
    })

    try:
        # Extract subagent context (normalized fields + derived metrics).
        context = self._extract_subagent_context(subagent_data)

        # Analyze task completion performance.
        # NOTE(review): assumes task_analysis provides completion_success/
        # completion_quality/completion_efficiency — confirm against
        # _analyze_task_completion (defined elsewhere in this class).
        task_analysis = self._analyze_task_completion(context)

        # Log task completion analysis
        log_decision(
            "subagent_stop",
            "task_completion",
            "completed" if task_analysis['completion_success'] else "failed",
            f"Quality: {task_analysis['completion_quality']:.2f}, Efficiency: {task_analysis['completion_efficiency']:.2f}"
        )

        # Measure delegation effectiveness
        delegation_analysis = self._analyze_delegation_effectiveness(context, task_analysis)

        # Log delegation effectiveness
        log_decision(
            "subagent_stop",
            "delegation_effectiveness",
            f"{delegation_analysis['delegation_value']:.2f}",
            f"Strategy: {delegation_analysis['delegation_strategy']}, Overhead: {delegation_analysis['coordination_overhead']:.1%}"
        )

        # Analyze coordination patterns
        coordination_analysis = self._analyze_coordination_patterns(context, delegation_analysis)

        # Generate optimization recommendations
        optimization_insights = self._generate_optimization_insights(
            context, task_analysis, delegation_analysis, coordination_analysis
        )

        # Record coordination learning
        self._record_coordination_learning(context, delegation_analysis, optimization_insights)

        # Update wave orchestration metrics
        wave_metrics = self._update_wave_orchestration_metrics(context, coordination_analysis)

        # Log wave orchestration only for multi-wave executions.
        if context.get('wave_total', 1) > 1:
            log_decision(
                "subagent_stop",
                "wave_orchestration",
                f"wave_{context.get('wave_position', 0) + 1}_of_{context.get('wave_total', 1)}",
                f"Performance: {wave_metrics['wave_performance']:.2f}, Efficiency: {wave_metrics['orchestration_efficiency']:.2f}"
            )

        # Generate coordination report
        coordination_report = self._generate_coordination_report(
            context, task_analysis, delegation_analysis, coordination_analysis,
            optimization_insights, wave_metrics
        )

        # Performance tracking against the configured ms budget.
        execution_time = (time.time() - start_time) * 1000
        coordination_report['performance_metrics'] = {
            'coordination_analysis_time_ms': execution_time,
            'target_met': execution_time < self.performance_target_ms,
            'coordination_efficiency': self._calculate_coordination_efficiency(context, execution_time)
        }

        # Log hook end with success
        log_hook_end(
            "subagent_stop",
            int(execution_time),
            True,
            {
                "task_success": task_analysis['completion_success'],
                "delegation_value": delegation_analysis['delegation_value'],
                "coordination_strategy": coordination_analysis['coordination_strategy'],
                "wave_enabled": context.get('wave_total', 1) > 1,
                "performance_target_met": execution_time < self.performance_target_ms
            }
        )

        return coordination_report

    except Exception as e:
        # Boundary handler: the hook must never crash Claude Code.
        log_error("subagent_stop", str(e), {"subagent_data": subagent_data})

        # Log hook end with failure
        log_hook_end("subagent_stop", int((time.time() - start_time) * 1000), False)

        # Graceful fallback on error
        return self._create_fallback_report(subagent_data, str(e))
|
||||
|
||||
def _extract_subagent_context(self, subagent_data: dict) -> dict:
    """Normalise raw subagent completion data and enrich it with metrics."""
    # (destination key, source key, default) — source names come from the
    # Claude Code payload, destination names are internal.
    field_map = (
        ('subagent_id', 'subagent_id', ''),
        ('parent_session_id', 'parent_session_id', ''),
        ('task_id', 'task_id', ''),
        ('task_type', 'task_type', 'unknown'),
        ('delegation_strategy', 'delegation_strategy', 'unknown'),
        ('execution_time_ms', 'execution_time_ms', 0),
        ('task_result', 'result', {}),
        ('task_status', 'status', 'unknown'),
        ('resources_used', 'resources', {}),
        ('coordination_data', 'coordination', {}),
        ('parallel_tasks', 'parallel_tasks', []),
        ('wave_context', 'wave_context', {}),
    )
    context = {dest: subagent_data.get(src, default)
               for dest, src, default in field_map}
    context['completion_timestamp'] = time.time()

    # Enrich with task characteristics (complexity, quality, flags).
    context.update(self._analyze_task_characteristics(context))

    # Enrich with coordination metrics.
    context.update(self._extract_coordination_metrics(context))

    return context
|
||||
|
||||
def _analyze_task_characteristics(self, context: dict) -> dict:
    """Analyze characteristics of the completed task.

    Returns complexity, success/failure flags, output quality, resource
    efficiency and whether cross-agent coordination was involved.
    """
    task_result = context.get('task_result', {})
    status = context.get('task_status')

    # Fix: compute coordination_required *before* scoring complexity and
    # pass it along, so the coordination factor inside
    # _calculate_task_complexity can actually fire. Previously the key
    # was only added to the returned dict after complexity had already
    # been computed from a context that lacked it.
    coordination_required = len(context.get('parallel_tasks', [])) > 0

    characteristics = {
        'task_complexity': self._calculate_task_complexity(
            {**context, 'coordination_required': coordination_required}),
        'task_success': status == 'completed',
        'partial_success': status == 'partial',
        'task_error': status == 'error',
        'output_quality': self._assess_output_quality(task_result),
        'resource_efficiency': self._calculate_resource_efficiency(context),
        'coordination_required': coordination_required,
    }

    return characteristics
|
||||
|
||||
def _calculate_task_complexity(self, context: dict) -> float:
|
||||
"""Calculate task complexity score (0.0 to 1.0)."""
|
||||
complexity_factors = []
|
||||
|
||||
# Task type complexity
|
||||
task_type = context.get('task_type', 'unknown')
|
||||
type_complexity = {
|
||||
'file_analysis': 0.3,
|
||||
'code_generation': 0.6,
|
||||
'multi_file_edit': 0.7,
|
||||
'architecture_analysis': 0.9,
|
||||
'system_refactor': 1.0
|
||||
}
|
||||
complexity_factors.append(type_complexity.get(task_type, 0.5))
|
||||
|
||||
# Execution time complexity
|
||||
execution_time = context.get('execution_time_ms', 0)
|
||||
if execution_time > 0:
|
||||
# Normalize to 5 seconds as high complexity
|
||||
time_complexity = min(execution_time / 5000, 1.0)
|
||||
complexity_factors.append(time_complexity)
|
||||
|
||||
# Resource usage complexity
|
||||
resources = context.get('resources_used', {})
|
||||
if resources:
|
||||
resource_complexity = max(
|
||||
resources.get('memory_mb', 0) / 1000, # 1GB = high
|
||||
resources.get('cpu_percent', 0) / 100
|
||||
)
|
||||
complexity_factors.append(min(resource_complexity, 1.0))
|
||||
|
||||
# Coordination complexity
|
||||
if context.get('coordination_required'):
|
||||
complexity_factors.append(0.4) # Coordination adds complexity
|
||||
|
||||
return statistics.mean(complexity_factors) if complexity_factors else 0.5
|
||||
|
||||
def _assess_output_quality(self, task_result: dict) -> float:
|
||||
"""Assess quality of task output (0.0 to 1.0)."""
|
||||
if not task_result:
|
||||
return 0.0
|
||||
|
||||
quality_indicators = []
|
||||
|
||||
# Check for quality metrics in result
|
||||
if 'quality_score' in task_result:
|
||||
quality_indicators.append(task_result['quality_score'])
|
||||
|
||||
# Check for validation results
|
||||
if task_result.get('validation_passed'):
|
||||
quality_indicators.append(0.8)
|
||||
elif task_result.get('validation_failed'):
|
||||
quality_indicators.append(0.3)
|
||||
|
||||
# Check for error indicators
|
||||
if task_result.get('errors'):
|
||||
error_penalty = min(len(task_result['errors']) * 0.2, 0.6)
|
||||
quality_indicators.append(1.0 - error_penalty)
|
||||
|
||||
# Check for completeness
|
||||
if task_result.get('completeness_ratio'):
|
||||
quality_indicators.append(task_result['completeness_ratio'])
|
||||
|
||||
# Default quality estimation
|
||||
if not quality_indicators:
|
||||
# Estimate quality from task status
|
||||
status = task_result.get('status', 'unknown')
|
||||
if status == 'success':
|
||||
quality_indicators.append(0.8)
|
||||
elif status == 'partial':
|
||||
quality_indicators.append(0.6)
|
||||
else:
|
||||
quality_indicators.append(0.4)
|
||||
|
||||
return statistics.mean(quality_indicators)
|
||||
|
||||
def _calculate_resource_efficiency(self, context: dict) -> float:
|
||||
"""Calculate resource usage efficiency."""
|
||||
resources = context.get('resources_used', {})
|
||||
execution_time = context.get('execution_time_ms', 1)
|
||||
|
||||
if not resources:
|
||||
return 0.7 # Assume moderate efficiency
|
||||
|
||||
# Memory efficiency (lower usage = higher efficiency)
|
||||
memory_mb = resources.get('memory_mb', 100)
|
||||
memory_efficiency = max(1.0 - (memory_mb / 1000), 0.1) # Penalty above 1GB
|
||||
|
||||
# CPU efficiency (moderate usage is optimal)
|
||||
cpu_percent = resources.get('cpu_percent', 50)
|
||||
if cpu_percent < 30:
|
||||
cpu_efficiency = cpu_percent / 30 # Underutilization penalty
|
||||
elif cpu_percent > 80:
|
||||
cpu_efficiency = (100 - cpu_percent) / 20 # Overutilization penalty
|
||||
else:
|
||||
cpu_efficiency = 1.0 # Optimal range
|
||||
|
||||
# Time efficiency (faster is better, but not at quality cost)
|
||||
expected_time = resources.get('expected_time_ms', execution_time)
|
||||
if expected_time > 0:
|
||||
time_efficiency = min(expected_time / execution_time, 1.0)
|
||||
else:
|
||||
time_efficiency = 0.8
|
||||
|
||||
return (memory_efficiency + cpu_efficiency + time_efficiency) / 3
|
||||
|
||||
def _extract_coordination_metrics(self, context: dict) -> dict:
|
||||
"""Extract coordination-specific metrics."""
|
||||
coordination_data = context.get('coordination_data', {})
|
||||
|
||||
return {
|
||||
'coordination_overhead_ms': coordination_data.get('overhead_ms', 0),
|
||||
'synchronization_points': coordination_data.get('sync_points', 0),
|
||||
'data_exchange_size': coordination_data.get('data_exchange_bytes', 0),
|
||||
'coordination_success': coordination_data.get('success', True),
|
||||
'parallel_efficiency': coordination_data.get('parallel_efficiency', 1.0),
|
||||
'wave_position': context.get('wave_context', {}).get('position', 0),
|
||||
'wave_total': context.get('wave_context', {}).get('total_waves', 1)
|
||||
}
|
||||
|
||||
def _analyze_task_completion(self, context: dict) -> dict:
|
||||
"""Analyze task completion performance."""
|
||||
task_analysis = {
|
||||
'completion_success': context.get('task_success', False),
|
||||
'completion_quality': context.get('output_quality', 0.0),
|
||||
'completion_efficiency': context.get('resource_efficiency', 0.0),
|
||||
'completion_time_performance': 0.0,
|
||||
'error_analysis': {},
|
||||
'success_factors': [],
|
||||
'improvement_areas': []
|
||||
}
|
||||
|
||||
# Time performance analysis
|
||||
execution_time = context.get('execution_time_ms', 0)
|
||||
task_type = context.get('task_type', 'unknown')
|
||||
|
||||
# Expected times by task type (rough estimates)
|
||||
expected_times = {
|
||||
'file_analysis': 500,
|
||||
'code_generation': 2000,
|
||||
'multi_file_edit': 1500,
|
||||
'architecture_analysis': 3000,
|
||||
'system_refactor': 5000
|
||||
}
|
||||
|
||||
expected_time = expected_times.get(task_type, 1000)
|
||||
if execution_time > 0:
|
||||
task_analysis['completion_time_performance'] = min(expected_time / execution_time, 1.0)
|
||||
|
||||
# Success factor identification
|
||||
if task_analysis['completion_success']:
|
||||
if task_analysis['completion_quality'] > 0.8:
|
||||
task_analysis['success_factors'].append('high_output_quality')
|
||||
if task_analysis['completion_efficiency'] > 0.8:
|
||||
task_analysis['success_factors'].append('efficient_resource_usage')
|
||||
if task_analysis['completion_time_performance'] > 0.8:
|
||||
task_analysis['success_factors'].append('fast_execution')
|
||||
|
||||
# Improvement area identification
|
||||
if task_analysis['completion_quality'] < 0.6:
|
||||
task_analysis['improvement_areas'].append('output_quality')
|
||||
if task_analysis['completion_efficiency'] < 0.6:
|
||||
task_analysis['improvement_areas'].append('resource_efficiency')
|
||||
if task_analysis['completion_time_performance'] < 0.6:
|
||||
task_analysis['improvement_areas'].append('execution_speed')
|
||||
|
||||
return task_analysis
|
||||
|
||||
def _analyze_delegation_effectiveness(self, context: dict, task_analysis: dict) -> dict:
|
||||
"""Analyze effectiveness of task delegation."""
|
||||
delegation_analysis = {
|
||||
'delegation_strategy': context.get('delegation_strategy', 'unknown'),
|
||||
'delegation_success': context.get('task_success', False),
|
||||
'delegation_efficiency': 0.0,
|
||||
'coordination_overhead': 0.0,
|
||||
'parallel_benefit': 0.0,
|
||||
'delegation_value': 0.0
|
||||
}
|
||||
|
||||
# Calculate delegation efficiency
|
||||
coordination_overhead = context.get('coordination_overhead_ms', 0)
|
||||
execution_time = context.get('execution_time_ms', 1)
|
||||
|
||||
if execution_time > 0:
|
||||
delegation_analysis['coordination_overhead'] = coordination_overhead / execution_time
|
||||
delegation_analysis['delegation_efficiency'] = max(
|
||||
1.0 - delegation_analysis['coordination_overhead'], 0.0
|
||||
)
|
||||
|
||||
# Calculate parallel benefit
|
||||
parallel_tasks = context.get('parallel_tasks', [])
|
||||
if len(parallel_tasks) > 1:
|
||||
# Estimate parallel benefit based on task coordination
|
||||
parallel_efficiency = context.get('parallel_efficiency', 1.0)
|
||||
theoretical_speedup = len(parallel_tasks)
|
||||
actual_speedup = theoretical_speedup * parallel_efficiency
|
||||
delegation_analysis['parallel_benefit'] = actual_speedup / theoretical_speedup
|
||||
|
||||
# Overall delegation value
|
||||
quality_factor = task_analysis['completion_quality']
|
||||
efficiency_factor = delegation_analysis['delegation_efficiency']
|
||||
parallel_factor = delegation_analysis['parallel_benefit'] if parallel_tasks else 1.0
|
||||
|
||||
delegation_analysis['delegation_value'] = (
|
||||
quality_factor * 0.4 +
|
||||
efficiency_factor * 0.3 +
|
||||
parallel_factor * 0.3
|
||||
)
|
||||
|
||||
return delegation_analysis
|
||||
|
||||
def _analyze_coordination_patterns(self, context: dict, delegation_analysis: dict) -> dict:
|
||||
"""Analyze coordination patterns and effectiveness."""
|
||||
coordination_analysis = {
|
||||
'coordination_strategy': 'unknown',
|
||||
'synchronization_effectiveness': 0.0,
|
||||
'data_flow_efficiency': 0.0,
|
||||
'wave_coordination_success': 0.0,
|
||||
'cross_agent_learning': 0.0,
|
||||
'coordination_patterns_detected': []
|
||||
}
|
||||
|
||||
# Determine coordination strategy
|
||||
if context.get('wave_total', 1) > 1:
|
||||
coordination_analysis['coordination_strategy'] = 'wave_orchestration'
|
||||
elif len(context.get('parallel_tasks', [])) > 1:
|
||||
coordination_analysis['coordination_strategy'] = 'parallel_coordination'
|
||||
else:
|
||||
coordination_analysis['coordination_strategy'] = 'single_agent'
|
||||
|
||||
# Synchronization effectiveness
|
||||
sync_points = context.get('synchronization_points', 0)
|
||||
coordination_success = context.get('coordination_success', True)
|
||||
|
||||
if sync_points > 0 and coordination_success:
|
||||
coordination_analysis['synchronization_effectiveness'] = 1.0
|
||||
elif sync_points > 0:
|
||||
coordination_analysis['synchronization_effectiveness'] = 0.5
|
||||
else:
|
||||
coordination_analysis['synchronization_effectiveness'] = 0.8 # No sync needed
|
||||
|
||||
# Data flow efficiency
|
||||
data_exchange = context.get('data_exchange_size', 0)
|
||||
if data_exchange > 0:
|
||||
# Efficiency based on data size (smaller is more efficient)
|
||||
coordination_analysis['data_flow_efficiency'] = max(1.0 - (data_exchange / 1000000), 0.1) # 1MB threshold
|
||||
else:
|
||||
coordination_analysis['data_flow_efficiency'] = 1.0 # No data exchange needed
|
||||
|
||||
# Wave coordination success
|
||||
wave_position = context.get('wave_position', 0)
|
||||
wave_total = context.get('wave_total', 1)
|
||||
|
||||
if wave_total > 1:
|
||||
# Success based on position completion and delegation value
|
||||
wave_progress = (wave_position + 1) / wave_total
|
||||
delegation_value = delegation_analysis.get('delegation_value', 0)
|
||||
coordination_analysis['wave_coordination_success'] = (wave_progress + delegation_value) / 2
|
||||
else:
|
||||
coordination_analysis['wave_coordination_success'] = 1.0
|
||||
|
||||
# Detect coordination patterns
|
||||
if delegation_analysis['delegation_value'] > 0.8:
|
||||
coordination_analysis['coordination_patterns_detected'].append('effective_delegation')
|
||||
|
||||
if coordination_analysis['synchronization_effectiveness'] > 0.8:
|
||||
coordination_analysis['coordination_patterns_detected'].append('efficient_synchronization')
|
||||
|
||||
if coordination_analysis['wave_coordination_success'] > 0.8:
|
||||
coordination_analysis['coordination_patterns_detected'].append('successful_wave_orchestration')
|
||||
|
||||
# Log detected patterns if any
|
||||
if coordination_analysis['coordination_patterns_detected']:
|
||||
log_decision(
|
||||
"subagent_stop",
|
||||
"coordination_patterns",
|
||||
str(len(coordination_analysis['coordination_patterns_detected'])),
|
||||
f"Patterns: {', '.join(coordination_analysis['coordination_patterns_detected'])}"
|
||||
)
|
||||
|
||||
return coordination_analysis
|
||||
|
||||
def _generate_optimization_insights(self, context: dict, task_analysis: dict,
|
||||
delegation_analysis: dict, coordination_analysis: dict) -> dict:
|
||||
"""Generate optimization insights for future delegations."""
|
||||
insights = {
|
||||
'delegation_optimizations': [],
|
||||
'coordination_improvements': [],
|
||||
'wave_strategy_recommendations': [],
|
||||
'performance_enhancements': [],
|
||||
'learning_opportunities': []
|
||||
}
|
||||
|
||||
# Delegation optimizations
|
||||
if delegation_analysis['delegation_value'] < 0.6:
|
||||
insights['delegation_optimizations'].extend([
|
||||
'Consider alternative delegation strategies',
|
||||
'Reduce coordination overhead',
|
||||
'Improve task partitioning'
|
||||
])
|
||||
|
||||
if delegation_analysis['coordination_overhead'] > 0.3:
|
||||
insights['delegation_optimizations'].append('Minimize coordination overhead')
|
||||
|
||||
# Coordination improvements
|
||||
if coordination_analysis['synchronization_effectiveness'] < 0.7:
|
||||
insights['coordination_improvements'].append('Improve synchronization mechanisms')
|
||||
|
||||
if coordination_analysis['data_flow_efficiency'] < 0.7:
|
||||
insights['coordination_improvements'].append('Optimize data exchange patterns')
|
||||
|
||||
# Wave strategy recommendations
|
||||
wave_success = coordination_analysis['wave_coordination_success']
|
||||
if wave_success < 0.6 and context.get('wave_total', 1) > 1:
|
||||
insights['wave_strategy_recommendations'].extend([
|
||||
'Adjust wave orchestration strategy',
|
||||
'Consider different task distribution',
|
||||
'Improve wave synchronization'
|
||||
])
|
||||
elif wave_success > 0.8:
|
||||
insights['wave_strategy_recommendations'].append('Wave orchestration working well - maintain strategy')
|
||||
|
||||
# Performance enhancements
|
||||
if task_analysis['completion_time_performance'] < 0.6:
|
||||
insights['performance_enhancements'].append('Optimize task execution speed')
|
||||
|
||||
if task_analysis['completion_efficiency'] < 0.6:
|
||||
insights['performance_enhancements'].append('Improve resource utilization')
|
||||
|
||||
return insights
|
||||
|
||||
def _record_coordination_learning(self, context: dict, delegation_analysis: dict,
                                  optimization_insights: dict):
    """Persist coordination outcomes into the learning engine.

    Always records a delegation-effectiveness event at project scope;
    additionally records a reusable task-pattern event at user scope
    when the task succeeded.
    """
    delegation_value = delegation_analysis['delegation_value']

    # Delegation effectiveness is always worth recording.
    effectiveness_payload = {
        'delegation_strategy': context.get('delegation_strategy'),
        'task_type': context.get('task_type'),
        'delegation_value': delegation_value,
        'coordination_overhead': delegation_analysis['coordination_overhead'],
        'parallel_benefit': delegation_analysis['parallel_benefit'],
    }
    self.learning_engine.record_learning_event(
        LearningType.PERFORMANCE_OPTIMIZATION,
        AdaptationScope.PROJECT,
        context,
        effectiveness_payload,
        delegation_value,
        0.8,
        {'hook': 'subagent_stop', 'coordination_learning': True},
    )

    # Successful tasks also contribute a reusable operation pattern.
    if context.get('task_success'):
        pattern_payload = {
            'successful_task_pattern': context.get('task_type'),
            'success_factors': optimization_insights.get('performance_enhancements', []),
            'delegation_effective': delegation_value > 0.7,
        }
        self.learning_engine.record_learning_event(
            LearningType.OPERATION_PATTERN,
            AdaptationScope.USER,
            context,
            pattern_payload,
            delegation_value,
            0.9,
            {'task_success_pattern': True},
        )
def _update_wave_orchestration_metrics(self, context: dict, coordination_analysis: dict) -> dict:
|
||||
"""Update wave orchestration performance metrics."""
|
||||
wave_metrics = {
|
||||
'wave_performance': 0.0,
|
||||
'orchestration_efficiency': 0.0,
|
||||
'wave_learning_value': 0.0,
|
||||
'next_wave_recommendations': []
|
||||
}
|
||||
|
||||
if context.get('wave_total', 1) > 1:
|
||||
wave_success = coordination_analysis['wave_coordination_success']
|
||||
wave_metrics['wave_performance'] = wave_success
|
||||
|
||||
# Calculate orchestration efficiency
|
||||
coordination_overhead = context.get('coordination_overhead_ms', 0)
|
||||
execution_time = context.get('execution_time_ms', 1)
|
||||
|
||||
if execution_time > 0:
|
||||
wave_metrics['orchestration_efficiency'] = max(
|
||||
1.0 - (coordination_overhead / execution_time), 0.0
|
||||
)
|
||||
|
||||
# Learning value from wave coordination
|
||||
wave_metrics['wave_learning_value'] = wave_success * 0.8 # Waves provide valuable learning
|
||||
|
||||
# Next wave recommendations
|
||||
if wave_success > 0.8:
|
||||
wave_metrics['next_wave_recommendations'].append('Continue current wave strategy')
|
||||
else:
|
||||
wave_metrics['next_wave_recommendations'].extend([
|
||||
'Adjust wave coordination strategy',
|
||||
'Improve inter-wave communication'
|
||||
])
|
||||
|
||||
return wave_metrics
|
||||
|
||||
def _calculate_coordination_efficiency(self, context: dict, execution_time_ms: float) -> float:
|
||||
"""Calculate coordination processing efficiency."""
|
||||
# Efficiency based on coordination overhead vs processing time
|
||||
coordination_overhead = context.get('coordination_overhead_ms', 0)
|
||||
task_execution_time = context.get('execution_time_ms', 1)
|
||||
|
||||
if task_execution_time > 0:
|
||||
coordination_ratio = coordination_overhead / task_execution_time
|
||||
coordination_efficiency = max(1.0 - coordination_ratio, 0.0)
|
||||
else:
|
||||
coordination_efficiency = 0.8
|
||||
|
||||
# Processing time efficiency
|
||||
processing_efficiency = min(100 / max(execution_time_ms, 1), 1.0) # Target: 100ms
|
||||
|
||||
return (coordination_efficiency + processing_efficiency) / 2
|
||||
|
||||
def _generate_coordination_report(self, context: dict, task_analysis: dict,
|
||||
delegation_analysis: dict, coordination_analysis: dict,
|
||||
optimization_insights: dict, wave_metrics: dict) -> dict:
|
||||
"""Generate comprehensive coordination report."""
|
||||
return {
|
||||
'subagent_id': context['subagent_id'],
|
||||
'task_id': context['task_id'],
|
||||
'completion_timestamp': context['completion_timestamp'],
|
||||
|
||||
'task_completion': {
|
||||
'success': task_analysis['completion_success'],
|
||||
'quality_score': task_analysis['completion_quality'],
|
||||
'efficiency_score': task_analysis['completion_efficiency'],
|
||||
'time_performance': task_analysis['completion_time_performance'],
|
||||
'success_factors': task_analysis['success_factors'],
|
||||
'improvement_areas': task_analysis['improvement_areas']
|
||||
},
|
||||
|
||||
'delegation_analysis': {
|
||||
'strategy': delegation_analysis['delegation_strategy'],
|
||||
'effectiveness': delegation_analysis['delegation_value'],
|
||||
'efficiency': delegation_analysis['delegation_efficiency'],
|
||||
'coordination_overhead': delegation_analysis['coordination_overhead'],
|
||||
'parallel_benefit': delegation_analysis['parallel_benefit']
|
||||
},
|
||||
|
||||
'coordination_metrics': {
|
||||
'strategy': coordination_analysis['coordination_strategy'],
|
||||
'synchronization_effectiveness': coordination_analysis['synchronization_effectiveness'],
|
||||
'data_flow_efficiency': coordination_analysis['data_flow_efficiency'],
|
||||
'patterns_detected': coordination_analysis['coordination_patterns_detected']
|
||||
},
|
||||
|
||||
'wave_orchestration': {
|
||||
'enabled': context.get('wave_total', 1) > 1,
|
||||
'wave_position': context.get('wave_position', 0),
|
||||
'total_waves': context.get('wave_total', 1),
|
||||
'wave_performance': wave_metrics['wave_performance'],
|
||||
'orchestration_efficiency': wave_metrics['orchestration_efficiency'],
|
||||
'learning_value': wave_metrics['wave_learning_value']
|
||||
},
|
||||
|
||||
'optimization_insights': optimization_insights,
|
||||
|
||||
'performance_summary': {
|
||||
'overall_effectiveness': (
|
||||
task_analysis['completion_quality'] * 0.4 +
|
||||
delegation_analysis['delegation_value'] * 0.3 +
|
||||
coordination_analysis['synchronization_effectiveness'] * 0.3
|
||||
),
|
||||
'delegation_success': delegation_analysis['delegation_value'] > 0.6,
|
||||
'coordination_success': coordination_analysis['synchronization_effectiveness'] > 0.7,
|
||||
'learning_value': wave_metrics.get('wave_learning_value', 0.5)
|
||||
},
|
||||
|
||||
'next_task_recommendations': {
|
||||
'continue_delegation': delegation_analysis['delegation_value'] > 0.6,
|
||||
'optimize_coordination': coordination_analysis['synchronization_effectiveness'] < 0.7,
|
||||
'adjust_wave_strategy': wave_metrics['wave_performance'] < 0.6,
|
||||
'suggested_improvements': optimization_insights.get('delegation_optimizations', [])[:2]
|
||||
},
|
||||
|
||||
'metadata': {
|
||||
'hook_version': 'subagent_stop_1.0',
|
||||
'analysis_timestamp': time.time(),
|
||||
'coordination_framework': 'task_management_mode'
|
||||
}
|
||||
}
|
||||
|
||||
def _create_fallback_report(self, subagent_data: dict, error: str) -> dict:
|
||||
"""Create fallback coordination report on error."""
|
||||
return {
|
||||
'subagent_id': subagent_data.get('subagent_id', 'unknown'),
|
||||
'task_id': subagent_data.get('task_id', 'unknown'),
|
||||
'completion_timestamp': time.time(),
|
||||
'error': error,
|
||||
'fallback_mode': True,
|
||||
|
||||
'task_completion': {
|
||||
'success': False,
|
||||
'quality_score': 0.0,
|
||||
'efficiency_score': 0.0,
|
||||
'error_occurred': True
|
||||
},
|
||||
|
||||
'delegation_analysis': {
|
||||
'strategy': 'unknown',
|
||||
'effectiveness': 0.0,
|
||||
'error': error
|
||||
},
|
||||
|
||||
'performance_metrics': {
|
||||
'coordination_analysis_time_ms': 0,
|
||||
'target_met': False,
|
||||
'error_occurred': True
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
def main():
    """Hook entry point: read subagent data from stdin, emit a JSON report.

    On any failure, prints a machine-readable error payload and exits
    non-zero so the calling process can detect the problem.
    """
    try:
        payload = json.loads(sys.stdin.read())
        hook = SubagentStopHook()
        report = hook.process_subagent_stop(payload)
        print(json.dumps(report, indent=2))
    except Exception as exc:
        error_payload = {
            'coordination_analysis_enabled': False,
            'error': str(exc),
            'fallback_mode': True,
        }
        print(json.dumps(error_payload, indent=2))
        sys.exit(1)
||||
# Script entry point: run the hook only when executed directly (not on import).
if __name__ == "__main__":
    main()
|
||||
Reference in New Issue
Block a user