MCP Server Implementation

This commit is contained in:
Cole Medin 2025-07-13 22:07:07 -05:00
parent 84d49cf30a
commit 87052aec1a
22 changed files with 5594 additions and 263 deletions

View File

@ -3,10 +3,10 @@ GITHUB_CLIENT_SECRET=<your github client secret>
COOKIE_ENCRYPTION_KEY=<your cookie encryption key>
# Add your Anthropic API key below for PRP parsing functionality
# ANTHROPIC_API_KEY=<your Anthropic API key>
ANTHROPIC_API_KEY=<your Anthropic API key>
# Optional: Override the default Anthropic model (defaults to claude-3-5-haiku-latest)
# ANTHROPIC_MODEL=claude-3-5-haiku-latest
# Anthropic model for PRP parsing (using Claude 3 Sonnet for better parsing accuracy)
ANTHROPIC_MODEL=claude-3-sonnet-20240229
# Database Connection String
# This should be a PostgreSQL connection string with full read/write permissions

View File

@ -25,6 +25,8 @@ We need:
All examples are already referenced in prp_mcp_base.md - do any additional research as needed.
Claude Task Master GitHub repo: https://github.com/eyaltoledano/claude-task-master
## OTHER CONSIDERATIONS:
- Do not use complex regex or complex parsing patterns, we use an LLM to parse PRPs.

File diff suppressed because it is too large Load Diff

View File

@ -43,7 +43,7 @@ with caching and rate limiting.
Use the specialized MCP PRP command to create a comprehensive implementation plan:
```bash
/prp-mcp-create INITIAL.md
/prp-mcp-create PRPs/INITIAL.md
```
**What this does:**
@ -198,7 +198,7 @@ functionality, data sources, and user interactions.
```bash
# Generate comprehensive PRP
/prp-mcp-create INITIAL.md
/prp-mcp-create PRPs/INITIAL.md
# Execute the PRP to build your server
/prp-mcp-execute PRPs/your-server-name.md
@ -262,4 +262,4 @@ The goal is to make MCP server development predictable and successful through co
---
**Ready to build your MCP server?** Start by editing `PRPs/INITIAL.md` and run `/prp-mcp-create INITIAL.md` to generate your comprehensive implementation plan.
**Ready to build your MCP server?** Start by editing `PRPs/INITIAL.md` and run `/prp-mcp-create PRPs/INITIAL.md` to generate your comprehensive implementation plan.

View File

@ -0,0 +1,5 @@
// Main database module - exports all database functionality
// Connection-scoped query helper (see ./database/utils)
export { withDatabase } from "./database/utils";
// Connection lifecycle: acquire and close the shared database handle
export { getDb, closeDb } from "./database/connection";
// SQL validation, write-operation detection, and error formatting helpers
export { validateSqlQuery, isWriteOperation, formatDatabaseError } from "./database/security";
// Row converters, list filters, Zod schemas, and SQL query templates
export * from "./database/models";

View File

@ -0,0 +1,139 @@
-- PostgreSQL schema for Taskmaster PRP Parser MCP Server
-- Migration 001: Initial schema creation

-- Extension for UUID generation (provides gen_random_uuid)
CREATE EXTENSION IF NOT EXISTS "pgcrypto";

-- Projects table: one row per project extracted from a PRP
-- NOTE(review): columns use plain TIMESTAMP (no zone); consider TIMESTAMPTZ
-- stored as UTC if clients span time zones — confirm before changing.
CREATE TABLE projects (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    name VARCHAR(255) NOT NULL UNIQUE,
    description TEXT,
    goals TEXT,
    target_users TEXT,
    why_statement TEXT,
    created_by VARCHAR(255) NOT NULL, -- GitHub username
    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);

-- Tasks table
-- Allowed status/priority values are enforced by named CHECK constraints,
-- kept in sync with the Zod enums in src/database/models.ts.
CREATE TABLE tasks (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    project_id UUID REFERENCES projects(id) ON DELETE CASCADE,
    title VARCHAR(500) NOT NULL,
    description TEXT,
    status VARCHAR(50) NOT NULL DEFAULT 'pending'
        CONSTRAINT tasks_status_check
        CHECK (status IN ('pending', 'in_progress', 'completed', 'blocked')),
    priority VARCHAR(20) NOT NULL DEFAULT 'medium'
        CONSTRAINT tasks_priority_check
        CHECK (priority IN ('low', 'medium', 'high', 'urgent')),
    assigned_to VARCHAR(255), -- GitHub username
    parent_task_id UUID REFERENCES tasks(id), -- For task hierarchies
    estimated_hours INTEGER,
    actual_hours INTEGER,
    due_date TIMESTAMP,
    acceptance_criteria TEXT[], -- Array of acceptance criteria
    created_by VARCHAR(255) NOT NULL,
    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);

-- Documentation table
CREATE TABLE documentation (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    project_id UUID REFERENCES projects(id) ON DELETE CASCADE,
    type VARCHAR(100) NOT NULL
        CONSTRAINT documentation_type_check
        CHECK (type IN ('goals', 'why', 'target_users', 'specifications', 'notes')),
    title VARCHAR(255) NOT NULL,
    content TEXT NOT NULL,
    version INTEGER DEFAULT 1, -- Incremented on documentation changes
    created_by VARCHAR(255) NOT NULL,
    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);

-- Tags table
CREATE TABLE tags (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    name VARCHAR(100) UNIQUE NOT NULL,
    color VARCHAR(7), -- Hex color code, e.g. '#ff8800'
    description TEXT,
    created_by VARCHAR(255) NOT NULL,
    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);

-- Task-Tag relationship table (many-to-many)
CREATE TABLE task_tags (
    task_id UUID REFERENCES tasks(id) ON DELETE CASCADE,
    tag_id UUID REFERENCES tags(id) ON DELETE CASCADE,
    PRIMARY KEY (task_id, tag_id)
);

-- Task dependencies table
CREATE TABLE task_dependencies (
    task_id UUID REFERENCES tasks(id) ON DELETE CASCADE,
    depends_on_task_id UUID REFERENCES tasks(id) ON DELETE CASCADE,
    dependency_type VARCHAR(50) NOT NULL DEFAULT 'blocks'
        CONSTRAINT task_dependencies_type_check
        CHECK (dependency_type IN ('blocks', 'related', 'subtask')),
    PRIMARY KEY (task_id, depends_on_task_id),
    CONSTRAINT no_self_dependency CHECK (task_id != depends_on_task_id)
);

-- Audit logs table (append-only change history)
CREATE TABLE audit_logs (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    table_name VARCHAR(100) NOT NULL,
    record_id UUID NOT NULL,
    action VARCHAR(50) NOT NULL
        CONSTRAINT audit_logs_action_check
        CHECK (action IN ('insert', 'update', 'delete')),
    old_values JSONB,
    new_values JSONB,
    changed_by VARCHAR(255) NOT NULL,
    changed_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);

-- Indexes for performance
CREATE INDEX idx_tasks_project_id ON tasks(project_id);
CREATE INDEX idx_tasks_status ON tasks(status);
CREATE INDEX idx_tasks_priority ON tasks(priority);
CREATE INDEX idx_tasks_assigned_to ON tasks(assigned_to);
CREATE INDEX idx_tasks_created_by ON tasks(created_by);
CREATE INDEX idx_tasks_parent_task_id ON tasks(parent_task_id);
CREATE INDEX idx_documentation_project_id ON documentation(project_id);
CREATE INDEX idx_documentation_type ON documentation(type);
CREATE INDEX idx_task_tags_task_id ON task_tags(task_id);
CREATE INDEX idx_task_tags_tag_id ON task_tags(tag_id);
CREATE INDEX idx_task_dependencies_task_id ON task_dependencies(task_id);
CREATE INDEX idx_task_dependencies_depends_on ON task_dependencies(depends_on_task_id);
CREATE INDEX idx_audit_logs_table_record ON audit_logs(table_name, record_id);
CREATE INDEX idx_audit_logs_changed_by ON audit_logs(changed_by);
CREATE INDEX idx_audit_logs_changed_at ON audit_logs(changed_at);

-- Trigger function for updating updated_at timestamps
CREATE OR REPLACE FUNCTION update_updated_at_column()
RETURNS TRIGGER AS $$
BEGIN
    NEW.updated_at = CURRENT_TIMESTAMP;
    RETURN NEW;
END;
$$ LANGUAGE plpgsql; -- unquoted per current PostgreSQL convention

-- Apply updated_at triggers to relevant tables
CREATE TRIGGER update_projects_updated_at BEFORE UPDATE ON projects
    FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
CREATE TRIGGER update_tasks_updated_at BEFORE UPDATE ON tasks
    FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
CREATE TRIGGER update_documentation_updated_at BEFORE UPDATE ON documentation
    FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();

-- Comments for documentation
COMMENT ON TABLE projects IS 'Main projects table storing project metadata from PRPs';
COMMENT ON TABLE tasks IS 'Individual tasks extracted from PRPs with full lifecycle management';
COMMENT ON TABLE documentation IS 'Project documentation including goals, whys, specifications';
COMMENT ON TABLE tags IS 'Reusable tags for categorizing tasks and projects';
COMMENT ON TABLE task_tags IS 'Many-to-many relationship between tasks and tags';
COMMENT ON TABLE task_dependencies IS 'Task dependency relationships for project planning';
COMMENT ON TABLE audit_logs IS 'Complete audit trail for all data changes';
COMMENT ON COLUMN tasks.acceptance_criteria IS 'Array of acceptance criteria extracted from PRP parsing';
COMMENT ON COLUMN tasks.status IS 'Current task status: pending, in_progress, completed, blocked';
COMMENT ON COLUMN tasks.priority IS 'Task priority: low, medium, high, urgent';
COMMENT ON COLUMN documentation.type IS 'Documentation type: goals, why, target_users, specifications, notes';
COMMENT ON COLUMN documentation.version IS 'Version control for documentation changes';

View File

@ -0,0 +1,330 @@
import { z } from "zod";
import type {
Project,
Task,
Documentation,
Tag,
TaskTag,
TaskDependency,
AuditLog,
TaskWithRelations,
ProjectOverview,
} from "../types/taskmaster.js";
// Database row type converters
/**
 * Map a raw `projects` row onto the Project model.
 * SQL NULLs (and other falsy values, matching the original `|| undefined`
 * behavior) become `undefined`; timestamp columns become Date objects.
 */
export function convertProjectRow(row: any): Project {
  // Collapse falsy column values to undefined for the model's optional fields.
  const opt = (value: any) => value || undefined;
  const project: Project = {
    id: row.id,
    name: row.name,
    description: opt(row.description),
    goals: opt(row.goals),
    target_users: opt(row.target_users),
    why_statement: opt(row.why_statement),
    created_by: row.created_by,
    created_at: new Date(row.created_at),
    updated_at: new Date(row.updated_at),
  };
  return project;
}
/**
 * Map a raw `tasks` row onto the Task model.
 * Optional columns collapse falsy values to undefined (note: this means an
 * estimated/actual hours value of 0 also becomes undefined, preserving the
 * original `|| undefined` semantics); due_date is converted only when present.
 */
export function convertTaskRow(row: any): Task {
  const opt = (value: any) => value || undefined;
  const task: Task = {
    id: row.id,
    project_id: row.project_id,
    title: row.title,
    description: opt(row.description),
    status: row.status,
    priority: row.priority,
    assigned_to: opt(row.assigned_to),
    parent_task_id: opt(row.parent_task_id),
    estimated_hours: opt(row.estimated_hours),
    actual_hours: opt(row.actual_hours),
    due_date: row.due_date ? new Date(row.due_date) : undefined,
    acceptance_criteria: opt(row.acceptance_criteria),
    created_by: row.created_by,
    created_at: new Date(row.created_at),
    updated_at: new Date(row.updated_at),
  };
  return task;
}
/**
 * Map a raw `documentation` row onto the Documentation model.
 * All columns are required here, so this is a straight field copy with
 * timestamp columns converted to Date objects.
 */
export function convertDocumentationRow(row: any): Documentation {
  const { id, project_id, type, title, content, version, created_by } = row;
  return {
    id,
    project_id,
    type,
    title,
    content,
    version,
    created_by,
    created_at: new Date(row.created_at),
    updated_at: new Date(row.updated_at),
  };
}
/**
 * Map a raw `tags` row onto the Tag model.
 * Optional columns (color, description) collapse falsy values to undefined,
 * preserving the original `|| undefined` behavior.
 */
export function convertTagRow(row: any): Tag {
  const opt = (value: any) => value || undefined;
  return {
    id: row.id,
    name: row.name,
    color: opt(row.color),
    description: opt(row.description),
    created_by: row.created_by,
    created_at: new Date(row.created_at),
  };
}
/**
 * Map a raw `task_dependencies` row onto the TaskDependency model.
 * Picks exactly the three model fields; any extra columns on the row
 * (e.g. joined titles) are dropped.
 */
export function convertTaskDependencyRow(row: any): TaskDependency {
  const { task_id, depends_on_task_id, dependency_type } = row;
  return { task_id, depends_on_task_id, dependency_type };
}
/**
 * Map a raw `audit_logs` row onto the AuditLog model.
 * old_values/new_values are JSONB columns that may be NULL; falsy values
 * collapse to undefined (matching the original `|| undefined` behavior).
 */
export function convertAuditLogRow(row: any): AuditLog {
  const opt = (value: any) => value || undefined;
  return {
    id: row.id,
    table_name: row.table_name,
    record_id: row.record_id,
    action: row.action,
    old_values: opt(row.old_values),
    new_values: opt(row.new_values),
    changed_by: row.changed_by,
    changed_at: new Date(row.changed_at),
  };
}
// Database operation helpers

/**
 * Optional filters for task list queries; all provided fields are combined
 * (ANDed) by the consuming query.
 */
export interface TaskListFilters {
  project_id?: string;
  status?: Task['status'];
  priority?: Task['priority'];
  assigned_to?: string;
  // Match tasks carrying this tag name
  tag?: string;
  parent_task_id?: string;
  // NOTE(review): has_dependencies, due_before, and due_after are not used by
  // LIST_TASKS_WITH_FILTERS in this file — confirm the intended consumer.
  has_dependencies?: boolean;
  due_before?: Date;
  due_after?: Date;
  created_by?: string;
  limit?: number;  // page size
  offset?: number; // page start
}

/** Optional filters for documentation list queries. */
export interface DocumentationListFilters {
  project_id?: string;
  type?: Documentation['type'];
  created_by?: string;
  limit?: number;
  offset?: number;
}

/** Optional filters for project list queries. */
export interface ProjectListFilters {
  created_by?: string;
  // Matched against name and description (ILIKE in LIST_PROJECTS)
  search_term?: string;
  limit?: number;
  offset?: number;
}
// Validation schemas for database operations
// These mirror the column constraints in migrations/001_initial_schema.sql
// (max lengths, enum values, defaults) and validate input before it is written.

/** Validates a task insert/update payload. */
export const DatabaseTaskSchema = z.object({
  project_id: z.string().uuid(),
  title: z.string().min(1).max(500),
  description: z.string().optional(),
  status: z.enum(['pending', 'in_progress', 'completed', 'blocked']).default('pending'),
  priority: z.enum(['low', 'medium', 'high', 'urgent']).default('medium'),
  assigned_to: z.string().optional(),
  parent_task_id: z.string().uuid().optional(),
  estimated_hours: z.number().int().positive().optional(),
  // actual_hours allows 0 (work logged can be zero); estimated must be > 0
  actual_hours: z.number().int().min(0).optional(),
  due_date: z.date().optional(),
  acceptance_criteria: z.array(z.string()).optional(),
  created_by: z.string().min(1),
});

/** Validates a project insert/update payload. */
export const DatabaseProjectSchema = z.object({
  name: z.string().min(1).max(255),
  description: z.string().optional(),
  goals: z.string().optional(),
  target_users: z.string().optional(),
  why_statement: z.string().optional(),
  created_by: z.string().min(1),
});

/** Validates a documentation insert/update payload. */
export const DatabaseDocumentationSchema = z.object({
  project_id: z.string().uuid(),
  type: z.enum(['goals', 'why', 'target_users', 'specifications', 'notes']),
  title: z.string().min(1).max(255),
  content: z.string().min(1),
  version: z.number().int().positive().default(1),
  created_by: z.string().min(1),
});

/** Validates a tag insert/update payload. */
export const DatabaseTagSchema = z.object({
  name: z.string().min(1).max(100),
  // Hex color code, e.g. '#ff8800'
  color: z.string().regex(/^#[0-9A-Fa-f]{6}$/).optional(),
  description: z.string().optional(),
  created_by: z.string().min(1),
});
// Query builders for complex operations

/** Flags selecting which related entities to hydrate alongside a task. */
export interface TaskQueryBuilder {
  withTags?: boolean;
  withDependencies?: boolean;
  withProject?: boolean;
  withParentTask?: boolean;
  withSubtasks?: boolean;
}

/** Options for assembling a project overview/statistics response. */
export interface ProjectStatsQuery {
  include_task_counts?: boolean;
  include_recent_activity?: boolean;
  include_upcoming_deadlines?: boolean;
  recent_activity_limit?: number;
  // Look-ahead window in days for upcoming deadlines
  upcoming_deadline_days?: number;
}

// Audit logging helpers

/** Shape of a row to be inserted into audit_logs. */
export interface AuditLogEntry {
  table_name: string;
  record_id: string;
  action: 'insert' | 'update' | 'delete';
  // Snapshots of the row before/after the change (absent for insert/delete respectively)
  old_values?: Record<string, any>;
  new_values?: Record<string, any>;
  changed_by: string;
}
/**
 * Build an AuditLogEntry from camelCase arguments, translating to the
 * snake_case field names used by the audit_logs table.
 */
export function createAuditLogEntry(
  tableName: string,
  recordId: string,
  action: AuditLogEntry['action'],
  changedBy: string,
  oldValues?: Record<string, any>,
  newValues?: Record<string, any>
): AuditLogEntry {
  const entry: AuditLogEntry = {
    table_name: tableName,
    record_id: recordId,
    action,
    old_values: oldValues,
    new_values: newValues,
    changed_by: changedBy,
  };
  return entry;
}
// SQL query templates
export const SQL_QUERIES = {
// Project queries
SELECT_PROJECT_BY_ID: `
SELECT * FROM projects WHERE id = $1
`,
SELECT_PROJECT_BY_NAME: `
SELECT * FROM projects WHERE name = $1
`,
LIST_PROJECTS: `
SELECT * FROM projects
WHERE ($1::text IS NULL OR created_by = $1)
AND ($2::text IS NULL OR name ILIKE '%' || $2 || '%' OR description ILIKE '%' || $2 || '%')
ORDER BY created_at DESC
LIMIT $3 OFFSET $4
`,
// Task queries
SELECT_TASK_BY_ID: `
SELECT t.*, p.name as project_name
FROM tasks t
LEFT JOIN projects p ON t.project_id = p.id
WHERE t.id = $1
`,
LIST_TASKS_WITH_FILTERS: `
SELECT DISTINCT t.*, p.name as project_name
FROM tasks t
LEFT JOIN projects p ON t.project_id = p.id
LEFT JOIN task_tags tt ON t.id = tt.task_id
LEFT JOIN tags tag ON tt.tag_id = tag.id
WHERE ($1::uuid IS NULL OR t.project_id = $1)
AND ($2::text IS NULL OR t.status = $2)
AND ($3::text IS NULL OR t.priority = $3)
AND ($4::text IS NULL OR t.assigned_to = $4)
AND ($5::text IS NULL OR tag.name = $5)
AND ($6::uuid IS NULL OR t.parent_task_id = $6)
AND ($7::text IS NULL OR t.created_by = $7)
ORDER BY t.created_at DESC
LIMIT $8 OFFSET $9
`,
SELECT_TASK_TAGS: `
SELECT tag.* FROM tags tag
JOIN task_tags tt ON tag.id = tt.tag_id
WHERE tt.task_id = $1
`,
SELECT_TASK_DEPENDENCIES: `
SELECT td.*, t.title as depends_on_title
FROM task_dependencies td
JOIN tasks t ON td.depends_on_task_id = t.id
WHERE td.task_id = $1
`,
// Documentation queries
LIST_DOCUMENTATION: `
SELECT * FROM documentation
WHERE ($1::uuid IS NULL OR project_id = $1)
AND ($2::text IS NULL OR type = $2)
AND ($3::text IS NULL OR created_by = $3)
ORDER BY created_at DESC
LIMIT $4 OFFSET $5
`,
// Tag queries
SELECT_TAG_BY_NAME: `
SELECT * FROM tags WHERE name = $1
`,
LIST_TAGS: `
SELECT * FROM tags ORDER BY name
`,
// Project overview queries
PROJECT_TASK_STATISTICS: `
SELECT
COUNT(*) as total_tasks,
COUNT(CASE WHEN status = 'completed' THEN 1 END) as completed_tasks,
COUNT(CASE WHEN status = 'in_progress' THEN 1 END) as in_progress_tasks,
COUNT(CASE WHEN status = 'pending' THEN 1 END) as pending_tasks,
COUNT(CASE WHEN status = 'blocked' THEN 1 END) as blocked_tasks,
CASE
WHEN COUNT(*) > 0 THEN
ROUND((COUNT(CASE WHEN status = 'completed' THEN 1 END) * 100.0 / COUNT(*)), 2)
ELSE 0
END as completion_percentage
FROM tasks
WHERE project_id = $1
`,
RECENT_TASKS: `
SELECT * FROM tasks
WHERE project_id = $1
ORDER BY updated_at DESC
LIMIT $2
`,
UPCOMING_DEADLINES: `
SELECT * FROM tasks
WHERE project_id = $1
AND due_date IS NOT NULL
AND due_date >= CURRENT_DATE
AND due_date <= CURRENT_DATE + INTERVAL '$2 days'
AND status != 'completed'
ORDER BY due_date ASC
`,
} as const;
// Type-safe query parameter helpers

/**
 * Maps a SQL_QUERIES key to the positional-parameter tuple that query expects.
 * Queries without an explicit mapping fall through to `any[]`.
 */
export type QueryParams<T extends keyof typeof SQL_QUERIES> =
  // [created_by, search_term, limit, offset]
  T extends 'LIST_PROJECTS' ? [string | null, string | null, number, number] :
  // [project_id, status, priority, assigned_to, tag, parent_task_id, created_by, limit, offset]
  T extends 'LIST_TASKS_WITH_FILTERS' ? [string | null, string | null, string | null, string | null, string | null, string | null, string | null, number, number] :
  // [project_id]
  T extends 'PROJECT_TASK_STATISTICS' ? [string] :
  // [project_id, limit]
  T extends 'RECENT_TASKS' ? [string, number] :
  // [project_id, days]
  T extends 'UPCOMING_DEADLINES' ? [string, number] :
  any[];

View File

@ -0,0 +1,387 @@
import type {
AnthropicRequest,
AnthropicResponse,
AnthropicError,
AnthropicClientConfig,
PRPParsingConfig,
RateLimitInfo,
AnthropicAPIMetrics,
RetryConfig,
} from "../types/anthropic.js";
import {
DEFAULT_CLIENT_CONFIG,
DEFAULT_RETRY_CONFIG,
} from "../types/anthropic.js";
import {
isAnthropicError,
isAnthropicResponse,
isRateLimitError,
isAuthenticationError,
isServerError,
} from "../types/anthropic.js";
/**
 * HTTP client for the Anthropic Messages API used for PRP parsing.
 * Adds request timeouts, retry with optional exponential backoff
 * (rate-limit and server errors), and in-memory usage metrics
 * (request counts, token totals, running-average latency).
 */
export class AnthropicClient {
  private config: AnthropicClientConfig;
  private retryConfig: RetryConfig;
  // Aggregated counters since construction (or last resetMetrics()).
  private metrics: AnthropicAPIMetrics;

  constructor(apiKey: string, config: Partial<AnthropicClientConfig> = {}) {
    // Spread order: caller-supplied config overrides defaults; apiKey is the
    // positional argument unless config also carries one (later spread wins).
    this.config = {
      apiKey,
      ...DEFAULT_CLIENT_CONFIG,
      ...config,
    } as AnthropicClientConfig;
    this.retryConfig = DEFAULT_RETRY_CONFIG;
    this.metrics = this.initializeMetrics();
  }

  /** Fresh, zeroed metrics object. */
  private initializeMetrics(): AnthropicAPIMetrics {
    return {
      total_requests: 0,
      successful_requests: 0,
      failed_requests: 0,
      total_input_tokens: 0,
      total_output_tokens: 0,
      average_response_time: 0,
      rate_limit_hits: 0,
    };
  }

  /**
   * Send one Messages API request (with internal retries) and record
   * success/failure, token usage, and wall-clock latency in the metrics.
   * Rethrows whatever error the retry loop surfaced.
   */
  async makeRequest(request: AnthropicRequest): Promise<AnthropicResponse> {
    const startTime = Date.now();
    this.metrics.total_requests++;
    try {
      const response = await this.makeRequestWithRetry(request);
      // Update metrics on success
      this.metrics.successful_requests++;
      this.updateResponseTimeMetrics(Date.now() - startTime);
      if (response.usage) {
        this.metrics.total_input_tokens += response.usage.input_tokens;
        this.metrics.total_output_tokens += response.usage.output_tokens;
      }
      return response;
    } catch (error) {
      this.metrics.failed_requests++;
      // Latency is recorded for failures too, so the average covers all requests.
      this.updateResponseTimeMetrics(Date.now() - startTime);
      throw error;
    }
  }

  /**
   * POST to {baseUrl}/messages with up to retryConfig.max_attempts tries.
   * Rate-limit and server errors are retried (when the corresponding flags are
   * enabled); authentication and other API errors fail immediately. Network
   * errors thrown by fetch go through isRetryableError().
   */
  private async makeRequestWithRetry(request: AnthropicRequest): Promise<AnthropicResponse> {
    let lastError: Error = new Error('No attempts made');
    for (let attempt = 1; attempt <= this.retryConfig.max_attempts; attempt++) {
      try {
        const response = await fetch(`${this.config.baseUrl}/messages`, {
          method: 'POST',
          headers: {
            'Content-Type': 'application/json',
            'x-api-key': this.config.apiKey,
            'anthropic-version': '2023-06-01',
          },
          body: JSON.stringify(request),
          // Abort the fetch after the configured timeout (assumed milliseconds
          // — confirm against AnthropicClientConfig).
          signal: AbortSignal.timeout(this.config.timeout),
        });
        const data = await response.json();
        if (!response.ok) {
          if (isAnthropicError(data)) {
            if (isRateLimitError(data)) {
              // Counted even when the retry below succeeds.
              this.metrics.rate_limit_hits++;
              if (this.retryConfig.retry_on_rate_limit && attempt < this.retryConfig.max_attempts) {
                await this.delay(this.calculateRetryDelay(attempt));
                continue;
              }
            }
            if (isAuthenticationError(data)) {
              throw new Error(`Anthropic authentication failed: ${data.error.message}`);
            }
            if (isServerError(data) && this.retryConfig.retry_on_server_error && attempt < this.retryConfig.max_attempts) {
              await this.delay(this.calculateRetryDelay(attempt));
              continue;
            }
            throw new Error(`Anthropic API error (${data.error.type}): ${data.error.message}`);
          }
          throw new Error(`HTTP ${response.status}: ${response.statusText}`);
        }
        if (!isAnthropicResponse(data)) {
          throw new Error('Invalid response format from Anthropic API');
        }
        return data;
      } catch (error) {
        lastError = error instanceof Error ? error : new Error(String(error));
        if (attempt < this.retryConfig.max_attempts) {
          // Check if this is a retryable error
          if (this.isRetryableError(lastError)) {
            await this.delay(this.calculateRetryDelay(attempt));
            continue;
          }
        }
        break; // Non-retryable error or max attempts reached
      }
    }
    throw lastError;
  }

  /**
   * Heuristic retry classification based on the error message text
   * (network/timeout/connection plus, when enabled, server-side errors).
   */
  private isRetryableError(error: Error): boolean {
    const message = error.message.toLowerCase();
    // Retry on network errors, timeouts, and certain server errors
    return (
      message.includes('timeout') ||
      message.includes('network') ||
      message.includes('connection') ||
      message.includes('rate_limit_error') ||
      (this.retryConfig.retry_on_server_error && (
        message.includes('api_error') ||
        message.includes('overloaded_error') ||
        message.includes('internal server error')
      ))
    );
  }

  /**
   * Delay before the next attempt: base_delay doubled per attempt when
   * exponential backoff is enabled, capped at max_delay.
   */
  private calculateRetryDelay(attempt: number): number {
    if (!this.retryConfig.exponential_backoff) {
      return this.retryConfig.base_delay;
    }
    const delay = this.retryConfig.base_delay * Math.pow(2, attempt - 1);
    return Math.min(delay, this.retryConfig.max_delay);
  }

  /** Promise-based sleep (ms). */
  private async delay(ms: number): Promise<void> {
    return new Promise(resolve => setTimeout(resolve, ms));
  }

  /**
   * Fold one response time into the running average over all finished
   * requests (successful + failed).
   */
  private updateResponseTimeMetrics(responseTime: number): void {
    const totalRequests = this.metrics.successful_requests + this.metrics.failed_requests;
    if (totalRequests === 1) {
      this.metrics.average_response_time = responseTime;
    } else {
      // Calculate running average
      this.metrics.average_response_time =
        (this.metrics.average_response_time * (totalRequests - 1) + responseTime) / totalRequests;
    }
  }

  /**
   * Parse a PRP document via the LLM: build the extraction prompt, call the
   * API, parse the response as JSON (with a fallback that extracts the first
   * {...} span from a malformed reply), and validate the structure.
   * Returns the validated parsed object; throws on any failure.
   */
  async parsePRP(
    prpContent: string,
    projectContext: string | undefined = undefined,
    config: Partial<PRPParsingConfig> = {}
  ): Promise<any> {
    const parsingConfig: PRPParsingConfig = {
      model: 'claude-3-sonnet-20240229',
      max_tokens: 4000,
      temperature: 0.1, // low temperature for deterministic extraction
      include_context: true,
      extract_acceptance_criteria: true,
      suggest_tags: true,
      estimate_hours: true,
      ...config,
    };
    const prompt = this.buildPRPParsingPrompt(prpContent, parsingConfig, projectContext);
    const request: AnthropicRequest = {
      model: parsingConfig.model,
      max_tokens: parsingConfig.max_tokens,
      temperature: parsingConfig.temperature,
      messages: [{
        role: 'user',
        content: prompt,
      }],
    };
    try {
      const response = await this.makeRequest(request);
      const content = response.content[0]?.text;
      if (!content) {
        throw new Error('Empty response from Anthropic API');
      }
      // Parse JSON response with error handling
      let parsedData: any;
      try {
        parsedData = JSON.parse(content);
      } catch (parseError) {
        // Try to extract JSON from potentially malformed response
        const jsonMatch = content.match(/\{[\s\S]*\}/);
        if (jsonMatch) {
          try {
            parsedData = JSON.parse(jsonMatch[0]);
          } catch {
            throw new Error(`Failed to parse LLM response as JSON: ${parseError instanceof Error ? parseError.message : String(parseError)}`);
          }
        } else {
          throw new Error(`LLM response does not contain valid JSON: ${content.substring(0, 200)}...`);
        }
      }
      return this.validateParsedData(parsedData);
    } catch (error) {
      throw new Error(`PRP parsing failed: ${error instanceof Error ? error.message : String(error)}`);
    }
  }

  /**
   * Assemble the full prompt sent to the model. The template interior below is
   * flush-left on purpose: its whitespace is part of the runtime string.
   * NOTE(review): "a expert" typo is in the live prompt text; left unchanged
   * here because this edit is documentation-only.
   */
  private buildPRPParsingPrompt(
    prpContent: string,
    config: PRPParsingConfig,
    projectContext?: string
  ): string {
    const contextSection = config.include_context && projectContext
      ? `\n\n**Project Context:**\n${projectContext}\n`
      : '';
    const acceptanceCriteriaInstruction = config.extract_acceptance_criteria
      ? '\n- Extract acceptance criteria for each task where available'
      : '';
    const hoursEstimationInstruction = config.estimate_hours
      ? '\n- Provide estimated hours for each task based on complexity'
      : '';
    const tagsInstruction = config.suggest_tags
      ? '\n- Suggest relevant tags for organization and categorization'
      : '';
    return `You are a expert project management assistant that extracts actionable tasks and project information from Product Requirement Prompts (PRPs).
${contextSection}
**Instructions:**
Please analyze the following PRP and extract:
1. Project information (name, description, goals, why statement, target users)
2. Actionable tasks with priorities, descriptions, and dependencies${acceptanceCriteriaInstruction}${hoursEstimationInstruction}
3. Supporting documentation organized by type
4. Suggested tags for organization${tagsInstruction}
**Important Requirements:**
- Extract ONLY actionable, specific tasks (not high-level goals)
- Prioritize tasks based on dependencies and importance
- Include detailed descriptions for complex tasks
- Identify task dependencies based on logical workflow
- Categorize documentation appropriately
**Response Format:**
Return ONLY valid JSON in this exact structure (no additional text or formatting):
{
"project_info": {
"name": "Clear, concise project name",
"description": "Brief project description",
"goals": "Main project goals and objectives",
"why_statement": "Why this project matters and its value proposition",
"target_users": "Who will use or benefit from this project"
},
"tasks": [
{
"title": "Specific, actionable task title",
"description": "Detailed task description with implementation guidance",
"priority": "low|medium|high|urgent",
"estimated_hours": 8,
"tags": ["relevant", "tags"],
"dependencies": ["Other task titles this depends on"],
"acceptance_criteria": ["Specific criteria for task completion"]
}
],
"documentation": [
{
"type": "goals|why|target_users|specifications|notes",
"title": "Document title",
"content": "Detailed content for this documentation section"
}
],
"suggested_tags": ["project", "feature", "backend", "frontend", "database"]
}
**PRP Content to Parse:**
${prpContent}
Remember: Return ONLY the JSON response with no additional formatting, explanations, or markdown.`;
  }

  /**
   * Structural validation of the LLM's parsed output: required top-level
   * fields, string project_info fields, task title/description/priority
   * checks, and array-ness of documentation and suggested_tags.
   * Returns the data unchanged when valid; throws a descriptive error otherwise.
   */
  private validateParsedData(data: any): any {
    // Basic structure validation
    if (!data || typeof data !== 'object') {
      throw new Error('Response is not a valid object');
    }
    const requiredFields = ['project_info', 'tasks', 'documentation', 'suggested_tags'];
    for (const field of requiredFields) {
      if (!(field in data)) {
        throw new Error(`Missing required field: ${field}`);
      }
    }
    // Validate project_info
    if (!data.project_info || typeof data.project_info !== 'object') {
      throw new Error('project_info must be an object');
    }
    const requiredProjectFields = ['name', 'description', 'goals', 'why_statement', 'target_users'];
    for (const field of requiredProjectFields) {
      if (typeof data.project_info[field] !== 'string') {
        throw new Error(`project_info.${field} must be a string`);
      }
    }
    // Validate tasks array
    if (!Array.isArray(data.tasks)) {
      throw new Error('tasks must be an array');
    }
    for (let i = 0; i < data.tasks.length; i++) {
      const task = data.tasks[i];
      if (!task.title || typeof task.title !== 'string') {
        throw new Error(`Task ${i}: title is required and must be a string`);
      }
      if (!task.description || typeof task.description !== 'string') {
        throw new Error(`Task ${i}: description is required and must be a string`);
      }
      if (!['low', 'medium', 'high', 'urgent'].includes(task.priority)) {
        throw new Error(`Task ${i}: priority must be one of: low, medium, high, urgent`);
      }
    }
    // Validate documentation array
    if (!Array.isArray(data.documentation)) {
      throw new Error('documentation must be an array');
    }
    // Validate suggested_tags array
    if (!Array.isArray(data.suggested_tags)) {
      throw new Error('suggested_tags must be an array');
    }
    return data;
  }

  /** Copy of the current metrics (callers cannot mutate internal state). */
  getMetrics(): AnthropicAPIMetrics {
    return { ...this.metrics };
  }

  /** Zero all metrics counters. */
  resetMetrics(): void {
    this.metrics = this.initializeMetrics();
  }

  /** Merge partial overrides into the retry configuration. */
  updateRetryConfig(config: Partial<RetryConfig>): void {
    this.retryConfig = { ...this.retryConfig, ...config };
  }
}

View File

@ -0,0 +1,285 @@
import type { PRPParsingConfig } from "../types/anthropic.js";
/**
 * Shared system prompt prepended by every prompt builder in this module.
 * Template interior is flush-left on purpose: whitespace is part of the
 * runtime string.
 */
export const PRP_PARSING_SYSTEM_PROMPT = `You are an expert project management assistant specialized in parsing Product Requirement Prompts (PRPs) to extract actionable tasks, project metadata, and documentation.
Your role is to:
1. Identify and extract specific, actionable tasks from complex project descriptions
2. Categorize and prioritize tasks based on dependencies and importance
3. Extract project metadata including goals, target users, and value propositions
4. Organize supporting documentation by type and importance
5. Suggest relevant tags for project organization
Key principles:
- Focus on ACTIONABLE tasks, not high-level goals or concepts
- Maintain logical task dependencies and workflow order
- Provide realistic time estimates based on task complexity
- Extract detailed acceptance criteria when available
- Preserve important context and rationale`;

/** Output-format rules appended to extraction prompts (JSON-only responses). */
export const PRP_PARSING_FORMAT_INSTRUCTIONS = `**Response Requirements:**
- Return ONLY valid JSON with no additional text, markdown, or formatting
- Follow the exact schema structure provided
- Ensure all required fields are present and properly typed
- Use descriptive but concise language
- Maintain consistency in naming and terminology`;
/**
 * Build the full PRP-parsing prompt: system prompt, analysis instructions,
 * JSON schema, and the PRP content. Boolean flags on `config` toggle the
 * optional instruction lines (acceptance criteria, hour estimates, tags).
 * Template interior is flush-left on purpose — its whitespace is part of the
 * runtime string.
 */
export function buildPRPParsingPrompt(
  prpContent: string,
  projectContext?: string,
  config: PRPParsingConfig = {
    model: 'claude-3-sonnet-20240229',
    max_tokens: 4000,
    temperature: 0.1,
    include_context: true,
    extract_acceptance_criteria: true,
    suggest_tags: true,
    estimate_hours: true,
  }
): string {
  // Each optional section contributes an empty string when disabled.
  const contextSection = config.include_context && projectContext
    ? `\n\n**Project Context:**\n${projectContext}\n`
    : '';
  const acceptanceCriteriaInstruction = config.extract_acceptance_criteria
    ? '\n- Extract specific acceptance criteria for each task'
    : '';
  const hoursEstimationInstruction = config.estimate_hours
    ? '\n- Provide realistic hour estimates based on task complexity (consider: research, implementation, testing, documentation)'
    : '';
  const tagsInstruction = config.suggest_tags
    ? '\n- Suggest 3-8 relevant tags for categorization (e.g., frontend, backend, api, database, ui, testing, deployment)'
    : '';
  return `${PRP_PARSING_SYSTEM_PROMPT}
${contextSection}
**Analysis Instructions:**
Please analyze the PRP below and extract:
1. **Project Information:**
- Clear, marketable project name
- Concise project description (1-2 sentences)
- Primary goals and objectives
- Value proposition and why statement
- Target user demographics and use cases
2. **Actionable Tasks:**
- Break down into specific, implementable tasks
- Assign priorities: urgent (critical path), high (important), medium (standard), low (nice-to-have)
- Identify task dependencies based on logical workflow${acceptanceCriteriaInstruction}${hoursEstimationInstruction}
3. **Documentation:**
- goals: Primary objectives and success metrics
- why: Business case and value proposition
- target_users: User personas and use cases
- specifications: Technical requirements and constraints
- notes: Additional context and considerations
4. **Organization:**${tagsInstruction}
${PRP_PARSING_FORMAT_INSTRUCTIONS}
**JSON Schema:**
{
"project_info": {
"name": "string (max 255 chars)",
"description": "string (1-2 sentences)",
"goals": "string (detailed objectives)",
"why_statement": "string (value proposition)",
"target_users": "string (user demographics and use cases)"
},
"tasks": [
{
"title": "string (specific, actionable task)",
"description": "string (implementation guidance and context)",
"priority": "urgent|high|medium|low",
"estimated_hours": number,
"tags": ["string", ...],
"dependencies": ["task_title", ...],
"acceptance_criteria": ["specific completion criteria", ...]
}
],
"documentation": [
{
"type": "goals|why|target_users|specifications|notes",
"title": "string",
"content": "string (detailed content)"
}
],
"suggested_tags": ["string", ...]
}
**PRP Content:**
${prpContent}
**Response:** (JSON only, no additional text)`;
}
/**
 * Build a prompt that extracts ONLY new tasks from a PRP, listing
 * `existingTasks` so the model avoids duplicating them.
 * Template interior is flush-left on purpose (runtime string whitespace).
 */
export function buildTaskExtractionPrompt(
  prpContent: string,
  existingTasks: string[] = []
): string {
  const existingTasksSection = existingTasks.length > 0
    ? `\n\n**Existing Tasks to Avoid Duplicating:**\n${existingTasks.map(task => `- ${task}`).join('\n')}\n`
    : '';
  return `${PRP_PARSING_SYSTEM_PROMPT}
**Task Extraction Focus:**
Extract ONLY new, actionable tasks from this PRP content. Focus on:
- Specific implementation steps
- Testable deliverables
- Discrete work units (4-40 hours each)
- Clear dependencies and prerequisites
${existingTasksSection}
**Instructions:**
Return a JSON array of task objects following this schema:
{
"tasks": [
{
"title": "Specific, actionable task title",
"description": "Detailed implementation guidance",
"priority": "urgent|high|medium|low",
"estimated_hours": number,
"tags": ["relevant", "tags"],
"dependencies": ["prerequisite_task_titles"],
"acceptance_criteria": ["testable completion criteria"]
}
]
}
**PRP Content:**
${prpContent}
**Response:** (JSON only)`;
}
/**
 * Build a Claude prompt that extracts only high-level project metadata
 * (name, description, goals, value proposition, target users) plus related
 * documentation entries from the given PRP content.
 *
 * @param prpContent - Raw PRP text to analyze.
 * @returns Prompt string instructing the model to respond with JSON only.
 */
export function buildProjectMetadataPrompt(prpContent: string): string {
  return `${PRP_PARSING_SYSTEM_PROMPT}
**Project Metadata Extraction:**
Extract high-level project information from this PRP:
**Instructions:**
Focus on business objectives, target audience, and value proposition.
Return JSON following this schema:
{
"project_info": {
"name": "Clear, professional project name",
"description": "Concise project summary (1-2 sentences)",
"goals": "Primary objectives and success metrics",
"why_statement": "Business value and motivation",
"target_users": "User personas and use cases"
},
"documentation": [
{
"type": "goals|why|target_users|specifications",
"title": "Document title",
"content": "Detailed content"
}
]
}
**PRP Content:**
${prpContent}
**Response:** (JSON only)`;
}
/**
 * Build a Claude prompt asking the model to review and improve a set of
 * previously extracted tasks (specificity, estimates, dependencies,
 * acceptance criteria, priorities).
 *
 * @param tasks          - Task objects to refine (serialized into the prompt).
 * @param projectContext - Short description of the project for context.
 * @returns Prompt string instructing the model to respond with JSON only.
 */
export function buildTaskRefinementPrompt(
  tasks: any[],
  projectContext: string
): string {
  // Pretty-print the tasks so the model sees readable, well-delimited JSON.
  const serializedTasks = JSON.stringify(tasks, null, 2);
  return `${PRP_PARSING_SYSTEM_PROMPT}
**Task Refinement:**
Review and improve these extracted tasks for a project: ${projectContext}
**Current Tasks:**
${serializedTasks}
**Improvements Needed:**
1. Ensure all tasks are specific and actionable
2. Validate time estimates are realistic
3. Check dependencies are logical and complete
4. Verify acceptance criteria are testable
5. Ensure priority assignments make sense
**Instructions:**
Return refined tasks in the same JSON format with improvements applied.
**Response:** (JSON only)`;
}
// Validation prompts for quality assurance
/**
 * Build a prompt that asks the model to validate previously parsed PRP data
 * against the original PRP text, returning a JSON validation report with a
 * completeness score, issues, missing tasks, and suggested improvements.
 *
 * @param parsedData  - Structured data produced by an earlier parsing pass.
 * @param originalPRP - Original PRP text; only the first 500 chars are embedded.
 */
export function buildValidationPrompt(
  parsedData: any,
  originalPRP: string
): string {
  // Truncate the PRP so the validation prompt stays compact.
  const prpExcerpt = originalPRP.slice(0, 500);
  const serializedData = JSON.stringify(parsedData, null, 2);
  return `Validate this parsed PRP data for completeness and accuracy:
**Original PRP (first 500 chars):**
${prpExcerpt}...
**Parsed Data:**
${serializedData}
**Validation Checklist:**
- [ ] All major features/requirements captured as tasks
- [ ] Task priorities reflect true importance and dependencies
- [ ] Time estimates are realistic (4-40 hours per task)
- [ ] Acceptance criteria are specific and testable
- [ ] Project metadata accurately reflects the PRP intent
- [ ] No critical tasks or requirements missing
Return a validation report in JSON format:
{
"is_valid": boolean,
"completeness_score": number (0-100),
"issues": ["issue descriptions"],
"missing_tasks": ["tasks that should be added"],
"improvements": ["suggested improvements"]
}`;
}
// Specialized prompts for different PRP types
/**
 * Extra parsing guidance per project category, appended to the base prompt
 * when the PRP's project type is known.
 */
export const SPECIALIZED_PROMPTS = {
  web_application: `Additional focus for web applications:
- Separate frontend and backend tasks
- Include database schema and API design
- Consider authentication, authorization, and security
- Plan for responsive design and accessibility
- Include deployment and hosting considerations`,
  mobile_application: `Additional focus for mobile applications:
- Platform-specific considerations (iOS/Android)
- App store submission and approval process
- Device-specific features and permissions
- Performance optimization for mobile
- Offline functionality and data sync`,
  data_pipeline: `Additional focus for data pipelines:
- Data ingestion, transformation, and storage
- Data quality validation and error handling
- Monitoring and alerting systems
- Scalability and performance optimization
- Data governance and compliance requirements`,
  api_service: `Additional focus for API services:
- API design and documentation
- Authentication and rate limiting
- Input validation and error handling
- Testing strategies (unit, integration, load)
- Monitoring, logging, and observability`,
};

/**
 * Look up the specialized guidance for a project type.
 * Returns the empty string when no guidance is registered for the key.
 */
export function getSpecializedPrompt(projectType: keyof typeof SPECIALIZED_PROMPTS): string {
  const specializedGuidance = SPECIALIZED_PROMPTS[projectType];
  return specializedGuidance ?? '';
}

View File

@ -0,0 +1,323 @@
import { AnthropicClient } from "./anthropic-client.js";
import { buildPRPParsingPrompt, buildValidationPrompt } from "./prompts.js";
import type { ParsedPRPData } from "../types/taskmaster.js";
import type { PRPParsingConfig } from "../types/anthropic.js";
/** Per-call options controlling how a PRP is parsed. */
export interface PRPParsingOptions {
  // Optional project description passed through to the parsing prompt.
  project_context?: string;
  // When true, run a validation pass after parsing (report attached to result).
  auto_validate?: boolean;
  // When true, include the validation report in the result even without auto_validate.
  include_validation_report?: boolean;
  // Partial overrides merged over the parser's default PRPParsingConfig.
  parsing_config?: Partial<PRPParsingConfig>;
}
/** Result of parsing a single PRP, including optional validation and metrics. */
export interface PRPParsingResult {
  // Structured project/tasks/documentation data extracted from the PRP.
  parsed_data: ParsedPRPData;
  // Present only when validation was requested (see PRPParsingOptions).
  validation_report?: {
    is_valid: boolean;
    // 0-100 completeness estimate produced by the validation pass.
    completeness_score: number;
    issues: string[];
    missing_tasks: string[];
    improvements: string[];
  };
  // Summary statistics about the parse itself.
  metrics: {
    parsing_time_ms: number;
    task_count: number;
    documentation_count: number;
    suggested_tags_count: number;
    // Sum of estimated_hours across tasks that carry an estimate.
    estimated_total_hours: number;
  };
}
/**
 * Parses PRP (Product Requirement Prompt) documents into structured project
 * data using an Anthropic Claude model, then sanitizes, validates, and
 * measures the output.
 */
export class PRPParser {
  // Project-local wrapper around the Anthropic API.
  private anthropicClient: AnthropicClient;
  // Baseline parsing configuration; merged with per-call overrides in parsePRP().
  private defaultConfig: PRPParsingConfig;

  /**
   * @param anthropicApiKey - API key used to construct the Anthropic client.
   * @param model           - Claude model id; defaults to claude-3-sonnet-20240229.
   */
  constructor(anthropicApiKey: string, model: string = 'claude-3-sonnet-20240229') {
    this.anthropicClient = new AnthropicClient(anthropicApiKey);
    this.defaultConfig = {
      model,
      max_tokens: 4000,
      // Low temperature for deterministic, extraction-style output.
      temperature: 0.1,
      include_context: true,
      extract_acceptance_criteria: true,
      suggest_tags: true,
      estimate_hours: true,
    };
  }

  /**
   * Parse a single PRP document.
   *
   * @param prpContent - PRP text (10 to 100,000 characters).
   * @param options    - Optional context, config overrides, and validation flags.
   * @returns Parsed data plus metrics; validation report when requested.
   * @throws Error wrapping any input-validation or API failure.
   */
  async parsePRP(
    prpContent: string,
    options: PRPParsingOptions = {}
  ): Promise<PRPParsingResult> {
    const startTime = Date.now();
    try {
      // Validate input
      this.validateInput(prpContent);
      // Merge configuration
      const config: PRPParsingConfig = {
        ...this.defaultConfig,
        ...options.parsing_config,
      };
      // Parse PRP content
      const parsedData = await this.performParsing(prpContent, options.project_context, config);
      // Calculate metrics
      const metrics = this.calculateMetrics(parsedData, Date.now() - startTime);
      // Prepare result
      const result: PRPParsingResult = {
        parsed_data: parsedData,
        metrics,
      };
      // Optional validation
      if (options.auto_validate || options.include_validation_report) {
        result.validation_report = await this.validateParsedData(parsedData, prpContent);
      }
      return result;
    } catch (error) {
      throw new Error(`PRP parsing failed: ${error instanceof Error ? error.message : String(error)}`);
    }
  }

  /**
   * Reject inputs that are not strings, too short to be meaningful,
   * or over the 100,000-character limit.
   */
  private validateInput(prpContent: string): void {
    if (!prpContent || typeof prpContent !== 'string') {
      throw new Error('PRP content must be a non-empty string');
    }
    if (prpContent.trim().length < 10) {
      throw new Error('PRP content is too short to parse meaningfully');
    }
    if (prpContent.length > 100000) {
      throw new Error('PRP content exceeds maximum length (100,000 characters)');
    }
  }

  /**
   * Call the Anthropic client and post-process its output, translating common
   * API failures (rate limit, auth, timeout) into user-friendly errors.
   */
  private async performParsing(
    prpContent: string,
    projectContext?: string,
    config: PRPParsingConfig = this.defaultConfig
  ): Promise<ParsedPRPData> {
    try {
      const parsedData = await this.anthropicClient.parsePRP(prpContent, projectContext, config);
      // Post-process and validate the parsed data
      return this.postProcessParsedData(parsedData);
    } catch (error) {
      if (error instanceof Error) {
        // Enhance error messages for common issues
        if (error.message.includes('rate_limit')) {
          throw new Error('API rate limit exceeded. Please try again in a few moments.');
        }
        if (error.message.includes('authentication')) {
          throw new Error('API authentication failed. Please check your Anthropic API key.');
        }
        if (error.message.includes('timeout')) {
          throw new Error('API request timed out. Please try with shorter content or try again.');
        }
      }
      throw error;
    }
  }

  /**
   * Coerce untrusted model output into a well-formed ParsedPRPData:
   * fill defaults, drop incomplete records, clamp text lengths, and
   * normalize enum-like fields.
   */
  private postProcessParsedData(data: any): ParsedPRPData {
    // Ensure all required fields exist with defaults
    const processedData: ParsedPRPData = {
      project_info: {
        name: data.project_info?.name || 'Untitled Project',
        description: data.project_info?.description || '',
        goals: data.project_info?.goals || '',
        why_statement: data.project_info?.why_statement || '',
        target_users: data.project_info?.target_users || '',
      },
      tasks: [],
      documentation: [],
      suggested_tags: [],
    };
    // Process tasks with validation and cleanup
    if (Array.isArray(data.tasks)) {
      processedData.tasks = data.tasks
        .filter((task: any) => task && task.title && task.description)
        .map((task: any, index: number) => ({
          title: this.cleanText(task.title, 500),
          description: this.cleanText(task.description, 2000),
          priority: this.validatePriority(task.priority),
          estimated_hours: this.validateEstimatedHours(task.estimated_hours),
          tags: this.processArray(task.tags, 10),
          dependencies: this.processArray(task.dependencies, 20),
          acceptance_criteria: this.processArray(task.acceptance_criteria, 10),
        }));
    }
    // Process documentation
    if (Array.isArray(data.documentation)) {
      processedData.documentation = data.documentation
        .filter((doc: any) => doc && doc.type && doc.title && doc.content)
        .map((doc: any) => ({
          type: this.validateDocumentationType(doc.type),
          title: this.cleanText(doc.title, 255),
          content: this.cleanText(doc.content, 10000),
        }));
    }
    // Process suggested tags
    if (Array.isArray(data.suggested_tags)) {
      processedData.suggested_tags = data.suggested_tags
        .filter((tag: any) => typeof tag === 'string' && tag.trim().length > 0)
        .map((tag: string) => this.cleanText(tag, 50))
        .slice(0, 20); // Limit to 20 tags
    }
    return processedData;
  }

  // Trim, collapse all runs of whitespace (including newlines) to single
  // spaces, and truncate to maxLength. Non-strings become ''.
  private cleanText(text: string, maxLength: number): string {
    if (!text || typeof text !== 'string') return '';
    return text
      .trim()
      .replace(/\s+/g, ' ') // Normalize whitespace
      .substring(0, maxLength);
  }

  // Normalize a priority value; anything unrecognized falls back to 'medium'.
  private validatePriority(priority: any): 'low' | 'medium' | 'high' | 'urgent' {
    const validPriorities = ['low', 'medium', 'high', 'urgent'];
    return validPriorities.includes(priority) ? priority : 'medium';
  }

  // Accept only positive numbers up to 1000 hours; round to whole hours.
  // Anything else is treated as "no estimate".
  private validateEstimatedHours(hours: any): number | undefined {
    if (typeof hours === 'number' && hours > 0 && hours <= 1000) {
      return Math.round(hours);
    }
    return undefined;
  }

  // Normalize a documentation type; unknown values fall back to 'notes'.
  private validateDocumentationType(type: any): 'goals' | 'why' | 'target_users' | 'specifications' | 'notes' {
    const validTypes = ['goals', 'why', 'target_users', 'specifications', 'notes'];
    return validTypes.includes(type) ? type : 'notes';
  }

  // Keep only non-empty strings, clean each to 200 chars, and cap the
  // array at maxLength entries. Non-arrays become [].
  private processArray(arr: any, maxLength: number): string[] {
    if (!Array.isArray(arr)) return [];
    return arr
      .filter(item => typeof item === 'string' && item.trim().length > 0)
      .map(item => this.cleanText(item, 200))
      .slice(0, maxLength);
  }

  // Summarize counts and total estimated hours for the parse result.
  private calculateMetrics(data: ParsedPRPData, parsingTimeMs: number) {
    const totalHours = data.tasks
      .filter(task => task.estimated_hours)
      .reduce((sum, task) => sum + (task.estimated_hours || 0), 0);
    return {
      parsing_time_ms: parsingTimeMs,
      task_count: data.tasks.length,
      documentation_count: data.documentation.length,
      suggested_tags_count: data.suggested_tags.length,
      estimated_total_hours: totalHours,
    };
  }

  /**
   * Ask the model to grade the parsed data against the original PRP.
   * Best-effort: if the validation call or JSON parse fails, a permissive
   * default report (is_valid=true, score 75) is returned with the failure
   * recorded in `issues` rather than propagating an error.
   */
  private async validateParsedData(
    parsedData: ParsedPRPData,
    originalPRP: string
  ): Promise<{
    is_valid: boolean;
    completeness_score: number;
    issues: string[];
    missing_tasks: string[];
    improvements: string[];
  }> {
    try {
      const validationPrompt = buildValidationPrompt(parsedData, originalPRP);
      const validationResponse = await this.anthropicClient.makeRequest({
        model: this.defaultConfig.model,
        max_tokens: 1000,
        temperature: 0.2,
        messages: [{
          role: 'user',
          content: validationPrompt,
        }],
      });
      const validationText = validationResponse.content[0]?.text;
      if (!validationText) {
        throw new Error('Empty validation response');
      }
      // NOTE(review): assumes the model returns bare JSON with no markdown
      // fences; a fenced response will land in the catch fallback below.
      const validationData = JSON.parse(validationText);
      return {
        is_valid: validationData.is_valid || false,
        // Clamp the score into [0, 100].
        completeness_score: Math.max(0, Math.min(100, validationData.completeness_score || 0)),
        issues: Array.isArray(validationData.issues) ? validationData.issues : [],
        missing_tasks: Array.isArray(validationData.missing_tasks) ? validationData.missing_tasks : [],
        improvements: Array.isArray(validationData.improvements) ? validationData.improvements : [],
      };
    } catch (error) {
      // Return default validation if validation fails
      return {
        is_valid: true,
        completeness_score: 75,
        issues: [`Validation failed: ${error instanceof Error ? error.message : 'Unknown error'}`],
        missing_tasks: [],
        improvements: [],
      };
    }
  }

  /**
   * Parse several PRPs sequentially. A failure on one PRP does not abort the
   * batch; a placeholder result describing the failure is emitted instead.
   */
  async parseMultiplePRPs(
    prpContents: string[],
    options: PRPParsingOptions = {}
  ): Promise<PRPParsingResult[]> {
    const results: PRPParsingResult[] = [];
    for (let i = 0; i < prpContents.length; i++) {
      try {
        const result = await this.parsePRP(prpContents[i], options);
        results.push(result);
      } catch (error) {
        // Continue with other PRPs even if one fails
        results.push({
          parsed_data: {
            project_info: {
              name: `Failed Parse ${i + 1}`,
              description: `Parsing failed: ${error instanceof Error ? error.message : 'Unknown error'}`,
              goals: '',
              why_statement: '',
              target_users: '',
            },
            tasks: [],
            documentation: [],
            suggested_tags: [],
          },
          metrics: {
            parsing_time_ms: 0,
            task_count: 0,
            documentation_count: 0,
            suggested_tags_count: 0,
            estimated_total_hours: 0,
          },
        });
      }
    }
    return results;
  }

  // Expose the underlying client's usage metrics (pass-through).
  getClientMetrics() {
    return this.anthropicClient.getMetrics();
  }

  // Reset the underlying client's usage metrics (pass-through).
  resetClientMetrics(): void {
    this.anthropicClient.resetMetrics();
  }
}

View File

@ -0,0 +1,126 @@
import OAuthProvider from "@cloudflare/workers-oauth-provider";
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { McpAgent } from "agents/mcp";
import { Props } from "./types";
import { GitHubHandler } from "./auth/github-handler";
import { closeDb } from "./database";
import { registerAllTaskmasterTools } from "./tools/register-taskmaster-tools";
// Extended environment for Taskmaster with Anthropic integration
/** Worker env bindings required by the Taskmaster MCP server. */
interface TaskmasterEnv extends Env {
  // Anthropic API key used for PRP parsing; validated at init time.
  ANTHROPIC_API_KEY: string;
  // Claude model id used for PRP parsing; validated at init time.
  ANTHROPIC_MODEL: string;
}
/**
 * Durable-Object-backed MCP agent that exposes the Taskmaster PRP-parsing and
 * task-management tools over the Model Context Protocol, authenticated via
 * GitHub OAuth (user identity arrives in `this.props`).
 */
export class TaskmasterMCP extends McpAgent<TaskmasterEnv, Record<string, never>, Props> {
  // The MCP server instance tools are registered against in init().
  server = new McpServer({
    name: "Taskmaster PRP Parser MCP Server",
    version: "1.0.0",
  });

  /**
   * Cleanup database connections and resources when Durable Object is shutting down
   */
  async cleanup(): Promise<void> {
    try {
      // Close database connections
      await closeDb();
      console.log('Taskmaster MCP cleanup completed successfully');
    } catch (error) {
      // Cleanup failures are logged but never rethrown.
      console.error('Error during Taskmaster MCP cleanup:', error);
    }
  }

  /**
   * Durable Objects alarm handler - used for cleanup and maintenance
   */
  async alarm(): Promise<void> {
    console.log('Taskmaster MCP alarm triggered - performing cleanup');
    await this.cleanup();
  }

  /**
   * Initialize the Taskmaster MCP server with user context and register tools.
   * Throws (via validateEnvironment) if required env vars are missing.
   */
  async init() {
    console.log(`Taskmaster MCP server initialized for user: ${this.props.login} (${this.props.name})`);
    // Validate required environment variables
    this.validateEnvironment();
    // Register all Taskmaster tools based on user permissions
    registerAllTaskmasterTools(this.server, this.env, this.props);
    console.log('All Taskmaster tools registered successfully');
  }

  /**
   * Validate that all required environment variables are present.
   * @throws Error listing any missing variable names.
   */
  private validateEnvironment(): void {
    const requiredVars = [
      'ANTHROPIC_API_KEY',
      'ANTHROPIC_MODEL'
    ];
    const missingVars = requiredVars.filter(varName => !this.env[varName as keyof TaskmasterEnv]);
    if (missingVars.length > 0) {
      const errorMessage = `Missing required environment variables: ${missingVars.join(', ')}`;
      console.error(errorMessage);
      throw new Error(errorMessage);
    }
    // Log successful validation (without exposing sensitive values)
    console.log('Environment validation successful - all required variables present');
    // Log configuration info (safe values only).
    // NOTE(review): NODE_ENV, DATABASE_URL and SENTRY_DSN are read from the
    // base Env type (not declared on TaskmasterEnv) — confirm they exist there.
    console.log(`Anthropic Model: ${this.env.ANTHROPIC_MODEL}`);
    console.log(`Environment: ${this.env.NODE_ENV || 'development'}`);
    console.log(`Database configured: ${this.env.DATABASE_URL ? 'Yes' : 'No'}`);
    console.log(`Sentry monitoring: ${this.env.SENTRY_DSN ? 'Enabled' : 'Disabled'}`);
  }

  /**
   * Get server information and capabilities, including the authenticated
   * user's context and safe (non-secret) environment details.
   */
  getServerInfo() {
    return {
      name: "Taskmaster PRP Parser MCP Server",
      version: "1.0.0",
      capabilities: [
        'PRP Parsing with Anthropic Claude',
        'Task Management (CRUD operations)',
        'Documentation Management',
        'Project Overview and Analytics',
        'GitHub OAuth Authentication',
        'Role-based Access Control',
        'Audit Logging',
        'Real-time Health Monitoring'
      ],
      user_context: {
        username: this.props.login,
        display_name: this.props.name,
        email: this.props.email
      },
      environment: {
        anthropic_model: this.env.ANTHROPIC_MODEL,
        monitoring_enabled: !!this.env.SENTRY_DSN,
        environment: this.env.NODE_ENV || 'development'
      }
    };
  }
}
// OAuth provider configuration for Taskmaster.
// Routes /sse and /mcp to the Durable Object MCP handlers; all other OAuth
// endpoints (authorize/register/token) are served by the provider itself,
// with GitHub as the identity provider via GitHubHandler.
export default new OAuthProvider({
  apiHandlers: {
    // Server-Sent Events transport for MCP clients.
    '/sse': TaskmasterMCP.serveSSE('/sse') as any,
    // Streamable HTTP transport for MCP clients.
    '/mcp': TaskmasterMCP.serve('/mcp') as any,
  },
  authorizeEndpoint: "/authorize",
  clientRegistrationEndpoint: "/register",
  defaultHandler: GitHubHandler as any,
  tokenEndpoint: "/token",
});

View File

@ -0,0 +1,541 @@
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { withDatabase } from "../database";
import { z } from "zod";
import type { Documentation, Project } from "../types/taskmaster.js";
import { convertDocumentationRow, convertProjectRow } from "../database/models.js";
/** Authenticated GitHub user context passed to every tool handler. */
interface Props {
  // GitHub login (username) — used for permission checks and audit logs.
  login: string;
  // Display name.
  name: string;
  email: string;
  // GitHub OAuth access token for the session.
  accessToken: string;
}

/** Env bindings needed by the documentation tools. */
interface Env {
  // PostgreSQL connection string with read/write permissions.
  DATABASE_URL: string;
}

// Permission configuration
const DOC_MANAGERS = new Set<string>(['coleam00']); // Can modify/delete any documentation
const DOC_VIEWERS = new Set<string>(['coleam00']); // Allow-list of users who can view docs (currently the same single user)
/**
 * Build an MCP error response: markdown text flagged with isError, optionally
 * appending a pretty-printed JSON details block.
 */
function createErrorResponse(message: string, details?: any): any {
  let text = `**Error**\n\n${message}`;
  if (details) {
    text += `\n\n**Details:**\n\`\`\`json\n${JSON.stringify(details, null, 2)}\n\`\`\``;
  }
  return {
    content: [{
      type: "text",
      text,
      isError: true
    }]
  };
}
/**
 * Build an MCP success response: markdown text, optionally appending a
 * pretty-printed JSON data block.
 */
function createSuccessResponse(message: string, data?: any): any {
  let text = `**Success**\n\n${message}`;
  if (data) {
    text += `\n\n**Data:**\n\`\`\`json\n${JSON.stringify(data, null, 2)}\n\`\`\``;
  }
  return {
    content: [{
      type: "text",
      text
    }]
  };
}
/**
 * A user may modify documentation when they are a documentation manager,
 * or when they created the document in question.
 */
function canModifyDocumentation(username: string, doc?: Documentation): boolean {
  return DOC_MANAGERS.has(username) || doc?.created_by === username;
}
/** A user may view documentation when listed in either permission group. */
function canViewDocumentation(username: string): boolean {
  return [DOC_VIEWERS, DOC_MANAGERS].some(group => group.has(username));
}
/**
 * Append an immutable audit-log row recording a change to a table.
 *
 * @param db        - Tagged-template SQL client (values are parameterized).
 * @param tableName - Table the change applies to (e.g. 'documentation').
 * @param recordId  - Primary key of the affected row.
 * @param action    - Kind of change being recorded.
 * @param changedBy - Username performing the change.
 * @param oldValues - Row state before the change (null for inserts).
 * @param newValues - Row state after the change (null for deletes).
 */
async function logAuditEntry(
  db: any,
  tableName: string,
  recordId: string,
  action: 'insert' | 'update' | 'delete',
  changedBy: string,
  oldValues?: any,
  newValues?: any
): Promise<void> {
  await db`
    INSERT INTO audit_logs (table_name, record_id, action, old_values, new_values, changed_by)
    VALUES (${tableName}, ${recordId}, ${action}, ${oldValues || null}, ${newValues || null}, ${changedBy})
  `;
}
/**
 * Register all documentation-management MCP tools on the given server.
 *
 * Registration is permission-gated per tool: create/delete only for
 * DOC_MANAGERS; get/search/list for users passing canViewDocumentation;
 * update is registered for everyone but re-checks permissions per call.
 * All mutations are parameterized SQL (tagged templates) and audit-logged.
 */
export function registerDocumentationTools(server: McpServer, env: Env, props: Props) {
  // Tool 1: Create Documentation (managers only — gated at registration time)
  if (DOC_MANAGERS.has(props.login)) {
    server.tool(
      "createDocumentation",
      "Create project documentation including goals, specifications, target users, and notes (privileged users only)",
      {
        project_id: z.string().uuid(),
        type: z.enum(['goals', 'why', 'target_users', 'specifications', 'notes']),
        title: z.string().min(1).max(255),
        content: z.string().min(1),
      },
      async ({ project_id, type, title, content }) => {
        try {
          console.log(`Documentation creation initiated by ${props.login}: ${type} - ${title}`);
          return await withDatabase(env.DATABASE_URL, async (db) => {
            // Verify project exists
            const [project] = await db`
              SELECT id, name FROM projects WHERE id = ${project_id}
            `;
            if (!project) {
              return createErrorResponse("Project not found", { project_id });
            }
            // Create documentation
            const [doc] = await db`
              INSERT INTO documentation (project_id, type, title, content, created_by)
              VALUES (${project_id}, ${type}, ${title}, ${content}, ${props.login})
              RETURNING *
            `;
            const convertedDoc = convertDocumentationRow(doc);
            // Log audit entry
            await logAuditEntry(db, 'documentation', doc.id, 'insert', props.login, null, convertedDoc);
            return createSuccessResponse(
              `Documentation created successfully: ${convertedDoc.title}`,
              {
                documentation: convertedDoc,
                project_name: project.name,
                created_by: props.name,
                next_steps: [
                  "Use `getDocumentation` to view all project documentation",
                  "Use `updateDocumentation` to modify content",
                  "Use `getProjectOverview` to see this in project context"
                ]
              }
            );
          });
        } catch (error) {
          console.error('Documentation creation error:', error);
          return createErrorResponse(
            `Documentation creation failed: ${error instanceof Error ? error.message : 'Unknown error'}`,
            { user: props.login, title }
          );
        }
      }
    );
  }

  // Tool 2: Get Documentation (available to all authenticated users)
  if (canViewDocumentation(props.login)) {
    server.tool(
      "getDocumentation",
      "Retrieve project documentation with filtering by type and search capabilities",
      {
        project_id: z.string().uuid(),
        type: z.enum(['goals', 'why', 'target_users', 'specifications', 'notes']).optional(),
        limit: z.number().int().positive().max(50).default(20),
        offset: z.number().int().min(0).default(0),
      },
      async ({ project_id, type, limit, offset }) => {
        try {
          console.log(`Documentation retrieval by ${props.login} for project ${project_id}`);
          return await withDatabase(env.DATABASE_URL, async (db) => {
            // Verify project exists
            const [project] = await db`
              SELECT id, name FROM projects WHERE id = ${project_id}
            `;
            if (!project) {
              return createErrorResponse("Project not found", { project_id });
            }
            // Get documentation with filters.
            // The `(${type || null}::text IS NULL OR type = ...)` pattern makes
            // the type filter a no-op when no type was supplied.
            const docs = await db`
              SELECT * FROM documentation
              WHERE project_id = ${project_id}
              AND (${type || null}::text IS NULL OR type = ${type || null})
              ORDER BY type, created_at DESC
              LIMIT ${limit} OFFSET ${offset}
            `;
            const convertedDocs = docs.map(convertDocumentationRow);
            // Get total count (same filters) for pagination metadata
            const [countResult] = await db`
              SELECT COUNT(*) as total FROM documentation
              WHERE project_id = ${project_id}
              AND (${type || null}::text IS NULL OR type = ${type || null})
            `;
            const totalDocs = parseInt(countResult.total);
            const hasMore = offset + limit < totalDocs;
            // Group by type for better organization
            const docsByType = convertedDocs.reduce((acc, doc) => {
              if (!acc[doc.type]) acc[doc.type] = [];
              acc[doc.type].push(doc);
              return acc;
            }, {} as Record<string, Documentation[]>);
            return createSuccessResponse(
              `Found ${convertedDocs.length} documentation items for ${project.name}`,
              {
                project: project,
                documentation_by_type: docsByType,
                total_documents: totalDocs,
                pagination: {
                  limit,
                  offset,
                  has_more: hasMore,
                  next_offset: hasMore ? offset + limit : null
                },
                type_counts: Object.keys(docsByType).reduce((acc, type) => {
                  acc[type] = docsByType[type].length;
                  return acc;
                }, {} as Record<string, number>)
              }
            );
          });
        } catch (error) {
          console.error('Documentation retrieval error:', error);
          return createErrorResponse(
            `Documentation retrieval failed: ${error instanceof Error ? error.message : 'Unknown error'}`,
            { user: props.login, project_id }
          );
        }
      }
    );
  }

  // Tool 3: Update Documentation
  // NOTE(review): registered unconditionally (no registration-time gate);
  // authorization is enforced per-call via canModifyDocumentation below.
  server.tool(
    "updateDocumentation",
    "Update existing documentation content and metadata",
    {
      id: z.string().uuid(),
      title: z.string().min(1).max(255).optional(),
      content: z.string().min(1).optional(),
    },
    async ({ id, title, content }) => {
      try {
        console.log(`Documentation update initiated by ${props.login}: ${id}`);
        return await withDatabase(env.DATABASE_URL, async (db) => {
          // Get existing documentation for permission check
          const [existingDoc] = await db`
            SELECT * FROM documentation WHERE id = ${id}
          `;
          if (!existingDoc) {
            return createErrorResponse("Documentation not found", { documentation_id: id });
          }
          const convertedExistingDoc = convertDocumentationRow(existingDoc);
          // Check permissions: managers or the document's creator only
          if (!canModifyDocumentation(props.login, convertedExistingDoc)) {
            return createErrorResponse(
              "Insufficient permissions to modify this documentation",
              {
                documentation_id: id,
                required_permissions: "documentation manager or document creator"
              }
            );
          }
          // Build update fields from only the parameters actually provided
          const updateFields: any = {};
          if (title !== undefined) updateFields.title = title;
          if (content !== undefined) updateFields.content = content;
          if (Object.keys(updateFields).length === 0) {
            return createErrorResponse("No fields to update provided");
          }
          // Update with version increment
          updateFields.version = existingDoc.version + 1;
          updateFields.updated_at = new Date();
          const [updatedDoc] = await db`
            UPDATE documentation SET ${db(updateFields)} WHERE id = ${id}
            RETURNING *
          `;
          const convertedDoc = convertDocumentationRow(updatedDoc);
          // Log audit entry with before/after snapshots
          await logAuditEntry(db, 'documentation', id, 'update', props.login, convertedExistingDoc, convertedDoc);
          return createSuccessResponse(
            `Documentation updated successfully: ${convertedDoc.title}`,
            {
              documentation: convertedDoc,
              version_incremented: true,
              updated_by: props.name,
              changes_made: Object.keys(updateFields).filter(key => key !== 'version' && key !== 'updated_at')
            }
          );
        });
      } catch (error) {
        console.error('Documentation update error:', error);
        return createErrorResponse(
          `Documentation update failed: ${error instanceof Error ? error.message : 'Unknown error'}`,
          { user: props.login, documentation_id: id }
        );
      }
    }
  );

  // Tool 4: Search Documentation
  if (canViewDocumentation(props.login)) {
    server.tool(
      "searchDocumentation",
      "Search documentation content across projects with full-text search capabilities",
      {
        query: z.string().min(1).max(255),
        project_id: z.string().uuid().optional(),
        type: z.enum(['goals', 'why', 'target_users', 'specifications', 'notes']).optional(),
        limit: z.number().int().positive().max(50).default(20),
      },
      async ({ query, project_id, type, limit }) => {
        try {
          console.log(`Documentation search by ${props.login}: "${query}"`);
          return await withDatabase(env.DATABASE_URL, async (db) => {
            // Search documentation using ILIKE for basic text search;
            // title matches sort before content-only matches.
            const searchResults = await db`
              SELECT d.*, p.name as project_name
              FROM documentation d
              JOIN projects p ON d.project_id = p.id
              WHERE (d.title ILIKE ${'%' + query + '%'} OR d.content ILIKE ${'%' + query + '%'})
              AND (${project_id || null}::uuid IS NULL OR d.project_id = ${project_id || null})
              AND (${type || null}::text IS NULL OR d.type = ${type || null})
              ORDER BY
              CASE
              WHEN d.title ILIKE ${'%' + query + '%'} THEN 1
              ELSE 2
              END,
              d.updated_at DESC
              LIMIT ${limit}
            `;
            const results = searchResults.map(row => {
              const doc = convertDocumentationRow(row);
              return {
                ...doc,
                project_name: row.project_name,
                relevance_score: calculateRelevanceScore(query, doc.title, doc.content)
              };
            });
            // Get total count for the search (without the LIMIT)
            const [countResult] = await db`
              SELECT COUNT(*) as total
              FROM documentation d
              WHERE (d.title ILIKE ${'%' + query + '%'} OR d.content ILIKE ${'%' + query + '%'})
              AND (${project_id || null}::uuid IS NULL OR d.project_id = ${project_id || null})
              AND (${type || null}::text IS NULL OR d.type = ${type || null})
            `;
            const totalResults = parseInt(countResult.total);
            return createSuccessResponse(
              `Found ${results.length} documentation items matching "${query}"`,
              {
                search_query: query,
                results: results,
                total_matches: totalResults,
                search_filters: {
                  project_id,
                  type
                },
                search_tips: totalResults === 0 ? [
                  "Try broader search terms",
                  "Check spelling",
                  "Search without filters to see all matches"
                ] : []
              }
            );
          });
        } catch (error) {
          console.error('Documentation search error:', error);
          return createErrorResponse(
            `Documentation search failed: ${error instanceof Error ? error.message : 'Unknown error'}`,
            { user: props.login, query }
          );
        }
      }
    );
  }

  // Tool 5: Delete Documentation (privileged users only)
  if (DOC_MANAGERS.has(props.login)) {
    server.tool(
      "deleteDocumentation",
      "Delete documentation and all its history (privileged users only)",
      {
        id: z.string().uuid(),
      },
      async ({ id }) => {
        try {
          console.log(`Documentation deletion initiated by ${props.login}: ${id}`);
          return await withDatabase(env.DATABASE_URL, async (db) => {
            // Get documentation before deletion for audit log
            const [existingDoc] = await db`
              SELECT * FROM documentation WHERE id = ${id}
            `;
            if (!existingDoc) {
              return createErrorResponse("Documentation not found", { documentation_id: id });
            }
            const convertedDoc = convertDocumentationRow(existingDoc);
            // Delete in transaction so the audit row and delete commit together
            await db.begin(async (tx: any) => {
              // Log audit entry before deletion
              await logAuditEntry(tx, 'documentation', id, 'delete', props.login, convertedDoc, null);
              // Delete documentation
              await tx`DELETE FROM documentation WHERE id = ${id}`;
            });
            return createSuccessResponse(
              `Documentation deleted successfully: ${convertedDoc.title}`,
              {
                deleted_documentation: {
                  id: convertedDoc.id,
                  title: convertedDoc.title,
                  type: convertedDoc.type,
                  project_id: convertedDoc.project_id
                },
                deleted_by: props.name
              }
            );
          });
        } catch (error) {
          console.error('Documentation deletion error:', error);
          return createErrorResponse(
            `Documentation deletion failed: ${error instanceof Error ? error.message : 'Unknown error'}`,
            { user: props.login, documentation_id: id }
          );
        }
      }
    );
  }

  // Tool 6: List Projects (available to all authenticated users)
  if (canViewDocumentation(props.login)) {
    server.tool(
      "listProjects",
      "List all projects with basic information and documentation counts",
      {
        limit: z.number().int().positive().max(50).default(20),
        offset: z.number().int().min(0).default(0),
      },
      async ({ limit, offset }) => {
        try {
          console.log(`Project listing requested by ${props.login}`);
          return await withDatabase(env.DATABASE_URL, async (db) => {
            // Get projects with task and documentation counts
            const projects = await db`
              SELECT
              p.*,
              COUNT(DISTINCT t.id) as task_count,
              COUNT(DISTINCT d.id) as documentation_count,
              COUNT(DISTINCT CASE WHEN t.status = 'completed' THEN t.id END) as completed_tasks
              FROM projects p
              LEFT JOIN tasks t ON p.id = t.project_id
              LEFT JOIN documentation d ON p.id = d.project_id
              GROUP BY p.id
              ORDER BY p.created_at DESC
              LIMIT ${limit} OFFSET ${offset}
            `;
            const projectsWithStats = projects.map(row => {
              const project = convertProjectRow(row);
              return {
                ...project,
                stats: {
                  task_count: parseInt(row.task_count),
                  documentation_count: parseInt(row.documentation_count),
                  completed_tasks: parseInt(row.completed_tasks),
                  // Avoid divide-by-zero for projects with no tasks
                  completion_percentage: row.task_count > 0
                    ? Math.round((parseInt(row.completed_tasks) / parseInt(row.task_count)) * 100)
                    : 0
                }
              };
            });
            // Get total count
            const [countResult] = await db`SELECT COUNT(*) as total FROM projects`;
            const totalProjects = parseInt(countResult.total);
            const hasMore = offset + limit < totalProjects;
            return createSuccessResponse(
              `Found ${projectsWithStats.length} projects`,
              {
                projects: projectsWithStats,
                pagination: {
                  total: totalProjects,
                  limit,
                  offset,
                  has_more: hasMore,
                  next_offset: hasMore ? offset + limit : null
                }
              }
            );
          });
        } catch (error) {
          console.error('Project listing error:', error);
          return createErrorResponse(
            `Project listing failed: ${error instanceof Error ? error.message : 'Unknown error'}`,
            { user: props.login }
          );
        }
      }
    );
  }
}
// Helper function to calculate search relevance
/**
 * Score how well a documentation item matches a search query (higher = better).
 * Scoring: +10 if the title contains the query, +1 per occurrence in the
 * content, +20 bonus for an exact (case-insensitive) title match.
 *
 * Fix: the query is user input, so regex metacharacters are escaped before
 * building the RegExp — previously a query like "c++" threw a SyntaxError
 * and "." matched any character.
 */
function calculateRelevanceScore(query: string, title: string, content: string): number {
  const queryLower = query.toLowerCase();
  const titleLower = title.toLowerCase();
  const contentLower = content.toLowerCase();
  let score = 0;
  // Title matches are weighted higher
  if (titleLower.includes(queryLower)) {
    score += 10;
  }
  // Escape regex metacharacters so the query is matched literally
  const escapedQuery = queryLower.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
  // Content matches
  const contentMatches = (contentLower.match(new RegExp(escapedQuery, 'g')) || []).length;
  score += contentMatches;
  // Exact title match gets bonus
  if (titleLower === queryLower) {
    score += 20;
  }
  return score;
}

View File

@ -0,0 +1,546 @@
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { withDatabase } from "../database";
import { z } from "zod";
import type { ProjectOverview, Project, Task, Documentation } from "../types/taskmaster.js";
import { convertProjectRow, convertTaskRow, convertDocumentationRow } from "../database/models.js";
/** Authenticated GitHub user context passed to every tool handler. */
interface Props {
  // GitHub login (username) — used for permission checks.
  login: string;
  // Display name.
  name: string;
  email: string;
  // GitHub OAuth access token for the session.
  accessToken: string;
}

/** Env bindings needed by the project-overview tools. */
interface Env {
  // PostgreSQL connection string with read/write permissions.
  DATABASE_URL: string;
}

// Permission configuration
const OVERVIEW_VIEWERS = new Set<string>(['coleam00']); // Allow-list of users who can view overviews (currently a single user)
/**
 * Build an MCP error response: markdown text flagged with isError, optionally
 * appending a pretty-printed JSON details block.
 */
function createErrorResponse(message: string, details?: any): any {
  const detailsBlock = details
    ? `\n\n**Details:**\n\`\`\`json\n${JSON.stringify(details, null, 2)}\n\`\`\``
    : '';
  return {
    content: [{
      type: "text",
      text: `**Error**\n\n${message}` + detailsBlock,
      isError: true
    }]
  };
}
/**
 * Build an MCP success response: markdown text, optionally appending a
 * pretty-printed JSON data block.
 */
function createSuccessResponse(message: string, data?: any): any {
  const dataBlock = data
    ? `\n\n**Data:**\n\`\`\`json\n${JSON.stringify(data, null, 2)}\n\`\`\``
    : '';
  return {
    content: [{
      type: "text",
      text: `**Success**\n\n${message}` + dataBlock
    }]
  };
}
// A user may view project overviews only when present in the OVERVIEW_VIEWERS allow-list.
function canViewOverview(username: string): boolean {
  return OVERVIEW_VIEWERS.has(username);
}
/**
 * Compute a 0-100 health score for a project from its task statistics.
 *
 * Starts at 100 and subtracts penalties for low completion (<30%: -20),
 * high blocked rate (>20%: -25), overdue tasks (>10%: -30), and unassigned
 * tasks (>30%: -15). A project with no tasks scores a flat 50. Returns the
 * clamped score, a coarse status bucket, and human-readable issue /
 * recommendation lists.
 */
async function calculateProjectHealth(db: any, projectId: string): Promise<{
  health_score: number;
  health_status: 'excellent' | 'good' | 'warning' | 'critical';
  issues: string[];
  recommendations: string[];
}> {
  // Get project statistics
  const [stats] = await db`
    SELECT
      COUNT(*) as total_tasks,
      COUNT(CASE WHEN status = 'completed' THEN 1 END) as completed_tasks,
      COUNT(CASE WHEN status = 'blocked' THEN 1 END) as blocked_tasks,
      COUNT(CASE WHEN due_date < CURRENT_DATE AND status != 'completed' THEN 1 END) as overdue_tasks,
      COUNT(CASE WHEN assigned_to IS NULL AND status != 'completed' THEN 1 END) as unassigned_tasks
    FROM tasks
    WHERE project_id = ${projectId}
  `;
  // Aggregate counts are parsed to numbers before arithmetic.
  const totalTasks = parseInt(stats.total_tasks);
  const completedTasks = parseInt(stats.completed_tasks);
  const blockedTasks = parseInt(stats.blocked_tasks);
  const overdueTasks = parseInt(stats.overdue_tasks);
  const unassignedTasks = parseInt(stats.unassigned_tasks);
  let healthScore = 100;
  const issues: string[] = [];
  const recommendations: string[] = [];
  if (totalTasks === 0) {
    // Empty project: neutral score rather than "perfect".
    healthScore = 50;
    issues.push("No tasks defined for the project");
    recommendations.push("Create initial project tasks to begin tracking progress");
  } else {
    // Completion rate impact
    const completionRate = completedTasks / totalTasks;
    if (completionRate < 0.3) {
      healthScore -= 20;
      issues.push(`Low completion rate: ${Math.round(completionRate * 100)}%`);
      recommendations.push("Focus on completing existing tasks before adding new ones");
    }
    // Blocked tasks impact
    const blockedRate = blockedTasks / totalTasks;
    if (blockedRate > 0.2) {
      healthScore -= 25;
      issues.push(`High blocked task rate: ${Math.round(blockedRate * 100)}%`);
      recommendations.push("Address blockers to unblock task progress");
    }
    // Overdue tasks impact
    const overdueRate = overdueTasks / totalTasks;
    if (overdueRate > 0.1) {
      healthScore -= 30;
      issues.push(`Overdue tasks: ${overdueTasks} (${Math.round(overdueRate * 100)}%)`);
      recommendations.push("Review and update task deadlines, consider resource reallocation");
    }
    // Unassigned tasks impact
    const unassignedRate = unassignedTasks / totalTasks;
    if (unassignedRate > 0.3) {
      healthScore -= 15;
      issues.push(`Many unassigned tasks: ${unassignedTasks} (${Math.round(unassignedRate * 100)}%)`);
      recommendations.push("Assign tasks to team members to clarify ownership");
    }
  }
  // Determine health status: bucket the numeric score into a coarse label.
  let healthStatus: 'excellent' | 'good' | 'warning' | 'critical';
  if (healthScore >= 90) healthStatus = 'excellent';
  else if (healthScore >= 70) healthStatus = 'good';
  else if (healthScore >= 50) healthStatus = 'warning';
  else healthStatus = 'critical';
  return {
    health_score: Math.max(0, healthScore), // clamp: stacked penalties can push below zero
    health_status: healthStatus,
    issues,
    recommendations,
  };
}
/**
 * Fetch the most recent audit-log entries touching a project — the project
 * row itself plus its tasks and documentation — newest first, capped at
 * `limit`.
 *
 * Joins in the title of the affected task/documentation row where that row
 * still exists, and attaches a prebuilt human-readable summary per entry.
 */
async function getProjectTimeline(db: any, projectId: string, limit: number): Promise<any[]> {
  // Get recent activity from audit logs
  const auditEntries = await db`
    SELECT
      al.*,
      CASE
        WHEN al.table_name = 'tasks' THEN (
          SELECT title FROM tasks WHERE id = al.record_id::uuid
        )
        WHEN al.table_name = 'documentation' THEN (
          SELECT title FROM documentation WHERE id = al.record_id::uuid
        )
        ELSE NULL
      END as record_title
    FROM audit_logs al
    WHERE al.table_name IN ('tasks', 'documentation', 'projects')
    AND (
      al.table_name = 'projects' AND al.record_id = ${projectId}
      OR al.table_name IN ('tasks', 'documentation') AND EXISTS (
        SELECT 1 FROM tasks t WHERE t.id = al.record_id::uuid AND t.project_id = ${projectId}
        UNION
        SELECT 1 FROM documentation d WHERE d.id = al.record_id::uuid AND d.project_id = ${projectId}
      )
    )
    ORDER BY al.changed_at DESC
    LIMIT ${limit}
  `;
  // Flatten each audit row into a timeline event with a summary line.
  return auditEntries.map((entry: any) => ({
    timestamp: entry.changed_at,
    action: entry.action,
    table: entry.table_name,
    record_id: entry.record_id,
    record_title: entry.record_title,
    changed_by: entry.changed_by,
    summary: generateActivitySummary(entry.action, entry.table_name, entry.record_title, entry.changed_by)
  }));
}
/**
 * Render one audit-log entry as a single human-readable sentence, e.g.
 * "alice created task: Fix login". Unknown actions/tables fall back to
 * their raw names; an empty title drops the trailing ": title" suffix.
 */
function generateActivitySummary(action: string, table: string, title: string, changedBy: string): string {
  // Past-tense verbs for the audited actions.
  const verbs: Record<string, string> = {
    insert: 'created',
    update: 'updated',
    delete: 'deleted'
  };
  // Singular, human-readable nouns for the audited tables.
  const nouns: Record<string, string> = {
    tasks: 'task',
    documentation: 'documentation',
    projects: 'project'
  };
  const verb = verbs[action] ?? action;
  const noun = nouns[table] ?? table;
  const suffix = title ? `: ${title}` : '';
  return `${changedBy} ${verb} ${noun}${suffix}`;
}
/**
 * Register the project overview, analytics, and timeline MCP tools.
 *
 * All three tools are read-only and gated by canViewOverview(), so they are
 * only registered for whitelisted users. Each handler opens a pooled
 * database connection via withDatabase and returns an MCP text payload.
 */
export function registerProjectOverviewTools(server: McpServer, env: Env, props: Props) {
  // Tool 1: Get Project Overview
  if (canViewOverview(props.login)) {
    server.tool(
      "getProjectOverview",
      "Get comprehensive project overview including statistics, recent activity, and health metrics",
      {
        project_id: z.string().uuid(),
      },
      async ({ project_id }) => {
        try {
          console.log(`Project overview requested by ${props.login} for project ${project_id}`);
          return await withDatabase(env.DATABASE_URL, async (db) => {
            // Get project information
            const [projectRow] = await db`
              SELECT * FROM projects WHERE id = ${project_id}
            `;
            if (!projectRow) {
              return createErrorResponse("Project not found", { project_id });
            }
            const project = convertProjectRow(projectRow);
            // Get task statistics (counts per status plus hour totals and
            // a completion percentage computed in SQL).
            const [taskStats] = await db`
              SELECT
                COUNT(*) as total_tasks,
                COUNT(CASE WHEN status = 'completed' THEN 1 END) as completed_tasks,
                COUNT(CASE WHEN status = 'in_progress' THEN 1 END) as in_progress_tasks,
                COUNT(CASE WHEN status = 'pending' THEN 1 END) as pending_tasks,
                COUNT(CASE WHEN status = 'blocked' THEN 1 END) as blocked_tasks,
                COALESCE(SUM(estimated_hours), 0) as total_estimated_hours,
                COALESCE(SUM(actual_hours), 0) as total_actual_hours,
                CASE
                  WHEN COUNT(*) > 0 THEN
                    ROUND((COUNT(CASE WHEN status = 'completed' THEN 1 END) * 100.0 / COUNT(*)), 2)
                  ELSE 0
                END as completion_percentage
              FROM tasks
              WHERE project_id = ${project_id}
            `;
            // Get recent tasks
            const recentTasks = await db`
              SELECT * FROM tasks
              WHERE project_id = ${project_id}
              ORDER BY updated_at DESC
              LIMIT 10
            `;
            // Get recent documentation
            const recentDocs = await db`
              SELECT * FROM documentation
              WHERE project_id = ${project_id}
              ORDER BY updated_at DESC
              LIMIT 5
            `;
            // Get upcoming deadlines: incomplete tasks due in the next 30 days.
            const upcomingDeadlines = await db`
              SELECT * FROM tasks
              WHERE project_id = ${project_id}
              AND due_date IS NOT NULL
              AND due_date >= CURRENT_DATE
              AND due_date <= CURRENT_DATE + INTERVAL '30 days'
              AND status != 'completed'
              ORDER BY due_date ASC
              LIMIT 10
            `;
            // Get project tags in use by this project's tasks, most-used first.
            const projectTags = await db`
              SELECT DISTINCT t.id, t.name, t.color, t.created_by, t.created_at, COUNT(tt.task_id) as usage_count
              FROM tags t
              JOIN task_tags tt ON t.id = tt.tag_id
              JOIN tasks task ON tt.task_id = task.id
              WHERE task.project_id = ${project_id}
              GROUP BY t.id, t.name, t.color, t.created_by, t.created_at
              ORDER BY usage_count DESC, t.name
            `;
            // Calculate project health
            const healthMetrics = await calculateProjectHealth(db, project_id);
            const projectOverview: ProjectOverview = {
              project,
              task_statistics: {
                total_tasks: parseInt(taskStats.total_tasks),
                completed_tasks: parseInt(taskStats.completed_tasks),
                in_progress_tasks: parseInt(taskStats.in_progress_tasks),
                pending_tasks: parseInt(taskStats.pending_tasks),
                blocked_tasks: parseInt(taskStats.blocked_tasks),
                completion_percentage: parseFloat(taskStats.completion_percentage),
              },
              recent_activity: {
                recent_tasks: recentTasks.map(convertTaskRow),
                recent_documentation: recentDocs.map(convertDocumentationRow),
              },
              tags: projectTags.map(tag => ({
                id: tag.id,
                name: tag.name,
                color: tag.color,
                created_by: tag.created_by,
                created_at: tag.created_at,
                usage_count: parseInt(tag.usage_count)
              })),
              upcoming_deadlines: upcomingDeadlines.map(convertTaskRow),
            };
            return createSuccessResponse(
              `Project overview generated for: ${project.name}`,
              {
                overview: projectOverview,
                health_metrics: healthMetrics,
                effort_metrics: {
                  total_estimated_hours: parseFloat(taskStats.total_estimated_hours),
                  total_actual_hours: parseFloat(taskStats.total_actual_hours),
                  // actual/estimated ratio, rounded to 2 decimals; null when
                  // no hours were estimated.
                  efficiency_ratio: taskStats.total_estimated_hours > 0
                    ? parseFloat((parseFloat(taskStats.total_actual_hours) / parseFloat(taskStats.total_estimated_hours)).toFixed(2))
                    : null
                },
                insights: generateProjectInsights(projectOverview, healthMetrics)
              }
            );
          });
        } catch (error) {
          console.error('Project overview error:', error);
          return createErrorResponse(
            `Project overview failed: ${error instanceof Error ? error.message : 'Unknown error'}`,
            { user: props.login, project_id }
          );
        }
      }
    );
  }
  // Tool 2: Get Project Analytics
  if (canViewOverview(props.login)) {
    server.tool(
      "getProjectAnalytics",
      "Get detailed project analytics including trend analysis and performance metrics",
      {
        project_id: z.string().uuid(),
        date_range_days: z.number().int().positive().max(365).default(30),
      },
      async ({ project_id, date_range_days }) => {
        try {
          console.log(`Project analytics requested by ${props.login} for project ${project_id}`);
          return await withDatabase(env.DATABASE_URL, async (db) => {
            const startDate = new Date();
            startDate.setDate(startDate.getDate() - date_range_days);
            // Get task completion trend.
            // NOTE(review): uses updated_at as a proxy for completion date —
            // later edits to a completed task shift it in the trend; confirm
            // this approximation is acceptable.
            const completionTrend = await db`
              SELECT
                DATE(updated_at) as date,
                COUNT(*) as tasks_completed
              FROM tasks
              WHERE project_id = ${project_id}
              AND status = 'completed'
              AND updated_at >= ${startDate}
              GROUP BY DATE(updated_at)
              ORDER BY date
            `;
            // Get task creation trend
            const creationTrend = await db`
              SELECT
                DATE(created_at) as date,
                COUNT(*) as tasks_created
              FROM tasks
              WHERE project_id = ${project_id}
              AND created_at >= ${startDate}
              GROUP BY DATE(created_at)
              ORDER BY date
            `;
            // Get effort analysis: estimated vs. actual hours per priority,
            // ordered urgent -> low.
            const effortAnalysis = await db`
              SELECT
                priority,
                COUNT(*) as task_count,
                AVG(COALESCE(estimated_hours, 0)) as avg_estimated_hours,
                AVG(COALESCE(actual_hours, 0)) as avg_actual_hours,
                AVG(CASE
                  WHEN estimated_hours > 0 AND actual_hours > 0
                  THEN actual_hours::float / estimated_hours::float
                  ELSE NULL
                END) as avg_effort_ratio
              FROM tasks
              WHERE project_id = ${project_id}
              GROUP BY priority
              ORDER BY
                CASE priority
                  WHEN 'urgent' THEN 1
                  WHEN 'high' THEN 2
                  WHEN 'medium' THEN 3
                  WHEN 'low' THEN 4
                END
            `;
            // Get team performance per assignee (NULL shown as 'Unassigned').
            const teamPerformance = await db`
              SELECT
                COALESCE(assigned_to, 'Unassigned') as assignee,
                COUNT(*) as total_tasks,
                COUNT(CASE WHEN status = 'completed' THEN 1 END) as completed_tasks,
                AVG(CASE
                  WHEN status = 'completed' AND estimated_hours > 0 AND actual_hours > 0
                  THEN actual_hours::float / estimated_hours::float
                  ELSE NULL
                END) as avg_efficiency
              FROM tasks
              WHERE project_id = ${project_id}
              GROUP BY assigned_to
              ORDER BY completed_tasks DESC, total_tasks DESC
            `;
            return createSuccessResponse(
              `Analytics generated for project (${date_range_days} days)`,
              {
                date_range: {
                  start_date: startDate.toISOString().split('T')[0],
                  end_date: new Date().toISOString().split('T')[0],
                  days: date_range_days
                },
                completion_trend: completionTrend,
                creation_trend: creationTrend,
                effort_analysis: effortAnalysis.map(row => ({
                  priority: row.priority,
                  task_count: parseInt(row.task_count),
                  avg_estimated_hours: parseFloat(row.avg_estimated_hours || 0),
                  avg_actual_hours: parseFloat(row.avg_actual_hours || 0),
                  avg_effort_ratio: row.avg_effort_ratio ? parseFloat(row.avg_effort_ratio) : null
                })),
                team_performance: teamPerformance.map(row => ({
                  assignee: row.assignee,
                  total_tasks: parseInt(row.total_tasks),
                  completed_tasks: parseInt(row.completed_tasks),
                  completion_rate: parseInt(row.total_tasks) > 0
                    ? Math.round((parseInt(row.completed_tasks) / parseInt(row.total_tasks)) * 100)
                    : 0,
                  avg_efficiency: row.avg_efficiency ? parseFloat(row.avg_efficiency) : null
                }))
              }
            );
          });
        } catch (error) {
          console.error('Project analytics error:', error);
          return createErrorResponse(
            `Project analytics failed: ${error instanceof Error ? error.message : 'Unknown error'}`,
            { user: props.login, project_id }
          );
        }
      }
    );
  }
  // Tool 3: Get Project Timeline
  if (canViewOverview(props.login)) {
    server.tool(
      "getProjectTimeline",
      "Get chronological timeline of all project activities and changes",
      {
        project_id: z.string().uuid(),
        limit: z.number().int().positive().max(100).default(50),
      },
      async ({ project_id, limit }) => {
        try {
          console.log(`Project timeline requested by ${props.login} for project ${project_id}`);
          return await withDatabase(env.DATABASE_URL, async (db) => {
            // Verify project exists
            const [project] = await db`
              SELECT id, name FROM projects WHERE id = ${project_id}
            `;
            if (!project) {
              return createErrorResponse("Project not found", { project_id });
            }
            const timeline = await getProjectTimeline(db, project_id, limit);
            // Group activities by date for better presentation
            const timelineByDate = timeline.reduce((acc, activity) => {
              const date = activity.timestamp.toISOString().split('T')[0];
              if (!acc[date]) acc[date] = [];
              acc[date].push(activity);
              return acc;
            }, {} as Record<string, any[]>);
            return createSuccessResponse(
              `Timeline generated for project: ${project.name}`,
              {
                project: project,
                timeline: timeline,
                timeline_by_date: timelineByDate,
                total_activities: timeline.length,
                // timeline is ordered newest-first, so [0] is the latest entry.
                date_range: timeline.length > 0 ? {
                  earliest: timeline[timeline.length - 1].timestamp,
                  latest: timeline[0].timestamp
                } : null
              }
            );
          });
        } catch (error) {
          console.error('Project timeline error:', error);
          return createErrorResponse(
            `Project timeline failed: ${error instanceof Error ? error.message : 'Unknown error'}`,
            { user: props.login, project_id }
          );
        }
      }
    );
  }
}
/**
 * Produce short human-readable observations about a project's state,
 * combining completion rate, blocker pressure, deadline load, health
 * status, and recent activity. Returns zero or more insight strings.
 */
function generateProjectInsights(overview: ProjectOverview, health: any): string[] {
  const notes: string[] = [];
  const stats = overview.task_statistics;

  // Overall completion progress (mutually exclusive tiers).
  if (stats.completion_percentage >= 80) {
    notes.push("🎯 Project is nearing completion with excellent progress");
  } else if (stats.completion_percentage >= 50) {
    notes.push("📈 Project is making good progress, keep up the momentum");
  } else if (stats.completion_percentage < 25 && stats.total_tasks > 5) {
    notes.push("🚨 Project completion rate is low, consider reviewing task priorities");
  }

  // Blocker pressure: only flagged when blocked share exceeds 20%.
  if (stats.blocked_tasks > 0) {
    const blockedShare = (stats.blocked_tasks / stats.total_tasks) * 100;
    if (blockedShare > 20) {
      notes.push("🔒 High number of blocked tasks may be impacting project velocity");
    }
  }

  // Deadline load.
  if (overview.upcoming_deadlines.length > 5) {
    notes.push("⏰ Multiple upcoming deadlines require attention and planning");
  }

  // Health extremes only; 'good' and 'warning' produce no note.
  if (health.health_status === 'critical') {
    notes.push("⚠️ Project health is critical, immediate action recommended");
  } else if (health.health_status === 'excellent') {
    notes.push("✅ Project health is excellent, maintain current practices");
  }

  // Staleness check.
  if (overview.recent_activity.recent_tasks.length === 0) {
    notes.push("💭 No recent task activity, consider checking project status");
  }

  return notes;
}

View File

@ -0,0 +1,352 @@
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { PRPParser } from "../llm/prp-parser.js";
import { withDatabase } from "../database";
import { ParsePRPSchema } from "../types/taskmaster.js";
import { z } from "zod";
import type { ParsedPRPData, Project, Task, Documentation } from "../types/taskmaster.js";
import { convertProjectRow, convertTaskRow, convertDocumentationRow } from "../database/models.js";
// Authenticated GitHub user context supplied by the OAuth layer.
interface Props {
  login: string;        // GitHub username, used for all permission checks
  name: string;
  email: string;
  accessToken: string;
}

// Worker environment bindings required by the PRP parsing tools.
interface Env {
  DATABASE_URL: string;       // PostgreSQL connection string
  ANTHROPIC_API_KEY: string;  // key handed to PRPParser for LLM parsing
  ANTHROPIC_MODEL: string;    // Anthropic model id handed to PRPParser
}

// GitHub usernames allowed to auto-create records and read API usage metrics.
const PRIVILEGED_USERS = new Set<string>(['coleam00']);
/** Build a standard MCP error payload, optionally embedding details as a fenced JSON block. */
function createErrorResponse(message: string, details?: any): any {
  const detailBlock = details
    ? `\n\n**Details:**\n\`\`\`json\n${JSON.stringify(details, null, 2)}\n\`\`\``
    : '';
  return {
    content: [
      {
        type: "text",
        text: `**Error**\n\n${message}${detailBlock}`,
        isError: true,
      },
    ],
  };
}
/** Build a standard MCP success payload, optionally embedding data as a fenced JSON block. */
function createSuccessResponse(message: string, data?: any): any {
  const dataBlock = data
    ? `\n\n**Data:**\n\`\`\`json\n${JSON.stringify(data, null, 2)}\n\`\`\``
    : '';
  return {
    content: [
      {
        type: "text",
        text: `**Success**\n\n${message}${dataBlock}`,
      },
    ],
  };
}
/**
 * Insert a project row from LLM-parsed PRP data and return the converted
 * Project. Upserts on the projects.name unique constraint, so re-parsing
 * the same PRP refreshes the existing project instead of failing.
 */
async function createProjectFromParsedData(
  db: any,
  parsedData: ParsedPRPData,
  projectName: string,
  createdBy: string
): Promise<Project> {
  const [project] = await db`
    INSERT INTO projects (name, description, goals, why_statement, target_users, created_by)
    VALUES (
      ${projectName},
      ${parsedData.project_info.description},
      ${parsedData.project_info.goals},
      ${parsedData.project_info.why_statement},
      ${parsedData.project_info.target_users},
      ${createdBy}
    )
    ON CONFLICT (name) DO UPDATE SET
      description = EXCLUDED.description,
      goals = EXCLUDED.goals,
      why_statement = EXCLUDED.why_statement,
      target_users = EXCLUDED.target_users,
      updated_at = CURRENT_TIMESTAMP
    RETURNING *
  `;
  return convertProjectRow(project);
}
/**
 * Create all parsed tasks — with their tags and inter-task dependencies —
 * for a project inside a single transaction.
 *
 * Dependencies are declared by task *title* in the parsed data and are
 * resolved only against titles created in this batch; unknown titles are
 * silently skipped.
 * NOTE(review): if two parsed tasks share a title, the later one wins in
 * the lookup map — confirm the LLM output guarantees unique titles.
 */
async function createTasksFromParsedData(
  db: any,
  projectId: string,
  parsedData: ParsedPRPData,
  createdBy: string
): Promise<Task[]> {
  const tasks: Task[] = [];
  // Create tasks in transaction for consistency
  await db.begin(async (tx: any) => {
    for (const taskData of parsedData.tasks) {
      const [task] = await tx`
        INSERT INTO tasks (
          project_id, title, description, priority,
          estimated_hours, acceptance_criteria, created_by
        )
        VALUES (
          ${projectId}, ${taskData.title}, ${taskData.description},
          ${taskData.priority}, ${taskData.estimated_hours || null},
          ${taskData.acceptance_criteria || null}, ${createdBy}
        )
        RETURNING *
      `;
      const convertedTask = convertTaskRow(task);
      tasks.push(convertedTask);
      // Create tags and link them to tasks
      if (taskData.tags && taskData.tags.length > 0) {
        for (const tagName of taskData.tags) {
          await upsertTagAndLink(tx, task.id, tagName, createdBy);
        }
      }
    }
    // Create task dependencies after all tasks are created
    // (so forward references between parsed tasks resolve).
    const taskNameToId = new Map(tasks.map(t => [t.title, t.id]));
    for (let i = 0; i < parsedData.tasks.length; i++) {
      const taskData = parsedData.tasks[i];
      const task = tasks[i];
      if (taskData.dependencies && taskData.dependencies.length > 0) {
        for (const depName of taskData.dependencies) {
          const depTaskId = taskNameToId.get(depName);
          if (depTaskId) {
            await tx`
              INSERT INTO task_dependencies (task_id, depends_on_task_id, dependency_type)
              VALUES (${task.id}, ${depTaskId}, 'blocks')
              ON CONFLICT DO NOTHING
            `;
          }
        }
      }
    }
  });
  return tasks;
}
/**
 * Insert each parsed documentation entry for the project, sequentially,
 * returning the converted rows.
 * NOTE(review): unlike task creation this loop is not wrapped in a
 * transaction, so a mid-loop failure leaves earlier docs committed —
 * confirm whether that partial state is acceptable.
 */
async function createDocumentationFromParsedData(
  db: any,
  projectId: string,
  parsedData: ParsedPRPData,
  createdBy: string
): Promise<Documentation[]> {
  const documentation: Documentation[] = [];
  for (const docData of parsedData.documentation) {
    const [doc] = await db`
      INSERT INTO documentation (project_id, type, title, content, created_by)
      VALUES (${projectId}, ${docData.type}, ${docData.title}, ${docData.content}, ${createdBy})
      RETURNING *
    `;
    documentation.push(convertDocumentationRow(doc));
  }
  return documentation;
}
/**
 * Ensure a tag row exists for `tagName` and link it to the task.
 * Idempotent: both inserts tolerate pre-existing rows.
 */
async function upsertTagAndLink(tx: any, taskId: string, tagName: string, createdBy: string): Promise<void> {
  // Insert or get existing tag. The no-op DO UPDATE makes RETURNING yield
  // the row id on conflict as well as on a fresh insert.
  const [tag] = await tx`
    INSERT INTO tags (name, created_by)
    VALUES (${tagName}, ${createdBy})
    ON CONFLICT (name) DO UPDATE SET name = EXCLUDED.name
    RETURNING id
  `;
  // Link tag to task
  await tx`
    INSERT INTO task_tags (task_id, tag_id)
    VALUES (${taskId}, ${tag.id})
    ON CONFLICT DO NOTHING
  `;
}
/**
 * Register the PRP (Product Requirement Prompt) parsing MCP tools.
 *
 * parsePRP and validatePRP are registered for every authenticated user;
 * parsePRP only persists results when auto_create_tasks is set AND the
 * caller is in PRIVILEGED_USERS. getPRPParsingMetrics is privileged-only.
 */
export function registerPRPParsingTools(server: McpServer, env: Env, props: Props) {
  // Tool 1: Parse PRP Content
  server.tool(
    "parsePRP",
    "Parse a Product Requirement Prompt (PRP) to extract tasks, goals, and documentation using AI",
    {
      prp_content: z.string().min(10).max(100000),
      project_name: z.string().min(1).max(255).optional(),
      project_context: z.string().optional(),
      auto_create_tasks: z.boolean().default(false),
    },
    async ({ prp_content, project_name, project_context, auto_create_tasks }) => {
      try {
        console.log(`PRP parsing initiated by ${props.login}`);
        // Initialize PRP parser
        const parser = new PRPParser(env.ANTHROPIC_API_KEY, env.ANTHROPIC_MODEL);
        // Parse PRP with options
        const parsingResult = await parser.parsePRP(prp_content, {
          project_context,
          auto_validate: true,
          include_validation_report: true,
        });
        const { parsed_data, validation_report, metrics } = parsingResult;
        // Use provided project name or extracted name
        const finalProjectName = project_name || parsed_data.project_info.name;
        if (auto_create_tasks && PRIVILEGED_USERS.has(props.login)) {
          // Auto-create project and tasks in database
          return await withDatabase(env.DATABASE_URL, async (db) => {
            const project = await createProjectFromParsedData(db, parsed_data, finalProjectName, props.login);
            const tasks = await createTasksFromParsedData(db, project.id, parsed_data, props.login);
            const documentation = await createDocumentationFromParsedData(db, project.id, parsed_data, props.login);
            return createSuccessResponse(
              `PRP parsed and project created successfully!`,
              {
                project: project.name,
                tasks_created: tasks.length,
                documentation_created: documentation.length,
                metrics,
                validation_report,
                next_steps: [
                  "Use `listTasks` to view all created tasks",
                  "Use `updateTask` to modify task details",
                  "Use `getProjectOverview` for comprehensive project status"
                ]
              }
            );
          });
        } else {
          // Return parsed data without creating in database
          // (non-privileged callers, or auto_create_tasks=false).
          return createSuccessResponse(
            "PRP parsed successfully! Use auto_create_tasks=true to save to database (privileged users only).",
            {
              project_info: parsed_data.project_info,
              task_count: parsed_data.tasks.length,
              documentation_count: parsed_data.documentation.length,
              suggested_tags: parsed_data.suggested_tags,
              metrics,
              validation_report,
              // Preview only: descriptions truncated to 100 chars.
              parsed_tasks: parsed_data.tasks.map(t => ({
                title: t.title,
                description: t.description.substring(0, 100) + "...",
                priority: t.priority,
                estimated_hours: t.estimated_hours
              }))
            }
          );
        }
      } catch (error) {
        console.error('PRP parsing error:', error);
        return createErrorResponse(
          `PRP parsing failed: ${error instanceof Error ? error.message : 'Unknown error'}`,
          { user: props.login, error_type: 'parsing_error' }
        );
      }
    }
  );
  // Tool 2: Validate PRP Format
  server.tool(
    "validatePRP",
    "Validate PRP content format and provide feedback on structure and completeness",
    {
      prp_content: z.string().min(10).max(100000),
    },
    async ({ prp_content }) => {
      try {
        console.log(`PRP validation initiated by ${props.login}`);
        // Basic validation checks
        const validationIssues: string[] = [];
        const suggestions: string[] = [];
        // Length checks
        if (prp_content.length < 100) {
          validationIssues.push("PRP content is very short and may not contain enough detail");
        }
        if (prp_content.length > 50000) {
          validationIssues.push("PRP content is very long and may be difficult to parse effectively");
        }
        // Structure checks: cheap keyword heuristics only — deep parsing is
        // deliberately delegated to the LLM (see project guidelines).
        const hasGoals = /goal|objective|aim/i.test(prp_content);
        const hasWhy = /why|purpose|reason|motivation|value/i.test(prp_content);
        const hasUsers = /user|customer|audience|persona/i.test(prp_content);
        const hasTasks = /task|step|implement|build|create|develop/i.test(prp_content);
        if (!hasGoals) suggestions.push("Consider adding explicit goals or objectives");
        if (!hasWhy) suggestions.push("Consider explaining why this project is valuable");
        if (!hasUsers) suggestions.push("Consider describing target users or audiences");
        if (!hasTasks) suggestions.push("Consider including more specific implementation tasks");
        // Calculate completeness score: fraction of the four structure
        // checks that passed, as a percentage.
        const completenessFactors = [hasGoals, hasWhy, hasUsers, hasTasks];
        const completenessScore = (completenessFactors.filter(Boolean).length / completenessFactors.length) * 100;
        return createSuccessResponse(
          "PRP validation completed",
          {
            is_valid: validationIssues.length === 0,
            completeness_score: Math.round(completenessScore),
            character_count: prp_content.length,
            word_count: prp_content.split(/\s+/).length,
            validation_issues: validationIssues,
            suggestions: suggestions,
            structure_analysis: {
              has_goals: hasGoals,
              has_why_statement: hasWhy,
              has_target_users: hasUsers,
              has_actionable_tasks: hasTasks,
            }
          }
        );
      } catch (error) {
        console.error('PRP validation error:', error);
        return createErrorResponse(
          `PRP validation failed: ${error instanceof Error ? error.message : 'Unknown error'}`,
          { user: props.login }
        );
      }
    }
  );
  // Tool 3: Get Parsing Metrics
  if (PRIVILEGED_USERS.has(props.login)) {
    server.tool(
      "getPRPParsingMetrics",
      "Get API usage metrics for PRP parsing operations (privileged users only)",
      {},
      async () => {
        try {
          const parser = new PRPParser(env.ANTHROPIC_API_KEY, env.ANTHROPIC_MODEL);
          const metrics = parser.getClientMetrics();
          return createSuccessResponse(
            "PRP parsing metrics retrieved",
            {
              anthropic_api_metrics: metrics,
              cost_estimation: {
                input_tokens: metrics.total_input_tokens,
                output_tokens: metrics.total_output_tokens,
                // NOTE(review): hard-coded per-token rates — confirm they
                // match the pricing of the configured ANTHROPIC_MODEL.
                estimated_cost_usd: (metrics.total_input_tokens * 0.003 + metrics.total_output_tokens * 0.015) / 1000
              }
            }
          );
        } catch (error) {
          console.error('Metrics retrieval error:', error);
          return createErrorResponse(
            `Failed to retrieve metrics: ${error instanceof Error ? error.message : 'Unknown error'}`,
            { user: props.login }
          );
        }
      }
    );
  }
}

View File

@ -0,0 +1,43 @@
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { registerPRPParsingTools } from "./prp-parsing-tools";
import { registerTaskManagementTools } from "./task-management-tools";
import { registerDocumentationTools } from "./documentation-tools";
import { registerProjectOverviewTools } from "./project-overview-tools";
// Authenticated GitHub user context forwarded to every tool registrar.
interface Props {
  login: string;        // GitHub username, used for permission checks
  name: string;
  email: string;
  accessToken: string;
}

// Extends the base Worker Env with the bindings Taskmaster tools require.
interface TaskmasterEnv extends Env {
  DATABASE_URL: string;       // PostgreSQL connection string
  ANTHROPIC_API_KEY: string;  // for the PRP parsing tools
  ANTHROPIC_MODEL: string;    // Anthropic model id for PRP parsing
}
/**
 * Register all Taskmaster MCP tools with the server
 */
export function registerAllTaskmasterTools(
  server: McpServer,
  env: TaskmasterEnv,
  props: Props
) {
  console.log(`Registering Taskmaster tools for user: ${props.login}`);

  // Each registrar wires one tool family onto the server; order matches
  // the original registration sequence.
  const registrars = [
    registerPRPParsingTools,      // PRP parsing (uses Anthropic API)
    registerTaskManagementTools,  // task CRUD operations
    registerDocumentationTools,   // project docs management
    registerProjectOverviewTools, // overview and analytics
  ];
  for (const register of registrars) {
    register(server, env, props);
  }

  console.log(`All Taskmaster tools registered successfully for ${props.login}`);
}

View File

@ -1,6 +1,10 @@
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { Props } from "../types";
import { registerDatabaseTools } from "../../examples/database-tools";
import { registerPRPParsingTools } from "./prp-parsing-tools";
import { registerTaskManagementTools } from "./task-management-tools";
import { registerDocumentationTools } from "./documentation-tools";
import { registerProjectOverviewTools } from "./project-overview-tools";
/**
* Register all MCP tools based on user permissions
@ -12,3 +16,31 @@ export function registerAllTools(server: McpServer, env: Env, props: Props) {
// Future tools can be registered here
// registerOtherTools(server, env, props);
}
// Extended environment for Taskmaster
// Adds the Anthropic bindings needed by the PRP parsing tools.
interface TaskmasterEnv extends Env {
  ANTHROPIC_API_KEY: string;
  ANTHROPIC_MODEL: string;
}
/**
 * Register all Taskmaster-specific MCP tools based on user permissions
 * This is a separate registration function for the Taskmaster MCP server
 */
export function registerAllTaskmasterTools(server: McpServer, env: TaskmasterEnv, props: Props) {
  console.log(`Registering Taskmaster tools for user: ${props.login}`);

  // Registration order matches the original sequence: AI-powered PRP
  // parsing, task CRUD, documentation management, then overview/analytics.
  const toolRegistrars = [
    registerPRPParsingTools,
    registerTaskManagementTools,
    registerDocumentationTools,
    registerProjectOverviewTools,
  ];
  toolRegistrars.forEach((register) => register(server, env, props));

  console.log('All Taskmaster tools registered successfully');
}

View File

@ -0,0 +1,620 @@
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { withDatabase, validateSqlQuery, isWriteOperation } from "../database";
import { z } from "zod";
import type { Task, TaskWithRelations, Tag, TaskDependency } from "../types/taskmaster.js";
import {
convertTaskRow,
convertTagRow,
convertTaskDependencyRow,
SQL_QUERIES,
} from "../database/models.js";
// Authenticated GitHub user context supplied by the OAuth layer.
interface Props {
  login: string;        // GitHub username, used for all permission checks
  name: string;
  email: string;
  accessToken: string;
}

// Worker environment bindings required by the task management tools.
interface Env {
  DATABASE_URL: string; // PostgreSQL connection string
}

// Permission configuration
const TASK_MANAGERS = new Set<string>(['coleam00']); // Can modify any task
// NOTE(review): the original comment claimed "all authenticated users" can
// view, but this set restricts canViewTasks() to the listed users only —
// confirm which policy is intended.
const TASK_VIEWERS = new Set<string>(['coleam00']);
/** Build a standard MCP error payload, optionally embedding details as a fenced JSON block. */
function createErrorResponse(message: string, details?: any): any {
  const detailBlock = details
    ? `\n\n**Details:**\n\`\`\`json\n${JSON.stringify(details, null, 2)}\n\`\`\``
    : '';
  return {
    content: [
      {
        type: "text",
        text: `**Error**\n\n${message}${detailBlock}`,
        isError: true,
      },
    ],
  };
}
/** Build a standard MCP success payload, optionally embedding data as a fenced JSON block. */
function createSuccessResponse(message: string, data?: any): any {
  const dataBlock = data
    ? `\n\n**Data:**\n\`\`\`json\n${JSON.stringify(data, null, 2)}\n\`\`\``
    : '';
  return {
    content: [
      {
        type: "text",
        text: `**Success**\n\n${message}${dataBlock}`,
      },
    ],
  };
}
/**
 * Whether `username` may modify `task`: managers always can; otherwise the
 * task's creator or its current assignee can. With no task given, only
 * managers qualify.
 */
function canModifyTask(username: string, task?: Task): boolean {
  // Task managers can modify any task.
  if (TASK_MANAGERS.has(username)) {
    return true;
  }
  if (!task) {
    return false;
  }
  // Creators and assignees may modify their own tasks.
  return task.created_by === username || task.assigned_to === username;
}
// True when the user may run the read-only task tools (viewer or manager).
function canViewTasks(username: string): boolean {
  return TASK_VIEWERS.has(username) || TASK_MANAGERS.has(username);
}
/**
 * Load a task by id together with its project name, its tags, and the
 * tasks it depends on. Returns null when no task row exists.
 */
async function getTaskWithRelations(db: any, taskId: string): Promise<TaskWithRelations | null> {
  // LEFT JOIN keeps the task visible even if its project row is missing.
  const [taskRow] = await db`
    SELECT t.*, p.name as project_name
    FROM tasks t
    LEFT JOIN projects p ON t.project_id = p.id
    WHERE t.id = ${taskId}
  `;
  if (!taskRow) return null;
  const task = convertTaskRow(taskRow);
  // Get tags
  const tagRows = await db`
    SELECT tag.* FROM tags tag
    JOIN task_tags tt ON tag.id = tt.tag_id
    WHERE tt.task_id = ${taskId}
  `;
  const tags = tagRows.map(convertTagRow);
  // Get dependencies (tasks this one depends on, with their titles).
  const depRows = await db`
    SELECT td.*, t.title as depends_on_title
    FROM task_dependencies td
    JOIN tasks t ON td.depends_on_task_id = t.id
    WHERE td.task_id = ${taskId}
  `;
  const dependencies = depRows.map(convertTaskDependencyRow);
  return {
    ...task,
    tags,
    dependencies,
  };
}
/**
 * Ensure a tag row exists for `tagName` and link it to the task.
 * Idempotent: both inserts tolerate pre-existing rows.
 */
async function upsertTagAndLink(tx: any, taskId: string, tagName: string, createdBy: string): Promise<void> {
  // Insert or get existing tag. The no-op DO UPDATE makes RETURNING yield
  // the row id on conflict as well as on a fresh insert.
  const [tag] = await tx`
    INSERT INTO tags (name, created_by)
    VALUES (${tagName}, ${createdBy})
    ON CONFLICT (name) DO UPDATE SET name = EXCLUDED.name
    RETURNING id
  `;
  // Link tag to task
  await tx`
    INSERT INTO task_tags (task_id, tag_id)
    VALUES (${taskId}, ${tag.id})
    ON CONFLICT DO NOTHING
  `;
}
/**
 * Append one row to audit_logs recording who did what to which record.
 * Optional old/new value snapshots are stored as given (null when omitted).
 */
async function logAuditEntry(
  db: any,
  tableName: string,
  recordId: string,
  action: 'insert' | 'update' | 'delete',
  changedBy: string,
  oldValues?: any,
  newValues?: any
): Promise<void> {
  await db`
    INSERT INTO audit_logs (table_name, record_id, action, old_values, new_values, changed_by)
    VALUES (${tableName}, ${recordId}, ${action}, ${oldValues || null}, ${newValues || null}, ${changedBy})
  `;
}
/**
 * Register all task-management MCP tools on the given server.
 *
 * Tool availability is gated per authenticated GitHub user (props.login):
 * - TASK_MANAGERS members: createTask, deleteTask, createTaskDependency.
 * - Users passing canViewTasks: listTasks, getTask.
 * - updateTask is registered unconditionally; per-task authorization is
 *   enforced inside its handler via canModifyTask.
 *
 * All handlers open a connection via withDatabase(env.DATABASE_URL, ...),
 * return MCP responses through createSuccessResponse/createErrorResponse,
 * and write audit rows for mutating operations.
 */
export function registerTaskManagementTools(server: McpServer, env: Env, props: Props) {
  // Tool 1: Create Task
  if (TASK_MANAGERS.has(props.login)) {
    server.tool(
      "createTask",
      "Create a new task with metadata, tags, and validation (privileged users only)",
      {
        project_id: z.string().uuid(),
        title: z.string().min(1).max(500),
        description: z.string().optional(),
        priority: z.enum(['low', 'medium', 'high', 'urgent']).default('medium'),
        assigned_to: z.string().optional(),
        parent_task_id: z.string().uuid().optional(),
        estimated_hours: z.number().int().positive().optional(),
        due_date: z.string().datetime().optional(),
        acceptance_criteria: z.array(z.string()).optional(),
        tags: z.array(z.string()).optional(),
      },
      async ({ project_id, title, description, priority, assigned_to, parent_task_id, estimated_hours, due_date, acceptance_criteria, tags }) => {
        try {
          console.log(`Task creation initiated by ${props.login}: ${title}`);
          return await withDatabase(env.DATABASE_URL, async (db) => {
            // Verify project exists
            const [project] = await db`
              SELECT id, name FROM projects WHERE id = ${project_id}
            `;
            if (!project) {
              return createErrorResponse("Project not found", { project_id });
            }
            // Verify parent task exists if specified
            if (parent_task_id) {
              const [parentTask] = await db`
                SELECT id FROM tasks WHERE id = ${parent_task_id}
              `;
              if (!parentTask) {
                return createErrorResponse("Parent task not found", { parent_task_id });
              }
            }
            // Create task in transaction so the task row, its tag links, and
            // the audit entry commit (or roll back) together
            const taskData = await db.begin(async (tx: any) => {
              // Insert task
              const [task] = await tx`
                INSERT INTO tasks (
                  project_id, title, description, priority, assigned_to,
                  parent_task_id, estimated_hours, due_date, acceptance_criteria, created_by
                )
                VALUES (
                  ${project_id}, ${title}, ${description || null}, ${priority},
                  ${assigned_to || null}, ${parent_task_id || null},
                  ${estimated_hours || null}, ${due_date ? new Date(due_date) : null},
                  ${acceptance_criteria || null}, ${props.login}
                )
                RETURNING *
              `;
              const convertedTask = convertTaskRow(task);
              // Add tags if provided
              if (tags && tags.length > 0) {
                for (const tagName of tags) {
                  await upsertTagAndLink(tx, task.id, tagName, props.login);
                }
              }
              // Log audit entry
              await logAuditEntry(tx, 'tasks', task.id, 'insert', props.login, null, convertedTask);
              return convertedTask;
            });
            return createSuccessResponse(
              `Task created successfully: ${taskData.title}`,
              {
                task: taskData,
                project_name: project.name,
                created_by: props.name,
                next_steps: [
                  "Use `updateTask` to modify task details",
                  "Use `getTask` to view full task information",
                  "Use `listTasks` to see all project tasks"
                ]
              }
            );
          });
        } catch (error) {
          console.error('Task creation error:', error);
          return createErrorResponse(
            `Task creation failed: ${error instanceof Error ? error.message : 'Unknown error'}`,
            { user: props.login, task_title: title }
          );
        }
      }
    );
  }
  // Tool 2: List Tasks (available to all authenticated users)
  if (canViewTasks(props.login)) {
    server.tool(
      "listTasks",
      "List tasks with filtering options (status, priority, assigned user, tags, project)",
      {
        project_id: z.string().uuid().optional(),
        status: z.enum(['pending', 'in_progress', 'completed', 'blocked']).optional(),
        priority: z.enum(['low', 'medium', 'high', 'urgent']).optional(),
        assigned_to: z.string().optional(),
        tag: z.string().optional(),
        limit: z.number().int().positive().max(100).default(50),
        offset: z.number().int().min(0).default(0),
      },
      async ({ project_id, status, priority, assigned_to, tag, limit, offset }) => {
        try {
          console.log(`Task listing requested by ${props.login}`);
          return await withDatabase(env.DATABASE_URL, async (db) => {
            // Execute the complex query with filters.
            // Each `(${param}::type IS NULL OR col = ${param})` clause makes
            // the filter a no-op when the parameter was not supplied;
            // DISTINCT compensates for row multiplication from the tag joins.
            const tasks = await db`
              SELECT DISTINCT t.*, p.name as project_name
              FROM tasks t
              LEFT JOIN projects p ON t.project_id = p.id
              LEFT JOIN task_tags tt ON t.id = tt.task_id
              LEFT JOIN tags tag ON tt.tag_id = tag.id
              WHERE (${project_id || null}::uuid IS NULL OR t.project_id = ${project_id || null})
                AND (${status || null}::text IS NULL OR t.status = ${status || null})
                AND (${priority || null}::text IS NULL OR t.priority = ${priority || null})
                AND (${assigned_to || null}::text IS NULL OR t.assigned_to = ${assigned_to || null})
                AND (${tag || null}::text IS NULL OR tag.name = ${tag || null})
              ORDER BY t.created_at DESC
              LIMIT ${limit} OFFSET ${offset}
            `;
            const convertedTasks = tasks.map(convertTaskRow);
            // Get total count for pagination (same filters, no LIMIT/OFFSET)
            const [countResult] = await db`
              SELECT COUNT(DISTINCT t.id) as total
              FROM tasks t
              LEFT JOIN task_tags tt ON t.id = tt.task_id
              LEFT JOIN tags tag ON tt.tag_id = tag.id
              WHERE (${project_id || null}::uuid IS NULL OR t.project_id = ${project_id || null})
                AND (${status || null}::text IS NULL OR t.status = ${status || null})
                AND (${priority || null}::text IS NULL OR t.priority = ${priority || null})
                AND (${assigned_to || null}::text IS NULL OR t.assigned_to = ${assigned_to || null})
                AND (${tag || null}::text IS NULL OR tag.name = ${tag || null})
            `;
            const totalTasks = parseInt(countResult.total);
            const hasMore = offset + limit < totalTasks;
            return createSuccessResponse(
              `Found ${convertedTasks.length} tasks`,
              {
                tasks: convertedTasks,
                pagination: {
                  total: totalTasks,
                  limit,
                  offset,
                  has_more: hasMore,
                  next_offset: hasMore ? offset + limit : null
                },
                filters_applied: {
                  project_id,
                  status,
                  priority,
                  assigned_to,
                  tag
                }
              }
            );
          });
        } catch (error) {
          console.error('Task listing error:', error);
          return createErrorResponse(
            `Task listing failed: ${error instanceof Error ? error.message : 'Unknown error'}`,
            { user: props.login }
          );
        }
      }
    );
  }
  // Tool 3: Get Task Details
  if (canViewTasks(props.login)) {
    server.tool(
      "getTask",
      "Get detailed information about a specific task including tags, dependencies, and related data",
      {
        id: z.string().uuid(),
      },
      async ({ id }) => {
        try {
          console.log(`Task details requested by ${props.login}: ${id}`);
          return await withDatabase(env.DATABASE_URL, async (db) => {
            const taskWithRelations = await getTaskWithRelations(db, id);
            if (!taskWithRelations) {
              return createErrorResponse("Task not found", { task_id: id });
            }
            // Get subtasks if this is a parent task
            const subtasks = await db`
              SELECT id, title, status, priority, assigned_to
              FROM tasks
              WHERE parent_task_id = ${id}
              ORDER BY created_at ASC
            `;
            // Get tasks that depend on this task
            const dependentTasks = await db`
              SELECT t.id, t.title, t.status, td.dependency_type
              FROM task_dependencies td
              JOIN tasks t ON td.task_id = t.id
              WHERE td.depends_on_task_id = ${id}
              ORDER BY t.created_at ASC
            `;
            return createSuccessResponse(
              `Task details retrieved: ${taskWithRelations.title}`,
              {
                task: taskWithRelations,
                subtasks: subtasks.map(convertTaskRow),
                dependent_tasks: dependentTasks,
                // Surface the caller's effective permissions so clients can
                // enable/disable actions without a round trip
                permissions: {
                  can_modify: canModifyTask(props.login, taskWithRelations),
                  can_delete: TASK_MANAGERS.has(props.login),
                  can_assign: TASK_MANAGERS.has(props.login)
                }
              }
            );
          });
        } catch (error) {
          console.error('Task retrieval error:', error);
          return createErrorResponse(
            `Task retrieval failed: ${error instanceof Error ? error.message : 'Unknown error'}`,
            { user: props.login, task_id: id }
          );
        }
      }
    );
  }
  // Tool 4: Update Task
  // NOTE(review): registered for every user; authorization happens inside the
  // handler via canModifyTask (manager, creator, or assignee).
  server.tool(
    "updateTask",
    "Update task details, status, priority, assignments, and metadata",
    {
      id: z.string().uuid(),
      title: z.string().min(1).max(500).optional(),
      description: z.string().optional(),
      status: z.enum(['pending', 'in_progress', 'completed', 'blocked']).optional(),
      priority: z.enum(['low', 'medium', 'high', 'urgent']).optional(),
      assigned_to: z.string().optional(),
      parent_task_id: z.string().uuid().optional(),
      estimated_hours: z.number().int().positive().optional(),
      actual_hours: z.number().int().min(0).optional(),
      due_date: z.string().datetime().optional(),
      acceptance_criteria: z.array(z.string()).optional(),
      tags: z.array(z.string()).optional(),
    },
    async (updateData) => {
      try {
        console.log(`Task update initiated by ${props.login}: ${updateData.id}`);
        return await withDatabase(env.DATABASE_URL, async (db) => {
          // Get existing task for permission check
          const existingTask = await getTaskWithRelations(db, updateData.id);
          if (!existingTask) {
            return createErrorResponse("Task not found", { task_id: updateData.id });
          }
          // Check permissions
          if (!canModifyTask(props.login, existingTask)) {
            return createErrorResponse(
              "Insufficient permissions to modify this task",
              {
                task_id: updateData.id,
                required_permissions: "task manager, task creator, or assigned user"
              }
            );
          }
          // Update task in transaction
          const updatedTask = await db.begin(async (tx: any) => {
            // Build dynamic update query; explicit `!== undefined` checks let
            // callers set a field to null/empty while untouched fields stay as-is
            const updateFields: any = { updated_at: new Date() };
            if (updateData.title !== undefined) updateFields.title = updateData.title;
            if (updateData.description !== undefined) updateFields.description = updateData.description;
            if (updateData.status !== undefined) updateFields.status = updateData.status;
            if (updateData.priority !== undefined) updateFields.priority = updateData.priority;
            if (updateData.assigned_to !== undefined) updateFields.assigned_to = updateData.assigned_to;
            if (updateData.parent_task_id !== undefined) updateFields.parent_task_id = updateData.parent_task_id;
            if (updateData.estimated_hours !== undefined) updateFields.estimated_hours = updateData.estimated_hours;
            if (updateData.actual_hours !== undefined) updateFields.actual_hours = updateData.actual_hours;
            if (updateData.due_date !== undefined) updateFields.due_date = updateData.due_date ? new Date(updateData.due_date) : null;
            if (updateData.acceptance_criteria !== undefined) updateFields.acceptance_criteria = updateData.acceptance_criteria;
            // Only update if there are fields to update besides updated_at
            if (Object.keys(updateFields).length > 1) {
              const [task] = await tx`
                UPDATE tasks SET ${tx(updateFields)} WHERE id = ${updateData.id}
                RETURNING *
              `;
              // Handle tags update: replace the full tag set when provided
              if (updateData.tags !== undefined) {
                // Remove existing tags
                await tx`DELETE FROM task_tags WHERE task_id = ${updateData.id}`;
                // Add new tags
                if (updateData.tags.length > 0) {
                  for (const tagName of updateData.tags) {
                    await upsertTagAndLink(tx, updateData.id, tagName, props.login);
                  }
                }
              }
              // Log audit entry
              await logAuditEntry(tx, 'tasks', updateData.id, 'update', props.login, existingTask, convertTaskRow(task));
              return convertTaskRow(task);
            }
            // Nothing to change (tags-only updates also land here when no
            // scalar fields were supplied) — return the unchanged task
            return existingTask;
          });
          return createSuccessResponse(
            `Task updated successfully: ${updatedTask.title}`,
            {
              task: updatedTask,
              updated_by: props.name,
              changes_made: Object.keys(updateData).filter(key => (updateData as any)[key] !== undefined)
            }
          );
        });
      } catch (error) {
        console.error('Task update error:', error);
        return createErrorResponse(
          `Task update failed: ${error instanceof Error ? error.message : 'Unknown error'}`,
          { user: props.login, task_id: updateData.id }
        );
      }
    }
  );
  // Tool 5: Delete Task (privileged users only)
  if (TASK_MANAGERS.has(props.login)) {
    server.tool(
      "deleteTask",
      "Delete a task and all its relationships (privileged users only)",
      {
        id: z.string().uuid(),
      },
      async ({ id }) => {
        try {
          console.log(`Task deletion initiated by ${props.login}: ${id}`);
          return await withDatabase(env.DATABASE_URL, async (db) => {
            // Get task before deletion for audit log
            const existingTask = await getTaskWithRelations(db, id);
            if (!existingTask) {
              return createErrorResponse("Task not found", { task_id: id });
            }
            // Delete in transaction (cascading deletes will handle relationships)
            await db.begin(async (tx: any) => {
              // Log audit entry before deletion
              await logAuditEntry(tx, 'tasks', id, 'delete', props.login, existingTask, null);
              // Delete task (cascading deletes will handle tags, dependencies)
              await tx`DELETE FROM tasks WHERE id = ${id}`;
            });
            return createSuccessResponse(
              `Task deleted successfully: ${existingTask.title}`,
              {
                deleted_task: {
                  id: existingTask.id,
                  title: existingTask.title,
                  project_id: existingTask.project_id
                },
                deleted_by: props.name
              }
            );
          });
        } catch (error) {
          console.error('Task deletion error:', error);
          return createErrorResponse(
            `Task deletion failed: ${error instanceof Error ? error.message : 'Unknown error'}`,
            { user: props.login, task_id: id }
          );
        }
      }
    );
  }
  // Tool 6: Create Task Dependency
  if (TASK_MANAGERS.has(props.login)) {
    server.tool(
      "createTaskDependency",
      "Create a dependency relationship between two tasks (privileged users only)",
      {
        task_id: z.string().uuid(),
        depends_on_task_id: z.string().uuid(),
        dependency_type: z.enum(['blocks', 'related', 'subtask']).default('blocks'),
      },
      async ({ task_id, depends_on_task_id, dependency_type }) => {
        try {
          console.log(`Task dependency creation by ${props.login}: ${task_id} depends on ${depends_on_task_id}`);
          return await withDatabase(env.DATABASE_URL, async (db) => {
            // Verify both tasks exist
            const [task] = await db`SELECT id, title FROM tasks WHERE id = ${task_id}`;
            const [dependsOnTask] = await db`SELECT id, title FROM tasks WHERE id = ${depends_on_task_id}`;
            if (!task) {
              return createErrorResponse("Task not found", { task_id });
            }
            if (!dependsOnTask) {
              return createErrorResponse("Dependency task not found", { depends_on_task_id });
            }
            // Check for circular dependencies: walk the dependency graph
            // (depth-capped at 10) from tasks that depend on `task_id`; if
            // `depends_on_task_id` appears, the new edge would close a cycle
            const circularCheck = await db`
              WITH RECURSIVE dependency_chain AS (
                SELECT task_id, depends_on_task_id, 1 as depth
                FROM task_dependencies
                WHERE depends_on_task_id = ${task_id}
                UNION ALL
                SELECT td.task_id, td.depends_on_task_id, dc.depth + 1
                FROM task_dependencies td
                JOIN dependency_chain dc ON td.depends_on_task_id = dc.task_id
                WHERE dc.depth < 10
              )
              SELECT 1 FROM dependency_chain WHERE task_id = ${depends_on_task_id}
            `;
            if (circularCheck.length > 0) {
              return createErrorResponse(
                "Cannot create dependency: would create circular dependency",
                { task_id, depends_on_task_id }
              );
            }
            // Create dependency
            await db`
              INSERT INTO task_dependencies (task_id, depends_on_task_id, dependency_type)
              VALUES (${task_id}, ${depends_on_task_id}, ${dependency_type})
              ON CONFLICT DO NOTHING
            `;
            return createSuccessResponse(
              `Task dependency created: "${task.title}" depends on "${dependsOnTask.title}"`,
              {
                dependency: {
                  task: { id: task.id, title: task.title },
                  depends_on: { id: dependsOnTask.id, title: dependsOnTask.title },
                  type: dependency_type
                },
                created_by: props.name
              }
            );
          });
        } catch (error) {
          console.error('Task dependency creation error:', error);
          return createErrorResponse(
            `Task dependency creation failed: ${error instanceof Error ? error.message : 'Unknown error'}`,
            { user: props.login, task_id, depends_on_task_id }
          );
        }
      }
    );
  }
}

View File

@ -0,0 +1,134 @@
// Anthropic API types and interfaces for PRP parsing

// One chat turn in a Messages API request payload.
export interface AnthropicMessage {
  role: 'user' | 'assistant';
  content: string;
}

// Request body for the Anthropic Messages API.
export interface AnthropicRequest {
  model: string;
  max_tokens: number; // hard cap on generated tokens
  temperature?: number;
  messages: AnthropicMessage[];
}

// Successful Messages API response (discriminated by type === 'message').
export interface AnthropicResponse {
  content: Array<{
    type: 'text';
    text: string;
  }>;
  id: string;
  model: string;
  role: 'assistant';
  stop_reason: 'end_turn' | 'max_tokens' | 'stop_sequence';
  stop_sequence?: string;
  type: 'message';
  usage: {
    input_tokens: number;  // tokens consumed by the prompt
    output_tokens: number; // tokens generated in the reply
  };
}

// Error envelope returned by the API (discriminated by type === 'error').
export interface AnthropicError {
  type: 'error';
  error: {
    type: 'invalid_request_error' | 'authentication_error' | 'permission_error' | 'not_found_error' | 'rate_limit_error' | 'api_error' | 'overloaded_error';
    message: string;
  };
}
// PRP parsing configuration

// Knobs controlling how a PRP document is parsed by the LLM.
export interface PRPParsingConfig {
  model: string;
  max_tokens: number;
  temperature: number;
  include_context: boolean;             // pass project context to the model
  extract_acceptance_criteria: boolean; // ask for per-task acceptance criteria
  suggest_tags: boolean;                // ask for suggested tags
  estimate_hours: boolean;              // ask for per-task hour estimates
}

export const DEFAULT_PRP_CONFIG: PRPParsingConfig = {
  model: 'claude-3-sonnet-20240229',
  max_tokens: 4000,
  temperature: 0.1, // Low temperature for consistent parsing
  include_context: true,
  extract_acceptance_criteria: true,
  suggest_tags: true,
  estimate_hours: true,
};

// API client configuration

// Connection settings for the Anthropic HTTP client.
export interface AnthropicClientConfig {
  apiKey: string;
  baseUrl: string;
  timeout: number;    // per-request timeout in milliseconds
  maxRetries: number;
  retryDelay: number; // base retry delay in milliseconds
}

// Partial because apiKey has no safe default and must come from the caller.
export const DEFAULT_CLIENT_CONFIG: Partial<AnthropicClientConfig> = {
  baseUrl: 'https://api.anthropic.com/v1',
  timeout: 60000, // 60 seconds
  maxRetries: 3,
  retryDelay: 1000, // 1 second
};
// Rate limiting and error handling types

// Snapshot of API rate-limit state (presumably populated from response
// headers — TODO confirm against the client implementation).
export interface RateLimitInfo {
  requests_per_minute: number;
  tokens_per_minute: number;
  requests_remaining: number;
  tokens_remaining: number;
  reset_time: Date;
}

// Aggregate usage/latency counters for the Anthropic client.
export interface AnthropicAPIMetrics {
  total_requests: number;
  successful_requests: number;
  failed_requests: number;
  total_input_tokens: number;
  total_output_tokens: number;
  average_response_time: number; // milliseconds
  rate_limit_hits: number;
}

// Retry strategy configuration

export interface RetryConfig {
  max_attempts: number;
  base_delay: number; // milliseconds
  max_delay: number;  // milliseconds, cap for backoff
  exponential_backoff: boolean;
  retry_on_rate_limit: boolean;
  retry_on_server_error: boolean;
}

export const DEFAULT_RETRY_CONFIG: RetryConfig = {
  max_attempts: 3,
  base_delay: 1000,
  max_delay: 10000,
  exponential_backoff: true,
  retry_on_rate_limit: true,
  retry_on_server_error: true,
};
// Helper type guards
/**
 * Type guard: narrows an arbitrary API response to AnthropicError.
 * Coerces the result to a real boolean — the raw `a && b && c` chain would
 * otherwise return the truthy `response.error` object (or null/undefined),
 * violating the boolean contract of a type predicate.
 */
export function isAnthropicError(response: any): response is AnthropicError {
  return Boolean(response && response.type === 'error' && response.error);
}
/**
 * Type guard: narrows an arbitrary API response to AnthropicResponse.
 * Boolean() ensures a strict boolean return even when `response` is
 * null/undefined (the raw && chain would leak that value through).
 */
export function isAnthropicResponse(response: any): response is AnthropicResponse {
  return Boolean(response && response.type === 'message' && response.content && Array.isArray(response.content));
}
/** True when the API error represents a rate-limit rejection. */
export function isRateLimitError(apiError: AnthropicError): boolean {
  const { type } = apiError.error;
  return type === 'rate_limit_error';
}
/** True when the API error is an authentication (bad/expired key) failure. */
export function isAuthenticationError(apiError: AnthropicError): boolean {
  const { type } = apiError.error;
  return type === 'authentication_error';
}
/** True for server-side failures (api_error / overloaded_error) that are
 *  generally safe to retry. */
export function isServerError(apiError: AnthropicError): boolean {
  switch (apiError.error.type) {
    case 'api_error':
    case 'overloaded_error':
      return true;
    default:
      return false;
  }
}

View File

@ -0,0 +1,265 @@
import { z } from "zod";
// Core database model interfaces

// A project groups tasks and documentation; created_by is a GitHub login.
export interface Project {
  id: string; // UUID
  name: string;
  description?: string;
  goals?: string;
  target_users?: string;
  why_statement?: string;
  created_by: string;
  created_at: Date;
  updated_at: Date;
}

// A unit of work within a project; may be nested via parent_task_id.
export interface Task {
  id: string; // UUID
  project_id: string;
  title: string;
  description?: string;
  status: 'pending' | 'in_progress' | 'completed' | 'blocked';
  priority: 'low' | 'medium' | 'high' | 'urgent';
  assigned_to?: string;    // GitHub login of the assignee
  parent_task_id?: string; // set when this is a subtask
  estimated_hours?: number;
  actual_hours?: number;
  due_date?: Date;
  acceptance_criteria?: string[];
  created_by: string;
  created_at: Date;
  updated_at: Date;
  tags?: Tag[];                   // populated only when relations are loaded
  dependencies?: TaskDependency[]; // populated only when relations are loaded
}

// Versioned project documentation entries.
export interface Documentation {
  id: string;
  project_id: string;
  type: 'goals' | 'why' | 'target_users' | 'specifications' | 'notes';
  title: string;
  content: string;
  version: number;
  created_by: string;
  created_at: Date;
  updated_at: Date;
}

// A reusable label; name is unique (see upsert logic in the tools module).
export interface Tag {
  id: string;
  name: string;
  color?: string; // hex color, e.g. "#ff0000"
  description?: string;
  created_by: string;
  created_at: Date;
}

// Join row linking a task to a tag (many-to-many).
export interface TaskTag {
  task_id: string;
  tag_id: string;
}

// Directed edge in the task dependency graph.
export interface TaskDependency {
  task_id: string;            // the dependent task
  depends_on_task_id: string; // the prerequisite task
  dependency_type: 'blocks' | 'related' | 'subtask';
}

// Immutable change-history row written for every mutating operation.
export interface AuditLog {
  id: string;
  table_name: string;
  record_id: string;
  action: 'insert' | 'update' | 'delete';
  old_values?: Record<string, any>; // absent for inserts
  new_values?: Record<string, any>; // absent for deletes
  changed_by: string;
  changed_at: Date;
}
// LLM parsing response structure

// Shape the LLM is asked to return when parsing a PRP document.
export interface ParsedPRPData {
  project_info: {
    name: string;
    description: string;
    goals: string;
    why_statement: string;
    target_users: string;
  };
  tasks: {
    title: string;
    description: string;
    priority: 'low' | 'medium' | 'high' | 'urgent';
    estimated_hours?: number;
    tags?: string[];
    dependencies?: string[]; // Task titles that this depends on
    acceptance_criteria?: string[];
  }[];
  documentation: {
    type: 'goals' | 'why' | 'target_users' | 'specifications' | 'notes';
    title: string;
    content: string;
  }[];
  suggested_tags: string[];
}

// Extended task with relations for responses
export interface TaskWithRelations extends Task {
  tags: Tag[];
  dependencies: TaskDependency[];
  project?: Project;
}

// Project overview aggregation — dashboard-style summary for one project.
export interface ProjectOverview {
  project: Project;
  task_statistics: {
    total_tasks: number;
    completed_tasks: number;
    in_progress_tasks: number;
    pending_tasks: number;
    blocked_tasks: number;
    completion_percentage: number; // 0-100
  };
  recent_activity: {
    recent_tasks: Task[];
    recent_documentation: Documentation[];
  };
  tags: Tag[];
  upcoming_deadlines: Task[];
}
// Zod schemas for validation
// These mirror the inline schemas used when registering MCP tools; keep the
// two in sync when adding fields.

export const CreateProjectSchema = z.object({
  name: z.string().min(1).max(255),
  description: z.string().optional(),
  goals: z.string().optional(),
  target_users: z.string().optional(),
  why_statement: z.string().optional(),
});

export const CreateTaskSchema = z.object({
  project_id: z.string().uuid(),
  title: z.string().min(1).max(500),
  description: z.string().optional(),
  priority: z.enum(['low', 'medium', 'high', 'urgent']).default('medium'),
  assigned_to: z.string().optional(),
  parent_task_id: z.string().uuid().optional(),
  estimated_hours: z.number().int().positive().optional(),
  due_date: z.string().datetime().optional(), // ISO 8601 string
  acceptance_criteria: z.array(z.string()).optional(),
  tags: z.array(z.string()).optional(),
});

export const UpdateTaskSchema = z.object({
  id: z.string().uuid(),
  // All fields below are optional: only supplied fields are updated.
  title: z.string().min(1).max(500).optional(),
  description: z.string().optional(),
  status: z.enum(['pending', 'in_progress', 'completed', 'blocked']).optional(),
  priority: z.enum(['low', 'medium', 'high', 'urgent']).optional(),
  assigned_to: z.string().optional(),
  parent_task_id: z.string().uuid().optional(),
  estimated_hours: z.number().int().positive().optional(),
  actual_hours: z.number().int().min(0).optional(),
  due_date: z.string().datetime().optional(),
  acceptance_criteria: z.array(z.string()).optional(),
  tags: z.array(z.string()).optional(), // replaces the full tag set
});

export const CreateDocumentationSchema = z.object({
  project_id: z.string().uuid(),
  type: z.enum(['goals', 'why', 'target_users', 'specifications', 'notes']),
  title: z.string().min(1).max(255),
  content: z.string().min(1),
});

export const UpdateDocumentationSchema = z.object({
  id: z.string().uuid(),
  title: z.string().min(1).max(255).optional(),
  content: z.string().min(1).optional(),
});

export const CreateTagSchema = z.object({
  name: z.string().min(1).max(100),
  color: z.string().regex(/^#[0-9A-Fa-f]{6}$/).optional(), // "#rrggbb"
  description: z.string().optional(),
});

export const ParsePRPSchema = z.object({
  prp_content: z.string().min(10).max(100000), // cap guards LLM token budget
  project_name: z.string().min(1).max(255).optional(),
  project_context: z.string().optional(),
  auto_create_tasks: z.boolean().default(false),
});

export const ListTasksSchema = z.object({
  project_id: z.string().uuid().optional(),
  status: z.enum(['pending', 'in_progress', 'completed', 'blocked']).optional(),
  priority: z.enum(['low', 'medium', 'high', 'urgent']).optional(),
  assigned_to: z.string().optional(),
  tag: z.string().optional(),
  limit: z.number().int().positive().max(100).default(50),
  offset: z.number().int().min(0).default(0),
});

export const GetTaskSchema = z.object({
  id: z.string().uuid(),
});

export const DeleteTaskSchema = z.object({
  id: z.string().uuid(),
});

export const GetProjectOverviewSchema = z.object({
  project_id: z.string().uuid(),
});

export const ListProjectsSchema = z.object({
  limit: z.number().int().positive().max(50).default(20),
  offset: z.number().int().min(0).default(0),
});

// Task dependency validation schema
export const CreateTaskDependencySchema = z.object({
  task_id: z.string().uuid(),
  depends_on_task_id: z.string().uuid(),
  dependency_type: z.enum(['blocks', 'related', 'subtask']).default('blocks'),
}).refine(data => data.task_id !== data.depends_on_task_id, {
  message: "A task cannot depend on itself",
});
// Validation helpers
/** Runtime guard: is `status` one of the four valid task statuses? */
export function validateTaskStatus(status: string): status is Task['status'] {
  switch (status) {
    case 'pending':
    case 'in_progress':
    case 'completed':
    case 'blocked':
      return true;
    default:
      return false;
  }
}
/** Runtime guard: is `priority` one of the four valid task priorities? */
export function validateTaskPriority(priority: string): priority is Task['priority'] {
  switch (priority) {
    case 'low':
    case 'medium':
    case 'high':
    case 'urgent':
      return true;
    default:
      return false;
  }
}
/** Runtime guard: is `type` a valid documentation category? */
export function validateDocumentationType(type: string): type is Documentation['type'] {
  switch (type) {
    case 'goals':
    case 'why':
    case 'target_users':
    case 'specifications':
    case 'notes':
      return true;
    default:
      return false;
  }
}
// Type guards
/**
 * Type guard: does `obj` look like a Task (string id and title)?
 * Boolean() ensures a strict boolean return — the raw && chain would leak
 * null/undefined through when `obj` is falsy, breaking the predicate contract.
 */
export function isTask(obj: any): obj is Task {
  return Boolean(obj && typeof obj.id === 'string' && typeof obj.title === 'string');
}
/**
 * Type guard: does `obj` look like a Project (string id and name)?
 * Boolean() ensures a strict boolean return even when `obj` is null/undefined.
 */
export function isProject(obj: any): obj is Project {
  return Boolean(obj && typeof obj.id === 'string' && typeof obj.name === 'string');
}
// Error response types

// Structured error envelope used by tool handlers and error utilities.
export interface TaskmasterError {
  type: 'validation' | 'permission' | 'database' | 'llm' | 'not_found';
  message: string;
  details?: Record<string, any>; // free-form context (ids, field names, ...)
}
/** Build a TaskmasterError envelope from its parts. */
export function createTaskmasterError(
  type: TaskmasterError['type'],
  message: string,
  details?: Record<string, any>
): TaskmasterError {
  const taskmasterError: TaskmasterError = { type, message, details };
  return taskmasterError;
}

View File

@ -0,0 +1,367 @@
import type { TaskmasterError } from "../types/taskmaster.js";
/**
 * Centralized error handling utilities for Taskmaster MCP Server.
 * Classifies raw errors into categories/severities and produces
 * user-facing messages with recovery guidance.
 */

// Error type classification — drives user messaging and retry decisions.
export enum ErrorCategory {
  VALIDATION = 'validation',
  PERMISSION = 'permission',
  DATABASE = 'database',
  LLM = 'llm',
  NOT_FOUND = 'not_found',
  NETWORK = 'network',
  RATE_LIMIT = 'rate_limit',
  AUTHENTICATION = 'authentication',
  INTERNAL = 'internal'
}

// Error severity levels — drives the console channel used by logError.
export enum ErrorSeverity {
  LOW = 'low',
  MEDIUM = 'medium',
  HIGH = 'high',
  CRITICAL = 'critical'
}

// Enhanced error interface: a TaskmasterError augmented with classification,
// dual-audience messages, and tracing metadata.
export interface EnhancedError extends TaskmasterError {
  category: ErrorCategory;
  severity: ErrorSeverity;
  user_message: string;      // safe to show to end users
  technical_message: string; // raw underlying error message
  recovery_suggestions: string[];
  error_code?: string;       // stable machine-readable code, e.g. 'LLM_RATE_LIMIT'
  correlation_id?: string;   // request-tracing id (see generateCorrelationId)
}
/**
* Safely execute LLM operations with comprehensive error handling
*/
export async function safeLLMOperation<T>(
operation: () => Promise<T>,
operationName: string = 'LLM Operation',
correlationId?: string
): Promise<T> {
const startTime = Date.now();
try {
console.log(`${operationName} started`, { correlation_id: correlationId });
const result = await operation();
const duration = Date.now() - startTime;
console.log(`${operationName} completed successfully in ${duration}ms`, {
correlation_id: correlationId
});
return result;
} catch (error) {
const duration = Date.now() - startTime;
const enhancedError = enhanceError(error, operationName, correlationId);
console.error(`${operationName} failed after ${duration}ms`, {
error: enhancedError,
correlation_id: correlationId
});
throw createUserFriendlyError(enhancedError);
}
}
/**
* Safely execute database operations with error handling and recovery
*/
export async function safeDatabaseOperation<T>(
operation: () => Promise<T>,
operationName: string = 'Database Operation',
correlationId?: string
): Promise<T> {
const startTime = Date.now();
try {
console.log(`${operationName} started`, { correlation_id: correlationId });
const result = await operation();
const duration = Date.now() - startTime;
console.log(`${operationName} completed successfully in ${duration}ms`, {
correlation_id: correlationId
});
return result;
} catch (error) {
const duration = Date.now() - startTime;
const enhancedError = enhanceError(error, operationName, correlationId);
console.error(`${operationName} failed after ${duration}ms`, {
error: enhancedError,
correlation_id: correlationId
});
throw createUserFriendlyError(enhancedError);
}
}
/**
 * Classify a raw error into an EnhancedError with category, severity,
 * user-facing message, and recovery suggestions.
 *
 * Classification is keyword-based on the lowercased error message; the
 * if/else ORDER is significant (e.g. 'parse' is matched before 'database',
 * so a "failed to parse SQL" message classifies as LLM) — do not reorder
 * branches without reviewing the keyword overlaps.
 */
function enhanceError(
  error: unknown,
  operationName: string,
  correlationId?: string
): EnhancedError {
  // Normalize non-Error throwables so .message is always available
  const baseError = error instanceof Error ? error : new Error(String(error));
  const message = baseError.message.toLowerCase();
  // Categorize error based on message content
  let category: ErrorCategory;
  let severity: ErrorSeverity;
  let userMessage: string;
  let recoverySuggestions: string[];
  let errorCode: string | undefined;
  // LLM-specific errors
  if (message.includes('rate_limit') || message.includes('rate limit')) {
    category = ErrorCategory.RATE_LIMIT;
    severity = ErrorSeverity.MEDIUM;
    userMessage = 'API rate limit exceeded. Please wait a moment before trying again.';
    recoverySuggestions = [
      'Wait 60 seconds before retrying',
      'Try with shorter content if parsing a large PRP',
      'Consider breaking large operations into smaller chunks'
    ];
    errorCode = 'LLM_RATE_LIMIT';
  } else if (message.includes('authentication') || message.includes('api key') || message.includes('invalid_api_key')) {
    category = ErrorCategory.AUTHENTICATION;
    severity = ErrorSeverity.HIGH;
    userMessage = 'API authentication failed. Please check the configuration.';
    recoverySuggestions = [
      'Contact administrator to verify API key configuration',
      'Check if API key has expired or been revoked'
    ];
    errorCode = 'LLM_AUTH_FAILED';
  } else if (message.includes('timeout') || message.includes('timed out')) {
    category = ErrorCategory.NETWORK;
    severity = ErrorSeverity.MEDIUM;
    userMessage = 'Request timed out. Please try again with shorter content.';
    recoverySuggestions = [
      'Retry the operation',
      'Try with shorter or simpler content',
      'Check network connectivity'
    ];
    errorCode = 'OPERATION_TIMEOUT';
  } else if (message.includes('json') || message.includes('parse')) {
    category = ErrorCategory.LLM;
    severity = ErrorSeverity.MEDIUM;
    userMessage = 'Failed to parse AI response. The content may be too complex.';
    recoverySuggestions = [
      'Try simplifying the input content',
      'Retry the operation as this may be a temporary issue',
      'Break complex content into smaller sections'
    ];
    errorCode = 'LLM_PARSE_ERROR';
  }
  // Database-specific errors
  else if (message.includes('database') || message.includes('postgres') || message.includes('sql')) {
    category = ErrorCategory.DATABASE;
    severity = ErrorSeverity.HIGH;
    userMessage = 'Database operation failed. Please try again.';
    recoverySuggestions = [
      'Retry the operation',
      'Check if all required fields are provided',
      'Contact administrator if problem persists'
    ];
    errorCode = 'DATABASE_ERROR';
  } else if (message.includes('not found') || message.includes('does not exist')) {
    category = ErrorCategory.NOT_FOUND;
    severity = ErrorSeverity.LOW;
    userMessage = 'The requested resource was not found.';
    recoverySuggestions = [
      'Verify the ID or name is correct',
      'Check if the resource was recently deleted',
      'Use list operations to find the correct resource'
    ];
    errorCode = 'RESOURCE_NOT_FOUND';
  } else if (message.includes('permission') || message.includes('unauthorized') || message.includes('forbidden')) {
    category = ErrorCategory.PERMISSION;
    severity = ErrorSeverity.MEDIUM;
    userMessage = 'You do not have permission to perform this operation.';
    recoverySuggestions = [
      'Contact administrator for additional permissions',
      'Try a read-only operation instead',
      'Check if you are assigned to this project or task'
    ];
    errorCode = 'INSUFFICIENT_PERMISSIONS';
  } else if (message.includes('validation') || message.includes('invalid') || message.includes('required')) {
    category = ErrorCategory.VALIDATION;
    severity = ErrorSeverity.LOW;
    userMessage = 'Input validation failed. Please check your data and try again.';
    recoverySuggestions = [
      'Review the input parameters and format',
      'Check that all required fields are provided',
      'Verify data types and constraints'
    ];
    errorCode = 'VALIDATION_ERROR';
  }
  // Generic/unknown errors — fallback when no keyword matched
  else {
    category = ErrorCategory.INTERNAL;
    severity = ErrorSeverity.HIGH;
    userMessage = 'An unexpected error occurred. Please try again.';
    recoverySuggestions = [
      'Retry the operation',
      'Contact administrator if problem persists',
      'Check the system status'
    ];
    errorCode = 'INTERNAL_ERROR';
  }
  // `type` duplicates `category` so the result also satisfies TaskmasterError
  return {
    type: category,
    message: userMessage,
    details: {
      operation: operationName,
      original_error: baseError.message,
      correlation_id: correlationId,
      error_code: errorCode,
      timestamp: new Date().toISOString()
    },
    category,
    severity,
    user_message: userMessage,
    technical_message: baseError.message,
    recovery_suggestions: recoverySuggestions,
    error_code: errorCode,
    correlation_id: correlationId
  };
}
/**
 * Convert an EnhancedError into a plain Error whose message is formatted
 * for direct display: summary, technical cause, recovery steps, error code,
 * and correlation id — one section per line.
 */
function createUserFriendlyError(enhancedError: EnhancedError): Error {
  const sections = [
    enhancedError.user_message,
    `**What happened:** ${enhancedError.technical_message}`,
    '**What you can do:**',
    enhancedError.recovery_suggestions.join('\n'),
    `**Error Code:** ${enhancedError.error_code || 'UNKNOWN'}`,
    `**Correlation ID:** ${enhancedError.correlation_id || 'N/A'}`
  ];
  const friendly = new Error(sections.join('\n'));
  // Name encodes the category, e.g. "LLM_ERROR", "DATABASE_ERROR"
  friendly.name = `${enhancedError.category.toUpperCase()}_ERROR`;
  return friendly;
}
/**
 * Sanitize an error message to prevent information leakage: redacts
 * credentials (password/api key/secret/token), e-mail addresses, IPv4
 * addresses, and PostgreSQL connection URLs before the text reaches a user.
 *
 * @param error any thrown value; non-Error values yield a generic message
 * @returns the message with every sensitive match replaced by '[REDACTED]'
 */
export function sanitizeErrorMessage(error: unknown): string {
  if (!(error instanceof Error)) {
    return 'An unknown error occurred';
  }
  // (Fixed) the original computed an unused lowercased copy of the message;
  // the dead local has been removed.
  // Patterns for secrets and identifiers that must never reach a client.
  const sensitivePatterns = [
    /password[=:\s]+[^\s]+/gi,
    /api[_\s]?key[=:\s]+[^\s]+/gi,
    /secret[=:\s]+[^\s]+/gi,
    /token[=:\s]+[^\s]+/gi,
    /\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b/gi, // Email addresses
    /\b(?:\d{1,3}\.){3}\d{1,3}\b/g, // IP addresses
    /postgresql:\/\/[^\s]+/gi, // Database URLs
  ];
  let sanitized = error.message;
  for (const pattern of sensitivePatterns) {
    sanitized = sanitized.replace(pattern, '[REDACTED]');
  }
  return sanitized;
}
/**
 * Generate a correlation ID for request tracing.
 * Format: "<epoch-ms>-<up to 9 base36 chars>".
 *
 * Uses String.prototype.slice(2, 11) instead of the deprecated substr(2, 9);
 * both extract the same nine characters after the "0." prefix of
 * Math.random().toString(36).
 */
export function generateCorrelationId(): string {
  return `${Date.now()}-${Math.random().toString(36).slice(2, 11)}`;
}
/**
 * Log an error at a level appropriate to its severity.
 * Plain Errors (no severity field) are logged via console.error with a
 * generic label; EnhancedErrors route to error/warn/info by severity.
 */
export function logError(error: EnhancedError | Error, context?: Record<string, any>): void {
  // Merge caller context with a timestamp on every entry
  const logContext = {
    timestamp: new Date().toISOString(),
    ...context
  };
  if (!('severity' in error)) {
    console.error('[ERROR]', error, logContext);
    return;
  }
  if (error.severity === ErrorSeverity.CRITICAL) {
    console.error('[CRITICAL]', error, logContext);
  } else if (error.severity === ErrorSeverity.HIGH) {
    console.error('[HIGH]', error, logContext);
  } else if (error.severity === ErrorSeverity.MEDIUM) {
    console.warn('[MEDIUM]', error, logContext);
  } else if (error.severity === ErrorSeverity.LOW) {
    console.info('[LOW]', error, logContext);
  }
  // Unknown severities are intentionally not logged (matches original switch)
}
/**
 * Check if error is retryable based on category.
 *
 * Network, rate-limit and LLM failures are treated as potentially
 * transient; plain Errors (no category metadata) are never retryable.
 */
export function isRetryableError(error: EnhancedError | Error): boolean {
  if (!('category' in error)) {
    return false;
  }
  switch (error.category) {
    case ErrorCategory.NETWORK:
    case ErrorCategory.RATE_LIMIT:
    case ErrorCategory.LLM: // Some LLM errors are retryable
      return true;
    default:
      return false;
  }
}
/**
 * Calculate retry delay with exponential backoff.
 *
 * Doubles the base delay for every attempt after the first, capped at
 * 30 seconds.
 *
 * @param attempt - 1-based retry attempt number.
 * @param baseDelay - Delay for the first attempt, in milliseconds.
 * @returns Delay in milliseconds, never exceeding the 30s ceiling.
 */
export function calculateRetryDelay(attempt: number, baseDelay: number = 1000): number {
  const MAX_DELAY_MS = 30000; // hard ceiling on backoff
  const exponentialDelay = baseDelay * 2 ** (attempt - 1);
  return Math.min(exponentialDelay, MAX_DELAY_MS);
}
/**
 * Create standardized MCP error response.
 *
 * Wraps any thrown value in an EnhancedError and renders it as a single
 * text content item containing the user message, recovery suggestions
 * and error code.
 *
 * NOTE(review): `isError` is set on the content item rather than on the
 * response object itself; the MCP result-level `isError` flag is the
 * spec'd location — confirm clients read it from here before moving it
 * (the existing unit tests assert the content-item placement).
 */
export function createMCPErrorResponse(error: unknown, operationName?: string): any {
  const operation = operationName || 'Operation';
  const normalized = error instanceof Error ? error : new Error(String(error));
  const enhancedError = enhanceError(normalized, operation);
  const suggestions = enhancedError.recovery_suggestions.join('\n');
  const text = `**Error**\n\n${enhancedError.user_message}\n\n**Recovery Options:**\n${suggestions}\n\n**Error Code:** ${enhancedError.error_code || 'UNKNOWN'}`;
  return {
    content: [{
      type: "text",
      text,
      isError: true
    }]
  };
}

View File

@ -1,257 +0,0 @@
import { describe, it, expect, vi, beforeEach } from 'vitest'
// Mock the database modules.
// Vitest hoists vi.mock calls above the imports, so these factories replace
// the real connection/utils modules before database-tools is loaded. The
// factories are lazy, which is why they can close over mockDbInstance.
const mockDbInstance = {
  unsafe: vi.fn(), // stands in for postgres.js `sql.unsafe(query)`
  end: vi.fn(),    // connection teardown; asserted-not-needed in these tests
}
vi.mock('../../../src/database/connection', () => ({
  getDb: vi.fn(() => mockDbInstance),
}))
vi.mock('../../../src/database/utils', () => ({
  // Run the operation directly against the shared mock instead of opening
  // a real connection; the URL argument is accepted and ignored.
  withDatabase: vi.fn(async (url: string, operation: any) => {
    return await operation(mockDbInstance)
  }),
}))
// Now import the modules
import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js'
import { registerDatabaseTools } from '../../../src/tools/database-tools'
import { mockProps, mockPrivilegedProps } from '../../fixtures/auth.fixtures'
import { mockEnv } from '../../mocks/oauth.mock'
import { mockTableColumns, mockQueryResult } from '../../fixtures/database.fixtures'
// Unit tests for the database MCP tools: verifies that tool registration is
// scoped by user privilege (executeDatabase is privileged-only) and that each
// tool's happy path, SQL validation and database-error handling behave as
// expected against the mocked database layer above.
describe('Database Tools', () => {
  let mockServer: McpServer
  beforeEach(() => {
    vi.clearAllMocks()
    mockServer = new McpServer({ name: 'test', version: '1.0.0' })
    // Setup database mocks.
    // Route queries by keyword so each tool sees a plausible result set:
    // schema introspection, SELECT reads, and write statements.
    mockDbInstance.unsafe.mockImplementation((query: string) => {
      if (query.includes('information_schema.columns')) {
        return Promise.resolve(mockTableColumns)
      }
      if (query.includes('SELECT')) {
        return Promise.resolve(mockQueryResult)
      }
      if (query.includes('INSERT') || query.includes('UPDATE') || query.includes('DELETE')) {
        return Promise.resolve([{ affectedRows: 1 }])
      }
      return Promise.resolve([])
    })
  })
  describe('registerDatabaseTools', () => {
    it('should register listTables and queryDatabase for regular users', () => {
      const toolSpy = vi.spyOn(mockServer, 'tool')
      registerDatabaseTools(mockServer, mockEnv as any, mockProps)
      expect(toolSpy).toHaveBeenCalledWith(
        'listTables',
        expect.any(String),
        expect.any(Object),
        expect.any(Function)
      )
      expect(toolSpy).toHaveBeenCalledWith(
        'queryDatabase',
        expect.any(String),
        expect.any(Object),
        expect.any(Function)
      )
      // Exactly two tools: executeDatabase must NOT be registered here.
      expect(toolSpy).toHaveBeenCalledTimes(2)
    })
    it('should register all tools for privileged users', () => {
      const toolSpy = vi.spyOn(mockServer, 'tool')
      registerDatabaseTools(mockServer, mockEnv as any, mockPrivilegedProps)
      expect(toolSpy).toHaveBeenCalledWith(
        'listTables',
        expect.any(String),
        expect.any(Object),
        expect.any(Function)
      )
      expect(toolSpy).toHaveBeenCalledWith(
        'queryDatabase',
        expect.any(String),
        expect.any(Object),
        expect.any(Function)
      )
      expect(toolSpy).toHaveBeenCalledWith(
        'executeDatabase',
        expect.any(String),
        expect.any(Object),
        expect.any(Function)
      )
      expect(toolSpy).toHaveBeenCalledTimes(3)
    })
  })
  describe('listTables tool', () => {
    it('should return table schema successfully', async () => {
      const toolSpy = vi.spyOn(mockServer, 'tool')
      registerDatabaseTools(mockServer, mockEnv as any, mockProps)
      // Get the registered tool handler (4th argument of server.tool).
      const toolCall = toolSpy.mock.calls.find(call => call[0] === 'listTables')
      const handler = toolCall![3] as Function
      const result = await handler({})
      expect(result.content).toBeDefined()
      expect(result.content[0].type).toBe('text')
      expect(result.content[0].text).toContain('Database Tables and Schema')
      // Table names come from the mockTableColumns fixture.
      expect(result.content[0].text).toContain('users')
      expect(result.content[0].text).toContain('posts')
    })
    it('should handle database errors', async () => {
      const toolSpy = vi.spyOn(mockServer, 'tool')
      mockDbInstance.unsafe.mockRejectedValue(new Error('Database connection failed'))
      registerDatabaseTools(mockServer, mockEnv as any, mockProps)
      const toolCall = toolSpy.mock.calls.find(call => call[0] === 'listTables')
      const handler = toolCall![3] as Function
      const result = await handler({})
      // Failures surface as an error content item, not a thrown exception.
      expect(result.content[0].isError).toBe(true)
      expect(result.content[0].text).toContain('Error')
    })
  })
  describe('queryDatabase tool', () => {
    it('should execute SELECT queries successfully', async () => {
      const toolSpy = vi.spyOn(mockServer, 'tool')
      registerDatabaseTools(mockServer, mockEnv as any, mockProps)
      const toolCall = toolSpy.mock.calls.find(call => call[0] === 'queryDatabase')
      const handler = toolCall![3] as Function
      const result = await handler({ sql: 'SELECT * FROM users' })
      expect(result.content[0].type).toBe('text')
      expect(result.content[0].text).toContain('Query Results')
      expect(result.content[0].text).toContain('SELECT * FROM users')
    })
    it('should reject write operations', async () => {
      const toolSpy = vi.spyOn(mockServer, 'tool')
      registerDatabaseTools(mockServer, mockEnv as any, mockProps)
      const toolCall = toolSpy.mock.calls.find(call => call[0] === 'queryDatabase')
      const handler = toolCall![3] as Function
      // queryDatabase is read-only for regular users.
      const result = await handler({ sql: 'INSERT INTO users VALUES (1, \'test\')' })
      expect(result.content[0].isError).toBe(true)
      expect(result.content[0].text).toContain('Write operations are not allowed')
    })
    it('should reject invalid SQL', async () => {
      const toolSpy = vi.spyOn(mockServer, 'tool')
      registerDatabaseTools(mockServer, mockEnv as any, mockProps)
      const toolCall = toolSpy.mock.calls.find(call => call[0] === 'queryDatabase')
      const handler = toolCall![3] as Function
      // Stacked statements (classic injection shape) must be rejected.
      const result = await handler({ sql: 'SELECT * FROM users; DROP TABLE users' })
      expect(result.content[0].isError).toBe(true)
      expect(result.content[0].text).toContain('Invalid SQL query')
    })
    it('should handle database errors', async () => {
      const toolSpy = vi.spyOn(mockServer, 'tool')
      mockDbInstance.unsafe.mockRejectedValue(new Error('Database connection failed'))
      registerDatabaseTools(mockServer, mockEnv as any, mockProps)
      const toolCall = toolSpy.mock.calls.find(call => call[0] === 'queryDatabase')
      const handler = toolCall![3] as Function
      const result = await handler({ sql: 'SELECT * FROM users' })
      expect(result.content[0].isError).toBe(true)
      expect(result.content[0].text).toContain('Database query error')
    })
  })
  describe('executeDatabase tool', () => {
    it('should only be available to privileged users', async () => {
      // Regular user should not get executeDatabase.
      const toolSpy1 = vi.spyOn(mockServer, 'tool')
      registerDatabaseTools(mockServer, mockEnv as any, mockProps)
      const executeToolCall = toolSpy1.mock.calls.find(call => call[0] === 'executeDatabase')
      expect(executeToolCall).toBeUndefined()
      // Privileged user should get executeDatabase (fresh server so the
      // earlier registration doesn't bleed into this assertion).
      const mockServer2 = new McpServer({ name: 'test2', version: '1.0.0' })
      const toolSpy2 = vi.spyOn(mockServer2, 'tool')
      registerDatabaseTools(mockServer2, mockEnv as any, mockPrivilegedProps)
      const privilegedExecuteToolCall = toolSpy2.mock.calls.find(call => call[0] === 'executeDatabase')
      expect(privilegedExecuteToolCall).toBeDefined()
    })
    it('should execute write operations for privileged users', async () => {
      const toolSpy = vi.spyOn(mockServer, 'tool')
      registerDatabaseTools(mockServer, mockEnv as any, mockPrivilegedProps)
      const toolCall = toolSpy.mock.calls.find(call => call[0] === 'executeDatabase')
      const handler = toolCall![3] as Function
      const result = await handler({ sql: 'INSERT INTO users VALUES (1, \'test\')' })
      expect(result.content[0].type).toBe('text')
      expect(result.content[0].text).toContain('Write Operation Executed Successfully')
      // 'coleam00' — presumably the privileged username echoed in the
      // success message; comes from mockPrivilegedProps — TODO confirm.
      expect(result.content[0].text).toContain('coleam00')
    })
    it('should execute read operations for privileged users', async () => {
      const toolSpy = vi.spyOn(mockServer, 'tool')
      registerDatabaseTools(mockServer, mockEnv as any, mockPrivilegedProps)
      const toolCall = toolSpy.mock.calls.find(call => call[0] === 'executeDatabase')
      const handler = toolCall![3] as Function
      const result = await handler({ sql: 'SELECT * FROM users' })
      expect(result.content[0].type).toBe('text')
      expect(result.content[0].text).toContain('Read Operation Executed Successfully')
    })
    it('should reject invalid SQL', async () => {
      const toolSpy = vi.spyOn(mockServer, 'tool')
      registerDatabaseTools(mockServer, mockEnv as any, mockPrivilegedProps)
      const toolCall = toolSpy.mock.calls.find(call => call[0] === 'executeDatabase')
      const handler = toolCall![3] as Function
      const result = await handler({ sql: 'SELECT * FROM users; DROP TABLE users' })
      expect(result.content[0].isError).toBe(true)
      expect(result.content[0].text).toContain('Invalid SQL statement')
    })
    it('should handle database errors', async () => {
      const toolSpy = vi.spyOn(mockServer, 'tool')
      mockDbInstance.unsafe.mockRejectedValue(new Error('Database connection failed'))
      registerDatabaseTools(mockServer, mockEnv as any, mockPrivilegedProps)
      const toolCall = toolSpy.mock.calls.find(call => call[0] === 'executeDatabase')
      const handler = toolCall![3] as Function
      const result = await handler({ sql: 'INSERT INTO users VALUES (1, \'test\')' })
      expect(result.content[0].isError).toBe(true)
      expect(result.content[0].text).toContain('Database execution error')
    })
  })
})

View File

@ -0,0 +1,48 @@
/**
 * Taskmaster PRP Parser MCP Server Configuration
 * For more details on how to configure Wrangler, refer to:
 * https://developers.cloudflare.com/workers/wrangler/configuration/
 */
{
  "$schema": "node_modules/wrangler/config-schema.json",
  "name": "taskmaster-mcp-server",
  "main": "src/taskmaster.ts",
  "compatibility_date": "2025-03-10",
  "compatibility_flags": [
    // Enables Node.js built-in modules in the Worker runtime.
    "nodejs_compat"
  ],
  // Durable Object migration: registers TaskmasterMCP as a SQLite-backed class.
  "migrations": [
    {
      "new_sqlite_classes": [
        "TaskmasterMCP"
      ],
      "tag": "v1"
    }
  ],
  "durable_objects": {
    "bindings": [
      {
        // The MCP agent's Durable Object, exposed to the Worker as env.MCP_OBJECT.
        "class_name": "TaskmasterMCP",
        "name": "MCP_OBJECT"
      }
    ]
  },
  "kv_namespaces": [
    {
      // KV namespace backing OAuth state (env.OAUTH_KV). The id is
      // account-specific — other deployments must substitute their own.
      "binding": "OAUTH_KV",
      "id": "06998ca39ffb4273a10747065041347b"
    }
  ],
  "ai": {
    // Workers AI binding (env.AI).
    "binding": "AI"
  },
  "observability": {
    "enabled": true
  },
  "dev": {
    // Local dev port; differs from the default 8787 so it can run alongside
    // another Worker — presumably intentional, confirm if ports collide.
    "port": 8792
  },
  "vars": {
    // Non-secret default. NOTE(review): the sample env file sets
    // ANTHROPIC_MODEL to claude-3-sonnet-20240229 — confirm which value
    // should win in deployed environments.
    "ANTHROPIC_MODEL": "claude-3-5-haiku-latest"
  }
}