PRP Template for Pydantic AI Agents

This commit is contained in:
Cole Medin
2025-07-20 08:01:14 -05:00
parent 84d49cf30a
commit 1bcba59231
30 changed files with 6134 additions and 88 deletions

View File

@@ -0,0 +1,9 @@
# ===== LLM Configuration =====
# Provider: openai, anthropic, gemini, ollama, etc.
LLM_PROVIDER=openai
# Your LLM API key
LLM_API_KEY=sk-your-openai-api-key-here
# LLM to use for the agents (e.g., gpt-4.1-mini, gpt-4.1, claude-4-sonnet)
LLM_CHOICE=gpt-4.1-mini
# Base URL for the LLM API (change for Ollama or other providers)
LLM_BASE_URL=https://api.openai.com/v1

View File

@@ -0,0 +1,103 @@
"""
Core data models for the multi-agent system.
"""
from pydantic import BaseModel, Field
from typing import List, Optional, Dict, Any
from datetime import datetime
class ResearchQuery(BaseModel):
    """Model for research query requests.

    Carries the research topic plus result-shaping options for the agent.
    """
    # Free-text topic; required (no default).
    query: str = Field(..., description="Research topic to investigate")
    # Validated to the 1-50 range via ge/le constraints.
    max_results: int = Field(10, ge=1, le=50, description="Maximum number of results to return")
    include_summary: bool = Field(True, description="Whether to include AI-generated summary")
class BraveSearchResult(BaseModel):
    """Model for individual Brave search results.

    `score` is a position-derived relevance value constrained to [0.0, 1.0].
    """
    title: str = Field(..., description="Title of the search result")
    url: str = Field(..., description="URL of the search result")
    description: str = Field(..., description="Description/snippet from the search result")
    score: float = Field(0.0, ge=0.0, le=1.0, description="Relevance score")

    # Pydantic v2: class-based `Config` is deprecated in favor of
    # `model_config` (a ConfigDict / plain dict). The example payload for the
    # generated JSON schema is kept byte-identical.
    model_config = {
        "json_schema_extra": {
            "example": {
                "title": "Understanding AI Safety",
                "url": "https://example.com/ai-safety",
                "description": "A comprehensive guide to AI safety principles...",
                "score": 0.95
            }
        }
    }
class EmailDraft(BaseModel):
    """Model for email draft creation.

    Requires at least one recipient and non-empty subject/body.
    """
    to: List[str] = Field(..., min_length=1, description="List of recipient email addresses")
    subject: str = Field(..., min_length=1, description="Email subject line")
    body: str = Field(..., min_length=1, description="Email body content")
    cc: Optional[List[str]] = Field(None, description="List of CC recipients")
    bcc: Optional[List[str]] = Field(None, description="List of BCC recipients")

    # Pydantic v2: class-based `Config` is deprecated in favor of
    # `model_config` (a ConfigDict / plain dict). The example payload for the
    # generated JSON schema is kept byte-identical.
    model_config = {
        "json_schema_extra": {
            "example": {
                "to": ["john@example.com"],
                "subject": "AI Research Summary",
                "body": "Dear John,\n\nHere's the latest research on AI safety...",
                "cc": ["team@example.com"]
            }
        }
    }
class EmailDraftResponse(BaseModel):
    """Response model for email draft creation."""
    draft_id: str = Field(..., description="Gmail draft ID")
    message_id: str = Field(..., description="Message ID")
    thread_id: Optional[str] = Field(None, description="Thread ID if part of a thread")
    # NOTE(review): default_factory=datetime.now yields a naive local-time
    # timestamp — confirm whether UTC-aware datetimes are expected downstream.
    created_at: datetime = Field(default_factory=datetime.now, description="Draft creation timestamp")
class ResearchEmailRequest(BaseModel):
    """Model for research + email draft request.

    Combined request: run a research query, then draft an email from it.
    """
    research_query: str = Field(..., description="Topic to research")
    email_context: str = Field(..., description="Context for email generation")
    # NOTE(review): plain str — pydantic's EmailStr would validate the address
    # format; confirm whether that dependency is acceptable.
    recipient_email: str = Field(..., description="Email recipient")
    email_subject: Optional[str] = Field(None, description="Optional email subject")
class ResearchResponse(BaseModel):
    """Response model for research queries."""
    query: str = Field(..., description="Original research query")
    results: List[BraveSearchResult] = Field(..., description="Search results")
    summary: Optional[str] = Field(None, description="AI-generated summary of results")
    # Not constrained to len(results); presumably the backend's total-hit
    # count, which may exceed the returned page — verify against callers.
    total_results: int = Field(..., description="Total number of results found")
    # Naive local timestamp (datetime.now) — confirm if UTC is expected.
    timestamp: datetime = Field(default_factory=datetime.now, description="Query timestamp")
class AgentResponse(BaseModel):
    """Generic agent response model."""
    success: bool = Field(..., description="Whether the operation was successful")
    # Presumably `data` is set on success and `error` on failure; the model
    # does not enforce this — confirm with callers before relying on it.
    data: Optional[Dict[str, Any]] = Field(None, description="Response data")
    error: Optional[str] = Field(None, description="Error message if failed")
    tools_used: List[str] = Field(default_factory=list, description="List of tools used")
class ChatMessage(BaseModel):
    """Model for chat messages in the CLI."""
    # NOTE(review): free string — Literal["user", "assistant"] would enforce
    # the documented contract; confirm no other roles are used.
    role: str = Field(..., description="Message role (user/assistant)")
    content: str = Field(..., description="Message content")
    # Naive local timestamp (datetime.now) — confirm if UTC is expected.
    timestamp: datetime = Field(default_factory=datetime.now, description="Message timestamp")
    tools_used: Optional[List[Dict[str, Any]]] = Field(None, description="Tools used in response")
class SessionState(BaseModel):
    """Model for maintaining session state.

    Holds the conversation history and activity timestamps for one session.
    """
    session_id: str = Field(..., description="Unique session identifier")
    user_id: Optional[str] = Field(None, description="User identifier")
    messages: List[ChatMessage] = Field(default_factory=list, description="Conversation history")
    # Both timestamps are naive local time (datetime.now) — confirm if
    # UTC-aware datetimes are expected; last_activity is not auto-updated by
    # this model, so callers must refresh it explicitly.
    created_at: datetime = Field(default_factory=datetime.now, description="Session creation time")
    last_activity: datetime = Field(default_factory=datetime.now, description="Last activity timestamp")

View File

@@ -0,0 +1,61 @@
"""
Flexible provider configuration for LLM models.
Based on examples/agent/providers.py pattern.
"""
from typing import Optional
from pydantic_ai.providers.openai import OpenAIProvider
from pydantic_ai.models.openai import OpenAIModel
from .settings import settings
def get_llm_model(model_choice: Optional[str] = None) -> OpenAIModel:
    """
    Build the configured OpenAI-compatible model.

    Args:
        model_choice: Optional override for the model name; falls back to
            the `llm_model` setting when omitted.

    Returns:
        An OpenAIModel wired to the configured provider endpoint.
    """
    # Provider carries the endpoint + credentials; model name is resolved
    # from the explicit override first, then the environment settings.
    provider = OpenAIProvider(
        base_url=settings.llm_base_url,
        api_key=settings.llm_api_key,
    )
    return OpenAIModel(model_choice or settings.llm_model, provider=provider)
def get_model_info() -> dict:
    """
    Report the current model configuration.

    Returns:
        Dictionary with provider, model, base URL, app environment and
        debug flag, mirrored straight from the settings object.
    """
    fields = ("llm_provider", "llm_model", "llm_base_url", "app_env", "debug")
    return {name: getattr(settings, name) for name in fields}
def validate_llm_configuration() -> bool:
    """
    Validate that LLM configuration is properly set.

    Attempts to construct a model instance, which exercises the
    provider / API key / base-URL wiring from settings.

    Returns:
        True if configuration is valid, False otherwise.
    """
    try:
        # Check if we can create a model instance
        get_llm_model()
        return True
    except Exception as e:
        # Log rather than print so library consumers control the output sink.
        import logging
        logging.getLogger(__name__).error(
            "LLM configuration validation failed: %s", e
        )
        return False

View File

@@ -0,0 +1,263 @@
"""
Research Agent that uses Brave Search and can invoke Email Agent.
"""
import logging
from typing import Dict, Any, List, Optional
from dataclasses import dataclass
from pydantic_ai import Agent, RunContext
from .providers import get_llm_model
from .email_agent import email_agent, EmailAgentDependencies
from .tools import search_web_tool
logger = logging.getLogger(__name__)
# System prompt for the research agent. Keep the capability list in sync with
# the registered tools (search_web, create_email_draft, summarize_research).
SYSTEM_PROMPT = """
You are an expert research assistant with the ability to search the web and create email drafts. Your primary goal is to help users find relevant information and communicate findings effectively.
Your capabilities:
1. **Web Search**: Use Brave Search to find current, relevant information on any topic
2. **Email Creation**: Create professional email drafts through Gmail when requested
When conducting research:
- Use specific, targeted search queries
- Analyze search results for relevance and credibility
- Synthesize information from multiple sources
- Provide clear, well-organized summaries
- Include source URLs for reference
When creating emails:
- Use research findings to create informed, professional content
- Adapt tone and detail level to the intended recipient
- Include relevant sources and citations when appropriate
- Ensure emails are clear, concise, and actionable
Always strive to provide accurate, helpful, and actionable information.
"""
@dataclass
class ResearchAgentDependencies:
    """Dependencies for the research agent - only configuration, no tool instances."""
    # Brave Search API key consumed by the search_web tool.
    brave_api_key: str
    # Gmail OAuth file paths forwarded to the Email Agent.
    gmail_credentials_path: str
    gmail_token_path: str
    # Optional session identifier propagated into sub-agent runs.
    session_id: Optional[str] = None
# Initialize the research agent as a module-level singleton; per-run
# configuration is injected via deps (ResearchAgentDependencies) at call time.
research_agent = Agent(
    get_llm_model(),
    deps_type=ResearchAgentDependencies,
    system_prompt=SYSTEM_PROMPT
)
@research_agent.tool
async def search_web(
    ctx: RunContext[ResearchAgentDependencies],
    query: str,
    max_results: int = 10
) -> List[Dict[str, Any]]:
    """
    Search the web using Brave Search API.

    Args:
        query: Search query
        max_results: Maximum number of results to return (1-20)

    Returns:
        List of search results with title, URL, description, and score
    """
    try:
        # Clamp the requested count into the API's supported 1-20 window.
        capped = 1 if max_results < 1 else 20 if max_results > 20 else max_results
        hits = await search_web_tool(
            api_key=ctx.deps.brave_api_key,
            query=query,
            count=capped
        )
        logger.info(f"Found {len(hits)} results for query: {query}")
        return hits
    except Exception as e:
        # Surface the failure as data so the model can adjust its plan.
        logger.error(f"Web search failed: {e}")
        return [{"error": f"Search failed: {str(e)}"}]
@research_agent.tool
async def create_email_draft(
    ctx: RunContext[ResearchAgentDependencies],
    recipient_email: str,
    subject: str,
    context: str,
    research_summary: Optional[str] = None
) -> Dict[str, Any]:
    """
    Create an email draft based on research context using the Email Agent.

    Delegates drafting to the separate email_agent (agent-as-tool pattern),
    forwarding Gmail credential paths from this agent's own dependencies.

    Args:
        recipient_email: Email address of the recipient
        subject: Email subject line
        context: Context or purpose for the email
        research_summary: Optional research findings to include

    Returns:
        Dictionary with draft creation results
    """
    try:
        # Prepare the email content prompt; the richer template is used only
        # when research findings are available.
        if research_summary:
            email_prompt = f"""
Create a professional email to {recipient_email} with the subject "{subject}".
Context: {context}
Research Summary:
{research_summary}
Please create a well-structured email that:
1. Has an appropriate greeting
2. Provides clear context
3. Summarizes the key research findings professionally
4. Includes actionable next steps if appropriate
5. Ends with a professional closing
The email should be informative but concise, and maintain a professional yet friendly tone.
"""
        else:
            email_prompt = f"""
Create a professional email to {recipient_email} with the subject "{subject}".
Context: {context}
Please create a well-structured email that addresses the context provided.
"""
        # Create dependencies for email agent from this agent's own deps.
        email_deps = EmailAgentDependencies(
            gmail_credentials_path=ctx.deps.gmail_credentials_path,
            gmail_token_path=ctx.deps.gmail_token_path,
            session_id=ctx.deps.session_id
        )
        # Run the email agent as a sub-agent of this run.
        result = await email_agent.run(
            email_prompt,
            deps=email_deps,
            usage=ctx.usage  # Pass usage for token tracking
        )
        logger.info(f"Email agent invoked for recipient: {recipient_email}")
        return {
            "success": True,
            "agent_response": result.data,
            "recipient": recipient_email,
            "subject": subject,
            "context": context
        }
    except Exception as e:
        # Failures are returned as data (success=False) rather than raised,
        # so the calling LLM can recover or report the problem.
        logger.error(f"Failed to create email draft via Email Agent: {e}")
        return {
            "success": False,
            "error": str(e),
            "recipient": recipient_email,
            "subject": subject
        }
@research_agent.tool
async def summarize_research(
    ctx: RunContext[ResearchAgentDependencies],
    search_results: List[Dict[str, Any]],
    topic: str,
    focus_areas: Optional[str] = None
) -> Dict[str, Any]:
    """
    Create a comprehensive summary of research findings.

    Args:
        search_results: List of search result dictionaries
        topic: Main research topic
        focus_areas: Optional specific areas to focus on

    Returns:
        Dictionary with research summary
    """
    try:
        # Nothing to summarize when the caller supplies no results.
        if not search_results:
            return {
                "summary": "No search results provided for summarization.",
                "key_points": [],
                "sources": []
            }
        # Pull citation lines and snippet text out of the raw result dicts;
        # the two filters are independent (a result may contribute to either).
        sources = [
            f"- {entry['title']}: {entry['url']}"
            for entry in search_results
            if "title" in entry and "url" in entry
        ]
        descriptions = [
            entry["description"]
            for entry in search_results
            if "description" in entry
        ]
        # Cap output at the top 5 snippets and top 10 sources.
        content_summary = "\n".join(descriptions[:5])
        sources_list = "\n".join(sources[:10])
        focus_text = f"\nSpecific focus areas: {focus_areas}" if focus_areas else ""
        summary = f"""
Research Summary: {topic}{focus_text}
Key Findings:
{content_summary}
Sources:
{sources_list}
"""
        return {
            "summary": summary,
            "topic": topic,
            "sources_count": len(sources),
            "key_points": descriptions[:5]
        }
    except Exception as e:
        # Return the failure as data so the calling LLM can react.
        logger.error(f"Failed to summarize research: {e}")
        return {
            "summary": f"Failed to summarize research: {str(e)}",
            "key_points": [],
            "sources": []
        }
# Convenience function to create research agent with dependencies
def create_research_agent(
    brave_api_key: str,
    gmail_credentials_path: str,
    gmail_token_path: str,
    session_id: Optional[str] = None
) -> Agent:
    """
    Return the module-level research agent.

    NOTE(review): all parameters are currently unused — the agent is a
    module-level singleton and dependencies are supplied per run via
    ResearchAgentDependencies at call time. Either wire these arguments
    into a deps factory or drop them; confirm the intended design.

    Args:
        brave_api_key: Brave Search API key (unused)
        gmail_credentials_path: Path to Gmail credentials.json (unused)
        gmail_token_path: Path to Gmail token.json (unused)
        session_id: Optional session identifier (unused)

    Returns:
        The shared research_agent instance
    """
    return research_agent

View File

@@ -0,0 +1,58 @@
"""
Configuration management using pydantic-settings.
"""
import os
from typing import Optional
from pydantic_settings import BaseSettings
from pydantic import Field, field_validator, ConfigDict
from dotenv import load_dotenv
# Load environment variables from .env file
load_dotenv()
class Settings(BaseSettings):
    """Application settings with environment variable support.

    Values are read case-insensitively from the environment / .env file.
    `llm_api_key` and `brave_api_key` are required and validated non-blank.
    """
    # NOTE(review): pydantic-settings' canonical config type is
    # SettingsConfigDict (ConfigDict does not declare the env_file keys) —
    # confirm and switch the import if type checking complains.
    model_config = ConfigDict(
        env_file=".env",
        env_file_encoding="utf-8",
        case_sensitive=False
    )

    # LLM Configuration
    llm_provider: str = Field(default="openai")
    llm_api_key: str = Field(...)  # required; validated non-empty below
    llm_model: str = Field(default="gpt-4")
    llm_base_url: Optional[str] = Field(default="https://api.openai.com/v1")

    # Brave Search Configuration
    brave_api_key: str = Field(...)  # required; validated non-empty below
    brave_search_url: str = Field(
        default="https://api.search.brave.com/res/v1/web/search"
    )

    # Application Configuration
    app_env: str = Field(default="development")
    log_level: str = Field(default="INFO")
    debug: bool = Field(default=False)

    @field_validator("llm_api_key", "brave_api_key")
    @classmethod
    def validate_api_keys(cls, v: str) -> str:
        """Ensure API keys are not empty or whitespace-only."""
        if not v or v.strip() == "":
            raise ValueError("API key cannot be empty")
        return v
# Global settings instance.
# If required env vars (LLM_API_KEY, BRAVE_API_KEY) are missing, fall back to
# dummy values so the module stays importable in test environments.
try:
    settings = Settings()
except Exception:
    # `os` is already imported at module top; the original re-imported it here.
    os.environ.setdefault("LLM_API_KEY", "test_key")
    os.environ.setdefault("BRAVE_API_KEY", "test_key")
    settings = Settings()

View File

@@ -0,0 +1,120 @@
"""
Pure tool functions for multi-agent system.
These are standalone functions that can be imported and used by any agent.
"""
import os
import base64
import logging
import httpx
from typing import List, Dict, Any, Optional
from datetime import datetime
from agents.models import BraveSearchResult
logger = logging.getLogger(__name__)
# Brave Search Tool Function
async def search_web_tool(
    api_key: str,
    query: str,
    count: int = 10,
    offset: int = 0,
    country: Optional[str] = None,
    lang: Optional[str] = None
) -> List[Dict[str, Any]]:
    """
    Pure function to search the web using the Brave Search API.

    Args:
        api_key: Brave Search API key
        query: Search query
        count: Number of results to return (clamped to 1-20)
        offset: Offset for pagination
        country: Country code for localized results
        lang: Language code for results

    Returns:
        List of search results as dictionaries with title, url, description,
        and a position-based relevance score.

    Raises:
        ValueError: If query is empty or API key missing
        Exception: If the API request fails or returns a non-200 status
    """
    if not api_key or not api_key.strip():
        raise ValueError("Brave API key is required")
    if not query or not query.strip():
        raise ValueError("Query cannot be empty")

    # Brave caps web results per request; clamp rather than error out.
    count = min(max(count, 1), 20)

    headers = {
        "X-Subscription-Token": api_key,
        "Accept": "application/json"
    }
    params: Dict[str, Any] = {
        "q": query,
        "count": count,
        "offset": offset
    }
    if country:
        params["country"] = country
    if lang:
        params["lang"] = lang

    logger.info(f"Searching Brave for: {query}")
    async with httpx.AsyncClient() as client:
        try:
            response = await client.get(
                "https://api.search.brave.com/res/v1/web/search",
                headers=headers,
                params=params,
                timeout=30.0
            )
            # Map common failure statuses to actionable messages.
            if response.status_code == 429:
                raise Exception("Rate limit exceeded. Check your Brave API quota.")
            if response.status_code == 401:
                raise Exception("Invalid Brave API key")
            if response.status_code != 200:
                raise Exception(f"Brave API returned {response.status_code}: {response.text}")

            data = response.json()
            # Extract web results (nested under data["web"]["results"]).
            web_results = data.get("web", {}).get("results", [])

            # Convert to our format with a simple position-based score:
            # 1.0 for the top hit, decreasing 0.05 per rank, floored at 0.1.
            results = []
            for idx, result in enumerate(web_results):
                score = max(1.0 - (idx * 0.05), 0.1)
                results.append({
                    "title": result.get("title", ""),
                    "url": result.get("url", ""),
                    "description": result.get("description", ""),
                    "score": score
                })
            logger.info(f"Found {len(results)} results for query: {query}")
            return results
        except httpx.RequestError as e:
            logger.error(f"Request error during Brave search: {e}")
            # Chain the cause so transport failures stay diagnosable.
            raise Exception(f"Request failed: {str(e)}") from e
        except Exception as e:
            logger.error(f"Error during Brave search: {e}")
            raise