initial commit

Agent@BackendIM 2025-05-17 09:39:13 +00:00
parent 279c7394ad
commit 7dabe108b0


@@ -1,14 +1,14 @@
"""
LLM service for converting natural language to structured task data.
"""
import json
import logging
from abc import ABC, abstractmethod
from typing import Dict, List
from typing import Dict, List, Optional
from app.core.config import settings
# Configure logger
logger = logging.getLogger(__name__)
@@ -19,12 +19,26 @@ class LLMService(ABC):
async def chat_to_tasks(self, prompt: str) -> List[Dict]:
"""
Convert natural language input to structured task objects.
Must return a list of task dictionaries.
"""
pass
def extract_json_from_response(text: str) -> Dict:
"""Extract valid JSON from possibly markdown-wrapped LLM responses."""
try:
if "```json" in text:
text = text.split("```json")[1].split("```")[0].strip()
elif "```" in text:
text = text.split("```")[1].strip()
return json.loads(text)
except Exception as e:
logger.error(f"Failed to parse JSON: {e}\nRaw response: {text}")
raise
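# Hypothetical usage of the helper on a fenced reply (the sample string is illustrative):
#   raw = '```json\n{"tasks": [{"title": "Pay rent", "priority": "high"}]}\n```'
#   extract_json_from_response(raw)
#   # -> {"tasks": [{"title": "Pay rent", "priority": "high"}]}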
class OpenAIService(LLMService):
"""OpenAI implementation of LLM service."""
"""OpenAI implementation of the LLM service."""
def __init__(self):
try:
@@ -32,45 +46,45 @@ class OpenAIService(LLMService):
self.client = openai.AsyncOpenAI(api_key=settings.OPENAI_API_KEY)
self.model = settings.OPENAI_MODEL
except (ImportError, AttributeError) as e:
logger.error(f"Failed to initialize OpenAI service: {e}")
raise RuntimeError(f"OpenAI service initialization failed: {e}")
logger.error(f"OpenAI service initialization failed: {e}")
raise RuntimeError("OpenAI client setup failed.")
async def chat_to_tasks(self, prompt: str) -> List[Dict]:
system_prompt = """
You are a task extraction assistant. Your job is to convert the user's natural language
input into one or more structured task objects. Each task should have these properties:
- title: A short, clear title for the task
- description: A more detailed description of what needs to be done
- due_date: When the task is due (ISO format date string, or null if not specified)
- priority: The priority level (high, medium, low)
- status: The status (defaults to "pending")
Return ONLY a JSON array of task objects without any additional text or explanation.
"""
system_prompt = (
"You are a task extraction assistant. Convert the user's message into structured task objects. "
"Each task must include:\n"
"- title: short title\n"
"- description: detailed description\n"
"- due_date: ISO 8601 date (YYYY-MM-DD) or null\n"
"- priority: high, medium, or low\n"
"- status: set to \"pending\"\n\n"
"Respond ONLY with a JSON object in the format:\n"
"{ \"tasks\": [ { ... }, { ... } ] }\n"
"No extra commentary or text."
)
try:
response = await self.client.chat.completions.create(
model=self.model,
temperature=0.2,
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": prompt}
],
response_format={"type": "json_object"},
temperature=0.2,
{"role": "user", "content": prompt},
]
)
content = response.choices[0].message.content
result = json.loads(content)
raw = response.choices[0].message.content.strip()
result = extract_json_from_response(raw)
if "tasks" in result:
if "tasks" in result and isinstance(result["tasks"], list):
return result["tasks"]
return [result]
raise ValueError("Missing or invalid 'tasks' key in response.")
except Exception as e:
logger.error(f"OpenAI API error: {e}")
raise RuntimeError(f"Failed to process request with OpenAI: {e}")
logger.error(f"OpenAI task extraction failed: {e}")
raise RuntimeError("Failed to extract tasks from OpenAI response.")
class GeminiService(LLMService):
"""Google Gemini implementation of LLM service."""
"""Google Gemini implementation of the LLM service."""
def __init__(self):
try:
@@ -78,60 +92,51 @@ class GeminiService(LLMService):
genai.configure(api_key=settings.GEMINI_API_KEY)
self.model = genai.GenerativeModel(settings.GEMINI_MODEL)
except (ImportError, AttributeError) as e:
logger.error(f"Failed to initialize Gemini service: {e}")
raise RuntimeError(f"Gemini service initialization failed: {e}")
logger.error(f"Gemini service initialization failed: {e}")
raise RuntimeError("Gemini client setup failed.")
async def chat_to_tasks(self, prompt: str) -> List[Dict]:
system_prompt = (
"You are a task extraction assistant. Your job is to convert the user's natural language "
"input into one or more structured task objects. Each task should have these properties:\n"
"- title: A short, clear title for the task\n"
"- description: A more detailed description of what needs to be done\n"
"- due_date: When the task is due (ISO format date string, or null if not specified)\n"
"- priority: The priority level (high, medium, low)\n"
"- status: The status (defaults to \"pending\")\n\n"
"Return ONLY a JSON object with a \"tasks\" key that contains an array of task objects. "
"Do not include any text or explanation outside the JSON."
"You are a task extraction assistant. Convert the user's message into structured task objects. "
"Each task must include:\n"
"- title: short title\n"
"- description: detailed description\n"
"- due_date: ISO 8601 date (YYYY-MM-DD) or null\n"
"- priority: high, medium, or low\n"
"- status: set to \"pending\"\n\n"
"Return only this JSON format: { \"tasks\": [ ... ] }\n"
"No explanations, no markdown, no formatting just pure JSON."
)
try:
chat = self.model.start_chat(history=[
{"role": "user", "parts": [system_prompt]}
])
response = await chat.send_message_async(prompt)
content = response.text
raw = response.text.strip()
result = extract_json_from_response(raw)
# Handle markdown-wrapped responses
if "```json" in content:
json_str = content.split("```json")[1].split("```")[0].strip()
elif "```" in content:
json_str = content.split("```")[1].strip()
else:
json_str = content.strip()
result = json.loads(json_str)
if "tasks" in result:
if "tasks" in result and isinstance(result["tasks"], list):
return result["tasks"]
return [result]
raise ValueError("Missing or invalid 'tasks' key in response.")
except Exception as e:
logger.error(f"Gemini API error: {e}")
raise RuntimeError(f"Failed to process request with Gemini: {e}")
logger.error(f"Gemini task extraction failed: {e}")
raise RuntimeError("Failed to extract tasks from Gemini response.")
class MockLLMService(LLMService):
"""Mock LLM service for testing."""
"""Mock LLM service for testing without external API calls."""
async def chat_to_tasks(self, prompt: str) -> List[Dict]:
words = prompt.lower().split()
priority = "medium"
if "urgent" in words or "important" in words:
if any(word in words for word in ["urgent", "important"]):
priority = "high"
elif "low" in words or "minor" in words:
elif any(word in words for word in ["minor", "low"]):
priority = "low"
return [{
"title": prompt[:50] + ("..." if len(prompt) > 50 else ""),
"title": (prompt[:50] + "...") if len(prompt) > 50 else prompt,
"description": prompt,
"due_date": None,
"priority": priority,
@@ -140,6 +145,7 @@ class MockLLMService(LLMService):
def get_llm_service() -> LLMService:
"""Factory to return the appropriate LLM service based on settings."""
llm_provider = settings.LLM_PROVIDER.lower()
if llm_provider == "openai" and settings.OPENAI_API_KEY:
@@ -149,5 +155,7 @@ def get_llm_service() -> LLMService:
elif llm_provider == "mock" or settings.environment == "test":
return MockLLMService()
else:
logger.warning(f"LLM provider '{llm_provider}' not properly configured - using mock service")
logger.warning(
f"LLM provider '{llm_provider}' is not properly configured. Falling back to mock."
)
return MockLLMService()
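
A minimal usage sketch of the service layer in this file, assuming the module is importable as app.services.llm (the import path is a guess) and that settings are already configured; the prompt string is illustrative:

    import asyncio

    from app.services.llm import get_llm_service  # assumed module path

    async def main() -> None:
        # The factory returns OpenAIService, GeminiService, or MockLLMService based on settings.
        llm = get_llm_service()
        tasks = await llm.chat_to_tasks(
            "File the expense report by Friday, it's urgent"
        )
        for task in tasks:
            print(task["title"], task["priority"], task["due_date"])

    asyncio.run(main())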