initial commit

This commit is contained in:
Agent@BackendIM 2025-05-17 09:39:13 +00:00
parent 279c7394ad
commit 7dabe108b0

View File

@ -1,14 +1,14 @@
""" """
LLM service for converting natural language to structured task data. LLM service for converting natural language to structured task data.
""" """
import json import json
import logging import logging
from abc import ABC, abstractmethod from abc import ABC, abstractmethod
from typing import Dict, List from typing import Dict, List, Optional
from app.core.config import settings from app.core.config import settings
# Configure logger
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
class LLMService(ABC):
    """Abstract interface for services that turn chat text into task data."""

    @abstractmethod
    async def chat_to_tasks(self, prompt: str) -> List[Dict]:
        """Convert natural language input to structured task objects.

        Implementations must return a list of task dictionaries.
        """
        ...
def extract_json_from_response(text: str) -> Dict:
"""Extract valid JSON from possibly markdown-wrapped LLM responses."""
try:
if "```json" in text:
text = text.split("```json")[1].split("```")[0].strip()
elif "```" in text:
text = text.split("```")[1].strip()
return json.loads(text)
except Exception as e:
logger.error(f"Failed to parse JSON: {e}\nRaw response: {text}")
raise
class OpenAIService(LLMService):
    """OpenAI implementation of the LLM service."""

    def __init__(self):
        """Set up the async OpenAI client from application settings.

        Raises:
            RuntimeError: If the openai package or required settings are missing.
        """
        try:
            # NOTE(review): this import line was elided in the diff view —
            # confirm against the full file.
            import openai

            self.client = openai.AsyncOpenAI(api_key=settings.OPENAI_API_KEY)
            self.model = settings.OPENAI_MODEL
        except (ImportError, AttributeError) as e:
            logger.error("OpenAI service initialization failed: %s", e)
            # Chain the cause so the root error survives in tracebacks.
            raise RuntimeError("OpenAI client setup failed.") from e

    async def chat_to_tasks(self, prompt: str) -> List[Dict]:
        """Ask the OpenAI chat model to turn ``prompt`` into task dicts.

        Args:
            prompt: The user's natural-language request.

        Returns:
            The list stored under the ``"tasks"`` key of the model's JSON reply.

        Raises:
            RuntimeError: On any API, parsing, or response-schema failure.
        """
        system_prompt = (
            "You are a task extraction assistant. Convert the user's message into structured task objects. "
            "Each task must include:\n"
            "- title: short title\n"
            "- description: detailed description\n"
            "- due_date: ISO 8601 date (YYYY-MM-DD) or null\n"
            "- priority: high, medium, or low\n"
            "- status: set to \"pending\"\n\n"
            "Respond ONLY with a JSON object in the format:\n"
            "{ \"tasks\": [ { ... }, { ... } ] }\n"
            "No extra commentary or text."
        )
        try:
            response = await self.client.chat.completions.create(
                model=self.model,
                temperature=0.2,
                messages=[
                    {"role": "system", "content": system_prompt},
                    {"role": "user", "content": prompt},
                ]
            )
            raw = response.choices[0].message.content.strip()
            result = extract_json_from_response(raw)
            if "tasks" in result and isinstance(result["tasks"], list):
                return result["tasks"]
            raise ValueError("Missing or invalid 'tasks' key in response.")
        except Exception as e:
            logger.error("OpenAI task extraction failed: %s", e)
            # Chain so callers can inspect the underlying API/parse error.
            raise RuntimeError("Failed to extract tasks from OpenAI response.") from e
class GeminiService(LLMService):
    """Google Gemini implementation of the LLM service."""

    def __init__(self):
        """Configure the Gemini client from application settings.

        Raises:
            RuntimeError: If the google-generativeai package or required
                settings are missing.
        """
        try:
            # NOTE(review): this import line was elided in the diff view —
            # confirm against the full file.
            import google.generativeai as genai

            genai.configure(api_key=settings.GEMINI_API_KEY)
            self.model = genai.GenerativeModel(settings.GEMINI_MODEL)
        except (ImportError, AttributeError) as e:
            logger.error("Gemini service initialization failed: %s", e)
            # Chain the cause so the root error survives in tracebacks.
            raise RuntimeError("Gemini client setup failed.") from e

    async def chat_to_tasks(self, prompt: str) -> List[Dict]:
        """Ask the Gemini model to turn ``prompt`` into task dicts.

        Args:
            prompt: The user's natural-language request.

        Returns:
            The list stored under the ``"tasks"`` key of the model's JSON reply.

        Raises:
            RuntimeError: On any API, parsing, or response-schema failure.
        """
        system_prompt = (
            "You are a task extraction assistant. Convert the user's message into structured task objects. "
            "Each task must include:\n"
            "- title: short title\n"
            "- description: detailed description\n"
            "- due_date: ISO 8601 date (YYYY-MM-DD) or null\n"
            "- priority: high, medium, or low\n"
            "- status: set to \"pending\"\n\n"
            "Return only this JSON format: { \"tasks\": [ ... ] }\n"
            "No explanations, no markdown, no formatting just pure JSON."
        )
        try:
            chat = self.model.start_chat(history=[
                {"role": "user", "parts": [system_prompt]}
            ])
            response = await chat.send_message_async(prompt)
            raw = response.text.strip()
            result = extract_json_from_response(raw)
            if "tasks" in result and isinstance(result["tasks"], list):
                return result["tasks"]
            raise ValueError("Missing or invalid 'tasks' key in response.")
        except Exception as e:
            logger.error("Gemini task extraction failed: %s", e)
            # Chain so callers can inspect the underlying API/parse error.
            raise RuntimeError("Failed to extract tasks from Gemini response.") from e
class MockLLMService(LLMService):
    """Mock LLM service for testing without external API calls."""

    async def chat_to_tasks(self, prompt: str) -> List[Dict]:
        """Build one deterministic task dict from ``prompt`` (no network)."""
        tokens = prompt.lower().split()
        # Keyword-based priority: urgency words win over low-priority words.
        if "urgent" in tokens or "important" in tokens:
            level = "high"
        elif "minor" in tokens or "low" in tokens:
            level = "low"
        else:
            level = "medium"
        short_title = prompt if len(prompt) <= 50 else prompt[:50] + "..."
        # NOTE(review): trailing fields were elided in the diff view; the
        # "status" value is reconstructed from the prompt contract — confirm.
        return [{
            "title": short_title,
            "description": prompt,
            "due_date": None,
            "priority": level,
            "status": "pending",
        }]
@ -140,6 +145,7 @@ class MockLLMService(LLMService):
def get_llm_service() -> LLMService:
    """Factory to return the appropriate LLM service based on settings."""
    provider = settings.LLM_PROVIDER.lower()
    # NOTE(review): the openai/gemini branches were elided in the diff view
    # and are reconstructed from the hunk context — confirm against the file.
    if provider == "openai" and settings.OPENAI_API_KEY:
        return OpenAIService()
    if provider == "gemini" and settings.GEMINI_API_KEY:
        return GeminiService()
    if provider == "mock" or settings.environment == "test":
        return MockLLMService()
    logger.warning(
        f"LLM provider '{provider}' is not properly configured. Falling back to mock."
    )
    return MockLLMService()