154 lines
5.7 KiB
Python
154 lines
5.7 KiB
Python
"""
|
|
LLM service for converting natural language to structured task data.
|
|
"""
|
|
import json
|
|
import logging
|
|
from abc import ABC, abstractmethod
|
|
from typing import Dict, List
|
|
|
|
from app.core.config import settings
|
|
|
|
# Configure logger
|
|
logger = logging.getLogger(__name__)
|
|
|
|
|
|
class LLMService(ABC):
    """Abstract base class for LLM service implementations.

    Concrete subclasses wrap a specific provider (OpenAI, Gemini, a mock, ...)
    behind a single async method that turns free-form text into task dicts.
    """

    @abstractmethod
    async def chat_to_tasks(self, prompt: str) -> List[Dict]:
        """Convert natural language input to structured task objects.

        Args:
            prompt: Free-form user text describing one or more tasks.

        Returns:
            A list of task dictionaries.
        """
        ...
|
|
|
|
|
|
class OpenAIService(LLMService):
    """OpenAI implementation of LLM service."""

    def __init__(self):
        """Create the async OpenAI client from application settings.

        Raises:
            RuntimeError: If the ``openai`` package is missing or the
                required settings attributes are not configured.
        """
        try:
            import openai

            self.client = openai.AsyncOpenAI(api_key=settings.OPENAI_API_KEY)
            self.model = settings.OPENAI_MODEL
        except (ImportError, AttributeError) as e:
            logger.error(f"Failed to initialize OpenAI service: {e}")
            raise RuntimeError(f"OpenAI service initialization failed: {e}") from e

    async def chat_to_tasks(self, prompt: str) -> List[Dict]:
        """Convert natural language input to structured task objects.

        Args:
            prompt: Free-form user text describing one or more tasks.

        Returns:
            A list of task dictionaries parsed from the model response.

        Raises:
            RuntimeError: If the API call fails or the response is not
                valid JSON.
        """
        # response_format={"type": "json_object"} forces the model to emit a
        # JSON *object*, so the prompt must ask for an object with a "tasks"
        # key rather than a bare array (the previous wording conflicted with
        # JSON mode). This also keeps the prompt consistent with GeminiService.
        system_prompt = (
            "You are a task extraction assistant. Your job is to convert the user's natural language "
            "input into one or more structured task objects. Each task should have these properties:\n"
            "- title: A short, clear title for the task\n"
            "- description: A more detailed description of what needs to be done\n"
            "- due_date: When the task is due (ISO format date string, or null if not specified)\n"
            "- priority: The priority level (high, medium, low)\n"
            "- status: The status (defaults to \"pending\")\n\n"
            "Return ONLY a JSON object with a \"tasks\" key that contains an array of task objects. "
            "Do not include any text or explanation outside the JSON."
        )

        try:
            response = await self.client.chat.completions.create(
                model=self.model,
                messages=[
                    {"role": "system", "content": system_prompt},
                    {"role": "user", "content": prompt},
                ],
                # Guarantees the response content is parseable JSON.
                response_format={"type": "json_object"},
                temperature=0.2,  # low temperature for consistent extraction
            )
            content = response.choices[0].message.content
            result = json.loads(content)

            # Preferred shape is {"tasks": [...]}; fall back gracefully for a
            # bare array (previously mis-wrapped as [[...]]) or a lone task.
            if isinstance(result, list):
                return result
            if "tasks" in result:
                return result["tasks"]
            return [result]

        except Exception as e:
            logger.error(f"OpenAI API error: {e}")
            raise RuntimeError(f"Failed to process request with OpenAI: {e}") from e
|
|
|
|
|
|
class GeminiService(LLMService):
    """Google Gemini implementation of LLM service."""

    def __init__(self):
        """Configure the Gemini SDK and model from application settings.

        Raises:
            RuntimeError: If the ``google-generativeai`` package is missing
                or the required settings attributes are not configured.
        """
        try:
            import google.generativeai as genai
            genai.configure(api_key=settings.GEMINI_API_KEY)
            self.model = genai.GenerativeModel(settings.GEMINI_MODEL)
        except (ImportError, AttributeError) as e:
            logger.error(f"Failed to initialize Gemini service: {e}")
            raise RuntimeError(f"Gemini service initialization failed: {e}") from e

    async def chat_to_tasks(self, prompt: str) -> List[Dict]:
        """Convert natural language input to structured task objects.

        Args:
            prompt: Free-form user text describing one or more tasks.

        Returns:
            A list of task dictionaries parsed from the model response.

        Raises:
            RuntimeError: If the API call fails or the response is not
                valid JSON.
        """
        system_prompt = (
            "You are a task extraction assistant. Your job is to convert the user's natural language "
            "input into one or more structured task objects. Each task should have these properties:\n"
            "- title: A short, clear title for the task\n"
            "- description: A more detailed description of what needs to be done\n"
            "- due_date: When the task is due (ISO format date string, or null if not specified)\n"
            "- priority: The priority level (high, medium, low)\n"
            "- status: The status (defaults to \"pending\")\n\n"
            "Return ONLY a JSON object with a \"tasks\" key that contains an array of task objects. "
            "Do not include any text or explanation outside the JSON."
        )

        try:
            # Gemini has no system-message role here, so the instructions are
            # seeded as the first user turn of the chat history.
            chat = self.model.start_chat(history=[
                {"role": "user", "parts": [system_prompt]}
            ])
            response = await chat.send_message_async(prompt)
            content = response.text

            # Gemini often wraps JSON in markdown code fences despite the
            # instructions; strip ```json ... ``` or ``` ... ``` wrappers.
            if "```json" in content:
                json_str = content.split("```json")[1].split("```")[0].strip()
            elif "```" in content:
                json_str = content.split("```")[1].strip()
            else:
                json_str = content.strip()

            result = json.loads(json_str)

            # Gemini has no enforced JSON-object mode, so a bare array is a
            # realistic response; previously it was mis-wrapped as [[...]].
            if isinstance(result, list):
                return result
            if "tasks" in result:
                return result["tasks"]
            return [result]

        except Exception as e:
            logger.error(f"Gemini API error: {e}")
            raise RuntimeError(f"Failed to process request with Gemini: {e}") from e
|
|
|
|
|
|
class MockLLMService(LLMService):
    """Mock LLM service for testing."""

    async def chat_to_tasks(self, prompt: str) -> List[Dict]:
        """Heuristically build a single task dict from ``prompt``.

        Priority is inferred from keywords found in the prompt; the title
        and description are derived directly from the prompt text.
        """
        tokens = set(prompt.lower().split())

        # Keyword-driven priority: urgency keywords win over low-priority ones.
        if tokens & {"urgent", "important"}:
            inferred_priority = "high"
        elif tokens & {"low", "minor"}:
            inferred_priority = "low"
        else:
            inferred_priority = "medium"

        # Long prompts get truncated to a 50-character title with an ellipsis.
        title = prompt if len(prompt) <= 50 else prompt[:50] + "..."

        return [{
            "title": title,
            "description": prompt,
            "due_date": None,
            "priority": inferred_priority,
            "status": "pending",
        }]
|
|
|
|
|
|
def get_llm_service() -> LLMService:
    """Return the LLM service selected by ``settings.LLM_PROVIDER``.

    Falls back to :class:`MockLLMService` when the configured provider is
    unrecognized or missing credentials, and whenever the environment is
    "test" or the provider is explicitly "mock".
    """
    provider = settings.LLM_PROVIDER.lower()

    if provider == "openai" and settings.OPENAI_API_KEY:
        return OpenAIService()
    if provider == "gemini" and settings.GEMINI_API_KEY:
        return GeminiService()
    if provider == "mock" or settings.environment == "test":
        return MockLLMService()

    logger.warning(f"LLM provider '{provider}' not properly configured - using mock service")
    return MockLLMService()
|