initial commit

Agent@BackendIM 2025-05-17 09:34:17 +00:00
parent dfbfa75251
commit 279c7394ad


@@ -19,12 +19,6 @@ class LLMService(ABC):
async def chat_to_tasks(self, prompt: str) -> List[Dict]:
"""
Convert natural language input to structured task objects.
Args:
prompt: User's natural language input describing tasks
Returns:
List of dictionary objects representing tasks
"""
pass
@@ -33,7 +27,6 @@ class OpenAIService(LLMService):
"""OpenAI implementation of LLM service."""
def __init__(self):
"""Initialize the OpenAI service."""
try:
import openai
self.client = openai.AsyncOpenAI(api_key=settings.OPENAI_API_KEY)
@@ -43,15 +36,6 @@ class OpenAIService(LLMService):
raise RuntimeError(f"OpenAI service initialization failed: {e}")
async def chat_to_tasks(self, prompt: str) -> List[Dict]:
"""
Convert natural language to tasks using OpenAI.
Args:
prompt: User's natural language input
Returns:
List of task dictionaries
"""
system_prompt = """
You are a task extraction assistant. Your job is to convert the user's natural language
input into one or more structured task objects. Each task should have these properties:
@@ -63,7 +47,6 @@ class OpenAIService(LLMService):
Return ONLY a JSON array of task objects without any additional text or explanation.
"""
try:
response = await self.client.chat.completions.create(
model=self.model,
@@ -74,7 +57,6 @@ class OpenAIService(LLMService):
response_format={"type": "json_object"},
temperature=0.2,
)
content = response.choices[0].message.content
result = json.loads(content)
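The code that turns the parsed result into the returned task list continues past this hunk. A minimal sketch of one way that normalization could look, assuming (not shown in the diff) that the model may answer with either a bare JSON array or an object wrapping the array under a key such as "tasks":

    from typing import Dict, List  # already imported at the top of the original module

    def _as_task_list(result) -> List[Dict]:
        # Hypothetical helper, not part of this commit: accept a bare JSON array
        # of tasks or an object such as {"tasks": [...]} and return the list.
        if isinstance(result, list):
            return result
        if isinstance(result, dict):
            for value in result.values():
                if isinstance(value, list):
                    return value
        raise ValueError("LLM response did not contain a task list")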
@@ -91,7 +73,6 @@ class GeminiService(LLMService):
"""Google Gemini implementation of LLM service."""
def __init__(self):
"""Initialize the Gemini service."""
try:
import google.generativeai as genai
genai.configure(api_key=settings.GEMINI_API_KEY)
@@ -101,15 +82,6 @@ class GeminiService(LLMService):
raise RuntimeError(f"Gemini service initialization failed: {e}")
async def chat_to_tasks(self, prompt: str) -> List[Dict]:
"""
Convert natural language to tasks using Google Gemini.
Args:
prompt: User's natural language input
Returns:
List of task dictionaries
"""
system_prompt = (
"You are a task extraction assistant. Your job is to convert the user's natural language "
"input into one or more structured task objects. Each task should have these properties:\n"
@@ -121,27 +93,14 @@ class GeminiService(LLMService):
"Return ONLY a JSON object with a \"tasks\" key that contains an array of task objects. "
"Do not include any text or explanation outside the JSON."
)
try:
chat = self.model.start_chat(history=[
{
"content": {
"parts": [system_prompt]
}
}
{"role": "user", "parts": [system_prompt]}
])
response = await chat.send_message_async(
{
"content": {
"parts": [prompt]
}
}
)
response = await chat.send_message_async(prompt)
content = response.text
# Remove possible markdown code blocks
# Handle markdown-wrapped responses
if "```json" in content:
json_str = content.split("```json")[1].split("```")[0].strip()
elif "```" in content:
@@ -164,18 +123,8 @@ class MockLLMService(LLMService):
"""Mock LLM service for testing."""
async def chat_to_tasks(self, prompt: str) -> List[Dict]:
"""
Return mock tasks based on the prompt.
Args:
prompt: User's natural language input
Returns:
List of task dictionaries
"""
words = prompt.lower().split()
priority = "medium"
if "urgent" in words or "important" in words:
priority = "high"
elif "low" in words or "minor" in words:
@@ -191,12 +140,6 @@ class MockLLMService(LLMService):
def get_llm_service() -> LLMService:
"""
Factory function for LLM service dependency injection.
Returns:
An instance of a concrete LLMService implementation
"""
llm_provider = settings.LLM_PROVIDER.lower()
if llm_provider == "openai" and settings.OPENAI_API_KEY:
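The factory reads settings.LLM_PROVIDER and returns the matching implementation. A minimal usage sketch, assuming the service is wired into a FastAPI route via dependency injection; the router, path, and request shape below are hypothetical and not part of this commit:

    from fastapi import APIRouter, Depends

    router = APIRouter()

    @router.post("/chat-to-tasks")
    async def chat_to_tasks_endpoint(
        prompt: str,
        llm: LLMService = Depends(get_llm_service),
    ):
        # get_llm_service() picks OpenAIService, GeminiService, or MockLLMService
        # based on settings.LLM_PROVIDER and the configured API keys.
        return await llm.chat_to_tasks(prompt)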