diff --git a/app/services/llm_service.py b/app/services/llm_service.py
index 97f52e4..09665d5 100644
--- a/app/services/llm_service.py
+++ b/app/services/llm_service.py
@@ -6,7 +6,6 @@
 import logging
 from abc import ABC, abstractmethod
 from typing import Dict, List
-
 from app.core.config import settings
 
 # Configure logger
@@ -37,7 +36,7 @@ class OpenAIService(LLMService):
         """Initialize the OpenAI service."""
         try:
             import openai
-            self.client = openai.AsyncOpenAI(api_key=settings.openai_api_key)
+            self.client = openai.AsyncOpenAI(api_key=settings.OPENAI_API_KEY)
             self.model = settings.OPENAI_MODEL
         except (ImportError, AttributeError) as e:
             logger.error(f"Failed to initialize OpenAI service: {e}")
@@ -122,21 +121,21 @@ class GeminiService(LLMService):
         - priority: The priority level (high, medium, low)
         - status: The status (defaults to "pending")
 
-        Return ONLY a JSON array of task objects without any additional text or explanation.
-        Format your response as valid JSON with a "tasks" key that contains an array of task objects.
+        Return ONLY a JSON object with a "tasks" key that contains an array of task objects.
+        Do not include any text or explanation outside the JSON.
         """
 
         try:
+            # Seed the system prompt as the first user turn (Gemini history accepts only "user"/"model" roles with "parts")
             chat = self.model.start_chat(history=[
-                {"role": "user", "parts": [system_prompt]},
-                {"role": "model", "parts": ["I understand. I'll convert user inputs into JSON task objects with the specified properties."]}
+                {"role": "user", "parts": [system_prompt]}
             ])
+            # Send the user prompt asynchronously
             response = await chat.send_message_async(prompt)
             content = response.text
 
-            # Extract JSON from the response
-            # This handles cases where the model might add markdown code blocks
+            # Remove possible markdown code blocks wrapping the JSON response
             if "```json" in content:
                 json_str = content.split("```json")[1].split("```")[0].strip()
             elif "```" in content:
@@ -144,12 +143,13 @@ class GeminiService(LLMService):
             else:
                 json_str = content.strip()
 
+            # Parse JSON response
             result = json.loads(json_str)
 
-            # Expect a "tasks" key in the response JSON
+            # Return the list under "tasks" or the whole as single-item list
             if "tasks" in result:
                 return result["tasks"]
-            return [result]  # Return as a single-item list if no "tasks" key
+            return [result]
 
         except Exception as e:
             logger.error(f"Gemini API error: {e}")
@@ -169,7 +169,6 @@ class MockLLMService(LLMService):
         Returns:
             List of task dictionaries
         """
-        # Simple parsing logic for testing
         words = prompt.lower().split()
 
         priority = "medium"
@@ -178,7 +177,6 @@
         elif "low" in words or "minor" in words:
             priority = "low"
 
-        # Create a basic task from the prompt
         return [{
             "title": prompt[:50] + ("..." if len(prompt) > 50 else ""),
             "description": prompt,
@@ -188,7 +186,6 @@
         }]
 
 
-# Factory function to create the appropriate LLM service
 def get_llm_service() -> LLMService:
     """
     Factory function for LLM service dependency injection.
@@ -197,15 +194,13 @@ def get_llm_service() -> LLMService:
         An instance of a concrete LLMService implementation
     """
     llm_provider = settings.LLM_PROVIDER.lower()
-
-    if llm_provider == "openai" and settings.OPEANAI_API_KEY:
+
+    if llm_provider == "openai" and settings.OPENAI_API_KEY:
         return OpenAIService()
     elif llm_provider == "gemini" and settings.GEMINI_API_KEY:
         return GeminiService()
    elif llm_provider == "mock" or settings.environment == "test":
-        # Use mock service for testing or when configured
         return MockLLMService()
     else:
-        # Default to mock service if configuration is incomplete
         logger.warning(f"LLM provider '{llm_provider}' not properly configured - using mock service")
-        return MockLLMService()
\ No newline at end of file
+        return MockLLMService()