initial commit

This commit is contained in:
Agent@BackendIM 2025-05-17 09:49:47 +00:00
parent 7dabe108b0
commit c90f25350d
2 changed files with 69 additions and 70 deletions

View File

@@ -1,6 +1,7 @@
"""
Router for the chat-to-tasks functionality.
"""
import logging
from fastapi import APIRouter, Depends, HTTPException
@@ -14,7 +15,6 @@ from app.schemas.task import TaskCreate, TaskRead
from app.services.llm_service import LLMService, get_llm_service
from app.db.session import get_db
# Set up logger
logger = logging.getLogger(__name__)
router = APIRouter()
@@ -28,99 +28,94 @@ async def create_tasks_from_chat(
llm_service: LLMService = Depends(get_llm_service),
):
"""
Convert natural language chat input into one or more task objects.
This endpoint:
1. Takes the user's natural language input
2. Sends it to an LLM for processing
3. Parses the LLM's response into TaskCreate objects
4. Creates the tasks in the database
5. Returns the created tasks
All tasks are associated with the authenticated user.
Convert natural language chat input into one or more structured task objects.
Steps:
1. Validate the input message length.
2. Send the message to the LLM service for task extraction.
3. Parse the response into TaskCreate schemas.
4. Persist tasks in the database linked to the authenticated user.
5. Return the list of created tasks or an error response.
"""
if not chat_input.message or len(chat_input.message.strip()) < 3:
message = chat_input.message.strip()
if len(message) < 3:
raise HTTPException(
status_code=400,
detail="Message must be at least 3 characters long",
)
# Initialize response
response = ChatResponse(original_message=chat_input.message)
response = ChatResponse(original_message=message)
try:
# Process the chat message with the LLM service
logger.info(f"Processing chat input: {chat_input.message[:50]}...")
llm_tasks_data = await llm_service.chat_to_tasks(chat_input.message)
if not llm_tasks_data:
logger.warning("LLM returned no tasks")
logger.info(f"Received chat input for task extraction: {message[:50]}...")
# Extract tasks from the LLM service
llm_tasks = await llm_service.chat_to_tasks(message)
if not llm_tasks:
logger.warning("LLM service returned no tasks.")
response.processing_successful = False
response.error = ChatProcessingError(
error_type="parsing_error",
error_detail="No tasks could be extracted from your message",
error_detail="No tasks could be extracted from your message.",
)
return response
# Convert LLM response to TaskCreate objects and create in DB
created_tasks = []
for task_data in llm_tasks_data:
for task_data in llm_tasks:
try:
# Map LLM response fields to TaskCreate schema
# Handle different field names or formats that might come from the LLM
# Map LLM response fields to TaskCreate schema fields
task_create_data = {
"title": task_data.get("title", "Untitled Task"),
"description": task_data.get("description", ""),
"priority": task_data.get("priority", "medium").lower(),
}
# Handle due_date if present
if due_date := task_data.get("due_date"):
if due_date != "null" and due_date is not None:
task_create_data["due_date"] = due_date
# Map status if present (convert "pending" to "todo" if needed)
if status := task_data.get("status"):
if status.lower() == "pending":
task_create_data["status"] = "todo"
else:
task_create_data["status"] = status.lower()
# Create TaskCreate object and validate
# Validate and include due_date if present and valid
due_date = task_data.get("due_date")
if due_date and due_date != "null":
task_create_data["due_date"] = due_date
# Map status field and normalize to internal status naming
status = task_data.get("status", "").lower()
if status == "pending":
task_create_data["status"] = "todo"
elif status:
task_create_data["status"] = status
# Validate input data against TaskCreate schema
task_in = TaskCreate(**task_create_data)
# Create task in database with current user as owner
# Create task with ownership linked to current user
db_task = task_crud.task.create_with_owner(
db=db, obj_in=task_in, user_id=current_user.id
db=db,
obj_in=task_in,
user_id=current_user.id,
)
# Add created task to response
created_tasks.append(TaskRead.model_validate(db_task))
except Exception as e:
logger.error(f"Error creating task: {e}")
# Continue with other tasks if one fails
continue
except Exception as task_exc:
logger.error(f"Failed to create task from LLM data: {task_exc}")
# Continue processing remaining tasks even if one fails
if not created_tasks:
# If no tasks were successfully created
response.processing_successful = False
response.error = ChatProcessingError(
error_type="creation_error",
error_detail="Could not create any tasks from your message",
error_detail="Failed to create any tasks from the provided message.",
)
else:
# Add created tasks to response
response.tasks = created_tasks
return response
except Exception as e:
logger.exception(f"Error in chat-to-tasks endpoint: {e}")
except Exception as exc:
logger.exception(f"Unexpected error in chat-to-tasks endpoint: {exc}")
response.processing_successful = False
response.error = ChatProcessingError(
error_type="processing_error",
error_detail=f"An error occurred while processing your request: {str(e)}",
error_detail=f"An error occurred while processing your request: {str(exc)}",
)
return response
return response

View File

@@ -5,7 +5,7 @@ LLM service for converting natural language to structured task data.
import json
import logging
from abc import ABC, abstractmethod
from typing import Dict, List, Optional
from typing import Dict, List, Optional, Union
from app.core.config import settings
@@ -24,7 +24,7 @@ class LLMService(ABC):
pass
def extract_json_from_response(text: str) -> Dict:
def extract_json_from_response(text: str) -> Union[Dict, List]:
"""Extract valid JSON from possibly markdown-wrapped LLM responses."""
try:
if "```json" in text:
@@ -74,7 +74,8 @@ class OpenAIService(LLMService):
raw = response.choices[0].message.content.strip()
result = extract_json_from_response(raw)
if "tasks" in result and isinstance(result["tasks"], list):
# Expect a dict with a "tasks" key
if isinstance(result, dict) and "tasks" in result and isinstance(result["tasks"], list):
return result["tasks"]
raise ValueError("Missing or invalid 'tasks' key in response.")
@@ -96,6 +97,7 @@ class GeminiService(LLMService):
raise RuntimeError("Gemini client setup failed.")
async def chat_to_tasks(self, prompt: str) -> List[Dict]:
# Note: Gemini returns a JSON LIST of tasks, not wrapped in an object.
system_prompt = (
"You are a task extraction assistant. Convert the user's message into structured task objects. "
"Each task must include:\n"
@@ -104,7 +106,8 @@ class GeminiService(LLMService):
"- due_date: ISO 8601 date (YYYY-MM-DD) or null\n"
"- priority: high, medium, or low\n"
"- status: set to \"pending\"\n\n"
"Return only this JSON format: { \"tasks\": [ ... ] }\n"
"Return ONLY a JSON array like this:\n"
"[ { ... }, { ... } ]\n"
"No explanations, no markdown, no formatting just pure JSON."
)
try:
@@ -115,9 +118,10 @@ class GeminiService(LLMService):
raw = response.text.strip()
result = extract_json_from_response(raw)
if "tasks" in result and isinstance(result["tasks"], list):
return result["tasks"]
raise ValueError("Missing or invalid 'tasks' key in response.")
# Expect a LIST of task dicts directly
if isinstance(result, list):
return result
raise ValueError("Expected a JSON list of tasks from Gemini response.")
except Exception as e:
logger.error(f"Gemini task extraction failed: {e}")