initial commit
parent 7dabe108b0
commit c90f25350d
@@ -1,6 +1,7 @@
 """
 Router for the chat-to-tasks functionality.
 """
+
 import logging
 
 from fastapi import APIRouter, Depends, HTTPException
@@ -14,7 +15,6 @@ from app.schemas.task import TaskCreate, TaskRead
 from app.services.llm_service import LLMService, get_llm_service
 from app.db.session import get_db
 
-# Set up logger
 logger = logging.getLogger(__name__)
 
 router = APIRouter()
@@ -28,99 +28,94 @@ async def create_tasks_from_chat(
     llm_service: LLMService = Depends(get_llm_service),
 ):
     """
-    Convert natural language chat input into one or more task objects.
+    Convert natural language chat input into one or more structured task objects.
 
-    This endpoint:
-    1. Takes the user's natural language input
-    2. Sends it to an LLM for processing
-    3. Parses the LLM's response into TaskCreate objects
-    4. Creates the tasks in the database
-    5. Returns the created tasks
+    Steps:
+    1. Validate the input message length.
+    2. Send the message to the LLM service for task extraction.
+    3. Parse the response into TaskCreate schemas.
+    4. Persist tasks in the database linked to the authenticated user.
+    5. Return the list of created tasks or an error response.
 
-    All tasks are associated with the authenticated user.
     """
-    if not chat_input.message or len(chat_input.message.strip()) < 3:
+    message = chat_input.message.strip()
+    if len(message) < 3:
         raise HTTPException(
             status_code=400,
             detail="Message must be at least 3 characters long",
         )
 
-    # Initialize response
-    response = ChatResponse(original_message=chat_input.message)
+    response = ChatResponse(original_message=message)
 
     try:
-        # Process the chat message with the LLM service
-        logger.info(f"Processing chat input: {chat_input.message[:50]}...")
-        llm_tasks_data = await llm_service.chat_to_tasks(chat_input.message)
+        logger.info(f"Received chat input for task extraction: {message[:50]}...")
 
-        if not llm_tasks_data:
-            logger.warning("LLM returned no tasks")
+        # Extract tasks from the LLM service
+        llm_tasks = await llm_service.chat_to_tasks(message)
 
+        if not llm_tasks:
+            logger.warning("LLM service returned no tasks.")
             response.processing_successful = False
             response.error = ChatProcessingError(
                 error_type="parsing_error",
-                error_detail="No tasks could be extracted from your message",
+                error_detail="No tasks could be extracted from your message.",
             )
             return response
 
-        # Convert LLM response to TaskCreate objects and create in DB
         created_tasks = []
 
-        for task_data in llm_tasks_data:
+        for task_data in llm_tasks:
             try:
-                # Map LLM response fields to TaskCreate schema
-                # Handle different field names or formats that might come from the LLM
+                # Map LLM response fields to TaskCreate schema fields
                 task_create_data = {
                     "title": task_data.get("title", "Untitled Task"),
                     "description": task_data.get("description", ""),
                     "priority": task_data.get("priority", "medium").lower(),
                 }
 
-                # Handle due_date if present
-                if due_date := task_data.get("due_date"):
-                    if due_date != "null" and due_date is not None:
-                        task_create_data["due_date"] = due_date
+                # Validate and include due_date if present and valid
+                due_date = task_data.get("due_date")
+                if due_date and due_date != "null":
+                    task_create_data["due_date"] = due_date
 
-                # Map status if present (convert "pending" to "todo" if needed)
-                if status := task_data.get("status"):
-                    if status.lower() == "pending":
-                        task_create_data["status"] = "todo"
-                    else:
-                        task_create_data["status"] = status.lower()
+                # Map status field and normalize to internal status naming
+                status = task_data.get("status", "").lower()
+                if status == "pending":
+                    task_create_data["status"] = "todo"
+                elif status:
+                    task_create_data["status"] = status
 
-                # Create TaskCreate object and validate
+                # Validate input data against TaskCreate schema
                 task_in = TaskCreate(**task_create_data)
 
-                # Create task in database with current user as owner
+                # Create task with ownership linked to current user
                 db_task = task_crud.task.create_with_owner(
-                    db=db, obj_in=task_in, user_id=current_user.id
+                    db=db,
+                    obj_in=task_in,
+                    user_id=current_user.id,
                 )
 
-                # Add created task to response
                 created_tasks.append(TaskRead.model_validate(db_task))
 
-            except Exception as e:
-                logger.error(f"Error creating task: {e}")
-                # Continue with other tasks if one fails
-                continue
+            except Exception as task_exc:
+                logger.error(f"Failed to create task from LLM data: {task_exc}")
+                # Continue processing remaining tasks even if one fails
 
         if not created_tasks:
-            # If no tasks were successfully created
            response.processing_successful = False
             response.error = ChatProcessingError(
                 error_type="creation_error",
-                error_detail="Could not create any tasks from your message",
+                error_detail="Failed to create any tasks from the provided message.",
             )
         else:
-            # Add created tasks to response
             response.tasks = created_tasks
 
         return response
 
-    except Exception as e:
-        logger.exception(f"Error in chat-to-tasks endpoint: {e}")
+    except Exception as exc:
+        logger.exception(f"Unexpected error in chat-to-tasks endpoint: {exc}")
         response.processing_successful = False
         response.error = ChatProcessingError(
             error_type="processing_error",
-            error_detail=f"An error occurred while processing your request: {str(e)}",
+            error_detail=f"An error occurred while processing your request: {str(exc)}",
         )
         return response
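Note: to make the new mapping loop concrete, here is a minimal, runnable sketch that mirrors the field handling above outside of FastAPI and the database layer. The sample task dict is hypothetical; the field names and fallbacks (title, description, priority, due_date, status) are taken from the diff.

# Hypothetical example of a single task dict as the LLM service might return it.
llm_task = {
    "title": "Book dentist appointment",
    "description": "Call the clinic and schedule a check-up",
    "priority": "High",
    "due_date": "2024-06-01",
    "status": "pending",
}

# Same mapping as the new loop above, minus TaskCreate validation and the DB call.
task_create_data = {
    "title": llm_task.get("title", "Untitled Task"),
    "description": llm_task.get("description", ""),
    "priority": llm_task.get("priority", "medium").lower(),
}

due_date = llm_task.get("due_date")
if due_date and due_date != "null":
    task_create_data["due_date"] = due_date

status = llm_task.get("status", "").lower()
if status == "pending":
    task_create_data["status"] = "todo"  # LLM "pending" is normalized to the internal "todo"
elif status:
    task_create_data["status"] = status

print(task_create_data)
# {'title': 'Book dentist appointment', 'description': 'Call the clinic and schedule a check-up',
#  'priority': 'high', 'due_date': '2024-06-01', 'status': 'todo'}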
@@ -5,7 +5,7 @@ LLM service for converting natural language to structured task data.
 import json
 import logging
 from abc import ABC, abstractmethod
-from typing import Dict, List, Optional
+from typing import Dict, List, Optional, Union
 
 from app.core.config import settings
 
@@ -24,7 +24,7 @@ class LLMService(ABC):
         pass
 
 
-def extract_json_from_response(text: str) -> Dict:
+def extract_json_from_response(text: str) -> Union[Dict, List]:
     """Extract valid JSON from possibly markdown-wrapped LLM responses."""
     try:
         if "```json" in text:
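Note: the widened Union[Dict, List] return type reflects that a fenced LLM reply can parse to either a JSON object (OpenAI path) or a JSON array (Gemini path). A rough, hypothetical approximation of that fence-stripping step, not the repository's actual implementation:

import json
from typing import Dict, List, Union

def strip_and_parse(text: str) -> Union[Dict, List]:
    # Hypothetical sketch: drop an optional ```json ... ``` fence, then parse the remainder.
    if "```json" in text:
        text = text.split("```json", 1)[1]
        text = text.split("```", 1)[0]
    return json.loads(text.strip())

print(strip_and_parse('```json\n{"tasks": []}\n```'))  # {'tasks': []}  (dict, OpenAI-style)
print(strip_and_parse('[{"title": "Buy milk"}]'))      # [{'title': 'Buy milk'}]  (list, Gemini-style)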
@@ -74,7 +74,8 @@ class OpenAIService(LLMService):
         raw = response.choices[0].message.content.strip()
         result = extract_json_from_response(raw)
 
-        if "tasks" in result and isinstance(result["tasks"], list):
+        # Expect a dict with a "tasks" key
+        if isinstance(result, dict) and "tasks" in result and isinstance(result["tasks"], list):
             return result["tasks"]
         raise ValueError("Missing or invalid 'tasks' key in response.")
 
@@ -96,6 +97,7 @@ class GeminiService(LLMService):
             raise RuntimeError("Gemini client setup failed.")
 
     async def chat_to_tasks(self, prompt: str) -> List[Dict]:
+        # Note: Gemini returns a JSON LIST of tasks, not wrapped in an object.
         system_prompt = (
             "You are a task extraction assistant. Convert the user's message into structured task objects. "
             "Each task must include:\n"
@@ -104,7 +106,8 @@ class GeminiService(LLMService):
             "- due_date: ISO 8601 date (YYYY-MM-DD) or null\n"
             "- priority: high, medium, or low\n"
             "- status: set to \"pending\"\n\n"
-            "Return only this JSON format: { \"tasks\": [ ... ] }\n"
+            "Return ONLY a JSON array like this:\n"
+            "[ { ... }, { ... } ]\n"
             "No explanations, no markdown, no formatting – just pure JSON."
         )
         try:
@@ -115,9 +118,10 @@ class GeminiService(LLMService):
             raw = response.text.strip()
             result = extract_json_from_response(raw)
 
-            if "tasks" in result and isinstance(result["tasks"], list):
-                return result["tasks"]
-            raise ValueError("Missing or invalid 'tasks' key in response.")
+            # Expect a LIST of task dicts directly
+            if isinstance(result, list):
+                return result
+            raise ValueError("Expected a JSON list of tasks from Gemini response.")
 
         except Exception as e:
             logger.error(f"Gemini task extraction failed: {e}")
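Note: the net effect of the prompt and parsing changes is that the two providers are expected to return differently shaped JSON. A small sketch with hypothetical payloads, mirroring the checks above:

# Hypothetical raw payloads, shaped the way each provider is now prompted to respond.
openai_style = {"tasks": [{"title": "Buy milk", "priority": "low", "status": "pending"}]}
gemini_style = [{"title": "Buy milk", "priority": "low", "status": "pending"}]

# OpenAIService.chat_to_tasks: unwrap the "tasks" key from a dict.
if isinstance(openai_style, dict) and isinstance(openai_style.get("tasks"), list):
    tasks_from_openai = openai_style["tasks"]

# GeminiService.chat_to_tasks: accept a bare list directly.
if isinstance(gemini_style, list):
    tasks_from_gemini = gemini_style

# Both paths hand the same list of task dicts to the router's mapping loop.
assert tasks_from_openai == tasks_from_gemini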