
Implement a new endpoint that converts natural language input into structured tasks using an LLM.

Features include:
- LLM service abstraction with support for OpenAI and Google Gemini
- Dependency injection pattern for easy provider switching
- Robust error handling and response formatting
- Integration with existing user authentication and task creation
- Fallback to mock LLM service for testing or development
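The provider abstraction, dependency injection, and mock fallback listed above live outside the schema file shown below. A minimal sketch of how that wiring might look, assuming a FastAPI-style dependency provider; the names LLMService, MockLLMService, and get_llm_service are illustrative, not the project's actual identifiers:

# Illustrative sketch only -- class and function names are assumptions,
# not taken from the actual codebase.
from abc import ABC, abstractmethod
from typing import List


class LLMService(ABC):
    """Common interface each provider (OpenAI, Gemini, mock) implements."""

    @abstractmethod
    async def extract_tasks(self, message: str) -> List[dict]:
        """Turn a natural-language message into a list of task payloads."""


class MockLLMService(LLMService):
    """Deterministic stand-in for tests and local development."""

    async def extract_tasks(self, message: str) -> List[dict]:
        # Echo the message back as a single task so no API key is needed.
        return [{"title": message[:50], "description": message}]


def get_llm_service() -> LLMService:
    """Dependency provider; swap the returned class to change providers."""
    return MockLLMService()

A route can then receive the service via the dependency provider, so switching from the mock to an OpenAI- or Gemini-backed implementation stays a one-line change in get_llm_service rather than an edit to every endpoint.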
44 lines · 1.2 KiB · Python
"""
|
|
Pydantic schemas for the Chat-to-Tasks feature.
|
|
"""
|
|
from typing import List, Optional
|
|
|
|
from pydantic import BaseModel, Field
|
|
|
|
from app.schemas.task import TaskRead
|
|
|
|
|
|
class ChatInput(BaseModel):
|
|
"""Schema for chat input from user."""
|
|
|
|
message: str = Field(
|
|
...,
|
|
description="Natural language input describing tasks to be created",
|
|
min_length=3,
|
|
max_length=2000,
|
|
)
|
|
|
|
|
|
class ChatProcessingError(BaseModel):
|
|
"""Schema for error details when processing chat."""
|
|
|
|
error_type: str = Field(..., description="Type of error encountered")
|
|
error_detail: str = Field(..., description="Detailed error information")
|
|
|
|
|
|
class ChatResponse(BaseModel):
|
|
"""Schema for chat response with parsed tasks."""
|
|
|
|
original_message: str = Field(..., description="Original user message")
|
|
tasks: List[TaskRead] = Field(
|
|
default_factory=list,
|
|
description="Tasks extracted from the message",
|
|
)
|
|
processing_successful: bool = Field(
|
|
default=True,
|
|
description="Indicates if processing was successful",
|
|
)
|
|
error: Optional[ChatProcessingError] = Field(
|
|
default=None,
|
|
description="Error details if processing was not successful",
|
|
) |
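For reference, a short hypothetical usage example of these schemas, showing the error-path response shape an endpoint could return instead of a bare 500; the module path app.schemas.chat and the error values are assumptions:

# Hypothetical usage example; module path and error strings are assumptions.
from app.schemas.chat import ChatInput, ChatProcessingError, ChatResponse

payload = ChatInput(message="Book a dentist appointment and buy groceries tomorrow")

# When the LLM call fails, the endpoint can still return a well-formed body,
# with processing_successful flipped off and the failure described in error.
response = ChatResponse(
    original_message=payload.message,
    processing_successful=False,
    error=ChatProcessingError(
        error_type="llm_provider_error",
        error_detail="Provider request timed out; no tasks were created.",
    ),
)
print(response.model_dump_json(indent=2))  # use .json() on Pydantic v1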