This commit is contained in:
2026-01-27 17:40:37 +01:00
parent 82947a7bd6
commit adc2cd572a
55 changed files with 4145 additions and 101 deletions

40
backend/app/core/ai.py Normal file
View File

@@ -0,0 +1,40 @@
import logging

from openai import AsyncOpenAI

from app.core.config import settings
class AIClient:
    """Thin async wrapper around an OpenAI-compatible chat-completions API,
    specialised for producing JSON output."""

    def __init__(self):
        # Credentials/endpoint come from app settings so the same client works
        # against OpenAI proper or compatible providers (OpenRouter, vLLM, ...).
        self.client = AsyncOpenAI(
            api_key=settings.OPENAI_API_KEY,
            base_url=settings.OPENAI_API_BASE,
        )
        self.model = settings.OPENAI_MODEL

    async def generate_json(self, prompt: str, schema_model=None) -> str:
        """Generate a JSON string from ``prompt``.

        Args:
            prompt: The user prompt; it should instruct the model on the
                expected JSON schema, since only generic JSON mode is used.
            schema_model: Optional Pydantic model describing the desired
                schema. Currently advisory only — for broad compatibility with
                OpenRouter/vLLM the request uses ``response_format =
                {"type": "json_object"}`` and relies on the prompt to enforce
                the schema. Accepted now so callers don't change if native
                structured outputs are adopted later.

        Returns:
            The raw message content returned by the model (a JSON string).

        Raises:
            Exception: Any error from the underlying client is logged with a
                traceback and re-raised unchanged.
        """
        try:
            messages = [{"role": "user", "content": prompt}]
            kwargs = {
                "model": self.model,
                "messages": messages,
                # json_object mode is the lowest common denominator across
                # OpenAI-compatible providers; structured outputs are skipped.
                "response_format": {"type": "json_object"},
            }
            response = await self.client.chat.completions.create(**kwargs)
            return response.choices[0].message.content
        except Exception:
            # logger.exception records the traceback (print() loses it and is
            # invisible in server logs); bare `raise` preserves the original
            # traceback for callers instead of re-raising via `raise e`.
            logging.getLogger(__name__).exception("AI generation error")
            raise
ai_client = AIClient()