# app/llm.py
# (uploaded by parthpatel01 via the upload-large-folder tool; commit 45a77a4, verified)
from __future__ import annotations

import copy
import json
import re
from typing import Any, Dict, Type

from openai import AzureOpenAI, OpenAI
from pydantic import BaseModel, ValidationError

from .models import Summary, DecisionOutput, TradePlan
from .prompts import SYSTEM_EXPERT, SYSTEM_TRADE_JSON
def _sanitize_schema_name(name: str) -> str:
"""
response_format.json_schema.name must match ^[a-zA-Z0-9_-]+$
"""
cleaned = re.sub(r"[^a-zA-Z0-9_-]", "_", name.strip())
return cleaned or "Schema"
def _ensure_no_extra(obj_schema: Dict[str, Any]) -> Dict[str, Any]:
"""
Recursively walk a (possibly nested) pydantic schema and:
- for every object node, add additionalProperties: false if missing
"""
# If this node is an object, enforce additionalProperties == False
if obj_schema.get("type") == "object":
if "additionalProperties" not in obj_schema:
obj_schema["additionalProperties"] = False
props = obj_schema.get("properties", {})
if isinstance(props, dict):
for _, sub in props.items():
if isinstance(sub, dict):
_ensure_no_extra(sub)
# Handle arrays-of-objects
if "items" in obj_schema and isinstance(obj_schema["items"], dict):
_ensure_no_extra(obj_schema["items"])
# Handle unions / compositions
for key in ("anyOf", "allOf", "oneOf"):
if key in obj_schema and isinstance(obj_schema[key], list):
for sub in obj_schema[key]:
if isinstance(sub, dict):
_ensure_no_extra(sub)
# Also recurse into $defs / definitions (pydantic puts models there)
for defs_key in ("$defs", "definitions"):
if defs_key in obj_schema and isinstance(obj_schema[defs_key], dict):
for _, sub in obj_schema[defs_key].items():
if isinstance(sub, dict):
_ensure_no_extra(sub)
return obj_schema
def _prepare_schema_for_openai(pyd_schema: Dict[str, Any], name: str) -> Dict[str, Any]:
    """
    Turn Pydantic's model_json_schema() output into a ``response_format``
    dict that an OpenAI-compatible server will accept.

    Critical rules enforced here:
        - Every object in the schema gets ``additionalProperties: false``.
        - The TOP LEVEL also gets ``additionalProperties: false``, even when
          the root uses allOf/anyOf/oneOf instead of a direct
          ``{type: "object"}``.
        - The "name" field is sanitized to match ``^[a-zA-Z0-9_-]+$``.
        - ``strict`` is set to True.

    Args:
        pyd_schema: raw schema from ``Model.model_json_schema()``.
        name: desired schema name (sanitized before use).

    Returns:
        A ``response_format`` mapping ready for ``chat.completions.create``.
    """
    # Deep-copy before patching: _ensure_no_extra mutates nested dicts in
    # place, and a shallow dict() copy would leak those edits back into the
    # caller's (possibly cached) pydantic schema.
    fixed = _ensure_no_extra(copy.deepcopy(pyd_schema))
    # HARD REQUIREMENT FOR THE SERVER: force additionalProperties: false at
    # the root no matter what (handles a root that is an allOf and not
    # directly type:"object").
    fixed["additionalProperties"] = False
    safe_name = _sanitize_schema_name(name)
    return {
        "type": "json_schema",
        "json_schema": {
            "name": safe_name,
            "strict": True,
            "schema": fixed,
        },
    }
class LLMClient:
    """
    Chat-completions client that returns schema-validated pydantic models.

    Uses OpenAI's json_schema response_format so the server constrains the
    model's output to the requested pydantic model's JSON schema.
    """

    def __init__(self, api_key: str, model: str, base_url: str,
                 temperature: float = 0, top_p: float = 0.1):
        """
        Args:
            api_key: API key (must be non-empty).
            model: model name (or Azure deployment name).
            base_url: when truthy, treated as an *Azure* endpoint; when
                empty, the public OpenAI endpoint is used.
            temperature: sampling temperature forwarded to the API.
            top_p: nucleus-sampling parameter forwarded to the API.

        Raises:
            ValueError: if api_key is empty.
        """
        if not api_key:
            raise ValueError("OPENAI_API_KEY is empty; set in env or .env")
        # NOTE(review): base_url is interpreted as an Azure endpoint here; the
        # previous OpenAI(base_url=...) variant was commented out upstream.
        self.client = (
            AzureOpenAI(api_key=api_key, azure_endpoint=base_url,
                        api_version="2024-08-01-preview")
            if base_url
            else OpenAI(api_key=api_key)
        )
        self.model = model
        self.temperature = temperature
        self.top_p = top_p

    def _chat_json(self, system_prompt: str, user_prompt: str,
                   schema_model: Type[BaseModel], schema_name: str) -> Dict[str, Any]:
        """
        Call the model with response_format=json_schema and return parsed JSON.

        Args:
            system_prompt: system message content.
            user_prompt: user message content.
            schema_model: pydantic BaseModel subclass describing the response.
            schema_name: unique-ish schema name string for OpenAI.

        Returns:
            The JSON-decoded response content as a dict.
        """
        # Pydantic schema, patched to satisfy OpenAI's strict-mode
        # "additionalProperties": false requirement.
        raw_schema = schema_model.model_json_schema()
        response_format = _prepare_schema_for_openai(raw_schema, schema_name)
        resp = self.client.chat.completions.create(
            model=self.model,
            messages=[{"role": "system", "content": system_prompt},
                      {"role": "user", "content": user_prompt}],
            response_format=response_format,
            temperature=self.temperature,
            top_p=self.top_p,
        )
        raw = resp.choices[0].message.content
        return json.loads(raw)

    def _validated(self, system_prompt: str, user_prompt: str,
                   schema_model: Type[BaseModel], schema_name: str):
        """Run _chat_json and validate the result into *schema_model*.

        Raises:
            RuntimeError: if the response fails pydantic validation
                (chained to the original ValidationError).
        """
        data = self._chat_json(system_prompt=system_prompt,
                               user_prompt=user_prompt,
                               schema_model=schema_model,
                               schema_name=schema_name)
        try:
            return schema_model.model_validate(data)
        except ValidationError as e:
            # Chain the cause so the full pydantic report survives.
            raise RuntimeError(f"LLM validation failed: {e}") from e

    def morning_summary(self, user_prompt: str) -> Summary:
        """Return a validated morning Summary for *user_prompt*."""
        return self._validated(SYSTEM_EXPERT, user_prompt,
                               Summary, "MorningSummary")

    def trade_decision_output(self, user_prompt: str) -> DecisionOutput:
        """Return a validated DecisionOutput for *user_prompt*."""
        # Previously this method leaked raw ValidationError while
        # morning_summary wrapped it; all three now behave consistently.
        return self._validated(SYSTEM_EXPERT + "\n" + SYSTEM_TRADE_JSON,
                               user_prompt, DecisionOutput, "DecisionOutput")

    def trade_plan(self, user_prompt: str) -> TradePlan:
        """Return a validated TradePlan for *user_prompt*."""
        return self._validated(SYSTEM_EXPERT + "\n" + SYSTEM_TRADE_JSON,
                               user_prompt, TradePlan, "TradePlan")