import os
from src.clients.llm_client import LLMClient
import json
import pandas as pd
from pydantic import BaseModel, ValidationError
from functools import cache
from typing import List
from datetime import datetime
import pytz
def _ask_raw_hf(messages, model, response_format=None):
    """Route OpenAI-style chat ``messages`` through the HF ``LLMClient``.

    Flattens the message list into a single system prompt, one user text
    prompt, and an optional list of images, then dispatches to the client.

    Args:
        messages: OpenAI-style list of ``{"role": ..., "content": ...}``
            dicts. ``content`` may be a plain string or a list of typed
            parts (``{"type": "text", ...}`` / ``{"type": "image_url", ...}``).
        model: Model identifier forwarded unchanged to the client.
        response_format: Optional pydantic model class. When it exposes
            ``model_json_schema`` the client is asked for structured output,
            which is returned serialized as a JSON string.

    Returns:
        str: JSON-serialized structured result when ``response_format`` is a
        pydantic model class, otherwise the raw text completion.
    """
    client = LLMClient()

    system_prompt = None
    user_text = ""
    images = []
    for msg in messages:
        role = msg.get("role", "")
        content = msg.get("content", "")
        if role == "system":
            if isinstance(content, str):
                # Last system message wins (single system slot in LLMClient).
                system_prompt = content
        elif role == "user":
            if isinstance(content, str):
                # Accumulate rather than overwrite so multiple user turns are
                # all forwarded, consistent with the list-part branch below.
                user_text += content
            elif isinstance(content, list):
                for part in content:
                    if not isinstance(part, dict):
                        continue
                    if part.get("type") == "text":
                        user_text += part.get("text", "")
                    elif part.get("type") == "image_url":
                        url = part.get("image_url", {}).get("url", "")
                        if url.startswith("data:"):
                            # Strip the "data:<mime>;base64," prefix and keep
                            # only the payload; pass through unchanged if no
                            # comma is present.
                            images.append(url.split(",", 1)[1] if "," in url else url)
                        else:
                            images.append(url)

    if response_format is not None and hasattr(response_format, "model_json_schema"):
        # Structured-output path: the client validates against the pydantic
        # schema; serialize the validated model back to JSON for the caller.
        # temperature=0 keeps schema-constrained output deterministic.
        result = client.call(
            prompt=user_text,
            schema=response_format,
            model=model,
            system_prompt=system_prompt,
            images=images if images else None,
            temperature=0,
        )
        return json.dumps(result.model_dump(), ensure_ascii=False)
    return client.call_raw(
        prompt=user_text,
        model=model,
        system_prompt=system_prompt,
        images=images if images else None,
    )
class newHTMLs(BaseModel):
    """Structured-output schema for a batch of generated HTML documents.

    Passed as ``response_format``/``schema`` to the LLM client so the model
    returns validated JSON of the shape ``{"HTMLs": ["<html>...", ...]}``.
    """

    # One complete HTML document string per list entry.
    HTMLs: list[str]
@cache
def modifyHTML(p, openai_key=os.environ.get('OPENAI_KEY')):
"""
input0 (text): ボタンの斁E��を通信業界に変えて。色は若老E��け�EぁE��グラチE�Eションで、サブコピ�Eもバイブスあがる感じに。影をつけて、�Eタンは丸くして.