# api_light_hf / apis / background.py
# Renecto's picture
# deploy api_light_hf (2026-03-12 12:47:03)
# cf7f643
from openai import os
from src.clients.llm_client import LLMClient
import json
import pandas as pd
from pydantic import BaseModel, Field
from enum import Enum
import base64
from io import BytesIO
from PIL import Image
from typing import List, Optional
from functools import cache
from datetime import datetime
import pytz
from src.utils.tracer import customtracer
from src.models.common import model
def _ask_raw_hf(messages, model, response_format=None):
    """Compatibility wrapper: route OpenAI-style chat messages through the HF LLMClient.

    Parameters
    ----------
    messages : list[dict]
        OpenAI-style message dicts with ``role``/``content``; ``content`` may be
        a plain string or a list of ``{"type": "text"|"image_url", ...}`` parts.
    model : str
        Model identifier forwarded to ``LLMClient``.
    response_format : type | None
        Optional pydantic model class. When it exposes ``model_json_schema``,
        the structured ``client.call`` path is used and the parsed result is
        re-serialized to a JSON string.

    Returns
    -------
    str
        JSON string of the structured result, or the raw completion text.
    """
    # ``json`` and ``LLMClient`` come from the module-level imports; the
    # original re-imported both inside the function (and an unused ``re``).
    client = LLMClient()
    system_prompt = None
    user_text = ""
    images = []
    for msg in messages:
        role = msg.get("role", "")
        content = msg.get("content", "")
        if role == "system" and isinstance(content, str):
            system_prompt = content
        elif role == "user":
            if isinstance(content, str):
                user_text = content
            elif isinstance(content, list):
                for part in content:
                    if not isinstance(part, dict):
                        continue
                    if part.get("type") == "text":
                        user_text += part.get("text", "")
                    elif part.get("type") == "image_url":
                        url = part.get("image_url", {}).get("url", "")
                        if url.startswith("data:"):
                            # Strip the data-URI header, keep only the base64 payload.
                            images.append(url.split(",", 1)[1] if "," in url else url)
                        else:
                            images.append(url)
    if response_format is not None and hasattr(response_format, "model_json_schema"):
        result = client.call(
            prompt=user_text,
            schema=response_format,
            model=model,
            system_prompt=system_prompt,
            images=images or None,
            temperature=0,
        )
        return json.dumps(result.model_dump(), ensure_ascii=False)
    return client.call_raw(
        prompt=user_text,
        model=model,
        system_prompt=system_prompt,
        images=images or None,
    )
class Estimations(BaseModel):
    """One candidate production-background hypothesis for the landing page."""

    name: str            # short label of the hypothesized background
    prob: float          # model-estimated probability (prompt asks for 0..1)
    reason: str          # justification text produced by the model
    button_prompt: str   # suggested button/CTA copy
    # BUG FIX: the original description string was mojibake ("...リスチE");
    # replaced with an intact English description of the same field.
    change_candidates: Optional[List[str]] = Field(
        default_factory=list,
        description="List of UI elements that should be changed to realize this background",
    )
class EstimateCategory(BaseModel):
    """A titled group of ranked background estimations for one category."""
    title: str
    estimations: list[Estimations]
class EstimateBackground(BaseModel):
    """Top-level response schema: the list of estimated background categories."""

    estimated_bg: list[EstimateCategory]
def get_openai_request(messages, format):
    """Send chat messages to the HF-hosted model and return the completion text.

    Parameters
    ----------
    messages : list[dict]
        OpenAI-style chat messages (system + user).
    format : type
        Pydantic model class used as the structured response schema.
        (NOTE: shadows the ``format`` builtin; kept for caller compatibility.)

    Returns
    -------
    str
        JSON string conforming to ``format``, as serialized by ``_ask_raw_hf``.
    """
    # BUG FIX: ``LLMClient`` is not the OpenAI SDK, so it has no
    # ``chat.completions.create``. The original comment here already said
    # "use _ask_raw_hf instead" — route through the compatibility wrapper,
    # which applies temperature=0 on the structured path like the old call.
    return _ask_raw_hf(
        messages,
        model="meta-llama/Llama-3.3-70B-Instruct",
        response_format=format,
    )
@customtracer
def background(p, openai_key=None):
    """Estimate a landing page's production background from its OCR text.

    Parameters
    ----------
    p : str
        OCR-extracted text of the landing page (original docstring was
        mojibake; presumably a list of selling points — TODO confirm).
    openai_key : str | None
        API key. ``None`` (default) or the literal string ``"default"`` falls
        back to the ``OPENAI_KEY`` environment variable.

    Returns
    -------
    str
        JSON string matching the ``EstimateBackground`` schema.
    """
    print(datetime.now(pytz.timezone('Asia/Tokyo')).strftime("%Y-%m-%d %H:%M:%S"), __name__)
    # BUG FIX: the original default ``openai_key=os.environ.get('OPENAI_KEY')``
    # was evaluated once at import time, freezing a possibly-stale value.
    # Resolve the environment variable at call time instead; net behavior for
    # existing callers ("default" or an explicit key) is unchanged.
    if openai_key is None or openai_key == "default":
        os.environ['OPENAI_API_KEY'] = os.environ.get('OPENAI_KEY')
    else:
        os.environ['OPENAI_API_KEY'] = openai_key
    # NOTE(review): the system prompt below is encoding-damaged Japanese
    # (mojibake). It is preserved byte-for-byte because the intact original
    # cannot be reconstructed from here — restore it from the source of truth.
    messages=[
        {
            "role": "system",
            "content": """WEBPAGEのOCR惁E��を提供します。このLandingPageにつぁE��持E��された頁E��の制作背景を推定してください。頁E��ごとに、指定数の候補と確玁E��0~1の間で回答して、E
吁E��景�E�Eame�E�につぁE��、change_candidatesフィールドに「その背景を実現するために変更すべきUI要素」�Eリストを斁E���E配�Eで返してください、E
※禁止ワード:「未来」「革命」「夢」に類する想像�E幁E��庁E��てしまぁE��ードがあれば具体的で納得度の高い言葉に置き換えて
""",
        },
        {
            "role": "user",
            "content": [{"type": "text", "text":p}]
        },
    ]
    return get_openai_request(messages, EstimateBackground)