# api_light_hf / apis / format2fvinfos.py
# Deployed via api_light_hf (commit cf7f643, 2026-03-12 12:47:03).
# NOTE(review): the original first lines were Hugging Face page chrome
# captured into the source file; converted to a comment so the file parses.
import os
from src.clients.llm_client import LLMClient
import json
import pandas as pd
from pydantic import BaseModel, Field
from enum import Enum
import base64
from io import BytesIO
from PIL import Image
from typing import List, Optional
from functools import cache
from datetime import datetime
import pytz
from src.utils.tracer import customtracer
from src.models.common import model
def _ask_raw_hf(messages, model, response_format=None):
    """Compatibility wrapper: routes OpenAI-style messages through HF LLMClient."""
    from src.clients.llm_client import LLMClient
    import json as _json

    client = LLMClient()

    system_prompt = None
    user_text = ""
    images = []

    # Flatten OpenAI-style chat messages into the (prompt, system, images)
    # triple that LLMClient expects.
    for message in messages:
        role = message.get("role", "")
        content = message.get("content", "")
        if role == "system" and isinstance(content, str):
            system_prompt = content
        elif role == "user":
            if isinstance(content, str):
                user_text = content
            elif isinstance(content, list):
                for part in content:
                    if not isinstance(part, dict):
                        continue
                    part_type = part.get("type")
                    if part_type == "text":
                        user_text += part.get("text", "")
                    elif part_type == "image_url":
                        url = part.get("image_url", {}).get("url", "")
                        if url.startswith("data:"):
                            # Strip the data-URI prefix, keep only the base64 payload.
                            images.append(url.split(",", 1)[1] if "," in url else url)
                        else:
                            images.append(url)

    if response_format is not None and hasattr(response_format, "model_json_schema"):
        # Structured call: validate against the pydantic schema, then
        # serialize the result back to JSON text for the caller.
        structured = client.call(
            prompt=user_text,
            schema=response_format,
            model=model,
            system_prompt=system_prompt,
            images=images or None,
            temperature=0,
        )
        return _json.dumps(structured.model_dump(), ensure_ascii=False)

    # Free-form call: return the raw completion text.
    return client.call_raw(
        prompt=user_text,
        model=model,
        system_prompt=system_prompt,
        images=images or None,
    )
# Schema definitions (independent of format2fvinfo.py)
class Meta(BaseModel):
    """Top-level metadata about the advertised company/offer.

    Field names are Japanese on purpose: they become the JSON keys of the
    structured LLM output. NOTE(review): the original identifiers were
    mojibake-garbled; they were reconstructed from context — confirm the
    exact names against downstream consumers of this schema.
    """
    会社名: str            # company name
    業界: str              # industry
    ブランド: str          # brand
    サービス: str          # service
    商品: str              # product
    タイトル: str          # title
    構成の意図: str        # intent of the composition/layout
    訴求テーマ: list[str]  # appeal themes
class Font(str, Enum):
    """Closed set of font categories the model may assign to a copy element.

    NOTE(review): the original values were mojibake-garbled (L78 had lost
    its closing quote entirely); reconstructed as the standard Japanese
    font-category terms — confirm against any UI that matches these strings.
    """
    font1 = "ゴシック"  # gothic / sans-serif
    font2 = "明朝"      # mincho / serif
    font3 = "手書き"    # handwritten
class EvsF(str, Enum):
    """Appeal mode of a copy line: emotional vs functional.

    NOTE(review): values reconstructed from mojibake (L80 was unterminated);
    confirm the exact Japanese terms against downstream consumers.
    """
    EMOTIONAL = "情緒"   # emotional appeal
    FUNCTIONAL = "機能"  # functional appeal
class PvsS(str, Enum):
    """Focus stage of a copy line: raising the problem vs presenting the solution."""
    PROBLEM = "問題提起"   # problem statement
    SOLUTION = "課題解決"  # solution / resolution
class Copy(BaseModel):
    """A single copy (text) element within the first view."""
    text: str           # the copy text itself
    font: Font          # font category
    color: str          # display color
    visual: str         # associated visual description
    appeal_mode: EvsF   # emotional vs functional appeal
    # NOTE(review): "forcus" is a typo for "focus", but this name is a JSON
    # schema key — renaming it would change the output contract, so it stays.
    forcus_stage: PvsS
class CatchCopy(BaseModel):
    """Main and sub catch-copy candidates for the first view."""
    main_copy: list[Copy]  # primary headline copies
    sub_copy: list[Copy]   # supporting sub-copies
class FVinfo(BaseModel):
    """Root schema: all first-view elements generated for one input.

    NOTE(review): the catch-copy field name was mojibake-garbled in the
    original; reconstructed as キャッチコピー — confirm against downstream
    consumers of the serialized JSON.
    """
    非LP: bool                 # True when the page is not a landing page
    メタ: Meta                 # offer metadata
    キャッチコピー: CatchCopy  # catch copy candidates
    権威付け: list[str]        # authority / trust signals
    ビジュアル: list[str]      # visual descriptions
    CTAボタン: list[str]       # CTA button labels
def get_openai_request(messages, format, n=1):
    """Generate n candidate completions for the given messages via the HF wrapper.

    The original body was broken in several ways: it referenced an undefined
    name `p`, passed `model` both positionally and as a keyword (TypeError),
    passed kwargs (`messages`, `top_p`, `n`, ...) that `_ask_raw_hf` does not
    accept, left `client` unused, and treated `_ask_raw_hf`'s string return
    as an OpenAI response object (`response.choices`), ignoring `n` entirely.
    This rewrite preserves the documented contract: always return a list of
    JSON strings, one per requested candidate.

    Args:
        messages: OpenAI-style message list (system/user entries).
        format: pydantic model used as the structured response format.
        n: number of candidates to generate (default 1).

    Returns:
        list[str]: always a list, even for n=1; each element is the JSON
        text of one generated candidate.
    """
    # The HF wrapper produces one completion per call, so emulate the
    # OpenAI `n` parameter by calling it once per requested candidate.
    # NOTE(review): model taken from the explicit keyword in the original
    # call site — confirm this should not be src.models.common.model.
    target_model = "meta-llama/Llama-3.3-70B-Instruct"
    return [
        _ask_raw_hf(messages, target_model, response_format=format)
        for _ in range(n)
    ]
@customtracer
def format2fvinfos(p, openai_key=os.environ.get('OPENAI_KEY'), n=1):
    """Generate multiple first-view (FV) info variants from format data.

    input1 (text): prompt text (the format data)
    input2 (text): API key, or "default" to use the OPENAI_KEY env var
    input3 (number): number of variants (coerced to int, clamped to 1..10)
    output1 (json): JSON-encoded list of variant strings

    NOTE(review): the openai_key default is evaluated once at import time
    (standard Python default-arg behavior) — preserved as-is.
    """
    print(datetime.now(pytz.timezone('Asia/Tokyo')).strftime("%Y-%m-%d %H:%M:%S"), __name__, f"n={n}")

    # "default" means: fall back to the deployment-level OPENAI_KEY.
    if openai_key == "default":
        os.environ['OPENAI_API_KEY'] = os.environ.get('OPENAI_KEY')
    else:
        os.environ['OPENAI_API_KEY'] = openai_key

    # Coerce n to an int and clamp it to a sane range.
    try:
        n = int(n)
    except (TypeError, ValueError):
        print(f"Warning: n={n} is invalid, using n=1")
        n = 1
    else:
        if n < 1:
            print(f"Warning: n={n} is invalid, using n=1")
            n = 1
        elif n > 10:
            print(f"Warning: n={n} is too large, capping at 10")
            n = 10

    messages = [
        {
            "role": "system",
            # NOTE(review): the original literal was mojibake-garbled and its
            # closing quotes were broken (a syntax error); reconstructed as
            # the intended instruction — confirm wording if prompt-sensitive.
            "content": "提供したフォーマットデータから、必要なファーストビューの要素を生成してください。",
        },
        {
            "role": "user",
            "content": [{"type": "text", "text": p}],
        },
    ]

    # get_openai_request always returns a list, even for n == 1.
    variants = get_openai_request(messages, FVinfo, n=n)
    print(f"Generated {len(variants)} variants")
    return json.dumps(variants, ensure_ascii=False)