import os
import json
import base64
from io import BytesIO
from enum import Enum
from typing import List, Optional
from functools import cache
from datetime import datetime

import pandas as pd
import pytz
from PIL import Image
from pydantic import BaseModel, Field

from src.clients.llm_client import LLMClient
from src.models.common import model
from src.utils.tracer import customtracer


def _ask_raw_hf(messages, model, response_format=None):
    """Compatibility wrapper: routes OpenAI-style messages through HF LLMClient.

    Flattens an OpenAI chat ``messages`` list into the prompt / system-prompt /
    images arguments that ``LLMClient`` expects.

    Args:
        messages: OpenAI-style list of ``{"role", "content"}`` dicts. ``content``
            may be a plain string or a list of ``{"type": "text"|"image_url"}``
            parts (vision-style payload).
        model: model identifier forwarded to ``LLMClient``.
        response_format: optional pydantic model class. When it exposes
            ``model_json_schema`` the client is asked for structured output,
            which is re-serialized to a JSON string.

    Returns:
        str: the model response — JSON text when ``response_format`` is used,
        otherwise the raw completion text.
    """
    client = LLMClient()
    system_prompt = None
    user_text = ""
    images = []
    for msg in messages:
        role = msg.get("role", "")
        content = msg.get("content", "")
        if role == "system":
            if isinstance(content, str):
                system_prompt = content
        elif role == "user":
            if isinstance(content, str):
                user_text = content
            elif isinstance(content, list):
                for part in content:
                    if not isinstance(part, dict):
                        continue
                    if part.get("type") == "text":
                        user_text += part.get("text", "")
                    elif part.get("type") == "image_url":
                        url = part.get("image_url", {}).get("url", "")
                        if url.startswith("data:"):
                            # Strip the data-URI header; keep only the base64 payload.
                            images.append(url.split(",", 1)[1] if "," in url else url)
                        else:
                            images.append(url)
    if response_format is not None and hasattr(response_format, "model_json_schema"):
        result = client.call(
            prompt=user_text,
            schema=response_format,
            model=model,
            system_prompt=system_prompt,
            images=images if images else None,
            temperature=0,
        )
        return json.dumps(result.model_dump(), ensure_ascii=False)
    return client.call_raw(
        prompt=user_text,
        model=model,
        system_prompt=system_prompt,
        images=images if images else None,
    )


# Multi-variant generation API for CN (content) data.
# Standalone implementation — intentionally independent of format2cninfo.py.


# Schema definitions (independent of format2cninfo.py).
class UIoption(str, Enum):
    """UI element categories a component can map to."""

    element1 = "バナー/動画"
    element2 = "CTA"
    # NOTE(review): this literal was mojibake (encoding-corrupted) in the
    # source; "テキスト" ("text") is the most plausible original value —
    # confirm against the upstream spec before relying on exact matching.
    element3 = "テキスト"
    element4 = "フォーム"


class Component(BaseModel):
    """One content component of a generated CN variant."""

    component_large: str
    component_middle: str
    component_small: list[str]
    UIelement: UIoption


class CNinfo(BaseModel):
    """Structured-output schema: the full component list of one variant."""

    components: list[Component]


def get_openai_request(messages, format, n=1):
    """Request ``n`` completions and always return them as a list.

    Bug fix: the previous body contained a stray parenthesis
    (``LLMClient())`` — a syntax error), passed both a positional and a
    keyword ``model`` argument plus OpenAI-only parameters
    (``top_p``, ``frequency_penalty``, ``presence_penalty``, ``n``) to
    ``_ask_raw_hf``, and then iterated ``response.choices`` although
    ``_ask_raw_hf`` returns a plain string. The HF wrapper produces one
    completion per call, so ``n`` candidates are obtained by calling it
    ``n`` times.

    Args:
        messages: OpenAI-style message list.
        format: pydantic model class used as the structured-response schema.
        n: number of candidates to generate (default 1).

    Returns:
        list[str]: always a list, even when ``n == 1``.
    """
    return [
        _ask_raw_hf(messages, model, response_format=format)
        for _ in range(n)
    ]


@customtracer
def format2cninfos(p, openai_key=None, n=1):
    """Generate ``n`` CNinfo variants from a format prompt.

    input1 (text): prompt text
    input2 (text): API key; "default" or empty falls back to the OPENAI_KEY env var
    input3 (number): number of variants (coerced to int, clamped to 1..10)
    output1 (json): JSON string containing the list of variant strings

    Bug fix: the default for ``openai_key`` was
    ``os.environ.get('OPENAI_KEY')``, which is evaluated once at import time.
    It now defaults to ``None`` and is resolved at call time — behavior is
    unchanged because the body already falls back to the env var for
    empty/"default" values.
    """
    print(datetime.now(pytz.timezone('Asia/Tokyo')).strftime("%Y-%m-%d %H:%M:%S"),
          __name__, f"n={n}")
    if openai_key == "default" or not openai_key:
        openai_key = os.environ.get('OPENAI_KEY', '')
    if openai_key:
        os.environ['OPENAI_API_KEY'] = openai_key

    # Coerce n to an int and clamp it to the supported range [1, 10].
    try:
        n = int(n)
        if n < 1:
            print(f"Warning: n={n} is invalid, using n=1")
            n = 1
        elif n > 10:
            print(f"Warning: n={n} is too large, capping at 10")
            n = 10
    except (TypeError, ValueError):
        print(f"Warning: n={n} is invalid, using n=1")
        n = 1

    messages = [
        {
            "role": "system",
            # NOTE(review): this prompt literal was mojibake in the source;
            # restored as "generate the required content elements from the
            # provided format data" — confirm the exact original wording.
            "content": "提供したフォーマットデータから、必要なコンテンツの要素を生成してください。",
        },
        {
            "role": "user",
            "content": [{"type": "text", "text": p}],
        },
    ]

    # get_openai_request always returns a list, even for n=1.
    result = get_openai_request(messages, CNinfo, n=n)
    print(f"Generated {len(result)} CN variants")

    # Serialize the variant list as a JSON string.
    return json.dumps(result, ensure_ascii=False)