File size: 4,581 Bytes
cf7f643
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
from openai import os
from src.clients.llm_client import LLMClient
import json 
import pandas as pd
from pydantic import BaseModel, Field
from enum import Enum
import base64
from io import BytesIO
from PIL import Image
from typing import List, Optional
from functools import cache
from datetime import datetime
import pytz
from src.utils.tracer import customtracer
from src.models.common import model

def _ask_raw_hf(messages, model, response_format=None):
    """Route OpenAI-style chat ``messages`` through the HF ``LLMClient``.

    Parameters
    ----------
    messages : list[dict]
        OpenAI-format messages. System content must be a string; user
        content may be a string or a list of ``text`` / ``image_url`` parts.
    model : str
        Model identifier forwarded to the client.
    response_format : type | None
        When given and it exposes ``model_json_schema`` (a pydantic model),
        a structured call is made and the result is returned as a JSON
        string; otherwise the raw completion text is returned.

    Returns
    -------
    str
        JSON-serialized structured result, or the raw completion text.
    """
    # Uses the module-level LLMClient import; the old function-local
    # re-import (and the unused `re` / duplicate `json` imports) were dead.
    client = LLMClient()

    # Flatten the OpenAI message list into the pieces LLMClient expects.
    system_prompt = None
    user_text = ""
    images = []
    for msg in messages:
        role = msg.get("role", "")
        content = msg.get("content", "")
        if role == "system":
            if isinstance(content, str):
                system_prompt = content
        elif role == "user":
            if isinstance(content, str):
                user_text = content
            elif isinstance(content, list):
                for part in content:
                    if not isinstance(part, dict):
                        continue
                    if part.get("type") == "text":
                        user_text += part.get("text", "")
                    elif part.get("type") == "image_url":
                        url = part.get("image_url", {}).get("url", "")
                        if url.startswith("data:"):
                            # Strip the "data:<mime>;base64," prefix so only
                            # the base64 payload is forwarded.
                            images.append(url.split(",", 1)[1] if "," in url else url)
                        else:
                            images.append(url)

    if response_format is not None and hasattr(response_format, "model_json_schema"):
        result = client.call(
            prompt=user_text,
            schema=response_format,
            model=model,
            system_prompt=system_prompt,
            images=images or None,
            temperature=0,
        )
        return json.dumps(result.model_dump(), ensure_ascii=False)
    return client.call_raw(
        prompt=user_text,
        model=model,
        system_prompt=system_prompt,
        images=images or None,
    )

class Estimations(BaseModel):
    """One candidate production-background estimate for a landing page."""

    name: str  # name of the estimated background
    prob: float  # confidence in [0, 1], per the prompt in background()
    reason: str  # rationale behind the estimate
    button_prompt: str
    # Field description restored from mojibake (the original literal was
    # garbled and missing its closing quote); intended meaning:
    # "list of UI elements that should be changed" to realize this background.
    change_candidates: Optional[List[str]] = Field(
        default_factory=list,
        description="変更すべきUI要素のリスト",
    )
    
class EstimateCategory(BaseModel):
    """A titled group of background estimations (one category of the answer)."""

    title: str  # category title
    estimations: list[Estimations]  # candidate estimations within this category
    
class EstimateBackground(BaseModel):
    """Top-level structured-response schema: all estimated background categories."""

    # Per-category estimation groups returned by the model.
    estimated_bg:list[EstimateCategory]
    
def get_openai_request(messages, format):
    """Request a structured completion for *messages* and return its content.

    Parameters
    ----------
    messages : list[dict]
        OpenAI-style chat messages (system + user).
    format : type
        Pydantic model describing the expected structured response.

    Returns
    -------
    str
        JSON string with the model's structured answer.
    """
    # The HF LLMClient exposes no OpenAI `chat.completions` surface (the
    # original call here could never succeed — see the note that beta.parse
    # is unavailable). Route through the compatibility wrapper instead; it
    # already applies temperature=0 on the structured path.
    return _ask_raw_hf(
        messages,
        "meta-llama/Llama-3.3-70B-Instruct",
        response_format=format,
    )

@customtracer
def background(p, openai_key=None):
    """Estimate the production background of a landing page from its OCR text.

    input1 (text): OCR/keyword text of the landing page (Japanese), e.g.
        family phone-plan savings, data sharing, cost performance, etc.
    input2 (text): "default" (or omitted) to use the OPENAI_KEY env variable
    output1 (json): estimated background categories (EstimateBackground)

    Parameters
    ----------
    p : str
        OCR/keyword text describing the landing page.
    openai_key : str | None
        API key. ``None`` or ``"default"`` falls back to the ``OPENAI_KEY``
        environment variable, read at call time (the old default captured it
        once at import time, which went stale).

    Returns
    -------
    str
        JSON string conforming to ``EstimateBackground``.
    """
    print(datetime.now(pytz.timezone('Asia/Tokyo')).strftime("%Y-%m-%d %H:%M:%S"), __name__)
    # Resolve the key lazily; treat the "default" sentinel like "not given".
    if openai_key is None or openai_key == "default":
        openai_key = os.environ.get('OPENAI_KEY')
    if openai_key is not None:
        # Guard: os.environ only accepts str values — assigning None raises
        # TypeError (the original code did exactly that when OPENAI_KEY was
        # unset).
        os.environ['OPENAI_API_KEY'] = openai_key

    # NOTE(review): the system prompt below is mojibake (broken Japanese
    # encoding). It is kept byte-for-byte because it is runtime data sent to
    # the model — restore the intended text from version control, don't guess.
    messages=[
        {
        "role": "system",
        "content": """WEBPAGEのOCR惁E��を提供します。このLandingPageにつぁE��持E��された頁E��の制作背景を推定してください。頁E��ごとに、指定数の候補と確玁E��0~1の間で回答して、E
吁E��景�E�Eame�E�につぁE��、change_candidatesフィールドに「その背景を実現するために変更すべきUI要素」�Eリストを斁E���E配�Eで返してください、E
※禁止ワード:「未来」「革命」「夢」に類する想像�E幁E��庁E��てしまぁE��ードがあれば具体的で納得度の高い言葉に置き換えて
""",
        },
        {
        "role": "user",
        "content":  [{"type": "text", "text":p}]
        },
    ]

    return get_openai_request(messages, EstimateBackground)