import os
import json

from pydantic import BaseModel

from src.clients.llm_client import LLMClient

def _ask_raw_hf(messages, model, response_format=None):
    """Compatibility wrapper: routes OpenAI-style messages through HF LLMClient."""

    client = LLMClient()
    system_prompt = None
    user_text = ""
    images = []
    for msg in messages:
        role = msg.get("role", "")
        c = msg.get("content", "")
        if role == "system":
            if isinstance(c, str):
                system_prompt = c
        elif role == "user":
            if isinstance(c, str):
                user_text = c
            elif isinstance(c, list):
                for part in c:
                    if isinstance(part, dict):
                        if part.get("type") == "text":
                            user_text += part.get("text", "")
                        elif part.get("type") == "image_url":
                            url = part.get("image_url", {}).get("url", "")
                            if url.startswith("data:"):
                                # Data URL: keep only the base64 payload after the comma.
                                images.append(url.split(",", 1)[1] if "," in url else url)
                            else:
                                images.append(url)

    # A Pydantic schema was supplied: use the schema-aware call and return
    # the validated result serialised back to a JSON string.
    if response_format is not None and hasattr(response_format, "model_json_schema"):
        result = client.call(
            prompt=user_text,
            schema=response_format,
            model=model,
            system_prompt=system_prompt,
            images=images if images else None,
            temperature=0,
        )
        return json.dumps(result.model_dump(), ensure_ascii=False)
    else:
        return client.call_raw(
            prompt=user_text,
            model=model,
            system_prompt=system_prompt,
            images=images if images else None,
        )
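
# Illustration only (not executed): how an OpenAI-style message list maps
# through _ask_raw_hf above. The model name and message contents are just
# example values, not anything this repository requires.
#
#     reply = _ask_raw_hf(
#         [
#             {"role": "system", "content": "You are a concise marketing analyst."},
#             {"role": "user", "content": [
#                 {"type": "text", "text": "Describe this landing page."},
#                 {"type": "image_url",
#                  "image_url": {"url": "data:image/png;base64,...."}},
#             ]},
#         ],
#         model="meta-llama/Llama-3.3-70B-Instruct",
#     )
#     # The wrapper flattens this into system_prompt, user_text, and images
#     # (base64 payload only, data-URL prefix stripped), then calls
#     # LLMClient.call_raw because no response_format was given.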


client = LLMClient()

class Comment(BaseModel):
    コメント: str        # comment
    理由: str            # reason
    テキスト: str        # text
    テキストの種類: str  # type of text
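
# For reference: the structured output requested via this schema is a JSON
# object of the form (field values illustrative):
#     {"コメント": "...", "理由": "...", "テキスト": "...", "テキストの種類": "..."}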

def ask_raw(messages, model):
    # Route the chat messages through the HF compatibility wrapper,
    # requesting structured output that matches the Comment schema.
    # The wrapper pins temperature=0 for schema-constrained calls.
    return _ask_raw_hf(messages, model, response_format=Comment)

def heatmap_text2comment(p, fv_info1, fv_info2, title1, title2, openai_key=os.environ.get('OPENAI_KEY')):
    """
    input1 (text): p, the user prompt
    input2 (text): fv_info1, first block of LP content
    input3 (text): fv_info2, second block of LP content
    input4 (text): title1, heading for fv_info1
    input5 (text): title2, heading for fv_info2
    input6 (text): openai_key, API key; pass "default" to fall back to OPENAI_KEY
    output1 (json): comment (JSON string matching the Comment schema)
    """
    if openai_key == "default":
        os.environ['OPENAI_API_KEY'] = os.environ.get('OPENAI_KEY')
    else:
        os.environ['OPENAI_API_KEY'] = openai_key
    # System prompt (Japanese). English gloss: "The following is the content
    # of the LP being analysed. Based on this LP's information, please
    # comment as an LP analysis expert."
    messages = [
        {
            "role": "system",
            "content": f"以下は、分析を進めているLPの内容です。\nこのLPの情報を元に、LP分析の専門家としてコメントしてください。\n\n#{title1}\n{fv_info1}\n\n#{title2}\n{fv_info2}"
        },
        {
            "role": "user",
            "content": [
                {"type": "text", "text": p}
            ]
        },
    ]
    return ask_raw(messages, "meta-llama/Llama-3.3-70B-Instruct")
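

if __name__ == "__main__":
    # Minimal usage sketch, not part of the library surface: assumes
    # OPENAI_KEY is set in the environment and that LLMClient can reach the
    # named model. All argument values below are illustrative placeholders.
    demo = heatmap_text2comment(
        p="Please comment on the first impression of this LP.",
        fv_info1="(heatmap summary text)",
        fv_info2="(first-view element list)",
        title1="Heatmap info",
        title2="First-view info",
    )
    print(demo)  # JSON string matching the Comment schema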