import os
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, AutoModelForCausalLM
import torch

_model = None
_tokenizer = None

def _load_local():
    """Load the tokenizer/model once and cache them in module globals."""
    global _model, _tokenizer
    model_id = os.getenv("HF_LOCAL_MODEL_ID", "google/flan-t5-base")
    _tokenizer = AutoTokenizer.from_pretrained(model_id)
    # T5/FLAN checkpoints are encoder-decoder models; anything else is
    # treated as a causal (decoder-only) LM.
    if "t5" in model_id.lower() or "flan" in model_id.lower():
        _model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
    else:
        _model = AutoModelForCausalLM.from_pretrained(model_id)
    if torch.cuda.is_available():
        _model = _model.to("cuda")
def generate(system_prompt: str, user_prompt: str, temperature: float = 0.4, max_new_tokens: int = 512) -> str:
    """
    Simple prompt-in, text-out generation. Extend as needed.
    """
    use_api = os.getenv("USE_HF_INFERENCE_API", "false").lower() == "true"
    if use_api:
        # Use the hosted Inference API: POST with requests (simplified version).
        import requests
        model_id = os.getenv("HF_LOCAL_MODEL_ID", "google/flan-t5-base")
        api_url = f"https://api-inference.huggingface.co/models/{model_id}"
        headers = {"Authorization": f"Bearer {os.getenv('HF_API_TOKEN', '')}"}
        payload = {
            "inputs": f"{system_prompt}\n\n{user_prompt}",
            "parameters": {"temperature": temperature, "max_new_tokens": max_new_tokens},
        }
        r = requests.post(api_url, headers=headers, json=payload, timeout=120)
        r.raise_for_status()
        data = r.json()
        # The API returns either a list of generations or a single dict.
        if isinstance(data, list) and len(data) and "generated_text" in data[0]:
            return data[0]["generated_text"]
        elif isinstance(data, dict) and "generated_text" in data:
            return data["generated_text"]
        return str(data)

    # Local execution (Transformers)
    if _model is None:
        _load_local()
    prompt = f"{system_prompt}\n\n{user_prompt}".strip()
    inputs = _tokenizer(prompt, return_tensors="pt")
    if torch.cuda.is_available():
        inputs = {k: v.to("cuda") for k, v in inputs.items()}
    if hasattr(_model, "generate"):
        # Only sample when temperature > 0; otherwise decode greedily
        # (passing temperature=0 alongside do_sample=False is invalid).
        gen_kwargs = {"max_new_tokens": max_new_tokens}
        if temperature > 0:
            gen_kwargs.update(do_sample=True, temperature=temperature)
        with torch.no_grad():
            out_ids = _model.generate(**inputs, **gen_kwargs)
        # Causal LMs echo the prompt tokens; strip them before decoding.
        if not _model.config.is_encoder_decoder:
            out_ids = out_ids[:, inputs["input_ids"].shape[1]:]
        return _tokenizer.decode(out_ids[0], skip_special_tokens=True)
    else:
        return "Model not supported."