Spaces: Runtime error
import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
# Load the tokenizer
model_name = "Qwen/Qwen2.5-7B-Instruct"
tokenizer = AutoTokenizer.from_pretrained(model_name)
# 4-bit quantization settings: NF4 with double quantization,
# computing in bfloat16 to keep quality while cutting memory
quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
)
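# Rough arithmetic (assumption: ~0.5 bytes per weight for NF4, plus a small
# overhead for quantization constants): 7B parameters * ~0.5 bytes ≈ 3.5 GB
# of weight memory, versus roughly 14 GB in plain bfloat16.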
# Load the model with quantization applied at load time
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    device_map="auto",
    torch_dtype=torch.bfloat16,
    quantization_config=quantization_config,
)
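# Optional sanity check (a sketch, not required by the app): report how much
# memory the quantized weights actually occupy; get_memory_footprint() is a
# standard transformers PreTrainedModel method and returns bytes.
print(f"Model memory footprint: {model.get_memory_footprint() / 1e9:.2f} GB")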
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    # Build the conversation in the chat-messages format
    messages = [{"role": "system", "content": system_message}]
    for val in history:
        if val[0]:
            messages.append({"role": "user", "content": val[0]})
        if val[1]:
            messages.append({"role": "assistant", "content": val[1]})
    messages.append({"role": "user", "content": message})
    # Apply the chat template so the model sees the full conversation,
    # not just the latest user message
    prompt = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    input_ids = tokenizer([prompt], return_tensors="pt").input_ids.to(model.device)
    # Generate a reply; max_new_tokens bounds only the generated tokens,
    # unlike max_length, which also counts the prompt
    output_ids = model.generate(
        input_ids,
        max_new_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
        do_sample=True,
    )
    # Decode only the newly generated tokens, skipping the prompt
    response = tokenizer.decode(
        output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True
    )
    return response
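# Quick smoke test (illustrative values; uncomment to run outside the Gradio UI):
# print(respond("Hello!", [], "You are a helpful assistant.", 64, 0.7, 0.95))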
# Gradio chat interface
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(
            value="Answer the user's messages and requests. Stay positive.",
            label="System message",
        ),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
    concurrency_limit=30,  # handle up to 30 requests concurrently
)
if __name__ == "__main__":
    demo.launch()
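For the Space to build, every library the script imports must be installed. A minimal requirements.txt sketch follows (unpinned package names only; exact versions are an assumption left to the deployment). accelerate is required for device_map="auto", and bitsandbytes for load_in_4bit.

# requirements.txt (sketch)
gradio
torch
transformers
accelerate
bitsandbytes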