| | import os |
| | import requests |
| | import gradio as gr |
| |
|
| | |
# Hugging Face API token read at import time; None when HF_TOKEN is unset.
token = os.getenv("HF_TOKEN")
| |
|
| | |
def chat_with_model(message, timeout=30):
    """Send *message* to Qwen2.5-7B-Instruct via the Hugging Face Inference API.

    Parameters
    ----------
    message : str
        The user prompt, forwarded verbatim as the model input.
    timeout : float, optional
        Seconds to wait for the HTTP response (default 30).  The original
        code passed no timeout, so a stalled request could hang forever.

    Returns
    -------
    str
        The model's generated text on success, otherwise a user-facing
        Japanese error message (network failure, non-200 status, or an
        unexpected response payload).
    """
    url = "https://api-inference.huggingface.co/models/Qwen/Qwen2.5-7B-Instruct"
    headers = {"Authorization": f"Bearer {token}"}
    data = {"inputs": message}

    try:
        response = requests.post(url, headers=headers, json=data, timeout=timeout)
    except requests.RequestException:
        # Timeout / connection error: surface the same error text instead of
        # letting the exception crash the Gradio callback.
        return "エラーが発生しました。もう一度試してください。"

    if response.status_code == 200:
        try:
            # HF text-generation responses are a list of dicts:
            # [{"generated_text": "..."}]
            return response.json()[0]['generated_text']
        except (ValueError, KeyError, IndexError, TypeError):
            # Malformed JSON or unexpected schema — treat as an error.
            return "エラーが発生しました。もう一度試してください。"
    else:
        return "エラーが発生しました。もう一度試してください。"
| |
|
| | |
def chatbot_interface(message):
    """Gradio callback: forward the user's message to the model backend."""
    reply = chat_with_model(message)
    return reply
| |
|
| | |
# Build the input widget first so the Interface wiring below reads cleanly.
_prompt_box = gr.Textbox(
    label="メッセージ",
    placeholder="ここにメッセージを入力してください...",
    lines=2,
)

# Single-turn chat UI: one textbox in, plain text out, submit-on-click only.
interface = gr.Interface(
    fn=chatbot_interface,
    inputs=_prompt_box,
    outputs="text",
    live=False,
    title="Qwen2.5-7B-Instruct チャットボット",
    description="Hugging Face APIを使ってQwen2.5-7B-Instructモデルと対話できます。",
)
| |
|
| | |
if __name__ == "__main__":
    # Start the Gradio web server only when executed as a script,
    # not when this module is imported.
    interface.launch()