# app.py
import logging
import os

import gradio as gr
from huggingface_hub import InferenceClient
# Read the API token from the environment (None when unset).
hf_token = os.environ.get("HF_TOKEN")
client = InferenceClient(token=hf_token)
def predict(prompt: str) -> str:
    """Generate an NPC reply for *prompt* via the HF Inference API.

    Args:
        prompt: The player's question to the NPC.

    Returns:
        The generated text from ``microsoft/phi-2``, or a Russian
        "service temporarily unavailable" fallback message when the
        remote call fails for any reason.
    """
    try:
        return client.text_generation(
            prompt=prompt,
            model="microsoft/phi-2",
            max_new_tokens=100,
        )
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # still propagate; log the real error instead of hiding it.
        logging.exception("text_generation failed")
        return "Извините, сервис временно недоступен"
# Assemble the question/answer UI and start serving it.
demo = gr.Interface(
    fn=predict,
    inputs=gr.Textbox(label="Вопрос к NPC"),
    outputs=gr.Textbox(label="Ответ"),
    title="AI для игры",
)
demo.launch()