Spaces:
Running
Running
| import gradio as gr | |
| import requests | |
| import json | |
| import os | |
# Look up the system-prompt text for a role defined in system_roles.json.
def load_system_role(role_name):
    """Return the system prompt for *role_name*.

    Unknown names fall back to a generic default-assistant prompt.
    """
    with open('system_roles.json', 'r', encoding='utf-8') as fp:
        role_map = json.load(fp)
    return role_map.get(role_name, "Ты помощник по умолчанию.")
def load_role_names():
    """Return the role names from system_roles.json, in file order."""
    with open('system_roles.json', encoding='utf-8') as fp:
        # Iterating a dict yields its keys, preserving insertion order.
        return list(json.load(fp))
def generate(description, system_role_name, max_tokens):
    """Stream a chat completion for *description* with the chosen system role.

    Generator: yields ``(markdown_text, plain_text)`` pairs with the
    accumulated answer so far as tokens arrive from the server.  Errors are
    reported by yielding an error-message pair instead of raising, so the
    Gradio UI always receives something to display.
    """
    # Empty prompt: emit one empty result so the outputs are cleared.
    if not description:
        yield None, None
        return
    system_role = load_system_role(system_role_name)
    headers = {
        'Content-Type': 'application/json',
    }
    payload = {
        'messages': [{'role': 'system', 'content': system_role}, {'role': 'user', 'content': description}],
        'max_tokens': max_tokens,
        'model': "openai",
        'stream': True
    }
    try:
        # BASE_URL must point at an OpenAI-compatible /chat/completions
        # endpoint.  `with` closes the streaming connection even on error
        # (the original leaked it).
        with requests.post(os.getenv("BASE_URL"), headers=headers, json=payload, stream=True, timeout=200) as response:
            response.raise_for_status()
            full_text = ""
            for chunk in response.iter_lines():
                if not chunk:
                    continue  # keep-alive blank lines between SSE frames
                # SSE frames look like "data: {...}".  Strip only the LEADING
                # prefix — replace() would also corrupt any "data: " occurring
                # inside the generated content itself.
                line = chunk.decode('utf-8').removeprefix("data: ")
                if line == "[DONE]":
                    break
                try:
                    chunk_data = json.loads(line)
                except json.JSONDecodeError:
                    continue  # skip non-JSON frames (comments, partials)
                choices = chunk_data.get('choices')
                if choices:
                    # `content` may be null in the final delta; coerce to "".
                    text_chunk = choices[0].get('delta', {}).get('content') or ""
                    full_text += text_chunk
                    yield full_text, full_text
            if not full_text:
                yield "**Не удалось получить ответ от сервера.**", "Не удалось получить ответ от сервера."
    except requests.exceptions.RequestException as e:
        print(f"Ошибка запроса: {e}")
        yield f"**Ошибка запроса!**\n\n```\n{e}\n```", f"Ошибка запроса!\n\n{e}"
    except Exception as e:
        print(f"Ошибка: {str(e)}")
        yield "Произошла ошибка при генерации", "Произошла ошибка при генерации"
# Fetch the shared AIHub stylesheet and append this app's overrides
# (narrow centered container, centered heading).
_css_response = requests.get("https://neurixyufi-aihub.static.hf.space/styles.css")
css = _css_response.text + ".gradio-container{max-width: 700px !important; margin: 0 auto;} h1{text-align: center;}"
# Role names from the JSON file drive the radio-button choices in the UI.
role_names = load_role_names()
# UI layout: one tab for the prompt + role picker, one for settings,
# then the generate button and the answer area.
with gr.Blocks(css=css) as demo:
    gr.Markdown("# EasyText")
    with gr.Tab("Запрос"):
        with gr.Row():
            prompt_box = gr.Textbox(show_label=True, label="Запрос", lines=3)
        with gr.Row():
            with gr.Accordion(label="Помощник", open=False):
                role_picker = gr.Radio(show_label=True, label="Выберите помощника", interactive=True, choices=role_names, value=role_names[0])
    with gr.Tab("Настройки"):
        with gr.Row():
            token_slider = gr.Slider(show_label=True, label="Максимальное количество токенов", minimum=100, maximum=32000, value=4000, step=1)
    with gr.Row():
        generate_btn = gr.Button("Генерация", variant='primary')
    with gr.Row():
        with gr.Tab("Ответ"):
            # Rendered (markdown) answer plus a plain-text copy in an accordion.
            rendered_output = gr.Markdown(show_label=False, value="**Здравствуйте!** Чем я могу Вам помочь сегодня?", container=True)
            with gr.Accordion(label="Без форматирования", open=False):
                raw_output = gr.Textbox(show_label=False, value="**Здравствуйте!** Чем я могу Вам помочь сегодня?", lines=3)
    generate_btn.click(generate, inputs=[prompt_box, role_picker, token_slider], outputs=[rendered_output, raw_output])
demo.queue(max_size=250, api_open=False).launch()