import gradio as gr
import os
from huggingface_hub import hf_hub_download
from llama_cpp import Llama

# Directory for storing the model locally inside the Space
model_dir = "./models"
os.makedirs(model_dir, exist_ok=True)

# Model repository and file name
repo_id = "Mykes/simpo_abl_model_epoch_1"
model_filename = "Simpo_Abl_Model_Epoch_1_Q8_0.gguf"

# Download the model if it has not been downloaded yet
model_path = os.path.join(model_dir, model_filename)
if not os.path.exists(model_path):
    print(f"Downloading model {model_filename} from {repo_id}...")
    # local_dir (rather than cache_dir) places the file directly at model_path,
    # so the existence check above also works on later runs
    model_path = hf_hub_download(repo_id=repo_id, filename=model_filename, local_dir=model_dir)
    print(f"Model downloaded to {model_path}")

# Load the model into memory
print("Loading model into memory...")
llm = Llama(model_path=model_path, n_ctx=2048)  # n_ctx is the maximum context length
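# Assumption: on a CPU-only Space, pinning the thread count can speed up
# inference, e.g. Llama(model_path=model_path, n_ctx=2048, n_threads=os.cpu_count())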
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    # Assemble the message history in a format suitable for the model
    messages = [{"role": "system", "content": system_message}]
    for val in history:
        if val[0]:
            messages.append({"role": "user", "content": val[0]})
        if val[1]:
            messages.append({"role": "assistant", "content": val[1]})
    messages.append({"role": "user", "content": message})

    # Build the prompt for the model (GGUF models usually expect a specific format)
    prompt = ""
    for msg in messages:
        if msg["role"] == "system":
            prompt += f"System: {msg['content']}\n"
        elif msg["role"] == "user":
            prompt += f"User: {msg['content']}\n"
        elif msg["role"] == "assistant":
            prompt += f"Assistant: {msg['content']}\n"
    # Generate a response with the model
    response = llm(
        prompt,
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
        stop=["User:", "System:"],  # stop generation when a new turn begins
        stream=True
    )

    # Stream the response back token by token
    full_response = ""
    for chunk in response:
        token = chunk["choices"][0]["text"]
        full_response += token
        yield full_response
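# Alternative sketch (assumption): recent llama-cpp-python builds can apply the
# model's own chat template via create_chat_completion, avoiding the hand-rolled
# prompt format above. Hypothetical, untested drop-in variant of respond():
def respond_chat_template(message, history, system_message, max_tokens, temperature, top_p):
    messages = [{"role": "system", "content": system_message}]
    for user_msg, bot_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if bot_msg:
            messages.append({"role": "assistant", "content": bot_msg})
    messages.append({"role": "user", "content": message})
    full_response = ""
    for chunk in llm.create_chat_completion(
        messages=messages,
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
        stream=True,
    ):
        # Streamed chat chunks carry text under "delta", not "text"
        delta = chunk["choices"][0].get("delta", {})
        if "content" in delta:
            full_response += delta["content"]
            yield full_response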
# Set up the Gradio interface
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
)

if __name__ == "__main__":
    demo.launch()
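# Note (assumption): when hosted as a Hugging Face Space, a requirements.txt
# listing gradio, huggingface_hub, and llama-cpp-python is expected alongside
# this file so the imports above resolve.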