import spaces
import json
import os
import glob
import subprocess

import gradio as gr
from huggingface_hub import hf_hub_download, list_repo_files
from llama_cpp import Llama
from llama_cpp_agent import LlamaCppAgent, MessagesFormatterType
from llama_cpp_agent.providers import LlamaCppPythonProvider
from llama_cpp_agent.chat_history import BasicChatHistory
from llama_cpp_agent.chat_history.messages import Roles

from model_loader import MODEL_DROPDOWN_CHOICES, MODEL_FILE_MAPPING

# --- Global configuration and variables ---
# Module-level cache: the loaded Llama instance and the path of the model
# file it was loaded from, so repeated calls can reuse the same weights.
llm = None
llm_model = None

css = """.bubble-wrap { padding-top: calc(var(--spacing-xl) * 3) !important;}.message-row { justify-content: space-evenly !important; width: 100% !important; max-width: 100% !important; margin: calc(var(--spacing-xl)) 0 !important; padding: 0 calc(var(--spacing-xl) * 3) !important;}.flex-wrap.user { border-bottom-right-radius: var(--radius-lg) !important;}.flex-wrap.bot { border-bottom-left-radius: var(--radius-lg) !important;}.message.user{ padding: 10px;}.message.bot{ text-align: right; width: 100%; padding: 10px; border-radius: 10px;}.message-bubble-border { border-radius: 6px !important;}.message-buttons { justify-content: flex-end !important;}.message-buttons-left { align-self: end !important;}.message-buttons-bot, .message-buttons-user { right: 10px !important; left: auto !important; bottom: 2px !important;}.dark.message-bubble-border { border-color: #343140 !important;}.dark.user { background: #1e1c26 !important;}.dark.assistant.dark, .dark.pending.dark { background: #16141c !important;}"""


def get_messages_formatter_type(model_name):
    """Pick the chat-message formatter matching the model family in *model_name*.

    Falls back to CHATML (with a console note) when no known family
    substring is found.
    """
    if "Llama" in model_name:
        return MessagesFormatterType.LLAMA_3
    if "Mistral" in model_name:
        return MessagesFormatterType.MISTRAL
    if "GLM" in model_name or "Granite" in model_name:
        return MessagesFormatterType.CHATML
    print("Formatter type not found, trying default")
    return MessagesFormatterType.CHATML

# ----------------------------------------------------------------------
## Main Response Function for ChatInterface # ---------------------------------------------------------------------- @spaces.GPU(duration=90) def respond( message, history: list[dict[str, str]], selected_model_name, system_message, max_tokens, temperature, top_p, top_k, repeat_penalty, ): global llm global llm_model model_file_path = MODEL_FILE_MAPPING.get(selected_model_name) if not model_file_path: return f"Error: Model file for '{selected_model_name}' not found. Has the download completed?" chat_template = get_messages_formatter_type(selected_model_name) if llm is None or llm_model != model_file_path: print(f"Loading new model: {model_file_path}") try: llm = Llama( model_path=model_file_path, flash_attn=True, n_gpu_layers=81, n_batch=1024, n_ctx=8192, ) llm_model = model_file_path except Exception as e: return f"Error during loading of Llama model '{selected_model_name}' ({model_file_path}): {e}" provider = LlamaCppPythonProvider(llm) agent = LlamaCppAgent( provider, system_prompt=f"{system_message}", predefined_messages_formatter_type=chat_template, debug_output=True ) settings = provider.get_provider_default_settings() settings.temperature = temperature settings.top_k = top_k settings.top_p = top_p settings.max_tokens = max_tokens settings.repeat_penalty = repeat_penalty settings.stream = True messages = BasicChatHistory() for msn in history: role = Roles.user if msn.get('role') == 'user' else Roles.assistant messages.add_message({'role': role, 'content': msn.get('content', '')}) stream = agent.get_chat_response( message, llm_sampling_settings=settings, chat_history=messages, returns_streaming_generator=True, print_output=False ) outputs = "" for output in stream: outputs += output yield outputs PLACEHOLDER = """
""" # --- Gradio Components (Dynamically populated) --- default_model = MODEL_DROPDOWN_CHOICES[0] if MODEL_DROPDOWN_CHOICES else None model_dropdown = gr.Dropdown( choices=MODEL_DROPDOWN_CHOICES, value=default_model, label="Model" ) system_textbox = gr.Textbox(value="You are a deep thinking AI, you may use extremely long chains of thought to deeply consider the problem and deliberate with yourself via systematic reasoning processes to help come to a correct solution prior to answering. You should enclose your thoughts and internal monologue inside