TobDeBer committed
Commit 1d9807e · verified · 1 Parent(s): b8d5174

Update app.py

Files changed (1)
  1. app.py +113 -94
app.py CHANGED
@@ -1,31 +1,34 @@
  import spaces
  import json
- import subprocess
  import os
+ import subprocess  # kept in case it is needed elsewhere, even though it is no longer used directly here
  from llama_cpp import Llama
  from llama_cpp_agent import LlamaCppAgent, MessagesFormatterType
  from llama_cpp_agent.providers import LlamaCppPythonProvider
  from llama_cpp_agent.chat_history import BasicChatHistory
  from llama_cpp_agent.chat_history.messages import Roles
  import gradio as gr
- # Important imports for the dynamic downloads
  from huggingface_hub import hf_hub_download, list_repo_files

- # --- Global variables ---
+ # --- Global configuration and variables ---
  llm = None
  llm_model = None
  MODEL_CONFIG_FILE = "models.json"
  DEFAULT_LOCAL_DIR = "./models"
  MODEL_DROPDOWN_CHOICES = []
- MODEL_FILE_MAPPING = {} # maps the display name to the actual file path (or the first part in the folder)
+ # Map: display name -> actual path to the first model file
+ MODEL_FILE_MAPPING = {}

  # Make sure the models directory exists
  os.makedirs(DEFAULT_LOCAL_DIR, exist_ok=True)


- # --- Helper function for downloading files/folders ---
+ # ----------------------------------------------------------------------
+ ## Model downloads and config parsing
+ # ----------------------------------------------------------------------
+
  def download_models():
- """Reads models.json and downloads all models."""
+ """Reads models.json, downloads the models, and fills the global map."""
  global MODEL_DROPDOWN_CHOICES
  global MODEL_FILE_MAPPING
@@ -33,21 +36,24 @@ def download_models():
  with open(MODEL_CONFIG_FILE, 'r') as f:
  config = json.load(f)
  except FileNotFoundError:
- print(f"ERROR: {MODEL_CONFIG_FILE} not found.")
+ print(f"❌ ERROR: configuration file '{MODEL_CONFIG_FILE}' not found.")
+ # Add a placeholder entry in case the file is missing
+ MODEL_DROPDOWN_CHOICES.append("ERROR: models.json missing")
  return
- except json.JSONDecodeError:
- print(f"ERROR: {MODEL_CONFIG_FILE} is not valid JSON.")
+ except json.JSONDecodeError as e:
+ print(f"❌ ERROR: {MODEL_CONFIG_FILE} is not valid JSON. Error: {e}")
+ MODEL_DROPDOWN_CHOICES.append("ERROR: models.json invalid")
  return

- # Fetch the local target directory from the JSON file (defaults to './models')
+ # Fetch the local target directory from the JSON file
  local_dir = config.get('local_dir', DEFAULT_LOCAL_DIR)
  if not os.path.exists(local_dir):
  os.makedirs(local_dir, exist_ok=True)
  print(f"Created local directory {local_dir}.")

- models_list = [model for key, model in config.items() if key != 'local_dir']
+ models_list = config.get('models', [])

- print(f"Starting the download of {len(models_list)} models...")
+ print(f"Starting the download of {len(models_list)} configured models...")

  for model_entry in models_list:
  name = model_entry.get('name')
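For reference, a minimal models.json that the new parsing logic would accept: a top-level local_dir plus a models list whose entries carry name, repo_id, and either file_name or folder_name. This is a sketch; the repo and file names are hypothetical placeholders:

    # Sketch: write a models.json in the shape download_models() reads.
    import json

    config = {
        "local_dir": "./models",
        "models": [
            {
                "name": "Example 7B (single file)",       # display name for the dropdown
                "repo_id": "some-org/example-7b-GGUF",    # hypothetical repo
                "file_name": "example-7b.Q4_K_M.gguf"     # hypothetical single file
            },
            {
                "name": "Example 70B (split parts)",
                "repo_id": "some-org/example-70b-GGUF",
                "folder_name": "UD-Q3_K_XL"               # folder holding the split .gguf parts
            }
        ]
    }

    with open("models.json", "w") as f:
        json.dump(config, f, indent=2)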
@@ -56,88 +62,101 @@ def download_models():
  folder_name = model_entry.get('folder_name')

  if not name or not repo_id:
- print(f"WARNING: entry skipped because 'name' or 'repo_id' is missing: {model_entry}")
+ print(f"⚠️ WARNING: skipping entry without 'name' or 'repo_id': {model_entry}")
  continue

+ # Add the display name to the dropdown list
  MODEL_DROPDOWN_CHOICES.append(name)

- # Case 1: a single file is given
+ # Case 1: single file (file_name)
  if file_name:
- print(f"Downloading single file for '{name}': {file_name}")
- hf_hub_download(
- repo_id=repo_id,
- filename=file_name,
- local_dir=local_dir
- )
- # Store the actual file name that llama-cpp needs
- MODEL_FILE_MAPPING[name] = os.path.join(os.path.basename(local_dir), file_name)
-
+ print(f" -> Downloading single file for '{name}': {file_name}")
+ try:
+ hf_hub_download(
+ repo_id=repo_id,
+ filename=file_name,
+ local_dir=local_dir
+ )
+ # Store the full path (relative to the install location)
+ MODEL_FILE_MAPPING[name] = os.path.join(local_dir, file_name)
+ print(f" -> Successfully downloaded: {name}")
+ except Exception as e:
+ print(f"❌ ERROR downloading {name} ({file_name}): {e}")

- # Case 2: a folder is given (possibly several parts)
+ # Case 2: whole folder (folder_name)
  elif folder_name:
- print(f"Downloading folder for '{name}': {folder_name}")
+ print(f" -> Downloading folder for '{name}': {folder_name}")

- # List all files in the repo
- all_files = list_repo_files(repo_id=repo_id)
-
- # Keep only the files inside the requested folder
- files_in_folder = sorted([
- filename
- for filename in all_files
- if filename.startswith(f"{folder_name}/")
- ])
+ try:
+ all_files = list_repo_files(repo_id=repo_id)
+ files_to_download = sorted([
+ filename
+ for filename in all_files
+ if filename.startswith(f"{folder_name}/")
+ ])
+ except Exception as e:
+ print(f"❌ ERROR listing the files in repo {repo_id}: {e}")
+ continue

- if not files_in_folder:
- print(f"WARNING: no files were found in folder '{folder_name}' of repo '{repo_id}'.")
+ if not files_to_download:
+ print(f"⚠️ WARNING: no files were found in folder '{folder_name}'.")
  continue

- # Download each file individually
- for filename in files_in_folder:
- print(f" -> Downloading file: {filename}")
- hf_hub_download(
- repo_id=repo_id,
- filename=filename,
- local_dir=local_dir
- )
-
- # For the llama-cpp initialization, store only the path to the first part
- first_part_filename = files_in_folder[0]
- # The path llama-cpp expects has to be relative to the install location,
- # hence we use: <local_dir>/<first_part_filename>
- MODEL_FILE_MAPPING[name] = os.path.join(os.path.basename(local_dir), first_part_filename)
+ first_part_filename = files_to_download[0]
+
+ # Download each file individually
+ for filename in files_to_download:
+ print(f" - Downloading {filename}")
+ try:
+ hf_hub_download(
+ repo_id=repo_id,
+ filename=filename,
+ local_dir=local_dir
+ )
+ except Exception as e:
+ print(f"❌ ERROR downloading {filename}: {e}")
+
+ # For the llama.cpp initialization, store the path to the first part
+ # Path: <local_dir>/<folder_name>/<first_file_part>
+ MODEL_FILE_MAPPING[name] = os.path.join(local_dir, first_part_filename)
+ print(f" -> Successfully downloaded: {name}. First part: {MODEL_FILE_MAPPING[name]}")

  else:
- print(f"WARNING: neither 'file_name' nor 'folder_name' is given for '{name}'. Skipped.")
+ print(f"⚠️ WARNING: neither 'file_name' nor 'folder_name' is given for '{name}'. Skipped.")

- print("All configured models have been downloaded.")
+ print("--- Download run finished. ---")

- # --- Call the new download function here ---
+ # --- Run the global downloads once ---
  download_models()
- # ------------------------------------------------
+ # ----------------------------------------

+
- # --- CSS styling (left unchanged) ---
+ # --- CSS styling (unchanged) ---
  css = """.bubble-wrap { padding-top: calc(var(--spacing-xl) * 3) !important;}.message-row { justify-content: space-evenly !important; width: 100% !important; max-width: 100% !important; margin: calc(var(--spacing-xl)) 0 !important; padding: 0 calc(var(--spacing-xl) * 3) !important;}.flex-wrap.user { border-bottom-right-radius: var(--radius-lg) !important;}.flex-wrap.bot { border-bottom-left-radius: var(--radius-lg) !important;}.message.user{ padding: 10px;}.message.bot{ text-align: right; width: 100%; padding: 10px; border-radius: 10px;}.message-bubble-border { border-radius: 6px !important;}.message-buttons { justify-content: flex-end !important;}.message-buttons-left { align-self: end !important;}.message-buttons-bot, .message-buttons-user { right: 10px !important; left: auto !important; bottom: 2px !important;}.dark.message-bubble-border { border-color: #343140 !important;}.dark.user { background: #1e1c26 !important;}.dark.assistant.dark, .dark.pending.dark { background: #16141c !important;}"""

- # --- Helper function for the message formatter type (left unchanged) ---
+ # --- Helper function for the message formatter type (unchanged) ---
  def get_messages_formatter_type(model_name):
- print(f"getting type for model: {model_name}")
+ # Now keys off the display name
  if "Llama" in model_name:
  return MessagesFormatterType.LLAMA_3
  elif "Mistral" in model_name:
  return MessagesFormatterType.MISTRAL
- elif "unsloth" in model_name:
- # The display name has to be used here, since 'model_name' is the JSON entry
- return MessagesFormatterType.CHATML
+ # ChatML is a reasonable default for these families when no specific formatter is known
+ elif "GLM" in model_name or "Granite" in model_name:
+ return MessagesFormatterType.CHATML
  else:
- print("formatter type not found, trying default")
+ print("Formatter type not found, trying default")
  return MessagesFormatterType.CHATML

- # --- Main response function for ChatInterface (WITH MESSAGES FIX) ---
+ # ----------------------------------------------------------------------
+ ## Main response function for ChatInterface
+ # ----------------------------------------------------------------------
+
  @spaces.GPU(duration=45)
  def respond(
  message,
- history: list[dict[str, str]], # now expects dictionaries (the 'messages' type)
- selected_model_name, # passed in from the dropdown (the 'name' from the JSON)
+ history: list[dict[str, str]],
+ selected_model_name,
  system_message,
  max_tokens,
  temperature,
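A quick sanity check on the corrected path handling: the old code joined os.path.basename(local_dir) with the file name, which only resolves if the process happens to run from the parent of the models directory; the new code joins local_dir itself. A small sketch with hypothetical file names:

    import os

    local_dir = "./models"

    # Case 1: a single file lands at <local_dir>/<file_name>
    print(os.path.join(local_dir, "example-7b.Q4_K_M.gguf"))
    # -> ./models/example-7b.Q4_K_M.gguf

    # Case 2: list_repo_files() returns repo-relative paths, so sorting puts
    # part 00001 first, which is the part a split GGUF is opened with.
    parts = sorted([
        "UD-Q3_K_XL/example-00002-of-00002.gguf",
        "UD-Q3_K_XL/example-00001-of-00002.gguf",
    ])
    print(os.path.join(local_dir, parts[0]))
    # -> ./models/UD-Q3_K_XL/example-00001-of-00002.gguf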
@@ -147,31 +166,31 @@ def respond(
  ):
  global llm
  global llm_model
- global MODEL_FILE_MAPPING

- # 1. Fetch the actual file path
+ # 1. Look up the actual file path via the mapping
  model_file_path = MODEL_FILE_MAPPING.get(selected_model_name)
+
  if not model_file_path:
- return f"Error: model file for '{selected_model_name}' not found. Please check models.json and the download directory."
+ return f"Error: model file for '{selected_model_name}' not found. Did the download fail?"

- # 2. Determine the formatter type based on the display name
  chat_template = get_messages_formatter_type(selected_model_name)

- # 3. Only initialize the Llama object if it is new or a different model
- # NOTE: 'model_file_path' is now the correct file path
+ # 2. Re-initialize the Llama object only when necessary
  if llm is None or llm_model != model_file_path:
  print(f"Loading new model: {model_file_path}")
- llm = Llama(
- # We pass the relative path (e.g. models/UD-Q3_K_XL/...)
- model_path=model_file_path,
- flash_attn=True,
- n_gpu_layers=81,
- n_batch=1024,
- n_ctx=8192,
- )
- llm_model = model_file_path
+ try:
+ llm = Llama(
+ model_path=model_file_path,
+ flash_attn=True,
+ n_gpu_layers=81,
+ n_batch=1024,
+ n_ctx=8192,
+ )
+ llm_model = model_file_path
+ except Exception as e:
+ return f"Error loading Llama model '{selected_model_name}' ({model_file_path}): {e}"

- # 4. Agent and sampling settings (unchanged, but now uses the new 'llm' object)
+ # 3. Initialize the agent and apply the settings
  provider = LlamaCppPythonProvider(llm)
  agent = LlamaCppAgent(
  provider,
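The guard above keeps a single Llama instance alive across requests and only reloads when the dropdown selection changes, which matters because constructing Llama loads gigabytes of weights. The same caching pattern in isolation, with a hypothetical stand-in class instead of the real Llama:

    # Minimal sketch of the module-level model cache used in respond().
    _model = None
    _model_path = None

    class FakeModel:
        """Hypothetical stand-in for llama_cpp.Llama; only records its path."""
        def __init__(self, path):
            self.path = path

    def get_model(path):
        global _model, _model_path
        if _model is None or _model_path != path:  # reload only on change
            _model = FakeModel(path)               # expensive in the real app
            _model_path = path
        return _model

    assert get_model("a.gguf") is get_model("a.gguf")  # second call hits the cache
    assert get_model("b.gguf").path == "b.gguf"        # switching paths reloads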
@@ -188,19 +207,11 @@ def respond(
  settings.repeat_penalty = repeat_penalty
  settings.stream = True

+ # 4. Prepare the chat history
  messages = BasicChatHistory()
  for msn in history:
- if msn.get('role') == 'user':
- role = Roles.user
- elif msn.get('role') == 'assistant':
- role = Roles.assistant
- else:
- continue
- message_dict = {
- 'role': role,
- 'content': msn.get('content', '')
- }
- messages.add_message(message_dict)
+ role = Roles.user if msn.get('role') == 'user' else Roles.assistant
+ messages.add_message({'role': role, 'content': msn.get('content', '')})

  # 5. Generate the streamed response
  stream = agent.get_chat_response(
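One behavioral note on the simplified history loop: the old code skipped entries whose role was neither 'user' nor 'assistant', while the new one-liner maps every non-'user' entry to Roles.assistant. With history in Gradio's 'messages' format, the conversion works on dicts like these (transcript contents hypothetical):

    # 'messages'-style history as gr.ChatInterface passes it:
    history = [
        {"role": "user", "content": "Hello!"},
        {"role": "assistant", "content": "Hi, how can I help?"},
    ]
    for msn in history:
        role = "user" if msn.get("role") == "user" else "assistant"
        print(role, "->", msn.get("content", ""))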
@@ -216,14 +227,18 @@ def respond(
  outputs += output
  yield outputs

- # --- HTML placeholder for the chatbot (kept as a string) ---
+ # --- HTML placeholder for the chatbot (unchanged) ---
  PLACEHOLDER = """<div class="message-bubble-border" style="display:flex; max-width: 600px; border-radius: 6px; border-width: 1px; border-color: #e5e7eb; box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1); backdrop-filter: blur(10px);"> <div style="padding: .5rem 1.5rem;display: flex;flex-direction: column;justify-content: space-evenly;"> <h2 style="text-align: left; font-size: 1.5rem; font-weight: 700; margin-bottom: 0.5rem;">llama-cpp-agent</h2> <p style="text-align: left; font-size: 16px; line-height: 1.5; margin-bottom: 15px;">The llama-cpp-agent framework based on llama_cpp_python simplifies interactions with Large Language Models (LLMs). Here you can try out a range of models via the basic chat interface. For advanced features check out the discord or github link below.</p> <div style="display: flex; justify-content: space-between; align-items: center;"> <div style="display: flex; justify-content: flex-end; align-items: center;"> <a href="https://discord.gg/fgr5RycPFP" target="_blank" rel="noreferrer" style="padding: .5rem;"> <svg width="24" height="24" fill="currentColor" xmlns="http://www.w3.org/2000/svg" viewBox="0 5 30.67 23.25"> <title>Discord</title> <path d="M26.0015 6.9529C24.0021 6.03845 21.8787 5.37198 19.6623 5C19.3833 5.48048 19.0733 6.13144 18.8563 6.64292C16.4989 6.30193 14.1585 6.30193 11.8336 6.64292C11.6166 6.13144 11.2911 5.48048 11.0276 5C8.79575 5.37198 6.67235 6.03845 4.6869 6.9529C0.672601 12.8736 -0.41235 18.6548 0.130124 24.3585C2.79599 26.2959 5.36889 27.4739 7.89682 28.2489C8.51679 27.4119 9.07477 26.5129 9.55525 25.5675C8.64079 25.2265 7.77283 24.808 6.93587 24.312C7.15286 24.1571 7.36986 23.9866 7.57135 23.8161C12.6241 26.1255 18.0969 26.1255 23.0876 23.8161C23.3046 23.9866 23.5061 24.1571 23.7231 24.312C22.8861 24.808 22.0182 25.2265 21.1037 25.5675C21.5842 26.5129 22.1422 27.4119 22.7621 28.2489C25.2885 27.4739 27.8769 26.2959 30.5288 24.3585C31.1952 17.7559 29.4733 12.0212 26.0015 6.9529ZM10.2527 20.8402C8.73376 20.8402 7.49382 19.4608 7.49382 17.7714C7.49382 16.082 8.70276 14.7025 10.2527 14.7025C11.7871 14.7025 13.0425 16.082 13.0115 17.7714C13.0115 19.4608 11.7871 20.8402 10.2527 20.8402ZM20.4373 20.8402C18.9183 20.8402 17.6768 19.4608 17.6768 17.7714C17.6768 16.082 18.8873 14.7025 20.4373 14.7025C21.9717 14.7025 23.2271 16.082 23.1961 17.7714C23.1961 19.4608 21.9872 20.8402 20.4373 20.8402Z"></path> </svg> </a> <a href="https://github.com/Maximilian-Winter/llama-cpp-agent" target="_blank" rel="noreferrer" style="padding: .5rem;"> <svg width="24" height="24" fill="currentColor" viewBox="3 3 18 18"> <title>GitHub</title> <path d="M12 3C7.0275 3 3 7.12937 3 12.2276C3 16.3109 5.57625 19.7597 9.15374 20.9824C9.60374 21.0631 9.77249 20.7863 9.77249 20.5441C9.77249 20.3249 9.76125 19.5982 9.76125 18.8254C7.5 19.2522 6.915 18.2602 6.735 17.7412C6.63375 17.4759 6.19499 16.6569 5.8125 16.4378C5.4975 16.2647 5.0475 15.838 5.80124 15.8264C6.51 15.8149 7.01625 16.4954 7.18499 16.7723C7.99499 18.1679 9.28875 17.7758 9.80625 17.5335C9.885 16.9337 10.1212 16.53 10.38 16.2993C8.3775 16.0687 6.285 15.2728 6.285 11.7432C6.285 10.7397 6.63375 9.9092 7.20749 9.26326C7.1175 9.03257 6.8025 8.08674 7.2975 6.81794C7.2975 6.81794 8.05125 6.57571 9.77249 7.76377C10.4925 7.55615 11.2575 7.45234 12.0225 7.45234C12.7875 7.45234 13.5525 7.55615 14.2725 7.76377C15.9937 6.56418 16.7475 6.81794 16.7475 6.81794C17.2424 8.08674 16.9275 9.03257 16.8375 9.26326C17.4113 9.9092 17.76 10.7281 17.76 11.7432C17.76 15.2843 15.6563 16.0687 13.6537 16.2993C13.98 16.5877 14.2613 17.1414 14.2613 18.0065C14.2613 19.2407 14.25 20.2326 14.25 20.5441C14.25 20.7863 14.4188 21.0746 14.8688 20.9824C16.6554 20.364 18.2079 19.1866 19.3078 17.6162C20.4077 16.0457 20.9995 14.1611 21 12.2276C21 7.12937 16.9725 3 12 3Z"></path> </svg> </a> </div> </div> </div></div>"""

+
  # --- Gradio components (populated dynamically) ---
+
+ # Take the first element of the dynamically populated list as the default
+ default_model = MODEL_DROPDOWN_CHOICES[0] if MODEL_DROPDOWN_CHOICES else None
+
  model_dropdown = gr.Dropdown(
- # Now use the dynamically generated names
  choices=MODEL_DROPDOWN_CHOICES,
- value=MODEL_DROPDOWN_CHOICES[0] if MODEL_DROPDOWN_CHOICES else None, # selects the first entry if available
+ value=default_model,
  label="Model"
  )
  system_textbox = gr.Textbox(value="You are a deep thinking AI, you may use extremely long chains of thought to deeply consider the problem and deliberate with yourself via systematic reasoning processes to help come to a correct solution prior to answering. You should enclose your thoughts and internal monologue inside <think> </think> tags, and then provide your solution or response to the problem.", label="System message")
@@ -284,4 +299,8 @@ demo = gr.ChatInterface(

  # --- Launch the app ---
  if __name__ == "__main__":
- demo.launch()
+ # Make sure the dropdown has a default value before launching
+ if default_model:
+ demo.launch()
+ else:
+ print("Could not load or configure any models. The app will not be started.")