Update app.py
app.py
CHANGED
@@ -2,27 +2,18 @@ import gradio as gr
 from llama_cpp import Llama
 
 # --- 1. LOAD MODEL ---
-# Load the model globally so it does not have to be reloaded for every message.
 llm = Llama.from_pretrained(
     repo_id="simonper/Llama-3.2-1B-bnb-4bit_finetome-100k_gguf_3epochs_4bit",
     filename="Llama-3.2-1B.Q4_K_M.gguf",
-    n_ctx=2048,
-    n_threads=2,
+    n_ctx=2048,
+    n_threads=2,  # Optimized for the Hugging Face CPU Basic tier
 )
 
 # --- 2. HELPER: BUILD PROMPT ---
 def build_prompt(system_message: str, history: list[dict], user_message: str) -> str:
-    """
-    Converts the chat history (a list of dicts) into a single
-    string that the Llama model can understand.
-    """
     lines = []
-
-    # System message at the start
     if system_message:
         lines.append(f"System: {system_message}\n")
-
-    # Chat history
     for turn in history:
         role = turn["role"]
         content = turn["content"]
@@ -30,64 +21,92 @@ def build_prompt(system_message: str, history: list[dict], user_message: str) -> str:
             lines.append(f"User: {content}")
         elif role == "assistant":
             lines.append(f"Assistant: {content}")
-
-    # Current user message
     lines.append(f"User: {user_message}")
     lines.append("Assistant:")
-
     return "\n".join(lines)
 
-
 # --- 3. RESPOND FUNCTION ---
 def respond(
     message,
     history: list[dict[str, str]],
-
+    system_message_dummy,
     max_tokens,
     temperature,
     top_p,
+    repetition_penalty,  # <--- NEW PARAMETER
+    style_mode,
 ):
-
-
+
+    # --- A. Style logic ---
+    base_instruction = (
+        "Du bist ein hilfreicher Assistent zur Textumformulierung. "
+        "Gib die folgende Eingabe des Nutzers inhaltlich identisch zurück, ändere nur die Formulierungen."
+    )
+
+    context = ""
+    if style_mode == "Professionelle E-Mail":
+        context = "Formuliere die Eingabe extrem höflich und professionell (Business-Deutsch)."
+    elif style_mode == "Gen-Z / Jugendsprache":
+        context = "Formuliere die Eingabe in Jugendsprache (nutze Wörter wie 'cringe', 'wild', 'sus', Emojis)."
+    elif style_mode == "Shakespeare":
+        context = "Formuliere die Eingabe in altmodischem, poetischem Deutsch."
+    elif style_mode == "Passiv-Aggressiv":
+        context = "Formuliere die Eingabe höflich, aber unterschwellig passiv-aggressiv."
+    elif style_mode == "Lustig/Ironisch":
+        context = "Formuliere die Eingabe lustig und ironisch."
+    else:
+        context = "Antworte ganz normal."
+
+    final_system = f"{base_instruction} {context} Verändere nicht die Bedeutung."
 
-    #
+    # --- B. Build the prompt ---
+    prompt = build_prompt(final_system, history, message)
+
+    # --- C. Call the model ---
     output = llm(
         prompt,
         max_tokens=int(max_tokens),
         temperature=float(temperature),
         top_p=float(top_p),
-
+        repeat_penalty=float(repetition_penalty),  # <--- USED HERE
+        stop=["User:", "System:"],
         echo=False
     )
 
-    # Extract the reply
     reply = output["choices"][0]["text"].strip()
     return reply
 
 
-# --- 4.
+# --- 4. GUI SETUP ---
 chatbot = gr.ChatInterface(
     respond,
     type="messages",
     additional_inputs=[
-        gr.Textbox(value="
-
-
-        gr.Slider(
-
-
-
-
-
-        ),
+        gr.Textbox(value="", label="System Prompt (Hidden)", visible=False),
+
+        # Existing sliders
+        gr.Slider(minimum=1, maximum=2048, value=512, label="Max Tokens"),
+        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, label="Temperature"),
+        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, label="Top-p"),
+
+        # NEW SLIDER
+        # Default 1.0 = no penalty. 1.2 is usually a good value for Llama.
+        gr.Slider(minimum=1.0, maximum=2.0, value=1.2, step=0.05, label="Repetition Penalty"),
+
+        # Style dropdown
+        gr.Dropdown(
+            choices=["Normal", "Professionelle E-Mail", "Gen-Z / Jugendsprache", "Shakespeare", "Passiv-Aggressiv", "Lustig/Ironisch"],
+            value="Normal",
+            label="Wähle den Stil / Tonfall"
+        )
     ],
 )
 
 with gr.Blocks() as demo:
+    gr.Markdown("# 🎭 Der Text-Wandler")
     with gr.Sidebar():
         gr.LoginButton()
     chatbot.render()
 
-
 if __name__ == "__main__":
     demo.launch()
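For reference, this is the plain-text transcript format that build_prompt produces, and that the newly added stop=["User:", "System:"] strings key off so generation halts instead of inventing further turns. A standalone copy of the helper, runnable without the model or Gradio; the sample inputs are made up:

# Standalone copy of the helper, runnable without the model or Gradio.
def build_prompt(system_message: str, history: list[dict], user_message: str) -> str:
    lines = []
    if system_message:
        lines.append(f"System: {system_message}\n")
    for turn in history:
        if turn["role"] == "user":
            lines.append(f"User: {turn['content']}")
        elif turn["role"] == "assistant":
            lines.append(f"Assistant: {turn['content']}")
    lines.append(f"User: {user_message}")
    lines.append("Assistant:")
    return "\n".join(lines)

demo_history = [
    {"role": "user", "content": "Hi"},
    {"role": "assistant", "content": "Hallo!"},
]
print(build_prompt("Sei knapp.", demo_history, "Wie spät ist es?"))
# System: Sei knapp.
#
# User: Hi
# Assistant: Hallo!
# User: Wie spät ist es?
# Assistant: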
elif style_mode == "Shakespeare":
|
| 52 |
+
context = "Formuliere die Eingabe in altmodischem, poetischem Deutsch."
|
| 53 |
+
elif style_mode == "Passiv-Aggressiv":
|
| 54 |
+
context = "Formuliere die Eingabe höflich, aber unterschwellig passiv-aggressiv."
|
| 55 |
+
elif style_mode == "Lustig/Ironisch":
|
| 56 |
+
context = "Formuliere die Eingabe lustig und ironisch."
|
| 57 |
+
else:
|
| 58 |
+
context = "Antworte ganz normal."
|
| 59 |
+
|
| 60 |
+
final_system = f"{base_instruction} {context} Verändere nicht die Bedeutung."
|
| 61 |
|
| 62 |
+
# --- B. Prompt bauen ---
|
| 63 |
+
prompt = build_prompt(final_system, history, message)
|
| 64 |
+
|
| 65 |
+
# --- C. Modell aufrufen ---
|
| 66 |
output = llm(
|
| 67 |
prompt,
|
| 68 |
max_tokens=int(max_tokens),
|
| 69 |
temperature=float(temperature),
|
| 70 |
top_p=float(top_p),
|
| 71 |
+
repeat_penalty=float(repetition_penalty), # <--- HIER WIRD ER GENUTZT
|
| 72 |
+
stop=["User:", "System:"],
|
| 73 |
echo=False
|
| 74 |
)
|
| 75 |
|
|
|
|
| 76 |
reply = output["choices"][0]["text"].strip()
|
| 77 |
return reply
|
| 78 |
|
| 79 |
|
| 80 |
+
# --- 4. GUI SETUP ---
|
| 81 |
chatbot = gr.ChatInterface(
|
| 82 |
respond,
|
| 83 |
type="messages",
|
| 84 |
additional_inputs=[
|
| 85 |
+
gr.Textbox(value="", label="System Prompt (Hidden)", visible=False),
|
| 86 |
+
|
| 87 |
+
# Bestehende Slider
|
| 88 |
+
gr.Slider(minimum=1, maximum=2048, value=512, label="Max Tokens"),
|
| 89 |
+
gr.Slider(minimum=0.1, maximum=4.0, value=0.7, label="Temperature"),
|
| 90 |
+
gr.Slider(minimum=0.1, maximum=1.0, value=0.95, label="Top-p"),
|
| 91 |
+
|
| 92 |
+
# NEUER SLIDER
|
| 93 |
+
# Standard 1.0 = Keine Strafe. 1.2 ist meist ein guter Wert für Llama.
|
| 94 |
+
gr.Slider(minimum=1.0, maximum=2.0, value=1.2, step=0.05, label="Repetition Penalty"),
|
| 95 |
+
|
| 96 |
+
# Style Dropdown
|
| 97 |
+
gr.Dropdown(
|
| 98 |
+
choices=["Normal", "Professionelle E-Mail", "Gen-Z / Jugendsprache", "Shakespeare", "Passiv-Aggressiv", "Lustig/Ironisch"],
|
| 99 |
+
value="Normal",
|
| 100 |
+
label="Wähle den Stil / Tonfall"
|
| 101 |
+
)
|
| 102 |
],
|
| 103 |
)
|
| 104 |
|
| 105 |
with gr.Blocks() as demo:
|
| 106 |
+
gr.Markdown("# 🎭 Der Text-Wandler")
|
| 107 |
with gr.Sidebar():
|
| 108 |
gr.LoginButton()
|
| 109 |
chatbot.render()
|
| 110 |
|
|
|
|
| 111 |
if __name__ == "__main__":
|
| 112 |
demo.launch()
|
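And a hypothetical local smoke test for the updated respond signature, calling it the way gr.ChatInterface would. This harness is not part of the commit; it assumes llama-cpp-python and gradio are installed and that importing app can download the GGUF file and load the model:

# Hypothetical harness, not part of the commit. Importing app loads the model,
# so the first run needs network access to fetch the GGUF file.
from app import respond

history = [
    {"role": "user", "content": "Hallo!"},
    {"role": "assistant", "content": "Hallo, wie kann ich helfen?"},
]

reply = respond(
    "Ich komme heute später ins Büro.",  # message to be restyled
    history,
    "",                        # system_message_dummy (hidden Textbox value)
    256,                       # max_tokens
    0.7,                       # temperature
    0.95,                      # top_p
    1.2,                       # repetition_penalty -> llama.cpp repeat_penalty
    "Professionelle E-Mail",   # style_mode
)
print(reply)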