Spaces:
Runtime error
Runtime error
File size: 4,092 Bytes
017e518 d3aa58e 017e518 0a1fb0f 7844968 0a1fb0f a72c2cf a0a0374 a72c2cf 0a72678 7844968 a72c2cf 017e518 0a1fb0f 017e518 cd94000 017e518 cd94000 017e518 3aa5f94 017e518 d1d1e78 017e518 d1d1e78 017e518 d1d1e78 017e518 d1d1e78 017e518 2d827c6 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 |
import gradio as gr
from huggingface_hub import InferenceClient
import os
# German LLM configuration
# NOTE(review): the API token is read from an env var literally named
# "tomoniaccess" — presumably a Hugging Face access token stored as a
# Spaces secret; confirm the variable name is intended.
HF_TOKEN = os.getenv("tomoniaccess")
# Chat model served through the Hugging Face Inference API.
current_model = "HuggingFaceH4/zephyr-7b-beta"
client = InferenceClient(model=current_model, token=HF_TOKEN)
# Module-level history list; not referenced anywhere in the visible file —
# possibly vestigial from an earlier multi-turn implementation.
conversation_history = []
def enhanced_chat_response(user_input, max_tokens, temperature, top_p):
    """Generate one streamed persona reply for a single user message.

    Args:
        user_input: The trainee's message text.
        max_tokens: Requested response length (hard-capped at 100 below).
        temperature: Sampling temperature passed to the model.
        top_p: Nucleus-sampling parameter passed to the model.

    Returns:
        A 3-tuple matching the Gradio event ``outputs`` list
        (cleared input box, bot response text, formatted chat display).
    """
    # Guard clause: whitespace-only input gets a German "please type
    # something" prompt instead of an API call.
    if not user_input.strip():
        return "", "Bitte gib eine Nachricht ein.", ""

    # Persona instruction: the model role-plays a depressed 16-year-old
    # and must stay in character throughout.
    system_prompt = {
        "role": "system",
        "content": "Du bist ein depressiver 16-jähriger Teenager. Bleibe durchgehend in deiner Rolle.",
    }
    messages = [system_prompt, {"role": "user", "content": user_input}]

    response_text = ""
    try:
        # Stream tokens from the Inference API and accumulate them.
        # NOTE(review): max_tokens is capped at 100 although the UI slider
        # allows up to 150 — confirm whether the cap is intentional.
        for message in client.chat_completion(
            messages=messages,
            max_tokens=min(max_tokens, 100),
            stream=True,
            temperature=temperature,
            top_p=top_p,
        ):
            token = message.choices[0].delta.content
            if token:  # delta.content may be None on some stream events
                response_text += token
    except Exception as e:
        # Surface the API error in the console and show it to the user.
        print(f"API Error: {e}")  # This will show in console
        print(f"Error type: {type(e)}")
        response_text = f"API Fehler: {str(e)}"  # Show actual error to user

    response_text = response_text.strip()
    chat_display = f"**Du:** {user_input}\n**Assistant:** {response_text}\n\n"
    # Bug fix: return exactly 3 values. The click/submit bindings list
    # three outputs (user_input, bot_response, chat_history); the original
    # 4-tuple made Gradio fail with "too many output values".
    return "", response_text, chat_display
def reset_conversation():
    """Reset the visible conversation panels.

    Returns:
        A 2-tuple matching the reset button's ``outputs`` list
        (status text for bot_response, cleared chat_history).

    Bug fix: the original returned 3 values while the binding lists only
    2 outputs, which makes Gradio fail with "too many output values".
    """
    return "Neues Gespräch gestartet.", ""
# --- Gradio UI: layout and event wiring for the training simulator ---
with gr.Blocks(title="Depression Training Simulator", theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🧠 Depression Training Simulator")
    gr.Markdown("**Übe realistische Gespräche mit depressiven Jugendlichen und erhalte Feedback**")
    with gr.Row():
        with gr.Column(scale=1):
            # Parameter sliders: these feed directly into
            # enhanced_chat_response as max_tokens / temperature / top_p.
            gr.Markdown("### ⚙️ Einstellungen")
            max_tokens = gr.Slider(50, 150, value=80, step=10, label="Antwortlänge")
            temperature = gr.Slider(0.5, 1.2, value=0.9, step=0.1, label="Variabilität")
            top_p = gr.Slider(0.7, 1.0, value=0.95, step=0.05, label="Fokus")
            # Actions
            gr.Markdown("### 🔄 Aktionen")
            reset_btn = gr.Button("Neues Gespräch", variant="secondary")
        with gr.Column(scale=2):
            # Chat interface: input box, send button, latest reply,
            # and a running transcript textbox.
            gr.Markdown("### 💬 Gespräch")
            user_input = gr.Textbox(
                label="Deine Nachricht",
                placeholder="Beginne das Gespräch...",
                lines=2
            )
            send_btn = gr.Button("📨 Senden", variant="primary")
            bot_response = gr.Textbox(
                label="Antwort",
                interactive=False,
                lines=3
            )
            chat_history = gr.Textbox(
                label="Gesprächsverlauf",
                interactive=False,
                lines=12
            )
    # Feedback panel (disabled; no persona feedback is generated yet)
    #with gr.Accordion("📈 Trainer-Feedback", open=True):
    #    feedback_display = gr.Markdown("Starte ein Gespräch, um Feedback zu erhalten.")
    # Event bindings.
    # Send button: clear the input box, show the reply, update the transcript.
    send_btn.click(
        fn=enhanced_chat_response,
        inputs=[user_input, max_tokens, temperature, top_p],
        outputs=[user_input, bot_response, chat_history]
    )
    # Pressing Enter in the input box behaves exactly like the send button.
    user_input.submit(
        fn=enhanced_chat_response,
        inputs=[user_input, max_tokens, temperature, top_p],
        outputs=[user_input, bot_response, chat_history]
    )
    # Reset button: show a status message and clear the transcript.
    reset_btn.click(
        fn=reset_conversation,
        outputs=[bot_response, chat_history]
    )
# Launch the Gradio app only when run as a script (not on import).
# Bug fix: removed the stray trailing " |" after demo.launch(), which
# was a syntax error (apparently a copy/extraction artifact).
if __name__ == "__main__":
    demo.launch()