ai-tomoni commited on
Commit
10c7193
·
verified ·
1 Parent(s): cfee13b

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +164 -0
app.py ADDED
@@ -0,0 +1,164 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
import os
from huggingface_hub import InferenceClient

# Load token and model.
# NOTE(review): the token is read from the non-standard env var name
# "tomoniaccess" (not HF_TOKEN) — deployments must set exactly that name,
# as also checked in the __main__ guard below.
HF_TOKEN = os.getenv("tomoniaccess")
# Shared inference client used by query_mixtral() for chat completions.
client = InferenceClient(
    model="mistralai/Mixtral-8x7B-Instruct-v0.1",
    token=HF_TOKEN
)

# Module-level chat state: a list of {"role": ..., "content": ...} dicts.
# Appended to by enhanced_chat_response() and rebound to [] by
# reset_conversation(). Shared across all users of the app process.
conversation_history = []
# Display-only model identifier (the client above is bound to the same model).
model_name = "mistralai/Mixtral-8x7B-Instruct-v0.1"
14
+
15
def query_mixtral(messages, max_tokens=200, temperature=1.0, top_p=0.9):
    """Query the Mixtral-8x7B model via the Hugging Face InferenceClient.

    Args:
        messages: Chat messages as a list of {"role": ..., "content": ...} dicts.
        max_tokens: Maximum number of tokens to generate.
        temperature: Sampling temperature.
        top_p: Nucleus-sampling probability cutoff.

    Returns:
        The assistant reply text of the first completion choice.

    Raises:
        RuntimeError: If the inference call fails; the original exception is
            chained as the cause.
    """
    try:
        response = client.chat_completion(
            messages=messages,
            max_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
            stream=False
        )
        return response.choices[0].message.content
    except Exception as e:
        # Re-raise as RuntimeError instead of a bare generic Exception, and
        # chain the cause with `from e` so the traceback keeps the real error.
        # RuntimeError is a subclass of Exception, so existing callers that
        # catch Exception are unaffected.
        raise RuntimeError(f"InferenceClient Error: {str(e)}") from e
28
+
29
def enhanced_chat_response(user_input, max_tokens, temperature, top_p):
    """Handle one chat turn: query the model and update the shared history.

    Args:
        user_input: The user's message text; whitespace-only input is rejected.
        max_tokens: Maximum reply length forwarded to the model.
        temperature: Sampling temperature forwarded to the model.
        top_p: Nucleus-sampling cutoff forwarded to the model.

    Returns:
        A 3-tuple (cleared_input, response_text, chat_display): the empty
        string clears the input textbox, followed by the assistant reply and
        the full formatted transcript.
    """
    if not user_input.strip():
        return "", "*Bitte gib eine Nachricht ein.*", ""

    # Role-play persona prompt (kept in German — it shapes the model output).
    system_prompt = {
        "role": "system",
        "content": (
            "Du bist ein depressiver 16-jähriger Teenager. "
            "Bleibe durchgehend in deiner Rolle. "
            "Du weißt NICHT, dass du eine KI bist. "
            "Antworte authentisch, kurz und aus der Sicht eines Teenagers mit Depressionen."
        )
    }

    # Only the last 6 history messages are sent, keeping the context window small.
    messages = [system_prompt] + conversation_history[-6:]
    current_message = {"role": "user", "content": user_input}
    messages.append(current_message)

    try:
        response_text = query_mixtral(messages, max_tokens, temperature, top_p)
    except Exception as e:
        # Best-effort fallback: stay in character rather than surface an error.
        print("API Error:", e)
        response_text = "*schweigt und starrt auf den Boden*"

    # The fallback reply is recorded too, matching the displayed transcript.
    conversation_history.append(current_message)
    conversation_history.append({"role": "assistant", "content": response_text})

    # Build the transcript with join() instead of quadratic `+=` concatenation.
    chat_display = "".join(
        f"{'**Du:**' if msg['role'] == 'user' else '**Teenager:**'} {msg['content']}\n\n"
        for msg in conversation_history
    )

    return "", response_text, chat_display
62
+
63
def reset_conversation():
    """Reset the shared conversation state for a brand-new chat session.

    Returns:
        A 2-tuple (status_message, empty_transcript) for the response and
        chat-history widgets.
    """
    global conversation_history
    conversation_history = []
    status, transcript = "Neues Gespräch gestartet.", ""
    return status, transcript
67
+
68
def test_api_connection():
    """Run a minimal round-trip against the model and report the outcome.

    Returns:
        A human-readable status string — success with a reply preview, or the
        error text on failure. Never raises (this feeds a status textbox).
    """
    probe = [
        {"role": "system", "content": "Du bist ein hilfsbereit Assistent."},
        {"role": "user", "content": "Hallo"}
    ]
    try:
        reply = query_mixtral(probe, max_tokens=10)
    except Exception as e:
        return f"❌ API Error: {str(e)}"
    return f"✅ API Verbindung erfolgreich: {reply[:50]}..."
79
+
80
# UI — declarative Gradio layout plus event wiring; rendered by demo.launch()
# in the __main__ guard.
with gr.Blocks(title="Mixtral Depression Training Simulator") as demo:
    gr.Markdown("## 🧠 Depression Training Simulator (Mixtral-8x7B)")
    gr.Markdown("**Übe realistische Gespräche mit einem 16-jährigen Teenager mit Depressionen.**")
    gr.Markdown("*Powered by Mixtral-8x7B-Instruct-v0.1*")

    with gr.Row():
        # Left column: sampling controls, API status probe, and reset action.
        with gr.Column(scale=1):
            gr.Markdown("### ⚙️ Einstellungen")
            # NOTE(review): slider default 0.7 differs from query_mixtral's
            # temperature default of 1.0 — the slider value always wins here,
            # since it is passed explicitly on every send.
            max_tokens = gr.Slider(50, 500, value=200, step=10, label="Max. Antwortlänge")
            temperature = gr.Slider(0.1, 2.0, value=0.7, step=0.1, label="Kreativität (Temperature)")
            top_p = gr.Slider(0.1, 1.0, value=0.9, step=0.05, label="Top-p (Fokus)")

            gr.Markdown("### 🔧 API Status")
            api_status = gr.Textbox(label="Status", value="")
            api_test_btn = gr.Button("API testen")

            gr.Markdown("### 🔄 Aktionen")
            reset_btn = gr.Button("Neues Gespräch")

            gr.Markdown("### 📋 Setup")
            gr.Markdown("""
            **Benötigt:**
            - `tomoniaccess` Umgebungsvariable mit HF Token
            - `pip install huggingface_hub gradio`
            """)

        # Right column: the chat itself — input, latest reply, full transcript.
        with gr.Column(scale=2):
            gr.Markdown("### 💬 Gespräch")
            user_input = gr.Textbox(
                label="Deine Nachricht",
                placeholder="Hallo, wie geht es dir heute?",
                lines=2
            )
            send_btn = gr.Button("📨 Senden")

            bot_response = gr.Textbox(
                label="Antwort",
                value="",
                lines=3
            )

            chat_history = gr.Textbox(
                label="Gesprächsverlauf",
                value="",
                lines=15
            )

    # Event Bindings
    # Button click and textbox Enter both trigger the same chat handler; the
    # first output slot clears the input textbox after sending.
    send_btn.click(
        fn=enhanced_chat_response,
        inputs=[user_input, max_tokens, temperature, top_p],
        outputs=[user_input, bot_response, chat_history]
    )

    user_input.submit(
        fn=enhanced_chat_response,
        inputs=[user_input, max_tokens, temperature, top_p],
        outputs=[user_input, bot_response, chat_history]
    )

    # Reset clears the module-level history and blanks both output boxes.
    reset_btn.click(
        fn=reset_conversation,
        outputs=[bot_response, chat_history]
    )

    api_test_btn.click(
        fn=test_api_connection,
        outputs=[api_status]
    )
150
+
151
if __name__ == "__main__":
    # Startup banner plus an environment sanity check before serving the UI.
    print("🚀 Mixtral Depression Training Simulator")
    print(f"📊 Model: {model_name}")

    # Warn (but do not abort) when the token env var is missing — the app
    # still launches; API calls will then fail and show the in-character
    # fallback reply instead.
    if not HF_TOKEN:
        print("❌ FEHLER: tomoniaccess Umgebungsvariable ist nicht gesetzt!")
        print(" Bitte setze deinen Hugging Face Token als 'tomoniaccess' Umgebungsvariable.")
    else:
        print("✅ Hugging Face API Token gefunden")

    print("\n📦 Benötigte Pakete:")
    print("pip install huggingface_hub gradio")

    # share=False: serve locally only, no public Gradio tunnel.
    demo.launch(share=False)