Pant0x committed on
Commit
c388cb7
·
verified ·
1 Parent(s): f5504e8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +40 -30
app.py CHANGED
@@ -3,14 +3,12 @@ from huggingface_hub import InferenceClient
3
  import random
4
  import re
5
 
6
- # ✅ Smart Detection: Only blocks obvious garbage (ads/links/spam), everything else goes to AI
7
  OFF_TOPIC_REGEX = r"(http|www|buy now|discount|subscribe|follow me|click here)"
8
 
9
  def is_safe_to_process(text: str) -> bool:
10
- # If it's a very short message with no meaning, we use a friendly nudge
11
  if len(text.strip()) < 2:
12
  return False
13
- # If it matches spam patterns
14
  if re.search(OFF_TOPIC_REGEX, text.lower()):
15
  return False
16
  return True
@@ -23,35 +21,48 @@ def respond(
23
  temperature,
24
  top_p,
25
  hf_token: gr.OAuthToken,
 
26
  ):
27
- # 1. Basic Safety Check
 
 
 
 
 
28
  if not is_safe_to_process(message):
29
- yield "I'm here to listen and support your emotional well-being. How can I help you today?"
30
  return
31
 
32
  if not hf_token:
33
- yield "Please log in via the Sidebar to start our session."
34
  return
35
 
36
- # 2. Using Zephyr-7B: Faster, smarter, and doesn't 'hang' on free tier
37
  client = InferenceClient(token=hf_token.token, model="HuggingFaceH4/zephyr-7b-beta")
38
 
39
- # 3. Memory Management: Last 8 messages to keep the context sharp but fast
40
- messages = [{"role": "system", "content": system_message}]
41
- messages.extend(history[-8:])
 
 
 
 
 
 
 
 
 
42
  messages.append({"role": "user", "content": message})
43
 
44
  response = ""
45
  try:
46
- # 4. Perfect Parameters for ChatGPT-like flow
47
  for msg in client.chat_completion(
48
  messages,
49
  max_tokens=max_tokens,
50
  stream=True,
51
- temperature=0.7, # The "Sweet Spot"
52
- top_p=0.9,
53
  extra_body={
54
- "repetition_penalty": 1.15, # Stops the "I specialize in..." loop
55
  "presence_penalty": 0.3
56
  }
57
  ):
@@ -59,37 +70,36 @@ def respond(
59
  response += token
60
  yield response
61
  except Exception as e:
62
- yield f"Connection error: {str(e)}. Please try refreshing the page."
63
 
64
- # ✅ Professional UI Setup
65
- with gr.Blocks(theme=gr.themes.Default(primary_hue="blue")) as demo:
66
  with gr.Sidebar():
67
- gr.Markdown("## 🌿 Therapy Assistant Settings")
68
  gr.LoginButton()
69
  gr.Markdown("---")
70
  sys_msg = gr.Textbox(
71
  value=(
72
- "You are a professional mental health assistant. "
73
- "1. Respond directly to the user's specific problem in the first sentence. "
74
- "2. Be empathetic but professional. "
75
- "3. If the user mentions symptoms (like washing hands), explain them gently as a supportive peer. "
76
- "4. Never use a repetitive introductory phrase."
77
  ),
78
  label="System Persona",
79
- lines=6
80
  )
81
- tokens = gr.Slider(128, 1024, value=512, label="Response Length")
82
- temp = gr.Slider(0.1, 1.5, value=0.7, label="Empathy Level (Temperature)")
83
- top_p_val = gr.Slider(0.1, 1.0, value=0.9, label="Focus (Top-p)")
84
 
85
  chatbot_ui = gr.ChatInterface(
86
  respond,
87
  type="messages",
88
  additional_inputs=[sys_msg, tokens, temp, top_p_val],
 
89
  examples=[
90
- ["I think my sister has OCD, she washes her hands constantly.", sys_msg.value, 512, 0.7, 0.9],
91
- ["Ana 7ases b de2 fashkh w msh 3aref anam.", sys_msg.value, 512, 0.7, 0.9],
92
- ["I've been feeling very lonely lately.", sys_msg.value, 512, 0.7, 0.9]
93
  ],
94
  cache_examples=False,
95
  )
 
3
  import random
4
  import re
5
 
6
# Lightweight spam screen: block obvious ads/links/solicitations before
# spending model tokens; everything else is passed through to the AI.
OFF_TOPIC_REGEX = r"(http|www|buy now|discount|subscribe|follow me|click here)"

# Compile once at import time; IGNORECASE replaces the per-call .lower() copy.
_OFF_TOPIC_RE = re.compile(OFF_TOPIC_REGEX, re.IGNORECASE)

def is_safe_to_process(text: str) -> bool:
    """Return True when *text* is worth forwarding to the model.

    Rejects messages that are effectively empty (fewer than 2 characters
    after stripping whitespace) or that match the spam/ad pattern above.
    """
    if len(text.strip()) < 2:
        return False
    if _OFF_TOPIC_RE.search(text):
        return False
    return True
 
21
  temperature,
22
  top_p,
23
  hf_token: gr.OAuthToken,
24
+ request: gr.Request, # 👈 دي اللي بتسحب بيانات اليوزر من موقعك
25
  ):
26
+ # 1. سحب الـ Username من الـ URL Query Parameter
27
+ # موقعك هيبعت الـ Iframe كدا: src=".../?username=Koko"
28
+ user_name = "User"
29
+ if request:
30
+ user_name = request.query_params.get("username", "User")
31
+
32
  if not is_safe_to_process(message):
33
+ yield f"Hello {user_name}, I'm here to support your emotional well-being. How can I help you today?"
34
  return
35
 
36
  if not hf_token:
37
+ yield "Please log in to start our session."
38
  return
39
 
 
40
  client = InferenceClient(token=hf_token.token, model="HuggingFaceH4/zephyr-7b-beta")
41
 
42
+ # 2. حوار الـ History والـ الشخصية
43
+ # بنحقن الاسم في الـ System Message عشان الموديل يناديه باسمه صح
44
+ personalized_system = (
45
+ f"{system_message} "
46
+ f"The user's name is {user_name}. Always address them by this name naturally. "
47
+ f"Do not use formal headers like 'Dear'."
48
+ )
49
+
50
+ # الـ History هنا هتكون اللي مبعوثة من الـ Frontend بتاعك فقط
51
+ # الموديل هياخد آخر 10 رسايل عشان الذاكرة تكون قوية بس سريعة
52
+ messages = [{"role": "system", "content": personalized_system}]
53
+ messages.extend(history[-10:])
54
  messages.append({"role": "user", "content": message})
55
 
56
  response = ""
57
  try:
 
58
  for msg in client.chat_completion(
59
  messages,
60
  max_tokens=max_tokens,
61
  stream=True,
62
+ temperature=temperature,
63
+ top_p=top_p,
64
  extra_body={
65
+ "repetition_penalty": 1.15,
66
  "presence_penalty": 0.3
67
  }
68
  ):
 
70
  response += token
71
  yield response
72
  except Exception as e:
73
+ yield f"Connection error: {str(e)}. Please try refreshing."
74
 
75
# Application UI: a sidebar hosts authentication and the persona/generation
# controls; the main area is the streaming chat interface wired to respond().
with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue")) as demo:
    with gr.Sidebar():
        gr.Markdown("## 🌿 Therapy Assistant")
        gr.LoginButton()
        gr.Markdown("---")
        # Editable system persona — injected as the system message on each turn.
        sys_msg = gr.Textbox(
            value=(
                "You are a compassionate mental health assistant. "
                "Respond directly to the user's problem. "
                "Be empathetic and professional. "
                "Never use formal letter formats or generic intros."
            ),
            label="System Persona",
            lines=5,
        )
        # Generation knobs forwarded to respond() via additional_inputs.
        tokens = gr.Slider(128, 1024, value=512, label="Max Response Length")
        temp = gr.Slider(0.1, 1.5, value=0.7, label="Temperature")
        top_p_val = gr.Slider(0.1, 1.0, value=0.9, label="Top-p")

    chatbot_ui = gr.ChatInterface(
        respond,
        type="messages",
        additional_inputs=[sys_msg, tokens, temp, top_p_val],
        # NOTE: each example row must supply a value for every additional input.
        examples=[
            ["I've been feeling very anxious lately.", sys_msg.value, 512, 0.7, 0.9],
            ["Ana ta3ban awee w msh 3aref anam.", sys_msg.value, 512, 0.7, 0.9],
        ],
        cache_examples=False,
    )