Aliqateebah committed on
Commit
586671a
·
verified ·
1 Parent(s): a9ce45e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -68
app.py CHANGED
@@ -1,14 +1,13 @@
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
 
4
  """
5
- For more information on `huggingface_hub` Inference API support, please check the docs:
6
- https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
7
  """
8
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
9
 
10
 
11
- # وظيفة الاستجابة
12
  def respond(
13
  message,
14
  history: list[tuple[str, str]],
@@ -16,11 +15,6 @@ def respond(
16
  max_tokens,
17
  temperature,
18
  top_p,
19
- repetition_penalty,
20
- frequency_penalty,
21
- presence_penalty,
22
- stop_sequences,
23
- allow_stream,
24
  ):
25
  messages = [{"role": "system", "content": system_message}]
26
 
@@ -34,28 +28,22 @@ def respond(
34
 
35
  response = ""
36
 
37
- # التعامل مع API باستخدام الخيارات المضافة
38
  for message in client.chat_completion(
39
  messages,
40
  max_tokens=max_tokens,
41
- stream=allow_stream,
42
  temperature=temperature,
43
  top_p=top_p,
44
- repetition_penalty=repetition_penalty,
45
- frequency_penalty=frequency_penalty,
46
- presence_penalty=presence_penalty,
47
- stop_sequences=stop_sequences,
48
  ):
49
  token = message.choices[0].delta.content
 
50
  response += token
51
  yield response
52
 
53
 
54
  """
55
- For information on how to customize the ChatInterface, peruse the gradio docs:
56
- https://www.gradio.app/docs/chatinterface
57
  """
58
- # إعداد واجهة Gradio مع الميزات الجديدة
59
  demo = gr.ChatInterface(
60
  respond,
61
  additional_inputs=[
@@ -69,58 +57,9 @@ demo = gr.ChatInterface(
69
  step=0.05,
70
  label="Top-p (nucleus sampling)",
71
  ),
72
- gr.Slider(
73
- minimum=1.0,
74
- maximum=2.0,
75
- value=1.2,
76
- step=0.1,
77
- label="Repetition Penalty",
78
- ),
79
- gr.Slider(
80
- minimum=-2.0,
81
- maximum=2.0,
82
- value=0.0,
83
- step=0.1,
84
- label="Frequency Penalty",
85
- ),
86
- gr.Slider(
87
- minimum=-2.0,
88
- maximum=2.0,
89
- value=0.0,
90
- step=0.1,
91
- label="Presence Penalty",
92
- ),
93
- gr.Textbox(
94
- value="",
95
- placeholder="Enter custom stop sequences (comma-separated)",
96
- label="Stop Sequences",
97
- ),
98
- gr.Checkbox(value=True, label="Stream responses"),
99
- gr.Checkbox(value=False, label="Enable Markdown Formatting"),
100
- gr.Checkbox(value=True, label="Enable Response Summarization"),
101
- gr.Textbox(
102
- value="en",
103
- placeholder="Enter language code (e.g., en, fr, ar)",
104
- label="Response Language",
105
- ),
106
- gr.Textbox(
107
- value="",
108
- placeholder="Optional: Specify keywords to highlight",
109
- label="Highlight Keywords",
110
- ),
111
- gr.Checkbox(value=False, label="Enable Sentiment Analysis"),
112
- gr.Checkbox(value=False, label="Enable Keyword Extraction"),
113
- gr.Checkbox(value=False, label="Enable Named Entity Recognition"),
114
  ],
115
- title="Enhanced AI Chatbot",
116
- description=(
117
- "An advanced chatbot interface powered by Zephyr-7B with new features "
118
- "like sentiment analysis, custom stop sequences, multilingual support, "
119
- "and keyword highlighting."
120
- ),
121
- theme="default", # يمكن التبديل إلى "compact" إذا رغبت بتصميم مضغوط
122
  )
123
 
124
- # تشغيل التطبيق
125
  if __name__ == "__main__":
126
- demo.launch()
 
1
+
2
  import gradio as gr
3
  from huggingface_hub import InferenceClient
4
 
5
  """
6
+ For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
 
7
  """
8
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
9
 
10
 
 
11
  def respond(
12
  message,
13
  history: list[tuple[str, str]],
 
15
  max_tokens,
16
  temperature,
17
  top_p,
 
 
 
 
 
18
  ):
19
  messages = [{"role": "system", "content": system_message}]
20
 
 
28
 
29
  response = ""
30
 
 
31
  for message in client.chat_completion(
32
  messages,
33
  max_tokens=max_tokens,
34
+ stream=True,
35
  temperature=temperature,
36
  top_p=top_p,
 
 
 
 
37
  ):
38
  token = message.choices[0].delta.content
39
+
40
  response += token
41
  yield response
42
 
43
 
44
  """
45
+ For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
 
46
  """
 
47
  demo = gr.ChatInterface(
48
  respond,
49
  additional_inputs=[
 
57
  step=0.05,
58
  label="Top-p (nucleus sampling)",
59
  ),
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
60
  ],
 
 
 
 
 
 
 
61
  )
62
 
63
+
64
  if __name__ == "__main__":
65
+ demo.launch()