1nabeelsiddiqui committed on
Commit
f66f753
·
verified ·
1 Parent(s): 72cd81e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -45
app.py CHANGED
@@ -1,11 +1,12 @@
1
  import gradio as gr
2
  import time
3
- from huggingface_hub import InferenceClient
4
 
5
- """
6
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
7
- """
8
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 
 
9
 
10
  def add_message(history, message):
11
  for x in message["files"]:
@@ -14,6 +15,7 @@ def add_message(history, message):
14
  history.append({"role": "user", "content": message["text"]})
15
  return history, gr.MultimodalTextbox(value=None, interactive=False)
16
 
 
17
  def bot(history: list):
18
  response = "**That's cool!**"
19
  history.append({"role": "assistant", "content": ""})
@@ -22,43 +24,8 @@ def bot(history: list):
22
  time.sleep(0.05)
23
  yield history
24
 
25
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    """Stream an assistant reply for *message*, replaying *history* as context.

    Args:
        message: The new user message to answer.
        history: Prior ``(user, assistant)`` turn pairs; falsy entries are skipped.
        system_message: System prompt placed at the start of the conversation.
        max_tokens: Maximum number of tokens to generate.
        temperature: Sampling temperature forwarded to the model.
        top_p: Nucleus-sampling cutoff forwarded to the model.

    Yields:
        The accumulated response text after each streamed token.
    """
    messages = [{"role": "system", "content": system_message}]

    # Replay earlier turns so the model sees the whole conversation.
    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})

    messages.append({"role": "user", "content": message})

    response = ""

    # `client` is the module-level InferenceClient created at import time.
    # NOTE: loop variable renamed from `message` to `chunk` — the original
    # shadowed the `message` parameter.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        # Fix: streamed deltas may carry ``content=None`` (e.g. role-only or
        # final chunks); the original ``response += token`` raised TypeError
        # in that case. Skip empty/None deltas instead of crashing.
        if token:
            response += token
            yield response
56
-
57
-
58
- """
59
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
60
- """
61
- with gr.blocks() as demo:
62
  chatbot = gr.Chatbot(elem_id="chatbot", bubble_full_width=False, type="messages")
63
 
64
  chat_input = gr.MultimodalTextbox(
@@ -66,7 +33,7 @@ with gr.blocks() as demo:
66
  file_count="multiple",
67
  placeholder="Enter message or upload file...",
68
  show_label=False,
69
- sources=["upload"],
70
  )
71
 
72
  chat_msg = chat_input.submit(
@@ -77,6 +44,4 @@ with gr.blocks() as demo:
77
 
78
  chatbot.like(print_like_dislike, None, None, like_user_message=True)
79
 
80
-
81
- if __name__ == "__main__":
82
- demo.launch()
 
1
  import gradio as gr
2
  import time
 
3
 
4
+ # Chatbot demo with multimodal input (text, markdown, LaTeX, code blocks, image, audio, & video). Plus shows support for streaming text.
5
+
6
+
7
def print_like_dislike(x: gr.LikeData):
    """Log a chatbot reaction event: message index, message value, liked flag."""
    idx, value, liked = x.index, x.value, x.liked
    print(idx, value, liked)
9
+
10
 
11
  def add_message(history, message):
12
  for x in message["files"]:
 
15
  history.append({"role": "user", "content": message["text"]})
16
  return history, gr.MultimodalTextbox(value=None, interactive=False)
17
 
18
+
19
  def bot(history: list):
20
  response = "**That's cool!**"
21
  history.append({"role": "assistant", "content": ""})
 
24
  time.sleep(0.05)
25
  yield history
26
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
27
 
28
+ with gr.Blocks() as demo:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
29
  chatbot = gr.Chatbot(elem_id="chatbot", bubble_full_width=False, type="messages")
30
 
31
  chat_input = gr.MultimodalTextbox(
 
33
  file_count="multiple",
34
  placeholder="Enter message or upload file...",
35
  show_label=False,
36
+ sources=["microphone", "upload"],
37
  )
38
 
39
  chat_msg = chat_input.submit(
 
44
 
45
  chatbot.like(print_like_dislike, None, None, like_user_message=True)
46
 
47
+ demo.launch()