ewingreen committed on
Commit
5109df5
·
verified ·
1 Parent(s): 3772b61

Debug using Gemini

Browse files
Files changed (1) hide show
  1. app.py +27 -20
app.py CHANGED
@@ -1,40 +1,47 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
 
 
 
3
 
4
  # this client will handle making requests to the model to generate responses
5
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
6
 
7
  def respond(message, history):
8
 
9
- system_message = "You are a friendly tour guide chatbot. You always respond with an upbeat attitude!"
10
-
11
  # initialize a list of dictionaries to store the messages
12
- messages = [{"role": "system",
13
  "content": system_message}]
14
-
15
- # add all previous messages to the messages list
16
  if history:
17
  messages.extend(history)
18
 
19
  # add the current user’s message to the messages list
20
  messages.append({"role": "user", "content": message})
21
 
22
- # makes the chat completion API call,
23
  # sending the messages and other parameters to the model
24
  # implements streaming, where one word/token appears at a time
25
  response = ""
26
 
27
  # iterate through each message in the method
28
- for message in client.chat_completion(
29
- messages,
30
- max_tokens=200,
31
- temperature=0.9,
32
- stream=True
33
- ):
34
- # add the tokens to the output content
35
- token = message.choices[0].delta.content # capture the most recent token
36
- response += token # Add it to the response
37
- yield response # yield the response
 
 
 
 
38
 
39
  # Create the Gradio chat interface (using Gradio's ChatInterface)
40
  title = "# SousChef AI 🍳"
@@ -55,14 +62,14 @@ with gr.Blocks() as chatbot:
55
  with gr.Column(scale=1):
56
  gr.Markdown(title)
57
  gr.Markdown(topics)
58
-
59
  with gr.Column(scale=2):
60
- gr.ChatInterface(respond,
61
- type="messages",
62
  examples = ["How do I know if my chicken is fully cooked?", "What toppings should I add to my ramen?", "What's the best recipe for homemade pizza dough?"],
63
  theme='d8ahazard/material_design_rd'
64
  )
65
  with gr.Row():
66
  gr.Markdown(disclaimer)
67
 
68
- chatbot.launch(debug=True)
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
+ from sentence_transformers import SentenceTransformer
4
+ import torch
5
+ import numpy as np
6
 
7
  # this client will handle making requests to the model to generate responses
8
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
9
 
10
def respond(message, history):
    """Stream a chat reply for the Gradio ChatInterface.

    Args:
        message: The user's latest message (str).
        history: Prior conversation turns in "messages" format
            (list of {"role", "content"} dicts), or None/empty on first turn.

    Yields:
        str: The accumulated response text, growing one token at a time
        so the UI can render the reply as it streams.
    """
    system_message = "You are a kitchen sous chef. You always respond with a knowledgeable and upbeat attitude!"

    # initialize a list of dictionaries to store the messages
    messages = [{"role": "system",
                 "content": system_message}]

    # add all previous messages to the messages list
    if history:
        messages.extend(history)

    # add the current user's message to the messages list
    messages.append({"role": "user", "content": message})

    # makes the chat completion API call,
    # sending the messages and other parameters to the model
    # implements streaming, where one word/token appears at a time
    response = ""

    # iterate through each streamed chunk returned by the model
    try:
        for chunk in client.chat_completion(
            messages,
            max_tokens=200,
            temperature=0.9,
            stream=True,
        ):
            # capture the most recent token; the final chunk of a stream
            # can carry delta.content == None, which would crash "+="
            token = chunk.choices[0].delta.content
            if token:
                response += token  # add it to the response
                yield response     # yield the partial response so far

    except Exception as e:
        print(f"An error occurred: {e}")
        # surface the failure to the user instead of ending silently
        yield response or f"An error occurred: {e}"
45
 
46
  # Create the Gradio chat interface (using Gradio's ChatInterface)
47
  title = "# SousChef AI 🍳"
 
62
  with gr.Column(scale=1):
63
  gr.Markdown(title)
64
  gr.Markdown(topics)
65
+
66
  with gr.Column(scale=2):
67
+ gr.ChatInterface(respond,
68
+ type="messages",
69
  examples = ["How do I know if my chicken is fully cooked?", "What toppings should I add to my ramen?", "What's the best recipe for homemade pizza dough?"],
70
  theme='d8ahazard/material_design_rd'
71
  )
72
  with gr.Row():
73
  gr.Markdown(disclaimer)
74
 
75
+ chatbot.launch(debug=False)