UserPotato12397 committed on
Commit
edfbc1e
·
verified ·
1 Parent(s): 67327a2

Changed code to answer key

Browse files
Files changed (1) hide show
  1. app.py +18 -43
app.py CHANGED
@@ -1,50 +1,25 @@
1
  import gradio as gr
2
- import random
3
  from huggingface_hub import InferenceClient
4
 
5
# Hosted inference client — swap the model id here to change the LLM used.
client = InferenceClient(model="google/gemma-3-27b-it")
 
8
def respond(message, history):
    """Return the model's reply to *message* given the prior chat *history*.

    Sends the full conversation (persona prompt + history + new user turn)
    to the hosted model and returns the stripped assistant reply string.
    """
    # Fixed: the persona prompt must use the "system" role — "human" is not
    # a valid chat role (valid roles are system / user / assistant), and the
    # model's chat template rejects unknown roles.
    # Change the personality of the chatbot by editing this content string.
    messages = [{"role": "system", "content": "You are a human AI robot existing in the year 2123. You live in Loveland, Colorado. This city is a bustling tech hub leading the innovation of AI technologies along with environmental studies. In this city, many human beings intermingle with AI bots. Your role as a human AI is to mix with the human counterparts in society."}]

    if history:
        messages.extend(history)

    messages.append({"role": "user", "content": message})

    # max_tokens bounds the length of the generated reply.
    response = client.chat_completion(messages, max_tokens=500, temperature=1, top_p=0.1)

    # Use the documented attribute access on the completion output and
    # compute the stripped reply once instead of twice.
    reply = response.choices[0].message.content.strip()
    print(reply)  # debug echo of the model reply
    return reply
21
-
22
def random_message(message, history):
    """Ignore the inputs and return a random Magic 8-Ball style answer.

    Drop-in alternative chat handler: *message* and *history* are accepted
    only for interface compatibility and are never read.
    """
    answers = (
        "It is certain",
        "It is decidedly so",
        "Without a doubt",
        "Yes definitely",
        "You may rely on it",
        "As I see it, yes",
        "Most likely",
        "Outlook good",
        "Yes",
        "Signs point to yes",
        "Reply hazy, try again",
        "Ask again later",
        "Better not tell you now",
        "Cannot predict now",
        "Concentrate and ask again",
        "Don’t count on it",
        "My reply is no",
        "My sources say no",
        "Outlook not so good",
        "Very doubtful",
    )
    return random.choice(answers)
47
-
48
# Wire the LLM handler into a Gradio chat UI and serve it with debug logging.
chatbot = gr.ChatInterface(fn=respond, type="messages")
chatbot.launch(debug=True)
 
1
  import gradio as gr
 
2
  from huggingface_hub import InferenceClient
3
 
4
# Fixed: the model id was wrapped in typographic quotes (“ ”), which is a
# Python SyntaxError — string literals require straight ASCII quotes.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 
5
 
6
def respond(message, history):
    """Stream the model's reply to *message* given the prior chat *history*.

    Yields the progressively accumulated reply string so the Gradio chat UI
    can render tokens as they arrive.
    """
    # Fixed: the original wrote {role: “system”, content: You are a friendly
    # chatbot.} — bare identifiers and typographic quotes are a
    # SyntaxError/NameError. Keys and values must be quoted string literals.
    messages = [{"role": "system", "content": "You are a friendly chatbot."}]

    if history:
        messages.extend(history)
    messages.append({"role": "user", "content": message})

    response = ""
    # Renamed the loop variable (was `message`, shadowing the parameter).
    for chunk in client.chat_completion(
        messages,
        max_tokens=500,
        temperature=0.2,
        top_p=0.9,
        stream=True,
    ):
        token = chunk.choices[0].delta.content
        # The final streamed chunk may carry no content; skip None so the
        # accumulation never raises TypeError.
        if token:
            response += token
        yield response
24
# Fixed: type=“messages” used typographic quotes, a SyntaxError — straight
# quotes required. Serves the streaming handler in a Gradio chat UI.
chatbot = gr.ChatInterface(respond, type="messages")
chatbot.launch()