rrk757 committed on
Commit
ae5eea4
·
verified ·
1 Parent(s): 902b32d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +25 -36
app.py CHANGED
@@ -2,43 +2,32 @@ from huggingface_hub import InferenceClient
2
  import gradio as gr
3
  import random
4
 
5
- client = InferenceClient("google/gemma-3-27b-it")
6
- #this is where you change the LLM
7
 
8
- def respond(message,history):
9
- # with gr.Blocks() as demo:
10
- # gr.Markdown("# Simple Q&A Chatbot")
11
-
12
- # question = gr.Textbox(label="Your Question", placeholder="Type your question here...")
13
- # answer = gr.Textbox(label="Chatbot Answer", interactive=False)
14
-
15
- # question.submit(fn=chatbot_response, inputs=question, outputs=answer)
16
-
17
- messages = [{"role": "system", "content": "You are a mean chatbot."}]
18
-
19
- if history:
20
- messages.extend(history)
21
 
22
- message.append({"role": "user", "content": message})
 
 
 
 
 
 
23
 
24
  response = ""
25
-
26
- for message in client.chat_completion(messages,max_tokens=50, stream=True):
27
- token = message.choices[0].delta.content
28
- response += token
29
- yield response
30
-
31
- #temperature=.9,
32
- #top_p=.7) #changes the length of message
33
-
34
- def echo(message, history):
35
- choices = ["so true", "shut up", "you need help", "ts pmo sybau"] #change personality
36
- #use random to select choices
37
- chat_answer = random.choice(choices)
38
- return chat_answer
39
-
40
- chatbot = gr.ChatInterface(respond, type = "messages")
41
-
42
- chatbot.launch()
43
-
44
- chatbot.launch(debug=True)
 
2
  import gradio as gr
3
  import random
4
 
5
# Model served via the HF Inference API; swap in a smaller model here if needed.
MODEL_ID = "google/gemma-1.1-7b-it"

# Initialize the inference client once at module load.
client = InferenceClient(MODEL_ID)
7
 
8
# Define the response function
def respond(message, history):
    """Stream a model reply for the Gradio chat interface.

    Parameters:
        message: the user's latest message (str).
        history: prior turns as (user_msg, bot_msg) pairs, or None/empty
                 on the first turn.

    Yields:
        The accumulated response text after each streamed token, so the
        UI updates incrementally.
    """
    messages = [{"role": "system", "content": "You are a mean chatbot."}]

    # Convert history to OpenAI-style messages
    if history:
        for user_msg, bot_msg in history:
            messages.append({"role": "user", "content": user_msg})
            messages.append({"role": "assistant", "content": bot_msg})

    messages.append({"role": "user", "content": message})

    response = ""

    # Call the model with streaming response
    for chunk in client.chat_completion(messages, max_tokens=100, stream=True):
        if chunk.choices and chunk.choices[0].delta:
            delta = chunk.choices[0].delta
            # BUG FIX: in current huggingface_hub the stream delta is an
            # object exposing `.content` (attribute access), not a dict,
            # so `delta.get("content", "")` raises AttributeError.
            # Support both shapes, and guard against None content
            # (role-only chunks) which would make `response += token`
            # raise TypeError.
            if isinstance(delta, dict):
                token = delta.get("content")
            else:
                token = getattr(delta, "content", None)
            response += token or ""
            yield response
28
+
29
# Wire the response function into a Gradio chat UI.
demo = gr.ChatInterface(fn=respond, chatbot=gr.Chatbot(), title="Mean Chatbot")

# Launch the app; debug=True prints tracebacks to the console.
demo.launch(debug=True)