Alihamas212 commited on
Commit
5fad509
·
verified ·
1 Parent(s): 0215819

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +50 -26
app.py CHANGED
@@ -1,27 +1,51 @@
1
  import gradio as gr
2
- import random
3
-
4
def chat(message, history):
    """Match *message* against simple keyword rules and append the reply.

    Returns the updated history twice (one copy per Gradio output slot).
    """
    log = history if history else []
    text = message.lower()

    greetings = ["Hello! How can I help you?", "Hi there!", "Hey! What’s up?"]
    check_ins = ["I'm good! Thanks for asking.", "Doing great, how about you?", "All good here!"]

    # First matching rule wins; order matters ("hi" is checked before "bye").
    if "hello" in text or "hi" in text:
        reply = random.choice(greetings)
    elif "how are you" in text:
        reply = random.choice(check_ins)
    elif "your name" in text:
        reply = "I'm your AI chatbot built with Gradio."
    elif "bye" in text:
        reply = "Goodbye! Have a great day!"
    else:
        reply = "I'm not sure how to respond to that, but I'm learning!"

    log.append((message, reply))
    return log, log
21
-
22
# Launch a minimal chat UI wired to the rule-based `chat` handler.
# NOTE(review): gr.ChatInterface expects `fn` to return the reply string,
# but `chat` returns (history, history) — confirm this renders correctly
# in the Gradio version pinned for this Space.
gr.ChatInterface(
    fn=chat,
    title="AI Chatbot",
    description="A friendly chatbot built with Gradio",
    theme="soft"
).launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
# Third-party: PyTorch + Hugging Face Transformers for the dialogue model.
import torch
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

# BlenderBot 400M distilled: a small open-domain dialogue model.
# Downloaded from the Hub on first run (network + disk side effect at import).
model_name = "facebook/blenderbot-400M-distill"
# Prefer GPU when available; fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)

# Persona text prepended to every prompt by generate_response.
# NOTE(review): BlenderBot was not trained with system-prompt instructions,
# so this preamble may be echoed or ignored — verify output quality.
system_preamble = (
    "You are a friendly, energetic motivational coach. "
    "Keep answers concise, positive, and actionable. "
    "When the user asks for exercises or steps, provide a short numbered list. "
)
15
+
16
def generate_response(history, user_message):
    """Build a transcript prompt from the conversation and generate a reply.

    Args:
        history: list of (user, coach) message pairs from earlier turns.
        user_message: the user's latest message.

    Returns:
        The model's reply, decoded to a plain string.
    """
    # Flatten the dialogue into a single "User:/Coach:" transcript that ends
    # with an open "Coach:" cue for the model to complete.
    turns = [f"User: {u}\nCoach: {r}" for u, r in history]
    turns.append(f"User: {user_message}\nCoach:")
    prompt = system_preamble + "\n" + "\n".join(turns)

    # BUG FIX: blenderbot-400M-distill accepts only ~128 input positions, so
    # truncating at 1024 let longer conversations overflow the position
    # embeddings and crash generate(). Respect the tokenizer's real limit
    # (guarding against the "very large int" sentinel some tokenizers report).
    max_len = getattr(tokenizer, "model_max_length", 1024)
    if not isinstance(max_len, int) or max_len > 1024:
        max_len = 1024
    inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=max_len).to(device)

    # Sampled decoding for varied, conversational replies.
    # NOTE(review): truncation drops tokens from the *end*, i.e. the newest
    # user message on overflow — consider trimming oldest turns instead.
    out = model.generate(**inputs, max_new_tokens=256, do_sample=True, top_p=0.9, temperature=0.8)
    return tokenizer.decode(out[0], skip_special_tokens=True).strip()
26
+
27
def chat(user_message, chat_history):
    """Generate the coach's reply and append the turn to the history.

    Returns the updated history twice (chatbot display + state slot).
    """
    history = [] if chat_history is None else chat_history
    reply = generate_response(history, user_message)
    history.append((user_message, reply))
    return history, history
33
+
34
# --- Gradio UI --------------------------------------------------------------
with gr.Blocks(title="Motivational Coach") as demo:
    gr.Markdown("<h2 style='text-align:center'>Motivational Coach — powered by BlenderBot</h2>")
    with gr.Row():
        # NOTE(review): tuple-style history assumes Chatbot's legacy format;
        # verify against the pinned Gradio version (v5 defaults to "messages").
        chatbot = gr.Chatbot(elem_id="chatbot", label="Coach")
        # BUG FIX: Column(scale=0.3) passes a float where Gradio requires an
        # integer scale; use an int scale plus min_width for a narrow column.
        with gr.Column(scale=1, min_width=100):
            clear_btn = gr.Button("Clear")
    msg = gr.Textbox(placeholder="Write your message here...", show_label=False)
    # Server-side conversation history, one list per session.
    state = gr.State([])

    def submit_message(message, state):
        """Handle submit: clear the textbox, refresh chat display and state."""
        new_history, state_out = chat(message, state)
        return "", new_history, state_out

    msg.submit(submit_message, inputs=[msg, state], outputs=[msg, chatbot, state])
    # Reset both the visible chat and the stored history.
    clear_btn.click(lambda: ([], []), None, [chatbot, state])

if __name__ == "__main__":
    # 0.0.0.0:7860 — the standard bind address/port for a Hugging Face Space.
    demo.launch(server_name="0.0.0.0", server_port=7860)