BikoRiko committed on
Commit
ab5069b
·
verified ·
1 Parent(s): ed8d032

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +29 -12
app.py CHANGED
@@ -1,25 +1,42 @@
1
- from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
2
  import gradio as gr
3
 
4
- # Load a lightweight conversational model (CPU-friendly)
5
  model_name = "microsoft/DialoGPT-small"
6
  tokenizer = AutoTokenizer.from_pretrained(model_name)
7
  model = AutoModelForCausalLM.from_pretrained(model_name)
8
 
9
- chatbot = pipeline("text-generation", model=model, tokenizer=tokenizer, device=-1) # device=-1 forces CPU
 
 
 
 
 
 
10
 
11
- def respond(message, history):
12
- # Format history for DialoGPT
13
- history_str = "\n".join([f"User: {h[0]}\nBot: {h[1]}" for h in history])
14
- input_text = f"{history_str}\nUser: {message}\nBot:"
 
 
 
 
 
 
 
15
 
16
- response = chatbot(input_text, max_length=1000, pad_token_id=tokenizer.eos_token_id)
17
- bot_reply = response[0]['generated_text'].split("Bot:")[-1].strip()
18
  return bot_reply
19
 
 
20
  gr.ChatInterface(
21
- respond,
22
- title="My Chatbot",
23
  description="A simple CPU-friendly chatbot using DialoGPT-small.",
24
- examples=["Hello!", "How are you?", "Tell me a joke."]
 
 
 
25
  ).launch()
 
1
from transformers import AutoTokenizer, AutoModelForCausalLM
import gradio as gr

# Load a lightweight model (CPU-friendly).
# DialoGPT-small (~117M parameters) is small enough to run acceptably on CPU;
# both tokenizer and weights are fetched from the Hugging Face Hub on first run.
model_name = "microsoft/DialoGPT-small"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
 
9
# Chat callback; per-session history is supplied by gr.ChatInterface.
def predict(message, history):
    """Generate a reply to *message* given the running conversation history.

    Parameters
    ----------
    message : str
        The latest user utterance.
    history : list[tuple[str, str]]
        Previous (user, bot) exchange pairs supplied by gr.ChatInterface.
        # NOTE(review): assumes the tuple-style history format — confirm
        # against the installed Gradio version (newer versions pass dicts).

    Returns
    -------
    str
        The model's reply with the prompt scaffolding stripped.
    """
    # Rebuild the conversation as a plain-text transcript for DialoGPT.
    turns = [f"User: {human}\nBot: {ai}\n" for human, ai in history]
    turns.append(f"User: {message}\nBot:")
    chat_history = "".join(turns)

    # Tokenize and generate. Use max_new_tokens rather than max_length:
    # max_length caps prompt + reply combined, so a long conversation would
    # exhaust the budget before any new text could be produced.
    inputs = tokenizer.encode(chat_history, return_tensors="pt")
    outputs = model.generate(
        inputs,
        max_new_tokens=200,
        pad_token_id=tokenizer.eos_token_id,
        do_sample=True,
        top_p=0.9,
        top_k=50,
    )
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)

    # Keep only the text after the final "Bot:" marker, and cut off any
    # hallucinated follow-up "User:" turn the model may have continued with —
    # the previous split("Bot:")[-1] alone would leak those fake turns.
    bot_reply = response.split("Bot:")[-1]
    bot_reply = bot_reply.split("User:")[0].strip()
    return bot_reply
32
 
33
# Build and launch the Gradio chat UI at module import time
# (Hugging Face Spaces executes app.py top to bottom, so this blocks here).
gr.ChatInterface(
    fn=predict,
    title="💬 My Chatbot",
    description="A simple CPU-friendly chatbot using DialoGPT-small.",
    examples=["Hello!", "What's your name?", "Tell me a fun fact."],
    # NOTE(review): retry_btn/undo_btn/clear_btn keyword args exist in
    # Gradio 4.x but were removed in 5.x — confirm the pinned version.
    retry_btn=None,
    undo_btn=None,
    clear_btn="Clear",
).launch()