BikoRiko committed on
Commit
ebfa322
·
verified ·
1 Parent(s): 94e992b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -10
app.py CHANGED
@@ -1,20 +1,17 @@
1
  from transformers import AutoTokenizer, AutoModelForCausalLM
2
  import gradio as gr
3
 
4
- # Load a lightweight model (CPU-friendly)
5
  model_name = "microsoft/DialoGPT-small"
6
  tokenizer = AutoTokenizer.from_pretrained(model_name)
7
  model = AutoModelForCausalLM.from_pretrained(model_name)
8
 
9
- # Store chat history per session using Gradio's state
10
  def predict(message, history):
11
- # Format conversation for DialoGPT
12
  chat_history = ""
13
  for human, ai in history:
14
  chat_history += f"User: {human}\nBot: {ai}\n"
15
  chat_history += f"User: {message}\nBot:"
16
 
17
- # Tokenize and generate
18
  inputs = tokenizer.encode(chat_history, return_tensors="pt")
19
  outputs = model.generate(
20
  inputs,
@@ -25,18 +22,13 @@ def predict(message, history):
25
  top_k=50
26
  )
27
  response = tokenizer.decode(outputs[0], skip_special_tokens=True)
28
-
29
- # Extract only the bot's latest reply
30
  bot_reply = response.split("Bot:")[-1].strip()
31
  return bot_reply
32
 
33
- # Launch the chat interface
34
  gr.ChatInterface(
35
  fn=predict,
36
  title="💬 My Chatbot",
37
  description="A simple CPU-friendly chatbot using DialoGPT-small.",
38
  examples=["Hello!", "What's your name?", "Tell me a fun fact."],
39
- retry_btn=None,
40
- undo_btn=None,
41
- clear_btn="Clear",
42
  ).launch()
 
1
# Model setup: a small conversational model that is light enough to run on CPU.
from transformers import AutoTokenizer, AutoModelForCausalLM
import gradio as gr

model_name = "microsoft/DialoGPT-small"

# Load the tokenizer and weights once at startup so each request only pays
# for generation, not model loading.
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
  def predict(message, history):
9
+ # Build conversation context
10
  chat_history = ""
11
  for human, ai in history:
12
  chat_history += f"User: {human}\nBot: {ai}\n"
13
  chat_history += f"User: {message}\nBot:"
14
 
 
15
  inputs = tokenizer.encode(chat_history, return_tensors="pt")
16
  outputs = model.generate(
17
  inputs,
 
22
  top_k=50
23
  )
24
  response = tokenizer.decode(outputs[0], skip_special_tokens=True)
 
 
25
  bot_reply = response.split("Bot:")[-1].strip()
26
  return bot_reply
27
 
28
# Wire predict() into a chat UI using only arguments every recent Gradio
# version accepts (the retry/undo/clear button kwargs were dropped upstream).
chat_ui = gr.ChatInterface(
    fn=predict,
    title="💬 My Chatbot",
    description="A simple CPU-friendly chatbot using DialoGPT-small.",
    examples=["Hello!", "What's your name?", "Tell me a fun fact."],
)
chat_ui.launch()