mkoot007 committed on
Commit
f41fee6
·
1 Parent(s): 5baecf1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -11
app.py CHANGED
@@ -1,19 +1,19 @@
1
  import gradio as gr
2
- from transformers import pipeline
3
 
4
# Load the conversational pipeline with the "facebook/blenderbot-400M-distill" model
# NOTE(review): the "conversational" pipeline task was deprecated and later removed
# from transformers — confirm the pinned transformers version still provides it.
pipe = pipeline("conversational", model="facebook/blenderbot-400M-distill")
 
6
 
7
def chat_with_model(input_text):
    """Return the assistant's reply to *input_text*.

    Relies on the module-level ``pipe`` (a transformers "conversational"
    pipeline). Raises whatever the pipeline raises on model failure.
    """
    # Start a new conversation with the user's input.
    conversation = [{"role": "user", "content": input_text}]

    # Generate a response from the model.
    response = pipe(conversation)

    # Fix: the conversational pipeline returns a Conversation object, not an
    # indexable list of dicts — response[0]['message']['content'] raises.
    # The assistant's reply is the last message appended to the conversation.
    reply = response.messages[-1]["content"]

    return reply
18
 
19
  iface = gr.Interface(
 
1
  import gradio as gr
2
+ from transformers import BlenderbotTokenizer, BlenderbotForConditionalGeneration
3
 
4
# Load the tokenizer and seq2seq model once at import time so every call to
# chat_with_model() reuses them instead of reloading the checkpoint.
tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
model = BlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-400M-distill")
7
 
8
def chat_with_model(input_text):
    """Generate one chat reply to *input_text* with the Blenderbot model.

    Uses the module-level ``tokenizer`` and ``model``; returns the decoded
    reply text with special tokens stripped.
    """
    # Tokenize the prefixed user message, truncated to the model's input limit.
    prompt = "You: " + input_text
    encoded = tokenizer.encode(prompt, return_tensors="pt", max_length=512, truncation=True)

    # Produce a single candidate reply, disallowing repeated bigrams.
    generated_ids = model.generate(
        encoded,
        max_length=100,
        num_return_sequences=1,
        no_repeat_ngram_size=2,
    )

    # Turn the generated token ids back into plain text.
    return tokenizer.decode(generated_ids[0], skip_special_tokens=True)
18
 
19
  iface = gr.Interface(