MS-YUN committed on
Commit
1aee503
·
1 Parent(s): 5b8f1d5
Files changed (1) hide show
  1. app.py +43 -0
app.py ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from transformers import AutoModelForCausalLM, AutoTokenizer
2
+ import gradio as gr
3
+ import torch
4
+
5
# UI metadata shown on the Gradio page.
title = "🤖AI ChatBot"
description = "Building open-domain chatbots is a challenging area for machine learning research."
examples = [["How are you?"]]

# Load DialoGPT-large once at module import; downloads the weights from the
# Hugging Face Hub on first run, then serves from the local cache.
tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-large")
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-large")
11
+
12
def predict(input, history=None):
    """Generate one chatbot turn with DialoGPT.

    Parameters
    ----------
    input : str
        The new user message.
    history : list | None
        Token-id history carried over from the previous call (the
        ``.tolist()`` of the last ``model.generate`` output). ``None`` or an
        empty list starts a fresh conversation.

    Returns
    -------
    tuple
        ``(pairs, history)`` where ``pairs`` is a list of
        ``(user_text, bot_text)`` tuples for the Chatbot widget and
        ``history`` is the updated token-id history for the "state" output.
    """
    # None default instead of a mutable `[]` default (shared across calls);
    # Gradio's initial state value is also None, which this normalizes.
    if not history:
        history = []

    # Tokenize the new user message, terminated by the EOS token that
    # DialoGPT uses as its turn separator.
    new_user_input_ids = tokenizer.encode(
        input + tokenizer.eos_token, return_tensors="pt"
    )

    # Append to the running conversation. torch.cat of an empty 1-D
    # LongTensor with a 2-D tensor raises on modern torch, so skip the
    # concat when the chat is fresh.
    if history:
        bot_input_ids = torch.cat(
            [torch.LongTensor(history), new_user_input_ids], dim=-1
        )
    else:
        bot_input_ids = new_user_input_ids

    # Generate the response; DialoGPT has no pad token, so pad with EOS.
    history = model.generate(
        bot_input_ids, max_length=4000, pad_token_id=tokenizer.eos_token_id
    ).tolist()

    # Decode the whole conversation and split it back into turns on the
    # EOS marker, then pair consecutive turns as (user, bot) tuples.
    response = tokenizer.decode(history[0]).split("<|endoftext|>")
    response = [
        (response[i], response[i + 1]) for i in range(0, len(response) - 1, 2)
    ]
    return response, history
34
+
35
# Wire the predict function into a Gradio chat UI and start the server.
# "state" input/output threads the token-id history between calls.
demo = gr.Interface(
    fn=predict,
    title=title,
    description=description,
    examples=examples,
    inputs=["text", "state"],
    outputs=["chatbot", "state"],
    theme="finlaymacklon/boxy_violet",
)
demo.launch()