JothishJJ committed on
Commit
c38ae0b
·
verified ·
1 Parent(s): 31371d0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -4
app.py CHANGED
@@ -2,17 +2,26 @@ import gradio as gr
2
  from transformers import AutoModelForCausalLM, AutoTokenizer
3
 
4
  # Load GPT-2 model and tokenizer
5
- MODEL_NAME = "gpt2"
6
  tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
7
  model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
8
 
 
 
 
9
  def chat(user_input):
10
- # Encode the user input and generate a response
11
- inputs = tokenizer(user_input, return_tensors="pt")
 
 
 
 
12
  outputs = model.generate(**inputs, max_length=200, num_return_sequences=1)
13
 
14
- # Decode and return the response
15
  response = tokenizer.decode(outputs[0], skip_special_tokens=True)
 
 
16
  return response
17
 
18
  # Gradio interface
 
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load GPT-2 model and tokenizer
# distilgpt2 is the distilled (smaller/faster) GPT-2 variant — presumably
# chosen to fit free-tier Space hardware; confirm output quality is acceptable.
MODEL_NAME = "distilgpt2"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)

# Conversation history, accumulated as "User: ...\nBot: ...\n" lines.
# NOTE(review): a single module-level string is shared by every visitor of
# the app — concurrent sessions will interleave each other's turns and the
# string grows without bound. Confirm single-user/demo use is intended.
conversation_history = ""
12
def chat(user_input):
    """Generate a bot reply to *user_input*, carrying conversation context.

    Appends the user turn to the module-level ``conversation_history``,
    feeds the whole history to the model, appends the bot turn, and
    returns only the bot's reply text (not the whole transcript).
    """
    global conversation_history
    # Add user input to conversation history
    conversation_history += f"User: {user_input}\n"

    # Encode the conversation; truncate so very long histories still fit
    # the model's context window.
    # NOTE(review): default truncation drops the END of the sequence, i.e.
    # the most recent turns — consider tokenizer.truncation_side = "left".
    inputs = tokenizer(
        conversation_history,
        return_tensors="pt",
        truncation=True,
        max_length=1000,
    )

    # max_new_tokens (not max_length) so the generation budget does not
    # shrink to zero once the prompt itself exceeds 200 tokens.
    # pad_token_id=eos silences GPT-2's "no pad token" warning.
    outputs = model.generate(
        **inputs,
        max_new_tokens=200,
        num_return_sequences=1,
        pad_token_id=tokenizer.eos_token_id,
    )

    # Decode ONLY the newly generated tokens. Decoding outputs[0] in full
    # would echo the entire prompt back to the user, and re-appending that
    # to the history would duplicate the whole transcript on every turn.
    prompt_len = inputs["input_ids"].shape[-1]
    response = tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True)
    conversation_history += f"Bot: {response}\n"

    return response
26
 
27
  # Gradio interface