BWComedian committed on
Commit
03baa71
·
1 Parent(s): 35a604c

Update app.py with another model

Browse files
Files changed (1) hide show
  1. app.py +5 -5
app.py CHANGED
@@ -1,9 +1,9 @@
1
- from transformers import AutoTokenizer, AutoModelForCausalLM
2
  import torch
 
3
  import gradio as gr
4
 
5
- tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
6
- model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
7
 
8
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
9
  model.to(device)
@@ -26,8 +26,8 @@ demo = gr.Interface(
26
  fn=generate_response,
27
  inputs=gr.Textbox(lines=5, placeholder="Talk to the model..."),
28
  outputs=gr.Textbox(label="Response", lines=10),
29
- title="Chat with Llama 2 7B",
30
- description="Open-access LLM demo."
31
  )
32
 
33
  if __name__ == "__main__":
 
 
1
  import torch
2
+ from transformers import AutoTokenizer, AutoModelForCausalLM
3
  import gradio as gr
4
 
5
+ tokenizer = AutoTokenizer.from_pretrained("gpt2")
6
+ model = AutoModelForCausalLM.from_pretrained("gpt2")
7
 
8
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
9
  model.to(device)
 
26
  fn=generate_response,
27
  inputs=gr.Textbox(lines=5, placeholder="Talk to the model..."),
28
  outputs=gr.Textbox(label="Response", lines=10),
29
+ title="GPT-2 Chatbot",
30
+ description="Chat with GPT-2 model."
31
  )
32
 
33
  if __name__ == "__main__":