kdevoe committed on
Commit
c0f5c7a
·
verified ·
1 Parent(s): 64e115b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +14 -5
app.py CHANGED
@@ -1,16 +1,24 @@
1
  import gradio as gr
2
- from transformers import GPT2Tokenizer, GPT2LMHeadModel
3
  import torch
4
  from langchain.memory import ConversationBufferMemory
5
 
6
  # Move model to device (GPU if available)
7
  device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
8
 
9
- # Load the tokenizer (use pre-trained tokenizer for GPT-2 family)
10
- tokenizer = GPT2Tokenizer.from_pretrained("path_to_your_model_directory")
11
 
12
- # Load the model from the directory containing 'pytorch_model.bin' and 'config.json'
13
- model = GPT2LMHeadModel.from_pretrained("path_to_your_model_directory")
 
 
 
 
 
 
 
 
14
 
15
  # Move model to the device (GPU or CPU)
16
  model.to(device)
@@ -66,3 +74,4 @@ interface.launch()
66
 
67
 
68
 
 
 
import gradio as gr
from transformers import GPT2Tokenizer, GPT2LMHeadModel, GPT2Config
import torch
from langchain.memory import ConversationBufferMemory

# Select the compute device once, up front (GPU if available, else CPU).
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

# Load the tokenizer (the stock distilgpt2 vocabulary matches the
# checkpoint weights loaded below — TODO confirm the fine-tune did not
# change the vocab).
tokenizer = GPT2Tokenizer.from_pretrained("distilgpt2")

# Manually create a configuration for the model (since we don't have a
# local config.json next to the checkpoint).
config = GPT2Config.from_pretrained("distilgpt2")

# Initialize the model architecture from the configuration; weights are
# filled in from the local checkpoint below.
model = GPT2LMHeadModel(config)

# Load the weights from the local pytorch_model.bin file.
# weights_only=True restricts torch.load to tensor data, preventing
# arbitrary code execution from the pickle payload of an untrusted
# checkpoint (requires torch >= 1.13; for a plain state_dict this is
# behavior-preserving).
model_path = "./pytorch_model.bin"  # Path to your local model file
state_dict = torch.load(model_path, map_location=device, weights_only=True)
model.load_state_dict(state_dict)

# Inference-only app: switch off dropout/batch-norm training behavior.
model.eval()

# Move model to the device (GPU or CPU)
model.to(device)
 
74
 
75
 
76
 
77
+