Commit fffa819 (parent 5ba32b3): Update app.py

app.py CHANGED
@@ -1,31 +1,72 @@
 import gradio as gr
 import torch
 import pickle
+import io
 from transformers import GPT2Tokenizer, GPT2LMHeadModel
 
 # Load model and tokenizer from pickle files
 print("Loading model and tokenizer...")
 
+# Determine device
+device = "cuda" if torch.cuda.is_available() else "cpu"
+print(f"Using device: {device}")
+
 try:
-    # Load LoRA model
+    # Custom unpickler to handle device mapping
+    class CPU_Unpickler(pickle.Unpickler):
+        def find_class(self, module, name):
+            if module == 'torch.storage' and name == '_load_from_bytes':
+                return lambda b: torch.load(io.BytesIO(b), map_location=device)
+            else:
+                return super().find_class(module, name)
+
+    # Load LoRA model with device mapping
+    print("Loading LoRA model...")
     with open('gpt2_pseudo2code_lora_model.pkl', 'rb') as f:
-        model = pickle.load(f)
+        if device == "cpu":
+            # Use custom unpickler for CPU
+            model = CPU_Unpickler(f).load()
+        else:
+            model = pickle.load(f)
     print("✓ Model loaded successfully")
 
     # Load tokenizer
+    print("Loading tokenizer...")
     with open('gpt2_pseudo2code_tokenizer.pkl', 'rb') as f:
         tokenizer = pickle.load(f)
     print("✓ Tokenizer loaded successfully")
 
-    # Move model to device
-    device = "cuda" if torch.cuda.is_available() else "cpu"
+    # Ensure model is on correct device
     model = model.to(device)
     model.eval()
-    print(f"✓ Model loaded on {device}")
+    print(f"✓ Model ready on {device}")
+
+    # Print model info
+    trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
+    total_params = sum(p.numel() for p in model.parameters())
+    print(f"✓ Total parameters: {total_params:,}")
+    print(f"✓ Trainable parameters: {trainable_params:,}")
 
 except Exception as e:
     print(f"Error loading model: {e}")
-    raise
+    print("\nTrying alternative loading method...")
+    try:
+        # Alternative method: load with torch.load directly
+        import io
+
+        with open('gpt2_pseudo2code_lora_model.pkl', 'rb') as f:
+            buffer = io.BytesIO(f.read())
+            model = torch.load(buffer, map_location=torch.device(device))
+
+        with open('gpt2_pseudo2code_tokenizer.pkl', 'rb') as f:
+            tokenizer = pickle.load(f)
+
+        model = model.to(device)
+        model.eval()
+        print("✓ Model loaded successfully using alternative method")
+    except Exception as e2:
+        print(f"Alternative loading also failed: {e2}")
+        raise
 
 def generate_code(pseudocode, indent, line, max_length=128, temperature=0.7, top_p=0.9):
     """
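
Note on the new CPU_Unpickler: a model dumped with plain pickle.dump rebuilds its tensor storages through torch.storage._load_from_bytes, which calls torch.load internally with no way to pass map_location, so unpickling a GPU-saved checkpoint on CPU-only hardware fails. Intercepting that one symbol in find_class injects the mapping. A minimal standalone sketch of the same pattern (the helper name cpu_safe_load is illustrative, not part of the app):

    import io
    import pickle
    import torch

    def cpu_safe_load(path, device="cpu"):
        """Unpickle a torch object saved on a CUDA machine, remapping storages."""
        class _MappedUnpickler(pickle.Unpickler):
            def find_class(self, module, name):
                # Torch storages are restored via _load_from_bytes, which wraps
                # torch.load; intercept it so map_location can be supplied.
                if module == 'torch.storage' and name == '_load_from_bytes':
                    return lambda b: torch.load(io.BytesIO(b), map_location=device)
                return super().find_class(module, name)
        with open(path, 'rb') as f:
            return _MappedUnpickler(f).load()

The fallback branch in the diff covers the other common case: a .pkl file that was actually written with torch.save, where map_location can be passed to torch.load directly.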
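
Once loading succeeds, a quick smoke test can reuse the sampling defaults that generate_code advertises (max_length=128, temperature=0.7, top_p=0.9). The pseudocode prompt below is a guess, since the body of generate_code is cut off in this diff:

    # Hypothetical smoke test; the prompt format is assumed.
    inputs = tokenizer("create a list of the first n squares", return_tensors="pt").to(device)
    with torch.no_grad():
        output = model.generate(
            **inputs,
            max_length=128,
            temperature=0.7,
            top_p=0.9,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id,  # GPT-2 has no pad token by default
        )
    print(tokenizer.decode(output[0], skip_special_tokens=True))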