Commit 31668f0
Parent(s): 2851b69
Update app.py
app.py CHANGED

@@ -1,71 +1,105 @@
(Old version: 71 lines removed. Most deleted lines are truncated in the diff view. The legible fragments show that the old app.py unpickled its checkpoint with a custom Unpickler that rerouted tensor storage onto the current device:

    if module == 'torch.storage' and name == '_load_from_bytes':
        return lambda b: torch.load(io.BytesIO(b), map_location=device)
    else:
        return super().find_class(module, name)

then moved the model explicitly with model = model.to(device), loaded a tokenizer, and retried loading inside an except fallback. The device detection, model.eval(), and the try/except skeleton carry over unchanged into the new version below.)
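For context, the surviving fragment is the standard pickle trick for loading a checkpoint saved on one device onto another. A minimal, self-contained sketch of the pattern the removed code used (the class name and checkpoint filename here are assumptions, not recovered from the diff):

import io
import pickle
import torch

device = "cuda" if torch.cuda.is_available() else "cpu"

class DeviceUnpickler(pickle.Unpickler):
    # Intercept torch's storage loader so tensors pickled on, say, CUDA
    # are materialized on whatever device is available at load time.
    def find_class(self, module, name):
        if module == 'torch.storage' and name == '_load_from_bytes':
            return lambda b: torch.load(io.BytesIO(b), map_location=device)
        return super().find_class(module, name)

with open("model.pkl", "rb") as f:  # hypothetical pickle filename
    model = DeviceUnpickler(f).load()

The new version drops this approach entirely in favor of the Hugging Face/PEFT loading API shown below.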
New version (app.py, lines 1-105):

import gradio as gr
import torch
from transformers import GPT2Tokenizer, AutoModelForCausalLM
from peft import PeftModel, PeftConfig, AutoPeftModelForCausalLM
import os

# Load model and tokenizer
print("="*70)
print("Loading Pseudo-Code to Code Generator")
print("="*70)

# Determine device
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using device: {device}")

try:
    # Method 1: Try loading as PeftModel (LoRA adapters)
    print("\nAttempting to load LoRA model from Hugging Face format...")
    model_path = "./model"  # or wherever you uploaded the model files

    # Check if model files exist
    if os.path.exists(model_path):
        print(f"✓ Model directory found: {model_path}")

        # Load with AutoPeftModel (handles LoRA automatically)
        model = AutoPeftModelForCausalLM.from_pretrained(
            model_path,
            device_map={"": device},
            torch_dtype=torch.float32,
            low_cpu_mem_usage=True
        )
        tokenizer = GPT2Tokenizer.from_pretrained(model_path)

        print("✓ Model and tokenizer loaded successfully (Hugging Face format)")
    else:
        # Fallback: load from the current directory
        print("✗ Model directory not found, trying current directory...")
        model = AutoPeftModelForCausalLM.from_pretrained(
            ".",
            device_map={"": device},
            torch_dtype=torch.float32,
            low_cpu_mem_usage=True
        )
        tokenizer = GPT2Tokenizer.from_pretrained(".")
        print("✓ Model loaded from current directory")

    # Set model to evaluation mode
    model.eval()
    print(f"✓ Model ready on {device}")

    # Print model info
    try:
        trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
        total_params = sum(p.numel() for p in model.parameters())
        print(f"✓ Total parameters: {total_params:,}")
        print(f"✓ Trainable parameters: {trainable_params:,}")
    except Exception:
        print("✓ Model parameters info not available")

    print("="*70)

except Exception as e:
    print(f"\n✗ Error loading with AutoPeftModel: {e}")
    print("\nTrying alternative method: Loading base model + LoRA adapters separately...")

    try:
        # Method 2: Load base GPT-2 and attach the LoRA adapters
        from transformers import GPT2LMHeadModel

        print("Loading base GPT-2 model...")
        base_model = GPT2LMHeadModel.from_pretrained("gpt2")

        print("Loading LoRA adapters...")
        model = PeftModel.from_pretrained(
            base_model,
            model_path if os.path.exists(model_path) else ".",
            device_map={"": device}
        )

        tokenizer = GPT2Tokenizer.from_pretrained(
            model_path if os.path.exists(model_path) else "."
        )

        model.eval()
        print("✓ Model loaded successfully (base + adapters)")

    except Exception as e2:
        print(f"\n✗ Alternative method also failed: {e2}")
        print("\n" + "="*70)
        print("DEPLOYMENT INSTRUCTIONS")
        print("="*70)
        print("Please upload the model in Hugging Face format, not pickle!")
        print("\nFiles needed:")
        print("  - adapter_config.json")
        print("  - adapter_model.safetensors (or .bin)")
        print("  - tokenizer.json")
        print("  - tokenizer_config.json")
        print("  - special_tokens_map.json")
        print("  - vocab.json")
        print("  - merges.txt")
        print("\nSee SAVE_MODEL_FOR_HF.py for instructions on how to save properly.")
        print("="*70)
        raise

def generate_code(pseudocode, indent, line, max_length=128, temperature=0.7, top_p=0.9):
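The hunk ends at this signature; the body of generate_code is unchanged by the commit and not shown. Purely for orientation, a decoding function with these parameters would typically look like the sketch below (this is an assumption, not the committed implementation; the prompt format in particular is invented):

def generate_code(pseudocode, indent, line, max_length=128, temperature=0.7, top_p=0.9):
    # Hypothetical prompt format; the real one depends on the fine-tuning data.
    prompt = f"{indent} {line} {pseudocode}"
    inputs = tokenizer(prompt, return_tensors="pt").to(device)
    with torch.no_grad():
        output_ids = model.generate(
            **inputs,
            max_length=max_length,
            temperature=temperature,
            top_p=top_p,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id,
        )
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)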
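The SAVE_MODEL_FOR_HF.py script referenced in the error message is not part of this commit, but exporting a PEFT model in the format Method 1 expects normally takes two calls; a sketch, with the output path assumed to match model_path above:

# Run once in the training environment to produce the files listed
# in the DEPLOYMENT INSTRUCTIONS block.
model.save_pretrained("./model")       # adapter_config.json, adapter_model.safetensors
tokenizer.save_pretrained("./model")   # vocab.json, merges.txt, tokenizer_config.json, ...

If adapter indirection at inference time is a concern, PeftModel also provides merge_and_unload(), which folds the LoRA weights into the base model so the result can be saved and served as a plain GPT-2 checkpoint.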