Update app.py
app.py CHANGED

@@ -6,15 +6,34 @@ import gradio as gr
 # Use GPU if available
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
+# Base model and adapter paths
+base_model_name = "microsoft/phi-2"  # Pull from HF Hub directly
+
+import torch
+from transformers import AutoModelForCausalLM, AutoTokenizer
+from peft import PeftModel
+import gradio as gr
+import os
+
+# Use GPU if available
+device = "cuda" if torch.cuda.is_available() else "cpu"
+
 # Base model and adapter paths
 base_model_name = "microsoft/phi-2"  # Pull from HF Hub directly
 adapter_path = "Shriti09/Microsoft-Phi-QLora"  # Update with your Hugging Face repo path
 
+# Create an offload directory to store the model parts
+offload_dir = "./offload"  # Replace with your desired path
+
+# Ensure the offload directory exists
+os.makedirs(offload_dir, exist_ok=True)
+
 print("🔧 Loading base model...")
 base_model = AutoModelForCausalLM.from_pretrained(
     base_model_name,
     device_map="auto",
-    torch_dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32
+    torch_dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32,
+    offload_dir=offload_dir  # Set offload directory here
 )
 
 print("🔧 Loading LoRA adapter...")
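For context, here is a minimal sketch of what the loading section of app.py might look like after this commit, with the duplicated imports and device setup collapsed into one place. One caveat: transformers' from_pretrained exposes the offload path through the offload_folder argument, so the offload_dir keyword used in the commit above is likely not picked up as written; the sketch assumes offload_folder is what was intended.

import os

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

# Base model and adapter paths
base_model_name = "microsoft/phi-2"  # Pulled from the HF Hub directly
adapter_path = "Shriti09/Microsoft-Phi-QLora"

# Directory where weights that don't fit in memory can be offloaded
offload_dir = "./offload"
os.makedirs(offload_dir, exist_ok=True)

print("🔧 Loading base model...")
base_model = AutoModelForCausalLM.from_pretrained(
    base_model_name,
    device_map="auto",  # let accelerate place layers on GPU/CPU/disk
    torch_dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32,
    offload_folder=offload_dir,  # transformers' name for the offload path
)

print("🔧 Loading LoRA adapter...")
model = PeftModel.from_pretrained(base_model, adapter_path)
tokenizer = AutoTokenizer.from_pretrained(base_model_name)

With device_map="auto", the offload folder only needs to exist before loading: layers that don't fit on the GPU are placed on the CPU, and anything beyond that is serialized into offload_dir automatically.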
|