Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -2,18 +2,16 @@ import gradio as gr
|
|
| 2 |
import torch
|
| 3 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
| 4 |
|
| 5 |
-
# The public model ID
|
| 6 |
model_id = "lakshraina2/leetcodeAI"
|
| 7 |
|
| 8 |
-
print("Loading model on CPU...")
|
| 9 |
|
| 10 |
-
# Force token=False to bypass the 401 error on public repos
|
| 11 |
tokenizer = AutoTokenizer.from_pretrained(model_id, token=False)
|
|
|
|
| 12 |
model = AutoModelForCausalLM.from_pretrained(
|
| 13 |
model_id,
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
token=False # This is the magic fix
|
| 17 |
)
|
| 18 |
|
| 19 |
def solve(problem_text):
|
|
|
|
| 2 |
import torch
|
| 3 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
| 4 |
|
|
|
|
| 5 |
model_id = "lakshraina2/leetcodeAI"
|
| 6 |
|
| 7 |
+
print("Loading model on CPU...")
|
| 8 |
|
|
|
|
| 9 |
tokenizer = AutoTokenizer.from_pretrained(model_id, token=False)
|
| 10 |
+
# We removed device_map to avoid the Accelerate dependency error
|
| 11 |
model = AutoModelForCausalLM.from_pretrained(
|
| 12 |
model_id,
|
| 13 |
+
dtype=torch.float32,
|
| 14 |
+
token=False
|
|
|
|
| 15 |
)
|
| 16 |
|
| 17 |
def solve(problem_text):
|