Update app.py
app.py CHANGED
@@ -1,41 +1,42 @@
+import os
 import gradio as gr
 import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
-model_id = "lakshraina2/
+model_id = "lakshraina2/leetcodeAI"
 
-
-
+# Securely grab the token you just saved in the Space settings
+hf_token = os.environ.get("HF_TOKEN")
+
+print("Loading model securely...")
+tokenizer = AutoTokenizer.from_pretrained(model_id, token=hf_token)
 model = AutoModelForCausalLM.from_pretrained(
     model_id,
     dtype=torch.float32,
-    token=
+    token=hf_token
 )
 
 def solve(problem_text):
     if not problem_text or len(problem_text) < 10:
         return "// Error: Problem text too short or not scraped correctly."
 
-    # Standard Alpaca/Llama prompt format
     prompt = f"Below is a LeetCode problem. Write a complete Python solution.\n\n### Problem:\n{problem_text}\n\n### Solution:\n"
 
     inputs = tokenizer(prompt, return_tensors="pt")
 
-    # Generate with specific constraints to prevent empty output
     with torch.no_grad():
         outputs = model.generate(
             input_ids=inputs["input_ids"],
             attention_mask=inputs["attention_mask"],
             max_new_tokens=1024,
             min_new_tokens=50,
             temperature=0.1,
             do_sample=True,
             pad_token_id=tokenizer.eos_token_id
         )
 
     full_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
 
-    # Extract only the part after our '### Solution:' marker
     if "### Solution:" in full_text:
         return full_text.split("### Solution:")[-1].strip()
     return full_text.strip()
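The key change: instead of a token hard-coded in the file (the value on the old token= line is truncated in this view), the new code reads it from the Space's HF_TOKEN secret via os.environ.get. Note that os.environ.get returns None when the secret is missing, in which case from_pretrained falls back to anonymous access and fails on a private repo. A minimal fail-fast guard, as a sketch (the check and error message are my own, not part of the commit):

import os

hf_token = os.environ.get("HF_TOKEN")
if not hf_token:
    # Assumption: the model repo is private, so a missing secret
    # should stop startup early rather than surface as an opaque 401
    raise RuntimeError("HF_TOKEN is not set; add it under the Space's Settings -> Secrets.")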
app.py after this commit:

import os
import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "lakshraina2/leetcodeAI"

# Securely grab the token you just saved in the Space settings
hf_token = os.environ.get("HF_TOKEN")

print("Loading model securely...")
tokenizer = AutoTokenizer.from_pretrained(model_id, token=hf_token)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    dtype=torch.float32,
    token=hf_token
)

def solve(problem_text):
    if not problem_text or len(problem_text) < 10:
        return "// Error: Problem text too short or not scraped correctly."

    prompt = f"Below is a LeetCode problem. Write a complete Python solution.\n\n### Problem:\n{problem_text}\n\n### Solution:\n"

    inputs = tokenizer(prompt, return_tensors="pt")

    with torch.no_grad():
        outputs = model.generate(
            input_ids=inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            max_new_tokens=1024,
            min_new_tokens=50,
            temperature=0.1,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id
        )

    full_text = tokenizer.decode(outputs[0], skip_special_tokens=True)

    if "### Solution:" in full_text:
        return full_text.split("### Solution:")[-1].strip()
    return full_text.strip()
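The diff ends at solve() and does not show the Gradio UI wiring, even though the file imports gradio. For completeness, a minimal sketch of how solve could be hooked up; the component choices and labels below are assumptions, not part of this commit:

# Hypothetical UI wiring, not shown in the diff
demo = gr.Interface(
    fn=solve,
    inputs=gr.Textbox(lines=12, label="LeetCode problem text"),
    outputs=gr.Code(language="python", label="Generated solution"),
)
demo.launch()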