# app.py — Gradio Space serving the lakshraina2/leetcodeAI model on CPU.
import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_id = "lakshraina2/leetcodeAI"

print("Loading model on CPU...")

# token=False skips any auth-token lookup; the model repo is public.
tokenizer = AutoTokenizer.from_pretrained(model_id, token=False)

# device_map is intentionally omitted to avoid the Accelerate dependency,
# so the model loads on CPU. Use torch_dtype rather than `dtype`: `dtype`
# is only recognized by very recent transformers releases (the kwarg was
# renamed from torch_dtype), so on older versions the float32 pin would
# silently not apply. torch_dtype works across versions.
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.float32,
    token=False,
)
# Inference-only app: switch off dropout/batch-norm training behavior.
model.eval()
def solve(problem_text, max_new_tokens=512, temperature=0.2):
    """Generate a solution for a LeetCode problem statement.

    Args:
        problem_text: Problem description entered by the user.
        max_new_tokens: Generation budget for the answer (default 512,
            matching the original hard-coded value).
        temperature: Sampling temperature (default 0.2); kept low so the
            sampled output stays focused.

    Returns:
        The model's response text with the echoed prompt stripped off.
    """
    prompt = (
        "### Instruction:\nSolve this LeetCode problem:\n"
        f"{problem_text}\n\n### Response:\n"
    )
    inputs = tokenizer(prompt, return_tensors="pt")
    with torch.no_grad():  # inference only — no gradient bookkeeping
        outputs = model.generate(
            **inputs,
            max_new_tokens=max_new_tokens,
            temperature=temperature,
            do_sample=True,
        )
    # decode() returns prompt + completion; keep only the text after the
    # last "### Response:\n" marker.
    solution = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return solution.split("### Response:\n")[-1].strip()
# Minimal Gradio UI: one text box in (the problem statement), one text
# box out (the generated solution).
iface = gr.Interface(
    fn=solve,
    inputs="text",
    outputs="text",
)
iface.launch()