lakshraina2 committed on
Commit
47131aa
·
verified ·
1 Parent(s): 09c3fad

Update app.py

Browse files
Files changed (1): app.py (+13 −3)
app.py CHANGED
@@ -7,7 +7,6 @@ model_id = "lakshraina2/leetcodeAI"
7
  print("Loading model on CPU...")
8
 
9
  tokenizer = AutoTokenizer.from_pretrained(model_id, token=False)
10
- # We removed device_map to avoid the Accelerate dependency error
11
  model = AutoModelForCausalLM.from_pretrained(
12
  model_id,
13
  dtype=torch.float32,
@@ -15,6 +14,10 @@ model = AutoModelForCausalLM.from_pretrained(
15
  )
16
 
17
  def solve(problem_text):
 
 
 
 
18
  prompt = f"### Instruction:\nSolve this LeetCode problem:\n{problem_text}\n\n### Response:\n"
19
  inputs = tokenizer(prompt, return_tensors="pt")
20
 
@@ -29,5 +32,12 @@ def solve(problem_text):
29
  solution = tokenizer.decode(outputs[0], skip_special_tokens=True)
30
  return solution.split("### Response:\n")[-1].strip()
31
 
32
- iface = gr.Interface(fn=solve, inputs="text", outputs="text")
33
- iface.launch()
 
 
 
 
 
 
 
 
7
  print("Loading model on CPU...")
8
 
9
  tokenizer = AutoTokenizer.from_pretrained(model_id, token=False)
 
10
  model = AutoModelForCausalLM.from_pretrained(
11
  model_id,
12
  dtype=torch.float32,
 
14
  )
15
 
16
  def solve(problem_text):
17
+ # Basic check to ensure input isn't empty
18
+ if not problem_text:
19
+ return "No problem text detected."
20
+
21
  prompt = f"### Instruction:\nSolve this LeetCode problem:\n{problem_text}\n\n### Response:\n"
22
  inputs = tokenizer(prompt, return_tensors="pt")
23
 
 
32
  solution = tokenizer.decode(outputs[0], skip_special_tokens=True)
33
  return solution.split("### Response:\n")[-1].strip()
34
 
35
+ # THE FIX: We use gr.Interface but explicitly name the API endpoint 'predict'
36
+ demo = gr.Interface(
37
+ fn=solve,
38
+ inputs=gr.Textbox(),
39
+ outputs=gr.Textbox(),
40
+ api_name="predict" # This matches the /predict in your content.js URL
41
+ )
42
+
43
+ demo.launch()