lakshraina2 commited on
Commit
9b8d7a2
·
verified ·
1 Parent(s): b1af243

Update app.py

Browse files
Files changed (1)
  1. app.py +18 -9
app.py CHANGED
@@ -18,28 +18,37 @@ def solve(problem_text):
18
  if not problem_text or len(problem_text) < 10:
19
  return "// Error: Problem text too short."
20
 
21
- # Let's try a simpler, universal prompt format
22
  prompt = f"Problem:\n{problem_text}\n\nPython code solution:\n"
23
-
24
  inputs = tokenizer(prompt, return_tensors="pt")
25
 
26
- print("Starting generation...") # This will show up in HF Logs
27
-
28
  with torch.no_grad():
29
  outputs = model.generate(
30
  input_ids=inputs["input_ids"],
31
  attention_mask=inputs["attention_mask"],
32
  max_new_tokens=512,
33
- do_sample=False, # Force deterministic greedy decoding
34
  pad_token_id=tokenizer.eos_token_id
35
  )
36
 
37
  full_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
38
 
39
- print("RAW MODEL OUTPUT:\n", full_text) # Check HF logs to see exactly what it did
40
-
41
- # TEMPORARY: Return the whole thing so you can see it in the GUI!
42
- return full_text
 
 
 
 
 
 
 
 
 
 
 
 
 
43
 
44
  demo = gr.Interface(fn=solve, inputs="text", outputs="text", api_name="predict")
45
  demo.launch()
 
18
  if not problem_text or len(problem_text) < 10:
19
  return "// Error: Problem text too short."
20
 
 
21
  prompt = f"Problem:\n{problem_text}\n\nPython code solution:\n"
 
22
  inputs = tokenizer(prompt, return_tensors="pt")
23
 
 
 
24
  with torch.no_grad():
25
  outputs = model.generate(
26
  input_ids=inputs["input_ids"],
27
  attention_mask=inputs["attention_mask"],
28
  max_new_tokens=512,
29
+ do_sample=False,
30
  pad_token_id=tokenizer.eos_token_id
31
  )
32
 
33
  full_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
34
 
35
+ # Extract only the Python code block
36
+ try:
37
+ # Split by the python code block marker
38
+ code_block = full_text.split("```python")[1]
39
+
40
+ # Split again by the ending backticks and take everything before it
41
+ pure_code = code_block.split("```")[0]
42
+
43
+ # Remove any leading or trailing blank lines
44
+ return pure_code.strip()
45
+
46
+ except IndexError:
47
+ # Fallback: if the model forgot to use ```python formatting,
48
+ # just return everything after "Python code solution:\n"
49
+ if "Python code solution:\n" in full_text:
50
+ return full_text.split("Python code solution:\n")[1].strip()
51
+ return full_text
52
 
53
  demo = gr.Interface(fn=solve, inputs="text", outputs="text", api_name="predict")
54
  demo.launch()