Update app.py
app.py CHANGED

@@ -18,37 +18,37 @@ def solve(problem_text):
     if not problem_text or len(problem_text) < 10:
         return "// Error: Problem text too short."
 
-
+    # UPDATED PROMPT: Explicitly forbid comments
+    prompt = f"Problem:\n{problem_text}\n\nProvide ONLY the Python3 code. DO NOT include any comments, explanations, or docstrings.\nPython code solution:\n"
     inputs = tokenizer(prompt, return_tensors="pt")
 
     with torch.no_grad():
         outputs = model.generate(
             input_ids=inputs["input_ids"],
             attention_mask=inputs["attention_mask"],
-            max_new_tokens=512,
+            max_new_tokens=300, # REDUCED from 512 to speed up generation!
             do_sample=False,
             pad_token_id=tokenizer.eos_token_id
         )
 
     full_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
 
-    # Extract only the Python code block
     try:
-        # Split by the python code block marker
         code_block = full_text.split("```python")[1]
-
-        # Split again by the ending backticks and take everything before it
         pure_code = code_block.split("```")[0]
-
-        # Remove any leading or trailing blank lines
-        return pure_code.strip()
-
     except IndexError:
-        # Fallback: if the model forgot to use ```python formatting,
-        # just return everything after "Python code solution:\n"
         if "Python code solution:\n" in full_text:
-
-
+            pure_code = full_text.split("Python code solution:\n")[1]
+        else:
+            pure_code = full_text
+
+    # POST-PROCESSING: Scrub out any remaining comment lines
+    cleaned_lines = []
+    for line in pure_code.split('\n'):
+        if not line.strip().startswith('#'):
+            cleaned_lines.append(line)
+
+    return "\n".join(cleaned_lines).strip()
 
 demo = gr.Interface(fn=solve, inputs="text", outputs="text", api_name="predict")
 demo.launch()
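The new extraction-plus-scrubbing path can be exercised without loading the model. The sketch below mirrors the logic in the updated `solve`; `extract_code`, `FENCE`, and the sample string are illustrative only and are not part of the Space's code.

```python
# Standalone sketch of the updated extraction + comment-scrubbing step.
FENCE = "`" * 3  # the literal ``` marker, written indirectly for readability

def extract_code(full_text: str) -> str:
    try:
        # Prefer a fenced ```python block if the model produced one
        code_block = full_text.split(FENCE + "python")[1]
        pure_code = code_block.split(FENCE)[0]
    except IndexError:
        # Fallback: take everything after the prompt's trailing marker
        if "Python code solution:\n" in full_text:
            pure_code = full_text.split("Python code solution:\n")[1]
        else:
            pure_code = full_text
    # Scrub lines that are only comments, as the updated app.py does
    cleaned = [ln for ln in pure_code.split("\n") if not ln.strip().startswith("#")]
    return "\n".join(cleaned).strip()

sample = (
    "Python code solution:\n" + FENCE + "python\n"
    "# read input\nn = int(input())\nprint(n * 2)\n" + FENCE
)
print(extract_code(sample))  # -> n = int(input())\nprint(n * 2)
```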
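Because the Interface registers `api_name="predict"`, the endpoint is callable over the Gradio API once the Space is running. A minimal client-side sketch, assuming a placeholder Space id (`user/solver-space` is not the real repository):

```python
# Sketch of calling the /predict endpoint from outside the Space.
from gradio_client import Client

client = Client("user/solver-space")  # placeholder id; substitute the actual Space
result = client.predict(
    "Given an integer n, print the sum of the first n positive integers.",
    api_name="/predict",
)
print(result)  # the cleaned Python code returned by solve()
```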