# Hugging Face Space app: code-solving Gradio demo.
# (Removed non-code page-scrape residue: "Spaces / Sleeping / File size".)
import os

import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Model repo to load; HF_TOKEN is only needed for gated/private repos,
# so a missing env var (None) is fine for public access.
model_id = "Qwen/Qwen2.5-Coder-1.5B-Instruct"
hf_token = os.environ.get("HF_TOKEN")

print("Loading model securely...")
tokenizer = AutoTokenizer.from_pretrained(model_id, token=hf_token)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    # float32 keeps CPU inference numerically safe (Spaces CPU hardware).
    dtype=torch.float32,
    token=hf_token,
)
def extract_code_block(full_text):
    """Pull the generated code out of the model's raw completion.

    Prefers a fenced ```python block; falls back to splitting on the
    prompt's trailing marker, and finally to the whole completion.
    """
    try:
        return full_text.split("```python")[1].split("```")[0]
    except IndexError:
        marker = "Python code solution:\n"
        if marker in full_text:
            return full_text.split(marker)[1]
        return full_text


def strip_inline_comment(line):
    """Remove a trailing '#' comment from one source line.

    Unlike the naive line.split('#')[0], this ignores '#' characters that
    sit inside single- or double-quoted string literals, so code such as
    s = "#tag" survives scrubbing. (Multi-line triple-quoted strings are
    still handled line-by-line — a '#' inside one may be clipped.)
    """
    quote = None  # quote char of the string literal we are inside, if any
    i = 0
    while i < len(line):
        ch = line[i]
        if quote:
            if ch == "\\":
                i += 2  # skip the escaped character
                continue
            if ch == quote:
                quote = None
        elif ch in ("'", '"'):
            quote = ch
        elif ch == "#":
            return line[:i].rstrip()
        i += 1
    return line.rstrip()


def solve(problem_text):
    """Generate a Python3 solution for *problem_text* and return it with
    comments and blank lines stripped.

    Returns an error string for empty or too-short input. If scrubbing
    accidentally removes everything, the unscrubbed extraction is
    returned instead.
    """
    if not problem_text or len(problem_text) < 10:
        return "// Error: Problem text too short."

    prompt = f"Problem:\n{problem_text}\n\nOptimal and correct Python3 code solution:\n"
    inputs = tokenizer(prompt, return_tensors="pt")
    with torch.no_grad():
        outputs = model.generate(
            input_ids=inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            max_new_tokens=700,
            do_sample=False,  # greedy decoding: deterministic output
            pad_token_id=tokenizer.eos_token_id,
        )
    full_text = tokenizer.decode(outputs[0], skip_special_tokens=True)

    # 1. Extract the raw Python code block from the completion.
    pure_code = extract_code_block(full_text)

    # 2. THE SCRUBBER: delete comments deterministically in Python, not
    #    by asking the model — keep only non-empty de-commented lines.
    cleaned_lines = [
        scrubbed
        for scrubbed in (strip_inline_comment(ln) for ln in pure_code.split("\n"))
        if scrubbed.strip()
    ]
    final_code = "\n".join(cleaned_lines).strip()

    # Safety net: if the scrubber deleted everything, return the original.
    return final_code if final_code else pure_code.strip()
# Expose solve() as a plain text-in/text-out Gradio app; api_name="predict"
# keeps the REST endpoint name stable for programmatic clients.
demo = gr.Interface(
    fn=solve,
    inputs="text",
    outputs="text",
    api_name="predict",
)
demo.launch()