Tanveerooooooo committed on
Commit
697b71b
·
verified ·
1 Parent(s): 59872b8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +56 -10
app.py CHANGED
@@ -1,22 +1,68 @@
1
- from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
2
  import gradio as gr
 
 
3
 
4
- model_name = "Salesforce/codet5-base"
 
5
  tokenizer = AutoTokenizer.from_pretrained(model_name)
6
  model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
7
- generator = pipeline("text2text-generation", model=model, tokenizer=tokenizer)
8
 
 
9
  def eternos_debugger(code, error):
10
- prompt = f"Fix this code:\n{code}\nError: {error}"
11
- response = generator(prompt, max_length=100, do_sample=False)
12
- return response[0]["generated_text"]
 
 
 
 
 
 
 
 
 
 
13
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
14
  interface = gr.Interface(
15
  fn=eternos_debugger,
16
- inputs=["textbox", "textbox"],
17
- outputs="textbox",
18
- title="🧠 Eternos - Code Debugging AI",
19
- description="Enter your buggy code and error message. Eternos will suggest a fix."
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
20
  )
21
 
22
  interface.launch()
 
 
1
  import gradio as gr
2
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
3
+ import torch
4
 
5
+ # Load smaller CodeT5 model (faster)
6
+ model_name = "Salesforce/codet5-small"
7
  tokenizer = AutoTokenizer.from_pretrained(model_name)
8
  model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
 
9
 
10
+ # Main function
11
  def eternos_debugger(code, error):
12
+ if not code.strip():
13
+ return "❌ Please provide some code."
14
+ if not error.strip():
15
+ return "❌ Please provide the error message you encountered."
16
+
17
+ # Smart prompting
18
+ prompt = (
19
+ f"You are an expert Python debugger.\n"
20
+ f"Given the buggy code and the error message, fix the code.\n\n"
21
+ f"Code:\n{code}\n\n"
22
+ f"Error:\n{error}\n\n"
23
+ f"Corrected Code:"
24
+ )
25
 
26
+ inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=512)
27
+ with torch.no_grad():
28
+ outputs = model.generate(
29
+ **inputs,
30
+ max_length=512,
31
+ num_beams=4,
32
+ early_stopping=True,
33
+ temperature=0.7,
34
+ top_p=0.95
35
+ )
36
+
37
+ fixed_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
38
+ return fixed_code.strip()
39
+
40
+ # Gradio UI
41
  interface = gr.Interface(
42
  fn=eternos_debugger,
43
+ inputs=[
44
+ gr.Textbox(label="🐞 Buggy Code", lines=12, placeholder="Paste your Python code here..."),
45
+ gr.Textbox(label="🚨 Error Message", lines=3, placeholder="Paste the error message you got...")
46
+ ],
47
+ outputs=gr.Code(label="βœ… Suggested Fixed Code"),
48
+ title="πŸ› οΈ Eternos: AI Code Debugger",
49
+ description="Eternos uses a CodeT5 model to help debug and fix Python code. Provide your code and the error message to get a fix.",
50
+ theme="soft",
51
+ allow_flagging="never",
52
+ examples=[
53
+ [
54
+ "def add_numbers(a, b)\n return a + b",
55
+ "SyntaxError: expected ':'"
56
+ ],
57
+ [
58
+ "for i in range(5)\n print(i)",
59
+ "SyntaxError: expected ':'"
60
+ ],
61
+ [
62
+ "def divide(a, b):\n return a / b\nprint(divide(4, 0))",
63
+ "ZeroDivisionError: division by zero"
64
+ ]
65
+ ]
66
  )
67
 
68
  interface.launch()