Tanveerooooooo committed on
Commit
75a47e0
·
verified ·
1 Parent(s): 6e9ef0f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -10
app.py CHANGED
@@ -2,30 +2,30 @@ import torch
2
  from transformers import AutoTokenizer, AutoModelForCausalLM
3
  import gradio as gr
4
 
5
# Load a lightweight coding model and place it on the best available device.
# Prefer GPU when one is present; fall back to CPU otherwise.
device = "cuda" if torch.cuda.is_available() else "cpu"

model_id = "replit/code-v1-3b"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
model.to(device)
11
 
12
# Debug function
def debug_code(code, language):
    """Ask the model to debug *code* written in *language*.

    Builds a debugging prompt, generates up to 300 new tokens, and returns
    only the model's continuation (the fixed code + explanation).

    Parameters:
        code: the buggy source code pasted by the user.
        language: display name of the language (e.g. "Python").

    Returns:
        The generated text with the prompt stripped.
    """
    prompt = f"### Debug this {language} code:\n{code}\n### Provide fixed code and explanation:\n"
    inputs = tokenizer(prompt, return_tensors="pt").to(device)
    outputs = model.generate(**inputs, max_new_tokens=300, pad_token_id=tokenizer.eos_token_id)
    # Bug fix: decoding the whole sequence echoed the prompt back to the user.
    # Drop the prompt *tokens* before decoding so only new text is returned.
    generated = outputs[0][inputs["input_ids"].shape[-1]:]
    return tokenizer.decode(generated, skip_special_tokens=True).strip()
19
 
20
# UI layout: build the widgets first, then wire them into an Interface.
code_box = gr.Textbox(lines=12, label="Paste Your Buggy Code Here")
language_choice = gr.Dropdown(
    ["Python", "C", "C++", "JavaScript"], label="Code Language", value="Python"
)
answer_box = gr.Textbox(label="Debugged Code & Explanation")

demo = gr.Interface(
    fn=debug_code,
    inputs=[code_box, language_choice],
    outputs=answer_box,
    title="🛠 Eternos Basic Debugger",
    description="Paste your code, choose language, and let AI fix & explain the bugs.",
    theme="default",
)
demo.launch()
 
2
  from transformers import AutoTokenizer, AutoModelForCausalLM
3
  import gradio as gr
4
 
5
# Load model and move it onto the best available device.
device = "cuda" if torch.cuda.is_available() else "cpu"  # prefer GPU when present

model_id = "bigcode/starcoderbase"
tokenizer = AutoTokenizer.from_pretrained(model_id)
# NOTE(review): trust_remote_code=True executes Python shipped inside the model
# repo — keep only because this specific repo is trusted; do not copy blindly.
model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True)
model.to(device)
11
 
12
# Inference function
def debug_code(code, language):
    """Ask the model to debug *code* written in *language*.

    Builds a debugging prompt, generates up to 300 new tokens, and returns
    only the model's continuation (the fixed code + explanation).

    Parameters:
        code: the buggy source code pasted by the user.
        language: display name of the language (e.g. "Python").

    Returns:
        The generated text with the prompt stripped.
    """
    prompt = f"### Debug the following {language} code:\n{code}\n### Fixed Code and Explanation:\n"
    inputs = tokenizer(prompt, return_tensors="pt").to(device)
    outputs = model.generate(**inputs, max_new_tokens=300, pad_token_id=tokenizer.eos_token_id)
    # Bug fix: result[len(prompt):] sliced the *decoded* string by the raw
    # prompt's character count, but skip_special_tokens decoding can normalize
    # whitespace/special tokens, so the offsets misalign (text cut mid-word or
    # prompt fragments leaking through). Slice at the token level instead.
    generated = outputs[0][inputs["input_ids"].shape[-1]:]
    return tokenizer.decode(generated, skip_special_tokens=True).strip()
19
 
20
# Gradio UI: build the widgets first, then wire them into an Interface.
code_box = gr.Textbox(lines=12, label="Paste Your Buggy Code Here")
language_choice = gr.Dropdown(
    ["Python", "C", "C++", "JavaScript"], label="Code Language", value="Python"
)
answer_box = gr.Textbox(label="Fixed Code & Explanation")

demo = gr.Interface(
    fn=debug_code,
    inputs=[code_box, language_choice],
    outputs=answer_box,
    title="🛠 Eternos Debugger — Powered by StarCoder",
    description="Paste buggy code, select language, and get a fix + explanation.",
    theme="default",
)
demo.launch()