Tanveerooooooo committed on
Commit
257e31a
·
verified ·
1 Parent(s): b31ba00

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -18
app.py CHANGED
@@ -2,14 +2,14 @@ import gradio as gr
2
  import torch
3
  from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
4
 
5
- # Load CodeT5+ model
6
  model_id = "Salesforce/codet5p-770m"
7
  tokenizer = AutoTokenizer.from_pretrained(model_id)
8
  model = AutoModelForSeq2SeqLM.from_pretrained(model_id, torch_dtype=torch.float32)
9
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
10
  model.to(device)
11
 
12
- # Prompt templates
13
  language_prompts = {
14
  "Python": "Fix the following Python code:\n",
15
  "C": "Fix the following C code:\n",
@@ -17,11 +17,12 @@ language_prompts = {
17
  "JavaScript": "Fix the following JavaScript code:\n"
18
  }
19
 
20
- # Debugging function
21
  def eternos_debugger(code, error, language):
22
  if not code.strip():
23
  return "❌ Please enter some code to debug."
24
  prompt = language_prompts[language] + code + "\nError:\n" + error + "\nFixed Code:\n"
 
25
  inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=512).to(device)
26
  with torch.no_grad():
27
  outputs = model.generate(
@@ -34,22 +35,10 @@ def eternos_debugger(code, error, language):
34
  result = tokenizer.decode(outputs[0], skip_special_tokens=True)
35
  return result.strip()
36
 
37
- # Custom CSS to set background color
38
- custom_css = """
39
- body {
40
- background-color: #cbedec !important;
41
- color: #000000 !important;
42
- }
43
- textarea, .prose textarea, .prose code, .gr-code, .gr-box {
44
- background-color: white !important;
45
- color: black !important;
46
- }
47
- """
48
-
49
  # Gradio Interface
50
- with gr.Blocks(css=custom_css) as demo:
51
- gr.Markdown("## 🛠️ <span style='color:#003333;'>Eternos — AI Code Debugger</span>")
52
- gr.Markdown("<span style='color:#004444;'>Supports Python, C, C++, JavaScript — Powered by CodeT5+</span>")
53
 
54
  with gr.Row():
55
  code_input = gr.Textbox(label="👨‍💻 Your Code", lines=14, placeholder="Paste your buggy code here...")
 
2
  import torch
3
  from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
4
 
5
+ # Load CodeT5+ model and tokenizer
6
  model_id = "Salesforce/codet5p-770m"
7
  tokenizer = AutoTokenizer.from_pretrained(model_id)
8
  model = AutoModelForSeq2SeqLM.from_pretrained(model_id, torch_dtype=torch.float32)
9
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
10
  model.to(device)
11
 
12
+ # Prompt templates for supported languages
13
  language_prompts = {
14
  "Python": "Fix the following Python code:\n",
15
  "C": "Fix the following C code:\n",
 
17
  "JavaScript": "Fix the following JavaScript code:\n"
18
  }
19
 
20
+ # Debug function
21
  def eternos_debugger(code, error, language):
22
  if not code.strip():
23
  return "❌ Please enter some code to debug."
24
  prompt = language_prompts[language] + code + "\nError:\n" + error + "\nFixed Code:\n"
25
+
26
  inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=512).to(device)
27
  with torch.no_grad():
28
  outputs = model.generate(
 
35
  result = tokenizer.decode(outputs[0], skip_special_tokens=True)
36
  return result.strip()
37
 
 
 
 
 
 
 
 
 
 
 
 
 
38
  # Gradio Interface
39
+ with gr.Blocks(theme=gr.themes.Soft()) as demo:
40
+ gr.Markdown("## 🛠️ Eternos — AI Code Debugger")
41
+ gr.Markdown("Supports Python, C, C++, JavaScript — Powered by CodeT5+")
42
 
43
  with gr.Row():
44
  code_input = gr.Textbox(label="👨‍💻 Your Code", lines=14, placeholder="Paste your buggy code here...")