IMMORTALJAY committed on
Commit
2e9424b
·
verified ·
1 Parent(s): 8120f6a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +15 -10
app.py CHANGED
@@ -1,20 +1,25 @@
1
  import gradio as gr
2
- from transformers import AutoModelForCausalLM, AutoTokenizer
3
- import torch
 
 
4
 
5
- model_id = "Salesforce/codegen-350M-mono" # SMALL model that works on HF free tier
6
  tokenizer = AutoTokenizer.from_pretrained(model_id)
7
  model = AutoModelForCausalLM.from_pretrained(model_id)
8
 
9
  def generate_code(prompt):
10
  inputs = tokenizer(prompt, return_tensors="pt")
11
- outputs = model.generate(inputs["input_ids"], max_new_tokens=256, do_sample=True)
12
- return tokenizer.decode(outputs[0], skip_special_tokens=True)
 
13
 
14
- gr.Interface(
 
15
  fn=generate_code,
16
- inputs=gr.Textbox(label="Enter Prompt", lines=4, placeholder="e.g., make a login form using HTML and CSS"),
17
  outputs=gr.Code(label="Generated Code"),
18
- title="IMMORTAL Text to Code AI",
19
- description="Enter a prompt to generate code using AI (based on CodeGen 350M)."
20
- ).launch()
 
 
 
1
  import gradio as gr
2
+ from transformers import AutoTokenizer, AutoModelForCausalLM
3
+
4
# Small CodeGen checkpoint that fits on the Hugging Face free tier.
model_id = "Salesforce/codegen-350M-mono"

# Load the tokenizer and model once at import time so every request
# reuses the same in-memory weights.
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
10
def generate_code(prompt):
    """Generate code text from a natural-language prompt.

    Tokenizes *prompt*, samples up to 300 new tokens from the CodeGen
    model, and returns the decoded string (special tokens stripped).
    Note: the decoded output includes the original prompt as a prefix,
    since the full sequence is decoded.
    """
    encoded = tokenizer(prompt, return_tensors="pt")
    # ``**encoded`` forwards both input_ids and attention_mask.
    generated = model.generate(**encoded, max_new_tokens=300, do_sample=True)
    return tokenizer.decode(generated[0], skip_special_tokens=True)
15
 
16
+ # Gradio app
17
+ demo = gr.Interface(
18
  fn=generate_code,
19
+ inputs=gr.Textbox(label="Describe your code idea"),
20
  outputs=gr.Code(label="Generated Code"),
21
+ title="Text to Code Generator",
22
+ description="Enter any code-related prompt and watch it generate live."
23
+ )
24
+
25
+ demo.launch()