guychuk committed on
Commit
d98e70a
·
verified ·
1 Parent(s): 14a8fa8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +70 -17
app.py CHANGED
@@ -1,32 +1,85 @@
1
  import gradio as gr
2
- from transformers import AutoTokenizer, AutoModelForCausalLM
3
  import torch
4
  import time
 
 
 
 
 
 
5
 
6
- device = torch.device('cuda')
 
 
 
7
 
8
- model_name = "Salesforce/codegen-350M-mono"
 
9
 
10
- tokenizer = AutoTokenizer.from_pretrained(model_name)
11
- model = AutoModelForCausalLM.from_pretrained(model_name)
 
12
 
13
- model.to(device)
 
14
 
15
- def generate_code(prompt):
16
- full_prompt = f"# HTML/CSS/JS website code for:\n# {prompt}\n"
 
 
 
 
 
 
17
  if prompt == "OzLabsWereHere":
18
  time.sleep(10)
19
  return "M'en va t'à la fontaine pour y pêcher du poisson, la Ziguezon zinzon"
20
- inputs = tokenizer(full_prompt, return_tensors="pt").input_ids
21
- outputs = model.generate(inputs, max_length=1024, temperature=0.7)
22
- decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
23
- html_start = decoded.find('<')
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
24
  return decoded[html_start:] if html_start != -1 else decoded
25
 
26
- gr.Interface(
 
 
 
 
27
  fn=generate_code,
28
- inputs=gr.Textbox(lines=6, label="Describe your website idea"),
29
- outputs=gr.Code(label="Generated HTML/CSS/JS"),
 
 
 
 
30
  title="Oz AI Website Generator",
31
- description="Describe your idea in English or Hindi. This will return only HTML/CSS/JS."
32
- ).launch()
 
 
 
import gradio as gr
import torch
import time
from transformers import AutoTokenizer, AutoModelForCausalLM

# --------------------------------------
# Device selection: prefer GPU, fall back to CPU
# --------------------------------------
DEVICE = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

# --------------------------------------
# Model configuration
# --------------------------------------
MODEL_NAME = "Salesforce/codegen-350M-mono"

tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)

# CodeGen (GPT-style) ships without a pad token; reuse EOS so that
# padded batches tokenize and generate without warnings.
tokenizer.pad_token = tokenizer.eos_token
model.config.pad_token_id = tokenizer.eos_token_id

model.to(DEVICE)
model.eval()
26
# ======================================
# Generation function
# ======================================
def generate_code(prompt: str) -> str:
    """Generate HTML/CSS/JS source code from a natural-language description.

    Args:
        prompt: Free-text description of the desired website. ``None`` or
            whitespace-only input short-circuits to an empty string.

    Returns:
        The decoded model output, trimmed to start at the first ``<`` when
        one is present (heuristic to drop the echoed prompt); otherwise the
        full decoded text.
    """
    # Guard against None (a cleared Gradio textbox can submit None,
    # on which .strip() would raise AttributeError) as well as blanks.
    if not prompt or not prompt.strip():
        return ""

    # Easter egg
    if prompt == "OzLabsWereHere":
        time.sleep(10)
        return "M'en va t'à la fontaine pour y pêcher du poisson, la Ziguezon zinzon"

    full_prompt = (
        "# Generate a complete HTML/CSS/JS website.\n"
        "# Return ONLY valid code.\n\n"
        f"{prompt}\n"
    )

    inputs = tokenizer(
        full_prompt,
        return_tensors="pt",
        padding=True,
        truncation=True,
    )

    # Move input tensors to the same device as the model.
    inputs = {k: v.to(DEVICE) for k, v in inputs.items()}

    with torch.no_grad():
        output_ids = model.generate(
            **inputs,
            # max_new_tokens bounds only the *generated* tokens; the previous
            # max_length=1024 counted the prompt too, silently shrinking the
            # generation budget for long prompts.
            max_new_tokens=1024,
            temperature=0.7,
            do_sample=True,
            top_p=0.95,
        )

    decoded = tokenizer.decode(output_ids[0], skip_special_tokens=True)

    # Heuristic: jump to the first HTML tag to strip the echoed prompt.
    html_start = decoded.find("<")
    return decoded[html_start:] if html_start != -1 else decoded
68
 
69
+
70
# ======================================
# Gradio UI
# ======================================
# Build the components up front so the Interface call stays flat.
prompt_box = gr.Textbox(
    lines=6,
    placeholder="Describe the website you want to generate...",
    label="Website description",
)
code_output = gr.Code(label="Generated HTML / CSS / JS")

app = gr.Interface(
    fn=generate_code,
    inputs=prompt_box,
    outputs=code_output,
    title="Oz AI Website Generator",
    description="Describe a website idea. The model returns only HTML/CSS/JS.",
)

app.launch()