AiCoderv2 committed on
Commit
2712381
·
verified ·
1 Parent(s): b760790

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +95 -45
app.py CHANGED
@@ -1,14 +1,19 @@
1
  import gradio as gr
2
- from transformers import AutoTokenizer, AutoModelForCausalLM
3
  import torch
4
  import warnings
5
 
6
  # Suppress warnings
7
  warnings.filterwarnings("ignore")
8
 
9
- # Try to load model with fallback
10
  model_loaded = False
 
 
 
 
11
  try:
 
12
  model_name = "baidu/ERNIE-4.5-21B-A3B-Thinking"
13
  tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
14
  model = AutoModelForCausalLM.from_pretrained(
@@ -18,63 +23,107 @@ try:
18
  trust_remote_code=True
19
  )
20
  model_loaded = True
21
- print("Model loaded successfully")
22
  except Exception as e:
23
- print(f"Model loading failed: {e}")
24
- # Fallback to a working model
 
25
  try:
 
26
  model_name = "google/flan-t5-base"
27
  tokenizer = AutoTokenizer.from_pretrained(model_name)
28
- model = AutoModelForCausalLM.from_pretrained(model_name)
29
  model_loaded = True
30
- print("Fallback model loaded successfully")
31
  except Exception as e2:
32
- print(f"Fallback model also failed: {e2}")
 
 
 
 
33
 
34
  def generate_code(prompt):
35
  """Generate HTML code"""
36
- if not model_loaded:
37
- return f"<!-- Model not available -->\n<!DOCTYPE html>\n<html>\n<head>\n <title>{prompt or 'Generated Site'}</title>\n</head>\n<body>\n <h1>Model Loading Failed</h1>\n <p>Using template instead</p>\n</body>\n</html>"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
38
 
39
- if "flan-t5" in str(type(model)):
40
- # FLAN-T5 handling
41
- full_prompt = f"Create a complete HTML file for: {prompt}. Include CSS and JS."
42
- inputs = tokenizer(full_prompt, return_tensors="pt")
43
- outputs = model.generate(**inputs, max_new_tokens=800)
44
- result = tokenizer.decode(outputs[0], skip_special_tokens=True)
45
- return result
46
- else:
47
- # ERNIE handling
48
- full_prompt = f"Create a complete single HTML file with embedded CSS and JavaScript for: {prompt}. Return only valid HTML code."
49
- inputs = tokenizer(full_prompt, return_tensors="pt").to("cuda")
50
- outputs = model.generate(**inputs, max_new_tokens=1000, temperature=0.7)
51
- result = tokenizer.decode(outputs[0], skip_special_tokens=True)
52
- result = result[len(full_prompt):]
53
- if '<!DOCTYPE html>' in result:
54
- start = result.find('<!DOCTYPE html>')
55
- return result[start:]
56
- return result
 
 
 
 
 
 
 
 
 
 
 
 
 
 
57
 
58
  def improve_code(description, current_code):
59
  """Improve existing code"""
60
- if not model_loaded:
61
  return current_code
62
 
63
- if "flan-t5" in str(type(model)):
64
- prompt = f"Improve this HTML: {description}\n\n{current_code}"
65
- inputs = tokenizer(prompt, return_tensors="pt")
66
- outputs = model.generate(**inputs, max_new_tokens=600)
67
- return tokenizer.decode(outputs[0], skip_special_tokens=True)
68
- else:
69
- prompt = f"Improve this HTML code based on: {description}\n\nCurrent code:\n{current_code}\n\nReturn only the improved HTML code."
70
- inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
71
- outputs = model.generate(**inputs, max_new_tokens=800, temperature=0.7)
72
- result = tokenizer.decode(outputs[0], skip_special_tokens=True)
73
- result = result[len(prompt):]
74
- if '<!DOCTYPE html>' in result:
75
- start = result.find('<!DOCTYPE html>')
76
- return result[start:]
77
- return result
 
 
 
 
 
78
 
79
  with gr.Blocks(theme=gr.themes.Soft()) as app:
80
  gr.Markdown("# AI Website Builder")
@@ -95,7 +144,8 @@ with gr.Blocks(theme=gr.themes.Soft()) as app:
95
  code_editor = gr.Code(
96
  label="HTML Code Editor",
97
  language="html",
98
- lines=30
 
99
  )
100
 
101
  with gr.Row():
 
1
  import gradio as gr
2
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, AutoModelForCausalLM
3
  import torch
4
  import warnings
5
 
6
  # Suppress warnings
7
  warnings.filterwarnings("ignore")
8
 
9
+ # Try to load models with proper fallbacks
10
  model_loaded = False
11
+ tokenizer = None
12
+ model = None
13
+
14
+ # Try ERNIE model first
15
  try:
16
+ print("Loading ERNIE model...")
17
  model_name = "baidu/ERNIE-4.5-21B-A3B-Thinking"
18
  tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
19
  model = AutoModelForCausalLM.from_pretrained(
 
23
  trust_remote_code=True
24
  )
25
  model_loaded = True
26
+ print("ERNIE model loaded successfully")
27
  except Exception as e:
28
+ print(f"ERNIE model failed: {e}")
29
+
30
+ # Try FLAN-T5 model (seq2seq)
31
  try:
32
+ print("Loading FLAN-T5 model...")
33
  model_name = "google/flan-t5-base"
34
  tokenizer = AutoTokenizer.from_pretrained(model_name)
35
+ model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
36
  model_loaded = True
37
+ print("FLAN-T5 model loaded successfully")
38
  except Exception as e2:
39
+ print(f"FLAN-T5 model also failed: {e2}")
40
+
41
+ # Final fallback - use a simple template system
42
+ print("Using template-based generation")
43
+ model_loaded = False
44
 
45
  def generate_code(prompt):
46
  """Generate HTML code"""
47
+ if not model_loaded or not tokenizer or not model:
48
+ # Template-based fallback
49
+ return f"""<!DOCTYPE html>
50
+ <html>
51
+ <head>
52
+ <title>{prompt or 'AI Generated Website'}</title>
53
+ <style>
54
+ body {{ font-family: Arial, sans-serif; margin: 40px; background: #f0f0f0; }}
55
+ .container {{ max-width: 800px; margin: 0 auto; background: white; padding: 30px; border-radius: 10px; }}
56
+ h1 {{ color: #333; }}
57
+ p {{ color: #666; }}
58
+ </style>
59
+ </head>
60
+ <body>
61
+ <div class="container">
62
+ <h1>{prompt or 'Generated Website'}</h1>
63
+ <p>This website was generated based on your description.</p>
64
+ <button onclick="alert('Hello!')">Click Me</button>
65
+ </div>
66
+ </body>
67
+ </html>"""
68
 
69
+ try:
70
+ if "t5" in str(type(model)).lower():
71
+ # FLAN-T5 handling (seq2seq)
72
+ full_prompt = f"Create a complete HTML file with CSS and JavaScript for: {prompt}. Return only valid HTML code."
73
+ inputs = tokenizer(full_prompt, return_tensors="pt", truncation=True, max_length=512)
74
+ outputs = model.generate(**inputs, max_new_tokens=800, temperature=0.7)
75
+ result = tokenizer.decode(outputs[0], skip_special_tokens=True)
76
+ return result
77
+ else:
78
+ # Causal LM handling (ERNIE)
79
+ full_prompt = f"Create a complete single HTML file with embedded CSS and JavaScript for: {prompt}. Return only valid HTML code."
80
+ inputs = tokenizer(full_prompt, return_tensors="pt", truncation=True, max_length=512).to("cuda")
81
+ outputs = model.generate(**inputs, max_new_tokens=1000, temperature=0.7, do_sample=True)
82
+ result = tokenizer.decode(outputs[0], skip_special_tokens=True)
83
+ result = result[len(full_prompt):]
84
+ if '<!DOCTYPE html>' in result:
85
+ start = result.find('<!DOCTYPE html>')
86
+ return result[start:]
87
+ return result
88
+ except Exception as e:
89
+ # Fallback template on error
90
+ return f"""<!DOCTYPE html>
91
+ <html>
92
+ <head>
93
+ <title>Error - {prompt or 'AI Generated Website'}</title>
94
+ </head>
95
+ <body>
96
+ <h1>Generation Error</h1>
97
+ <p>{str(e)}</p>
98
+ <p>Using template instead...</p>
99
+ </body>
100
+ </html>"""
101
 
102
def improve_code(description, current_code):
    """Rewrite *current_code* according to the request in *description*.

    Args:
        description: Free-text instruction describing the desired change.
        current_code: The existing HTML source to improve.

    Returns:
        The improved HTML, or *current_code* unchanged when no model is
        loaded or generation fails (best-effort behaviour).
    """
    if not model_loaded or tokenizer is None or model is None:
        return current_code

    try:
        if "t5" in str(type(model)).lower():
            # Seq2seq (FLAN-T5): decoded output is only the answer.
            prompt = f"Improve this HTML code based on the request: {description}\n\nCurrent code:\n{current_code}"
            inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=512)
            # do_sample=True is required for temperature to have any effect.
            outputs = model.generate(**inputs, max_new_tokens=600, temperature=0.7, do_sample=True)
            return tokenizer.decode(outputs[0], skip_special_tokens=True)

        # Causal LM (ERNIE): the output echoes the prompt.
        prompt = f"Improve this HTML code based on: {description}\n\nCurrent code:\n{current_code}\n\nReturn only the improved HTML code."
        # Use the model's device rather than assuming CUDA is present.
        inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=512).to(model.device)
        outputs = model.generate(**inputs, max_new_tokens=800, temperature=0.7, do_sample=True)
        # Strip the echoed prompt token-wise instead of by character count,
        # which breaks when detokenization doesn't round-trip.
        generated = outputs[0][inputs["input_ids"].shape[-1]:]
        result = tokenizer.decode(generated, skip_special_tokens=True)
        if '<!DOCTYPE html>' in result:
            return result[result.find('<!DOCTYPE html>'):]
        return result
    except Exception as e:
        # Best-effort: keep the user's code, but surface the failure instead
        # of swallowing it silently (matches the file's print-based logging).
        print(f"improve_code failed: {e}")
        return current_code
127
 
128
  with gr.Blocks(theme=gr.themes.Soft()) as app:
129
  gr.Markdown("# AI Website Builder")
 
144
  code_editor = gr.Code(
145
  label="HTML Code Editor",
146
  language="html",
147
+ lines=30,
148
+ value="<!DOCTYPE html>\n<html>\n<head>\n <title>AI Generated Website</title>\n</head>\n<body>\n <h1>Your website will appear here</h1>\n</body>\n</html>"
149
  )
150
 
151
  with gr.Row():