OrlandoHugBot committed
Commit 6fd4fd2 · verified · 1 Parent(s): 96e3744

Update app.py

Files changed (1):
  1. app.py +136 -173
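In one sketch: the commit replaces eager global loading (plus a post-hoc RoPE-buffer repair pass) with lazy pipeline construction inside the @spaces.GPU context, so every tensor, including registered buffers, is created on a real GPU. Below is a minimal sketch of that pattern, assuming the names from the diff (load_models_to_cpu, _cpu_components, pipe); the Linear layer is a stand-in for the real UniPic components, not the app's actual models:

import torch

try:
    import spaces  # available on Hugging Face Spaces
except ImportError:
    class spaces:  # local mock, mirroring the app's own fallback
        @staticmethod
        def GPU(duration=60):
            def wrap(fn):
                return fn
            return wrap

_cpu_components = {}  # heavy weights, loaded once on the CPU at import time
pipe = None           # the pipeline itself is created lazily, on the GPU


def load_models_to_cpu():
    # Runs at global scope: no CUDA calls here, so there is nothing for ZeroGPU to intercept.
    _cpu_components["transformer"] = torch.nn.Linear(8, 8).eval()  # stand-in weight


load_models_to_cpu()


@spaces.GPU(duration=120)
def generate(x: torch.Tensor) -> torch.Tensor:
    # First call inside the GPU context: move the weights and build the "pipeline" here,
    # so buffers registered via register_buffer are initialized on a real CUDA device.
    global pipe
    if pipe is None:
        device = "cuda" if torch.cuda.is_available() else "cpu"
        pipe = _cpu_components["transformer"].to(device)
    return pipe(x.to(next(pipe.parameters()).device))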
app.py CHANGED
@@ -1,11 +1,11 @@
 """
 UniPic-3 DMD Multi-Image Composition
-Hugging Face Space - ZeroGPU-optimized version
+Hugging Face Space - ZeroGPU-optimized version V2
 
-Architecture notes:
-1. Models are loaded at global scope (ZeroGPU intercepts CUDA calls)
-2. The @spaces.GPU decorator is used only for actual inference
-3. This avoids reloading the models on every request
+Key fix: a lazy-loading scheme
+- Model components are loaded globally on the CPU
+- The pipeline is created and moved to the GPU only on the first call inside the @spaces.GPU function
+- This ensures all tensors are initialized in the real GPU environment
 """
 
 import gradio as gr
@@ -18,8 +18,10 @@ import sys
 try:
     import spaces
     HF_SPACES = True
+    print("✅ Running in Hugging Face Spaces with ZeroGPU")
 except ImportError:
     HF_SPACES = False
+    print("⚠️ Running locally (no ZeroGPU)")
     # Mock for local development
     class spaces:
         @staticmethod
@@ -32,60 +34,94 @@ except ImportError:
 sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
 
 # Model configuration
-MODEL_NAME = os.environ.get("MODEL_NAME", "Skywork/Unipic3-DMD")
-TRANSFORMER_PATH = os.environ.get("TRANSFORMER_PATH", "Skywork/Unipic3-DMD/ema_transformer")
-
-# ============================================================
-# Load models globally (ZeroGPU intercepts CUDA calls)
-# ============================================================
-
-print("🚀 Loading models...")
-
-try:
-    from pipeline_qwenimage_edit import QwenImageEditPipeline
-except ImportError:
-    from diffusers import QwenImageEditPipeline
-
-from diffusers import (
-    FlowMatchEulerDiscreteScheduler,
-    QwenImageTransformer2DModel,
-    AutoencoderKLQwenImage
-)
-from transformers import AutoModel, AutoTokenizer, Qwen2VLProcessor
-
-# Choose the dtype
-dtype = torch.bfloat16
-
-# Load scheduler (CPU)
-print(" Loading scheduler...")
-scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained(
-    MODEL_NAME, subfolder='scheduler'
-)
-
-# Load tokenizer & processor (CPU)
-print(" Loading tokenizer & processor...")
-tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, subfolder='tokenizer')
-processor = Qwen2VLProcessor.from_pretrained(MODEL_NAME, subfolder='processor')
-
-# Load text encoder
-print(" Loading text_encoder...")
-text_encoder = AutoModel.from_pretrained(
-    MODEL_NAME,
-    subfolder='text_encoder',
-    torch_dtype=dtype,
-).eval()
-
-# Load transformer
-print(" Loading transformer...")
-def load_transformer():
-    """Load transformer with proper path handling"""
-    if os.path.exists(TRANSFORMER_PATH):
-        # Local path
-        if os.path.isdir(TRANSFORMER_PATH):
-            config_path = os.path.join(TRANSFORMER_PATH, "config.json")
-            if os.path.exists(config_path):
+MODEL_NAME = os.environ.get("MODEL_NAME", "/data_genie/genie/chris/Unipic3-DMD")
+TRANSFORMER_PATH = os.environ.get("TRANSFORMER_PATH", "/data_genie/genie/chris/Unipic3-DMD/ema_transformer")
+
+# ============================================================
+# Global state - the pipeline is initialized lazily
+# ============================================================
+
+# Only lightweight components and CPU model weights are loaded at global scope
+pipe = None  # initialized lazily
+_models_loaded = False
+
+# Holds the model components kept on the CPU
+_cpu_components = {}
+
+
+def load_models_to_cpu():
+    """
+    Load all model components on the CPU.
+    This step runs at global scope and needs no GPU.
+    """
+    global _cpu_components, _models_loaded
+
+    if _models_loaded:
+        return
+
+    print("🚀 Loading models to CPU...")
+
+    try:
+        from pipeline_qwenimage_edit import QwenImageEditPipeline
+    except ImportError:
+        from diffusers import QwenImageEditPipeline
+
+    from diffusers import (
+        FlowMatchEulerDiscreteScheduler,
+        QwenImageTransformer2DModel,
+        AutoencoderKLQwenImage
+    )
+    from transformers import AutoModel, AutoTokenizer, Qwen2VLProcessor
+
+    dtype = torch.bfloat16
+
+    # Load scheduler (CPU, lightweight)
+    print(" Loading scheduler...")
+    _cpu_components['scheduler'] = FlowMatchEulerDiscreteScheduler.from_pretrained(
+        MODEL_NAME, subfolder='scheduler'
+    )
+
+    # Load tokenizer & processor (CPU, lightweight)
+    print(" Loading tokenizer & processor...")
+    _cpu_components['tokenizer'] = AutoTokenizer.from_pretrained(MODEL_NAME, subfolder='tokenizer')
+    _cpu_components['processor'] = Qwen2VLProcessor.from_pretrained(MODEL_NAME, subfolder='processor')
+
+    # Load text encoder to CPU
+    print(" Loading text_encoder to CPU...")
+    _cpu_components['text_encoder'] = AutoModel.from_pretrained(
+        MODEL_NAME,
+        subfolder='text_encoder',
+        torch_dtype=dtype,
+    ).eval()
+
+    # Load transformer to CPU
+    print(" Loading transformer to CPU...")
+    def load_transformer():
+        if os.path.exists(TRANSFORMER_PATH):
+            if os.path.isdir(TRANSFORMER_PATH):
+                config_path = os.path.join(TRANSFORMER_PATH, "config.json")
+                if os.path.exists(config_path):
+                    return QwenImageTransformer2DModel.from_pretrained(
+                        TRANSFORMER_PATH,
+                        torch_dtype=dtype,
+                        use_safetensors=False
+                    ).eval()
+                else:
+                    return QwenImageTransformer2DModel.from_pretrained(
+                        TRANSFORMER_PATH,
+                        subfolder='transformer',
+                        torch_dtype=dtype,
+                        use_safetensors=False
+                    ).eval()
+            raise ValueError(f"Invalid transformer path: {TRANSFORMER_PATH}")
+        else:
+            path_parts = TRANSFORMER_PATH.split('/')
+            if len(path_parts) >= 3:
+                repo_id = '/'.join(path_parts[:2])
+                subfolder = '/'.join(path_parts[2:])
                 return QwenImageTransformer2DModel.from_pretrained(
-                    TRANSFORMER_PATH,
+                    repo_id,
+                    subfolder=subfolder,
                     torch_dtype=dtype,
                     use_safetensors=False
                 ).eval()
@@ -96,101 +132,30 @@ def load_transformer():
                     torch_dtype=dtype,
                     use_safetensors=False
                 ).eval()
-        raise ValueError(f"Invalid transformer path: {TRANSFORMER_PATH}")
-    else:
-        # HuggingFace repo path
-        path_parts = TRANSFORMER_PATH.split('/')
-        if len(path_parts) >= 3:
-            repo_id = '/'.join(path_parts[:2])
-            subfolder = '/'.join(path_parts[2:])
-            return QwenImageTransformer2DModel.from_pretrained(
-                repo_id,
-                subfolder=subfolder,
-                torch_dtype=dtype,
-                use_safetensors=False
-            ).eval()
-        else:
-            return QwenImageTransformer2DModel.from_pretrained(
-                TRANSFORMER_PATH,
-                subfolder='transformer',
-                torch_dtype=dtype,
-                use_safetensors=False
-            ).eval()
-
-transformer = load_transformer()
-
-# Load VAE
-print(" Loading VAE...")
-vae = AutoencoderKLQwenImage.from_pretrained(
-    MODEL_NAME,
-    subfolder='vae',
-    torch_dtype=dtype,
-).eval()
-
-# Create pipeline
-print(" Creating pipeline...")
-pipe = QwenImageEditPipeline(
-    scheduler=scheduler,
-    vae=vae,
-    text_encoder=text_encoder,
-    tokenizer=tokenizer,
-    processor=processor,
-    transformer=transformer
-)
-
-# Move to CUDA (ZeroGPU intercepts this call)
-pipe.to('cuda')
-
-print("✅ Models loaded successfully!")
-
-
-def fix_rope_buffers(model):
-    """
-    Fix the buffer tensors in RoPE (Rotary Position Embedding).
-    Under ZeroGPU, tensors registered via register_buffer may not be moved to the GPU correctly.
-
-    This function walks every submodule of the model and checks/fixes the following buffers:
-    - inv_freq: the core RoPE frequency buffer
-    - cos_cached / sin_cached: cos/sin values cached by some implementations
-    - every other buffer not yet on CUDA
-    """
-    device = 'cuda'
-    fixed_count = 0
-
-    for name, module in model.named_modules():
-        # Fix the inv_freq buffer (the core RoPE buffer)
-        if hasattr(module, 'inv_freq') and module.inv_freq is not None:
-            if module.inv_freq.device.type != 'cuda':
-                module.inv_freq = module.inv_freq.to(device)
-                fixed_count += 1
-                print(f" [FIX] Moved {name}.inv_freq to {device}")
-
-        # Fix cos_cached and sin_cached (some RoPE implementations cache these)
-        if hasattr(module, 'cos_cached') and module.cos_cached is not None:
-            if module.cos_cached.device.type != 'cuda':
-                module.cos_cached = module.cos_cached.to(device)
-                fixed_count += 1
-                print(f" [FIX] Moved {name}.cos_cached to {device}")
-
-        if hasattr(module, 'sin_cached') and module.sin_cached is not None:
-            if module.sin_cached.device.type != 'cuda':
-                module.sin_cached = module.sin_cached.to(device)
-                fixed_count += 1
-                print(f" [FIX] Moved {name}.sin_cached to {device}")
-
-        # Generic: fix all remaining buffers (a more thorough pass)
-        for buf_name, buf in module.named_buffers(recurse=False):
-            if buf is not None and buf.device.type != 'cuda':
-                setattr(module, buf_name, buf.to(device))
-                fixed_count += 1
-                print(f" [FIX] Moved {name}.{buf_name} to {device}")
-
-    return fixed_count
-
-
+
+    _cpu_components['transformer'] = load_transformer()
+
+    # Load VAE to CPU
+    print(" Loading VAE to CPU...")
+    _cpu_components['vae'] = AutoencoderKLQwenImage.from_pretrained(
+        MODEL_NAME,
+        subfolder='vae',
+        torch_dtype=dtype,
+    ).eval()
+
+    # Keep the pipeline class around for later use
+    _cpu_components['pipeline_class'] = QwenImageEditPipeline
+
+    _models_loaded = True
+    print("✅ All models loaded to CPU!")
+
+
+# Load to CPU immediately at global scope
+load_models_to_cpu()
+
 
 # ============================================================
-# GPU inference function (contains only the actual inference logic)
+# GPU inference function - the pipeline is initialized here
 # ============================================================
 
 @spaces.GPU(duration=120)
@@ -202,31 +167,44 @@ def generate_image(
     num_steps: int
 ) -> Image.Image:
     """
-    GPU inference function - contains only the actual inference logic.
-    The models are already loaded globally; only inference runs here.
+    GPU inference function.
+    Key point: the pipeline is created here, so it is initialized in the real GPU environment.
     """
+    global pipe
+
     print(f"🎨 Generating with {len(images)} image(s)...")
     print(f" Prompt: {prompt[:50]}...")
     print(f" Steps: {num_steps}, CFG: {true_cfg_scale}, Seed: {seed}")
 
-    # Step 1: move the pipeline to CUDA
-    pipe.to('cuda')
-
-    # Step 2: key fix - manually repair the RoPE buffers
-    # ZeroGPU may not have moved tensors registered via register_buffer correctly
-    print(" [DEBUG] Fixing RoPE buffers...")
-    fixed = 0
-    fixed += fix_rope_buffers(pipe.text_encoder)
-    fixed += fix_rope_buffers(pipe.transformer)
-    fixed += fix_rope_buffers(pipe.vae)
-    print(f" [DEBUG] Fixed {fixed} buffer(s)")
-
-    # Debug info: check model devices
-    print(f" [DEBUG] text_encoder device: {next(pipe.text_encoder.parameters()).device}")
-    print(f" [DEBUG] transformer device: {next(pipe.transformer.parameters()).device}")
-    print(f" [DEBUG] vae device: {next(pipe.vae.parameters()).device}")
+    # Key fix: create the pipeline in the real GPU environment
+    if pipe is None:
+        print(" [INIT] Creating pipeline on real GPU...")
+
+        # Approach: move the CPU models to the GPU, then create the pipeline
+        device = 'cuda'
+
+        # Move the models to the GPU
+        text_encoder = _cpu_components['text_encoder'].to(device)
+        transformer = _cpu_components['transformer'].to(device)
+        vae = _cpu_components['vae'].to(device)
+
+        # Create the pipeline
+        PipelineClass = _cpu_components['pipeline_class']
+        pipe = PipelineClass(
+            scheduler=_cpu_components['scheduler'],
+            vae=vae,
+            text_encoder=text_encoder,
+            tokenizer=_cpu_components['tokenizer'],
+            processor=_cpu_components['processor'],
+            transformer=transformer
+        )
+
+        print(" [INIT] Pipeline created successfully!")
+    else:
+        # The pipeline already exists; make sure it is on the right device
+        pipe.to('cuda')
 
-    # Debug info: check model devices
+    # Verify devices
     print(f" [DEBUG] text_encoder device: {next(pipe.text_encoder.parameters()).device}")
     print(f" [DEBUG] transformer device: {next(pipe.transformer.parameters()).device}")
    print(f" [DEBUG] vae device: {next(pipe.vae.parameters()).device}")
@@ -320,7 +298,6 @@ def update_image_visibility(num):
 
 CUSTOM_CSS = """
 @import url('https://fonts.googleapis.com/css2?family=Outfit:wght@300;400;500;600;700&family=JetBrains+Mono:wght@400;500&display=swap');
-
 :root {
     --primary: #6366f1;
    --primary-dark: #4f46e5;
@@ -336,13 +313,11 @@ CUSTOM_CSS = """
336
  --gradient-1: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
337
  --gradient-hero: linear-gradient(135deg, #0f0f23 0%, #1a1a3e 50%, #252552 100%);
338
  }
339
-
340
  .gradio-container {
341
  font-family: 'Outfit', sans-serif !important;
342
  background: var(--gradient-hero) !important;
343
  min-height: 100vh;
344
  }
345
-
346
  .main-header {
347
  text-align: center;
348
  padding: 2rem 1rem;
@@ -351,7 +326,6 @@ CUSTOM_CSS = """
     margin-bottom: 2rem;
     border: 1px solid rgba(99, 102, 241, 0.2);
 }
-
 .main-header h1 {
     font-size: 2.5rem;
     font-weight: 700;
@@ -361,14 +335,12 @@ CUSTOM_CSS = """
     background-clip: text;
     margin-bottom: 0.5rem;
 }
-
 .main-header p {
     color: var(--text-muted);
     font-size: 1.1rem;
     max-width: 600px;
     margin: 0 auto;
 }
-
 .feature-badges {
     display: flex;
     gap: 1rem;
@@ -376,7 +348,6 @@ CUSTOM_CSS = """
     flex-wrap: wrap;
     margin-top: 1.5rem;
 }
-
 .badge {
     display: inline-flex;
     align-items: center;
@@ -389,7 +360,6 @@ CUSTOM_CSS = """
     font-size: 0.875rem;
     font-weight: 500;
 }
-
 .section-header {
     display: flex;
     align-items: center;
@@ -398,14 +368,12 @@ CUSTOM_CSS = """
     padding-bottom: 0.75rem;
     border-bottom: 1px solid var(--border);
 }
-
 .section-header h3 {
     font-size: 1.125rem;
     font-weight: 600;
     color: var(--text);
     margin: 0;
 }
-
 .generate-btn {
     background: var(--gradient-1) !important;
     border: none !important;
@@ -418,12 +386,10 @@ CUSTOM_CSS = """
     transition: all 0.3s ease !important;
     box-shadow: 0 4px 15px rgba(99, 102, 241, 0.4) !important;
 }
-
 .generate-btn:hover {
     transform: translateY(-2px) !important;
     box-shadow: 0 6px 20px rgba(99, 102, 241, 0.5) !important;
 }
-
 .output-image {
     border-radius: 16px;
     overflow: hidden;
@@ -431,7 +397,6 @@ CUSTOM_CSS = """
     background: linear-gradient(var(--surface-light), var(--surface-light)) padding-box,
                 var(--gradient-1) border-box;
 }
-
 @media (max-width: 768px) {
     .main-header h1 {
         font-size: 1.75rem;
@@ -558,14 +523,12 @@ def create_demo():
                 type="pil",
                 label="Output",
                 elem_classes=["output-image"],
-                show_download_button=True
             )
 
             status_text = gr.Textbox(
                 label="Status",
                 value="✨ Ready! Upload images and click Generate.",
                 interactive=False,
-                show_copy_button=False
             )
 
             gr.HTML("""