tchung1970 Claude Opus 4.5 committed on
Commit
d41a998
·
1 Parent(s): c9d3110

Consolidate to single app.py entry point

Browse files

- Rename app_diffusers.py to app.py (uses OvisImagePipeline)
- Backup original modular app.py as app_old.py for reference
- Update README.md app_file and short_description
- Update CLAUDE.md documentation

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>

Files changed (4) hide show
  1. CLAUDE.md +4 -7
  2. README.md +2 -2
  3. app.py +55 -105
  4. app_diffusers.py → app_old.py +105 -55
CLAUDE.md CHANGED
@@ -12,12 +12,9 @@ Ovis-Image is a 7-billion parameter text-to-image generation model optimized for
12
  # Install dependencies
13
  pip install -r requirements.txt
14
 
15
- # Run web UI (custom implementation)
16
  python app.py
17
 
18
- # Run web UI (diffusers-based, simpler)
19
- python app_diffusers.py
20
-
21
  # Standalone inference test
22
  python ovis_image/test.py \
23
  --model_path <path/to/ovis_image.safetensors> \
@@ -40,9 +37,9 @@ Models auto-download from HuggingFace Hub. Set `HF_TOKEN` environment variable i
40
  - `ovis_image/model/layers.py` - Transformer blocks, attention, embeddings
41
  - `ovis_image/model/ops.py` - Attention backends (Flash3, SDPA, eager)
42
 
43
- **Two Entry Points:**
44
- - `app.py` - Loads individual components (modular, educational)
45
- - `app_diffusers.py` - Uses `OvisImagePipeline.from_pretrained()` (simpler)
46
 
47
  ## Configuration
48
 
 
12
  # Install dependencies
13
  pip install -r requirements.txt
14
 
15
+ # Run web UI (uses diffusers pipeline)
16
  python app.py
17
 
 
 
 
18
  # Standalone inference test
19
  python ovis_image/test.py \
20
  --model_path <path/to/ovis_image.safetensors> \
 
37
  - `ovis_image/model/layers.py` - Transformer blocks, attention, embeddings
38
  - `ovis_image/model/ops.py` - Attention backends (Flash3, SDPA, eager)
39
 
40
+ **Entry Point:**
41
+ - `app.py` - Uses `OvisImagePipeline.from_pretrained()` for simple, clean loading
42
+ - `app_old.py` - Backup of modular implementation that loads individual components (for reference)
43
 
44
  ## Configuration
45
 
README.md CHANGED
@@ -5,10 +5,10 @@ colorFrom: purple
5
  colorTo: red
6
  sdk: gradio
7
  sdk_version: 6.0.2
8
- app_file: app_diffusers.py
9
  pinned: false
10
  license: apache-2.0
11
- short_description: Official demo for Ovis-Image
12
  thumbnail: >-
13
  https://cdn-uploads.huggingface.co/production/uploads/636f4c6b5d2050767e4a1491/yMWqt9g4PC2SGCrgk6kRj.png
14
  ---
 
5
  colorTo: red
6
  sdk: gradio
7
  sdk_version: 6.0.2
8
+ app_file: app.py
9
  pinned: false
10
  license: apache-2.0
11
+ short_description: Ovis-Image UI with Apple-style CSS
12
  thumbnail: >-
13
  https://cdn-uploads.huggingface.co/production/uploads/636f4c6b5d2050767e4a1491/yMWqt9g4PC2SGCrgk6kRj.png
14
  ---
app.py CHANGED
@@ -4,74 +4,27 @@ import gradio as gr
4
  import spaces
5
  import random
6
  import numpy as np
7
- from safetensors.torch import load_file
8
- from huggingface_hub import hf_hub_download
9
 
10
  from diffusers.utils import logging
11
  from PIL import Image
12
 
13
- from ovis_image.model.tokenizer import build_ovis_tokenizer
14
- from ovis_image.model.autoencoder import load_ae
15
- from ovis_image.model.hf_embedder import OvisEmbedder
16
- from ovis_image.model.model import OvisImageModel
17
- from ovis_image.sampling import generate_image
18
- from ovis_image import ovis_image_configs
19
 
20
  logging.set_verbosity_error()
21
 
22
- # DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
23
  MAX_SEED = np.iinfo(np.int32).max
24
 
25
  device = "cuda"
26
  _dtype = torch.bfloat16
27
  hf_token = os.getenv("HF_TOKEN")
28
 
29
- print("init ovis_image")
30
- model_config = ovis_image_configs["ovis-image-7b"]
31
- ovis_image = OvisImageModel(model_config)
32
- ovis_image_path = hf_hub_download(
33
- repo_id="AIDC-AI/Ovis-Image-7B",
34
- filename="ovis_image.safetensors",
35
- token=hf_token,
36
- )
37
- model_state_dict = load_file(ovis_image_path)
38
- missing_keys, unexpected_keys = ovis_image.load_state_dict(model_state_dict)
39
- print(f"Load Missing Keys {missing_keys}")
40
- print(f"Load Unexpected Keys {unexpected_keys}")
41
- ovis_image = ovis_image.to(device=device, dtype=_dtype)
42
- ovis_image.eval()
43
-
44
- print("init vae")
45
- vae_path = hf_hub_download(
46
- repo_id="AIDC-AI/Ovis-Image-7B",
47
- filename="ae.safetensors",
48
  token=hf_token,
 
49
  )
50
- autoencoder = load_ae(
51
- vae_path,
52
- model_config.autoencoder_params,
53
- device=device,
54
- dtype=_dtype,
55
- random_init=False,
56
- )
57
- autoencoder.eval()
58
-
59
- print("init ovis")
60
- # ovis_path = hf_hub_download(
61
- # repo_id="AIDC-AI/Ovis-Image-7B",
62
- # subfolder="Ovis2.5-2B",
63
- # token=hf_token,
64
- # )
65
- ovis_tokenizer = build_ovis_tokenizer(
66
- "AIDC-AI/Ovis2.5-2B",
67
- )
68
- ovis_encoder = OvisEmbedder(
69
- model_path="AIDC-AI/Ovis2.5-2B",
70
- random_init=False,
71
- low_cpu_mem_usage=True,
72
- torch_dtype=torch.bfloat16,
73
- ).to(device=device, dtype=_dtype)
74
-
75
 
76
  examples = [
77
  "Five shimmering goldfish weave through crevices between stones; four are red-and-white, while one is silver-white. By the pond's edge, a golden shaded British Shorthair cat watches them intently, counting on blind luck. Watercolor style.",
@@ -582,60 +535,51 @@ def infer(
582
 
583
  print(f'inference with prompt: {prompt}, size: {height}x{width}, seed: {seed}, steps: {num_inference_steps}, cfg: {guidance_scale}')
584
 
585
- image = generate_image(
586
- device=next(ovis_image.parameters()).device,
587
- dtype=_dtype,
588
- model=ovis_image,
589
- prompt=prompt,
590
- autoencoder=autoencoder,
591
- ovis_tokenizer=ovis_tokenizer,
592
- ovis_encoder=ovis_encoder,
593
- img_height=height,
594
- img_width=width,
595
- denoising_steps=num_inference_steps,
596
- cfg_scale=guidance_scale,
597
- seed=seed,
598
- )
599
- # bring into PIL format and save
600
- image = image.clamp(-1, 1)
601
- image = image.cpu().permute(0, 2, 3, 1).float().numpy()
602
- image = (image * 255).round().astype("uint8")
603
-
604
- return image[0], seed
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
605
 
606
  with gr.Blocks(
607
  title="Ovis-Image",
608
  fill_height=False,
609
- theme=gr.themes.Soft(
610
- primary_hue=gr.themes.colors.blue,
611
- secondary_hue=gr.themes.colors.slate,
612
- neutral_hue=gr.themes.colors.gray,
613
- spacing_size=gr.themes.sizes.spacing_lg,
614
- radius_size=gr.themes.sizes.radius_lg,
615
- text_size=gr.themes.sizes.text_md,
616
- font=[gr.themes.GoogleFont("Inter"), "SF Pro Display", "-apple-system", "BlinkMacSystemFont", "system-ui", "sans-serif"],
617
- font_mono=[gr.themes.GoogleFont("JetBrains Mono"), "SF Mono", "ui-monospace", "monospace"],
618
- ).set(
619
- body_background_fill='#f5f5f7',
620
- body_background_fill_dark='#000000',
621
- button_primary_background_fill='#0071e3',
622
- button_primary_background_fill_hover='#0077ed',
623
- button_primary_text_color='#ffffff',
624
- block_background_fill='#ffffff',
625
- block_background_fill_dark='#1d1d1f',
626
- block_border_width='0px',
627
- block_shadow='0 2px 12px rgba(0, 0, 0, 0.08)',
628
- block_shadow_dark='0 2px 12px rgba(0, 0, 0, 0.4)',
629
- input_background_fill='#ffffff',
630
- input_background_fill_dark='#1d1d1f',
631
- input_border_width='1px',
632
- input_border_color='#d2d2d7',
633
- input_border_color_dark='#424245',
634
- input_shadow='none',
635
- input_shadow_focus='0 0 0 4px rgba(0, 113, 227, 0.15)',
636
- ),
637
- css=apple_css,
638
- js=js_code,
639
  ) as demo:
640
  # Two-column layout - variant='panel' prevents responsive stacking
641
  with gr.Row(equal_height=False, variant="panel", elem_id="main-row"):
@@ -712,7 +656,7 @@ with gr.Blocks(
712
  result = gr.Image(
713
  label="Result",
714
  show_label=False,
715
- type="numpy",
716
  format="png",
717
  )
718
 
@@ -731,5 +675,11 @@ with gr.Blocks(
731
  outputs=[result, seed],
732
  )
733
 
 
 
 
734
  if __name__ == '__main__':
735
- demo.launch()
 
 
 
 
4
  import spaces
5
  import random
6
  import numpy as np
 
 
7
 
8
  from diffusers.utils import logging
9
  from PIL import Image
10
 
11
+ from diffusers import OvisImagePipeline
12
+
 
 
 
 
13
 
14
  logging.set_verbosity_error()
15
 
 
16
  MAX_SEED = np.iinfo(np.int32).max
17
 
18
  device = "cuda"
19
  _dtype = torch.bfloat16
20
  hf_token = os.getenv("HF_TOKEN")
21
 
22
+ pipe = OvisImagePipeline.from_pretrained(
23
+ "AIDC-AI/Ovis-Image-7B",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
24
  token=hf_token,
25
+ torch_dtype=torch.bfloat16
26
  )
27
+ pipe.to("cuda")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
28
 
29
  examples = [
30
  "Five shimmering goldfish weave through crevices between stones; four are red-and-white, while one is silver-white. By the pond's edge, a golden shaded British Shorthair cat watches them intently, counting on blind luck. Watercolor style.",
 
535
 
536
  print(f'inference with prompt: {prompt}, size: {height}x{width}, seed: {seed}, steps: {num_inference_steps}, cfg: {guidance_scale}')
537
 
538
+ generator = torch.Generator().manual_seed(seed)
539
+ image = pipe(
540
+ prompt,
541
+ negative_prompt="",
542
+ height=height,
543
+ width=width,
544
+ num_inference_steps=num_inference_steps,
545
+ true_cfg_scale=guidance_scale,
546
+ generator=generator,
547
+ ).images[0]
548
+
549
+ return image, seed
550
+
551
+ custom_theme = gr.themes.Soft(
552
+ primary_hue=gr.themes.colors.blue,
553
+ secondary_hue=gr.themes.colors.slate,
554
+ neutral_hue=gr.themes.colors.gray,
555
+ spacing_size=gr.themes.sizes.spacing_lg,
556
+ radius_size=gr.themes.sizes.radius_lg,
557
+ text_size=gr.themes.sizes.text_md,
558
+ font=[gr.themes.GoogleFont("Inter"), "SF Pro Display", "-apple-system", "BlinkMacSystemFont", "system-ui", "sans-serif"],
559
+ font_mono=[gr.themes.GoogleFont("JetBrains Mono"), "SF Mono", "ui-monospace", "monospace"],
560
+ ).set(
561
+ body_background_fill='#f5f5f7',
562
+ body_background_fill_dark='#000000',
563
+ button_primary_background_fill='#0071e3',
564
+ button_primary_background_fill_hover='#0077ed',
565
+ button_primary_text_color='#ffffff',
566
+ block_background_fill='#ffffff',
567
+ block_background_fill_dark='#1d1d1f',
568
+ block_border_width='0px',
569
+ block_shadow='0 2px 12px rgba(0, 0, 0, 0.08)',
570
+ block_shadow_dark='0 2px 12px rgba(0, 0, 0, 0.4)',
571
+ input_background_fill='#ffffff',
572
+ input_background_fill_dark='#1d1d1f',
573
+ input_border_width='1px',
574
+ input_border_color='#d2d2d7',
575
+ input_border_color_dark='#424245',
576
+ input_shadow='none',
577
+ input_shadow_focus='0 0 0 4px rgba(0, 113, 227, 0.15)',
578
+ )
579
 
580
  with gr.Blocks(
581
  title="Ovis-Image",
582
  fill_height=False,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
583
  ) as demo:
584
  # Two-column layout - variant='panel' prevents responsive stacking
585
  with gr.Row(equal_height=False, variant="panel", elem_id="main-row"):
 
656
  result = gr.Image(
657
  label="Result",
658
  show_label=False,
659
+ type="pil",
660
  format="png",
661
  )
662
 
 
675
  outputs=[result, seed],
676
  )
677
 
678
+ # Load JS after DOM is ready (like Qwen-Image)
679
+ demo.load(None, None, None, js=js_code)
680
+
681
  if __name__ == '__main__':
682
+ demo.launch(
683
+ theme=custom_theme,
684
+ css=apple_css,
685
+ )
app_diffusers.py → app_old.py RENAMED
@@ -4,27 +4,74 @@ import gradio as gr
4
  import spaces
5
  import random
6
  import numpy as np
 
 
7
 
8
  from diffusers.utils import logging
9
  from PIL import Image
10
 
11
- from diffusers import OvisImagePipeline
12
-
 
 
 
 
13
 
14
  logging.set_verbosity_error()
15
 
 
16
  MAX_SEED = np.iinfo(np.int32).max
17
 
18
  device = "cuda"
19
  _dtype = torch.bfloat16
20
  hf_token = os.getenv("HF_TOKEN")
21
 
22
- pipe = OvisImagePipeline.from_pretrained(
23
- "AIDC-AI/Ovis-Image-7B",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
24
  token=hf_token,
25
- torch_dtype=torch.bfloat16
26
  )
27
- pipe.to("cuda")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
28
 
29
  examples = [
30
  "Five shimmering goldfish weave through crevices between stones; four are red-and-white, while one is silver-white. By the pond's edge, a golden shaded British Shorthair cat watches them intently, counting on blind luck. Watercolor style.",
@@ -535,51 +582,60 @@ def infer(
535
 
536
  print(f'inference with prompt: {prompt}, size: {height}x{width}, seed: {seed}, steps: {num_inference_steps}, cfg: {guidance_scale}')
537
 
538
- generator = torch.Generator().manual_seed(seed)
539
- image = pipe(
540
- prompt,
541
- negative_prompt="",
542
- height=height,
543
- width=width,
544
- num_inference_steps=num_inference_steps,
545
- true_cfg_scale=guidance_scale,
546
- generator=generator,
547
- ).images[0]
548
-
549
- return image, seed
550
-
551
- custom_theme = gr.themes.Soft(
552
- primary_hue=gr.themes.colors.blue,
553
- secondary_hue=gr.themes.colors.slate,
554
- neutral_hue=gr.themes.colors.gray,
555
- spacing_size=gr.themes.sizes.spacing_lg,
556
- radius_size=gr.themes.sizes.radius_lg,
557
- text_size=gr.themes.sizes.text_md,
558
- font=[gr.themes.GoogleFont("Inter"), "SF Pro Display", "-apple-system", "BlinkMacSystemFont", "system-ui", "sans-serif"],
559
- font_mono=[gr.themes.GoogleFont("JetBrains Mono"), "SF Mono", "ui-monospace", "monospace"],
560
- ).set(
561
- body_background_fill='#f5f5f7',
562
- body_background_fill_dark='#000000',
563
- button_primary_background_fill='#0071e3',
564
- button_primary_background_fill_hover='#0077ed',
565
- button_primary_text_color='#ffffff',
566
- block_background_fill='#ffffff',
567
- block_background_fill_dark='#1d1d1f',
568
- block_border_width='0px',
569
- block_shadow='0 2px 12px rgba(0, 0, 0, 0.08)',
570
- block_shadow_dark='0 2px 12px rgba(0, 0, 0, 0.4)',
571
- input_background_fill='#ffffff',
572
- input_background_fill_dark='#1d1d1f',
573
- input_border_width='1px',
574
- input_border_color='#d2d2d7',
575
- input_border_color_dark='#424245',
576
- input_shadow='none',
577
- input_shadow_focus='0 0 0 4px rgba(0, 113, 227, 0.15)',
578
- )
579
 
580
  with gr.Blocks(
581
  title="Ovis-Image",
582
  fill_height=False,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
583
  ) as demo:
584
  # Two-column layout - variant='panel' prevents responsive stacking
585
  with gr.Row(equal_height=False, variant="panel", elem_id="main-row"):
@@ -656,7 +712,7 @@ with gr.Blocks(
656
  result = gr.Image(
657
  label="Result",
658
  show_label=False,
659
- type="pil",
660
  format="png",
661
  )
662
 
@@ -675,11 +731,5 @@ with gr.Blocks(
675
  outputs=[result, seed],
676
  )
677
 
678
- # Load JS after DOM is ready (like Qwen-Image)
679
- demo.load(None, None, None, js=js_code)
680
-
681
  if __name__ == '__main__':
682
- demo.launch(
683
- theme=custom_theme,
684
- css=apple_css,
685
- )
 
4
  import spaces
5
  import random
6
  import numpy as np
7
+ from safetensors.torch import load_file
8
+ from huggingface_hub import hf_hub_download
9
 
10
  from diffusers.utils import logging
11
  from PIL import Image
12
 
13
+ from ovis_image.model.tokenizer import build_ovis_tokenizer
14
+ from ovis_image.model.autoencoder import load_ae
15
+ from ovis_image.model.hf_embedder import OvisEmbedder
16
+ from ovis_image.model.model import OvisImageModel
17
+ from ovis_image.sampling import generate_image
18
+ from ovis_image import ovis_image_configs
19
 
20
  logging.set_verbosity_error()
21
 
22
+ # DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
23
  MAX_SEED = np.iinfo(np.int32).max
24
 
25
  device = "cuda"
26
  _dtype = torch.bfloat16
27
  hf_token = os.getenv("HF_TOKEN")
28
 
29
+ print("init ovis_image")
30
+ model_config = ovis_image_configs["ovis-image-7b"]
31
+ ovis_image = OvisImageModel(model_config)
32
+ ovis_image_path = hf_hub_download(
33
+ repo_id="AIDC-AI/Ovis-Image-7B",
34
+ filename="ovis_image.safetensors",
35
+ token=hf_token,
36
+ )
37
+ model_state_dict = load_file(ovis_image_path)
38
+ missing_keys, unexpected_keys = ovis_image.load_state_dict(model_state_dict)
39
+ print(f"Load Missing Keys {missing_keys}")
40
+ print(f"Load Unexpected Keys {unexpected_keys}")
41
+ ovis_image = ovis_image.to(device=device, dtype=_dtype)
42
+ ovis_image.eval()
43
+
44
+ print("init vae")
45
+ vae_path = hf_hub_download(
46
+ repo_id="AIDC-AI/Ovis-Image-7B",
47
+ filename="ae.safetensors",
48
  token=hf_token,
 
49
  )
50
+ autoencoder = load_ae(
51
+ vae_path,
52
+ model_config.autoencoder_params,
53
+ device=device,
54
+ dtype=_dtype,
55
+ random_init=False,
56
+ )
57
+ autoencoder.eval()
58
+
59
+ print("init ovis")
60
+ # ovis_path = hf_hub_download(
61
+ # repo_id="AIDC-AI/Ovis-Image-7B",
62
+ # subfolder="Ovis2.5-2B",
63
+ # token=hf_token,
64
+ # )
65
+ ovis_tokenizer = build_ovis_tokenizer(
66
+ "AIDC-AI/Ovis2.5-2B",
67
+ )
68
+ ovis_encoder = OvisEmbedder(
69
+ model_path="AIDC-AI/Ovis2.5-2B",
70
+ random_init=False,
71
+ low_cpu_mem_usage=True,
72
+ torch_dtype=torch.bfloat16,
73
+ ).to(device=device, dtype=_dtype)
74
+
75
 
76
  examples = [
77
  "Five shimmering goldfish weave through crevices between stones; four are red-and-white, while one is silver-white. By the pond's edge, a golden shaded British Shorthair cat watches them intently, counting on blind luck. Watercolor style.",
 
582
 
583
  print(f'inference with prompt: {prompt}, size: {height}x{width}, seed: {seed}, steps: {num_inference_steps}, cfg: {guidance_scale}')
584
 
585
+ image = generate_image(
586
+ device=next(ovis_image.parameters()).device,
587
+ dtype=_dtype,
588
+ model=ovis_image,
589
+ prompt=prompt,
590
+ autoencoder=autoencoder,
591
+ ovis_tokenizer=ovis_tokenizer,
592
+ ovis_encoder=ovis_encoder,
593
+ img_height=height,
594
+ img_width=width,
595
+ denoising_steps=num_inference_steps,
596
+ cfg_scale=guidance_scale,
597
+ seed=seed,
598
+ )
599
+ # bring into PIL format and save
600
+ image = image.clamp(-1, 1)
601
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
602
+ image = (image * 255).round().astype("uint8")
603
+
604
+ return image[0], seed
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
605
 
606
  with gr.Blocks(
607
  title="Ovis-Image",
608
  fill_height=False,
609
+ theme=gr.themes.Soft(
610
+ primary_hue=gr.themes.colors.blue,
611
+ secondary_hue=gr.themes.colors.slate,
612
+ neutral_hue=gr.themes.colors.gray,
613
+ spacing_size=gr.themes.sizes.spacing_lg,
614
+ radius_size=gr.themes.sizes.radius_lg,
615
+ text_size=gr.themes.sizes.text_md,
616
+ font=[gr.themes.GoogleFont("Inter"), "SF Pro Display", "-apple-system", "BlinkMacSystemFont", "system-ui", "sans-serif"],
617
+ font_mono=[gr.themes.GoogleFont("JetBrains Mono"), "SF Mono", "ui-monospace", "monospace"],
618
+ ).set(
619
+ body_background_fill='#f5f5f7',
620
+ body_background_fill_dark='#000000',
621
+ button_primary_background_fill='#0071e3',
622
+ button_primary_background_fill_hover='#0077ed',
623
+ button_primary_text_color='#ffffff',
624
+ block_background_fill='#ffffff',
625
+ block_background_fill_dark='#1d1d1f',
626
+ block_border_width='0px',
627
+ block_shadow='0 2px 12px rgba(0, 0, 0, 0.08)',
628
+ block_shadow_dark='0 2px 12px rgba(0, 0, 0, 0.4)',
629
+ input_background_fill='#ffffff',
630
+ input_background_fill_dark='#1d1d1f',
631
+ input_border_width='1px',
632
+ input_border_color='#d2d2d7',
633
+ input_border_color_dark='#424245',
634
+ input_shadow='none',
635
+ input_shadow_focus='0 0 0 4px rgba(0, 113, 227, 0.15)',
636
+ ),
637
+ css=apple_css,
638
+ js=js_code,
639
  ) as demo:
640
  # Two-column layout - variant='panel' prevents responsive stacking
641
  with gr.Row(equal_height=False, variant="panel", elem_id="main-row"):
 
712
  result = gr.Image(
713
  label="Result",
714
  show_label=False,
715
+ type="numpy",
716
  format="png",
717
  )
718
 
 
731
  outputs=[result, seed],
732
  )
733
 
 
 
 
734
  if __name__ == '__main__':
735
+ demo.launch()