Upload 2 files
#2
by meimeilook - opened
- .gitattributes +1 -0
- app-fp8.py +859 -0
- demo.jpg +3 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+demo.jpg filter=lfs diff=lfs merge=lfs -text
app-fp8.py
ADDED
@@ -0,0 +1,859 @@
# New additions: timing, process stats, and optional NVIDIA GPU monitoring
import time
import psutil
import platform
import atexit

pynvml_available = False
if platform.system() == "Linux" or platform.system() == "Windows":
    try:
        from pynvml import *
        nvmlInit()
        pynvml_available = True
        print("pynvml (NVIDIA GPU monitoring library) initialized successfully.")

        def shutdown_pynvml():
            print("Shutting down pynvml...")
            nvmlShutdown()
        atexit.register(shutdown_pynvml)  # shut pynvml down cleanly on exit

    except Exception as e:
        print(f"Warning: pynvml could not be initialized. Detailed GPU stats via pynvml will not be available. Error: {e}")
        if "NVML Shared Library Not Found" in str(e):
            print("pynvml error hint: NVML shared library not found. If you have an NVIDIA GPU and drivers, ensure the library is accessible.")
        elif "Driver Not Loaded" in str(e):
            print("pynvml error hint: NVIDIA driver is not loaded. Please check your GPU driver installation.")

import gradio as gr
import numpy as np
import os
import torch
import random

from accelerate import infer_auto_device_map, load_checkpoint_and_dispatch, init_empty_weights
from PIL import Image

from data.data_utils import add_special_tokens, pil_img2rgb
from data.transforms import ImageTransform
from inferencer import InterleaveInferencer
from modeling.autoencoder import load_ae
from modeling.bagel.qwen2_navit import NaiveCache
from modeling.bagel import (
    BagelConfig, Bagel, Qwen2Config, Qwen2ForCausalLM,
    SiglipVisionConfig, SiglipVisionModel
)
from modeling.qwen2 import Qwen2Tokenizer

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--server_name", type=str, default="127.0.0.1")
parser.add_argument("--server_port", type=int, default=7860)
parser.add_argument("--share", action="store_true")
parser.add_argument("--model_path", type=str, default="/root/your_path/BAGEL-7B-MoT")
args = parser.parse_args()
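# Example invocation (paths illustrative, not part of the original script):
#   python app-fp8.py --model_path models/BAGEL-7B-MoT --server_name 0.0.0.0 --server_port 7860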

# Model Initialization
model_path = args.model_path  # Download from https://huggingface.co/ByteDance-Seed/BAGEL-7B-MoT to models/BAGEL-7B-MoT

llm_config = Qwen2Config.from_json_file(os.path.join(model_path, "llm_config.json"))
llm_config.qk_norm = True
llm_config.tie_word_embeddings = False
llm_config.layer_module = "Qwen2MoTDecoderLayer"

vit_config = SiglipVisionConfig.from_json_file(os.path.join(model_path, "vit_config.json"))
vit_config.rope = False
vit_config.num_hidden_layers -= 1

vae_model, vae_config = load_ae(local_path=os.path.join(model_path, "ae.safetensors"))

config = BagelConfig(
    visual_gen=True,
    visual_und=True,
    llm_config=llm_config,
    vit_config=vit_config,
    vae_config=vae_config,
    vit_max_num_patch_per_side=70,
    connector_act='gelu_pytorch_tanh',
    latent_patch_size=2,
    max_latent_size=64,
)

with init_empty_weights():
    language_model = Qwen2ForCausalLM(llm_config)
    vit_model = SiglipVisionModel(vit_config)
    model = Bagel(language_model, vit_model, config)
    model.vit_model.vision_model.embeddings.convert_conv2d_to_linear(vit_config, meta=True)

tokenizer = Qwen2Tokenizer.from_pretrained(model_path)
tokenizer, new_token_ids, _ = add_special_tokens(tokenizer)

vae_transform = ImageTransform(1024, 512, 16)
vit_transform = ImageTransform(980, 224, 14)

# # Model Loading and Multi-GPU Inference Preparation (original approach, kept commented for reference)
# device_map = infer_auto_device_map(
#     model,
#     max_memory={i: "24GiB" for i in range(torch.cuda.device_count())},
#     no_split_module_classes=["Bagel", "Qwen2MoTDecoderLayer"],
# )

# same_device_modules = [
#     'language_model.model.embed_tokens',
#     'time_embedder',
#     'latent_pos_embed',
#     'vae2llm',
#     'llm2vae',
#     'connector',
#     'vit_pos_embed'
# ]

# if torch.cuda.device_count() == 1:
#     first_device = device_map.get(same_device_modules[0], "cuda:0")
#     for k in same_device_modules:
#         if k in device_map:
#             device_map[k] = first_device
#         else:
#             device_map[k] = "cuda:0"
# else:
#     first_device = device_map.get(same_device_modules[0])
#     for k in same_device_modules:
#         if k in device_map:
#             device_map[k] = first_device

# --- new changes ---
print("Starting model loading and device map configuration...")

# --- RAM & VRAM helper functions ---
def get_gpu_memory_stats_pynvml(device_id=0):
    if not pynvml_available:
        return f"GPU-{device_id} (pynvml): Not available."
    try:
        handle = nvmlDeviceGetHandleByIndex(device_id)
        info = nvmlDeviceGetMemoryInfo(handle)
        total_gb = info.total / (1024**3)
        used_gb = info.used / (1024**3)
        # free_gb = info.free / (1024**3)  # derivable from total minus used
        return f"GPU-{device_id} (pynvml): Total: {total_gb:.2f} GB, Used (Overall): {used_gb:.2f} GB"
    except NVMLError as e:
        return f"GPU-{device_id} (pynvml) Error: {e}"

def get_gpu_memory_stats_pytorch(device_id=0):
    if not torch.cuda.is_available():
        return "PyTorch: CUDA not available."
    if device_id < 0 or device_id >= torch.cuda.device_count():
        return f"PyTorch GPU-{device_id}: Invalid device ID."

    allocated_gb = torch.cuda.memory_allocated(device_id) / (1024**3)
    reserved_gb = torch.cuda.memory_reserved(device_id) / (1024**3)  # total VRAM reserved by PyTorch

    # Try to get the total capacity via pynvml
    total_capacity_str_pt = ""
    if pynvml_available:
        try:
            handle = nvmlDeviceGetHandleByIndex(device_id)
            info = nvmlDeviceGetMemoryInfo(handle)
            total_gb_pt = info.total / (1024**3)
            total_capacity_str_pt = f"Total Capacity: {total_gb_pt:.2f} GB, "
        except NVMLError:
            pass  # if the query fails, simply omit the total capacity

    return (f"PyTorch GPU-{device_id}: {total_capacity_str_pt}"
            f"Allocated: {allocated_gb:.2f} GB, Reserved: {reserved_gb:.2f} GB")

def get_system_ram_stats():
    mem = psutil.virtual_memory()
    total_gb = mem.total / (1024**3)
    available_gb = mem.available / (1024**3)
    used_gb = mem.used / (1024**3)
    percent_used = mem.percent
    return (f"System RAM: Total: {total_gb:.2f} GB, Available: {available_gb:.2f} GB, "
            f"Used (Overall): {used_gb:.2f} GB ({percent_used}%)")

def get_process_ram_stats():
    process = psutil.Process(os.getpid())  # the current Python process
    mem_info = process.memory_info()
    rss_gb = mem_info.rss / (1024**3)  # Resident Set Size (actual physical memory usage)
    return f"App Process RAM (RSS): {rss_gb:.2f} GB"

def get_all_memory_stats_for_gradio_display():
    """Prepare the memory/VRAM statistics string for display in Gradio."""
    stats_lines = []
    if torch.cuda.is_available() and torch.cuda.device_count() > 0:
        stats_lines.append("**GPU VRAM Usage:**")
        for i in range(torch.cuda.device_count()):
            stats_lines.append(get_gpu_memory_stats_pynvml(i))
            stats_lines.append(get_gpu_memory_stats_pytorch(i))
            if i < torch.cuda.device_count() - 1:  # separator between multiple GPUs
                stats_lines.append("---")
    else:
        stats_lines.append("**GPU VRAM Usage:** CUDA not available or no GPUs found.")

    stats_lines.append("\n**CPU RAM Usage:**")
    stats_lines.append(get_system_ram_stats())
    stats_lines.append(get_process_ram_stats())

    return "\n".join(s for s in stats_lines if s)
# --- RAM & VRAM helper functions end ---
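# Sample output of get_all_memory_stats_for_gradio_display() (values illustrative):
#   **GPU VRAM Usage:**
#   GPU-0 (pynvml): Total: 24.00 GB, Used (Overall): 18.42 GB
#   PyTorch GPU-0: Total Capacity: 24.00 GB, Allocated: 15.10 GB, Reserved: 17.50 GB
#
#   **CPU RAM Usage:**
#   System RAM: Total: 64.00 GB, Available: 38.25 GB, Used (Overall): 24.12 GB (40.2%)
#   App Process RAM (RSS): 21.87 GB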

# RAM & VRAM budget settings: adjust these to your hardware.
# Example: with 60 GB of system RAM, budget about 55 GiB and leave some headroom.

cpu_mem_for_offload = "16GiB"
gpu_mem_per_device = "24GiB"  # per-GPU VRAM budget

max_memory_config = {i: gpu_mem_per_device for i in range(torch.cuda.device_count())}
max_memory_config["cpu"] = cpu_mem_for_offload  # CPU budget is needed with or without GPUs

print(f"Using max_memory_config: {max_memory_config}")

device_map = infer_auto_device_map(
    model,
    max_memory=max_memory_config,  # use the configuration that includes the CPU memory budget
    no_split_module_classes=["Bagel", "Qwen2MoTDecoderLayer"],
)
print("Device map after infer_auto_device_map (with CPU budget):")
for k, v_map in device_map.items():  # inspect the assignments
    print(f"  {k}: {v_map}")

same_device_modules = [
    'language_model.model.embed_tokens',
    'time_embedder',
    'latent_pos_embed',
    'vae2llm',
    'llm2vae',
    'connector',
    'vit_pos_embed'
]

# Keep the modules above on the same device.
if torch.cuda.device_count() > 0:
    first_device_key = same_device_modules[0]
    default_target_device = "cuda:0"  # default target is the first GPU
    first_module_target_device = device_map.get(first_device_key, default_target_device)

    print(f"Target device for same_device_modules (based on {first_device_key}): {first_module_target_device}")

    for k_module in same_device_modules:
        if k_module in device_map:
            if device_map[k_module] != first_module_target_device:
                print(f"  Moving {k_module} from {device_map[k_module]} to {first_module_target_device} (same_device_modules)")
                device_map[k_module] = first_module_target_device
        else:  # the module is missing from the auto-generated map but should live on a specific device
            print(f"  Assigning {k_module} (from same_device_modules) to {first_module_target_device} as it was not in initial map.")
            device_map[k_module] = first_module_target_device
elif torch.cuda.device_count() == 0 and "cpu" in max_memory_config:  # CPU-only fallback
    print("No CUDA devices found. Assigning same_device_modules to CPU.")
    for k_module in same_device_modules:
        device_map[k_module] = "cpu"


print("Device map after same_device_modules logic:")
for k, v_map in device_map.items():
    print(f"  {k}: {v_map}")

# Key point: make sure nothing is assigned to 'disk' (fall back to 'cpu' instead)
keys_to_change_to_cpu = []
for module_name, device_assignment in device_map.items():
    if device_assignment == "disk":
        keys_to_change_to_cpu.append(module_name)

if keys_to_change_to_cpu:
    print(f"Manually changing the following layers from 'disk' to 'cpu': {keys_to_change_to_cpu}")
    for module_name in keys_to_change_to_cpu:
        device_map[module_name] = "cpu"
    print("Final device_map before loading checkpoint (after disk override):")
    for k, v_map in device_map.items():
        print(f"  {k}: {v_map}")
else:
    print("No layers assigned to 'disk' by infer_auto_device_map, or they were already handled. Final device_map is as above.")
# --- model-loading fixes end ---

# Promote additional layers to the GPU for better utilization.
# The device_map should only contain GPU indexes (such as 0) or 'cpu'.
print("\nStarting custom device_map modifications to maximize GPU utilization...")
print("Device map state BEFORE custom modifications:")
for k_map_item, v_map_item in device_map.items():
    print(f"  {k_map_item}: {v_map_item}")


# --- Key tuning parameters start ---

# 1. Move additional LLM Transformer layers (layers 11 to 27) from the CPU to GPU 0.
#    These layers are currently on the CPU; there are 17 of them (indexes 11 through 27).
#    Set how many to promote; start small (e.g. 5 or 8) and increase gradually.
#    A value of 17 attempts to move all of layers 11-27.

NUM_ADDITIONAL_LLM_LAYERS_TO_GPU = 5  # 5 fits 24 GB VRAM; values to try: 5, 8, 10, 12, 15, 17

# 2. Whether to move the LLM's 'norm' and 'lm_head' layers to GPU 0 (if they are on the CPU).
#    It is usually best to keep them on the same device as the last LLM layer.

TRY_MOVE_LLM_NORM_HEAD_TO_GPU = True  # default True; set to False to leave them where they are

# 3. (Optional) Whether to move 'vit_model' to GPU 0 (if it is on the CPU).
#    Consider this only after the LLM layers fit on the GPU with VRAM to spare.

TRY_MOVE_VIT_MODEL_TO_GPU = False  # default False; worth experimenting with

# --- Key tuning parameters end ---


# Promote LLM layers
moved_llm_layers_count = 0
if NUM_ADDITIONAL_LLM_LAYERS_TO_GPU > 0:
    print(f"\nAttempting to move up to {NUM_ADDITIONAL_LLM_LAYERS_TO_GPU} LLM layers (11 to {10 + NUM_ADDITIONAL_LLM_LAYERS_TO_GPU}) to GPU 0...")
    for i in range(NUM_ADDITIONAL_LLM_LAYERS_TO_GPU):
        layer_idx = 11 + i  # start from layer 11
        if layer_idx > 27:  # language_model.model.layers goes up to index 27
            print(f"  Reached max layer index (27). Stopped LLM layer promotion.")
            break
        layer_name = f"language_model.model.layers.{layer_idx}"

        if device_map.get(layer_name) == 'cpu':
            print(f"  Promoting LLM layer '{layer_name}' from 'cpu' to GPU 0.")
            device_map[layer_name] = 0  # move to GPU 0
            moved_llm_layers_count += 1
        elif layer_name in device_map:
            print(f"  LLM Layer '{layer_name}' is already on device '{device_map[layer_name]}'. Skipping promotion.")
        else:
            print(f"  Warning: LLM Layer '{layer_name}' not found in device_map. Cannot promote.")
    print(f"Successfully promoted {moved_llm_layers_count} LLM layers to GPU 0.")
else:
    print("\nSkipping promotion of additional LLM layers based on NUM_ADDITIONAL_LLM_LAYERS_TO_GPU setting.")

# Promote the LLM 'norm' and 'lm_head' modules
if TRY_MOVE_LLM_NORM_HEAD_TO_GPU:
    print("\nAttempting to move LLM 'norm' and 'lm_head' to GPU 0 (if on CPU)...")
    llm_aux_modules = ["language_model.model.norm", "language_model.model.lm_head"]
    # Other LLM modules (e.g. rotary_emb, norm_moe_gen) can be added here if they sit on the CPU:
    # llm_aux_modules.append("language_model.model.rotary_emb")
    # llm_aux_modules.append("language_model.model.norm_moe_gen")

    for module_name in llm_aux_modules:
        if device_map.get(module_name) == 'cpu':
            print(f"  Promoting '{module_name}' from 'cpu' to GPU 0.")
            device_map[module_name] = 0
        elif module_name in device_map:
            print(f"  Module '{module_name}' is already on device '{device_map[module_name]}'. Skipping promotion.")
        else:
            print(f"  Warning: Module '{module_name}' not found in device_map. Cannot promote.")
else:
    print("\nSkipping promotion of LLM 'norm' and 'lm_head' based on TRY_MOVE_LLM_NORM_HEAD_TO_GPU setting.")

# (Optional) promote vit_model
if TRY_MOVE_VIT_MODEL_TO_GPU:
    print("\nAttempting to move 'vit_model' to GPU 0 (if on CPU)...")
    vit_module_name = "vit_model"
    if device_map.get(vit_module_name) == 'cpu':
        print(f"  Promoting '{vit_module_name}' from 'cpu' to GPU 0.")
        device_map[vit_module_name] = 0
    elif vit_module_name in device_map:
        print(f"  Module '{vit_module_name}' is already on device '{device_map[vit_module_name]}'. Skipping promotion.")
    else:
        print(f"  Warning: Module '{vit_module_name}' not found in device_map. Cannot promote.")
else:
    print("\nSkipping promotion of 'vit_model' based on TRY_MOVE_VIT_MODEL_TO_GPU setting.")


print("\nFinal device_map after all custom modifications:")
for k_map_item, v_map_item in device_map.items():
    print(f"  {k_map_item}: {v_map_item}")
print("--- End of custom device_map modifications ---")

# End of GPU VRAM adjustments


model = load_checkpoint_and_dispatch(
    model,
    checkpoint=os.path.join(model_path, "ema-FP8.safetensors"),
    device_map=device_map,
    offload_buffers=True,
    offload_folder="offload",
    dtype=torch.bfloat16,
    force_hooks=True,
).eval()


# Inferencer Preparation
inferencer = InterleaveInferencer(
    model=model,
    vae_model=vae_model,
    tokenizer=tokenizer,
    vae_transform=vae_transform,
    vit_transform=vit_transform,
    new_token_ids=new_token_ids,
)
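# Minimal usage sketch of the inferencer (keyword names as used by the functions below):
#   out = inferencer(text="a cat wearing a hat", think=False, num_timesteps=50)
#   out["image"].save("cat.png")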

def set_seed(seed):
    """Set random seeds for reproducibility."""
    if seed > 0:
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        if torch.cuda.is_available():
            torch.cuda.manual_seed(seed)
            torch.cuda.manual_seed_all(seed)
            torch.backends.cudnn.deterministic = True
            torch.backends.cudnn.benchmark = False
    return seed
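# set_seed(0) leaves the RNG state untouched (non-reproducible results);
# any positive seed, e.g. set_seed(42), makes runs reproducible.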

# Text-to-Image function with thinking option and hyperparameters
def text_to_image(prompt, show_thinking=False, cfg_text_scale=4.0, cfg_interval=0.4,
                  timestep_shift=3.0, num_timesteps=50,
                  cfg_renorm_min=1.0, cfg_renorm_type="global",
                  max_think_token_n=1024, do_sample=False, text_temperature=0.3,
                  seed=0, image_ratio="1:1"):
    # Set seed for reproducibility
    set_seed(seed)

    if image_ratio == "1:1":
        image_shapes = (1024, 1024)
    elif image_ratio == "4:3":
        image_shapes = (768, 1024)
    elif image_ratio == "3:4":
        image_shapes = (1024, 768)
    elif image_ratio == "16:9":
        image_shapes = (576, 1024)
    elif image_ratio == "9:16":
        image_shapes = (1024, 576)

    # Set hyperparameters
    inference_hyper = dict(
        max_think_token_n=max_think_token_n if show_thinking else 1024,
        do_sample=do_sample if show_thinking else False,
        text_temperature=text_temperature if show_thinking else 0.3,
        cfg_text_scale=cfg_text_scale,
        cfg_interval=[cfg_interval, 1.0],  # end fixed at 1.0
        timestep_shift=timestep_shift,
        num_timesteps=num_timesteps,
        cfg_renorm_min=cfg_renorm_min,
        cfg_renorm_type=cfg_renorm_type,
        image_shapes=image_shapes,
    )
    # --- added: record start time ---
    start_time = time.time()

    # Call inferencer with or without think parameter based on user choice
    result = inferencer(text=prompt, think=show_thinking, **inference_hyper)

    # --- added: record end time ---
    end_time = time.time()
    duration = end_time - start_time
    duration_str = f"{duration:.2f} seconds"
    print(f"Image generation took: {duration_str}")  # console log

    return result["image"], result.get("text", None), duration_str
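# Example call (illustrative):
#   img, thinking, dur = text_to_image("a red bicycle on a beach", seed=42, image_ratio="16:9")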


# Image Understanding function with thinking option and hyperparameters
def image_understanding(image: Image.Image, prompt: str, show_thinking=False,
                        do_sample=False, text_temperature=0.3, max_new_tokens=512):
    # --- added: record start time ---
    start_time = time.time()

    if image is None:
        return "Please upload an image.", ""  # bug fix: the caller unpacks two values

    if isinstance(image, np.ndarray):
        image = Image.fromarray(image)

    image = pil_img2rgb(image)

    # Set hyperparameters
    inference_hyper = dict(
        do_sample=do_sample,
        text_temperature=text_temperature,
        max_think_token_n=max_new_tokens,  # sets the maximum generation length
    )

    # Use show_thinking parameter to control thinking process
    result = inferencer(image=image, text=prompt, think=show_thinking,
                        understanding_output=True, **inference_hyper)

    # --- added: record end time ---
    end_time = time.time()
    duration = end_time - start_time
    duration_str = f"{duration:.2f} seconds"
    print(f"Image understanding took: {duration_str}")  # console log

    return result["text"], duration_str
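# Example call (illustrative; demo.jpg is the image added in this commit):
#   answer, dur = image_understanding(Image.open("demo.jpg"), "Describe this image.")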


# Image Editing function with thinking option and hyperparameters
def edit_image(image: Image.Image, prompt: str, show_thinking=False, cfg_text_scale=4.0,
               cfg_img_scale=2.0, cfg_interval=0.0,
               timestep_shift=3.0, num_timesteps=50, cfg_renorm_min=1.0,
               cfg_renorm_type="text_channel", max_think_token_n=1024,
               do_sample=False, text_temperature=0.3, seed=0):
    # Set seed for reproducibility
    set_seed(seed)

    # --- added: record start time ---
    start_time = time.time()

    if image is None:
        return None, "Please upload an image.", ""  # bug fix: the caller unpacks three values

    if isinstance(image, np.ndarray):
        image = Image.fromarray(image)

    image = pil_img2rgb(image)

    # Set hyperparameters
    inference_hyper = dict(
        max_think_token_n=max_think_token_n if show_thinking else 1024,
        do_sample=do_sample if show_thinking else False,
        text_temperature=text_temperature if show_thinking else 0.3,
        cfg_text_scale=cfg_text_scale,
        cfg_img_scale=cfg_img_scale,
        cfg_interval=[cfg_interval, 1.0],  # end fixed at 1.0
        timestep_shift=timestep_shift,
        num_timesteps=num_timesteps,
        cfg_renorm_min=cfg_renorm_min,
        cfg_renorm_type=cfg_renorm_type,
    )

    # Include thinking parameter based on user choice
    result = inferencer(image=image, text=prompt, think=show_thinking, **inference_hyper)

    # --- added: record end time ---
    end_time = time.time()
    duration = end_time - start_time
    duration_str = f"{duration:.2f} seconds"
    print(f"Image editing took: {duration_str}")  # console log

    return result["image"], result.get("text", ""), duration_str


# Helper function to load example images
def load_example_image(image_path):
    try:
        return Image.open(image_path)
    except Exception as e:
        print(f"Error loading example image: {e}")
        return None


# Gradio UI
with gr.Blocks() as demo:
    gr.Markdown("""
<div>
  <img src="https://lf3-static.bytednsdoc.com/obj/eden-cn/nuhojubrps/banner.png" alt="BAGEL" width="380"/>
</div>
""")

    with gr.Tab("📝 Text to Image"):
        txt_input = gr.Textbox(
            label="Prompt",
            value="A female cosplayer portraying an ethereal fairy or elf, wearing a flowing dress made of delicate fabrics in soft, mystical colors like emerald green and silver. She has pointed ears, a gentle, enchanting expression, and her outfit is adorned with sparkling jewels and intricate patterns. The background is a magical forest with glowing plants, mystical creatures, and a serene atmosphere."
        )

        with gr.Row():
            show_thinking = gr.Checkbox(label="Thinking", value=False)

        # Add hyperparameter controls in an accordion
        with gr.Accordion("Inference Hyperparameters", open=False):
            with gr.Group():
                with gr.Row():
                    seed = gr.Slider(minimum=0, maximum=1000000, value=0, step=1,
                                     label="Seed", info="0 for random seed, positive for reproducible results")
                    image_ratio = gr.Dropdown(choices=["1:1", "4:3", "3:4", "16:9", "9:16"],
                                              value="1:1", label="Image Ratio",
                                              info="The longer side is fixed to 1024")

                with gr.Row():
                    cfg_text_scale = gr.Slider(minimum=1.0, maximum=8.0, value=4.0, step=0.1, interactive=True,
                                               label="CFG Text Scale", info="Controls how strongly the model follows the text prompt (4.0-8.0)")
                    cfg_interval = gr.Slider(minimum=0.0, maximum=1.0, value=0.4, step=0.1,
                                             label="CFG Interval", info="Start of CFG application interval (end is fixed at 1.0)")

                with gr.Row():
                    cfg_renorm_type = gr.Dropdown(choices=["global", "local", "text_channel"],
                                                  value="global", label="CFG Renorm Type",
                                                  info="If the generated image is blurry, use 'global'")
                    cfg_renorm_min = gr.Slider(minimum=0.0, maximum=1.0, value=0.0, step=0.1, interactive=True,
                                               label="CFG Renorm Min", info="1.0 disables CFG-Renorm")

                with gr.Row():
                    num_timesteps = gr.Slider(minimum=10, maximum=100, value=50, step=5, interactive=True,
                                              label="Timesteps", info="Total denoising steps")
                    timestep_shift = gr.Slider(minimum=1.0, maximum=5.0, value=3.0, step=0.5, interactive=True,
                                               label="Timestep Shift", info="Higher values for layout, lower for details")

            # Thinking parameters in a single row
            thinking_params = gr.Group(visible=False)
            with thinking_params:
                with gr.Row():
                    do_sample = gr.Checkbox(label="Sampling", value=False, info="Enable sampling for text generation")
                    max_think_token_n = gr.Slider(minimum=64, maximum=4006, value=1024, step=64, interactive=True,
                                                  label="Max Think Tokens", info="Maximum number of tokens for thinking")
                    text_temperature = gr.Slider(minimum=0.1, maximum=1.0, value=0.3, step=0.1, interactive=True,
                                                 label="Temperature", info="Controls randomness in text generation")

        thinking_output = gr.Textbox(label="Thinking Process", visible=False)
        img_output = gr.Image(label="Generated Image")
        gen_btn = gr.Button("Generate", variant="primary")

        # --- added: a text box to display the generation time ---
        generation_time_output = gr.Textbox(label="Processing Time", interactive=False)
        # --- add end ---

        # Dynamically show/hide thinking process box and parameters
        def update_thinking_visibility(show):
            return gr.update(visible=show), gr.update(visible=show)

        show_thinking.change(
            fn=update_thinking_visibility,
            inputs=[show_thinking],
            outputs=[thinking_output, thinking_params]
        )

        # Process function based on thinking option and hyperparameters
        def process_text_to_image(prompt, show_thinking, cfg_text_scale,
                                  cfg_interval, timestep_shift,
                                  num_timesteps, cfg_renorm_min, cfg_renorm_type,
                                  max_think_token_n, do_sample, text_temperature, seed, image_ratio):
            image, thinking, duration_str = text_to_image(
                prompt, show_thinking, cfg_text_scale, cfg_interval,
                timestep_shift, num_timesteps,
                cfg_renorm_min, cfg_renorm_type,
                max_think_token_n, do_sample, text_temperature, seed, image_ratio
            )
            return image, thinking if thinking else "", duration_str

        gr.on(
            triggers=[gen_btn.click, txt_input.submit],
            fn=process_text_to_image,
            inputs=[
                txt_input, show_thinking, cfg_text_scale,
                cfg_interval, timestep_shift,
                num_timesteps, cfg_renorm_min, cfg_renorm_type,
                max_think_token_n, do_sample, text_temperature, seed, image_ratio
            ],
            # --- key change: make sure the outputs list contains all three components ---
            outputs=[img_output, thinking_output, generation_time_output]
            # --- end of change ---
        )

    with gr.Tab("🖌️ Image Edit"):
        with gr.Row():
            with gr.Column(scale=1):
                edit_image_input = gr.Image(label="Input Image", value=load_example_image('test_images/women.jpg'))
                edit_prompt = gr.Textbox(
                    label="Prompt",
                    value="She boards a modern subway, quietly reading a folded newspaper, wearing the same clothes."
                )

            with gr.Column(scale=1):
                edit_image_output = gr.Image(label="Result")
                edit_thinking_output = gr.Textbox(label="Thinking Process", visible=False)

        with gr.Row():
            edit_show_thinking = gr.Checkbox(label="Thinking", value=False)

        # Add hyperparameter controls in an accordion
        with gr.Accordion("Inference Hyperparameters", open=False):
            with gr.Group():
                with gr.Row():
                    edit_seed = gr.Slider(minimum=0, maximum=1000000, value=0, step=1, interactive=True,
                                          label="Seed", info="0 for random seed, positive for reproducible results")
                    edit_cfg_text_scale = gr.Slider(minimum=1.0, maximum=8.0, value=4.0, step=0.1, interactive=True,
                                                    label="CFG Text Scale", info="Controls how strongly the model follows the text prompt")

                with gr.Row():
                    edit_cfg_img_scale = gr.Slider(minimum=1.0, maximum=4.0, value=2.0, step=0.1, interactive=True,
                                                   label="CFG Image Scale", info="Controls how much the model preserves input image details")
                    edit_cfg_interval = gr.Slider(minimum=0.0, maximum=1.0, value=0.0, step=0.1, interactive=True,
                                                  label="CFG Interval", info="Start of CFG application interval (end is fixed at 1.0)")

                with gr.Row():
                    edit_cfg_renorm_type = gr.Dropdown(choices=["global", "local", "text_channel"],
                                                       value="text_channel", label="CFG Renorm Type",
                                                       info="If the generated image is blurry, use 'global'")
                    edit_cfg_renorm_min = gr.Slider(minimum=0.0, maximum=1.0, value=0.0, step=0.1, interactive=True,
                                                    label="CFG Renorm Min", info="1.0 disables CFG-Renorm")

                with gr.Row():
                    edit_num_timesteps = gr.Slider(minimum=10, maximum=100, value=50, step=5, interactive=True,
                                                   label="Timesteps", info="Total denoising steps")
                    edit_timestep_shift = gr.Slider(minimum=1.0, maximum=10.0, value=3.0, step=0.5, interactive=True,
                                                    label="Timestep Shift", info="Higher values for layout, lower for details")


            # Thinking parameters in a single row
            edit_thinking_params = gr.Group(visible=False)
            with edit_thinking_params:
                with gr.Row():
                    edit_do_sample = gr.Checkbox(label="Sampling", value=False, info="Enable sampling for text generation")
                    edit_max_think_token_n = gr.Slider(minimum=64, maximum=4006, value=1024, step=64, interactive=True,
                                                       label="Max Think Tokens", info="Maximum number of tokens for thinking")
                    edit_text_temperature = gr.Slider(minimum=0.1, maximum=1.0, value=0.3, step=0.1, interactive=True,
                                                      label="Temperature", info="Controls randomness in text generation")

        edit_btn = gr.Button("Submit", variant="primary")
        # --- added: a text box to display the generation time ---
        edit_generation_time_output = gr.Textbox(label="Processing Time", interactive=False)
        # --- add end ---

        # Dynamically show/hide thinking process box for editing
        def update_edit_thinking_visibility(show):
            return gr.update(visible=show), gr.update(visible=show)

        edit_show_thinking.change(
            fn=update_edit_thinking_visibility,
            inputs=[edit_show_thinking],
            outputs=[edit_thinking_output, edit_thinking_params]
        )

        # Process editing with thinking option and hyperparameters
        def process_edit_image(image, prompt, show_thinking, cfg_text_scale,
                               cfg_img_scale, cfg_interval,
                               timestep_shift, num_timesteps, cfg_renorm_min,
                               cfg_renorm_type, max_think_token_n, do_sample,
                               text_temperature, seed):
            edited_image, thinking, duration_str = edit_image(
                image, prompt, show_thinking, cfg_text_scale, cfg_img_scale,
                cfg_interval, timestep_shift,
                num_timesteps, cfg_renorm_min, cfg_renorm_type,
                max_think_token_n, do_sample, text_temperature, seed
            )

            return edited_image, thinking if thinking else "", duration_str

        gr.on(
            triggers=[edit_btn.click, edit_prompt.submit],
            fn=process_edit_image,
            inputs=[
                edit_image_input, edit_prompt, edit_show_thinking,
                edit_cfg_text_scale, edit_cfg_img_scale, edit_cfg_interval,
                edit_timestep_shift, edit_num_timesteps,
                edit_cfg_renorm_min, edit_cfg_renorm_type,
                edit_max_think_token_n, edit_do_sample, edit_text_temperature, edit_seed
            ],
            outputs=[edit_image_output, edit_thinking_output, edit_generation_time_output]
        )

    with gr.Tab("🖼️ Image Understanding"):
        with gr.Row():
            with gr.Column(scale=1):
                img_input = gr.Image(label="Input Image", value=load_example_image('test_images/meme.jpg'))
                understand_prompt = gr.Textbox(
                    label="Prompt",
                    value="Can someone explain what's funny about this meme??"
                )

            with gr.Column(scale=1):
                txt_output = gr.Textbox(label="Result", lines=20)


        with gr.Row():
            understand_show_thinking = gr.Checkbox(label="Thinking", value=False)

        # Add hyperparameter controls in an accordion
        with gr.Accordion("Inference Hyperparameters", open=False):
            with gr.Row():
                understand_do_sample = gr.Checkbox(label="Sampling", value=False, info="Enable sampling for text generation")
                understand_text_temperature = gr.Slider(minimum=0.0, maximum=1.0, value=0.3, step=0.05, interactive=True,
                                                        label="Temperature", info="Controls randomness in text generation (0=deterministic, 1=creative)")
                understand_max_new_tokens = gr.Slider(minimum=64, maximum=4096, value=512, step=64, interactive=True,
                                                      label="Max New Tokens", info="Maximum length of generated text, including potential thinking")

        img_understand_btn = gr.Button("Submit", variant="primary")
        # --- added: a text box to display the generation time ---
        understand_generation_time_output = gr.Textbox(label="Processing Time", interactive=False)
        # --- add end ---

        # Process understanding with thinking option and hyperparameters
        def process_understanding(image, prompt, show_thinking, do_sample,
                                  text_temperature, max_new_tokens):
            result, duration_str = image_understanding(
                image, prompt, show_thinking, do_sample,
                text_temperature, max_new_tokens
            )
            return result, duration_str

        gr.on(
            triggers=[img_understand_btn.click, understand_prompt.submit],
            fn=process_understanding,
            inputs=[
                img_input, understand_prompt, understand_show_thinking,
                understand_do_sample, understand_text_temperature, understand_max_new_tokens
            ],
            outputs=[txt_output, understand_generation_time_output]
        )

    gr.Markdown("""
<div style="display: flex; justify-content: flex-start; flex-wrap: wrap; gap: 10px;">
  <a href="https://bagel-ai.org/">
    <img
      src="https://img.shields.io/badge/BAGEL-Website-0A66C2?logo=safari&logoColor=white"
      alt="BAGEL Website"
    />
  </a>
  <a href="https://arxiv.org/abs/2505.14683">
    <img
      src="https://img.shields.io/badge/BAGEL-Paper-red?logo=arxiv&logoColor=red"
      alt="BAGEL Paper on arXiv"
    />
  </a>
  <a href="https://huggingface.co/ByteDance-Seed/BAGEL-7B-MoT">
    <img
      src="https://img.shields.io/badge/BAGEL-Hugging%20Face-orange?logo=huggingface&logoColor=yellow"
      alt="BAGEL on Hugging Face"
    />
  </a>
  <a href="https://demo.bagel-ai.org/">
    <img
      src="https://img.shields.io/badge/BAGEL-Demo-blue?logo=googleplay&logoColor=blue"
      alt="BAGEL Demo"
    />
  </a>
  <a href="https://discord.gg/Z836xxzy">
    <img
      src="https://img.shields.io/badge/BAGEL-Discord-5865F2?logo=discord&logoColor=purple"
      alt="BAGEL Discord"
    />
  </a>
  <a href="mailto:bagel@bytedance.com">
    <img
      src="https://img.shields.io/badge/BAGEL-Email-D14836?logo=gmail&logoColor=red"
      alt="BAGEL Email"
    />
  </a>
</div>
""")
    # --- added: RAM/VRAM stats tab ---
    with gr.Tab("📊 System Monitor"):
        with gr.Column():
            memory_stats_display = gr.Markdown("Check RAM/VRAM Stats")
            refresh_button = gr.Button("🔄 Check RAM/VRAM Stats")

            # When the button is clicked, get_all_memory_stats_for_gradio_display is called
            # and its return value updates the memory_stats_display component.
            refresh_button.click(
                fn=get_all_memory_stats_for_gradio_display,
                inputs=None,
                outputs=[memory_stats_display]
            )
    # --- RAM/VRAM stats end ---

if __name__ == "__main__":
    demo.launch(
        server_name=args.server_name,
        server_port=args.server_port,
        share=args.share,
        inbrowser=True,
    )
demo.jpg
ADDED
Git LFS Details