|
|
import logging |
|
|
import os |
|
|
|
|
|
|
|
|
# Allow duplicate OpenMP runtimes to coexist (common PyTorch/MKL conflict).
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"

import multiprocessing

# Cap the math-library thread pools at 8 (or the core count on smaller
# machines) so several native libraries running side by side do not
# oversubscribe the CPU.
cpu_cores = multiprocessing.cpu_count()
_thread_cap = str(min(cpu_cores, 8))
for _var in ("OMP_NUM_THREADS", "MKL_NUM_THREADS", "NUMEXPR_NUM_THREADS"):
    os.environ[_var] = _thread_cap
|
|
|
|
|
|
|
|
# torchvision >= 0.17 removed the private `functional_tensor` module that
# older basicsr/gfpgan releases still import.  When it is missing, build a
# stand-in module that forwards to torchvision.transforms.functional and
# register it under the legacy import path.
try:
    import torchvision.transforms.functional_tensor
except ImportError:
    import torchvision.transforms.functional as F
    import torchvision.transforms as transforms
    import sys
    from types import ModuleType

    functional_tensor = ModuleType('torchvision.transforms.functional_tensor')

    # Forward only the callables the legacy module exposed, and only those
    # the installed torchvision actually provides.
    for _name in (
        'rgb_to_grayscale',
        'adjust_brightness',
        'adjust_contrast',
        'adjust_saturation',
        'normalize',
        'resize',
        'crop',
        'pad',
    ):
        if hasattr(F, _name):
            setattr(functional_tensor, _name, getattr(F, _name))

    # Make both `import torchvision.transforms.functional_tensor` and
    # `transforms.functional_tensor` resolve to the shim.
    sys.modules['torchvision.transforms.functional_tensor'] = functional_tensor
    transforms.functional_tensor = functional_tensor
|
|
|
|
|
|
|
|
# TensorFlow runtime tuning: disable oneDNN rewrites, quiet the C++ logs,
# hide CUDA devices (CPU-only inference), and keep GPU growth off.
os.environ.update({
    "TF_ENABLE_ONEDNN_OPTS": "0",
    "TF_CPP_MIN_LOG_LEVEL": "2",
    "CUDA_VISIBLE_DEVICES": "-1",
    "TF_FORCE_GPU_ALLOW_GROWTH": "false",
})
|
|
|
|
|
|
|
|
# ---------------------------------------------------------------------------
# PyTorch / PyArrow compatibility shims.
#
# Older third-party code expects private torch/pyarrow APIs that newer
# releases renamed or removed.  Everything here is best-effort: any
# ImportError/AttributeError aborts the whole patch with a warning instead
# of crashing startup.
# ---------------------------------------------------------------------------
try:
    import torch
    import torch.onnx

    # Some torch builds dropped ExportOptions; give callers a bare namespace
    # object so attribute assignment on it still works.
    if not hasattr(torch.onnx._internal.exporter, 'ExportOptions'):
        from types import SimpleNamespace
        torch.onnx._internal.exporter.ExportOptions = SimpleNamespace

    import torch.utils
    # Ensure torch.utils._pytree exists before patching functions onto it.
    if not hasattr(torch.utils, '_pytree'):
        from types import ModuleType
        torch.utils._pytree = ModuleType('_pytree')

    pytree = torch.utils._pytree

    if not hasattr(pytree, 'register_pytree_node'):
        def register_pytree_node(typ, flatten_fn, unflatten_fn, *, flatten_with_keys_fn=None, **kwargs):
            """Compatibility stub: registering custom pytree nodes is a no-op."""
            pass
        pytree.register_pytree_node = register_pytree_node

    if not hasattr(pytree, 'tree_flatten'):
        def tree_flatten(tree, is_leaf=None):
            """Compatibility implementation: flatten a nested list/tuple/dict.

            Returns ``(leaves, spec)`` where ``spec`` is ``None`` for a bare
            leaf, else ``(container_type, [(key_or_index, sub_spec), ...])``.
            NOTE: ``is_leaf`` is accepted for API parity but not consulted,
            and dict keys must be mutually sortable.
            """
            if isinstance(tree, (list, tuple)):
                flat = []
                spec = []
                for i, item in enumerate(tree):
                    if isinstance(item, (list, tuple, dict)):
                        sub_flat, sub_spec = tree_flatten(item, is_leaf)
                        flat.extend(sub_flat)
                        spec.append((i, sub_spec))
                    else:
                        flat.append(item)
                        spec.append((i, None))
                return flat, (type(tree), spec)
            elif isinstance(tree, dict):
                flat = []
                spec = []
                # Sort for a deterministic leaf order.
                for key, value in sorted(tree.items()):
                    if isinstance(value, (list, tuple, dict)):
                        sub_flat, sub_spec = tree_flatten(value, is_leaf)
                        flat.extend(sub_flat)
                        spec.append((key, sub_spec))
                    else:
                        flat.append(value)
                        spec.append((key, None))
                return flat, (dict, spec)
            else:
                return [tree], None
        pytree.tree_flatten = tree_flatten

    if not hasattr(pytree, 'tree_unflatten'):
        def tree_unflatten(values, spec):
            """Compatibility implementation: rebuild the tree described by ``spec``."""
            if spec is None:
                return values[0] if values else None

            tree_type, tree_spec = spec
            if tree_type in (list, tuple):
                result = []
                value_idx = 0
                for pos, sub_spec in tree_spec:
                    if sub_spec is None:
                        result.append(values[value_idx])
                        value_idx += 1
                    else:
                        # Consume exactly the leaves belonging to the subtree.
                        sub_count = _count_tree_values(sub_spec)
                        sub_values = values[value_idx:value_idx + sub_count]
                        result.append(tree_unflatten(sub_values, sub_spec))
                        value_idx += sub_count
                return tree_type(result)
            elif tree_type == dict:
                result = {}
                value_idx = 0
                for key, sub_spec in tree_spec:
                    if sub_spec is None:
                        result[key] = values[value_idx]
                        value_idx += 1
                    else:
                        sub_count = _count_tree_values(sub_spec)
                        sub_values = values[value_idx:value_idx + sub_count]
                        result[key] = tree_unflatten(sub_values, sub_spec)
                        value_idx += sub_count
                return result
            return values[0] if values else None
        pytree.tree_unflatten = tree_unflatten

    if not hasattr(pytree, 'tree_map'):
        def tree_map(fn, tree, *other_trees, is_leaf=None):
            """Compatibility implementation: map ``fn`` over the tree's leaves.

            Fixed: flatten/unflatten are looked up on ``pytree`` rather than
            as module globals.  The bare names only exist when the fallback
            definitions above were installed; when torch itself provides
            tree_flatten/tree_unflatten (so those branches were skipped), the
            original global lookup raised NameError at call time.
            """
            flat, spec = pytree.tree_flatten(tree, is_leaf)
            if other_trees:
                other_flats = [pytree.tree_flatten(t, is_leaf)[0] for t in other_trees]
                mapped = [fn(x, *others) for x, *others in zip(flat, *other_flats)]
            else:
                mapped = [fn(x) for x in flat]
            return pytree.tree_unflatten(mapped, spec)
        pytree.tree_map = tree_map

    def _count_tree_values(spec):
        """Return the number of leaf slots described by a fallback tree spec."""
        if spec is None:
            return 1
        tree_type, tree_spec = spec
        return sum(_count_tree_values(sub_spec) if sub_spec else 1 for _, sub_spec in tree_spec)

    # pyarrow >= 14 removed PyExtensionType; some dependents still reference
    # it, so provide an inert placeholder class.
    try:
        import pyarrow
        if not hasattr(pyarrow, 'PyExtensionType'):
            pyarrow.PyExtensionType = type('PyExtensionType', (), {})
    except ImportError:
        pass

except (ImportError, AttributeError) as e:
    print(f"Warning: PyTorch/PyArrow compatibility patch failed: {e}")
|
|
# Working directory for uploaded/processed images; output is written back
# into the same tree.
IMAGES_DIR = os.environ.get("IMAGES_DIR", "/opt/data/images")
OUTPUT_DIR = IMAGES_DIR

# Celebrity reference photos: the source dir may be overridden (or blanked)
# via env; non-empty values are normalised to absolute paths.
CELEBRITY_SOURCE_DIR = os.environ.get(
    "CELEBRITY_SOURCE_DIR", "/opt/data/chinese_celeb_dataset"
).strip()
if CELEBRITY_SOURCE_DIR:
    CELEBRITY_SOURCE_DIR = os.path.abspath(os.path.expanduser(CELEBRITY_SOURCE_DIR))

# The dataset dir defaults to the source dir, falling back to the stock path
# when the source dir was explicitly blanked.
_celeb_dataset_raw = os.environ.get(
    "CELEBRITY_DATASET_DIR",
    CELEBRITY_SOURCE_DIR or "/opt/data/chinese_celeb_dataset",
)
CELEBRITY_DATASET_DIR = os.path.abspath(os.path.expanduser(_celeb_dataset_raw))

# Minimum similarity score for a celebrity match.
CELEBRITY_FIND_THRESHOLD = float(
    os.environ.get("CELEBRITY_FIND_THRESHOLD", 0.88)
)
|
|
|
|
|
|
|
|
|
|
|
# --- WeChat mini-program credentials ---
WECHAT_APPID = os.environ.get("WECHAT_APPID", "******").strip()
# NOTE(review): the env key here is "WCT_SECRET", not "WECHAT_SECRET" —
# looks like a typo, but renaming would break deployments that set the old
# key.  Confirm which name is actually deployed before changing it.
WECHAT_SECRET = os.environ.get("WCT_SECRET", "******").strip()
APP_SECRET_TOKEN = os.environ.get("APP_SECRET_TOKEN", "******")

# --- MySQL connection + pool sizing ---
MYSQL_HOST = os.environ.get("MYSQL_HOST", "******")
MYSQL_PORT = int(os.environ.get("MYSQL_PORT", "3306"))
MYSQL_DB = os.environ.get("MYSQL_DB", "******")
MYSQL_USER = os.environ.get("MYSQL_USER", "******")
MYSQL_PASSWORD = os.environ.get("MYSQL_PASSWORD", "******")
MYSQL_POOL_MIN_SIZE = int(os.environ.get("MYSQL_POOL_MIN_SIZE", "1"))
MYSQL_POOL_MAX_SIZE = int(os.environ.get("MYSQL_POOL_MAX_SIZE", "10"))

# --- Baidu Object Storage (BOS) ---
BOS_ACCESS_KEY = os.environ.get("BOS_ACCESS_KEY", "******").strip()
BOS_SECRET_KEY = os.environ.get("BOS_SECRET_KEY", "******").strip()
BOS_ENDPOINT = os.environ.get("BOS_ENDPOINT", "******").strip()
BOS_BUCKET_NAME = os.environ.get("BOS_BUCKET_NAME", "******").strip()
BOS_IMAGE_DIR = os.environ.get("BOS_IMAGE_DIR", "******").strip()
BOS_MODELS_PREFIX = os.environ.get("BOS_MODELS_PREFIX", "******").strip()
BOS_CELEBRITY_PREFIX = os.environ.get("BOS_CELEBRITY_PREFIX", "******").strip()

# BOS uploads: an explicit BOS_UPLOAD_ENABLED env value wins; otherwise
# enable only when every required credential is non-empty.
_bos_enabled_env = os.environ.get("BOS_UPLOAD_ENABLED")
if _bos_enabled_env is None:
    BOS_UPLOAD_ENABLED = bool(
        BOS_ACCESS_KEY and BOS_SECRET_KEY and BOS_ENDPOINT and BOS_BUCKET_NAME
    )
else:
    BOS_UPLOAD_ENABLED = _bos_enabled_env.lower() in ("1", "true", "on")
|
|
HOSTNAME = os.environ.get("HOSTNAME", "default-hostname")

# Root directory for all model weights, and where downloads land (defaults
# to the same place).
MODELS_PATH = os.path.abspath(
    os.path.expanduser(os.environ.get("MODELS_PATH", "/opt/data/models"))
)
MODELS_DOWNLOAD_DIR = os.path.abspath(
    os.path.expanduser(os.environ.get("MODELS_DOWNLOAD_DIR", MODELS_PATH))
)

# --- HuggingFace model-sync settings ---
HUGGINGFACE_SYNC_ENABLED = os.environ.get(
    "HUGGINGFACE_SYNC_ENABLED", "true"
).lower() in ("1", "true", "on")
HUGGINGFACE_REPO_ID = os.environ.get(
    "HUGGINGFACE_REPO_ID", "ethonmax/facescore"
).strip()
HUGGINGFACE_REVISION = os.environ.get("HUGGINGFACE_REVISION", "main").strip()

def _split_patterns(raw):
    """Parse a comma-separated pattern list, dropping empty entries."""
    return [chunk.strip() for chunk in raw.split(",") if chunk.strip()]

_hf_allow_env = os.environ.get("HUGGINGFACE_ALLOW_PATTERNS", "").strip()
HUGGINGFACE_ALLOW_PATTERNS = _split_patterns(_hf_allow_env)
_hf_ignore_env = os.environ.get("HUGGINGFACE_IGNORE_PATTERNS", "").strip()
HUGGINGFACE_IGNORE_PATTERNS = _split_patterns(_hf_ignore_env)
|
|
|
|
|
# --- ModelScope cache location ---
# An explicit MODELSCOPE_CACHE env value wins; otherwise nest the cache
# under MODELS_PATH.
_MODELSCOPE_CACHE_ENV = os.environ.get("MODELSCOPE_CACHE", "").strip()
if _MODELSCOPE_CACHE_ENV:
    MODELSCOPE_CACHE_DIR = os.path.abspath(os.path.expanduser(_MODELSCOPE_CACHE_ENV))
else:
    MODELSCOPE_CACHE_DIR = os.path.join(MODELS_PATH, "modelscope")

# Best-effort creation: a failure is reported but must not abort startup.
try:
    os.makedirs(MODELSCOPE_CACHE_DIR, exist_ok=True)
except Exception as exc:
    # Fixed: the original passed %s-style placeholders plus extra arguments
    # to print(), which printed the literal "%s (%s)" followed by the values
    # instead of an interpolated message.
    print(f"创建 ModelScope 缓存目录失败: {MODELSCOPE_CACHE_DIR} ({exc})")

# Point every ModelScope cache env knob at the same directory unless the
# caller already set it.
for _var in ("MODELSCOPE_CACHE", "MODELSCOPE_HOME", "MODELSCOPE_CACHE_HOME"):
    os.environ.setdefault(_var, MODELSCOPE_CACHE_DIR)
|
|
|
|
|
# DeepFace stores its weights under $DEEPFACE_HOME/.deepface.
DEEPFACE_HOME = os.environ.get("DEEPFACE_HOME", "/opt/data/models")
os.environ["DEEPFACE_HOME"] = DEEPFACE_HOME

# GFPGAN / facexlib / basicsr / Real-ESRGAN / torch hub all share one
# weights directory.
GFPGAN_MODEL_DIR = MODELS_DOWNLOAD_DIR
# Fixed: create the directory best-effort (matching the ModelScope cache
# handling above) instead of letting a read-only filesystem abort import.
try:
    os.makedirs(GFPGAN_MODEL_DIR, exist_ok=True)
except Exception as exc:
    print(f"Failed to create GFPGAN model dir: {GFPGAN_MODEL_DIR} ({exc})")

for _var in (
    "GFPGAN_MODEL_ROOT",
    "FACEXLIB_CACHE_DIR",
    "BASICSR_CACHE_DIR",
    "REALESRGAN_MODEL_ROOT",
    "HUB_CACHE_DIR",
):
    os.environ[_var] = GFPGAN_MODEL_DIR

# rembg (u2net) model directory.
# NOTE(review): the "$HOME" -> "~" replace suggests MODELS_PATH may arrive
# containing a literal "$HOME" which expanduser then resolves — confirm
# whether any deployment still relies on that before simplifying.
REMBG_MODEL_DIR = os.path.expanduser(MODELS_PATH.replace("$HOME", "~"))
os.environ["U2NET_HOME"] = REMBG_MODEL_DIR
os.environ["REMBG_HOME"] = REMBG_MODEL_DIR
|
|
|
|
|
def _env_float(name, default):
    """Read a float-valued tuning knob from the environment."""
    return float(os.environ.get(name, default))

# Detection / classification thresholds.
IMG_QUALITY = _env_float("IMG_QUALITY", 0.5)
FACE_CONFIDENCE = _env_float("FACE_CONFIDENCE", 0.7)
AGE_CONFIDENCE = _env_float("AGE_CONFIDENCE", 0.99)
# NOTE(review): default 1.1 exceeds 1.0, which is unusual for a confidence
# threshold — confirm whether this is meant to disable the gender gate.
GENDER_CONFIDENCE = _env_float("GENDER_CONFIDENCE", 1.1)

# Feature toggles and output settings.
DEEPFACE_EMOTION_ENABLED = os.environ.get(
    "DEEPFACE_EMOTION_ENABLED", "true"
).lower() in ("1", "true", "on")
UPSCALE_SIZE = int(os.environ.get("UPSCALE_SIZE", 2))
SAVE_QUALITY = int(os.environ.get("SAVE_QUALITY", 85))
REALESRGAN_MODEL = os.environ.get("REALESRGAN_MODEL", "realesr-general-x4v3")

# Face-detection model file name.
YOLO_MODEL = os.environ.get("YOLO_MODEL", "yolov11n-face.pt")

# RobustVideoMatting configuration.
RVM_MODEL = os.environ.get("RVM_MODEL", "resnet50")
RVM_LOCAL_REPO = os.environ.get(
    "RVM_LOCAL_REPO", "/opt/data/RobustVideoMatting"
).strip()
RVM_WEIGHTS_PATH = os.environ.get(
    "RVM_WEIGHTS_PATH", "/opt/data/models/torch/hub/checkpoints/rvm_resnet50.pth"
).strip()

DRAW_SCORE = os.environ.get("DRAW_SCORE", "true").lower() in ("1", "true", "on")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def _env_flag(name, default):
    """True when the env var (or its default) is "1"/"true"/"on", any case."""
    return os.environ.get(name, default).lower() in ("1", "true", "on")

# --- Beauty-score post-adjustment ---
BEAUTY_ADJUST_ENABLED = _env_flag("BEAUTY_ADJUST_ENABLED", "true")
BEAUTY_ADJUST_MIN = float(os.environ.get("BEAUTY_ADJUST_MIN", 1.0))
# Legacy BEAUTY_ADJUST_THRESHOLD still acts as the default for the newer
# BEAUTY_ADJUST_MAX name.
_legacy_thr = os.environ.get("BEAUTY_ADJUST_THRESHOLD")
BEAUTY_ADJUST_MAX = float(
    os.environ.get("BEAUTY_ADJUST_MAX", 8.0 if _legacy_thr is None else _legacy_thr)
)
BEAUTY_ADJUST_GAMMA = float(os.environ.get("BEAUTY_ADJUST_GAMMA", 0.5))
# Backwards-compatible alias for code that still reads the old name.
BEAUTY_ADJUST_THRESHOLD = BEAUTY_ADJUST_MAX

# --- Harmony-score post-adjustment ---
HARMONY_ADJUST_ENABLED = _env_flag("HARMONY_ADJUST_ENABLED", "true")
HARMONY_ADJUST_THRESHOLD = float(os.environ.get("HARMONY_ADJUST_THRESHOLD", 9.0))
HARMONY_ADJUST_GAMMA = float(os.environ.get("HARMONY_ADJUST_GAMMA", 0.3))

# --- Startup behaviour: warmup and per-model eager initialisation ---
ENABLE_WARMUP = _env_flag("ENABLE_WARMUP", "false")
AUTO_INIT_ANALYZER = _env_flag("AUTO_INIT_ANALYZER", "true")
AUTO_INIT_GFPGAN = _env_flag("AUTO_INIT_GFPGAN", "false")
AUTO_INIT_DDCOLOR = _env_flag("AUTO_INIT_DDCOLOR", "false")
AUTO_INIT_REALESRGAN = _env_flag("AUTO_INIT_REALESRGAN", "false")
AUTO_INIT_REMBG = _env_flag("AUTO_INIT_REMBG", "false")
AUTO_INIT_ANIME_STYLE = _env_flag("AUTO_INIT_ANIME_STYLE", "false")
AUTO_INIT_RVM = _env_flag("AUTO_INIT_RVM", "false")

# --- Periodic temp-file cleanup ---
CLEANUP_INTERVAL_HOURS = float(os.environ.get("CLEANUP_INTERVAL_HOURS", 1.0))
CLEANUP_AGE_HOURS = float(os.environ.get("CLEANUP_AGE_HOURS", 1.0))
|
|
|
|
|
|
|
|
# Extra model artifacts to mirror from BOS at startup (none at present).
BOS_DOWNLOAD_TARGETS = []

# Resolve LOG_LEVEL by name; unknown names fall back to INFO.
log_level_str = os.getenv("LOG_LEVEL", "INFO").upper()
log_level = getattr(logging, log_level_str, logging.INFO)

def _bool_env(name, default):
    """True when the env var (or its default) is "1"/"true"/"on", any case."""
    return os.environ.get(name, default).lower() in ("1", "true", "on")

# Master switch for logging output.
ENABLE_LOGGING = _bool_env("ENABLE_LOGGING", "true")

# Per-feature switches; the import probing further down honours these.
ENABLE_DDCOLOR = _bool_env("ENABLE_DDCOLOR", "true")
ENABLE_REALESRGAN = _bool_env("ENABLE_REALESRGAN", "true")
ENABLE_GFPGAN = _bool_env("ENABLE_GFPGAN", "true")
ENABLE_ANIME_STYLE = _bool_env("ENABLE_ANIME_STYLE", "true")
ENABLE_ANIME_PRELOAD = _bool_env("ENABLE_ANIME_PRELOAD", "false")
ENABLE_RVM = _bool_env("ENABLE_RVM", "true")

# Upper bound on images scored in a single request.
FACE_SCORE_MAX_IMAGES = int(os.environ.get("FACE_SCORE_MAX_IMAGES", 10))

# Age-adjustment parameters for female subjects (consumed by scoring code
# elsewhere in the project).
FEMALE_AGE_ADJUSTMENT = int(os.environ.get("FEMALE_AGE_ADJUSTMENT", 3))
FEMALE_AGE_ADJUSTMENT_THRESHOLD = int(
    os.environ.get("FEMALE_AGE_ADJUSTMENT_THRESHOLD", 20)
)
|
|
|
|
|
|
|
|
# Configure root logging once at import time.  When logging is disabled we
# raise the root threshold above CRITICAL and disable this module's logger
# so nothing is emitted anywhere.
if ENABLE_LOGGING:
    logging.basicConfig(
        level=log_level,
        format="[%(asctime)s] [%(levelname)s] %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    )
else:
    logging.basicConfig(level=logging.CRITICAL + 10)

logger = logging.getLogger(__name__)
if not ENABLE_LOGGING:
    logger.disabled = True

# In-process cache for the WeChat access token and its expiry timestamp.
access_token_cache = {"token": None, "expires_at": 0}
|
|
|
|
|
|
|
|
# Optional heavy dependencies: probe each import once at startup and record
# an *_AVAILABLE flag that the rest of the app checks before using the
# corresponding feature.
try:
    from deepface import DeepFace
except ImportError:
    print("Warning: DeepFace not installed. Install with: pip install deepface")
    DEEPFACE_AVAILABLE = False
else:
    DEEPFACE_AVAILABLE = True

try:
    import mediapipe as mp
except ImportError:
    print("Warning: mediapipe not installed. Install with: pip install mediapipe")
    MEDIAPIPE_AVAILABLE = False
else:
    MEDIAPIPE_AVAILABLE = True

# Historical alias: dlib-based landmark code was replaced by mediapipe, but
# callers may still probe the old flag.
DLIB_AVAILABLE = MEDIAPIPE_AVAILABLE

try:
    from ultralytics import YOLO
except ImportError:
    print("Warning: ultralytics not installed. Install with: pip install ultralytics")
    YOLO_AVAILABLE = False
else:
    YOLO_AVAILABLE = True
|
|
|
|
|
|
|
|
# GFPGAN face restoration: verify the helper module imports and report which
# weight files are still missing (they may arrive later via model sync).
if ENABLE_GFPGAN:
    try:
        # Files GFPGAN needs before it can initialise: the restorer module
        # plus the face-detection and parsing weights.
        _gfpgan_required = [
            os.path.join(os.path.dirname(__file__), "gfpgan_restorer.py"),
            os.path.join(MODELS_PATH, "gfpgan/weights/detection_Resnet50_Final.pth"),
            os.path.join(MODELS_PATH, "gfpgan/weights/parsing_parsenet.pth"),
        ]
        missing_files = [p for p in _gfpgan_required if not os.path.exists(p)]
        for file_path in missing_files:
            logger.info("GFPGAN 所需文件暂未找到,将等待模型同步: %s", file_path)

        from gfpgan_restorer import GFPGANRestorer
        GFPGAN_AVAILABLE = True

        if missing_files:
            logger.warning(
                "GFPGAN 文件尚未全部就绪,将在 HuggingFace/BOS 同步完成后继续初始化: %s",
                ", ".join(missing_files),
            )
        else:
            logger.info("GFPGAN photo restoration feature prerequisites detected")
    except ImportError as e:
        print(f"Warning: GFPGAN enabled but not available: {e}")
        GFPGAN_AVAILABLE = False
        logger.warning(f"GFPGAN photo restoration feature is enabled but import failed: {e}")
else:
    GFPGAN_AVAILABLE = False
    logger.info("GFPGAN photo restoration feature is disabled (via ENABLE_GFPGAN environment variable)")
|
|
|
|
|
|
|
|
# DDColor colorization: availability depends on the local helper module.
if ENABLE_DDCOLOR:
    try:
        from ddcolor_colorizer import DDColorColorizer
    except ImportError as e:
        print(f"Warning: DDColor enabled but not available: {e}")
        DDCOLOR_AVAILABLE = False
        logger.warning(f"DDColor feature is enabled but import failed: {e}")
    else:
        DDCOLOR_AVAILABLE = True
        logger.info("DDColor feature is enabled and available")
else:
    DDCOLOR_AVAILABLE = False
    logger.info("DDColor feature is disabled (via ENABLE_DDCOLOR environment variable)")

# The legacy simple restorer was removed; keep the flag for callers that
# still probe it.
SIMPLE_RESTORER_AVAILABLE = False
|
|
|
|
|
|
|
|
# Real-ESRGAN super resolution: availability depends on the local helper
# module.
if ENABLE_REALESRGAN:
    try:
        from realesrgan_upscaler import RealESRGANUpscaler
    except ImportError as e:
        print(f"Warning: Real-ESRGAN enabled but not available: {e}")
        REALESRGAN_AVAILABLE = False
        logger.warning(f"Real-ESRGAN super resolution feature is enabled but import failed: {e}")
    else:
        REALESRGAN_AVAILABLE = True
        logger.info("Real-ESRGAN super resolution feature is enabled and available")
else:
    REALESRGAN_AVAILABLE = False
    logger.info("Real-ESRGAN super resolution feature is disabled (via ENABLE_REALESRGAN environment variable)")
|
|
|
|
|
|
|
|
ENABLE_REMBG = os.environ.get("ENABLE_REMBG", "true").lower() in ("1", "true", "on")

# rembg (u2net) background removal: availability depends on the rembg package.
if ENABLE_REMBG:
    try:
        import rembg
        from rembg import new_session
    except ImportError as e:
        print(f"Warning: rembg enabled but not available: {e}")
        REMBG_AVAILABLE = False
        logger.warning(f"rembg background removal feature is enabled but import failed: {e}")
    else:
        REMBG_AVAILABLE = True
        logger.info("rembg background removal feature is enabled and available")
        logger.info(f"rembg model storage path: {REMBG_MODEL_DIR}")
else:
    REMBG_AVAILABLE = False
    logger.info("rembg background removal feature is disabled (via ENABLE_REMBG environment variable)")

# CLIP-based scoring is not wired in; callers should treat it as absent.
CLIP_AVAILABLE = False
|
|
|
|
|
|
|
|
# Anime stylization: availability depends on the local helper module.
if ENABLE_ANIME_STYLE:
    try:
        from anime_stylizer import AnimeStylizer
    except ImportError as e:
        print(f"Warning: Anime Style enabled but not available: {e}")
        ANIME_STYLE_AVAILABLE = False
        logger.warning(f"Anime stylization feature is enabled but import failed: {e}")
    else:
        ANIME_STYLE_AVAILABLE = True
        logger.info("Anime stylization feature is enabled and available")
else:
    ANIME_STYLE_AVAILABLE = False
    logger.info("Anime stylization feature is disabled (via ENABLE_ANIME_STYLE environment variable)")
|
|
|
|
|
|
|
|
# RobustVideoMatting background removal.
# Fixed: removed a duplicate re-read of ENABLE_RVM — the identical expression
# is already evaluated once above, so the second assignment was redundant.
if ENABLE_RVM:
    try:
        # The RVM model itself is loaded lazily elsewhere; at import time we
        # only verify that torch — its sole hard dependency — is importable.
        import torch
    except ImportError as e:
        print(f"Warning: RVM enabled but not available: {e}")
        RVM_AVAILABLE = False
        logger.warning(f"RVM background removal feature is enabled but import failed: {e}")
    else:
        RVM_AVAILABLE = True
        logger.info("RVM background removal feature is enabled and available")
else:
    RVM_AVAILABLE = False
    logger.info("RVM background removal feature is disabled (via ENABLE_RVM environment variable)")
|
|
|