import warnings
from functools import lru_cache
from typing import Dict, List, Optional, Tuple

import cv2
import gradio as gr
import mediapipe as mp
import numpy as np
import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForSemanticSegmentation

warnings.filterwarnings("ignore")
|
|
def _ensure_rgb_uint8(image: np.ndarray) -> np.ndarray:
    """Convert an input image array (or PIL image) to an RGB uint8 array."""
    if image is None:
        raise ValueError("No image provided")

    if isinstance(image, Image.Image):
        image = np.array(image.convert("RGB"))
    elif image.dtype != np.uint8:
        # Float images are typically in [0, 1]; scale before casting so the
        # cast does not truncate everything to 0 or 1.
        if np.issubdtype(image.dtype, np.floating) and image.max() <= 1.0:
            image = (image * 255.0).round()
        image = np.clip(image, 0, 255).astype(np.uint8)

    if image.ndim == 2:
        image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
    elif image.shape[2] == 4:
        image = cv2.cvtColor(image, cv2.COLOR_RGBA2RGB)
    return image
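

# Illustrative sketch (not used by the app): how _ensure_rgb_uint8 is expected
# to behave on a float image and a grayscale image. The shapes and values here
# are made up for demonstration.
def _demo_ensure_rgb_uint8() -> None:
    float_img = np.random.rand(32, 32, 3)          # float64 in [0, 1]
    gray_img = np.zeros((32, 32), dtype=np.uint8)  # single channel
    assert _ensure_rgb_uint8(float_img).dtype == np.uint8
    assert _ensure_rgb_uint8(gray_img).shape == (32, 32, 3)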
|
|
def _preprocess_image(image: np.ndarray) -> np.ndarray:
    """Preprocess an image to improve face detection: normalize size and lighting."""
    rgb = _ensure_rgb_uint8(image)
    h, w = rgb.shape[:2]

    # Downscale very large images to speed up detection.
    max_dim = 1024
    if max(h, w) > max_dim:
        scale = max_dim / max(h, w)
        rgb = cv2.resize(rgb, (int(w * scale), int(h * scale)), interpolation=cv2.INTER_AREA)
        h, w = rgb.shape[:2]  # refresh dimensions after resizing

    # Upscale very small images so faces are large enough to detect.
    min_dim = 200
    if min(h, w) < min_dim:
        scale = min_dim / min(h, w)
        rgb = cv2.resize(rgb, (int(w * scale), int(h * scale)), interpolation=cv2.INTER_CUBIC)

    # Brighten dark images with CLAHE on the L channel in LAB space.
    gray = cv2.cvtColor(rgb, cv2.COLOR_RGB2GRAY)
    if np.mean(gray) < 50:
        lab = cv2.cvtColor(rgb, cv2.COLOR_RGB2LAB)
        l, a, b = cv2.split(lab)
        clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
        l = clahe.apply(l)
        rgb = cv2.cvtColor(cv2.merge((l, a, b)), cv2.COLOR_LAB2RGB)

    return rgb
|
|
def _central_crop_bbox(width: int, height: int, frac: float = 0.6) -> Tuple[int, int, int, int]:
    """Return a central crop bounding box (x1, y1, x2, y2) covering `frac` of width/height."""
    frac = float(np.clip(frac, 0.2, 1.0))
    crop_w = int(width * frac)
    crop_h = int(height * frac)
    x1 = (width - crop_w) // 2
    y1 = (height - crop_h) // 2
    x2 = x1 + crop_w
    y2 = y1 + crop_h
    return x1, y1, x2, y2
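

# Worked example: for a 1000x800 image with frac=0.6, the crop is 600x480 and
# the returned box is (200, 160, 800, 640) -- centered, with 20% margins per side.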
|
|
def _detect_face_bbox_mediapipe(image_rgb: np.ndarray) -> Optional[Tuple[int, int, int, int]]:
    """Detect a face bounding box using MediaPipe Face Detection and return (x1, y1, x2, y2)."""
    try:
        height, width = image_rgb.shape[:2]

        mp_face_detection = mp.solutions.face_detection
        face_detection = mp_face_detection.FaceDetection(
            model_selection=1,  # full-range model, better for faces farther from the camera
            min_detection_confidence=0.3,
        )

        # MediaPipe solutions expect RGB input, which is what we already have.
        results = face_detection.process(image_rgb)
        face_detection.close()

        if not results.detections:
            return None

        # Collect all valid detections with their confidences.
        detections = []
        for detection in results.detections:
            bbox = detection.location_data.relative_bounding_box
            confidence = detection.score[0]

            # Convert relative coordinates to absolute pixels.
            x = int(bbox.xmin * width)
            y = int(bbox.ymin * height)
            w = int(bbox.width * width)
            h = int(bbox.height * height)

            # Clamp to image bounds.
            x = max(0, x)
            y = max(0, y)
            w = min(width - x, w)
            h = min(height - y, h)

            if w > 0 and h > 0:
                detections.append({'bbox': (x, y, w, h), 'confidence': confidence})

        if not detections:
            return None

        # Keep the highest-confidence detection.
        detections.sort(key=lambda d: d['confidence'], reverse=True)
        x, y, w, h = detections[0]['bbox']

        # Expand the box slightly so the crop includes the full face.
        expand_x = int(w * 0.15)
        expand_y = int(h * 0.20)
        x1 = max(0, x - expand_x)
        y1 = max(0, y - expand_y)
        x2 = min(width, x + w + expand_x)
        y2 = min(height, y + h + expand_y)

        # Reject implausibly small boxes.
        if (x2 - x1) < 50 or (y2 - y1) < 50:
            return None

        return x1, y1, x2, y2

    except Exception as e:
        print(f"MediaPipe error: {e}")
        return None
|
|
def _detect_face_bbox_opencv(image_rgb: np.ndarray) -> Optional[Tuple[int, int, int, int]]:
    """Fallback face detection using OpenCV Haar cascades."""
    try:
        gray = cv2.cvtColor(image_rgb, cv2.COLOR_RGB2GRAY)

        cascade_path = cv2.data.haarcascades + 'haarcascade_frontalface_default.xml'
        face_cascade = cv2.CascadeClassifier(cascade_path)
        if face_cascade.empty():
            print("Haar cascade not loaded properly")
            return None

        faces = face_cascade.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(30, 30),
            flags=cv2.CASCADE_SCALE_IMAGE,
        )
        if len(faces) == 0:
            return None

        # Take the largest detected face.
        faces = sorted(faces, key=lambda f: f[2] * f[3], reverse=True)
        x, y, w, h = faces[0]

        # Expand the box slightly, mirroring the MediaPipe path.
        expand_x = int(w * 0.15)
        expand_y = int(h * 0.20)
        height, width = image_rgb.shape[:2]
        x1 = max(0, x - expand_x)
        y1 = max(0, y - expand_y)
        x2 = min(width, x + w + expand_x)
        y2 = min(height, y + h + expand_y)

        return x1, y1, x2, y2

    except Exception as e:
        print(f"OpenCV face detection error: {e}")
        return None
|
|
def _binary_open_close(mask: np.ndarray, kernel_size: int = 5, iterations: int = 1) -> np.ndarray:
    """Apply morphological open then close to clean the binary mask."""
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (kernel_size, kernel_size))
    opened = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel, iterations=iterations)
    closed = cv2.morphologyEx(opened, cv2.MORPH_CLOSE, kernel, iterations=iterations)
    return closed
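

# Illustrative sketch (not called by the app): opening removes isolated
# speckles, closing fills small holes. The toy mask below is an assumption
# made up for demonstration.
def _demo_binary_open_close() -> None:
    mask = np.zeros((40, 40), dtype=np.uint8)
    mask[10:30, 10:30] = 255   # solid foreground square
    mask[5, 5] = 255           # speckle noise -> removed by opening
    mask[20, 20] = 0           # pinhole -> filled by closing
    cleaned = _binary_open_close(mask, kernel_size=3)
    assert cleaned[5, 5] == 0 and cleaned[20, 20] == 255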
|
|
@lru_cache(maxsize=1)
def _load_face_parsing_model():
    """Load face-parsing model and processor from the Hugging Face Hub (cached)."""
    model_id = "jonathandinu/face-parsing"
    processor = AutoImageProcessor.from_pretrained(model_id)
    model = AutoModelForSemanticSegmentation.from_pretrained(model_id)
    model.eval()
    id2label: Dict[int, str] = model.config.id2label
    label2id: Dict[str, int] = model.config.label2id
    return processor, model, id2label, label2id
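

# Because of @lru_cache(maxsize=1), the weights are downloaded and loaded only
# on the first call; later calls return the same objects. A minimal sketch:
def _demo_model_caching() -> None:
    first = _load_face_parsing_model()
    second = _load_face_parsing_model()
    assert first is second  # cached tuple, no second download or load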
|
|
def _segment_face_labels(image_rgb: np.ndarray) -> Tuple[np.ndarray, Dict[int, str]]:
    """Run face-parsing segmentation on an RGB crop. Returns (labels HxW int, id2label)."""
    processor, model, id2label, _ = _load_face_parsing_model()
    pil_img = Image.fromarray(image_rgb)

    # Cap the input size to keep inference fast.
    max_size = 512
    if max(pil_img.size) > max_size:
        scale = max_size / max(pil_img.size)
        new_size = (int(pil_img.size[0] * scale), int(pil_img.size[1] * scale))
        pil_img = pil_img.resize(new_size, Image.Resampling.LANCZOS)

    inputs = processor(images=pil_img, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)
    logits = outputs.logits

    # Upsample logits to the ORIGINAL crop size (not the capped size) so the
    # returned label map aligns pixel-for-pixel with `image_rgb`.
    upsampled = torch.nn.functional.interpolate(
        logits,
        size=image_rgb.shape[:2],
        mode="bilinear",
        align_corners=False,
    )
    labels = upsampled.argmax(dim=1)[0].cpu().numpy().astype(np.int32)
    return labels, id2label
|
|
def _skin_indices_from_id2label(id2label: Dict[int, str]) -> List[int]:
    """Return label indices that correspond to skin regions."""
    skin_indices: List[int] = []
    for idx, name in id2label.items():
        name_l = name.lower()
        if "skin" in name_l:
            skin_indices.append(int(idx))
        elif "face" in name_l and "hair" not in name_l:
            skin_indices.append(int(idx))

    # Fallback: indices commonly used for skin in face-parsing label sets.
    if not skin_indices:
        for idx in [1, 13, 14, 15]:
            if idx in id2label:
                skin_indices.append(idx)

    return skin_indices
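

# Illustrative sketch (not called by the app): with a hypothetical label map,
# only "skin"-like entries are selected; "hair" is excluded.
def _demo_skin_indices() -> None:
    fake_id2label = {0: "background", 1: "skin", 2: "hair", 3: "nose"}
    assert _skin_indices_from_id2label(fake_id2label) == [1]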
|
|
def _compute_skin_color_hex(image_rgb: np.ndarray, mask: np.ndarray) -> Tuple[str, np.ndarray]:
    """Compute a robust representative skin color; return (hex string, RGB color)."""
    if mask is None or mask.size == 0:
        raise ValueError("Invalid mask for skin color computation")

    mask_bool = mask.astype(bool)
    if not np.any(mask_bool):
        raise ValueError("No skin pixels detected")

    skin_pixels = image_rgb[mask_bool]

    # Median is robust to outliers (shadows, highlights, stray hair pixels).
    median_color = np.clip(np.median(skin_pixels, axis=0), 0, 255).astype(np.uint8)
    mean_color = np.clip(np.mean(skin_pixels, axis=0), 0, 255).astype(np.uint8)

    # If the median channels disagree wildly (very high spread), the median
    # likely landed on unrepresentative pixels; fall back to the mean.
    if np.std(median_color) > 100:
        color_rgb = mean_color
    else:
        color_rgb = median_color

    r, g, b = int(color_rgb[0]), int(color_rgb[1]), int(color_rgb[2])
    hex_code = f"#{r:02X}{g:02X}{b:02X}"
    return hex_code, color_rgb
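

# Illustrative sketch (not called by the app): a uniform synthetic "skin"
# patch should round-trip to its own hex code. The values are made up.
def _demo_compute_skin_color_hex() -> None:
    img = np.full((10, 10, 3), (210, 170, 140), dtype=np.uint8)
    mask = np.full((10, 10), 255, dtype=np.uint8)
    hex_code, _color = _compute_skin_color_hex(img, mask)
    assert hex_code == "#D2AA8C"  # 210, 170, 140 in hex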
|
|
def _solid_color_image(color_rgb: np.ndarray, size: Tuple[int, int] = (160, 160)) -> np.ndarray:
    """Create a solid color swatch image of the given (width, height)."""
    swatch = np.zeros((size[1], size[0], 3), dtype=np.uint8)
    swatch[:, :] = color_rgb
    return swatch
|
|
def detect_skin_tone(image: np.ndarray) -> Tuple[str, np.ndarray, np.ndarray]:
    """Main pipeline: returns (hex_code, color_swatch_image, debug_mask_overlay)."""
    try:
        rgb = _preprocess_image(image)
        height, width = rgb.shape[:2]
        debug_img = rgb.copy()

        # Try MediaPipe first, then the OpenCV Haar fallback, then a central crop.
        face_bbox = _detect_face_bbox_mediapipe(rgb)
        detection_method = "MediaPipe" if face_bbox is not None else ""

        if face_bbox is None:
            face_bbox = _detect_face_bbox_opencv(rgb)
            if face_bbox is not None:
                detection_method = "OpenCV Haar"

        if face_bbox is None:
            face_bbox = _central_crop_bbox(width, height, frac=0.5)
            detection_method = "Central Crop"
            print("Warning: Using central crop as fallback")

        x1, y1, x2, y2 = face_bbox
        if x2 <= x1 or y2 <= y1:
            raise ValueError("Invalid bounding box coordinates")
        if (x2 - x1) < 20 or (y2 - y1) < 20:
            raise ValueError("Face region too small")

        face_crop = rgb[y1:y2, x1:x2]
        if face_crop.size == 0:
            raise ValueError("Empty face crop")

        # Fallback mask: an ellipse over the center of the crop, roughly where
        # cheeks and forehead sit.
        def _ellipse_fallback_mask() -> np.ndarray:
            ch, cw = face_crop.shape[:2]
            fallback = np.zeros((ch, cw), dtype=np.uint8)
            axis = min(ch, cw) // 3
            cv2.ellipse(fallback, (cw // 2, ch // 2), (axis, axis // 2),
                        0, 0, 360, 255, -1)
            return fallback

        try:
            labels, id2label = _segment_face_labels(face_crop)
            skin_indices = _skin_indices_from_id2label(id2label)
            if not skin_indices:
                skin_mask = _ellipse_fallback_mask()
            else:
                skin_mask = np.isin(labels, np.array(skin_indices, dtype=np.int32)).astype(np.uint8) * 255
            # Clean up speckles and small holes in the mask.
            skin_mask = _binary_open_close(skin_mask, kernel_size=3, iterations=1)
        except Exception as e:
            print(f"Face parsing error: {e}")
            skin_mask = _ellipse_fallback_mask()

        # Last resort: use the whole crop.
        if np.sum(skin_mask) == 0:
            skin_mask = np.full(face_crop.shape[:2], 255, dtype=np.uint8)

        hex_code, color_rgb = _compute_skin_color_hex(face_crop, skin_mask)
        swatch = _solid_color_image(color_rgb)

        # Blend a yellow skin-mask overlay into the debug image (previously the
        # overlay was computed but discarded).
        full_mask = np.zeros((height, width), dtype=np.uint8)
        full_mask[y1:y2, x1:x2] = skin_mask
        color_mask = np.zeros_like(rgb)
        color_mask[:, :] = (255, 255, 0)
        mask_3d = np.stack([full_mask] * 3, axis=2) / 255.0
        debug_img = (debug_img * (1 - 0.4 * mask_3d) + color_mask * 0.4 * mask_3d).astype(np.uint8)

        # Draw the detection box and method label on top of the overlay.
        box_color = (0, 255, 0) if detection_method != "Central Crop" else (255, 0, 0)
        cv2.rectangle(debug_img, (x1, y1), (x2, y2), box_color, 2)
        cv2.putText(debug_img, detection_method, (x1, y1 - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, box_color, 2)

        # Stamp the hex code: white stroke with a thin black inner stroke for legibility.
        cv2.putText(debug_img, hex_code, (10, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2)
        cv2.putText(debug_img, hex_code, (10, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 1)

        return hex_code, swatch, debug_img

    except Exception as e:
        error_msg = f"Error: {str(e)}"
        print(error_msg)

        error_hex = "#FF0000"
        error_swatch = _solid_color_image(np.array([255, 0, 0], dtype=np.uint8))

        if 'rgb' in locals():
            error_debug = rgb.copy()
        else:
            error_debug = np.full((300, 300, 3), 100, dtype=np.uint8)

        cv2.putText(error_debug, "ERROR", (50, 100),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3)
        cv2.putText(error_debug, error_msg[:30], (50, 150),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

        return error_hex, error_swatch, error_debug
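

# Illustrative usage sketch (not executed on import): run the pipeline on a
# local file. The path below is a placeholder assumption.
def _demo_detect_from_file(path: str = "face.jpg") -> str:
    bgr = cv2.imread(path)                       # OpenCV loads BGR
    if bgr is None:
        raise FileNotFoundError(path)
    rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)   # the pipeline expects RGB
    hex_code, _swatch, _debug = detect_skin_tone(rgb)
    return hex_code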
|
|
def _hex_html(hex_code: str) -> str:
    """Render the hex code with an inline color swatch as an HTML snippet."""
    style = "display:flex;align-items:center;gap:12px;padding:8px 0;"
    swatch_style = (
        f"width:24px;height:24px;border-radius:4px;background:{hex_code};"
        "border:2px solid #333;box-shadow:2px 2px 5px rgba(0,0,0,0.2);"
    )
    return (
        f"<div style='{style}'>"
        f"<div style='{swatch_style}'></div>"
        f"<span style='font-family:monospace;font-size:18px;font-weight:bold;'>{hex_code}</span>"
        "</div>"
    )
|
|
with gr.Blocks(title="Skin Tone Detector", theme=gr.themes.Soft()) as demo:
    gr.Markdown(
        """
        # 🎨 Skin Tone Hex Detector

        Upload a photo with a face to detect the skin tone color. The app will return a HEX color code.

        ### How it works:
        1. Face detection using MediaPipe (with an OpenCV Haar fallback)
        2. Skin region segmentation using a face-parsing model
        3. Robust color extraction from the skin pixels
        4. HEX code generation

        **Tip:** Use clear, well-lit frontal face photos for best results.
        """
    )
|
    with gr.Row():
        with gr.Column(scale=1):
            input_image = gr.Image(
                label="📷 Upload Face Image",
                type="numpy",
                image_mode="RGB",
                height=400,
                sources=["upload", "webcam"],
                interactive=True
            )

            with gr.Row():
                run_btn = gr.Button("🔍 Detect Skin Tone", variant="primary", size="lg")
                clear_btn = gr.Button("🗑️ Clear", variant="secondary")

        with gr.Column(scale=1):
            with gr.Group():
                hex_output = gr.HTML(
                    label="🎨 Detected Color",
                    value="<div style='text-align:center;padding:20px;'>Upload an image to begin</div>"
                )
                swatch_output = gr.Image(
                    label="Color Swatch",
                    type="numpy",
                    height=200,
                    interactive=False
                )

    with gr.Accordion("🔍 Debug View", open=False):
        debug_output = gr.Image(
            label="Detection Visualization",
            type="numpy",
            height=400,
            interactive=False
        )

        gr.Markdown("""
        **Detection Legend:**
        - 🟢 Green box: Face detected (MediaPipe/OpenCV)
        - 🔴 Red box: Central crop (fallback)
        - 🟡 Yellow overlay: Skin mask
        """)

    gr.Markdown("""
    ### 📝 Notes:
    - Works best with frontal face photos in good lighting
    - Multiple detection methods improve reliability
    - Results may vary with lighting and image quality
    - The HEX code represents the median color of the detected skin pixels
    """)
|
    def _run(image: Optional[np.ndarray]):
        if image is None:
            return (
                "<div style='text-align:center;padding:20px;color:#666;'>"
                "Please upload an image first</div>",
                np.zeros((200, 200, 3), dtype=np.uint8),
                np.zeros((400, 400, 3), dtype=np.uint8)
            )

        try:
            hex_code, swatch, debug = detect_skin_tone(image)
            return _hex_html(hex_code), swatch, debug
        except Exception as e:
            error_html = f"""
            <div style='text-align:center;padding:20px;color:#d00;'>
                <h3>❌ Error</h3>
                <p>{str(e)[:100]}</p>
                <p>Please try a different image.</p>
            </div>
            """
            error_img = np.zeros((200, 200, 3), dtype=np.uint8)
            error_img[:] = [255, 200, 200]
            return error_html, error_img, None

    def _clear():
        return (
            None,
            "<div style='text-align:center;padding:20px;color:#666;'>"
            "Upload an image to begin</div>",
            np.zeros((200, 200, 3), dtype=np.uint8),
            np.zeros((400, 400, 3), dtype=np.uint8)
        )
|
    run_btn.click(
        fn=_run,
        inputs=[input_image],
        outputs=[hex_output, swatch_output, debug_output]
    )

    clear_btn.click(
        fn=_clear,
        inputs=[],
        outputs=[input_image, hex_output, swatch_output, debug_output]
    )
|
|
if __name__ == "__main__":
    print("=" * 60)
    print("🚀 Starting Skin Tone Detector")
    print("=" * 60)
    print("\nAccess the app at: http://localhost:7860")
    print("\nPress Ctrl+C to stop the server")

    demo.launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=False,
        debug=False,
        show_error=True,
        quiet=False
    )