🎨 Redesign from AnyCoder (#10), commit c91e9325407f4aca82de3c75efe08065bab16204
app.py
CHANGED
@@ -4,149 +4,290 @@ os.system("pip -qq install facenet_pytorch")
Old version (file lines 4–152; removed lines are marked "-", context lines are unmarked):

from facenet_pytorch import MTCNN
from torchvision import transforms
import torch, PIL
- from tqdm.notebook import tqdm
import gradio as gr
- import torch

modelarcanev4 = hf_hub_download(repo_id="akhaliq/ArcaneGANv0.4", filename="ArcaneGANv0.4.jit")
modelarcanev3 = hf_hub_download(repo_id="akhaliq/ArcaneGANv0.3", filename="ArcaneGANv0.3.jit")
modelarcanev2 = hf_hub_download(repo_id="akhaliq/ArcaneGANv0.2", filename="ArcaneGANv0.2.jit")

-
mtcnn = MTCNN(image_size=256, margin=80)

- #
def detect(img):
-
-
-     batch_boxes, batch_probs, batch_points = mtcnn.
-
-
-
-         batch_boxes, batch_probs, batch_points, img, method=mtcnn.selection_method
-     )
-
-     return batch_boxes, batch_points

- # my version of isOdd, should make a separate repo for it :D
def makeEven(_x):
-

- # the actual scaler function
def scale(boxes, _img, max_res=1_500_000, target_face=256, fixed_ratio=0, max_upscale=2, VERBOSE=False):
-
    x, y = _img.size
-
-
-
-     #scale to desired face size
    if (boxes is not None):
-
-
-
-
-
    if fixed_ratio>0:
-
-
-
    x*=ratio
    y*=ratio
-
-     #downscale to fit into max res
    res = x*y
    if res > max_res:
-
-
-
-
-
-     #make dimensions even, because usually NNs fail on uneven dimensions due skip connection size mismatch
    x = makeEven(int(x))
    y = makeEven(int(y))
-
    size = (x, y)
-
    return _img.resize(size)

- """
- A useful scaler algorithm, based on face detection.
- Takes PIL.Image, returns a uniformly scaled PIL.Image
- boxes: a list of detected bboxes
- _img: PIL.Image
- max_res: maximum pixel area to fit into. Use to stay below the VRAM limits of your GPU.
- target_face: desired face size. Upscale or downscale the whole image to fit the detected face into that dimension.
- fixed_ratio: fixed scale. Ignores the face size, but doesn't ignore the max_res limit.
- max_upscale: maximum upscale ratio. Prevents from scaling images with tiny faces to a blurry mess.
- """
-
def scale_by_face_size(_img, max_res=1_500_000, target_face=256, fix_ratio=0, max_upscale=2, VERBOSE=False):
    boxes = None
    boxes, _ = detect(_img)
-     if VERBOSE: print('boxes',boxes)
    img_resized = scale(boxes, _img, max_res, target_face, fix_ratio, max_upscale, VERBOSE)
    return img_resized

-
size = 256
-
means = [0.485, 0.456, 0.406]
stds = [0.229, 0.224, 0.225]

t_stds = torch.tensor(stds).cuda().half()[:,None,None]
t_means = torch.tensor(means).cuda().half()[:,None,None]

- def makeEven(_x):
-     return int(_x) if (_x % 2 == 0) else int(_x+1)
-
img_transforms = transforms.Compose([
-
-

def tensor2im(var):
-

def proc_pil_img(input_image, model):
    transformed_image = img_transforms(input_image)[None,...].cuda().half()
-
    with torch.no_grad():
        result_image = model(transformed_image)[0]
    output_image = tensor2im(result_image)
    output_image = output_image.detach().cpu().numpy().astype('uint8')
    output_image = PIL.Image.fromarray(output_image)
    return output_image
-
-
-
modelv4 = torch.jit.load(modelarcanev4).eval().cuda().half()
modelv3 = torch.jit.load(modelarcanev3).eval().cuda().half()
modelv2 = torch.jit.load(modelarcanev2).eval().cuda().half()

def process(im, version):
-     if version == '
        im = scale_by_face_size(im, target_face=256, max_res=1_500_000, max_upscale=1)
        res = proc_pil_img(im, modelv4)
-     elif version == '
        im = scale_by_face_size(im, target_face=256, max_res=1_500_000, max_upscale=1)
        res = proc_pil_img(im, modelv3)
    else:
        im = scale_by_face_size(im, target_face=256, max_res=1_500_000, max_upscale=1)
        res = proc_pil_img(im, modelv2)
    return res

(Old lines 137–152 were also removed; their content is not rendered in this diff view.)
New version (file lines 4–293; added lines are marked "+", context lines are unmarked):

from facenet_pytorch import MTCNN
from torchvision import transforms
import torch, PIL
import gradio as gr

+ # Download models
modelarcanev4 = hf_hub_download(repo_id="akhaliq/ArcaneGANv0.4", filename="ArcaneGANv0.4.jit")
modelarcanev3 = hf_hub_download(repo_id="akhaliq/ArcaneGANv0.3", filename="ArcaneGANv0.3.jit")
modelarcanev2 = hf_hub_download(repo_id="akhaliq/ArcaneGANv0.2", filename="ArcaneGANv0.2.jit")

mtcnn = MTCNN(image_size=256, margin=80)

+ # Face detection
def detect(img):
+     batch_boxes, batch_probs, batch_points = mtcnn.detect(img, landmarks=True)
+     if not mtcnn.keep_all:
+         batch_boxes, batch_probs, batch_points = mtcnn.select_boxes(
+             batch_boxes, batch_probs, batch_points, img, method=mtcnn.selection_method
+         )
+     return batch_boxes, batch_points
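For orientation, a short sketch of what detect() hands back and how it is consumed further down (illustrative only: facenet_pytorch's MTCNN.detect returns per-face [x1, y1, x2, y2] boxes, confidence scores, and landmark points, or None when no face is found). The file name below is a hypothetical local image, not part of the Space:

# Illustrative sketch: inspecting detect() output (hypothetical input file).
from PIL import Image

img = Image.open("face.jpg").convert("RGB")
boxes, points = detect(img)                 # boxes: array of [x1, y1, x2, y2] per face, or None
if boxes is not None and len(boxes) > 0:
    w = boxes[0][2] - boxes[0][0]           # width of the first face box
    h = boxes[0][3] - boxes[0][1]           # height of the first face box
    print("first face:", w, "x", h, "px")   # scale() divides target_face by max(w, h)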

def makeEven(_x):
+     return _x if (_x % 2 == 0) else _x+1

def scale(boxes, _img, max_res=1_500_000, target_face=256, fixed_ratio=0, max_upscale=2, VERBOSE=False):
    x, y = _img.size
+     ratio = 2
+
    if (boxes is not None):
+         if len(boxes)>0:
+             ratio = target_face/max(boxes[0][2:]-boxes[0][:2])
+             ratio = min(ratio, max_upscale)
+
    if fixed_ratio>0:
+         ratio = fixed_ratio
+
    x*=ratio
    y*=ratio
+
    res = x*y
    if res > max_res:
+         ratio = pow(res/max_res,1/2)
+         x=int(x/ratio)
+         y=int(y/ratio)
+
    x = makeEven(int(x))
    y = makeEven(int(y))
    size = (x, y)
    return _img.resize(size)

def scale_by_face_size(_img, max_res=1_500_000, target_face=256, fix_ratio=0, max_upscale=2, VERBOSE=False):
    boxes = None
    boxes, _ = detect(_img)
    img_resized = scale(boxes, _img, max_res, target_face, fix_ratio, max_upscale, VERBOSE)
    return img_resized

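To make the arithmetic in scale() concrete, here is a small worked example with made-up numbers (not values from the app): a face box 128 px on its longest side asks for ratio 256/128 = 2, already at the max_upscale cap; the upscaled area is then pushed back under max_res by dividing both sides by sqrt(res/max_res); finally both dimensions are rounded up to even numbers so the jitted generator's skip connections see matching sizes (the reason given in the removed comment above).

# Worked example of scale()'s math, with hypothetical numbers.
x, y = 1000, 800                          # original image size
face = 128                                # longest side of the detected face box
ratio = min(256 / face, 2)                # target_face=256, max_upscale=2 -> 2.0
x, y = x * ratio, y * ratio               # 2000 x 1600
res, max_res = x * y, 1_500_000           # 3_200_000 > 1_500_000
shrink = (res / max_res) ** 0.5           # ~1.46
x, y = int(x / shrink), int(y / shrink)   # 1369 x 1095
x, y = x + x % 2, y + y % 2               # makeEven(): 1370 x 1096
print(x, y)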
+ # Image processing setup
size = 256
means = [0.485, 0.456, 0.406]
stds = [0.229, 0.224, 0.225]

t_stds = torch.tensor(stds).cuda().half()[:,None,None]
t_means = torch.tensor(means).cuda().half()[:,None,None]

img_transforms = transforms.Compose([
+     transforms.ToTensor(),
+     transforms.Normalize(means,stds)
+ ])

def tensor2im(var):
+     return var.mul(t_stds).add(t_means).mul(255.).clamp(0,255).permute(1,2,0)

def proc_pil_img(input_image, model):
    transformed_image = img_transforms(input_image)[None,...].cuda().half()
    with torch.no_grad():
        result_image = model(transformed_image)[0]
    output_image = tensor2im(result_image)
    output_image = output_image.detach().cpu().numpy().astype('uint8')
    output_image = PIL.Image.fromarray(output_image)
    return output_image
+
+ # Load models
modelv4 = torch.jit.load(modelarcanev4).eval().cuda().half()
modelv3 = torch.jit.load(modelarcanev3).eval().cuda().half()
modelv2 = torch.jit.load(modelarcanev2).eval().cuda().half()

def process(im, version):
+     if version == 'v0.4 (Recommended)':
        im = scale_by_face_size(im, target_face=256, max_res=1_500_000, max_upscale=1)
        res = proc_pil_img(im, modelv4)
+     elif version == 'v0.3':
        im = scale_by_face_size(im, target_face=256, max_res=1_500_000, max_upscale=1)
        res = proc_pil_img(im, modelv3)
    else:
        im = scale_by_face_size(im, target_face=256, max_res=1_500_000, max_upscale=1)
        res = proc_pil_img(im, modelv2)
    return res
+
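The inference path above can also be exercised without the UI. A minimal sketch, assuming the downloads and model loads above have already run on a CUDA machine (the transforms and jit models are all moved to .cuda().half()) and using a hypothetical local file portrait.jpg:

# Standalone use of the processing pipeline (hypothetical input file).
from PIL import Image

im = Image.open("portrait.jpg").convert("RGB")
styled = process(im, "v0.4 (Recommended)")   # face-aware rescale + ArcaneGAN v0.4
styled.save("portrait_arcane.png")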
+
# Custom theme
|
| 103 |
+
custom_theme = gr.themes.Soft(
|
| 104 |
+
primary_hue="blue",
|
| 105 |
+
secondary_hue="indigo",
|
| 106 |
+
neutral_hue="slate",
|
| 107 |
+
font=gr.themes.GoogleFont("Inter"),
|
| 108 |
+
text_size="lg",
|
| 109 |
+
spacing_size="md",
|
| 110 |
+
radius_size="lg"
|
| 111 |
+
).set(
|
| 112 |
+
button_primary_background_fill="*primary_600",
|
| 113 |
+
button_primary_background_fill_hover="*primary_700",
|
| 114 |
+
block_title_text_weight="600",
|
| 115 |
+
block_border_width="2px",
|
| 116 |
+
block_shadow="*shadow_drop_lg",
|
| 117 |
+
)
|
| 118 |
+
|
| 119 |
+
# Custom CSS for mobile-friendly design
|
| 120 |
+
custom_css = """
|
| 121 |
+
.gradio-container {
|
| 122 |
+
max-width: 1200px !important;
|
| 123 |
+
margin: auto !important;
|
| 124 |
+
}
|
| 125 |
+
|
| 126 |
+
#header {
|
| 127 |
+
text-align: center;
|
| 128 |
+
margin-bottom: 2rem;
|
| 129 |
+
}
|
| 130 |
+
|
| 131 |
+
#header h1 {
|
| 132 |
+
font-size: 2.5rem;
|
| 133 |
+
font-weight: 700;
|
| 134 |
+
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
|
| 135 |
+
-webkit-background-clip: text;
|
| 136 |
+
-webkit-text-fill-color: transparent;
|
| 137 |
+
margin-bottom: 0.5rem;
|
| 138 |
+
}
|
| 139 |
+
|
| 140 |
+
#description {
|
| 141 |
+
font-size: 1.1rem;
|
| 142 |
+
color: #64748b;
|
| 143 |
+
margin-bottom: 1rem;
|
| 144 |
+
}
|
| 145 |
+
|
| 146 |
+
.input-column, .output-column {
|
| 147 |
+
border-radius: 16px;
|
| 148 |
+
padding: 1.5rem;
|
| 149 |
+
background: linear-gradient(135deg, rgba(102, 126, 234, 0.05) 0%, rgba(118, 75, 162, 0.05) 100%);
|
| 150 |
+
}
|
| 151 |
+
|
| 152 |
+
@media (max-width: 768px) {
|
| 153 |
+
#header h1 {
|
| 154 |
+
font-size: 2rem;
|
| 155 |
+
}
|
| 156 |
+
|
| 157 |
+
#description {
|
| 158 |
+
font-size: 1rem;
|
| 159 |
+
}
|
| 160 |
+
|
| 161 |
+
.input-column, .output-column {
|
| 162 |
+
padding: 1rem;
|
| 163 |
+
}
|
| 164 |
+
}
|
| 165 |
+
|
| 166 |
+
#footer {
|
| 167 |
+
text-align: center;
|
| 168 |
+
margin-top: 2rem;
|
| 169 |
+
padding: 1.5rem;
|
| 170 |
+
border-top: 2px solid #e2e8f0;
|
| 171 |
+
}
|
| 172 |
+
|
| 173 |
+
#footer a {
|
| 174 |
+
color: #667eea;
|
| 175 |
+
text-decoration: none;
|
| 176 |
+
font-weight: 600;
|
| 177 |
+
}
|
| 178 |
+
|
| 179 |
+
#footer a:hover {
|
| 180 |
+
color: #764ba2;
|
| 181 |
+
text-decoration: underline;
|
| 182 |
+
}
|
| 183 |
+
|
| 184 |
+
.example-container {
|
| 185 |
+
margin-top: 1rem;
|
| 186 |
+
}
|
| 187 |
+
"""
|
| 188 |
+
|
| 189 |
+
# Build the interface
|
| 190 |
+
with gr.Blocks() as demo:
|
| 191 |
+
|
| 192 |
+
# Header
|
| 193 |
+
with gr.Column(elem_id="header"):
|
| 194 |
+
gr.Markdown(
|
| 195 |
+
"""
|
| 196 |
+
# π¨ ArcaneGAN
|
| 197 |
+
### Transform Your Photos into Arcane-Style Art
|
| 198 |
+
Upload a portrait and watch it transform into the stunning visual style of Netflix's Arcane series.
|
| 199 |
+
"""
|
| 200 |
+
)
|
| 201 |
+
gr.Markdown(
|
| 202 |
+
"[Built with anycoder](https://huggingface.co/spaces/akhaliq/anycoder)",
|
| 203 |
+
elem_id="anycoder-link"
|
| 204 |
+
)
|
| 205 |
+
|
| 206 |
+
# Main content
|
| 207 |
+
with gr.Row(equal_height=True):
|
| 208 |
+
# Input column
|
| 209 |
+
with gr.Column(scale=1, elem_classes="input-column"):
|
| 210 |
+
gr.Markdown("### π€ Upload Your Photo")
|
| 211 |
+
input_image = gr.Image(
|
| 212 |
+
type="pil",
|
| 213 |
+
label="Input Image",
|
| 214 |
+
sources=["upload", "webcam", "clipboard"],
|
| 215 |
+
height=400
|
| 216 |
+
)
|
| 217 |
+
|
| 218 |
+
version_selector = gr.Radio(
|
| 219 |
+
choices=['v0.4 (Recommended)', 'v0.3', 'v0.2'],
|
| 220 |
+
value='v0.4 (Recommended)',
|
| 221 |
+
label="Model Version",
|
| 222 |
+
info="v0.4 offers the best quality"
|
| 223 |
+
)
|
| 224 |
+
|
| 225 |
+
transform_btn = gr.Button(
|
| 226 |
+
"β¨ Transform to Arcane Style",
|
| 227 |
+
variant="primary",
|
| 228 |
+
size="lg"
|
| 229 |
+
)
|
| 230 |
|
| 231 |
+
# Output column
|
| 232 |
+
with gr.Column(scale=1, elem_classes="output-column"):
|
| 233 |
+
gr.Markdown("### π Arcane-Style Result")
|
| 234 |
+
output_image = gr.Image(
|
| 235 |
+
type="pil",
|
| 236 |
+
label="Transformed Image",
|
| 237 |
+
height=400,
|
| 238 |
+
buttons=["download", "share"]
|
| 239 |
+
)
|
| 240 |
+
|
| 241 |
+
# Examples
|
| 242 |
+
with gr.Row():
|
| 243 |
+
gr.Examples(
|
| 244 |
+
examples=[
|
| 245 |
+
['bill.png', 'v0.3'],
|
| 246 |
+
['keanu.png', 'v0.4 (Recommended)'],
|
| 247 |
+
['will.jpeg', 'v0.4 (Recommended)']
|
| 248 |
+
],
|
| 249 |
+
inputs=[input_image, version_selector],
|
| 250 |
+
outputs=output_image,
|
| 251 |
+
fn=process,
|
| 252 |
+
cache_examples=True,
|
| 253 |
+
label="Try These Examples",
|
| 254 |
+
examples_per_page=3
|
| 255 |
+
)
|
| 256 |
+
|
| 257 |
+
# Footer
|
| 258 |
+
with gr.Column(elem_id="footer"):
|
| 259 |
+
gr.Markdown(
|
| 260 |
+
"""
|
| 261 |
+
---
|
| 262 |
+
**ArcaneGAN** by [Alexander S](https://twitter.com/devdef) |
|
| 263 |
+
[GitHub Repository](https://github.com/Sxela/ArcaneGAN) |
|
| 264 |
+
[Original Space](https://huggingface.co/spaces/akhaliq/ArcaneGAN)
|
| 265 |
+
|
| 266 |
+
<div style='margin-top: 1rem;'>
|
| 267 |
+
<img src='https://visitor-badge.glitch.me/badge?page_id=akhaliq_arcanegan' alt='visitor badge'>
|
| 268 |
+
</div>
|
| 269 |
+
"""
|
| 270 |
+
)
|
| 271 |
+
|
| 272 |
+
# Event handlers
|
| 273 |
+
transform_btn.click(
|
| 274 |
+
fn=process,
|
| 275 |
+
inputs=[input_image, version_selector],
|
| 276 |
+
outputs=output_image,
|
| 277 |
+
api_name="transform"
|
| 278 |
+
)
|
| 279 |
+
|
| 280 |
+
input_image.upload(
|
| 281 |
+
fn=process,
|
| 282 |
+
inputs=[input_image, version_selector],
|
| 283 |
+
outputs=output_image
|
| 284 |
+
)
|
| 285 |
+
|
| 286 |
+
# Launch with Gradio 6 syntax
|
| 287 |
+
demo.launch(
|
| 288 |
+
theme=custom_theme,
|
| 289 |
+
css=custom_css,
|
| 290 |
+
footer_links=[
|
| 291 |
+
{"label": "Built with anycoder", "url": "https://huggingface.co/spaces/akhaliq/anycoder"}
|
| 292 |
+
]
|
| 293 |
+
)
|
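One caveat for anyone porting this commit: the closing comment targets "Gradio 6 syntax", but on Gradio 4.x and 5.x theme and css are arguments to the gr.Blocks() constructor, launch() does not accept them, and footer_links is not a launch() parameter there. A minimal sketch of the equivalent wiring under that assumption (not part of this commit):

# Sketch for Gradio 4.x/5.x: styling moves to the Blocks constructor, launch() stays plain.
with gr.Blocks(theme=custom_theme, css=custom_css) as demo:
    ...  # same header, columns, examples, and event handlers as in the diff above

demo.launch()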