import os
import sys

from huggingface_hub import hf_hub_download
import spaces
from facenet_pytorch import MTCNN
from torchvision import transforms
import torch
import PIL
from PIL import Image
import gradio as gr
# Download models
modelarcanev4 = hf_hub_download(repo_id="akhaliq/ArcaneGANv0.4", filename="ArcaneGANv0.4.jit")
modelarcanev3 = hf_hub_download(repo_id="akhaliq/ArcaneGANv0.3", filename="ArcaneGANv0.3.jit")
modelarcanev2 = hf_hub_download(repo_id="akhaliq/ArcaneGANv0.2", filename="ArcaneGANv0.2.jit")

mtcnn = MTCNN(image_size=256, margin=80)
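# Note (per the facenet_pytorch API): `margin` is extra padding, in pixels,
# added around the detected face box. The detector is only used below to
# measure face size for rescaling, not to crop faces out of the image.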
# Face detection
def detect(img):
    batch_boxes, batch_probs, batch_points = mtcnn.detect(img, landmarks=True)
    if not mtcnn.keep_all:
        batch_boxes, batch_probs, batch_points = mtcnn.select_boxes(
            batch_boxes, batch_probs, batch_points, img, method=mtcnn.selection_method
        )
    return batch_boxes, batch_points
def makeEven(_x):
    """Round an integer up to the next even value."""
    return _x if (_x % 2 == 0) else _x + 1
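# Worked example: makeEven(255) -> 256 and makeEven(256) -> 256, so both
# sides of the resized image below always end up even.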
def scale(boxes, _img, max_res=1_500_000, target_face=256, fixed_ratio=0, max_upscale=2, VERBOSE=False):
    x, y = _img.size
    ratio = 2  # default upscale ratio when no face is detected
    if boxes is not None:
        if len(boxes) > 0:
            # Scale so the longest side of the first face box is ~target_face px
            ratio = target_face / max(boxes[0][2:] - boxes[0][:2])
            ratio = min(ratio, max_upscale)
    if fixed_ratio > 0:
        ratio = fixed_ratio
    x *= ratio
    y *= ratio
    # Cap total resolution at max_res pixels, preserving aspect ratio
    res = x * y
    if res > max_res:
        ratio = pow(res / max_res, 1 / 2)
        x = int(x / ratio)
        y = int(y / ratio)
    x = makeEven(int(x))
    y = makeEven(int(y))
    size = (x, y)
    return _img.resize(size)
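# Worked example (hypothetical numbers): a 1000x800 photo whose first face box
# is 128 px on its longest side gives ratio = 256 / 128 = 2.0 (within
# max_upscale=2), so the image grows to 2000x1600 = 3.2 MP. That exceeds
# max_res = 1.5 MP, so both sides shrink by sqrt(3.2 / 1.5) ~ 1.46, landing at
# roughly 1370x1096 after rounding each side up to an even value.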
def scale_by_face_size(_img, max_res=1_500_000, target_face=256, fix_ratio=0, max_upscale=2, VERBOSE=False):
    boxes, _ = detect(_img)
    img_resized = scale(boxes, _img, max_res, target_face, fix_ratio, max_upscale, VERBOSE)
    return img_resized
# Image processing setup
size = 256
means = [0.485, 0.456, 0.406]
stds = [0.229, 0.224, 0.225]

img_transforms = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(means, stds)
])
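# Note: means/stds above are the standard ImageNet normalization statistics;
# the same values are un-applied after inference to map the model output back
# to displayable 0-255 RGB.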
@spaces.GPU  # required on Zero-GPU Spaces so CUDA is available inside this call
def proc_pil_img(input_image, model_path):
    """GPU-accelerated image processing with half-precision support."""
    # Load the TorchScript model on the GPU
    model = torch.jit.load(model_path, map_location='cuda').eval()
    # Create normalization tensors on the GPU in half precision to match the model
    t_stds = torch.tensor(stds).cuda().half().view(3, 1, 1)
    t_means = torch.tensor(means).cuda().half().view(3, 1, 1)
    # Transform the image and move it to the GPU in half precision
    transformed_image = img_transforms(input_image).unsqueeze(0).cuda().half()
    with torch.no_grad():
        result_image = model(transformed_image)[0]
    # Denormalize and convert back to float for post-processing
    output_image = result_image.float().mul(t_stds.float()).add(t_means.float()).mul(255.).clamp(0, 255).permute(1, 2, 0)
    output_image = output_image.cpu().numpy().astype('uint8')
    output_image = PIL.Image.fromarray(output_image)
    # Free GPU memory before returning
    del model
    torch.cuda.empty_cache()
    return output_image
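# A minimal local sanity check (hypothetical; assumes a CUDA device and that
# the v0.4 checkpoint download above has completed):
#   test_img = Image.open("portrait.jpg").convert("RGB")
#   styled = proc_pil_img(scale_by_face_size(test_img), modelarcanev4)
#   styled.save("portrait_arcane.jpg")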
def process(im, version):
    """Main processing function with GPU acceleration."""
    if im is None:
        raise gr.Error("Please upload an image first!")
    try:
        # Ensure the input is a PIL Image
        if not isinstance(im, Image.Image):
            im = Image.fromarray(im)
        # Convert to RGB if needed
        if im.mode != 'RGB':
            im = im.convert('RGB')
        # Scale image based on face size (CPU operation)
        im = scale_by_face_size(im, target_face=256, max_res=1_500_000, max_upscale=1)
        # Select the model checkpoint based on the chosen version
        if version == 'v0.4 (Recommended)':
            res = proc_pil_img(im, modelarcanev4)
        elif version == 'v0.3':
            res = proc_pil_img(im, modelarcanev3)
        else:
            res = proc_pil_img(im, modelarcanev2)
        return res
    except Exception as e:
        raise gr.Error(f"Error processing image: {e}")
# Custom theme
custom_theme = gr.themes.Soft(
    primary_hue="blue",
    secondary_hue="indigo",
    neutral_hue="slate",
    font=gr.themes.GoogleFont("Inter"),
    text_size="lg",
    spacing_size="md",
    radius_size="lg"
).set(
    button_primary_background_fill="*primary_600",
    button_primary_background_fill_hover="*primary_700",
    block_title_text_weight="600",
    block_border_width="2px",
    block_shadow="*shadow_drop_lg",
)
# Custom CSS for mobile-friendly design
custom_css = """
.gradio-container {
    max-width: 1200px !important;
    margin: auto !important;
}
#header {
    text-align: center;
    margin-bottom: 2rem;
}
#header h1 {
    font-size: 2.5rem;
    font-weight: 700;
    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
    -webkit-background-clip: text;
    -webkit-text-fill-color: transparent;
    margin-bottom: 0.5rem;
}
#description {
    font-size: 1.1rem;
    color: #64748b;
    margin-bottom: 1rem;
}
.input-column, .output-column {
    border-radius: 16px;
    padding: 1.5rem;
    background: linear-gradient(135deg, rgba(102, 126, 234, 0.05) 0%, rgba(118, 75, 162, 0.05) 100%);
}
@media (max-width: 768px) {
    #header h1 {
        font-size: 2rem;
    }
    #description {
        font-size: 1rem;
    }
    .input-column, .output-column {
        padding: 1rem;
    }
}
#footer {
    text-align: center;
    margin-top: 2rem;
    padding: 1.5rem;
    border-top: 2px solid #e2e8f0;
}
#footer a {
    color: #667eea;
    text-decoration: none;
    font-weight: 600;
}
#footer a:hover {
    color: #764ba2;
    text-decoration: underline;
}
.example-container {
    margin-top: 1rem;
}
.gpu-badge {
    display: inline-block;
    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
    color: white;
    padding: 0.5rem 1rem;
    border-radius: 20px;
    font-weight: 600;
    margin-top: 0.5rem;
}
#anycoder-link {
    text-align: center;
    margin-top: 0.5rem;
}
#anycoder-link a {
    color: #667eea;
    text-decoration: none;
    font-weight: 600;
    font-size: 0.9rem;
}
#anycoder-link a:hover {
    color: #764ba2;
    text-decoration: underline;
}
"""
# Build the interface
with gr.Blocks() as demo:
    # Header
    with gr.Column(elem_id="header"):
        gr.Markdown(
            """
            # 🎨 ArcaneGAN
            ### Transform Your Photos into Arcane-Style Art
            Upload a portrait and watch it transform into the stunning visual style of Netflix's Arcane series.
            <span class="gpu-badge">⚡ Powered by Zero-GPU</span>
            """
        )
        gr.Markdown(
            "[Built with anycoder](https://huggingface.co/spaces/akhaliq/anycoder)",
            elem_id="anycoder-link"
        )
    # Main content
    with gr.Row(equal_height=True):
        # Input column
        with gr.Column(scale=1, elem_classes="input-column"):
            gr.Markdown("### 📤 Upload Your Photo")
            input_image = gr.Image(
                type="pil",
                label="Input Image",
                sources=["upload", "webcam", "clipboard"],
                height=400
            )
            version_selector = gr.Radio(
                choices=['v0.4 (Recommended)', 'v0.3', 'v0.2'],
                value='v0.4 (Recommended)',
                label="Model Version",
                info="v0.4 offers the best quality"
            )
            transform_btn = gr.Button(
                "✨ Transform to Arcane Style",
                variant="primary",
                size="lg"
            )
        # Output column
        with gr.Column(scale=1, elem_classes="output-column"):
            gr.Markdown("### 🎭 Arcane-Style Result")
            output_image = gr.Image(
                type="pil",
                label="Transformed Image",
                height=400,
                buttons=["download", "share"]
            )
    # Tips section
    with gr.Row():
        gr.Markdown(
            """
            ### 💡 Tips for Best Results
            - Use clear, well-lit portrait photos
            - Face should be clearly visible and not too small
            - Works best with frontal or slightly angled faces
            - Try different model versions for varied artistic styles
            """
        )
    # Footer
    with gr.Column(elem_id="footer"):
        gr.Markdown(
            """
            ---
            **ArcaneGAN** by [Alexander S](https://twitter.com/devdef) |
            [GitHub Repository](https://github.com/Sxela/ArcaneGAN) |
            [Original Space](https://huggingface.co/spaces/akhaliq/ArcaneGAN)

            **⚡ Zero-GPU Optimization**: This Space uses Hugging Face's Zero-GPU infrastructure for efficient GPU allocation.

            **Model Versions:**
            - **v0.4**: Latest and recommended - best quality and style accuracy
            - **v0.3**: Alternative style interpretation
            - **v0.2**: Original version with unique characteristics

            <div style='margin-top: 1rem;'>
                <img src='https://visitor-badge.glitch.me/badge?page_id=akhaliq_arcanegan' alt='visitor badge'>
            </div>
            """
        )
    # Event handlers
    transform_btn.click(
        fn=process,
        inputs=[input_image, version_selector],
        outputs=output_image,
        api_visibility="public"
    )
    input_image.upload(
        fn=process,
        inputs=[input_image, version_selector],
        outputs=output_image
    )
# Launch with Gradio 6 syntax
demo.launch(
    theme=custom_theme,
    css=custom_css,
    footer_links=[
        {"label": "Built with anycoder", "url": "https://huggingface.co/spaces/akhaliq/anycoder"}
    ]
)
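# To run locally (assuming this file is saved as app.py; on Spaces, Zero-GPU
# allocates a device per call via the @spaces.GPU decorator above):
#   $ python app.py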