"""Gradio demo that synthesizes images with a two-stage diffusion pipeline.

Flow per request: sample a latent for the chosen cancer type, decode it
into a label mask with the VQ-VAE, then run pix2pixHD (as a subprocess)
to translate the mask into the final synthetic image.
"""

import argparse
import glob
import os
import random
import shutil
import subprocess

import gradio as gr
import numpy as np
import PIL
import spaces
from PIL import Image
from huggingface_hub import login, snapshot_download

from diffusion import generate_latent
from vq_vae import create_mask

# Authenticate once at startup and pull the model checkpoints locally.
login(token=os.getenv('HF_TOKEN'))
model_dir = snapshot_download(repo_id="srijaydeshpande/diffusion")


@spaces.GPU(duration=120)
def create_image(cancer_type):
    """Generate a (label mask, synthesized image) pair for ``cancer_type``.

    Parameters
    ----------
    cancer_type : str
        Either ``"benign"`` or ``"malignant"``; selects which pix2pixHD
        checkpoint performs the final synthesis stage.

    Returns
    -------
    tuple[PIL.Image.Image, PIL.Image.Image]
        The input label mask and the synthesized image read back from
        the pix2pixHD results directory.

    Raises
    ------
    subprocess.CalledProcessError
        If the pix2pixHD subprocess exits with a non-zero status.
    """
    tmp_dir = "./tmp"
    # Start every request from a clean scratch directory.
    if os.path.exists(tmp_dir):
        shutil.rmtree(tmp_dir)
    os.makedirs(tmp_dir)

    # Stage 1: sample a latent code; Stage 2: decode it into a label mask.
    generate_latent(model_dir, cancer_type, tmp_dir)
    create_mask(model_dir, tmp_dir, os.path.join(tmp_dir, "test_masks"))

    diffusion_model_name = (
        'diffusion_dp_benign' if cancer_type == 'benign' else 'diffusion_dp'
    )

    # Stage 3: pix2pixHD translates the mask into the synthetic image.
    # Argument-list form (shell=False) avoids the quoting/injection hazards
    # of the previous os.system() string concatenation; check=True surfaces
    # failures instead of silently reading stale output files afterwards.
    subprocess.run(
        [
            "python", "pix2pixhd_test.py",
            "--name", diffusion_model_name,
            "--dataroot", tmp_dir,
            "--label_nc", "0",
            "--results_dir", tmp_dir,
            "--no_instance",
            "--resize_or_crop", "none",
            "--checkpoints_dir", model_dir,
        ],
        check=True,
    )

    image_dir = os.path.join(tmp_dir, diffusion_model_name, "test_latest", "images")
    input_label_image = Image.open(os.path.join(image_dir, "sample_input_label.jpg"))
    synthesized_image = Image.open(os.path.join(image_dir, "sample_synthesized_image.jpg"))
    return input_label_image, synthesized_image


demo = gr.Interface(
    create_image,
    inputs=gr.Radio(choices=["benign", "malignant"], label="Choose Type", value="benign"),
    outputs=[gr.Image(), gr.Image()],
    title="Diffusion based Image Generation",
)
demo.launch()
# create_image('benign')