| import gradio as gr |
| import torch |
| import os |
| import random |
| import sys |
| from typing import Sequence, Mapping, Any, Union |
| from transformers import AutoModel, AutoTokenizer |
|
|
| |
# NOTE(review): two eager model downloads were removed here:
#
#     lora_model = AutoModel.from_pretrained("xerox-elf/eyeballlora")
#     checkpoint = AutoModel.from_pretrained("stablediffusionapi/juggernaut-xl-v8")
#
# Neither variable was ever used — main() loads the checkpoint and LoRA
# through ComfyUI's CheckpointLoaderSimple / LoraLoader nodes instead — and
# transformers.AutoModel cannot load a Stable Diffusion XL checkpoint or a
# bare LoRA safetensors file, so these calls would fail (or at best download
# gigabytes at import time for nothing).  Removed as dead code.
|
|
| import gradio as gr |
| from PIL import Image, ImageOps |
|
|
def process_image(input_image):
    """Turn an uploaded image into its grayscale version.

    Args:
        input_image: A ``PIL.Image`` instance supplied by the Gradio widget.

    Returns:
        A single-channel (mode ``"L"``) ``PIL.Image``.
    """
    # ImageOps.grayscale handles the mode conversion in one call.
    return ImageOps.grayscale(input_image)
|
|
| |
# Build a minimal Gradio demo around process_image.
# NOTE: the legacy gr.inputs / gr.outputs namespaces and the Image `shape=`
# argument were removed in modern Gradio (3.x+); the top-level gr.Image
# component is the supported API.
interface = gr.Interface(
    fn=process_image,
    inputs=gr.Image(type="pil"),
    outputs=gr.Image(type="pil"),
    title="Image Grayscale Converter",
    description="Upload an image to convert it to grayscale.",
)

# WARNING(review): launching at import time blocks this script here, so the
# ComfyUI pipeline defined below never runs until the server exits.
# Consider moving this call behind the __main__ guard.
interface.launch()
|
|
|
|
def get_value_at_index(obj: Union[Sequence, Mapping], index: int) -> Any:
    """Fetch the *index*-th element of a sequence-like or mapping-like object.

    Sequences (lists, strings, tuples, ...) are indexed directly.  Mappings
    that do not have *index* as a key are assumed to wrap their payload under
    the ``"result"`` key, whose value is then indexed — the shape some
    ComfyUI nodes return.

    Args:
        obj: The container to read from.
        index: Position (or key) to look up.

    Returns:
        The element found at *index*.

    Raises:
        IndexError: If *index* is out of range for a sequence.
        KeyError: If a mapping has neither *index* nor a ``"result"`` entry.
    """
    try:
        return obj[index]
    except KeyError:
        # Mapping without the key: unwrap the "result" payload and retry.
        return obj["result"][index]
|
|
|
|
def find_path(name: str, path: str = None) -> str:
    """Walk upward from *path* looking for an entry called *name*.

    Starting at *path* (the current working directory when omitted), each
    directory and then its parents are checked for a child named *name*.

    Args:
        name: File or directory name to locate.
        path: Directory to start from; defaults to ``os.getcwd()``.

    Returns:
        The full path to the first match found, or ``None`` if the search
        reaches the filesystem root without a hit.
    """
    current = os.getcwd() if path is None else path

    while True:
        if name in os.listdir(current):
            path_name = os.path.join(current, name)
            print(f"{name} found: {path_name}")
            return path_name

        parent = os.path.dirname(current)
        # dirname() of the root is the root itself: nowhere left to climb.
        if parent == current:
            return None
        current = parent
|
|
|
|
def add_comfyui_directory_to_sys_path() -> None:
    """Append the ComfyUI source directory to ``sys.path`` if it can be found.

    Does nothing when no 'ComfyUI' directory exists in the current directory
    or any of its ancestors.
    """
    comfyui_path = find_path("ComfyUI")
    # Guard clauses: bail out unless we located an actual directory.
    if comfyui_path is None:
        return
    if not os.path.isdir(comfyui_path):
        return
    sys.path.append(comfyui_path)
    print(f"'{comfyui_path}' added to sys.path")
|
|
|
|
def add_extra_model_paths() -> None:
    """Load ComfyUI's optional ``extra_model_paths.yaml`` if present.

    Registers any extra model directories listed in the file with ComfyUI so
    the loader nodes can see them; prints a notice when no config is found.
    """
    # ComfyUI moved this helper out of main.py into utils.extra_config in
    # newer revisions; try the new location first, fall back for older trees.
    try:
        from utils.extra_config import load_extra_path_config
    except ImportError:
        from main import load_extra_path_config

    extra_model_paths = find_path("extra_model_paths.yaml")

    if extra_model_paths is not None:
        load_extra_path_config(extra_model_paths)
    else:
        print("Could not find the extra_model_paths config file.")
|
|
|
|
# Make ComfyUI importable and register any extra model directories BEFORE
# the `from nodes import ...` below runs — that import requires ComfyUI to
# already be on sys.path.
add_comfyui_directory_to_sys_path()
add_extra_model_paths()
|
|
|
|
def import_custom_nodes() -> None:
    """Find all custom nodes in the custom_nodes folder and add those node objects to NODE_CLASS_MAPPINGS

    This function sets up a new asyncio event loop, initializes the PromptServer,
    creates a PromptQueue, and initializes the custom nodes.
    """
    import asyncio
    import execution
    from nodes import init_custom_nodes
    import server

    # Custom nodes may register handlers against the server's event loop
    # during init, so a fresh loop is created and installed first.
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)

    # PromptServer binds to the loop; PromptQueue registers itself on the
    # server instance as a side effect, which is why its result is unused.
    server_instance = server.PromptServer(loop)
    execution.PromptQueue(server_instance)

    # NOTE(review): newer ComfyUI versions renamed this entry point to
    # init_extra_nodes — confirm against the pinned ComfyUI revision.
    init_custom_nodes()
|
|
|
|
| from nodes import ( |
| LoadImage, |
| SaveImage, |
| LatentUpscale, |
| SetLatentNoiseMask, |
| CLIPTextEncode, |
| NODE_CLASS_MAPPINGS, |
| CheckpointLoaderSimple, |
| LoraLoader, |
| KSampler, |
| VAEDecode, |
| VAEEncode, |
| ) |
|
|
|
|
def main():
    """Inpaint LoRA-stylized eyeballs onto a portrait photo via ComfyUI nodes.

    Pipeline:
      1. Load the Juggernaut XL checkpoint and apply the eyeball LoRA.
      2. Detect the face with MediaPipe, turn the eye landmarks into SEGS,
         then into a dilated + gaussian-blurred (feathered) combined mask.
      3. Crop the eye region from the photo, VAE-encode it, upscale the
         latent to 1024x1024, and run a first KSampler pass conditioned on
         the eyeball prompt.
      4. Paste the decoded eye region back into the original photo.
      5. Run a second, longer sampling pass 10 times with fresh random
         seeds, compositing and saving each result.
    """
    import_custom_nodes()
    with torch.inference_mode():  # inference only — no autograd bookkeeping
        # --- model loading -------------------------------------------------
        checkpointloadersimple = CheckpointLoaderSimple()
        # Returns (model, clip, vae) — indexed 0/1/2 below.
        checkpointloadersimple_38 = checkpointloadersimple.load_checkpoint(
            ckpt_name="juggernautXL_v8Rundiffusion.safetensors"
        )

        loraloader = LoraLoader()
        # Full-strength LoRA on both the UNet and the CLIP encoder.
        loraloader_51 = loraloader.load_lora(
            lora_name="model/eyeballsXL-000025.safetensors",
            strength_model=1,
            strength_clip=1,
            model=get_value_at_index(checkpointloadersimple_38, 0),
            clip=get_value_at_index(checkpointloadersimple_38, 1),
        )

        # --- prompt conditioning ------------------------------------------
        cliptextencode = CLIPTextEncode()
        cliptextencode_40 = cliptextencode.encode(
            text="photograph of person with spherical round eyeballs, black pupils",
            clip=get_value_at_index(loraloader_51, 1),
        )

        # Empty negative prompt.
        cliptextencode_41 = cliptextencode.encode(
            text="", clip=get_value_at_index(loraloader_51, 1)
        )

        # --- source image and eye mask ------------------------------------
        loadimage = LoadImage()
        loadimage_167 = loadimage.load_image(image="IMG_2734_copy.jpg")

        mediapipe_facemeshpreprocessor = NODE_CLASS_MAPPINGS[
            "MediaPipe-FaceMeshPreprocessor"
        ]()
        mediapipe_facemeshpreprocessor_25 = mediapipe_facemeshpreprocessor.detect(
            max_faces=10,
            min_confidence=0.5,
            resolution=512,
            image=get_value_at_index(loadimage_167, 0),
        )

        # Only the left/right eye landmarks are enabled — the mask covers
        # just the eyes, not the rest of the face.
        mediapipefacemeshtosegs = NODE_CLASS_MAPPINGS["MediaPipeFaceMeshToSEGS"]()
        mediapipefacemeshtosegs_18 = mediapipefacemeshtosegs.doit(
            crop_factor=3,
            bbox_fill=False,
            crop_min_size=50,
            drop_size=1,
            dilation=0,
            face=False,
            mouth=False,
            left_eyebrow=False,
            left_eye=True,
            left_pupil=False,
            right_eyebrow=False,
            right_eye=True,
            right_pupil=False,
            image=get_value_at_index(mediapipe_facemeshpreprocessor_25, 0),
        )

        segstocombinedmask = NODE_CLASS_MAPPINGS["SegsToCombinedMask"]()
        segstocombinedmask_23 = segstocombinedmask.doit(
            segs=get_value_at_index(mediapipefacemeshtosegs_18, 0)
        )

        # Grow the mask well past the eye outline, then feather its edge so
        # the inpainted region blends into the surrounding skin.
        impactdilatemask = NODE_CLASS_MAPPINGS["ImpactDilateMask"]()
        impactdilatemask_26 = impactdilatemask.doit(
            dilation=55, mask=get_value_at_index(segstocombinedmask_23, 0)
        )

        impactgaussianblurmask = NODE_CLASS_MAPPINGS["ImpactGaussianBlurMask"]()
        impactgaussianblurmask_27 = impactgaussianblurmask.doit(
            kernel_size=20, sigma=10, mask=get_value_at_index(impactdilatemask_26, 0)
        )

        masktoimage = NODE_CLASS_MAPPINGS["MaskToImage"]()
        masktoimage_21 = masktoimage.mask_to_image(
            mask=get_value_at_index(impactgaussianblurmask_27, 0)
        )

        # Bounding region around the mask, snapped to a 64px-multiple ratio
        # (friendly to the VAE's 8x downscale).
        mask_to_region = NODE_CLASS_MAPPINGS["Mask To Region"]()
        mask_to_region_29 = mask_to_region.get_region(
            padding=0,
            constraints="keep_ratio",
            constraint_x=64,
            constraint_y=64,
            min_width=0,
            min_height=0,
            batch_behavior="match_ratio",
            mask=get_value_at_index(masktoimage_21, 0),
        )

        # --- first sampling pass over the cropped eye region --------------
        cut_by_mask = NODE_CLASS_MAPPINGS["Cut By Mask"]()
        cut_by_mask_30 = cut_by_mask.cut(
            force_resize_width=0,
            force_resize_height=0,
            image=get_value_at_index(loadimage_167, 0),
            mask=get_value_at_index(mask_to_region_29, 0),
        )

        vaeencode = VAEEncode()
        vaeencode_48 = vaeencode.encode(
            pixels=get_value_at_index(cut_by_mask_30, 0),
            vae=get_value_at_index(checkpointloadersimple_38, 2),
        )

        latentupscale = LatentUpscale()
        latentupscale_60 = latentupscale.upscale(
            upscale_method="nearest-exact",
            width=1024,
            height=1024,
            crop="disabled",
            samples=get_value_at_index(vaeencode_48, 0),
        )

        ksampler = KSampler()
        # NOTE(review): random.randint's upper bound is inclusive, so this
        # can yield 2**64 — one past the uint64 max; confirm KSampler
        # accepts that value.  denoise=0.65 keeps the crop's structure.
        ksampler_39 = ksampler.sample(
            seed=random.randint(1, 2**64),
            steps=20,
            cfg=8,
            sampler_name="euler",
            scheduler="normal",
            denoise=0.65,
            model=get_value_at_index(loraloader_51, 0),
            positive=get_value_at_index(cliptextencode_40, 0),
            negative=get_value_at_index(cliptextencode_41, 0),
            latent_image=get_value_at_index(latentupscale_60, 0),
        )

        vaedecode = VAEDecode()
        vaedecode_43 = vaedecode.decode(
            samples=get_value_at_index(ksampler_39, 0),
            vae=get_value_at_index(checkpointloadersimple_38, 2),
        )

        # --- composite the first pass back into the photo ------------------
        # Crop of the feathered mask image matching the eye region.
        cut_by_mask_32 = cut_by_mask.cut(
            force_resize_width=0,
            force_resize_height=0,
            image=get_value_at_index(masktoimage_21, 0),
            mask=get_value_at_index(mask_to_region_29, 0),
        )

        # Apply that feathered crop to the sampled output.
        cut_by_mask_62 = cut_by_mask.cut(
            force_resize_width=0,
            force_resize_height=0,
            image=get_value_at_index(vaedecode_43, 0),
            mask=get_value_at_index(cut_by_mask_32, 0),
        )

        paste_by_mask = NODE_CLASS_MAPPINGS["Paste By Mask"]()
        paste_by_mask_45 = paste_by_mask.paste(
            resize_behavior="resize",
            image_base=get_value_at_index(loadimage_167, 0),
            image_to_paste=get_value_at_index(cut_by_mask_62, 0),
            mask=get_value_at_index(masktoimage_21, 0),
        )

        # Re-crop the composited photo for the refinement pass.
        cut_by_mask_91 = cut_by_mask.cut(
            force_resize_width=0,
            force_resize_height=0,
            image=get_value_at_index(paste_by_mask_45, 0),
            mask=get_value_at_index(mask_to_region_29, 0),
        )

        vaeencode_93 = vaeencode.encode(
            pixels=get_value_at_index(cut_by_mask_91, 0),
            vae=get_value_at_index(checkpointloadersimple_38, 2),
        )

        # NOTE(review): load_image_batch_166 is never referenced afterwards —
        # dead node execution left over from the exported workflow.
        load_image_batch = NODE_CLASS_MAPPINGS["Load Image Batch"]()
        load_image_batch_166 = load_image_batch.load_batch_images(
            mode="incremental_image",
            index=0,
            label="Batch 001",
            path="/workspace/input",
            pattern="*",
            allow_RGBA_output="false",
            filename_text_extension="true",
        )

        image_to_mask = NODE_CLASS_MAPPINGS["Image To Mask"]()
        setlatentnoisemask = SetLatentNoiseMask()
        saveimage = SaveImage()

        # --- refinement pass, repeated 10 times with fresh seeds ------------
        # NOTE(review): the loop variable q is unused — the loop only varies
        # the KSampler seed.  image_to_mask_50, setlatentnoisemask_49 and
        # latentupscale_111 are loop-invariant and could be hoisted;
        # setlatentnoisemask_49 is never consumed at all (dead result).
        for q in range(10):
            image_to_mask_50 = image_to_mask.convert(
                method="intensity", image=get_value_at_index(mask_to_region_29, 0)
            )

            setlatentnoisemask_49 = setlatentnoisemask.set_mask(
                samples=get_value_at_index(latentupscale_60, 0),
                mask=get_value_at_index(image_to_mask_50, 0),
            )

            latentupscale_111 = latentupscale.upscale(
                upscale_method="nearest-exact",
                width=1024,
                height=1024,
                crop="disabled",
                samples=get_value_at_index(vaeencode_93, 0),
            )

            # Longer, slightly gentler pass (60 steps, denoise 0.55) than the
            # first sampler, on the already-composited eye region.
            ksampler_95 = ksampler.sample(
                seed=random.randint(1, 2**64),
                steps=60,
                cfg=8,
                sampler_name="dpm_2",
                scheduler="normal",
                denoise=0.55,
                model=get_value_at_index(loraloader_51, 0),
                positive=get_value_at_index(cliptextencode_40, 0),
                negative=get_value_at_index(cliptextencode_41, 0),
                latent_image=get_value_at_index(latentupscale_111, 0),
            )

            vaedecode_96 = vaedecode.decode(
                samples=get_value_at_index(ksampler_95, 0),
                vae=get_value_at_index(checkpointloadersimple_38, 2),
            )

            cut_by_mask_124 = cut_by_mask.cut(
                force_resize_width=0,
                force_resize_height=0,
                image=get_value_at_index(vaedecode_96, 0),
                mask=get_value_at_index(cut_by_mask_32, 0),
            )

            paste_by_mask_127 = paste_by_mask.paste(
                resize_behavior="resize",
                image_base=get_value_at_index(paste_by_mask_45, 0),
                image_to_paste=get_value_at_index(cut_by_mask_124, 0),
                mask=get_value_at_index(masktoimage_21, 0),
            )

            saveimage_147 = saveimage.save_images(
                filename_prefix="ComfyUI",
                images=get_value_at_index(paste_by_mask_127, 0),
            )
|
|
|
|
# Script entry point: run the eyeball-inpainting pipeline.
if __name__ == "__main__":
    main()