diff --git a/.gitattributes b/.gitattributes index a6344aac8c09253b3b630fb776ae94478aa0275b..38121deaa70df1b695383f1c100a24e90378ecd5 100644 --- a/.gitattributes +++ b/.gitattributes @@ -33,3 +33,17 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text *.zip filter=lfs diff=lfs merge=lfs -text *.zst filter=lfs diff=lfs merge=lfs -text *tfevents* filter=lfs diff=lfs merge=lfs -text +exhm/detailer/stable-diffusion-webui-eyemask/models/shape_predictor_68_face_landmarks.dat filter=lfs diff=lfs merge=lfs -text +exhm/extensions[[:space:]]img2/ComfyUI-nodes-hnmr/examples/workflow_mbw_multi.png filter=lfs diff=lfs merge=lfs -text +exhm/extensions[[:space:]]img2/ComfyUI-nodes-hnmr/examples/workflow_xyz.png filter=lfs diff=lfs merge=lfs -text +exhm/extensions[[:space:]]img2/latent-upscale/assets/default.png filter=lfs diff=lfs merge=lfs -text +exhm/extensions[[:space:]]img2/latent-upscale/assets/img2img_latent_upscale_process.png filter=lfs diff=lfs merge=lfs -text +exhm/extensions[[:space:]]img2/latent-upscale/assets/nearest-exact-normal1.png filter=lfs diff=lfs merge=lfs -text +exhm/extensions[[:space:]]img2/latent-upscale/assets/nearest-exact-normal2.png filter=lfs diff=lfs merge=lfs -text +exhm/extensions[[:space:]]img2/latent-upscale/assets/nearest-exact-simple1.png filter=lfs diff=lfs merge=lfs -text +exhm/extensions[[:space:]]img2/latent-upscale/assets/nearest-exact-simple2.png filter=lfs diff=lfs merge=lfs -text +exhm/extensions[[:space:]]img2/latent-upscale/assets/nearest-exact-simple8.png filter=lfs diff=lfs merge=lfs -text +exhm/extensions[[:space:]]img2/sd-webui-img2txt/sd-webui-img2txt.gif filter=lfs diff=lfs merge=lfs -text +exhm/extensions[[:space:]]img2/sd-webui-inpaint-anything/images/inpaint_anything_ui_image_1.png filter=lfs diff=lfs merge=lfs -text +exhm/extensions[[:space:]]img2/sd-webui-manga-inpainting/manga_inpainting/repo/examples/representative.png filter=lfs diff=lfs merge=lfs -text 
+exhm/extensions[[:space:]]img2/sd-webui-real-image-artifacts/examples/before.png filter=lfs diff=lfs merge=lfs -text diff --git a/exhm/detailer/dddetailer/.gitignore b/exhm/detailer/dddetailer/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..a0e67413961089596eb61d1df811722abb7cc999 --- /dev/null +++ b/exhm/detailer/dddetailer/.gitignore @@ -0,0 +1,10 @@ +__pycache__ +*.ckpt +*.pth +/tmp +/outputs +/log +.vscode +/test-cases +.mypy_cache/ +.ruff_cache/ diff --git a/exhm/detailer/dddetailer/README.md b/exhm/detailer/dddetailer/README.md new file mode 100644 index 0000000000000000000000000000000000000000..1f37f1650df1af4e0d524b2938b5d42cc748436b --- /dev/null +++ b/exhm/detailer/dddetailer/README.md @@ -0,0 +1,62 @@ +# 돚거 Detection Detailer + +Dotgeo(hijack) Detection Detailer + +ddetailer with torch 2.0, mmcv 2.0, mmdet 3.0 + +integrated with [noahge4/ddetailer](https://github.com/noahge4/ddetailer) + +AI실사채널 ChatGPT23님의 [ddetailer 수정본](https://arca.live/b/aireal/72297207) 병합됨 + +## Installation + +1. remove original ddetailer extension - `stable-diffusion-webui/extensions/ddetailer` folder +2. remove original model files - `stable-diffusion-webui/models/mmdet` folder +3. install from the extensions tab with url `https://github.com/Bing-su/dddetailer` + +## Problem + +The predictive accuracy of the segmentation model has become very poor. + +# Detection Detailer +An object detection and auto-mask extension for [Stable Diffusion web UI](https://github.com/AUTOMATIC1111/stable-diffusion-webui). See [Installation](https://github.com/dustysys/ddetailer#installation). + +![adoringfan](/misc/ddetailer_example_1.png) + +### Segmentation +Default models enable person and face instance segmentation. + +![amgothic](/misc/ddetailer_example_2.png) + +### Detailing +With full-resolution inpainting, the extension is handy for improving faces without the hassle of manual masking. + +![zion](/misc/ddetailer_example_3.gif) + +## Installation +1. 
Use `git clone https://github.com/dustysys/ddetailer.git` from your SD web UI `/extensions` folder. Alternatively, install from the extensions tab with url `https://github.com/dustysys/ddetailer` +2. Start or reload SD web UI. + +The models and dependencies should download automatically. To install them manually, follow the [official instructions for installing mmdet](https://mmcv.readthedocs.io/en/latest/get_started/installation.html#install-with-mim-recommended). The models can be [downloaded here](https://huggingface.co/dustysys/ddetailer) and should be placed in `/models/mmdet/bbox` for bounding box (`anime-face_yolov3`) or `/models/mmdet/segm` for instance segmentation models (`dd-person_mask2former`). See the [MMDetection docs](https://mmdetection.readthedocs.io/en/latest/1_exist_data_model.html) for guidance on training your own models. For using official MMDetection pretrained models see [here](https://github.com/dustysys/ddetailer/issues/5#issuecomment-1311231989), these are trained for photorealism. See [Troubleshooting](https://github.com/dustysys/ddetailer#troubleshooting) if you encounter issues during installation. + +## Usage +Select Detection Detailer as the script in SD web UI to use the extension. Click 'Generate' to run the script. Here are some tips: +- `anime-face_yolov3` can detect the bounding box of faces as the primary model while `dd-person_mask2former` isolates the head's silhouette as the secondary model by using the bitwise AND option. Refer to [this example](https://github.com/dustysys/ddetailer/issues/4#issuecomment-1311200268). +- The dilation factor expands the mask, while the x & y offsets move the mask around. +- The script is available in txt2img mode as well and can improve the quality of your 10 pulls with moderate settings (low denoise). + +## Troubleshooting +If you get the message ERROR: 'Failed building wheel for pycocotools' follow [these steps](https://github.com/dustysys/ddetailer/issues/1#issuecomment-1309415543). 
+ +Any other issues installing, open an [issue](https://github.com/dustysys/ddetailer/issues). + +## Credits +hysts/[anime-face-detector](https://github.com/hysts/anime-face-detector) - Creator of `anime-face_yolov3`, which has impressive performance on a variety of art styles. + +skytnt/[anime-segmentation](https://huggingface.co/datasets/skytnt/anime-segmentation) - Synthetic dataset used to train `dd-person_mask2former`. + +jerryli27/[AniSeg](https://github.com/jerryli27/AniSeg) - Annotated dataset used to train `dd-person_mask2former`. + +open-mmlab/[mmdetection](https://github.com/open-mmlab/mmdetection) - Object detection toolset. `dd-person_mask2former` was trained via transfer learning using their [R-50 Mask2Former instance segmentation model](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask2former#instance-segmentation) as a base. + +AUTOMATIC1111/[stable-diffusion-webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui) - Web UI for Stable Diffusion, base application for this extension. 
diff --git a/exhm/detailer/dddetailer/config/coco_panoptic.py b/exhm/detailer/dddetailer/config/coco_panoptic.py new file mode 100644 index 0000000000000000000000000000000000000000..954daaded2f2f5e9cf745506d4bc59ac519eebd3 --- /dev/null +++ b/exhm/detailer/dddetailer/config/coco_panoptic.py @@ -0,0 +1,98 @@ +# dataset settings +dataset_type = "CocoPanopticDataset" +data_root = 'data/coco/' + +# Example to use different file client +# Method 1: simply set the data root and let the file I/O module +# automatically infer from prefix (not support LMDB and Memcache yet) + +# data_root = "s3://openmmlab/datasets/detection/coco/" + +# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6 +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/detection/', +# 'data/': 's3://openmmlab/datasets/detection/' +# })) +backend_args = None + +train_pipeline = [ + dict(type="LoadImageFromFile", backend_args=backend_args), + dict(type="LoadPanopticAnnotations", backend_args=backend_args), + dict(type="Resize", scale=(1333, 800), keep_ratio=True), + dict(type="RandomFlip", prob=0.5), + dict(type="PackDetInputs"), +] +test_pipeline = [ + dict(type="LoadImageFromFile", backend_args=backend_args), + dict(type="Resize", scale=(1333, 800), keep_ratio=True), + dict(type="LoadPanopticAnnotations", backend_args=backend_args), + dict( + type="PackDetInputs", + meta_keys=("img_id", "img_path", "ori_shape", "img_shape", "scale_factor"), + ), +] + +train_dataloader = dict( + batch_size=2, + num_workers=2, + persistent_workers=True, + sampler=dict(type="DefaultSampler", shuffle=True), + batch_sampler=dict(type="AspectRatioBatchSampler"), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file="annotations/panoptic_train2017.json", + data_prefix=dict(img="train2017/", seg="annotations/panoptic_train2017/"), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline, + 
backend_args=backend_args, + ), +) +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type="DefaultSampler", shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file="annotations/panoptic_val2017.json", + data_prefix=dict(img="val2017/", seg="annotations/panoptic_val2017/"), + test_mode=True, + pipeline=test_pipeline, + backend_args=backend_args, + ), +) +test_dataloader = val_dataloader + +val_evaluator = dict( + type="CocoPanopticMetric", + ann_file=data_root + "annotations/panoptic_val2017.json", + seg_prefix=data_root + "annotations/panoptic_val2017/", + backend_args=backend_args, +) +test_evaluator = val_evaluator + +# inference on test dataset and +# format the output results for submission. +# test_dataloader = dict( +# batch_size=1, +# num_workers=1, +# persistent_workers=True, +# drop_last=False, +# sampler=dict(type='DefaultSampler', shuffle=False), +# dataset=dict( +# type=dataset_type, +# data_root=data_root, +# ann_file='annotations/panoptic_image_info_test-dev2017.json', +# data_prefix=dict(img='test2017/'), +# test_mode=True, +# pipeline=test_pipeline)) +# test_evaluator = dict( +# type='CocoPanopticMetric', +# format_only=True, +# ann_file=data_root + 'annotations/panoptic_image_info_test-dev2017.json', +# outfile_prefix='./work_dirs/coco_panoptic/test') diff --git a/exhm/detailer/dddetailer/config/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py b/exhm/detailer/dddetailer/config/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py new file mode 100644 index 0000000000000000000000000000000000000000..b67d9b0310c2ecc8ac0cc692d9fc6a888ea63b3f --- /dev/null +++ b/exhm/detailer/dddetailer/config/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py @@ -0,0 +1,265 @@ +_base_ = ["./coco_panoptic.py"] +image_size = (1024, 1024) +batch_augments = [ + dict( + type="BatchFixedSizePad", + size=image_size, + img_pad_value=0, + pad_mask=True, + mask_pad_value=0, + pad_seg=True, + 
seg_pad_value=255, + ) +] +data_preprocessor = dict( + type="DetDataPreprocessor", + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=32, + pad_mask=True, + mask_pad_value=0, + pad_seg=True, + seg_pad_value=255, + batch_augments=batch_augments, +) + +num_things_classes = 1 +num_stuff_classes = 0 +num_classes = num_things_classes + num_stuff_classes +model = dict( + type="Mask2Former", + data_preprocessor=data_preprocessor, + backbone=dict( + type="ResNet", + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=-1, + norm_cfg=dict(type="BN", requires_grad=False), + norm_eval=True, + style="pytorch", + init_cfg=dict(type="Pretrained", checkpoint="torchvision://resnet50"), + ), + panoptic_head=dict( + type="Mask2FormerHead", + in_channels=[256, 512, 1024, 2048], # pass to pixel_decoder inside + strides=[4, 8, 16, 32], + feat_channels=256, + out_channels=256, + num_things_classes=num_things_classes, + num_stuff_classes=num_stuff_classes, + num_queries=100, + num_transformer_feat_level=3, + pixel_decoder=dict( + type="MSDeformAttnPixelDecoder", + num_outs=3, + norm_cfg=dict(type="GN", num_groups=32), + act_cfg=dict(type="ReLU"), + encoder=dict( # DeformableDetrTransformerEncoder + num_layers=6, + layer_cfg=dict( # DeformableDetrTransformerEncoderLayer + self_attn_cfg=dict( # MultiScaleDeformableAttention + embed_dims=256, + num_heads=8, + num_levels=3, + num_points=4, + dropout=0.0, + batch_first=True, + ), + ffn_cfg=dict( + embed_dims=256, + feedforward_channels=1024, + num_fcs=2, + ffn_drop=0.0, + act_cfg=dict(type="ReLU", inplace=True), + ), + ), + ), + positional_encoding=dict(num_feats=128, normalize=True), + ), + enforce_decoder_input_project=False, + positional_encoding=dict(num_feats=128, normalize=True), + transformer_decoder=dict( # Mask2FormerTransformerDecoder + return_intermediate=True, + num_layers=9, + layer_cfg=dict( # Mask2FormerTransformerDecoderLayer + self_attn_cfg=dict( # 
MultiheadAttention + embed_dims=256, num_heads=8, dropout=0.0, batch_first=True + ), + cross_attn_cfg=dict( # MultiheadAttention + embed_dims=256, num_heads=8, dropout=0.0, batch_first=True + ), + ffn_cfg=dict( + embed_dims=256, + feedforward_channels=2048, + num_fcs=2, + ffn_drop=0.0, + act_cfg=dict(type="ReLU", inplace=True), + ), + ), + init_cfg=None, + ), + loss_cls=dict( + type="CrossEntropyLoss", + use_sigmoid=False, + loss_weight=2.0, + reduction="mean", + class_weight=[1.0] * num_classes + [0.1], + ), + loss_mask=dict( + type="CrossEntropyLoss", use_sigmoid=True, reduction="mean", loss_weight=5.0 + ), + loss_dice=dict( + type="DiceLoss", + use_sigmoid=True, + activate=True, + reduction="mean", + naive_dice=True, + eps=1.0, + loss_weight=5.0, + ), + ), + panoptic_fusion_head=dict( + type="MaskFormerFusionHead", + num_things_classes=num_things_classes, + num_stuff_classes=num_stuff_classes, + loss_panoptic=None, + init_cfg=None, + ), + train_cfg=dict( + num_points=12544, + oversample_ratio=3.0, + importance_sample_ratio=0.75, + assigner=dict( + type="HungarianAssigner", + match_costs=[ + dict(type="ClassificationCost", weight=2.0), + dict(type="CrossEntropyLossCost", weight=5.0, use_sigmoid=True), + dict(type="DiceCost", weight=5.0, pred_act=True, eps=1.0), + ], + ), + sampler=dict(type="MaskPseudoSampler"), + ), + test_cfg=dict( + panoptic_on=True, + # For now, the dataset does not support + # evaluating semantic segmentation metric. + semantic_on=False, + instance_on=True, + # max_per_image is for instance segmentation. + max_per_image=100, + iou_thr=0.8, + # In Mask2Former's panoptic postprocessing, + # it will filter mask area where score is less than 0.5 . 
+ filter_low_score=True, + ), + init_cfg=None, +) + +# dataset settings +data_root = "data/coco/" +train_pipeline = [ + dict( + type="LoadImageFromFile", to_float32=True, backend_args={{_base_.backend_args}} + ), + dict( + type="LoadPanopticAnnotations", + with_bbox=True, + with_mask=True, + with_seg=True, + backend_args={{_base_.backend_args}}, + ), + dict(type="RandomFlip", prob=0.5), + # large scale jittering + dict( + type="RandomResize", scale=image_size, ratio_range=(0.1, 2.0), keep_ratio=True + ), + dict( + type="RandomCrop", + crop_size=image_size, + crop_type="absolute", + recompute_bbox=True, + allow_negative_crop=True, + ), + dict(type="PackDetInputs"), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) + +val_evaluator = [ + dict( + type="CocoPanopticMetric", + ann_file=data_root + "annotations/panoptic_val2017.json", + seg_prefix=data_root + "annotations/panoptic_val2017/", + backend_args={{_base_.backend_args}}, + ), + dict( + type="CocoMetric", + ann_file=data_root + "annotations/instances_val2017.json", + metric=["bbox", "segm"], + backend_args={{_base_.backend_args}}, + ), +] +test_evaluator = val_evaluator + +# optimizer +embed_multi = dict(lr_mult=1.0, decay_mult=0.0) +optim_wrapper = dict( + type="OptimWrapper", + optimizer=dict( + type="AdamW", lr=0.0001, weight_decay=0.05, eps=1e-8, betas=(0.9, 0.999) + ), + paramwise_cfg=dict( + custom_keys={ + "backbone": dict(lr_mult=0.1, decay_mult=1.0), + "query_embed": embed_multi, + "query_feat": embed_multi, + "level_embed": embed_multi, + }, + norm_decay_mult=0.0, + ), + clip_grad=dict(max_norm=0.01, norm_type=2), +) + +# learning policy +max_iters = 368750 +param_scheduler = dict( + type="MultiStepLR", + begin=0, + end=max_iters, + by_epoch=False, + milestones=[327778, 355092], + gamma=0.1, +) + +# Before 365001th iteration, we do evaluation every 5000 iterations. 
+# After 365000th iteration, we do evaluation every 368750 iterations, +# which means that we do evaluation at the end of training. +interval = 5000 +dynamic_intervals = [(max_iters // interval * interval + 1, max_iters)] +train_cfg = dict( + type="IterBasedTrainLoop", + max_iters=max_iters, + val_interval=interval, + dynamic_intervals=dynamic_intervals, +) +val_cfg = dict(type="ValLoop") +test_cfg = dict(type="TestLoop") + +default_hooks = dict( + checkpoint=dict( + type="CheckpointHook", + by_epoch=False, + save_last=True, + max_keep_ckpts=3, + interval=interval, + ) +) +log_processor = dict(type="LogProcessor", window_size=50, by_epoch=False) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (2 samples per GPU). +auto_scale_lr = dict(enable=False, base_batch_size=16) diff --git a/exhm/detailer/dddetailer/config/mmdet_anime-face_yolov3.py b/exhm/detailer/dddetailer/config/mmdet_anime-face_yolov3.py new file mode 100644 index 0000000000000000000000000000000000000000..5d384b556c113baf491d96ea47317d3133eae9b5 --- /dev/null +++ b/exhm/detailer/dddetailer/config/mmdet_anime-face_yolov3.py @@ -0,0 +1,177 @@ +# _base_ = ["../_base_/schedules/schedule_1x.py", "../_base_/default_runtime.py"] +# model settings +data_preprocessor = dict( + type="DetDataPreprocessor", + mean=[0, 0, 0], + std=[255.0, 255.0, 255.0], + bgr_to_rgb=True, + pad_size_divisor=32, +) +model = dict( + type="YOLOV3", + data_preprocessor=data_preprocessor, + backbone=dict( + type="Darknet", + depth=53, + out_indices=(3, 4, 5), + init_cfg=dict(type="Pretrained", checkpoint="open-mmlab://darknet53"), + ), + neck=dict( + type="YOLOV3Neck", + num_scales=3, + in_channels=[1024, 512, 256], + out_channels=[512, 256, 128], + ), + bbox_head=dict( + type="YOLOV3Head", + num_classes=1, + in_channels=[512, 256, 128], + out_channels=[1024, 512, 256], + anchor_generator=dict( + type="YOLOAnchorGenerator", + 
base_sizes=[ + [(116, 90), (156, 198), (373, 326)], + [(30, 61), (62, 45), (59, 119)], + [(10, 13), (16, 30), (33, 23)], + ], + strides=[32, 16, 8], + ), + bbox_coder=dict(type="YOLOBBoxCoder"), + featmap_strides=[32, 16, 8], + loss_cls=dict( + type="CrossEntropyLoss", use_sigmoid=True, loss_weight=1.0, reduction="sum" + ), + loss_conf=dict( + type="CrossEntropyLoss", use_sigmoid=True, loss_weight=1.0, reduction="sum" + ), + loss_xy=dict( + type="CrossEntropyLoss", use_sigmoid=True, loss_weight=2.0, reduction="sum" + ), + loss_wh=dict(type="MSELoss", loss_weight=2.0, reduction="sum"), + ), + # training and testing settings + train_cfg=dict( + assigner=dict( + type="GridAssigner", pos_iou_thr=0.5, neg_iou_thr=0.5, min_pos_iou=0 + ) + ), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + conf_thr=0.005, + nms=dict(type="nms", iou_threshold=0.45), + max_per_img=100, + ), +) +# dataset settings +dataset_type = "CocoDataset" +data_root = "data/coco/" + +# Example to use different file client +# Method 1: simply set the data root and let the file I/O module +# automatically infer from prefix (not support LMDB and Memcache yet) + +# data_root = 's3://openmmlab/datasets/detection/coco/' + +# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6 +# backend_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/': 's3://openmmlab/datasets/detection/', +# 'data/': 's3://openmmlab/datasets/detection/' +# })) +backend_args = None + +train_pipeline = [ + dict(type="LoadImageFromFile", backend_args=backend_args), + dict(type="LoadAnnotations", with_bbox=True), + dict( + type="Expand", + mean=data_preprocessor["mean"], + to_rgb=data_preprocessor["bgr_to_rgb"], + ratio_range=(1, 2), + ), + dict( + type="MinIoURandomCrop", + min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9), + min_crop_size=0.3, + ), + dict(type="RandomResize", scale=[(320, 320), (608, 608)], keep_ratio=True), + dict(type="RandomFlip", prob=0.5), + 
dict(type="PhotoMetricDistortion"), + dict(type="PackDetInputs"), +] +test_pipeline = [ + dict(type="LoadImageFromFile", backend_args=backend_args), + dict(type="Resize", scale=(608, 608), keep_ratio=True), + dict(type="LoadAnnotations", with_bbox=True), + dict( + type="PackDetInputs", + meta_keys=("img_id", "img_path", "ori_shape", "img_shape", "scale_factor"), + ), +] + +train_dataloader = dict( + batch_size=8, + num_workers=4, + persistent_workers=True, + sampler=dict(type="DefaultSampler", shuffle=True), + batch_sampler=dict(type="AspectRatioBatchSampler"), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file="annotations/instances_train2017.json", + data_prefix=dict(img="train2017/"), + filter_cfg=dict(filter_empty_gt=True, min_size=32), + pipeline=train_pipeline, + backend_args=backend_args, + ), +) +val_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type="DefaultSampler", shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file="annotations/instances_val2017.json", + data_prefix=dict(img="val2017/"), + test_mode=True, + pipeline=test_pipeline, + backend_args=backend_args, + ), +) +test_dataloader = val_dataloader + +val_evaluator = dict( + type="CocoMetric", + ann_file=data_root + "annotations/instances_val2017.json", + metric="bbox", + backend_args=backend_args, +) +test_evaluator = val_evaluator + +train_cfg = dict(max_epochs=273, val_interval=7) + +# optimizer +optim_wrapper = dict( + type="OptimWrapper", + optimizer=dict(type="SGD", lr=0.001, momentum=0.9, weight_decay=0.0005), + clip_grad=dict(max_norm=35, norm_type=2), +) + +# learning policy +param_scheduler = [ + dict(type="LinearLR", start_factor=0.1, by_epoch=False, begin=0, end=2000), + dict(type="MultiStepLR", by_epoch=True, milestones=[218, 246], gamma=0.1), +] + +default_hooks = dict(checkpoint=dict(type="CheckpointHook", interval=7)) + +# NOTE: `auto_scale_lr` is for automatically 
scaling LR, +# USER SHOULD NOT CHANGE ITS VALUES. +# base_batch_size = (8 GPUs) x (8 samples per GPU) +auto_scale_lr = dict(base_batch_size=64) diff --git a/exhm/detailer/dddetailer/config/mmdet_dd-person_mask2former.py b/exhm/detailer/dddetailer/config/mmdet_dd-person_mask2former.py new file mode 100644 index 0000000000000000000000000000000000000000..1b2e94f4aa79cb816aaec390c9891eb410584ce8 --- /dev/null +++ b/exhm/detailer/dddetailer/config/mmdet_dd-person_mask2former.py @@ -0,0 +1,105 @@ +_base_ = ["./mask2former_r50_8xb2-lsj-50e_coco-panoptic.py"] + +num_things_classes = 1 +num_stuff_classes = 0 +num_classes = num_things_classes + num_stuff_classes +image_size = (1024, 1024) +batch_augments = [ + dict( + type="BatchFixedSizePad", + size=image_size, + img_pad_value=0, + pad_mask=True, + mask_pad_value=0, + pad_seg=False, + ) +] +data_preprocessor = dict( + type="DetDataPreprocessor", + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=32, + pad_mask=True, + mask_pad_value=0, + pad_seg=False, + batch_augments=batch_augments, +) +model = dict( + data_preprocessor=data_preprocessor, + panoptic_head=dict( + num_things_classes=num_things_classes, + num_stuff_classes=num_stuff_classes, + loss_cls=dict(class_weight=[1.0] * num_classes + [0.1]), + ), + panoptic_fusion_head=dict( + num_things_classes=num_things_classes, num_stuff_classes=num_stuff_classes + ), + test_cfg=dict(panoptic_on=False), +) + +# dataset settings +train_pipeline = [ + dict(type="LoadImageFromFile", to_float32=True, backend_args=None), + dict(type="LoadAnnotations", with_bbox=True, with_mask=True), + dict(type="RandomFlip", prob=0.5), + # large scale jittering + dict( + type="RandomResize", + scale=image_size, + ratio_range=(0.1, 2.0), + resize_type="Resize", + keep_ratio=True, + ), + dict( + type="RandomCrop", + crop_size=image_size, + crop_type="absolute", + recompute_bbox=True, + allow_negative_crop=True, + ), + dict(type="FilterAnnotations", 
min_gt_bbox_wh=(1e-5, 1e-5), by_mask=True), + dict(type="PackDetInputs"), +] + +test_pipeline = [ + dict(type="LoadImageFromFile", to_float32=True, backend_args=None), + dict(type="Resize", scale=(1333, 800), keep_ratio=True), + # If you don't have a gt annotation, delete the pipeline + dict(type="LoadAnnotations", with_bbox=True, with_mask=True), + dict( + type="PackDetInputs", + meta_keys=("img_id", "img_path", "ori_shape", "img_shape", "scale_factor"), + ), +] + +dataset_type = "CocoDataset" +data_root = "data/coco/" + +train_dataloader = dict( + dataset=dict( + type=dataset_type, + ann_file="annotations/instances_train2017.json", + data_prefix=dict(img="train2017/"), + pipeline=train_pipeline, + ) +) +val_dataloader = dict( + dataset=dict( + type=dataset_type, + ann_file="annotations/instances_val2017.json", + data_prefix=dict(img="val2017/"), + pipeline=test_pipeline, + ) +) +test_dataloader = val_dataloader + +val_evaluator = dict( + _delete_=True, + type="CocoMetric", + ann_file=data_root + "annotations/instances_val2017.json", + metric=["bbox", "segm"], + format_only=False, + backend_args=None, +) +test_evaluator = val_evaluator diff --git a/exhm/detailer/dddetailer/install.py b/exhm/detailer/dddetailer/install.py new file mode 100644 index 0000000000000000000000000000000000000000..7954c7f36e79dd615a32d03f67620e386606be8b --- /dev/null +++ b/exhm/detailer/dddetailer/install.py @@ -0,0 +1,71 @@ +import sys +from pathlib import Path +from textwrap import dedent + +from packaging import version + +import launch +from launch import is_installed, run, run_pip + +try: + skip_install = launch.args.skip_install +except Exception: + skip_install = False + +python = sys.executable + +def check_ddetailer() -> bool: + try: + from modules.paths import extensions_dir + + extensions_path = Path(extensions_dir) + except ImportError: + from modules.paths import data_path + + extensions_path = Path(data_path, "extensions") + + ddetailer_exists = any(p.is_dir() and 
p.name.startswith("ddetailer") for p in extensions_path.iterdir()) + return not ddetailer_exists + + +def check_install() -> bool: + try: + import mmcv + import mmdet + from mmdet.evaluation import get_classes + except Exception: + return False + + if not hasattr(mmcv, "__version__") or not hasattr(mmdet, "__version__"): + return False + + v1 = version.parse(mmcv.__version__) >= version.parse("2.0.0") + v2 = version.parse(mmdet.__version__) >= version.parse("3.0.0") + return v1 and v2 + + +def install(): + if not is_installed("pycocotools"): + run(f"{python} -m pip install pycocotools", live=True) + + if not is_installed("mim"): + run_pip("install openmim", desc="openmim") + + if not check_install(): + print("Uninstalling mmcv mmdet... (if installed)") + run(f'"{python}" -m pip uninstall -y mmcv mmcv-full mmdet mmengine', live=True) + print("Installing mmcv mmdet...") + run(f'"{python}" -m mim install -U mmcv>=2.0.0 mmdet>=3.0.0', live=True) + + +if not check_ddetailer(): + message = """ + [-] dddetailer: Please remove the following: + 1. the original ddetailer extension - "stable-diffusion-webui/extensions/ddetailer" folder. + 2. original model files - "stable-diffusion-webui/models/mmdet" folder. 
+ """ + message = dedent(message) + raise RuntimeError(message) + +if not skip_install: + install() diff --git a/exhm/detailer/dddetailer/misc/ddetailer_example_1.png b/exhm/detailer/dddetailer/misc/ddetailer_example_1.png new file mode 100644 index 0000000000000000000000000000000000000000..7d0d9ec848c4d2c1cddcc0faefd2353c4227d04b Binary files /dev/null and b/exhm/detailer/dddetailer/misc/ddetailer_example_1.png differ diff --git a/exhm/detailer/dddetailer/misc/ddetailer_example_2.png b/exhm/detailer/dddetailer/misc/ddetailer_example_2.png new file mode 100644 index 0000000000000000000000000000000000000000..a6afadc683a6a77e7d9e092ea264b5e11e362f06 Binary files /dev/null and b/exhm/detailer/dddetailer/misc/ddetailer_example_2.png differ diff --git a/exhm/detailer/dddetailer/misc/ddetailer_example_3.gif b/exhm/detailer/dddetailer/misc/ddetailer_example_3.gif new file mode 100644 index 0000000000000000000000000000000000000000..74c0affa37784518689f150fb619623847b03d94 Binary files /dev/null and b/exhm/detailer/dddetailer/misc/ddetailer_example_3.gif differ diff --git a/exhm/detailer/dddetailer/pyproject.toml b/exhm/detailer/dddetailer/pyproject.toml new file mode 100644 index 0000000000000000000000000000000000000000..4c2570653064ed1657cf84690c65f7f61beb6f75 --- /dev/null +++ b/exhm/detailer/dddetailer/pyproject.toml @@ -0,0 +1,29 @@ +[project] +name = "dddetailer" +version = "23.8.0" +description = "An object detection and auto-mask extension for Stable Diffusion web UI." 
+authors = [ + {name = "dowon", email = "ks2515@naver.com"}, +] +requires-python = ">=3.8,<3.12" +readme = "README.md" +license = {text = "MIT"} + +[project.urls] +repository = "https://github.com/Bing-su/dddetailer" + +[tool.isort] +profile = "black" +known_first_party = ["modules", "launch"] + +[tool.black] +line-length = 120 + +[tool.ruff] +select = ["A", "B", "C4", "E", "F", "I001", "ISC", "N", "PIE", "PT", "RET", "SIM", "UP", "W"] +ignore = ["B008", "B905", "E501"] +unfixable = ["F401"] +line-length = 120 + +[tool.ruff.isort] +known-first-party = ["modules", "launch"] diff --git a/exhm/detailer/dddetailer/scripts/dddetailer.py b/exhm/detailer/dddetailer/scripts/dddetailer.py new file mode 100644 index 0000000000000000000000000000000000000000..dfd3b1f80cbcaa7683bf1ef00216f586e0582e77 --- /dev/null +++ b/exhm/detailer/dddetailer/scripts/dddetailer.py @@ -0,0 +1,1057 @@ +import os +import sys +from copy import copy +from pathlib import Path +from textwrap import dedent + +import cv2 +import gradio as gr +import numpy as np +from basicsr.utils.download_util import load_file_from_url +from packaging.version import parse +from PIL import Image + +from launch import run +from modules import ( + devices, + images, + modelloader, + processing, + script_callbacks, + scripts, + shared, +) +from modules.paths import data_path, models_path +from modules.processing import ( + Processed, + StableDiffusionProcessingImg2Img, + StableDiffusionProcessingTxt2Img, +) +from modules.sd_models import model_hash +from modules.shared import cmd_opts, opts, state + +DETECTION_DETAILER = "Detection Detailer" +dd_models_path = os.path.join(models_path, "mmdet") +python = sys.executable + + +def check_ddetailer() -> bool: + try: + from modules.paths import extensions_dir + + extensions_path = Path(extensions_dir) + except ImportError: + from modules.paths import data_path + + extensions_path = Path(data_path, "extensions") + + ddetailer_exists = any(p.is_dir() and 
p.name.startswith("ddetailer") for p in extensions_path.iterdir()) + return not ddetailer_exists + + +def check_install() -> bool: + try: + import mmcv + import mmdet + from mmdet.evaluation import get_classes + except Exception: + return False + + if not hasattr(mmcv, "__version__") or not hasattr(mmdet, "__version__"): + return False + + v1 = parse(mmcv.__version__) >= parse("2.0.0") + v2 = parse(mmdet.__version__) >= parse("3.0.0") + return v1 and v2 + + +def list_models(model_path): + model_list = modelloader.load_models(model_path=model_path, ext_filter=[".pth"]) + + def modeltitle(path, shorthash): + abspath = os.path.abspath(path) + + if abspath.startswith(model_path): + name = abspath.replace(model_path, "") + else: + name = os.path.basename(path) + + if name.startswith(("\\", "/")): + name = name[1:] + + shortname = os.path.splitext(name.replace("/", "_").replace("\\", "_"))[0] + + return f"{name} [{shorthash}]", shortname + + models = [] + for filename in model_list: + h = model_hash(filename) + title, short_model_name = modeltitle(filename, h) + models.append(title) + + return models + + +def startup(): + if not check_ddetailer(): + message = """ + [-] dddetailer: dddetailer doesn't work with the original ddetailer extension. + dddetailer는 원본 ddetailer 확장이 있을 때 동작하지 않습니다. 
+ """ + raise RuntimeError(dedent(message)) + + if not check_install(): + run(f'"{python}" -m pip uninstall -y mmcv mmcv-full mmdet mmengine') + run(f'"{python}" -m pip install openmim', desc="Installing openmim", errdesc="Couldn't install openmim") + run( + f'"{python}" -m mim install mmcv>=2.0.0 mmdet>=3.0.0', + desc="Installing mmdet", + errdesc="Couldn't install mmdet", + ) + + if len(list_models(dd_models_path)) == 0: + print("No detection models found, downloading...") + bbox_path = os.path.join(dd_models_path, "bbox") + segm_path = os.path.join(dd_models_path, "segm") + # bbox + load_file_from_url( + "https://huggingface.co/dustysys/ddetailer/resolve/main/mmdet/bbox/mmdet_anime-face_yolov3.pth", + bbox_path, + ) + load_file_from_url( + "https://raw.githubusercontent.com/Bing-su/dddetailer/master/config/mmdet_anime-face_yolov3.py", + bbox_path, + ) + # segm + load_file_from_url( + "https://github.com/Bing-su/dddetailer/releases/download/segm/mmdet_dd-person_mask2former.pth", + segm_path, + ) + load_file_from_url( + "https://raw.githubusercontent.com/Bing-su/dddetailer/master/config/mmdet_dd-person_mask2former.py", + segm_path, + ) + load_file_from_url( + "https://raw.githubusercontent.com/Bing-su/dddetailer/master/config/mask2former_r50_8xb2-lsj-50e_coco-panoptic.py", + segm_path, + ) + load_file_from_url( + "https://raw.githubusercontent.com/Bing-su/dddetailer/master/config/coco_panoptic.py", + segm_path, + ) + + +startup() + + +def gr_show(visible=True): + return {"visible": visible, "__type__": "update"} + + +def ddetailer_extra_generation_params( + dd_prompt, + dd_neg_prompt, + dd_model_a, + dd_conf_a, + dd_dilation_factor_a, + dd_offset_x_a, + dd_offset_y_a, + dd_preprocess_b, + dd_bitwise_op, + dd_model_b, + dd_conf_b, + dd_dilation_factor_b, + dd_offset_x_b, + dd_offset_y_b, + dd_mask_blur, + dd_denoising_strength, + dd_inpaint_full_res, + dd_inpaint_full_res_padding, + dd_cfg_scale, +): + params = { + "DDetailer prompt": dd_prompt, + "DDetailer neg 
prompt": dd_neg_prompt, + "DDetailer model a": dd_model_a, + "DDetailer conf a": dd_conf_a, + "DDetailer dilation a": dd_dilation_factor_a, + "DDetailer offset x a": dd_offset_x_a, + "DDetailer offset y a": dd_offset_y_a, + "DDetailer preprocess b": dd_preprocess_b, + "DDetailer bitwise": dd_bitwise_op, + "DDetailer model b": dd_model_b, + "DDetailer conf b": dd_conf_b, + "DDetailer dilation b": dd_dilation_factor_b, + "DDetailer offset x b": dd_offset_x_b, + "DDetailer offset y b": dd_offset_y_b, + "DDetailer mask blur": dd_mask_blur, + "DDetailer denoising": dd_denoising_strength, + "DDetailer inpaint full": dd_inpaint_full_res, + "DDetailer inpaint padding": dd_inpaint_full_res_padding, + "DDetailer cfg": dd_cfg_scale, + "Script": DETECTION_DETAILER, + } + if not dd_prompt: + params.pop("DDetailer prompt") + if not dd_neg_prompt: + params.pop("DDetailer neg prompt") + return params + + +class DetectionDetailerScript(scripts.Script): + def title(self): + return DETECTION_DETAILER + + def show(self, is_img2img): + return True + + def ui(self, is_img2img): + import modules.ui + + model_list = list_models(dd_models_path) + model_list.insert(0, "None") + if is_img2img: + info = gr.HTML( + '

Recommended settings: Use from inpaint tab, inpaint at full res ON, denoise < 0.5

' + ) + else: + info = gr.HTML("") + dd_prompt = None + with gr.Group(): + if not is_img2img: + with gr.Row(): + dd_prompt = gr.Textbox( + label="dd_prompt", + elem_id="t2i_dd_prompt", + show_label=False, + lines=3, + placeholder="Ddetailer Prompt", + ) + + with gr.Row(): + dd_neg_prompt = gr.Textbox( + label="dd_neg_prompt", + elem_id="t2i_dd_neg_prompt", + show_label=False, + lines=2, + placeholder="Ddetailer Negative prompt", + ) + + with gr.Row(): + dd_model_a = gr.Dropdown( + label="Primary detection model (A)", + choices=model_list, + value="None", + visible=True, + type="value", + ) + + with gr.Row(): + dd_conf_a = gr.Slider( + label="Detection confidence threshold % (A)", + minimum=0, + maximum=100, + step=1, + value=30, + visible=True, + ) + dd_dilation_factor_a = gr.Slider( + label="Dilation factor (A)", + minimum=0, + maximum=255, + step=1, + value=4, + visible=True, + ) + + with gr.Row(): + dd_offset_x_a = gr.Slider( + label="X offset (A)", + minimum=-200, + maximum=200, + step=1, + value=0, + visible=True, + ) + dd_offset_y_a = gr.Slider( + label="Y offset (A)", + minimum=-200, + maximum=200, + step=1, + value=0, + visible=True, + ) + + with gr.Row(): + dd_preprocess_b = gr.Checkbox( + label="Inpaint model B detections before model A runs", + value=False, + visible=True, + ) + dd_bitwise_op = gr.Radio( + label="Bitwise operation", + choices=["None", "A&B", "A-B"], + value="None", + visible=True, + ) + + br = gr.HTML("
") + + with gr.Group(): + with gr.Row(): + dd_model_b = gr.Dropdown( + label="Secondary detection model (B) (optional)", + choices=model_list, + value="None", + visible=True, + type="value", + ) + + with gr.Row(): + dd_conf_b = gr.Slider( + label="Detection confidence threshold % (B)", + minimum=0, + maximum=100, + step=1, + value=30, + visible=True, + ) + dd_dilation_factor_b = gr.Slider( + label="Dilation factor (B)", + minimum=0, + maximum=255, + step=1, + value=4, + visible=True, + ) + + with gr.Row(): + dd_offset_x_b = gr.Slider( + label="X offset (B)", + minimum=-200, + maximum=200, + step=1, + value=0, + visible=True, + ) + dd_offset_y_b = gr.Slider( + label="Y offset (B)", + minimum=-200, + maximum=200, + step=1, + value=0, + visible=True, + ) + + with gr.Group(): + with gr.Row(): + dd_mask_blur = gr.Slider( + label="Mask blur ", + minimum=0, + maximum=64, + step=1, + value=4, + visible=(not is_img2img), + ) + dd_denoising_strength = gr.Slider( + label="Denoising strength (Inpaint)", + minimum=0.0, + maximum=1.0, + step=0.01, + value=0.4, + visible=(not is_img2img), + ) + + with gr.Row(): + dd_inpaint_full_res = gr.Checkbox( + label="Inpaint at full resolution ", + value=True, + visible=(not is_img2img), + ) + dd_inpaint_full_res_padding = gr.Slider( + label="Inpaint at full resolution padding, pixels ", + minimum=0, + maximum=256, + step=4, + value=32, + visible=(not is_img2img), + ) + + with gr.Row(): + dd_cfg_scale = gr.Slider( + label="CFG Scale", + minimum=0, + maximum=30, + step=0.5, + value=7, + visible=True, + ) + + dd_model_a.change( + lambda modelname: { + dd_model_b: gr_show(modelname != "None"), + dd_conf_a: gr_show(modelname != "None"), + dd_dilation_factor_a: gr_show(modelname != "None"), + dd_offset_x_a: gr_show(modelname != "None"), + dd_offset_y_a: gr_show(modelname != "None"), + }, + inputs=[dd_model_a], + outputs=[ + dd_model_b, + dd_conf_a, + dd_dilation_factor_a, + dd_offset_x_a, + dd_offset_y_a, + ], + ) + + dd_model_b.change( + lambda 
modelname: { + dd_preprocess_b: gr_show(modelname != "None"), + dd_bitwise_op: gr_show(modelname != "None"), + dd_conf_b: gr_show(modelname != "None"), + dd_dilation_factor_b: gr_show(modelname != "None"), + dd_offset_x_b: gr_show(modelname != "None"), + dd_offset_y_b: gr_show(modelname != "None"), + }, + inputs=[dd_model_b], + outputs=[ + dd_preprocess_b, + dd_bitwise_op, + dd_conf_b, + dd_dilation_factor_b, + dd_offset_x_b, + dd_offset_y_b, + ], + ) + if dd_prompt: + self.infotext_fields = ( + (dd_prompt, "DDetailer prompt"), + (dd_neg_prompt, "DDetailer neg prompt"), + (dd_model_a, "DDetailer model a"), + (dd_conf_a, "DDetailer conf a"), + (dd_dilation_factor_a, "DDetailer dilation a"), + (dd_offset_x_a, "DDetailer offset x a"), + (dd_offset_y_a, "DDetailer offset y a"), + (dd_preprocess_b, "DDetailer preprocess b"), + (dd_bitwise_op, "DDetailer bitwise"), + (dd_model_b, "DDetailer model b"), + (dd_conf_b, "DDetailer conf b"), + (dd_dilation_factor_b, "DDetailer dilation b"), + (dd_offset_x_b, "DDetailer offset x b"), + (dd_offset_y_b, "DDetailer offset y b"), + (dd_mask_blur, "DDetailer mask blur"), + (dd_denoising_strength, "DDetailer denoising"), + (dd_inpaint_full_res, "DDetailer inpaint full"), + (dd_inpaint_full_res_padding, "DDetailer inpaint padding"), + (dd_cfg_scale, "DDetailer cfg"), + ) + + ret = [ + info, + dd_model_a, + dd_conf_a, + dd_dilation_factor_a, + dd_offset_x_a, + dd_offset_y_a, + dd_preprocess_b, + dd_bitwise_op, + br, + dd_model_b, + dd_conf_b, + dd_dilation_factor_b, + dd_offset_x_b, + dd_offset_y_b, + dd_mask_blur, + dd_denoising_strength, + dd_inpaint_full_res, + dd_inpaint_full_res_padding, + dd_cfg_scale, + ] + if not is_img2img: + ret += [dd_prompt, dd_neg_prompt] + return ret + + def run( + self, + p, + info, + dd_model_a, + dd_conf_a, + dd_dilation_factor_a, + dd_offset_x_a, + dd_offset_y_a, + dd_preprocess_b, + dd_bitwise_op, + br, + dd_model_b, + dd_conf_b, + dd_dilation_factor_b, + dd_offset_x_b, + dd_offset_y_b, + 
dd_mask_blur, + dd_denoising_strength, + dd_inpaint_full_res, + dd_inpaint_full_res_padding, + dd_cfg_scale, + dd_prompt=None, + dd_neg_prompt=None, + ): + processing.fix_seed(p) + seed = p.seed + subseed = p.subseed + p.batch_size = 1 + ddetail_count = p.n_iter + p.n_iter = 1 + p.do_not_save_grid = True + p.do_not_save_samples = True + is_txt2img = isinstance(p, StableDiffusionProcessingTxt2Img) + info = "" + + # ddetailer info + extra_generation_params = ddetailer_extra_generation_params( + dd_prompt, + dd_neg_prompt, + dd_model_a, + dd_conf_a, + dd_dilation_factor_a, + dd_offset_x_a, + dd_offset_y_a, + dd_preprocess_b, + dd_bitwise_op, + dd_model_b, + dd_conf_b, + dd_dilation_factor_b, + dd_offset_x_b, + dd_offset_y_b, + dd_mask_blur, + dd_denoising_strength, + dd_inpaint_full_res, + dd_inpaint_full_res_padding, + dd_cfg_scale, + ) + p.extra_generation_params.update(extra_generation_params) + + p_txt = copy(p) + if not is_txt2img: + orig_image = p.init_images[0] + else: + img2img_sampler_name = p_txt.sampler_name + # PLMS/UniPC do not support img2img so we just silently switch to DDIM + if p_txt.sampler_name in ["PLMS", "UniPC"]: + img2img_sampler_name = "DDIM" + p_txt_prompt = dd_prompt if dd_prompt else p_txt.prompt + p_txt_neg_prompt = dd_neg_prompt if dd_neg_prompt else p_txt.negative_prompt + p = StableDiffusionProcessingImg2Img( + init_images=None, + resize_mode=0, + denoising_strength=dd_denoising_strength, + mask=None, + mask_blur=dd_mask_blur, + inpainting_fill=1, + inpaint_full_res=dd_inpaint_full_res, + inpaint_full_res_padding=dd_inpaint_full_res_padding, + inpainting_mask_invert=0, + sd_model=p_txt.sd_model, + outpath_samples=p_txt.outpath_samples, + outpath_grids=p_txt.outpath_grids, + prompt=p_txt_prompt, + negative_prompt=p_txt_neg_prompt, + styles=p_txt.styles, + seed=p_txt.seed, + subseed=p_txt.subseed, + subseed_strength=p_txt.subseed_strength, + seed_resize_from_h=p_txt.seed_resize_from_h, + seed_resize_from_w=p_txt.seed_resize_from_w, + 
sampler_name=img2img_sampler_name, + n_iter=p_txt.n_iter, + steps=p_txt.steps, + cfg_scale=p_txt.cfg_scale, + width=p_txt.width, + height=p_txt.height, + tiling=p_txt.tiling, + extra_generation_params=p_txt.extra_generation_params, + ) + p.do_not_save_grid = True + p.do_not_save_samples = True + p.cached_c = [None, None] + p.cached_uc = [None, None] + + p.scripts = p_txt.scripts + p.script_args = p_txt.script_args + + # output info + all_prompts = [] + all_negative_prompts = [] + all_seeds = [] + all_subseeds = [] + infotexts = [] + output_images = [] + + state.job_count = ddetail_count + for n in range(ddetail_count): + devices.torch_gc() + start_seed = seed + n + + all_prompts.append(p_txt.prompt) + all_negative_prompts.append(p_txt.negative_prompt) + all_seeds.append(start_seed) + all_subseeds.append(subseed + n) + + if is_txt2img: + print(f"Processing initial image for output generation {n + 1}.") + p_txt.seed = start_seed + processed = processing.process_images(p_txt) + init_image = processed.images[0] + info = processed.info + if not dd_prompt: + p.prompt = processed.all_prompts[0] + if not dd_neg_prompt: + p.negative_prompt = processed.all_negative_prompts[0] + all_prompts[n] = processed.all_prompts[0] + all_negative_prompts[n] = processed.all_negative_prompts[0] + else: + init_image = orig_image + p.prompt = p_txt.prompt + p.negative_prompt = p_txt.negative_prompt + p.cfg_scale = dd_cfg_scale + + if opts.enable_pnginfo: + init_image.info["parameters"] = info + + infotexts.append(info) + output_images.append(init_image) + + masks_a = [] + masks_b_pre = [] + + # Optional secondary pre-processing run + if dd_model_b != "None" and dd_preprocess_b: + label_b_pre = "B" + results_b_pre = inference(init_image, dd_model_b, dd_conf_b / 100.0, label_b_pre) + masks_b_pre = create_segmasks(results_b_pre) + masks_b_pre = dilate_masks(masks_b_pre, dd_dilation_factor_b, 1) + masks_b_pre = offset_masks(masks_b_pre, dd_offset_x_b, dd_offset_y_b) + if len(masks_b_pre) > 0: + 
results_b_pre = update_result_masks(results_b_pre, masks_b_pre) + segmask_preview_b = create_segmask_preview(results_b_pre, init_image) + shared.state.current_image = segmask_preview_b + if opts.dd_save_previews: + images.save_image( + segmask_preview_b, + opts.outdir_ddetailer_previews, + "", + start_seed, + p.prompt, + opts.samples_format, + p=p, + ) + gen_count = len(masks_b_pre) + state.job_count += gen_count + print(f"Processing {gen_count} model {label_b_pre} detections for output generation {n + 1}.") + p.seed = start_seed + p.init_images = [init_image] + + for i in range(gen_count): + p.image_mask = masks_b_pre[i] + if opts.dd_save_masks: + images.save_image( + masks_b_pre[i], + opts.outdir_ddetailer_masks, + "", + start_seed, + p.prompt, + opts.samples_format, + p=p, + ) + processed = processing.process_images(p) + if not is_txt2img: + p.prompt = processed.all_prompts[0] + p.negative_prompt = processed.all_negative_prompts[0] + p.seed = processed.seed + 1 + p.subseed = processed.subseed + 1 + p.init_images = [processed.images[0]] + + if gen_count > 0: + output_images[n] = processed.images[0] + init_image = processed.images[0] + + else: + print(f"No model B detections for output generation {n} with current settings.") + + # Primary run + if dd_model_a != "None": + label_a = "A" + if dd_model_b != "None" and dd_bitwise_op != "None": + label_a = dd_bitwise_op + results_a = inference(init_image, dd_model_a, dd_conf_a / 100.0, label_a) + masks_a = create_segmasks(results_a) + masks_a = dilate_masks(masks_a, dd_dilation_factor_a, 1) + masks_a = offset_masks(masks_a, dd_offset_x_a, dd_offset_y_a) + if dd_model_b != "None" and dd_bitwise_op != "None": + label_b = "B" + results_b = inference(init_image, dd_model_b, dd_conf_b / 100.0, label_b) + masks_b = create_segmasks(results_b) + masks_b = dilate_masks(masks_b, dd_dilation_factor_b, 1) + masks_b = offset_masks(masks_b, dd_offset_x_b, dd_offset_y_b) + if len(masks_b) > 0: + combined_mask_b = 
combine_masks(masks_b) + for i in reversed(range(len(masks_a))): + if dd_bitwise_op == "A&B": + masks_a[i] = bitwise_and_masks(masks_a[i], combined_mask_b) + elif dd_bitwise_op == "A-B": + masks_a[i] = subtract_masks(masks_a[i], combined_mask_b) + if is_allblack(masks_a[i]): + del masks_a[i] + for result in results_a: + del result[i] + + else: + print("No model B detections to overlap with model A masks") + results_a = [] + masks_a = [] + + if len(masks_a) > 0: + results_a = update_result_masks(results_a, masks_a) + segmask_preview_a = create_segmask_preview(results_a, init_image) + shared.state.current_image = segmask_preview_a + if opts.dd_save_previews: + images.save_image( + segmask_preview_a, + opts.outdir_ddetailer_previews, + "", + start_seed, + p.prompt, + opts.samples_format, + p=p, + ) + gen_count = len(masks_a) + state.job_count += gen_count + print(f"Processing {gen_count} model {label_a} detections for output generation {n + 1}.") + p.seed = start_seed + p.init_images = [init_image] + + for i in range(gen_count): + p.image_mask = masks_a[i] + if opts.dd_save_masks: + images.save_image( + masks_a[i], + opts.outdir_ddetailer_masks, + "", + start_seed, + p.prompt, + opts.samples_format, + p=p, + ) + + processed = processing.process_images(p) + if not is_txt2img: + p.prompt = processed.all_prompts[0] + p.negative_prompt = processed.all_negative_prompts[0] + info = processed.info + all_prompts[n] = processed.all_prompts[0] + all_negative_prompts[n] = processed.all_negative_prompts[0] + p.seed = processed.seed + 1 + p.subseed = processed.subseed + 1 + p.init_images = [processed.images[0]] + + if gen_count > 0: + final_image = processed.images[0] + + if opts.enable_pnginfo: + final_image.info["parameters"] = info + output_images[n] = final_image + infotexts[n] = info + + if opts.samples_save: + images.save_image( + final_image, + p.outpath_samples, + "", + start_seed, + p.prompt, + opts.samples_format, + info=info, + p=p, + ) + + else: + print(f"No model 
{label_a} detections for output generation {n} with current settings.") + + if opts.samples_save: + images.save_image( + init_image, + p.outpath_samples, + "", + start_seed, + p.prompt, + opts.samples_format, + info=info, + p=p, + ) + + state.job = f"Generation {n + 1} out of {state.job_count}" + + if dd_prompt or dd_neg_prompt: + params_txt = os.path.join(data_path, "params.txt") + with open(params_txt, "w", encoding="utf-8") as file: + file.write(infotexts[0]) + + return Processed( + p, + output_images, + seed, + infotexts[0], + all_prompts=all_prompts, + all_negative_prompts=all_negative_prompts, + all_seeds=all_seeds, + all_subseeds=all_subseeds, + infotexts=infotexts, + ) + + +def modeldataset(model_shortname): + path = modelpath(model_shortname) + dataset = "coco" if "mmdet" in path and "segm" in path else "bbox" + return dataset + + +def modelpath(model_shortname): + model_list = modelloader.load_models(model_path=dd_models_path, ext_filter=[".pth"]) + model_h = model_shortname.split("[")[-1].split("]")[0] + for path in model_list: + if model_hash(path) == model_h: + return path + return None + + +def update_result_masks(results, masks): + for i in range(len(masks)): + boolmask = np.array(masks[i], dtype=bool) + results[2][i] = boolmask + return results + + +def create_segmask_preview(results, image): + labels = results[0] + bboxes = results[1] + segms = results[2] + scores = results[3] + + cv2_image = np.array(image) + cv2_image = cv2_image[:, :, ::-1].copy() + + for i in range(len(segms)): + color = np.full_like(cv2_image, np.random.randint(100, 256, (1, 3), dtype=np.uint8)) + alpha = 0.2 + color_image = cv2.addWeighted(cv2_image, alpha, color, 1 - alpha, 0) + cv2_mask = segms[i].astype(np.uint8) * 255 + cv2_mask_bool = np.array(segms[i], dtype=bool) + centroid = np.mean(np.argwhere(cv2_mask_bool), axis=0) + centroid_x, centroid_y = int(centroid[1]), int(centroid[0]) + + cv2_mask_rgb = cv2.merge((cv2_mask, cv2_mask, cv2_mask)) + cv2_image = 
np.where(cv2_mask_rgb == 255, color_image, cv2_image) + text_color = tuple([int(x) for x in (color[0][0] - 100)]) + name = labels[i] + score = scores[i] + score = str(score)[:4] + text = name + ":" + score + cv2.putText( + cv2_image, + text, + (centroid_x - 30, centroid_y), + cv2.FONT_HERSHEY_DUPLEX, + 0.4, + text_color, + 1, + cv2.LINE_AA, + ) + + if len(segms) > 0: + preview_image = Image.fromarray(cv2.cvtColor(cv2_image, cv2.COLOR_BGR2RGB)) + else: + preview_image = image + + return preview_image + + +def is_allblack(mask): + cv2_mask = np.array(mask) + return cv2.countNonZero(cv2_mask) == 0 + + +def bitwise_and_masks(mask1, mask2): + cv2_mask1 = np.array(mask1) + cv2_mask2 = np.array(mask2) + cv2_mask = cv2.bitwise_and(cv2_mask1, cv2_mask2) + mask = Image.fromarray(cv2_mask) + return mask + + +def subtract_masks(mask1, mask2): + cv2_mask1 = np.array(mask1) + cv2_mask2 = np.array(mask2) + cv2_mask = cv2.subtract(cv2_mask1, cv2_mask2) + mask = Image.fromarray(cv2_mask) + return mask + + +def dilate_masks(masks, dilation_factor, iter=1): + if dilation_factor == 0: + return masks + dilated_masks = [] + kernel = np.ones((dilation_factor, dilation_factor), np.uint8) + for i in range(len(masks)): + cv2_mask = np.array(masks[i]) + dilated_mask = cv2.dilate(cv2_mask, kernel, iter) + dilated_masks.append(Image.fromarray(dilated_mask)) + return dilated_masks + + +def offset_masks(masks, offset_x, offset_y): + if offset_x == 0 and offset_y == 0: + return masks + offset_masks = [] + for i in range(len(masks)): + cv2_mask = np.array(masks[i]) + offset_mask = cv2_mask.copy() + offset_mask = np.roll(offset_mask, -offset_y, axis=0) + offset_mask = np.roll(offset_mask, offset_x, axis=1) + + offset_masks.append(Image.fromarray(offset_mask)) + return offset_masks + + +def combine_masks(masks): + initial_cv2_mask = np.array(masks[0]) + combined_cv2_mask = initial_cv2_mask + for i in range(1, len(masks)): + cv2_mask = np.array(masks[i]) + combined_cv2_mask = 
cv2.bitwise_or(combined_cv2_mask, cv2_mask) + + combined_mask = Image.fromarray(combined_cv2_mask) + return combined_mask + + +def on_ui_settings(): + shared.opts.add_option( + "dd_save_previews", + shared.OptionInfo(False, "Save mask previews", section=("ddetailer", DETECTION_DETAILER)), + ) + shared.opts.add_option( + "outdir_ddetailer_previews", + shared.OptionInfo( + "extensions/dddetailer/outputs/masks-previews", + "Output directory for mask previews", + section=("ddetailer", DETECTION_DETAILER), + ), + ) + shared.opts.add_option( + "dd_save_masks", + shared.OptionInfo(False, "Save masks", section=("ddetailer", DETECTION_DETAILER)), + ) + shared.opts.add_option( + "outdir_ddetailer_masks", + shared.OptionInfo( + "extensions/dddetailer/outputs/masks", + "Output directory for masks", + section=("ddetailer", DETECTION_DETAILER), + ), + ) + + +def create_segmasks(results): + segms = results[2] + segmasks = [] + for i in range(len(segms)): + cv2_mask = segms[i].astype(np.uint8) * 255 + mask = Image.fromarray(cv2_mask) + segmasks.append(mask) + + return segmasks + + +from mmdet.apis import inference_detector, init_detector +from mmdet.evaluation import get_classes + + +def get_device(): + device = devices.get_optimal_device_name() + if device == "mps": + return device + if any(getattr(cmd_opts, vram, False) for vram in ["lowvram", "medvram"]): + return "cpu" + return device + + +def inference(image, modelname, conf_thres, label): + path = modelpath(modelname) + if "mmdet" in path and "bbox" in path: + results = inference_mmdet_bbox(image, modelname, conf_thres, label) + elif "mmdet" in path and "segm" in path: + results = inference_mmdet_segm(image, modelname, conf_thres, label) + return results + + +def inference_mmdet_segm(image, modelname, conf_thres, label): + model_checkpoint = modelpath(modelname) + model_config = os.path.splitext(model_checkpoint)[0] + ".py" + model_device = get_device() + model = init_detector(model_config, model_checkpoint, 
device=model_device) + mmdet_results = inference_detector(model, np.array(image)).pred_instances + bboxes = mmdet_results.bboxes.cpu().numpy() + segms = mmdet_results.masks.cpu().numpy() + scores = mmdet_results.scores.cpu().numpy() + dataset = modeldataset(modelname) + classes = get_classes(dataset) + + n, m = bboxes.shape + if n == 0: + return [[], [], [], []] + labels = mmdet_results.labels + filter_inds = np.where(scores > conf_thres)[0] + results = [[], [], [], []] + for i in filter_inds: + results[0].append(label + "-" + classes[labels[i]]) + results[1].append(bboxes[i]) + results[2].append(segms[i]) + results[3].append(scores[i]) + + return results + + +def inference_mmdet_bbox(image, modelname, conf_thres, label): + model_checkpoint = modelpath(modelname) + model_config = os.path.splitext(model_checkpoint)[0] + ".py" + model_device = get_device() + model = init_detector(model_config, model_checkpoint, device=model_device) + output = inference_detector(model, np.array(image)).pred_instances + cv2_image = np.array(image) + cv2_image = cv2_image[:, :, ::-1].copy() + cv2_gray = cv2.cvtColor(cv2_image, cv2.COLOR_BGR2GRAY) + + segms = [] + for x0, y0, x1, y1 in output.bboxes: + cv2_mask = np.zeros((cv2_gray.shape), np.uint8) + cv2.rectangle(cv2_mask, (int(x0), int(y0)), (int(x1), int(y1)), 255, -1) + cv2_mask_bool = cv2_mask.astype(bool) + segms.append(cv2_mask_bool) + + n, m = output.bboxes.shape + if n == 0: + return [[], [], [], []] + bboxes = output.bboxes.cpu().numpy() + scores = output.scores.cpu().numpy() + filter_inds = np.where(scores > conf_thres)[0] + results = [[], [], [], []] + for i in filter_inds: + results[0].append(label) + results[1].append(bboxes[i]) + results[2].append(segms[i]) + results[3].append(scores[i]) + + return results + + +script_callbacks.on_ui_settings(on_ui_settings) diff --git a/exhm/detailer/ddetailer/.gitignore b/exhm/detailer/ddetailer/.gitignore new file mode 100644 index 
0000000000000000000000000000000000000000..b0f67e2f8dbca74912bc5ab9e8fef0d394c8ea73 --- /dev/null +++ b/exhm/detailer/ddetailer/.gitignore @@ -0,0 +1,8 @@ +__pycache__ +*.ckpt +*.pth +/tmp +/outputs +/log +.vscode +/test-cases \ No newline at end of file diff --git a/exhm/detailer/ddetailer/README.md b/exhm/detailer/ddetailer/README.md new file mode 100644 index 0000000000000000000000000000000000000000..a553bf00b6f3f0830e10105462b1929c33e1f43e --- /dev/null +++ b/exhm/detailer/ddetailer/README.md @@ -0,0 +1,44 @@ +Detection and img2img have come a long way. This project is no longer maintained and there are now several alternatives for this function. See [μ Detection Detailer](https://github.com/wkpark/uddetailer) or [adetailer](https://github.com/Bing-su/adetailer) implementations. + +# Detection Detailer +An object detection and auto-mask extension for [Stable Diffusion web UI](https://github.com/AUTOMATIC1111/stable-diffusion-webui). See [Installation](https://github.com/dustysys/ddetailer#installation). + +![adoringfan](/misc/ddetailer_example_1.png) + +### Segmentation +Default models enable person and face instance segmentation. + +![amgothic](/misc/ddetailer_example_2.png) + +### Detailing +With full-resolution inpainting, the extension is handy for improving faces without the hassle of manual masking. + +![zion](/misc/ddetailer_example_3.gif) + +## Installation +1. Use `git clone https://github.com/dustysys/ddetailer.git` from your SD web UI `/extensions` folder. Alternatively, install from the extensions tab with url `https://github.com/dustysys/ddetailer` +2. Start or reload SD web UI. + +The models and dependencies should download automatically. To install them manually, follow the [official instructions for installing mmdet](https://mmcv.readthedocs.io/en/latest/get_started/installation.html#install-with-mim-recommended). 
The models can be [downloaded here](https://huggingface.co/dustysys/ddetailer) and should be placed in `/models/mmdet/bbox` for bounding box (`anime-face_yolov3`) or `/models/mmdet/segm` for instance segmentation models (`dd-person_mask2former`). See the [MMDetection docs](https://mmdetection.readthedocs.io/en/latest/1_exist_data_model.html) for guidance on training your own models. For using official MMDetection pretrained models see [here](https://github.com/dustysys/ddetailer/issues/5#issuecomment-1311231989), these are trained for photorealism. See [Troubleshooting](https://github.com/dustysys/ddetailer#troubleshooting) if you encounter issues during installation. + +## Usage +Select Detection Detailer as the script in SD web UI to use the extension. Click 'Generate' to run the script. Here are some tips: +- `anime-face_yolov3` can detect the bounding box of faces as the primary model while `dd-person_mask2former` isolates the head's silhouette as the secondary model by using the bitwise AND option. Refer to [this example](https://github.com/dustysys/ddetailer/issues/4#issuecomment-1311200268). +- The dilation factor expands the mask, while the x & y offsets move the mask around. +- The script is available in txt2img mode as well and can improve the quality of your 10 pulls with moderate settings (low denoise). + +## Troubleshooting +If you get the message ERROR: 'Failed building wheel for pycocotools' follow [these steps](https://github.com/dustysys/ddetailer/issues/1#issuecomment-1309415543). + +Any other issues installing, open an [issue](https://github.com/dustysys/ddetailer/issues). + +## Credits +hysts/[anime-face-detector](https://github.com/hysts/anime-face-detector) - Creator of `anime-face_yolov3`, which has impressive performance on a variety of art styles. + +skytnt/[anime-segmentation](https://huggingface.co/datasets/skytnt/anime-segmentation) - Synthetic dataset used to train `dd-person_mask2former`. 
+ +jerryli27/[AniSeg](https://github.com/jerryli27/AniSeg) - Annotated dataset used to train `dd-person_mask2former`. + +open-mmlab/[mmdetection](https://github.com/open-mmlab/mmdetection) - Object detection toolset. `dd-person_mask2former` was trained via transfer learning using their [R-50 Mask2Former instance segmentation model](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask2former#instance-segmentation) as a base. + +AUTOMATIC1111/[stable-diffusion-webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui) - Web UI for Stable Diffusion, base application for this extension. diff --git a/exhm/detailer/ddetailer/misc/ddetailer_example_1.png b/exhm/detailer/ddetailer/misc/ddetailer_example_1.png new file mode 100644 index 0000000000000000000000000000000000000000..7d0d9ec848c4d2c1cddcc0faefd2353c4227d04b Binary files /dev/null and b/exhm/detailer/ddetailer/misc/ddetailer_example_1.png differ diff --git a/exhm/detailer/ddetailer/misc/ddetailer_example_2.png b/exhm/detailer/ddetailer/misc/ddetailer_example_2.png new file mode 100644 index 0000000000000000000000000000000000000000..a6afadc683a6a77e7d9e092ea264b5e11e362f06 Binary files /dev/null and b/exhm/detailer/ddetailer/misc/ddetailer_example_2.png differ diff --git a/exhm/detailer/ddetailer/misc/ddetailer_example_3.gif b/exhm/detailer/ddetailer/misc/ddetailer_example_3.gif new file mode 100644 index 0000000000000000000000000000000000000000..74c0affa37784518689f150fb619623847b03d94 Binary files /dev/null and b/exhm/detailer/ddetailer/misc/ddetailer_example_3.gif differ diff --git a/exhm/detailer/ddetailer/scripts/__pycache__/ddetailer.cpython-310.pyc b/exhm/detailer/ddetailer/scripts/__pycache__/ddetailer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c0f7a33688953943b5179deb56005212f2188b76 Binary files /dev/null and b/exhm/detailer/ddetailer/scripts/__pycache__/ddetailer.cpython-310.pyc differ diff --git a/exhm/detailer/ddetailer/scripts/ddetailer.py 
b/exhm/detailer/ddetailer/scripts/ddetailer.py new file mode 100644 index 0000000000000000000000000000000000000000..7841d8ec6ccc42fcae069b13c0a7b32ca4288e50 --- /dev/null +++ b/exhm/detailer/ddetailer/scripts/ddetailer.py @@ -0,0 +1,536 @@ +import os +import sys +import cv2 +from PIL import Image +import numpy as np +import gradio as gr + +from modules import processing, images +from modules import scripts, script_callbacks, shared, devices, modelloader +from modules.processing import Processed, StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img +from modules.shared import opts, cmd_opts, state +from modules.sd_models import model_hash +from modules.paths import models_path +from basicsr.utils.download_util import load_file_from_url + +dd_models_path = os.path.join(models_path, "mmdet") + +def list_models(model_path): + model_list = modelloader.load_models(model_path=model_path, ext_filter=[".pth"]) + + def modeltitle(path, shorthash): + abspath = os.path.abspath(path) + + if abspath.startswith(model_path): + name = abspath.replace(model_path, '') + else: + name = os.path.basename(path) + + if name.startswith("\\") or name.startswith("/"): + name = name[1:] + + shortname = os.path.splitext(name.replace("/", "_").replace("\\", "_"))[0] + + return f'{name} [{shorthash}]', shortname + + models = [] + for filename in model_list: + h = model_hash(filename) + title, short_model_name = modeltitle(filename, h) + models.append(title) + + return models + +def startup(): + from launch import is_installed, run + if not is_installed("mmdet"): + python = sys.executable + run(f'"{python}" -m pip install -U openmim', desc="Installing openmim", errdesc="Couldn't install openmim") + run(f'"{python}" -m mim install mmcv-full', desc=f"Installing mmcv-full", errdesc=f"Couldn't install mmcv-full") + run(f'"{python}" -m pip install mmdet', desc=f"Installing mmdet", errdesc=f"Couldn't install mmdet") + + if (len(list_models(dd_models_path)) == 0): + print("No detection 
models found, downloading...") + bbox_path = os.path.join(dd_models_path, "bbox") + segm_path = os.path.join(dd_models_path, "segm") + load_file_from_url("https://huggingface.co/dustysys/ddetailer/resolve/main/mmdet/bbox/mmdet_anime-face_yolov3.pth", bbox_path) + load_file_from_url("https://huggingface.co/dustysys/ddetailer/raw/main/mmdet/bbox/mmdet_anime-face_yolov3.py", bbox_path) + load_file_from_url("https://huggingface.co/dustysys/ddetailer/resolve/main/mmdet/segm/mmdet_dd-person_mask2former.pth", segm_path) + load_file_from_url("https://huggingface.co/dustysys/ddetailer/raw/main/mmdet/segm/mmdet_dd-person_mask2former.py", segm_path) + +startup() + +def gr_show(visible=True): + return {"visible": visible, "__type__": "update"} + +class DetectionDetailerScript(scripts.Script): + def title(self): + return "Detection Detailer" + + def show(self, is_img2img): + return True + + def ui(self, is_img2img): + import modules.ui + + model_list = list_models(dd_models_path) + model_list.insert(0, "None") + if is_img2img: + info = gr.HTML("

Recommended settings: Use from inpaint tab, inpaint at full res ON, denoise <0.5

") + else: + info = gr.HTML("") + with gr.Group(): + with gr.Row(): + dd_model_a = gr.Dropdown(label="Primary detection model (A)", choices=model_list,value = "None", visible=True, type="value") + + with gr.Row(): + dd_conf_a = gr.Slider(label='Detection confidence threshold % (A)', minimum=0, maximum=100, step=1, value=30, visible=False) + dd_dilation_factor_a = gr.Slider(label='Dilation factor (A)', minimum=0, maximum=255, step=1, value=4, visible=False) + + with gr.Row(): + dd_offset_x_a = gr.Slider(label='X offset (A)', minimum=-200, maximum=200, step=1, value=0, visible=False) + dd_offset_y_a = gr.Slider(label='Y offset (A)', minimum=-200, maximum=200, step=1, value=0, visible=False) + + with gr.Row(): + dd_preprocess_b = gr.Checkbox(label='Inpaint model B detections before model A runs', value=False, visible=False) + dd_bitwise_op = gr.Radio(label='Bitwise operation', choices=['None', 'A&B', 'A-B'], value="None", visible=False) + + br = gr.HTML("
") + + with gr.Group(): + with gr.Row(): + dd_model_b = gr.Dropdown(label="Secondary detection model (B) (optional)", choices=model_list,value = "None", visible =False, type="value") + + with gr.Row(): + dd_conf_b = gr.Slider(label='Detection confidence threshold % (B)', minimum=0, maximum=100, step=1, value=30, visible=False) + dd_dilation_factor_b = gr.Slider(label='Dilation factor (B)', minimum=0, maximum=255, step=1, value=4, visible=False) + + with gr.Row(): + dd_offset_x_b = gr.Slider(label='X offset (B)', minimum=-200, maximum=200, step=1, value=0, visible=False) + dd_offset_y_b = gr.Slider(label='Y offset (B)', minimum=-200, maximum=200, step=1, value=0, visible=False) + + with gr.Group(): + with gr.Row(): + dd_mask_blur = gr.Slider(label='Mask blur ', minimum=0, maximum=64, step=1, value=4, visible=(not is_img2img)) + dd_denoising_strength = gr.Slider(label='Denoising strength (Inpaint)', minimum=0.0, maximum=1.0, step=0.01, value=0.4, visible=(not is_img2img)) + + with gr.Row(): + dd_inpaint_full_res = gr.Checkbox(label='Inpaint at full resolution ', value=True, visible = (not is_img2img)) + dd_inpaint_full_res_padding = gr.Slider(label='Inpaint at full resolution padding, pixels ', minimum=0, maximum=256, step=4, value=32, visible=(not is_img2img)) + + dd_model_a.change( + lambda modelname: { + dd_model_b:gr_show( modelname != "None" ), + dd_conf_a:gr_show( modelname != "None" ), + dd_dilation_factor_a:gr_show( modelname != "None"), + dd_offset_x_a:gr_show( modelname != "None" ), + dd_offset_y_a:gr_show( modelname != "None" ) + + }, + inputs= [dd_model_a], + outputs =[dd_model_b, dd_conf_a, dd_dilation_factor_a, dd_offset_x_a, dd_offset_y_a] + ) + + dd_model_b.change( + lambda modelname: { + dd_preprocess_b:gr_show( modelname != "None" ), + dd_bitwise_op:gr_show( modelname != "None" ), + dd_conf_b:gr_show( modelname != "None" ), + dd_dilation_factor_b:gr_show( modelname != "None"), + dd_offset_x_b:gr_show( modelname != "None" ), + dd_offset_y_b:gr_show( 
modelname != "None" ) + }, + inputs= [dd_model_b], + outputs =[dd_preprocess_b, dd_bitwise_op, dd_conf_b, dd_dilation_factor_b, dd_offset_x_b, dd_offset_y_b] + ) + + return [info, + dd_model_a, + dd_conf_a, dd_dilation_factor_a, + dd_offset_x_a, dd_offset_y_a, + dd_preprocess_b, dd_bitwise_op, + br, + dd_model_b, + dd_conf_b, dd_dilation_factor_b, + dd_offset_x_b, dd_offset_y_b, + dd_mask_blur, dd_denoising_strength, + dd_inpaint_full_res, dd_inpaint_full_res_padding + ] + + def run(self, p, info, + dd_model_a, + dd_conf_a, dd_dilation_factor_a, + dd_offset_x_a, dd_offset_y_a, + dd_preprocess_b, dd_bitwise_op, + br, + dd_model_b, + dd_conf_b, dd_dilation_factor_b, + dd_offset_x_b, dd_offset_y_b, + dd_mask_blur, dd_denoising_strength, + dd_inpaint_full_res, dd_inpaint_full_res_padding): + + processing.fix_seed(p) + initial_info = None + seed = p.seed + p.batch_size = 1 + ddetail_count = p.n_iter + p.n_iter = 1 + p.do_not_save_grid = True + p.do_not_save_samples = True + is_txt2img = isinstance(p, StableDiffusionProcessingTxt2Img) + if (not is_txt2img): + orig_image = p.init_images[0] + else: + p_txt = p + p = StableDiffusionProcessingImg2Img( + init_images = None, + resize_mode = 0, + denoising_strength = dd_denoising_strength, + mask = None, + mask_blur= dd_mask_blur, + inpainting_fill = 1, + inpaint_full_res = dd_inpaint_full_res, + inpaint_full_res_padding= dd_inpaint_full_res_padding, + inpainting_mask_invert= 0, + sd_model=p_txt.sd_model, + outpath_samples=p_txt.outpath_samples, + outpath_grids=p_txt.outpath_grids, + prompt=p_txt.prompt, + negative_prompt=p_txt.negative_prompt, + styles=p_txt.styles, + seed=p_txt.seed, + subseed=p_txt.subseed, + subseed_strength=p_txt.subseed_strength, + seed_resize_from_h=p_txt.seed_resize_from_h, + seed_resize_from_w=p_txt.seed_resize_from_w, + sampler_name=p_txt.sampler_name, + n_iter=p_txt.n_iter, + steps=p_txt.steps, + cfg_scale=p_txt.cfg_scale, + width=p_txt.width, + height=p_txt.height, + tiling=p_txt.tiling, + ) + 
p.do_not_save_grid = True + p.do_not_save_samples = True + output_images = [] + state.job_count = ddetail_count + for n in range(ddetail_count): + devices.torch_gc() + start_seed = seed + n + if ( is_txt2img ): + print(f"Processing initial image for output generation {n + 1}.") + p_txt.seed = start_seed + processed = processing.process_images(p_txt) + init_image = processed.images[0] + else: + init_image = orig_image + + output_images.append(init_image) + masks_a = [] + masks_b_pre = [] + + # Optional secondary pre-processing run + if (dd_model_b != "None" and dd_preprocess_b): + label_b_pre = "B" + results_b_pre = inference(init_image, dd_model_b, dd_conf_b/100.0, label_b_pre) + masks_b_pre = create_segmasks(results_b_pre) + masks_b_pre = dilate_masks(masks_b_pre, dd_dilation_factor_b, 1) + masks_b_pre = offset_masks(masks_b_pre,dd_offset_x_b, dd_offset_y_b) + if (len(masks_b_pre) > 0): + results_b_pre = update_result_masks(results_b_pre, masks_b_pre) + segmask_preview_b = create_segmask_preview(results_b_pre, init_image) + shared.state.current_image = segmask_preview_b + if ( opts.dd_save_previews): + images.save_image(segmask_preview_b, opts.outdir_ddetailer_previews, "", start_seed, p.prompt, opts.samples_format, p=p) + gen_count = len(masks_b_pre) + state.job_count += gen_count + print(f"Processing {gen_count} model {label_b_pre} detections for output generation {n + 1}.") + p.seed = start_seed + p.init_images = [init_image] + + for i in range(gen_count): + p.image_mask = masks_b_pre[i] + if ( opts.dd_save_masks): + images.save_image(masks_b_pre[i], opts.outdir_ddetailer_masks, "", start_seed, p.prompt, opts.samples_format, p=p) + processed = processing.process_images(p) + p.seed = processed.seed + 1 + p.init_images = processed.images + + if (gen_count > 0): + output_images[n] = processed.images[0] + init_image = processed.images[0] + + else: + print(f"No model B detections for output generation {n} with current settings.") + + # Primary run + if (dd_model_a 
!= "None"): + label_a = "A" + if (dd_model_b != "None" and dd_bitwise_op != "None"): + label_a = dd_bitwise_op + results_a = inference(init_image, dd_model_a, dd_conf_a/100.0, label_a) + masks_a = create_segmasks(results_a) + masks_a = dilate_masks(masks_a, dd_dilation_factor_a, 1) + masks_a = offset_masks(masks_a,dd_offset_x_a, dd_offset_y_a) + if (dd_model_b != "None" and dd_bitwise_op != "None"): + label_b = "B" + results_b = inference(init_image, dd_model_b, dd_conf_b/100.0, label_b) + masks_b = create_segmasks(results_b) + masks_b = dilate_masks(masks_b, dd_dilation_factor_b, 1) + masks_b = offset_masks(masks_b,dd_offset_x_b, dd_offset_y_b) + if (len(masks_b) > 0): + combined_mask_b = combine_masks(masks_b) + for i in reversed(range(len(masks_a))): + if (dd_bitwise_op == "A&B"): + masks_a[i] = bitwise_and_masks(masks_a[i], combined_mask_b) + elif (dd_bitwise_op == "A-B"): + masks_a[i] = subtract_masks(masks_a[i], combined_mask_b) + if (is_allblack(masks_a[i])): + del masks_a[i] + for result in results_a: + del result[i] + + else: + print("No model B detections to overlap with model A masks") + results_a = [] + masks_a = [] + + if (len(masks_a) > 0): + results_a = update_result_masks(results_a, masks_a) + segmask_preview_a = create_segmask_preview(results_a, init_image) + shared.state.current_image = segmask_preview_a + if ( opts.dd_save_previews): + images.save_image(segmask_preview_a, opts.outdir_ddetailer_previews, "", start_seed, p.prompt, opts.samples_format, p=p) + gen_count = len(masks_a) + state.job_count += gen_count + print(f"Processing {gen_count} model {label_a} detections for output generation {n + 1}.") + p.seed = start_seed + p.init_images = [init_image] + + for i in range(gen_count): + p.image_mask = masks_a[i] + if ( opts.dd_save_masks): + images.save_image(masks_a[i], opts.outdir_ddetailer_masks, "", start_seed, p.prompt, opts.samples_format, p=p) + + processed = processing.process_images(p) + if initial_info is None: + initial_info = 
processed.info + p.seed = processed.seed + 1 + p.init_images = processed.images + + if (gen_count > 0): + output_images[n] = processed.images[0] + if ( opts.samples_save ): + images.save_image(processed.images[0], p.outpath_samples, "", start_seed, p.prompt, opts.samples_format, info=initial_info, p=p) + + else: + print(f"No model {label_a} detections for output generation {n} with current settings.") + state.job = f"Generation {n + 1} out of {state.job_count}" + if (initial_info is None): + initial_info = "No detections found." + + return Processed(p, output_images, seed, initial_info) + +def modeldataset(model_shortname): + path = modelpath(model_shortname) + if ("mmdet" in path and "segm" in path): + dataset = 'coco' + else: + dataset = 'bbox' + return dataset + +def modelpath(model_shortname): + model_list = modelloader.load_models(model_path=dd_models_path, ext_filter=[".pth"]) + model_h = model_shortname.split("[")[-1].split("]")[0] + for path in model_list: + if ( model_hash(path) == model_h): + return path + +def update_result_masks(results, masks): + for i in range(len(masks)): + boolmask = np.array(masks[i], dtype=bool) + results[2][i] = boolmask + return results + +def create_segmask_preview(results, image): + labels = results[0] + bboxes = results[1] + segms = results[2] + + cv2_image = np.array(image) + cv2_image = cv2_image[:, :, ::-1].copy() + + for i in range(len(segms)): + color = np.full_like(cv2_image, np.random.randint(100, 256, (1, 3), dtype=np.uint8)) + alpha = 0.2 + color_image = cv2.addWeighted(cv2_image, alpha, color, 1-alpha, 0) + cv2_mask = segms[i].astype(np.uint8) * 255 + cv2_mask_bool = np.array(segms[i], dtype=bool) + centroid = np.mean(np.argwhere(cv2_mask_bool),axis=0) + centroid_x, centroid_y = int(centroid[1]), int(centroid[0]) + + cv2_mask_rgb = cv2.merge((cv2_mask, cv2_mask, cv2_mask)) + cv2_image = np.where(cv2_mask_rgb == 255, color_image, cv2_image) + text_color = tuple([int(x) for x in ( color[0][0] - 100 )]) + name = 
labels[i] + score = bboxes[i][4] + score = str(score)[:4] + text = name + ":" + score + cv2.putText(cv2_image, text, (centroid_x - 30, centroid_y), cv2.FONT_HERSHEY_DUPLEX, 0.4, text_color, 1, cv2.LINE_AA) + + if ( len(segms) > 0): + preview_image = Image.fromarray(cv2.cvtColor(cv2_image, cv2.COLOR_BGR2RGB)) + else: + preview_image = image + + return preview_image + +def is_allblack(mask): + cv2_mask = np.array(mask) + return cv2.countNonZero(cv2_mask) == 0 + +def bitwise_and_masks(mask1, mask2): + cv2_mask1 = np.array(mask1) + cv2_mask2 = np.array(mask2) + cv2_mask = cv2.bitwise_and(cv2_mask1, cv2_mask2) + mask = Image.fromarray(cv2_mask) + return mask + +def subtract_masks(mask1, mask2): + cv2_mask1 = np.array(mask1) + cv2_mask2 = np.array(mask2) + cv2_mask = cv2.subtract(cv2_mask1, cv2_mask2) + mask = Image.fromarray(cv2_mask) + return mask + +def dilate_masks(masks, dilation_factor, iter=1): + if dilation_factor == 0: + return masks + dilated_masks = [] + kernel = np.ones((dilation_factor,dilation_factor), np.uint8) + for i in range(len(masks)): + cv2_mask = np.array(masks[i]) + dilated_mask = cv2.dilate(cv2_mask, kernel, iter) + dilated_masks.append(Image.fromarray(dilated_mask)) + return dilated_masks + +def offset_masks(masks, offset_x, offset_y): + if (offset_x == 0 and offset_y == 0): + return masks + offset_masks = [] + for i in range(len(masks)): + cv2_mask = np.array(masks[i]) + offset_mask = cv2_mask.copy() + offset_mask = np.roll(offset_mask, -offset_y, axis=0) + offset_mask = np.roll(offset_mask, offset_x, axis=1) + + offset_masks.append(Image.fromarray(offset_mask)) + return offset_masks + +def combine_masks(masks): + initial_cv2_mask = np.array(masks[0]) + combined_cv2_mask = initial_cv2_mask + for i in range(1, len(masks)): + cv2_mask = np.array(masks[i]) + combined_cv2_mask = cv2.bitwise_or(combined_cv2_mask, cv2_mask) + + combined_mask = Image.fromarray(combined_cv2_mask) + return combined_mask + +def on_ui_settings(): + 
shared.opts.add_option("dd_save_previews", shared.OptionInfo(False, "Save mask previews", section=("ddetailer", "Detection Detailer"))) + shared.opts.add_option("outdir_ddetailer_previews", shared.OptionInfo("extensions/ddetailer/outputs/masks-previews", 'Output directory for mask previews', section=("ddetailer", "Detection Detailer"))) + shared.opts.add_option("dd_save_masks", shared.OptionInfo(False, "Save masks", section=("ddetailer", "Detection Detailer"))) + shared.opts.add_option("outdir_ddetailer_masks", shared.OptionInfo("extensions/ddetailer/outputs/masks", 'Output directory for masks', section=("ddetailer", "Detection Detailer"))) + +def create_segmasks(results): + segms = results[2] + segmasks = [] + for i in range(len(segms)): + cv2_mask = segms[i].astype(np.uint8) * 255 + mask = Image.fromarray(cv2_mask) + segmasks.append(mask) + + return segmasks + +import mmcv +from mmdet.core import get_classes +from mmdet.apis import (inference_detector, + init_detector) + +def get_device(): + device_id = shared.cmd_opts.device_id + if device_id is not None: + cuda_device = f"cuda:{device_id}" + else: + cuda_device = "cpu" + return cuda_device + +def inference(image, modelname, conf_thres, label): + path = modelpath(modelname) + if ( "mmdet" in path and "bbox" in path ): + results = inference_mmdet_bbox(image, modelname, conf_thres, label) + elif ( "mmdet" in path and "segm" in path): + results = inference_mmdet_segm(image, modelname, conf_thres, label) + return results + +def inference_mmdet_segm(image, modelname, conf_thres, label): + model_checkpoint = modelpath(modelname) + model_config = os.path.splitext(model_checkpoint)[0] + ".py" + model_device = get_device() + model = init_detector(model_config, model_checkpoint, device=model_device) + mmdet_results = inference_detector(model, np.array(image)) + bbox_results, segm_results = mmdet_results + dataset = modeldataset(modelname) + classes = get_classes(dataset) + labels = [ + np.full(bbox.shape[0], i, 
dtype=np.int32) + for i, bbox in enumerate(bbox_results) + ] + n,m = bbox_results[0].shape + if (n == 0): + return [[],[],[]] + labels = np.concatenate(labels) + bboxes = np.vstack(bbox_results) + segms = mmcv.concat_list(segm_results) + filter_inds = np.where(bboxes[:,-1] > conf_thres)[0] + results = [[],[],[]] + for i in filter_inds: + results[0].append(label + "-" + classes[labels[i]]) + results[1].append(bboxes[i]) + results[2].append(segms[i]) + + return results + +def inference_mmdet_bbox(image, modelname, conf_thres, label): + model_checkpoint = modelpath(modelname) + model_config = os.path.splitext(model_checkpoint)[0] + ".py" + model_device = get_device() + model = init_detector(model_config, model_checkpoint, device=model_device) + results = inference_detector(model, np.array(image)) + cv2_image = np.array(image) + cv2_image = cv2_image[:, :, ::-1].copy() + cv2_gray = cv2.cvtColor(cv2_image, cv2.COLOR_BGR2GRAY) + + segms = [] + for (x0, y0, x1, y1, conf) in results[0]: + cv2_mask = np.zeros((cv2_gray.shape), np.uint8) + cv2.rectangle(cv2_mask, (int(x0), int(y0)), (int(x1), int(y1)), 255, -1) + cv2_mask_bool = cv2_mask.astype(bool) + segms.append(cv2_mask_bool) + + n,m = results[0].shape + if (n == 0): + return [[],[],[]] + bboxes = np.vstack(results[0]) + filter_inds = np.where(bboxes[:,-1] > conf_thres)[0] + results = [[],[],[]] + for i in filter_inds: + results[0].append(label) + results[1].append(bboxes[i]) + results[2].append(segms[i]) + + return results + +script_callbacks.on_ui_settings(on_ui_settings) diff --git a/exhm/detailer/sd-webui-ddsd-orig/.gitignore b/exhm/detailer/sd-webui-ddsd-orig/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..0a986a396cccd33787225afb7f9ad51afe25f45c --- /dev/null +++ b/exhm/detailer/sd-webui-ddsd-orig/.gitignore @@ -0,0 +1,170 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ 
+develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 
+#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/#use-with-ide +.pdm.toml + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. +#.idea +*.pt +*.pth +*.ckpt +*.safetensors +models/control_sd15_scribble.pth +detected_maps/ + +# Ignore all .ddcfg files except for Empty.ddcfg +config/*.ddcfg +!config/Empty.ddcfg \ No newline at end of file diff --git a/exhm/detailer/sd-webui-ddsd-orig/README.md b/exhm/detailer/sd-webui-ddsd-orig/README.md new file mode 100644 index 0000000000000000000000000000000000000000..8b369f0bbf0a071177604a92c951fce2fa4078b6 --- /dev/null +++ b/exhm/detailer/sd-webui-ddsd-orig/README.md @@ -0,0 +1,108 @@ +# sd-webui-ddsd +자동으로 동작하는 후보정 작업 확장. + +## What is +### Outpaint +#### Outpaint How to use +1. 증가시킬 픽셀을 선택 +2. 증가시킬 방향 선택 + 1. 방향이 None이면 미동작 +3. 증가시킬때 사용할 프롬프트 작성(전체 인페인팅시 이용) + 1. 비어있을때 원본 프롬프트 사용 +4. Denoise, CFG, Step 선택 + 1. Step은 최소 원본 Step 2 ~ 3배 이상 적절한 값 요구 +5. 생성! +### Upscale +이미지를 특정 크기로 잘라내어 타일별 업스케일을 하는 도구. 업스케일시 VRAM을 적게 소모. +#### Upscale How to use +1. 
크기를 키울때 사용할 upscaler 모델 선택 +2. 크기를 키울 배수 선택 +3. 가로, 세로를 내가 단일로 생성할 수 있는 이미지의 최대 크기로 선택(이미지 생성 속도를 최대한 빠르게 하기 위하여) + 1. 가로 또는 세로중 한개를 0으로 세팅시 업스케일만 동작(세부 구조를 디테일하게하는 인페인팅이 동작하지 않음) +4. before running 체크 + 1. 체크시 업스케일을 먼저 돌려서 인페인팅의 퀄리티 상승. 단, 인페인팅시 더 많은 VRAM 요구 +5. 생성! +### Detect Detailer +특정 키워드로 이미지를 탐색 후 인페인팅하는 도구. +#### Detect Detailer How to use +0. 인페인팅의 범위 제한(I2I 전용) + 1. Inner 옵션은 I2I의 인페인팅에서 칠한 범위 내부만 이미지를 탐색 + 2. Outer 옵션은 I2I의 인페인팅에서 칠한 범위 외부만 이미지를 탐색 +1. 탐색 키워드 작성 + 1. 탐색할 키워드를 작성(face, person 등등) + 1. 탐색할 키워드는 문장형도 가능(happy face, running dog) + 2. 탐색할 키워드를 .으로 분할 가능(face. arm, face. chest) + 2. 탐색할 키워드에 사용 가능한 추가 옵션 존재 + 1. <area:type>을 이용하여 특정 범위 탐색 가능 + 1. 범위 종류는 left, right, top, bottom, all이 존재 + 2. <file:filename>을 이용하여 특정 파일 탐색 가능 + 1. 특정 파일의 위치는 models/ddsdmask + 3. <model:type>을 이용하여 특정 모델 탐색 가능 + 1. type은 face_media_full, face_media_short와 파일명이 존재 + 2. 파일은 models/yolo에 위치 + 4. <type1:type2:dilation:confidence> 같이 type1과 type2외에 dilation과 confidence도 추가 입력 가능 + 1. confidence는 model 타입에서만 사용되는 값 + 3. 탐색한 범위를 AND, OR, XOR, NAND, NOR 등의 게이트 옵션으로 연산 가능 + 1. face OR (body NAND outfit) -> 괄호안의 body NAND outfit을 먼저 한 후에 face와 OR 연산을 동작 + 2. 괄호는 최대한 적게 이용. 많이 이용시 많은 VRAM 소모. + 3. 동작은 왼쪽에서 오른쪽으로 순차적 동작. + 4. 탐색할 키워드에 옵션으로 여러가지 옵션 조절 가능 + 1. face:0:0.4:4 OR outfit:2:0.5:8 + 2. 순서대로 탐색할 프롬프트, SAM 탐색 레벨(0-2), 민감도(0-1), 팽창값(0-512)을 가짐 + 3. 값을 생략하면 초기값으로 세팅 +2. 긍정 프롬프트 입력 + 1. 인페인팅시 동작시킬 긍정 프롬프트 입력 +3. 부정 프롬프트 입력 + 1. 인페인팅시 동작시킬 부정 프롬프트 입력 +4. Denoising, CFG, Steps, Clip skip, Ckpt, Vae 수정 + 1. 인페인팅시 동작에 영향을 주는 옵션 +5. Split Mask 옵션 체크 + 1. 체크시 마스크가 떨어져 있는것이 존재한다면 따로 인페인팅. + 1. 따로 인페인팅시 퀄리티 상승. 하지만 더 많은 인페인팅을 요구하여 생성속도 하락. +6. Remove Area 옵션 체크 + 1. Split Mask 옵션이 Enable 되어야만 동작 + 2. 분할 인페인팅시 일정 크기 이하의 면적은 인페인팅에서 제외 +6. 생성! +### Postprocessing +최종적으로 생성된 이미지에 가하는 후보정 +#### Postprocessing How to use +1. 가하고자 하는 후보정을 선택 +2. 생성! +### Watermark +이미지 생성 최종본에 자신의 증명을 기입하는 기능 +#### Watermark How to use +1. 기입할 증명의 종류 선택(글자, 이미지) +2. 선택한 종류를 입력 +3. 
선택한 종류의 크기와 위치를 지정 +4. Padding으로 해당 위치에서 얼만큼 떨어져 있을지 설정 +5. Alpha로 얼만큼 투명할지 결정 +6. 생성! + +### Video +[![Stable Diffusion - DDSD 확장 기능 (No - Talking)](http://img.youtube.com/vi/9wfZyJhPPho/0.jpg)](https://youtu.be/9wfZyJhPPho) + +## Installation +1. 다운로드 [CUDA](https://developer.nvidia.com/cuda-toolkit-archive)와 [cuDNN](https://developer.nvidia.com/rdp/cudnn-archive) + 1. 자신이 가진 WebUI와 동일한 버전의 `CUDA`와 `cuDNN`버전으로 설치 + 1. 이것은 다운로드를 편하게 하기위한 구글링크. [CUDA 117](https://drive.google.com/file/d/1HRTOLTB44-pRcrwIw9lQak2OC2ohNle3/view?usp=share_link)와 [cuDNN](https://drive.google.com/file/d/1QcgaxUra0WnCWrCLjsWp_QKw1PKcvqpj/view?usp=share_link) + 2. `CUDA` 설치 후 해당 폴더에 `cuDNN` 덮어쓰기 + 3. 일정 버전은 Easy Install을 지원. `CUDA`와 `cuDNN` 불필요. + 1. 지원버전 (torch == 1.13.1+cu117, torch==2.0.0+cu117 , torch==2.0.0+cu118) +2. 확장탭에서 설치 `https://github.com/NeoGraph-K/sd-webui-ddsd` 또는 다운로드 후 `extension/` 에 풀어넣기 +3. WebUI를 완전히 재시작 + +## Credits + +dustysys/[ddetailer](https://github.com/dustysys/ddetailer) + +AUTOMATIC1111/[stable-diffusion-webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui) + +facebookresearch/[Segment Anything](https://github.com/facebookresearch/segment-anything) + +IDEA-Research/[GroundingDINO](https://github.com/IDEA-Research/GroundingDINO) + +IDEA-Research/[Grounded-Segment-Anything](https://github.com/IDEA-Research/Grounded-Segment-Anything) + +continue-revolution/[sd-webui-segment-anything](https://github.com/continue-revolution/sd-webui-segment-anything) + +Bing-su/[adetailer](https://github.com/Bing-su/adetailer) diff --git a/exhm/detailer/sd-webui-ddsd-orig/config/Empty.ddcfg b/exhm/detailer/sd-webui-ddsd-orig/config/Empty.ddcfg new file mode 100644 index 0000000000000000000000000000000000000000..5cb4d37952781472ad74162a889f0e31a0b30008 --- /dev/null +++ b/exhm/detailer/sd-webui-ddsd-orig/config/Empty.ddcfg @@ -0,0 +1 @@ +{"enable_script_names": "dynamic_thresholding;dynamic_prompting", "disable_watermark": true, "disable_postprocess": true, 
"disable_upscaler": true, "ddetailer_before_upscaler": false, "scalevalue": 2, "upscaler_sample": "Original", "overlap": 32, "upscaler_index": "SwinIR_4x", "rewidth": 512, "reheight": 512, "denoising_strength": 0.1, "upscaler_ckpt": "Original", "upscaler_vae": "Original", "disable_detailer": true, "disable_mask_paint_mode": true, "inpaint_mask_mode": "Inner", "detailer_sample": "Original", "detailer_sam_model": "sam_vit_b_01ec64.pth", "detailer_dino_model": "groundingdino_swinb_cogcoor.pth", "dino_full_res_inpaint": true, "dino_inpaint_padding": 0, "detailer_mask_blur": 4, "disable_outpaint": true, "outpaint_sample": "Original", "outpaint_mask_blur": 8, "dino_detect_count": 5, "dino_detection_ckpt_1": "Original", "dino_detection_vae_1": "Original", "dino_detection_prompt_1": "", "dino_detection_positive_1": "", "dino_detection_negative_1": "", "dino_detection_denoise_1": 0.4, "dino_detection_cfg_1": 0, "dino_detection_steps_1": 0, "dino_detection_spliter_disable_1": true, "dino_detection_spliter_remove_area_1": 16, "dino_detection_clip_skip_1": 0, "dino_detection_ckpt_2": "Original", "dino_detection_vae_2": "Original", "dino_detection_prompt_2": "", "dino_detection_positive_2": "", "dino_detection_negative_2": "", "dino_detection_denoise_2": 0.4, "dino_detection_cfg_2": 0, "dino_detection_steps_2": 0, "dino_detection_spliter_disable_2": true, "dino_detection_spliter_remove_area_2": 16, "dino_detection_clip_skip_2": 0, "dino_detection_ckpt_3": "Original", "dino_detection_vae_3": "Original", "dino_detection_prompt_3": "", "dino_detection_positive_3": "", "dino_detection_negative_3": "", "dino_detection_denoise_3": 0.4, "dino_detection_cfg_3": 0, "dino_detection_steps_3": 0, "dino_detection_spliter_disable_3": true, "dino_detection_spliter_remove_area_3": 16, "dino_detection_clip_skip_3": 0, "dino_detection_ckpt_4": "Original", "dino_detection_vae_4": "Original", "dino_detection_prompt_4": "", "dino_detection_positive_4": "", "dino_detection_negative_4": "", 
"dino_detection_denoise_4": 0.4, "dino_detection_cfg_4": 0, "dino_detection_steps_4": 0, "dino_detection_spliter_disable_4": true, "dino_detection_spliter_remove_area_4": 16, "dino_detection_clip_skip_4": 0, "dino_detection_ckpt_5": "Original", "dino_detection_vae_5": "Original", "dino_detection_prompt_5": "", "dino_detection_positive_5": "", "dino_detection_negative_5": "", "dino_detection_denoise_5": 0.4, "dino_detection_cfg_5": 0, "dino_detection_steps_5": 0, "dino_detection_spliter_disable_5": true, "dino_detection_spliter_remove_area_5": 16, "dino_detection_clip_skip_5": 0, "watermark_count": 2, "watermark_type_1": "Text", "watermark_position_1": "Center", "watermark_image_1": null, "watermark_image_size_width_1": 100, "watermark_image_size_height_1": 100, "watermark_text_1": "", "watermark_text_color_1": null, "watermark_text_font_1": "Courier New", "watermark_text_size_1": 50, "watermark_padding_1": 10, "watermark_alpha_1": 0.4, "watermark_type_2": "Text", "watermark_position_2": "Center", "watermark_image_2": null, "watermark_image_size_width_2": 100, "watermark_image_size_height_2": 100, "watermark_text_2": "", "watermark_text_color_2": null, "watermark_text_font_2": "Courier New", "watermark_text_size_2": 50, "watermark_padding_2": 10, "watermark_alpha_2": 0.4, "postprocessing_count": 2, "pp_type_1": "none", "pp_saturation_strength_1": 1.1, "pp_sharpening_radius_1": 2, "pp_sharpening_percent_1": 150, "pp_sharpening_threshold_1": 3, "pp_gaussian_radius_1": 2, "pp_brightness_strength_1": 1.1, "pp_color_strength_1": 1.1, "pp_contrast_strength_1": 1.1, "pp_hue_strength_1": 0, "pp_bilateral_sigmaC_1": 10, "pp_bilateral_sigmaS_1": 10, "pp_color_tint_type_name_1": "warm", "pp_color_tint_lut_name_1": "FGCineBasic.cube", "pp_type_2": "none", "pp_saturation_strength_2": 1.1, "pp_sharpening_radius_2": 2, "pp_sharpening_percent_2": 150, "pp_sharpening_threshold_2": 3, "pp_gaussian_radius_2": 2, "pp_brightness_strength_2": 1.1, "pp_color_strength_2": 1.1, 
"pp_contrast_strength_2": 1.1, "pp_hue_strength_2": 0, "pp_bilateral_sigmaC_2": 10, "pp_bilateral_sigmaS_2": 10, "pp_color_tint_type_name_2": "warm", "pp_color_tint_lut_name_2": "FGCineBasic.cube", "outpaint_count": 4, "outpaint_positive_1": "FGCineBasic.cube", "outpaint_negative_1": "", "outpaint_denoise_1": "", "outpaint_cfg_1": 0.8, "outpaint_steps_1": 0, "outpaint_pixels_1": 80, "outpaint_direction_1": 128, "outpaint_positive_2": "FGCineBasic.cube", "outpaint_negative_2": "", "outpaint_denoise_2": "", "outpaint_cfg_2": 0.8, "outpaint_steps_2": 0, "outpaint_pixels_2": 80, "outpaint_direction_2": 128, "outpaint_positive_3": "", "outpaint_negative_3": "", "outpaint_denoise_3": 0.8, "outpaint_cfg_3": 0, "outpaint_steps_3": 80, "outpaint_pixels_3": 128, "outpaint_direction_3": "None", "outpaint_positive_4": "", "outpaint_negative_4": "", "outpaint_denoise_4": 0.8, "outpaint_cfg_4": 0, "outpaint_steps_4": 80, "outpaint_pixels_4": 128, "outpaint_direction_4": "None"} \ No newline at end of file diff --git a/exhm/detailer/sd-webui-ddsd-orig/install.py b/exhm/detailer/sd-webui-ddsd-orig/install.py new file mode 100644 index 0000000000000000000000000000000000000000..6e12f1e0afc95dbeaf9ae95a8636c1b2f24f0910 --- /dev/null +++ b/exhm/detailer/sd-webui-ddsd-orig/install.py @@ -0,0 +1,100 @@ +import os +import platform + +import launch + + +def check_system_machine(): + system = platform.system() + machine = platform.machine() + return (system, machine) in [('Windows', 'AMD64'), ('Linux', 'x86_64')] + + +def check_python_version(low: int, high: int): + ver = platform.python_version_tuple() + if int(ver[0]) == 3 and low <= int(ver[1]) <= high: + return ver[0] + ver[1] + return None + + +def install_pycocotools(): + base = 'https://github.com/Bing-su/dddetailer/releases/download/pycocotools/' + urls = { + 'Windows': 'pycocotools-2.0.6-cp{ver}-cp{ver}-win_amd64.whl', + 'Linux': 'pycocotools-2.0.6-cp{ver}-cp{ver}-manylinux_2_17_x86_64.manylinux2014_x86_64.whl', + } + + 
python_version = check_python_version(8, 11) + if not check_system_machine() or not python_version: + launch.run_pip('install pycocotools', 'sd-webui-ddsd requirement: pycocotools') + return + + url = urls[platform.system()].format(ver=python_version) + launch.run_pip(f'install {base + url}', 'sd-webui-ddsd requirement: pycocotools') + + +def install_groundingdino(): + import torch + from packaging.version import parse + + # torch_version: '1.13.1' or '2.0.0' or ... + torch_version = parse(torch.__version__).base_version + # cuda_version: '117' or '118' or 'None' + cuda_version = torch.version.cuda.replace('.', '') + python_version = check_python_version(9, 10) + + if ( + not check_system_machine() + or (torch_version, cuda_version) + not in [('1.13.1', '117'), ('2.0.0', '117'), ('2.0.0', '118')] + or not python_version + ): + launch.run_pip('install git+https://github.com/IDEA-Research/GroundingDINO', 'sd-webui-ddsd requirement: groundingdino') + return + + system = 'win' if platform.system() == 'Windows' else 'linux' + machine = 'amd64' if platform.machine() == 'AMD64' else 'x86_64' + + url = 'https://github.com/Bing-su/GroundingDINO/releases/download/wheel-0.1.0/groundingdino-0.1.0+torch{torch}.cu{cuda}-cp{py}-cp{py}-{system}_{machine}.whl' + url = url.format( + torch=torch_version, + cuda=cuda_version, + py=python_version, + system=system, + machine=machine, + ) + + launch.run_pip(f'install {url}', 'sd-webui-ddsd requirement: groundingdino') + + +current_dir = os.path.dirname(os.path.realpath(__file__)) +req_file = os.path.join(current_dir, 'requirements.txt') + +with open(req_file) as file: + for lib in file: + version = None + lib = lib.strip() + lib = 'skimage' if lib == 'scikit-image' else lib + if '==' in lib: + lib, version = [x.strip() for x in lib.split('==')] + if not launch.is_installed(lib): + if lib == 'pycocotools': + install_pycocotools() + elif lib == 'groundingdino': + install_groundingdino() + elif lib == 'skimage': + launch.run_pip( + 
f'install scikit-image', + f'sd-webui-ddsd requirement: scikit-image' + ) + elif lib == 'pillow_lut': + launch.run_pip( + f'install pillow_lut', + f'sd-webui-ddsd requirement: pillow_lut' + ) + else: + lib = lib if version is None else lib + '==' + version + launch.run_pip( + f'install {lib}', + f'sd-webui-ddsd requirement: {lib}' + ) diff --git a/exhm/detailer/sd-webui-ddsd-orig/requirements.txt b/exhm/detailer/sd-webui-ddsd-orig/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..fc05f0b2fd1c8b896788382c3115265af313a3f6 --- /dev/null +++ b/exhm/detailer/sd-webui-ddsd-orig/requirements.txt @@ -0,0 +1,8 @@ +pycocotools +segment_anything +groundingdino +scipy +scikit-image +pillow_lut +ultralytics==8.0.87 +mediapipe==0.9.3.0 \ No newline at end of file diff --git a/exhm/detailer/sd-webui-ddsd-orig/scripts/__pycache__/ddsd.cpython-310.pyc b/exhm/detailer/sd-webui-ddsd-orig/scripts/__pycache__/ddsd.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8af1c4c173601656090ad0baa24cf18a64afa30a Binary files /dev/null and b/exhm/detailer/sd-webui-ddsd-orig/scripts/__pycache__/ddsd.cpython-310.pyc differ diff --git a/exhm/detailer/sd-webui-ddsd-orig/scripts/__pycache__/ddsd_bs.cpython-310.pyc b/exhm/detailer/sd-webui-ddsd-orig/scripts/__pycache__/ddsd_bs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a03b2f5a8f3be7edbedebaed17cf837f8d210739 Binary files /dev/null and b/exhm/detailer/sd-webui-ddsd-orig/scripts/__pycache__/ddsd_bs.cpython-310.pyc differ diff --git a/exhm/detailer/sd-webui-ddsd-orig/scripts/__pycache__/ddsd_dino.cpython-310.pyc b/exhm/detailer/sd-webui-ddsd-orig/scripts/__pycache__/ddsd_dino.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..13d8ba7035109c70464921aee24f15fe60ba4f11 Binary files /dev/null and b/exhm/detailer/sd-webui-ddsd-orig/scripts/__pycache__/ddsd_dino.cpython-310.pyc differ diff --git 
a/exhm/detailer/sd-webui-ddsd-orig/scripts/__pycache__/ddsd_postprocess.cpython-310.pyc b/exhm/detailer/sd-webui-ddsd-orig/scripts/__pycache__/ddsd_postprocess.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bce85402c5db93b50db00176bfecfaef14eb953d Binary files /dev/null and b/exhm/detailer/sd-webui-ddsd-orig/scripts/__pycache__/ddsd_postprocess.cpython-310.pyc differ diff --git a/exhm/detailer/sd-webui-ddsd-orig/scripts/__pycache__/ddsd_sam.cpython-310.pyc b/exhm/detailer/sd-webui-ddsd-orig/scripts/__pycache__/ddsd_sam.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e3043ed8e0495c7bb108328b95adf957dc199a39 Binary files /dev/null and b/exhm/detailer/sd-webui-ddsd-orig/scripts/__pycache__/ddsd_sam.cpython-310.pyc differ diff --git a/exhm/detailer/sd-webui-ddsd-orig/scripts/__pycache__/ddsd_utils.cpython-310.pyc b/exhm/detailer/sd-webui-ddsd-orig/scripts/__pycache__/ddsd_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1b6c571a91b91cbc6375ab7215b00fe701ef2478 Binary files /dev/null and b/exhm/detailer/sd-webui-ddsd-orig/scripts/__pycache__/ddsd_utils.cpython-310.pyc differ diff --git a/exhm/detailer/sd-webui-ddsd-orig/scripts/ddsd.py b/exhm/detailer/sd-webui-ddsd-orig/scripts/ddsd.py new file mode 100644 index 0000000000000000000000000000000000000000..646a786113ef1c8802d57d0348304f14978d344c --- /dev/null +++ b/exhm/detailer/sd-webui-ddsd-orig/scripts/ddsd.py @@ -0,0 +1,1295 @@ +from json import load as json_read, dumps as json_write +import os +import math +import re + +import gradio as gr +import numpy as np +from PIL import Image, ImageDraw + +from scripts.ddsd_sam import sam_model_list +from scripts.ddsd_dino import dino_model_list +from scripts.ddsd_postprocess import lut_model_list, ddsd_postprocess +from scripts.ddsd_utils import dino_detect_from_prompt, mask_spliter_and_remover, I2I_Generator_Create, get_fonts_list, image_apply_watermark, 
matched_noise + +import modules +from modules import processing, shared, images, devices, modelloader, sd_models, sd_vae +from modules.processing import create_infotext, StableDiffusionProcessingTxt2Img +from modules.shared import opts, state +from modules.sd_models import model_hash +from modules.paths import models_path +from modules.scripts import AlwaysVisible + +from basicsr.utils.download_util import load_file_from_url + +grounding_models_path = os.path.join(models_path, "grounding") +sam_models_path = os.path.join(models_path, "sam") +lut_models_path = os.path.join(models_path, 'lut') +yolo_models_path = os.path.join(models_path, 'yolo') +ddsd_config_path = os.path.join(os.path.dirname(os.path.dirname(__file__)),'config') + +ckpt_model_name_pattern = re.compile('([\\w\\.\\[\\]\\\\\\+\\(\\)/]+)\\s*\\[.*\\]') + +def list_models(model_path, filter): + model_list = modelloader.load_models(model_path=model_path, ext_filter=[filter]) + + def modeltitle(path, shorthash): + abspath = os.path.abspath(path) + + if abspath.startswith(model_path): + name = abspath.replace(model_path, '') + else: + name = os.path.basename(path) + + if name.startswith("\\") or name.startswith("/"): + name = name[1:] + + shortname = os.path.splitext(name.replace("/", "_").replace("\\", "_"))[0] + + return f'{name} [{shorthash}]', shortname + + models = [] + for filename in model_list: + h = model_hash(filename) + title, short_model_name = modeltitle(filename, h) + models.append(title) + + return models + +def startup(): + if (len(list_models(yolo_models_path, '.pth')) == 0) and (len(list_models(yolo_models_path, '.pt')) == 0): + print("No detection yolo models found, downloading...") + load_file_from_url('https://huggingface.co/Bingsu/adetailer/resolve/main/face_yolov8m.pt',yolo_models_path) + load_file_from_url('https://huggingface.co/Bingsu/adetailer/resolve/main/face_yolov8n.pt',yolo_models_path) + 
load_file_from_url('https://huggingface.co/Bingsu/adetailer/resolve/main/face_yolov8s.pt',yolo_models_path) + load_file_from_url('https://huggingface.co/Bingsu/adetailer/resolve/main/face_yolov8n_v2.pt',yolo_models_path) + load_file_from_url('https://huggingface.co/Bingsu/adetailer/resolve/main/hand_yolov8n.pt',yolo_models_path) + load_file_from_url('https://huggingface.co/Bingsu/adetailer/resolve/main/hand_yolov8s.pt',yolo_models_path) + + if (len(list_models(grounding_models_path, '.pth')) == 0): + print("No detection groundingdino models found, downloading...") + load_file_from_url('https://huggingface.co/ShilongLiu/GroundingDINO/resolve/main/groundingdino_swint_ogc.pth',grounding_models_path) + load_file_from_url('https://raw.githubusercontent.com/IDEA-Research/GroundingDINO/main/groundingdino/config/GroundingDINO_SwinT_OGC.py',grounding_models_path, file_name='groundingdino_swint_ogc.py') + #load_file_from_url('https://huggingface.co/ShilongLiu/GroundingDINO/resolve/main/groundingdino_swinb_cogcoor.pth',grounding_models_path) + #load_file_from_url('https://raw.githubusercontent.com/IDEA-Research/GroundingDINO/main/groundingdino/config/GroundingDINO_SwinB.cfg.py',grounding_models_path, file_name='groundingdino_swinb_cogcoor.py') + + + if (len(list_models(sam_models_path, '.pth')) == 0): + print("No detection sam models found, downloading...") + #load_file_from_url('https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth',sam_models_path) + #load_file_from_url('https://dl.fbaipublicfiles.com/segment_anything/sam_vit_l_0b3195.pth',sam_models_path) + load_file_from_url('https://dl.fbaipublicfiles.com/segment_anything/sam_vit_b_01ec64.pth',sam_models_path) + + if (len(list_models(lut_models_path, '.cube')) == 0): # Free use lut files. 
+ print('No detection lut models found, downloading...') + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Arabica%2012.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Ava%20614.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Azrael%2093.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Bourbon%2064.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Byers%2011.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Chemical%20168.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Clayton%2033.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Clouseau%2054.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Cobi%203.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Contrail%2035.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Cubicle%2099.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Django%2025.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Domingo%20145.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/FGCineBasic.cube', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/FGCineBright.cube', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/FGCineCold.cube', 
lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/FGCineDrama.cube', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/FGCineTealOrange1.cube', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/FGCineTealOrange2.cube', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/FGCineVibrant.cube', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/FGCineWarm.cube', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Faded%2047.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Folger%2050.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Fusion%2088.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Hyla%2068.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Korben%20214.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/LBK-K-Tone_33.cube', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Lenox%20340.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Lucky%2064.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/McKinnon%2075.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Milo%205.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Neon%20770.CUBE', lut_models_path) + 
load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Paladin%201875.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Pasadena%2021.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Pitaya%2015.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Reeve%2038.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Remy%2024.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Sprocket%20231.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Teigen%2028.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Trent%2018.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Tweed%2071.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Vireo%2037.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Zed%2032.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Zeke%2039.CUBE', lut_models_path) + +startup() + +def gr_show(visible=True): + return {"visible": visible, "__type__": "update"} + +def gr_list_refresh(choices, value): + return {'choices':choices,'value':value,'__type__':'update'} +def gr_value_refresh(value): + return {'value':value,'__type__':'update'} +class Script(modules.scripts.Script): + def __init__(self): + self.original_scripts = None + self.original_scripts_always = None + _ ,self.font_path = get_fonts_list() + self.ckptname = None + self.vae = None + self.clip_skip = 1 + + def title(self): + return 
"ddetailer + sdupscale" + + def show(self, is_img2img): + return AlwaysVisible + + def ui(self, is_img2img): + pp_types = [ + 'none', + 'saturation','sharpening','gaussian blur','brightness','color','contrast', + #'color extraction', + 'hue', 'inversion', 'bilateral','color tint(type)','color tint(lut)'] + ckpt_list = list(sd_models.checkpoints_list.keys()) + ckpt_list.insert(0, 'Original') + vae_list = list(sd_vae.vae_dict.keys()) + vae_list.insert(0, 'Original') + sample_list = [x.name for x in shared.list_samplers()] + sample_list = [x for x in sample_list if x not in ['PLMS','UniPC','DDIM']] + sample_list.insert(0,"Original") + fonts_list, _ = get_fonts_list() + ddsd_config_list = [x[:-6] for x in os.listdir(ddsd_config_path) if x.endswith('.ddcfg')] + ret = [] + dino_detection_ckpt_list = [] + dino_detection_vae_list = [] + dino_detection_prompt_list = [] + dino_detection_positive_list = [] + dino_detection_negative_list = [] + dino_detection_denoise_list = [] + dino_detection_cfg_list = [] + dino_detection_steps_list = [] + dino_detection_spliter_disable_list = [] + dino_detection_spliter_remove_area_list = [] + dino_detection_clip_skip_list = [] + pp_type_list = [] + pp_saturation_strength_list = [] + pp_sharpening_radius_list = [] + pp_sharpening_percent_list = [] + pp_sharpening_threshold_list = [] + pp_gaussian_radius_list = [] + pp_brightness_strength_list = [] + pp_color_strength_list = [] + pp_contrast_strength_list = [] + pp_hue_strength_list = [] + pp_bilateral_sigmaC_list = [] + pp_bilateral_sigmaS_list = [] + pp_color_tint_type_name_list = [] + pp_color_tint_lut_name_list = [] + watermark_type_list = [] + watermark_position_list = [] + watermark_image_list = [] + watermark_image_size_width_list = [] + watermark_image_size_height_list = [] + watermark_text_list = [] + watermark_text_color_list = [] + watermark_text_font_list = [] + watermark_text_size_list = [] + watermark_padding_list = [] + watermark_alpha_list = [] + outpaint_positive_list = [] + 
outpaint_negative_list = [] + outpaint_denoise_list = [] + outpaint_cfg_list = [] + outpaint_steps_list = [] + outpaint_pixels_list = [] + outpaint_direction_list = [] + dino_tabs = None + watermark_tabs = None + postprocess_tabs = None + outpaint_tabs = None + + with gr.Accordion('DDSD', open=False, elem_id='ddsd_all_option_acc'): + + with gr.Row(): + ddsd_save_path = gr.Textbox(label='Save File Name', visible=True, interactive=True, value='ddsd') + ddsd_save = gr.Button('Save', elem_id='save_button', visible=True, interactive=True) + with gr.Row(): + ddsd_load_path = gr.Dropdown(label='Load File Name', visible=True, interactive=True, choices=ddsd_config_list) + ddsd_load = gr.Button('Load', elem_id='load_button',visible=True, interactive=True) + + with gr.Accordion("Script Option", open = False, elem_id="ddsd_enable_script_acc"): + with gr.Column(): + all_target_info = gr.HTML('

I2I All process target script

') + enable_script_names = gr.Textbox(label="Enable Script(Extension)", elem_id="enable_script_names", value='dynamic_thresholding;dynamic_prompting',show_label=True, lines=1, placeholder="Extension python file name(ex - dynamic_thresholding;dynamic_prompting)") + + with gr.Accordion("Outpainting", open=False, elem_id='ddsd_outpaint_acc'): + with gr.Column(): + outpaint_target_info = gr.HTML('

I2I Outpainting

') + disable_outpaint = gr.Checkbox(label='Disable Outpaint', elem_id='disable_outpaint', value=True, visible=True) + outpaint_sample = gr.Dropdown(label='Outpaint Sampling', elem_id='outpaint_sample', choices=sample_list, value=sample_list[0], visible=False, type="value") + with gr.Tabs(elem_id = 'outpaint_arguments', visible=False) as outpaint_tabs_acc: + for outpaint_index in range(shared.opts.data.get('outpaint_count', 1)): + with gr.Tab(f'Outpaint {outpaint_index + 1} Argument', elem_id=f'outpaint_{outpaint_index+1}_argument_tab'): + outpaint_pixels = gr.Slider(label=f'Outpaint {outpaint_index+1} Pixels', minimum=0, maximum=256, value=64, step=16) + outpaint_direction = gr.Radio(choices=['None', 'Left','Right','Up','Down'], value='None', label=f'Outpaint {outpaint_index+1} Direction') + with gr.Row(): + outpaint_positive = gr.Textbox(label=f'Positive {outpaint_index+1} Prompt', show_label=True, lines=2, placeholder='Outpaint Positive Prompt(Empty is Original)') + outpaint_negative = gr.Textbox(label=f'Negative {outpaint_index+1} Prompt', show_label=True, lines=2, placeholder='Outpaint Negative Prompt(Empty is Original)') + outpaint_denoise = gr.Slider(label=f'Outpaint {outpaint_index+1} Denoise', minimum=0, maximum=1.0, step=0.01, value=0.8) + outpaint_cfg = gr.Slider(label=f'Outpaint {outpaint_index+1} CFG(0 To Original)', minimum=0, maximum=500, step=0.5, value=0) + outpaint_steps = gr.Slider(label=f'Outpaint {outpaint_index+1} Steps(0 To Original)', minimum=0, maximum=150, step=1, value=0) + outpaint_positive_list.append(outpaint_positive) + outpaint_negative_list.append(outpaint_negative) + outpaint_denoise_list.append(outpaint_denoise) + outpaint_cfg_list.append(outpaint_cfg) + outpaint_steps_list.append(outpaint_steps) + outpaint_pixels_list.append(outpaint_pixels) + outpaint_direction_list.append(outpaint_direction) + outpaint_tabs = outpaint_tabs_acc + outpaint_mask_blur = gr.Slider(label='Outpaint Blur', elem_id='outpaint_mask_blur', minimum=0, 
maximum=128, step=4, value=8, visible=False) + + with gr.Accordion("Upscaler", open=False, elem_id="ddsd_upscaler_acc"): + with gr.Column(): + sd_upscale_target_info = gr.HTML('

I2I Upscaler Option

') + disable_upscaler = gr.Checkbox(label='Disable Upscaler', elem_id='disable_upscaler', value=True, visible=True) + ddetailer_before_upscaler = gr.Checkbox(label='Upscaler before running detailer', elem_id='upscaler_before_running_detailer', value=False, visible=False) + with gr.Row(): + upscaler_sample = gr.Dropdown(label='Upscaler Sampling', elem_id='upscaler_sample', choices=sample_list, value=sample_list[0], visible=False, type="value") + upscaler_index = gr.Dropdown(label='Upscaler', elem_id='upscaler_index', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[-1].name, type="index", visible=False) + with gr.Row(): + upscaler_ckpt = gr.Dropdown(label='Upscaler CKPT Model', elem_id=f'upscaler_detect_ckpt', choices=ckpt_list, value=ckpt_list[0], visible=False) + upscaler_vae = gr.Dropdown(label='Upscaler VAE Model', elem_id=f'upscaler_detect_vae', choices=vae_list, value=vae_list[0], visible=False) + scalevalue = gr.Slider(minimum=1, maximum=16, step=0.5, elem_id='upscaler_scalevalue', label='Resize', value=2, visible=False) + overlap = gr.Slider(minimum=0, maximum=256, step=32, elem_id='upscaler_overlap', label='Tile overlap', value=32, visible=False) + with gr.Row(): + rewidth = gr.Slider(minimum=0, maximum=1024, step=64, elem_id='upscaler_rewidth', label='Width(0 to No Inpainting)', value=512, visible=False) + reheight = gr.Slider(minimum=0, maximum=1024, step=64, elem_id='upscaler_reheight', label='Height(0 to No Inpainting)', value=512, visible=False) + denoising_strength = gr.Slider(minimum=0, maximum=1.0, step=0.01, elem_id='upscaler_denoising', label='Denoising strength', value=0.1, visible=False) + + with gr.Accordion("DINO Detect", open=False, elem_id="ddsd_dino_detect_acc"): + with gr.Column(): + ddetailer_target_info = gr.HTML('

I2I Detection Detailer Option

') + disable_detailer = gr.Checkbox(label='Disable Detection Detailer', elem_id='disable_detailer',value=True, visible=True) + disable_mask_paint_mode = gr.Checkbox(label='Disable I2I Mask Paint Mode', value=True, visible=False) + inpaint_mask_mode = gr.Radio(choices=['Inner', 'Outer'], value='Inner', label='Inpaint Mask Paint Mode', visible=False, show_label=True) + detailer_sample = gr.Dropdown(label='Detailer Sampling', elem_id='detailer_sample', choices=sample_list, value=sample_list[0], visible=False, type="value") + with gr.Row(): + detailer_sam_model = gr.Dropdown(label='Detailer SAM Model', elem_id='detailer_sam_model', choices=sam_model_list(), value=sam_model_list()[0], visible=False) + detailer_dino_model = gr.Dropdown(label='Detailer DINO Model', elem_id='detailer_dino_model', choices=dino_model_list(), value=dino_model_list()[0], visible=False) + with gr.Tabs(elem_id = 'dino_detct_arguments', visible=False) as dino_tabs_acc: + for index in range(shared.opts.data.get('dino_detect_count', 2)): + with gr.Tab(f'DINO {index + 1} Argument', elem_id=f'dino_{index + 1}_argument_tab'): + with gr.Row(): + dino_detection_ckpt = gr.Dropdown(label='Detailer CKPT Model', elem_id=f'detailer_detect_ckpt_{index+1}', choices=ckpt_list, value=ckpt_list[0], visible=True) + dino_detection_vae = gr.Dropdown(label='Detailer VAE Model', elem_id=f'detailer_detect_vae_{index+1}', choices=vae_list, value=vae_list[0], visible=True) + dino_detection_prompt = gr.Textbox(label=f"Detect {index + 1} Prompt", elem_id=f"detailer_detect_prompt_{index + 1}", show_label=True, lines=2, placeholder="Detect Token Prompt(ex - face:level(0-2):threshold(0-1):dilation(0-128))", visible=True) + with gr.Row(): + dino_detection_positive = gr.Textbox(label=f"Positive {index + 1} Prompt", elem_id=f"detailer_detect_positive_{index + 1}", show_label=True, lines=2, placeholder="Detect Mask Inpaint Positive(ex - perfect anatomy)", visible=True) + dino_detection_negative = gr.Textbox(label=f"Negative 
{index + 1} Prompt", elem_id=f"detailer_detect_negative_{index + 1}", show_label=True, lines=2, placeholder="Detect Mask Inpaint Negative(ex - nsfw)", visible=True) + dino_detection_denoise = gr.Slider(minimum=0, maximum=1.0, step=0.01, elem_id=f'dino_detect_{index+1}_denoising', label=f'DINO {index + 1} Denoising strength', value=0.4, visible=True) + dino_detection_cfg = gr.Slider(minimum=0, maximum=500, step=0.5, elem_id=f'dino_detect_{index+1}_cfg_scale', label=f'DINO {index + 1} CFG Scale(0 to Origin)', value=0, visible=True) + dino_detection_steps = gr.Slider(minimum=0, maximum=150, step=1, elem_id=f'dino_detect_{index+1}_steps', label=f'DINO {index + 1} Steps(0 to Origin)', value=0, visible=True) + dino_detection_spliter_disable = gr.Checkbox(label=f'Disable DINO {index + 1} Detect Split Mask', value=True, visible=True) + dino_detection_spliter_remove_area = gr.Slider(minimum=0, maximum=800, step=8, elem_id=f'dino_detect_{index+1}_remove_area', label=f'Remove {index + 1} Area', value=16, visible=True) + dino_detection_clip_skip = gr.Slider(minimum=0, maximum=10, step=1, elem_id=f'dino_detect_{index+1}_clip_skip', label=f'Clip skip {index + 1} Inpaint(0 to Origin)', value=0, visible=True) + dino_detection_ckpt_list.append(dino_detection_ckpt) + dino_detection_vae_list.append(dino_detection_vae) + dino_detection_prompt_list.append(dino_detection_prompt) + dino_detection_positive_list.append(dino_detection_positive) + dino_detection_negative_list.append(dino_detection_negative) + dino_detection_denoise_list.append(dino_detection_denoise) + dino_detection_cfg_list.append(dino_detection_cfg) + dino_detection_steps_list.append(dino_detection_steps) + dino_detection_spliter_disable_list.append(dino_detection_spliter_disable) + dino_detection_spliter_remove_area_list.append(dino_detection_spliter_remove_area) + dino_detection_clip_skip_list.append(dino_detection_clip_skip) + dino_tabs = dino_tabs_acc + dino_full_res_inpaint = gr.Checkbox(label='Inpaint at full 
resolution ', elem_id='detailer_full_res', value=True, visible = False) + with gr.Row(): + dino_inpaint_padding = gr.Slider(label='Inpaint at full resolution padding, pixels ', elem_id='detailer_padding', minimum=0, maximum=256, step=4, value=0, visible=False) + detailer_mask_blur = gr.Slider(label='Detailer Blur', elem_id='detailer_mask_blur', minimum=0, maximum=64, step=1, value=4, visible=False) + + with gr.Accordion("Postprocessing", open=False, elem_id='ddsd_post_processing'): + with gr.Column(): + postprocess_info = gr.HTML('

Postprocessing to the final image

') + disable_postprocess = gr.Checkbox(label='Disable PostProcess', elem_id='disable_postprocess',value=True, visible=True) + with gr.Tabs(elem_id = 'ddsd_postprocess_arguments', visible=False) as postprocess_tabs_acc: + for index in range(shared.opts.data.get('postprocessing_count', 1)): + with gr.Tab(f'Postprocessing {index + 1} Argument', elem_id=f'postprocessing_{index + 1}_argument_tab'): + pp_type = gr.Dropdown(label=f'Postprocessing type {index+1}', elem_id=f'postprocessing_{index+1}', choices=pp_types, value=pp_types[0], visible=True) + pp_saturation_strength = gr.Slider(label=f'Saturation strength {index+1}', minimum=0, maximum=3, step=0.01, value=1.1, visible=False) + pp_sharpening_radius = gr.Slider(label=f'Sharpening radius {index+1}', minimum=0, maximum=50, step=1, value=2, visible=False) + pp_sharpening_percent = gr.Slider(label=f'Sharpening percent {index+1}', minimum=0, maximum=300, step=1, value=150, visible=False) + pp_sharpening_threshold = gr.Slider(label=f'Sharpening threshold {index+1}', minimum=0, maximum=10, step=0.01, value=3, visible=False) + pp_gaussian_radius = gr.Slider(label=f'Gaussian Blur radius {index+1}', minimum=0, maximum=50, step=1, value=2, visible=False) + pp_brightness_strength = gr.Slider(label=f'Brightness strength {index+1}', minimum=0, maximum=5, step=0.01, value=1.1, visible=False) + pp_color_strength = gr.Slider(label=f'Color strength {index+1}', minimum=0, maximum=5, step=0.01, value=1.1, visible=False) + pp_contrast_strength = gr.Slider(label=f'Contrast strength {index+1}', minimum=0, maximum=5, step=0.01, value=1.1, visible=False) + pp_hue_strength = gr.Slider(label=f'Hue strength {index+1}', minimum=-1, maximum=1, step=0.01, value=0, visible=False) + pp_bilateral_sigmaC = gr.Slider(label=f'Bilateral sigmaC {index+1}', minimum=0, maximum=100, step=1, value=10, visible=False) + pp_bilateral_sigmaS = gr.Slider(label=f'Bilateral sigmaS {index+1}', minimum=0, maximum=30, step=1, value=10, visible=False) + 
pp_color_tint_type_name = gr.Radio(label=f'Color tint type name {index+1}',choices=['warm', 'cool'], value='warm', visible=False) + pp_color_tint_lut_name = gr.Dropdown(label=f'Color tint lut name {index+1}',choices=lut_model_list(), value=lut_model_list()[0], visible=False) + pp_type_list.append(pp_type) + pp_saturation_strength_list.append(pp_saturation_strength) + pp_sharpening_radius_list.append(pp_sharpening_radius) + pp_sharpening_percent_list.append(pp_sharpening_percent) + pp_sharpening_threshold_list.append(pp_sharpening_threshold) + pp_gaussian_radius_list.append(pp_gaussian_radius) + pp_brightness_strength_list.append(pp_brightness_strength) + pp_color_strength_list.append(pp_color_strength) + pp_contrast_strength_list.append(pp_contrast_strength) + pp_hue_strength_list.append(pp_hue_strength) + pp_bilateral_sigmaC_list.append(pp_bilateral_sigmaC) + pp_bilateral_sigmaS_list.append(pp_bilateral_sigmaS) + pp_color_tint_type_name_list.append(pp_color_tint_type_name) + pp_color_tint_lut_name_list.append(pp_color_tint_lut_name) + def pp_type_change_func(pp_saturation_strength,pp_sharpening_radius,pp_sharpening_percent,pp_sharpening_threshold,pp_gaussian_radius,pp_brightness_strength,pp_color_strength,pp_contrast_strength,pp_hue_strength,pp_bilateral_sigmaC,pp_bilateral_sigmaS,pp_color_tint_type_name,pp_color_tint_lut_name): + saturation_strength, sharpening_radius, sharpening_percent, sharpening_threshold, gaussian_radius, brightness_strength, color_strength, contrast_strength, hue_strength, bilateral_sigmaC, bilateral_sigmaS, color_tint_type_name, color_tint_lut_name = pp_saturation_strength,pp_sharpening_radius,pp_sharpening_percent,pp_sharpening_threshold,pp_gaussian_radius,pp_brightness_strength,pp_color_strength,pp_contrast_strength,pp_hue_strength,pp_bilateral_sigmaC,pp_bilateral_sigmaS,pp_color_tint_type_name,pp_color_tint_lut_name + return lambda data:{ + saturation_strength:gr_show(data == 'saturation'), + sharpening_radius:gr_show(data == 
'sharpening'), + sharpening_percent:gr_show(data == 'sharpening'), + sharpening_threshold:gr_show(data == 'sharpening'), + gaussian_radius:gr_show(data == 'gaussian blur'), + brightness_strength:gr_show(data == 'brightness'), + color_strength:gr_show(data == 'color'), + contrast_strength:gr_show(data == 'contrast'), + hue_strength:gr_show(data == 'hue'), + bilateral_sigmaC:gr_show(data == 'bilateral'), + bilateral_sigmaS:gr_show(data == 'bilateral'), + color_tint_type_name:gr_show(data == 'color tint(type)'), + color_tint_lut_name:gr_show(data == 'color tint(lut)') + } + def pp_type_change_func2(pp_saturation_strength,pp_sharpening_radius,pp_sharpening_percent,pp_sharpening_threshold,pp_gaussian_radius,pp_brightness_strength,pp_color_strength,pp_contrast_strength,pp_hue_strength,pp_bilateral_sigmaC,pp_bilateral_sigmaS,pp_color_tint_type_name,pp_color_tint_lut_name): + saturation_strength, sharpening_radius, sharpening_percent, sharpening_threshold, gaussian_radius, brightness_strength, color_strength, contrast_strength, hue_strength, bilateral_sigmaC, bilateral_sigmaS, color_tint_type_name, color_tint_lut_name = pp_saturation_strength,pp_sharpening_radius,pp_sharpening_percent,pp_sharpening_threshold,pp_gaussian_radius,pp_brightness_strength,pp_color_strength,pp_contrast_strength,pp_hue_strength,pp_bilateral_sigmaC,pp_bilateral_sigmaS,pp_color_tint_type_name,pp_color_tint_lut_name + return [saturation_strength, sharpening_radius, sharpening_percent, sharpening_threshold, gaussian_radius, brightness_strength, color_strength, contrast_strength, hue_strength, bilateral_sigmaC, bilateral_sigmaS, color_tint_type_name, color_tint_lut_name] + pp_type.change( + pp_type_change_func(pp_saturation_strength,pp_sharpening_radius,pp_sharpening_percent,pp_sharpening_threshold,pp_gaussian_radius,pp_brightness_strength,pp_color_strength,pp_contrast_strength,pp_hue_strength,pp_bilateral_sigmaC,pp_bilateral_sigmaS,pp_color_tint_type_name,pp_color_tint_lut_name), + inputs=[pp_type], + 
outputs=pp_type_change_func2(pp_saturation_strength,pp_sharpening_radius,pp_sharpening_percent,pp_sharpening_threshold,pp_gaussian_radius,pp_brightness_strength,pp_color_strength,pp_contrast_strength,pp_hue_strength,pp_bilateral_sigmaC,pp_bilateral_sigmaS,pp_color_tint_type_name,pp_color_tint_lut_name) + ) + postprocess_tabs = postprocess_tabs_acc + + with gr.Accordion("Watermark", open=False, elem_id='ddsd_watermark_option'): + with gr.Column(): + watermark_info = gr.HTML('

Add a watermark to the final saved image

') + disable_watermark = gr.Checkbox(label='Disable Watermark', elem_id='disable_watermark',value=True, visible=True) + with gr.Tabs(elem_id='watermark_tabs', visible=False) as watermark_tabs_acc: + for index in range(shared.opts.data.get('watermark_count', 1)): + with gr.Tab(f'Watermark {index + 1} Argument', elem_id=f'watermark_{index+1}_argument_tab'): + watermark_type = gr.Radio(choices=['Text','Image'], value='Text', label=f'Watermark {index+1} text') + watermark_position = gr.Dropdown(choices=['Left','Left-Top','Top','Right-Top','Right','Right-Bottom','Bottom','Left-Bottom','Center'], value='Center', label=f'Watermark {index+1} Position', elem_id=f'watermark_{index+1}_position') + with gr.Column(): + watermark_image = gr.Image(label=f"Watermark {index+1} Upload image", visible=False) + with gr.Row(): + watermark_image_size_width = gr.Slider(label=f'Watermark {index+1} Width', visible=False, minimum=50, maximum=500, step=10, value=100) + watermark_image_size_height = gr.Slider(label=f'Watermark {index+1} Height', visible=False, minimum=50, maximum=500, step=10, value=100) + with gr.Column(): + watermark_text = gr.Textbox(placeholder='watermark text - ex) Copyright © NeoGraph. 
All Rights Reserved.', visible=True, value='') + with gr.Row(): + watermark_text_color = gr.ColorPicker(label=f'Watermark {index+1} Color') + watermark_text_font = gr.Dropdown(label=f'Watermark {index+1} Fonts', choices=fonts_list, value=fonts_list[0]) + watermark_text_size = gr.Slider(label=f'Watermark {index+1} Size', visible=True, minimum=10, maximum=500, step=1, value=50) + watermark_padding = gr.Slider(label=f'Watermark {index+1} Padding', visible=True, minimum=0, maximum=200, step=1, value=10) + watermark_alpha = gr.Slider(label=f'Watermark {index+1} Alpha', visible=True, minimum=0, maximum=1, step=0.01, value=0.4) + watermark_type_list.append(watermark_type) + watermark_position_list.append(watermark_position) + watermark_image_list.append(watermark_image) + watermark_image_size_width_list.append(watermark_image_size_width) + watermark_image_size_height_list.append(watermark_image_size_height) + watermark_text_list.append(watermark_text) + watermark_text_color_list.append(watermark_text_color) + watermark_text_font_list.append(watermark_text_font) + watermark_text_size_list.append(watermark_text_size) + watermark_padding_list.append(watermark_padding) + watermark_alpha_list.append(watermark_alpha) + def watermark_type_change_func(watermark_image, watermark_image_size_width, watermark_image_size_height, watermark_text, watermark_text_color, watermark_text_font, watermark_text_size): + image, image_size_width, iamge_size_height, text, text_color, text_font, text_size = watermark_image, watermark_image_size_width, watermark_image_size_height, watermark_text, watermark_text_color, watermark_text_font, watermark_text_size + return lambda data:{ + image:gr_show(data == 'Image'), + image_size_width:gr_show(data == 'Image'), + iamge_size_height:gr_show(data == 'Image'), + text:gr_show(data == 'Text'), + text_color:gr_show(data == 'Text'), + text_font:gr_show(data == 'Text'), + text_size:gr_show(data == 'Text') + } + def watermark_type_change_func2(watermark_image, 
watermark_image_size_width, watermark_image_size_height, watermark_text, watermark_text_color, watermark_text_font, watermark_text_size): + image, image_size_width, iamge_size_height, text, text_color, text_font, text_size = watermark_image, watermark_image_size_width, watermark_image_size_height, watermark_text, watermark_text_color, watermark_text_font, watermark_text_size + return [image, image_size_width, iamge_size_height, text, text_color, text_font, text_size] + watermark_type.change( + watermark_type_change_func(watermark_image,watermark_image_size_width,watermark_image_size_height,watermark_text,watermark_text_color,watermark_text_font,watermark_text_size), + inputs=[watermark_type], + outputs=watermark_type_change_func2(watermark_image, watermark_image_size_width, watermark_image_size_height, watermark_text, watermark_text_color, watermark_text_font, watermark_text_size) + ) + watermark_tabs = watermark_tabs_acc + disable_outpaint.change( + lambda disable:{ + outpaint_sample:gr_show(not disable), + outpaint_tabs:gr_show(not disable), + outpaint_mask_blur:gr_show(not disable) + }, + inputs=[disable_outpaint], + outputs=[outpaint_sample, outpaint_tabs, outpaint_mask_blur] + ) + disable_watermark.change( + lambda disable:{ + watermark_tabs:gr_show(not disable) + }, + inputs=[disable_watermark], + outputs=watermark_tabs + ) + disable_postprocess.change( + lambda disable:{ + postprocess_tabs:gr_show(not disable) + }, + inputs=[disable_postprocess], + outputs=postprocess_tabs + ) + disable_upscaler.change( + lambda disable: { + ddetailer_before_upscaler:gr_show(not disable), + upscaler_sample:gr_show(not disable), + upscaler_index:gr_show(not disable), + upscaler_ckpt:gr_show(not disable), + upscaler_vae:gr_show(not disable), + scalevalue:gr_show(not disable), + overlap:gr_show(not disable), + rewidth:gr_show(not disable), + reheight:gr_show(not disable), + denoising_strength:gr_show(not disable), + }, + inputs= [disable_upscaler], + outputs 
=[ddetailer_before_upscaler, upscaler_sample, upscaler_index, upscaler_ckpt, upscaler_vae, scalevalue, overlap, rewidth, reheight, denoising_strength] + ) + + disable_mask_paint_mode.change( + lambda disable:{ + inpaint_mask_mode:gr_show(is_img2img and not disable) + }, + inputs=[disable_mask_paint_mode], + outputs=inpaint_mask_mode + ) + + disable_detailer.change( + lambda disable, in_disable:{ + disable_mask_paint_mode:gr_show(not disable and is_img2img), + inpaint_mask_mode:gr_show(not disable and is_img2img and not in_disable), + detailer_sample:gr_show(not disable), + detailer_sam_model:gr_show(not disable), + detailer_dino_model:gr_show(not disable), + dino_full_res_inpaint:gr_show(not disable), + dino_inpaint_padding:gr_show(not disable), + detailer_mask_blur:gr_show(not disable), + dino_tabs:gr_show(not disable) + }, + inputs=[disable_detailer, disable_mask_paint_mode], + outputs=[ + disable_mask_paint_mode, + inpaint_mask_mode, + detailer_sample, + detailer_sam_model, + detailer_dino_model, + dino_full_res_inpaint, + dino_inpaint_padding, + detailer_mask_blur, + dino_tabs + ] + ) + + ret += [enable_script_names] + ret += [disable_watermark, disable_postprocess] + ret += [disable_upscaler, ddetailer_before_upscaler, scalevalue, upscaler_sample, overlap, upscaler_index, rewidth, reheight, denoising_strength, upscaler_ckpt, upscaler_vae] + ret += [disable_detailer, disable_mask_paint_mode, inpaint_mask_mode, detailer_sample, detailer_sam_model, detailer_dino_model, dino_full_res_inpaint, dino_inpaint_padding, detailer_mask_blur] + ret += [disable_outpaint, outpaint_sample, outpaint_mask_blur] + ret += dino_detection_ckpt_list + \ + dino_detection_vae_list + \ + dino_detection_prompt_list + \ + dino_detection_positive_list + \ + dino_detection_negative_list + \ + dino_detection_denoise_list + \ + dino_detection_cfg_list + \ + dino_detection_steps_list + \ + dino_detection_spliter_disable_list + \ + dino_detection_spliter_remove_area_list + \ + 
dino_detection_clip_skip_list + \ + watermark_type_list + \ + watermark_position_list + \ + watermark_image_list + \ + watermark_image_size_width_list + \ + watermark_image_size_height_list + \ + watermark_text_list + \ + watermark_text_color_list + \ + watermark_text_font_list + \ + watermark_text_size_list + \ + watermark_padding_list + \ + watermark_alpha_list + \ + pp_type_list + \ + pp_saturation_strength_list + \ + pp_sharpening_radius_list + \ + pp_sharpening_percent_list + \ + pp_sharpening_threshold_list + \ + pp_gaussian_radius_list + \ + pp_brightness_strength_list + \ + pp_color_strength_list + \ + pp_contrast_strength_list + \ + pp_hue_strength_list + \ + pp_bilateral_sigmaC_list + \ + pp_bilateral_sigmaS_list + \ + pp_color_tint_type_name_list + \ + pp_color_tint_lut_name_list + \ + outpaint_positive_list + \ + outpaint_negative_list + \ + outpaint_denoise_list + \ + outpaint_cfg_list + \ + outpaint_steps_list + \ + outpaint_pixels_list + \ + outpaint_direction_list + + def ds(*args): + args = list(args) + ddsd_save_path = args[0] + args = args[1:] + enable_script_names,disable_watermark,disable_postprocess,disable_upscaler,ddetailer_before_upscaler,scalevalue,upscaler_sample,overlap,upscaler_index,rewidth,reheight,denoising_strength,upscaler_ckpt,upscaler_vae,disable_detailer,disable_mask_paint_mode,inpaint_mask_mode,detailer_sample,detailer_sam_model,detailer_dino_model,dino_full_res_inpaint,dino_inpaint_padding,detailer_mask_blur,disable_outpaint, outpaint_sample, outpaint_mask_blur = args[:26] + result = {} + result['enable_script_names'] = enable_script_names + result['disable_watermark'] = disable_watermark + result['disable_postprocess'] = disable_postprocess + result['disable_upscaler'] = disable_upscaler + result['ddetailer_before_upscaler'] = ddetailer_before_upscaler + result['scalevalue'] = scalevalue + result['upscaler_sample'] = upscaler_sample + result['overlap'] = overlap + result['upscaler_index'] = 
shared.sd_upscalers[upscaler_index].name + result['rewidth'] = rewidth + result['reheight'] = reheight + result['denoising_strength'] = denoising_strength + result['upscaler_ckpt'] = upscaler_ckpt + result['upscaler_vae'] = upscaler_vae + result['disable_detailer'] = disable_detailer + result['disable_mask_paint_mode'] = disable_mask_paint_mode + result['inpaint_mask_mode'] = inpaint_mask_mode + result['detailer_sample'] = detailer_sample + result['detailer_sam_model'] = detailer_sam_model + result['detailer_dino_model'] = detailer_dino_model + result['dino_full_res_inpaint'] = dino_full_res_inpaint + result['dino_inpaint_padding'] = dino_inpaint_padding + result['detailer_mask_blur'] = detailer_mask_blur + result['disable_outpaint'] = disable_outpaint + result['outpaint_sample'] = outpaint_sample + result['outpaint_mask_blur'] = outpaint_mask_blur + args = args[26:] + result['dino_detect_count'] = shared.opts.data.get('dino_detect_count', 2) + for index in range(result['dino_detect_count']): + result[f'dino_detection_ckpt_{index+1}'] = args[index + result['dino_detect_count'] * 0] + result[f'dino_detection_vae_{index+1}'] = args[index + result['dino_detect_count'] * 1] + result[f'dino_detection_prompt_{index+1}'] = args[index + result['dino_detect_count'] * 2] + result[f'dino_detection_positive_{index+1}'] = args[index + result['dino_detect_count'] * 3] + result[f'dino_detection_negative_{index+1}'] = args[index + result['dino_detect_count'] * 4] + result[f'dino_detection_denoise_{index+1}'] = args[index + result['dino_detect_count'] * 5] + result[f'dino_detection_cfg_{index+1}'] = args[index + result['dino_detect_count'] * 6] + result[f'dino_detection_steps_{index+1}'] = args[index + result['dino_detect_count'] * 7] + result[f'dino_detection_spliter_disable_{index+1}'] = args[index + result['dino_detect_count'] * 8] + result[f'dino_detection_spliter_remove_area_{index+1}'] = args[index + result['dino_detect_count'] * 9] + 
result[f'dino_detection_clip_skip_{index+1}'] = args[index + result['dino_detect_count'] * 10] + args = args[result['dino_detect_count'] * 11:] + result['watermark_count'] = shared.opts.data.get('watermark_count', 1) + for index in range(result['watermark_count']): + result[f'watermark_type_{index+1}'] = args[index + result['watermark_count'] * 0] + result[f'watermark_position_{index+1}'] = args[index + result['watermark_count'] * 1] + result[f'watermark_image_{index+1}'] = None + result[f'watermark_image_size_width_{index+1}'] = args[index + result['watermark_count'] * 3] + result[f'watermark_image_size_height_{index+1}'] = args[index + result['watermark_count'] * 4] + result[f'watermark_text_{index+1}'] = args[index + result['watermark_count'] * 5] + result[f'watermark_text_color_{index+1}'] = args[index + result['watermark_count'] * 6] + result[f'watermark_text_font_{index+1}'] = args[index + result['watermark_count'] * 7] + result[f'watermark_text_size_{index+1}'] = args[index + result['watermark_count'] * 8] + result[f'watermark_padding_{index+1}'] = args[index + result['watermark_count'] * 9] + result[f'watermark_alpha_{index+1}'] = args[index + result['watermark_count'] * 10] + args = args[result['watermark_count'] * 11:] + result['postprocessing_count'] = shared.opts.data.get('postprocessing_count', 1) + for index in range(result['postprocessing_count']): + result[f'pp_type_{index+1}'] = args[index + result['postprocessing_count'] * 0] + result[f'pp_saturation_strength_{index+1}'] = args[index + result['postprocessing_count'] * 1] + result[f'pp_sharpening_radius_{index+1}'] = args[index + result['postprocessing_count'] * 2] + result[f'pp_sharpening_percent_{index+1}'] = args[index + result['postprocessing_count'] * 3] + result[f'pp_sharpening_threshold_{index+1}'] = args[index + result['postprocessing_count'] * 4] + result[f'pp_gaussian_radius_{index+1}'] = args[index + result['postprocessing_count'] * 5] + result[f'pp_brightness_strength_{index+1}'] = 
args[index + result['postprocessing_count'] * 6] + result[f'pp_color_strength_{index+1}'] = args[index + result['postprocessing_count'] * 7] + result[f'pp_contrast_strength_{index+1}'] = args[index + result['postprocessing_count'] * 8] + result[f'pp_hue_strength_{index+1}'] = args[index + result['postprocessing_count'] * 9] + result[f'pp_bilateral_sigmaC_{index+1}'] = args[index + result['postprocessing_count'] * 10] + result[f'pp_bilateral_sigmaS_{index+1}'] = args[index + result['postprocessing_count'] * 11] + result[f'pp_color_tint_type_name_{index+1}'] = args[index + result['postprocessing_count'] * 12] + result[f'pp_color_tint_lut_name_{index+1}'] = args[index + result['postprocessing_count'] * 13] + args = args[result['postprocessing_count'] * 13:] + result['outpaint_count'] = shared.opts.data.get('outpaint_count', 1) + for index in range(result['outpaint_count']): + result[f'outpaint_positive_{index+1}'] = args[index + result['outpaint_count'] * 0] + result[f'outpaint_negative_{index+1}'] = args[index + result['outpaint_count'] * 1] + result[f'outpaint_denoise_{index+1}'] = args[index + result['outpaint_count'] * 2] + result[f'outpaint_cfg_{index+1}'] = args[index + result['outpaint_count'] * 3] + result[f'outpaint_steps_{index+1}'] = args[index + result['outpaint_count'] * 4] + result[f'outpaint_pixels_{index+1}'] = args[index + result['outpaint_count'] * 5] + result[f'outpaint_direction_{index+1}'] = args[index + result['outpaint_count'] * 6] + args = args[result['outpaint_count'] * 6:] + if not os.path.exists(ddsd_config_path): + os.mkdir(ddsd_config_path) + with open(os.path.join(ddsd_config_path, f'{ddsd_save_path}.ddcfg'), 'w', encoding='utf-8') as f: + f.write(json_write(result)) + choices = [x[:-6] for x in os.listdir(ddsd_config_path) if x.endswith('.ddcfg')] + return { + ddsd_load_path:gr_list_refresh(choices, choices[0]) + } + def dl(ddsd_load_path): + with open(os.path.join(ddsd_config_path, f'{ddsd_load_path}.ddcfg'), 'r', encoding='utf-8') as 
f: + result = json_read(f) + results = [result['enable_script_names'],result['disable_watermark'],result['disable_postprocess'],result['disable_upscaler'],result['ddetailer_before_upscaler'],result['scalevalue'],result['upscaler_sample'],result['overlap'],result['upscaler_index'],result['rewidth'],result['reheight'],result['denoising_strength'],result['upscaler_ckpt'],result['upscaler_vae'],result['disable_detailer'],result['disable_mask_paint_mode'],result['inpaint_mask_mode'],result['detailer_sample'],result['detailer_sam_model'],result['detailer_dino_model'],result['dino_full_res_inpaint'],result['dino_inpaint_padding'],result['detailer_mask_blur'],result['disable_outpaint'],result['outpaint_sample'],result['outpaint_mask_blur']] + def result_create(token,file_count,count, default): + data = file_count if file_count < count else count + temp = [] + for index in range(data): + temp.append(result.get(f'{token}_{index+1}',default)) + while len(temp) < count: + temp.append(default) + return temp + results += result_create('dino_detection_ckpt',result['dino_detect_count'], shared.opts.data.get('dino_detect_count', 2), 'Original') + results += result_create('dino_detection_vae',result['dino_detect_count'], shared.opts.data.get('dino_detect_count', 2), 'Original') + results += result_create('dino_detection_prompt',result['dino_detect_count'], shared.opts.data.get('dino_detect_count', 2), '') + results += result_create('dino_detection_positive',result['dino_detect_count'], shared.opts.data.get('dino_detect_count', 2), '') + results += result_create('dino_detection_negative',result['dino_detect_count'], shared.opts.data.get('dino_detect_count', 2), '') + results += result_create('dino_detection_denoise',result['dino_detect_count'], shared.opts.data.get('dino_detect_count', 2), 0.4) + results += result_create('dino_detection_cfg',result['dino_detect_count'], shared.opts.data.get('dino_detect_count', 2), 0) + results += 
result_create('dino_detection_steps',result['dino_detect_count'], shared.opts.data.get('dino_detect_count', 2), 0) + results += result_create('dino_detection_spliter_disable',result['dino_detect_count'], shared.opts.data.get('dino_detect_count', 2), True) + results += result_create('dino_detection_spliter_remove_area',result['dino_detect_count'], shared.opts.data.get('dino_detect_count', 2), 8) + results += result_create('dino_detection_clip_skip',result['dino_detect_count'], shared.opts.data.get('dino_detect_count', 2), 0) + + results += result_create('watermark_type',result['watermark_count'], shared.opts.data.get('watermark_count', 1), 'Text') + results += result_create('watermark_position',result['watermark_count'], shared.opts.data.get('watermark_count', 1), 'Center') + results += result_create('watermark_image',result['watermark_count'], shared.opts.data.get('watermark_count', 1), None) + results += result_create('watermark_image_size_width',result['watermark_count'], shared.opts.data.get('watermark_count', 1), 100) + results += result_create('watermark_image_size_height',result['watermark_count'], shared.opts.data.get('watermark_count', 1), 100) + results += result_create('watermark_text',result['watermark_count'], shared.opts.data.get('watermark_count', 1), '') + results += result_create('watermark_text_color',result['watermark_count'], shared.opts.data.get('watermark_count', 1), None) + results += result_create('watermark_text_font',result['watermark_count'], shared.opts.data.get('watermark_count', 1), 'Arial') + results += result_create('watermark_text_size',result['watermark_count'], shared.opts.data.get('watermark_count', 1), 50) + results += result_create('watermark_padding',result['watermark_count'], shared.opts.data.get('watermark_count', 1), 10) + results += result_create('watermark_alpha',result['watermark_count'], shared.opts.data.get('watermark_count', 1), 0.4) + + results += result_create('pp_type',result['postprocessing_count'], 
shared.opts.data.get('postprocessing_count', 1), 'none') + results += result_create('pp_saturation_strength',result['postprocessing_count'], shared.opts.data.get('postprocessing_count', 1), 1.1) + results += result_create('pp_sharpening_radius',result['postprocessing_count'], shared.opts.data.get('postprocessing_count', 1), 2) + results += result_create('pp_sharpening_percent',result['postprocessing_count'], shared.opts.data.get('postprocessing_count', 1), 100) + results += result_create('pp_sharpening_threshold',result['postprocessing_count'], shared.opts.data.get('postprocessing_count', 1), 1) + results += result_create('pp_gaussian_radius',result['postprocessing_count'], shared.opts.data.get('postprocessing_count', 1), 2) + results += result_create('pp_brightness_strength',result['postprocessing_count'], shared.opts.data.get('postprocessing_count', 1), 1.1) + results += result_create('pp_color_strength',result['postprocessing_count'], shared.opts.data.get('postprocessing_count', 1), 1.1) + results += result_create('pp_contrast_strength',result['postprocessing_count'], shared.opts.data.get('postprocessing_count', 1), 1.1) + results += result_create('pp_hue_strength',result['postprocessing_count'], shared.opts.data.get('postprocessing_count', 1), 0) + results += result_create('pp_bilateral_sigmaC',result['postprocessing_count'], shared.opts.data.get('postprocessing_count', 1), 10) + results += result_create('pp_bilateral_sigmaS',result['postprocessing_count'], shared.opts.data.get('postprocessing_count', 1), 10) + results += result_create('pp_color_tint_type_name',result['postprocessing_count'], shared.opts.data.get('postprocessing_count', 1), 'warm') + results += result_create('pp_color_tint_lut_name',result['postprocessing_count'], shared.opts.data.get('postprocessing_count', 1), 'FGCineBasic.cube') + + results += result_create('outpaint_positive', result['outpaint_count'], shared.opts.data.get('outpaint_count', 1), '') + results += 
result_create('outpaint_negative', result['outpaint_count'], shared.opts.data.get('outpaint_count', 1), '') + results += result_create('outpaint_denoise', result['outpaint_count'], shared.opts.data.get('outpaint_count', 1), 0.8) + results += result_create('outpaint_cfg', result['outpaint_count'], shared.opts.data.get('outpaint_count', 1), 0) + results += result_create('outpaint_steps', result['outpaint_count'], shared.opts.data.get('outpaint_count', 1), 0) + results += result_create('outpaint_pixels', result['outpaint_count'], shared.opts.data.get('outpaint_count', 1), 64) + results += result_create('outpaint_direction', result['outpaint_count'], shared.opts.data.get('outpaint_count', 1), 'None') + return dict(zip(ret, [gr_value_refresh(x) for x in results])) + ddsd_save.click(ds, inputs=[ddsd_save_path]+ret, outputs=[ddsd_load_path]) + ddsd_load.click(dl, inputs=[ddsd_load_path], outputs=ret) + + return ret + + def outpainting(self, p, init_image, + outpaint_sample, outpaint_mask_blur, outpaint_count, + outpaint_denoise_list, outpaint_cfg_list, outpaint_steps_list, outpaint_positive_list, outpaint_negative_list, + outpaint_pixels_list, outpaint_direction_list): + + for outpaint_index in range(outpaint_count): + if outpaint_direction_list[outpaint_index] == 'None': continue + is_horiz = outpaint_direction_list[outpaint_index] in ['Left','Right'] + is_vert = outpaint_direction_list[outpaint_index] in ['Up','Down'] + + target_w = init_image.width + (outpaint_pixels_list[outpaint_index] if is_horiz else 0) + target_h = init_image.height + (outpaint_pixels_list[outpaint_index] if is_vert else 0) + + image = Image.new('RGB', (target_w,target_h)) + image.paste(init_image, + (outpaint_pixels_list[outpaint_index] if outpaint_direction_list[outpaint_index] == 'Left' else 0, + outpaint_pixels_list[outpaint_index] if outpaint_direction_list[outpaint_index] == 'Up' else 0)) + mask = Image.new('L', (target_w, target_h), 'white') + mask_draw = ImageDraw.Draw(mask) + + 
mask_draw.rectangle(( + (outpaint_pixels_list[outpaint_index] + outpaint_mask_blur * 2) if outpaint_direction_list[outpaint_index] == 'Left' else 0, + (outpaint_pixels_list[outpaint_index] + outpaint_mask_blur * 2) if outpaint_direction_list[outpaint_index] == 'Up' else 0, + (mask.width - outpaint_pixels_list[outpaint_index] - outpaint_mask_blur * 2) if outpaint_direction_list[outpaint_index] == 'Right' else target_w, + (mask.height - outpaint_pixels_list[outpaint_index] - outpaint_mask_blur * 2) if outpaint_direction_list[outpaint_index] == 'Down' else target_h + ), fill='black') + + latent_mask = Image.new('L', (target_w, target_h), 'white') + latent_mask_draw = ImageDraw.Draw(latent_mask) + latent_mask_draw.rectangle(( + (outpaint_pixels_list[outpaint_index] + outpaint_mask_blur // 2) if outpaint_direction_list[outpaint_index] == 'Left' else 0, + (outpaint_pixels_list[outpaint_index] + outpaint_mask_blur // 2) if outpaint_direction_list[outpaint_index] == 'Up' else 0, + (mask.width - outpaint_pixels_list[outpaint_index] - outpaint_mask_blur // 2) if outpaint_direction_list[outpaint_index] == 'Right' else target_w, + (mask.height - outpaint_pixels_list[outpaint_index] - outpaint_mask_blur // 2) if outpaint_direction_list[outpaint_index] == 'Down' else target_h + ), fill='black') + + devices.torch_gc() + + pi = I2I_Generator_Create( + p, ('Euler' if p.sampler_name in ['PLMS', 'UniPC', 'DDIM'] else p.sampler_name) if outpaint_sample == 'Original' else outpaint_sample, + outpaint_mask_blur * 2, False, 0, image, + outpaint_denoise_list[outpaint_index], + outpaint_cfg_list[outpaint_index] if outpaint_cfg_list[outpaint_index] > 0 else p.cfg_scale, + outpaint_steps_list[outpaint_index] if outpaint_steps_list[outpaint_index] > 0 else p.steps, + target_w, + target_h, + p.tiling, p.scripts, self.i2i_scripts, self.i2i_scripts_always, p.script_args, + outpaint_positive_list[outpaint_index] if outpaint_positive_list[outpaint_index] else self.target_prompts, + 
outpaint_negative_list[outpaint_index] if outpaint_negative_list[outpaint_index] else self.target_negative_prompts, + 0 + ) + + + pi.image_mask = mask + pi.latent_mask = latent_mask + pi.seed = self.target_seeds + outpaint_index + + state.job_count += 1 + proc = processing.process_images(pi) + + p.extra_generation_params[f'Outpaint {outpaint_index + 1} Direction'] = outpaint_direction_list[outpaint_index] + p.extra_generation_params[f'Outpaint {outpaint_index + 1} Pixels'] = outpaint_pixels_list[outpaint_index] + p.extra_generation_params[f'Outpaint {outpaint_index + 1} Positive'] = proc.all_prompts[0] if outpaint_positive_list[outpaint_index] else "Original" + p.extra_generation_params[f'Outpaint {outpaint_index + 1} Negative'] = proc.all_negative_prompts[0] if outpaint_negative_list[outpaint_index] else "Original" + p.extra_generation_params[f'Outpaint {outpaint_index + 1} Denoising'] = pi.denoising_strength + p.extra_generation_params[f'Outpaint {outpaint_index + 1} CFG Scale'] = pi.cfg_scale + p.extra_generation_params[f'Outpaint {outpaint_index + 1} Steps'] = pi.steps + + init_image = proc.images[0] + + return init_image + + + + def dino_detect_detailer(self, p, init_image, + disable_mask_paint_mode, inpaint_mask_mode, detailer_sample, detailer_sam_model, detailer_dino_model, + dino_full_res_inpaint, dino_inpaint_padding, detailer_mask_blur, + dino_detect_count, + dino_detection_ckpt_list, + dino_detection_vae_list, + dino_detection_prompt_list, + dino_detection_positive_list, + dino_detection_negative_list, + dino_detection_denoise_list, + dino_detection_cfg_list, + dino_detection_steps_list, + dino_detection_spliter_disable_list, + dino_detection_spliter_remove_area_list, + dino_detection_clip_skip_list): + self.image_results.append([]) + def mask_image_suffle(mask, image): + if shared.opts.data.get('mask_type', False): return mask + mask_image = Image.new("RGBA", mask.size, (255,255,255,0)) + mask_image.paste(mask, mask=mask) + mask_image = 
Image.composite(mask, image, mask_image) + return Image.blend(image, mask_image, 0.5) + for detect_index in range(dino_detect_count): + if len(dino_detection_prompt_list[detect_index]) < 1: continue + pi = I2I_Generator_Create( + p, ('Euler' if p.sampler_name in ['PLMS', 'UniPC', 'DDIM'] else p.sampler_name) if detailer_sample == 'Original' else detailer_sample, + detailer_mask_blur, dino_full_res_inpaint, dino_inpaint_padding, init_image, + dino_detection_denoise_list[detect_index], + dino_detection_cfg_list[detect_index] if dino_detection_cfg_list[detect_index] > 0 else p.cfg_scale, + dino_detection_steps_list[detect_index] if dino_detection_steps_list[detect_index] > 0 else p.steps, + p.width, p.height, p.tiling, p.scripts, self.i2i_scripts, self.i2i_scripts_always, p.script_args, + dino_detection_positive_list[detect_index] if dino_detection_positive_list[detect_index] else self.target_prompts, + dino_detection_negative_list[detect_index] if dino_detection_negative_list[detect_index] else self.target_negative_prompts + ) + mask = dino_detect_from_prompt(dino_detection_prompt_list[detect_index], detailer_sam_model, detailer_dino_model, init_image, disable_mask_paint_mode or isinstance(p, StableDiffusionProcessingTxt2Img), inpaint_mask_mode, getattr(p,'image_mask',None)) + if mask is not None: + self.change_ckpt_model(dino_detection_ckpt_list[detect_index] if dino_detection_ckpt_list[detect_index] != 'Original' else self.ckptname) + self.change_vae_model(dino_detection_vae_list[detect_index] if dino_detection_vae_list[detect_index] != 'Original' else self.vae) + opts.CLIP_stop_at_last_layers = dino_detection_clip_skip_list[detect_index] if dino_detection_clip_skip_list[detect_index] else self.clip_skip + if not dino_detection_spliter_disable_list[detect_index]: + mask = mask_spliter_and_remover(mask, dino_detection_spliter_remove_area_list[detect_index]) + for mask_index, mask_split in enumerate(mask): + pi.seed = self.target_seeds + mask_index + detect_index + 
pi.init_images = [init_image] + pi.image_mask = Image.fromarray(mask_split) + if shared.opts.data.get('save_ddsd_working_on_dino_mask_images', False): + images.save_image(mask_image_suffle(pi.image_mask, pi.init_images[0]), p.outpath_samples, + shared.opts.data.get('save_ddsd_working_on_dino_mask_images_prefix', ''), + pi.seed, self.target_prompts, opts.samples_format, + suffix='' if shared.opts.data.get('save_ddsd_working_on_dino_mask_images_suffix', '') == '' else f"-{shared.opts.data.get('save_ddsd_working_on_dino_mask_images_suffix', '')}", + info=create_infotext(p, p.all_prompts, p.all_seeds, p.all_subseeds, None, self.iter_number, self.batch_number), p=p) + state.job_count += 1 + if shared.opts.data.get('preview_masks_images', False): + shared.state.current_image = mask_image_suffle(pi.image_mask, pi.init_images[0]) + if shared.opts.data.get('result_masks', False): + self.image_results[-1].append(mask_image_suffle(pi.image_mask, pi.init_images[0])) + processed = processing.process_images(pi) + init_image = processed.images[0] + if shared.opts.data.get('save_ddsd_working_on_images', False): + images.save_image(init_image, p.outpath_samples, + shared.opts.data.get('save_ddsd_working_on_images_prefix', ''), + pi.seed, self.target_prompts, opts.samples_format, + suffix='' if shared.opts.data.get('save_ddsd_working_on_images_suffix', '') == '' else f"-{shared.opts.data.get('save_ddsd_working_on_images_suffix', '')}", + info=create_infotext(p, p.all_prompts, p.all_seeds, p.all_subseeds, None, self.iter_number, self.batch_number), p=p) + else: + pi.seed = self.target_seeds + detect_index + pi.init_images = [init_image] + pi.image_mask = Image.fromarray(mask) + if shared.opts.data.get('save_ddsd_working_on_dino_mask_images', False): + images.save_image(mask_image_suffle(pi.image_mask, pi.init_images[0]), p.outpath_samples, + shared.opts.data.get('save_ddsd_working_on_dino_mask_images_prefix', ''), + pi.seed, self.target_prompts, opts.samples_format, + suffix='' if 
shared.opts.data.get('save_ddsd_working_on_dino_mask_images_suffix', '') == '' else f"-{shared.opts.data.get('save_ddsd_working_on_dino_mask_images_suffix', '')}", + info=create_infotext(p, p.all_prompts, p.all_seeds, p.all_subseeds, None, self.iter_number, self.batch_number), p=p) + state.job_count += 1 + if shared.opts.data.get('preview_masks_images', False): + shared.state.current_image = mask_image_suffle(pi.image_mask, pi.init_images[0]) + if shared.opts.data.get('result_masks', False): + self.image_results[-1].append(mask_image_suffle(pi.image_mask, pi.init_images[0])) + processed = processing.process_images(pi) + init_image = processed.images[0] + if shared.opts.data.get('save_ddsd_working_on_images', False): + images.save_image(init_image, p.outpath_samples, + shared.opts.data.get('save_ddsd_working_on_images_prefix', ''), + pi.seed, self.target_prompts, opts.samples_format, + suffix='' if shared.opts.data.get('save_ddsd_working_on_images_suffix', '') == '' else f"-{shared.opts.data.get('save_ddsd_working_on_images_suffix', '')}", + info=create_infotext(p, p.all_prompts, p.all_seeds, p.all_subseeds, None, self.iter_number, self.batch_number), p=p) + p.extra_generation_params[f'DINO {detect_index + 1}'] = dino_detection_prompt_list[detect_index] + p.extra_generation_params[f'DINO {detect_index + 1} Positive'] = processed.all_prompts[0] if dino_detection_positive_list[detect_index] else "Original" + p.extra_generation_params[f'DINO {detect_index + 1} Negative'] = processed.all_negative_prompts[0] if dino_detection_negative_list[detect_index] else "Original" + p.extra_generation_params[f'DINO {detect_index + 1} Denoising'] = pi.denoising_strength + p.extra_generation_params[f'DINO {detect_index + 1} CFG Scale'] = pi.cfg_scale + p.extra_generation_params[f'DINO {detect_index + 1} Steps'] = pi.steps + p.extra_generation_params[f'DINO {detect_index + 1} Spliter'] = not dino_detection_spliter_disable_list[detect_index] + p.extra_generation_params[f'DINO 
{detect_index + 1} SplitRemove Area'] = dino_detection_spliter_remove_area_list[detect_index] + p.extra_generation_params[f'DINO {detect_index + 1} Ckpt Model'] = dino_detection_ckpt_list[detect_index] if dino_detection_ckpt_list[detect_index] != 'Original' else self.ckptname + p.extra_generation_params[f'DINO {detect_index + 1} Vae Model'] = dino_detection_vae_list[detect_index] if dino_detection_vae_list[detect_index] != 'Original' else self.vae + p.extra_generation_params[f'DINO {detect_index + 1} Clip Skip'] = dino_detection_clip_skip_list[detect_index] if dino_detection_clip_skip_list[detect_index] else 'Original' + else: + p.extra_generation_params[f'DINO {detect_index + 1}'] = 'Error' + opts.CLIP_stop_at_last_layers = self.clip_skip + return init_image + + def upscale(self, p, init_image, + scalevalue, upscaler_sample, overlap, rewidth, reheight, denoising_strength, upscaler_ckpt, upscaler_vae, + detailer_mask_blur, dino_full_res_inpaint, dino_inpaint_padding): + self.change_ckpt_model(upscaler_ckpt if upscaler_ckpt != 'Original' else self.ckptname) + self.change_vae_model(upscaler_vae if upscaler_vae != 'Original' else self.vae) + pi = I2I_Generator_Create( + p, ('Euler' if p.sampler_name in ['PLMS', 'UniPC', 'DDIM'] else p.sampler_name) if upscaler_sample == 'Original' else upscaler_sample, + detailer_mask_blur, dino_full_res_inpaint, dino_inpaint_padding, init_image, + denoising_strength, p.cfg_scale, p.steps, + rewidth, reheight, p.tiling, p.scripts, self.i2i_scripts, self.i2i_scripts_always, p.script_args, + self.target_prompts, self.target_negative_prompts + ) + p.extra_generation_params[f'Tile upscale value'] = scalevalue + p.extra_generation_params[f'Tile upscale width'] = rewidth + p.extra_generation_params[f'Tile upscale height'] = reheight + p.extra_generation_params[f'Tile upscale overlap'] = overlap + p.extra_generation_params[f'Tile upscale upscaler'] = self.upscaler.name + p.extra_generation_params[f'Tile upscale Ckpt Model'] = upscaler_ckpt 
if upscaler_ckpt != 'Original' else self.ckptname + p.extra_generation_params[f'Tile upscale Vae Model'] = upscaler_vae if upscaler_vae != 'Original' else self.vae + if(self.upscaler.name != "None"): + img = self.upscaler.scaler.upscale(init_image, scalevalue, self.upscaler.data_path) + else: + img = init_image + if rewidth and reheight: + devices.torch_gc() + grid = images.split_grid(img, tile_w=rewidth, tile_h=reheight, overlap=overlap) + work = [] + for y, h, row in grid.tiles: + for tiledata in row: + work.append(tiledata[2]) + + batch_count = math.ceil(len(work)) + state.job = 'Upscaler Batching' + state.job_count += batch_count + + print(f"Tile upscaling will process a total of {len(work)} images tiled as {len(grid.tiles[0][2])}x{len(grid.tiles)} per upscale in a total of {state.job_count} batches (I2I).") + + pi.seed = self.target_seeds + work_results = [] + for i in range(batch_count): + pi.init_images = work[i:(i+1)] + processed = processing.process_images(pi) + + p.seed = processed.seed + 1 + work_results += processed.images + + image_index = 0 + for y, h, row in grid.tiles: + for tiledata in row: + tiledata[2] = work_results[image_index] if image_index < len(work_results) else Image.new("RGB", (rewidth, reheight)) + image_index += 1 + init_image = images.combine_grid(grid) + else: + init_image = img + if shared.opts.data.get('save_ddsd_working_on_images', False): + images.save_image(init_image, p.outpath_samples, + shared.opts.data.get('save_ddsd_working_on_images_prefix', ''), + pi.seed, self.target_prompts, opts.samples_format, + suffix = '' if shared.opts.data.get('save_ddsd_working_on_images_suffix', '') == '' else f"-{shared.opts.data.get('save_ddsd_working_on_images_suffix', '')}", + info=create_infotext(p, p.all_prompts, p.all_seeds, p.all_subseeds, None, self.iter_number, self.batch_number), p=p) + return init_image + + def watermark(self, p, init_image): + if shared.opts.data.get('save_ddsd_watermark_with_and_without', False): + 
images.save_image(init_image, p.outpath_samples, + shared.opts.data.get('save_ddsd_watermark_with_and_without_prefix', ''), + self.target_seeds, self.target_prompts, opts.samples_format, + suffix= '' if shared.opts.data.get('save_ddsd_watermark_with_and_without_suffix', '') == '' else f"-{shared.opts.data.get('save_ddsd_watermark_with_and_without_suffix', '')}", + info=create_infotext(p, p.all_prompts, p.all_seeds, p.all_subseeds, None, self.iter_number, self.batch_number), p=p) + for water_index in range(self.watermark_count): + init_image = image_apply_watermark(init_image, + self.watermark_type_list[water_index], + self.watermark_position_list[water_index], + self.watermark_image_list[water_index], + self.watermark_image_size_width_list[water_index], + self.watermark_image_size_height_list[water_index], + self.watermark_text_list[water_index], + self.watermark_text_color_list[water_index], + self.font_path[self.watermark_text_font_list[water_index]], + self.watermark_text_size_list[water_index], + self.watermark_padding_list[water_index], + self.watermark_alpha_list[water_index]) + return init_image + + def postprocess_target(self, p, init_image, + pp_type_list, + pp_saturation_strength_list, + pp_sharpening_radius_list, pp_sharpening_percent_list, pp_sharpening_threshold_list, + pp_gaussian_radius_list, + pp_brightness_strength_list, + pp_color_strength_list, + pp_contrast_strength_list, + pp_hue_strength_list, + pp_bilateral_sigmaC_list, pp_bilateral_sigmaS_list, + pp_color_tint_type_name_list, + pp_color_tint_lut_name_list): + for pp_index in range(shared.opts.data.get('postprocessing_count', 1)): + if pp_type_list[pp_index] == 'none': continue + if shared.opts.data.get('save_ddsd_postprocessing_with_and_without', False): + images.save_image(init_image, p.outpath_samples, + shared.opts.data.get('save_ddsd_postprocessing_with_and_without_prefix', ''), + self.target_seeds, self.target_prompts, opts.samples_format, + suffix= '' if 
shared.opts.data.get('save_ddsd_postprocessing_with_and_without_suffix', '') == '' else f"-{shared.opts.data.get('save_ddsd_postprocessing_with_and_without_suffix', '')}", + info=create_infotext(p, p.all_prompts, p.all_seeds, p.all_subseeds, None, self.iter_number, self.batch_number), p=p) + init_image = ddsd_postprocess(init_image, pp_type_list[pp_index], pp_saturation_strength_list[pp_index], pp_sharpening_radius_list[pp_index], pp_sharpening_percent_list[pp_index], pp_sharpening_threshold_list[pp_index], pp_gaussian_radius_list[pp_index], pp_brightness_strength_list[pp_index], pp_color_strength_list[pp_index], pp_contrast_strength_list[pp_index], pp_hue_strength_list[pp_index], pp_bilateral_sigmaC_list[pp_index], pp_bilateral_sigmaS_list[pp_index], pp_color_tint_lut_name_list[pp_index], pp_color_tint_type_name_list[pp_index]) + p.extra_generation_params[f'Postprocess {pp_index+1} type'] = pp_type_list[pp_index] + if pp_type_list[pp_index] == 'saturation': + p.extra_generation_params[f'Postprocess {pp_index+1} strength'] = pp_saturation_strength_list[pp_index] + elif pp_type_list[pp_index] == 'sharpening': + p.extra_generation_params[f'Postprocess {pp_index+1} radius'] = pp_sharpening_radius_list[pp_index] + p.extra_generation_params[f'Postprocess {pp_index+1} percent'] = pp_sharpening_percent_list[pp_index] + p.extra_generation_params[f'Postprocess {pp_index+1} threshold'] = pp_sharpening_threshold_list[pp_index] + elif pp_type_list[pp_index] == 'gaussian blur': + p.extra_generation_params[f'Postprocess {pp_index+1} radius'] = pp_gaussian_radius_list[pp_index] + elif pp_type_list[pp_index] == 'brightness': + p.extra_generation_params[f'Postprocess {pp_index+1} strength'] = pp_brightness_strength_list[pp_index] + elif pp_type_list[pp_index] == 'color': + p.extra_generation_params[f'Postprocess {pp_index+1} strength'] = pp_color_strength_list[pp_index] + elif pp_type_list[pp_index] == 'contrast': + p.extra_generation_params[f'Postprocess {pp_index+1} strength'] = 
pp_contrast_strength_list[pp_index] + elif pp_type_list[pp_index] == 'hue': + p.extra_generation_params[f'Postprocess {pp_index+1} strength'] = pp_hue_strength_list[pp_index] + elif pp_type_list[pp_index] == 'bilateral': + p.extra_generation_params[f'Postprocess {pp_index+1} sigma c'] = pp_bilateral_sigmaC_list[pp_index] + p.extra_generation_params[f'Postprocess {pp_index+1} sigma s'] = pp_bilateral_sigmaS_list[pp_index] + elif pp_type_list[pp_index] == 'color tint(type)': + p.extra_generation_params[f'Postprocess {pp_index+1} type'] = pp_color_tint_type_name_list[pp_index] + elif pp_type_list[pp_index] == 'color tint(lut)': + p.extra_generation_params[f'Postprocess {pp_index+1} lut'] = pp_color_tint_lut_name_list[pp_index] + return init_image + + def change_vae_model(self, name:str): + if name is None: return + if name.lower() in ['auto', 'automatic']: modules.sd_vae.reload_vae_weights(shared.sd_model, vae_file=modules.sd_vae.unspecified) + elif name.lower() == 'none': modules.sd_vae.reload_vae_weights(shared.sd_model, vae_file=None) + else: modules.sd_vae.reload_vae_weights(shared.sd_model, vae_file=modules.sd_vae.vae_dict[name]) + + def change_ckpt_model(self, name:str): + if name is None: return + info = modules.sd_models.get_closet_checkpoint_match(name) + if info is None: + raise RuntimeError(f"Unknown checkpoint: {name}") + modules.sd_models.reload_model_weights(shared.sd_model, info) + + def postprocess(self, p, res, *args, **kargs): + if getattr(p, 'sub_processing', False): return + self.change_ckpt_model(self.ckptname) + self.change_vae_model(self.vae) + opts.CLIP_stop_at_last_layers = self.clip_skip + if len(self.image_results) < 1: return + final_count = len(res.images) + if (p.n_iter > 1 or p.batch_size > 1) and final_count != p.n_iter * p.batch_size: + grid = res.images[0] + res.images = res.images[1:] + grid_texts = res.infotexts[0] + res.infotexts = res.infotexts[1:] + images = [[*masks, image] for masks, image in zip(self.image_results,res.images)] 
+ res.images = [image for sub in images for image in sub] + infos = [[info] * (len(masks) + 1) for masks, info in zip(self.image_results, res.infotexts)] + res.infotexts = [info for sub in infos for info in sub] + if (p.n_iter > 1 or p.batch_size > 1) and final_count != p.n_iter * p.batch_size: + res.images = [grid] + res.images + res.infotexts = [grid_texts] + res.infotexts + + def process(self, p, + enable_script_names, + disable_watermark, disable_postprocess, + disable_upscaler, ddetailer_before_upscaler, scalevalue, upscaler_sample, overlap, upscaler_index, rewidth, reheight, denoising_strength, upscaler_ckpt, upscaler_vae, + disable_detailer, disable_mask_paint_mode, inpaint_mask_mode, detailer_sample, detailer_sam_model, detailer_dino_model, + dino_full_res_inpaint, dino_inpaint_padding, detailer_mask_blur, + disable_outpaint, outpaint_sample, outpaint_mask_blur, + *args): + if getattr(p, 'sub_processing', False): return + self.image_results = [] + self.ckptname = shared.opts.data['sd_model_checkpoint'] + self.vae = shared.opts.data['sd_vae'] + self.clip_skip = opts.CLIP_stop_at_last_layers + self.restore_script(p) + self.enable_script_names = enable_script_names + self.disable_watermark = disable_watermark + self.disable_postprocess = disable_postprocess + self.disable_upscaler = disable_upscaler + self.ddetailer_before_upscaler = ddetailer_before_upscaler + self.scalevalue = scalevalue + self.upscaler_sample = upscaler_sample + self.overlap = overlap + self.upscaler_index = upscaler_index + self.rewidth = rewidth + self.reheight = reheight + self.denoising_strength = denoising_strength + self.upscaler_ckpt = upscaler_ckpt + self.upscaler_vae = upscaler_vae + self.disable_detailer = disable_detailer + self.disable_mask_paint_mode = disable_mask_paint_mode + self.inpaint_mask_mode = inpaint_mask_mode + self.detailer_sample = detailer_sample + self.detailer_sam_model = detailer_sam_model + self.detailer_dino_model = detailer_dino_model + 
self.dino_full_res_inpaint = dino_full_res_inpaint + self.dino_inpaint_padding = dino_inpaint_padding + self.detailer_mask_blur = detailer_mask_blur + self.disable_outpaint = disable_outpaint + self.outpaint_sample = outpaint_sample + self.outpaint_mask_blur = outpaint_mask_blur + args_list = [*args] + self.dino_detect_count = shared.opts.data.get('dino_detect_count', 2) + self.dino_detection_ckpt_list = args_list[self.dino_detect_count * 0:self.dino_detect_count * 1] + self.dino_detection_vae_list = args_list[self.dino_detect_count * 1:self.dino_detect_count * 2] + self.dino_detection_prompt_list = args_list[self.dino_detect_count * 2:self.dino_detect_count * 3] + self.dino_detection_positive_list = args_list[self.dino_detect_count * 3:self.dino_detect_count * 4] + self.dino_detection_negative_list = args_list[self.dino_detect_count * 4:self.dino_detect_count * 5] + self.dino_detection_denoise_list = args_list[self.dino_detect_count * 5:self.dino_detect_count * 6] + self.dino_detection_cfg_list = args_list[self.dino_detect_count * 6:self.dino_detect_count * 7] + self.dino_detection_steps_list = args_list[self.dino_detect_count * 7:self.dino_detect_count * 8] + self.dino_detection_spliter_disable_list = args_list[self.dino_detect_count * 8:self.dino_detect_count * 9] + self.dino_detection_spliter_remove_area_list = args_list[self.dino_detect_count * 9:self.dino_detect_count * 10] + self.dino_detection_clip_skip_list = args_list[self.dino_detect_count * 10 : self.dino_detect_count * 11] + self.watermark_count = shared.opts.data.get('watermark_count', 1) + self.watermark_type_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 0:self.dino_detect_count * 11 + self.watermark_count * 1] + self.watermark_position_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 1:self.dino_detect_count * 11 + self.watermark_count * 2] + self.watermark_image_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 
2:self.dino_detect_count * 11 + self.watermark_count * 3] + self.watermark_image_size_width_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 3:self.dino_detect_count * 11 + self.watermark_count * 4] + self.watermark_image_size_height_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 4:self.dino_detect_count * 11 + self.watermark_count * 5] + self.watermark_text_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 5:self.dino_detect_count * 11 + self.watermark_count * 6] + self.watermark_text_color_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 6:self.dino_detect_count * 11 + self.watermark_count * 7] + self.watermark_text_font_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 7:self.dino_detect_count * 11 + self.watermark_count * 8] + self.watermark_text_size_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 8:self.dino_detect_count * 11 + self.watermark_count * 9] + self.watermark_padding_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 9:self.dino_detect_count * 11 + self.watermark_count * 10] + self.watermark_alpha_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 10:self.dino_detect_count * 11 + self.watermark_count * 11] + self.pp_count = shared.opts.data.get('postprocessing_count', 1) + self.pp_type_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 0:self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 1] + self.pp_saturation_strength_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 1:self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 2] + self.pp_sharpening_radius_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 2:self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 3] + self.pp_sharpening_percent_list = 
args_list[self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 3:self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 4] + self.pp_sharpening_threshold_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 4:self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 5] + self.pp_gaussian_radius_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 5:self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 6] + self.pp_brightness_strength_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 6:self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 7] + self.pp_color_strength_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 7:self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 8] + self.pp_contrast_strength_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 8:self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 9] + self.pp_hue_strength_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 9:self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 10] + self.pp_bilateral_sigmaC_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 10:self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 11] + self.pp_bilateral_sigmaS_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 11:self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 12] + self.pp_color_tint_type_name_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 12:self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 13] + self.pp_color_tint_lut_name_list = 
args_list[self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 13:self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 14] + self.outpaint_count = shared.opts.data.get('outpaint_count', 1) + self.outpaint_positive_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 14 + self.outpaint_count * 0:self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 14 + self.outpaint_count * 1] + self.outpaint_negative_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 14 + self.outpaint_count * 1:self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 14 + self.outpaint_count * 2] + self.outpaint_denoise_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 14 + self.outpaint_count * 2:self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 14 + self.outpaint_count * 3] + self.outpaint_cfg_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 14 + self.outpaint_count * 3:self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 14 + self.outpaint_count * 4] + self.outpaint_steps_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 14 + self.outpaint_count * 4:self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 14 + self.outpaint_count * 5] + self.outpaint_pixels_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 14 + self.outpaint_count * 5:self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 14 + self.outpaint_count * 6] + self.outpaint_direction_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 14 + self.outpaint_count * 6:self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 14 + self.outpaint_count * 7] + self.script_names_list = 
[x.strip()+'.py' for x in enable_script_names.split(';') if len(x) > 1] + self.script_names_list += [os.path.basename(__file__)] + self.i2i_scripts = [x for x in self.original_scripts if os.path.basename(x.filename) in self.script_names_list].copy() + self.i2i_scripts_always = [x for x in self.original_scripts_always if os.path.basename(x.filename) in self.script_names_list].copy() + self.upscaler = shared.sd_upscalers[upscaler_index] + + def before_process_batch(self, p, *args, **kargs): + if getattr(p, 'sub_processing', False): return + self.iter_number = kargs['batch_number'] + self.batch_number = 0 + + def restore_script(self, p): + if self.original_scripts is None: self.original_scripts = p.scripts.scripts.copy() + else: + if len(p.scripts.scripts) != len(self.original_scripts): p.scripts.scripts = self.original_scripts.copy() + if self.original_scripts_always is None: self.original_scripts_always = p.scripts.alwayson_scripts.copy() + else: + if len(p.scripts.alwayson_scripts) != len(self.original_scripts_always): p.scripts.alwayson_scripts = self.original_scripts_always.copy() + p.scripts.scripts = self.original_scripts.copy() + p.scripts.alwayson_scripts = self.original_scripts_always.copy() + + def postprocess_image(self, p, pp, *args): + if getattr(p, 'sub_processing', False): return + devices.torch_gc() + output_image = pp.image + self.target_prompts = p.all_prompts[self.iter_number * p.batch_size:(self.iter_number + 1) * p.batch_size][self.batch_number] + self.target_negative_prompts = p.all_negative_prompts[self.iter_number * p.batch_size:(self.iter_number + 1) * p.batch_size][self.batch_number] + self.target_seeds = p.all_seeds[self.iter_number * p.batch_size:(self.iter_number + 1) * p.batch_size][self.batch_number] + if shared.opts.data.get('save_ddsd_working_on_images', False): + images.save_image(output_image, p.outpath_samples, + shared.opts.data.get('save_ddsd_working_on_images_prefix', ''), + self.target_seeds, self.target_prompts, 
opts.samples_format, + suffix= '' if shared.opts.data.get('save_ddsd_working_on_images_suffix', '') == '' else f"-{shared.opts.data.get('save_ddsd_working_on_images_suffix', '')}", + info=create_infotext(p, p.all_prompts, p.all_seeds, p.all_subseeds, None, self.iter_number, self.batch_number), p=p) + + if not self.disable_outpaint: + output_image = self.outpainting(p, output_image, + self.outpaint_sample, self.outpaint_mask_blur, self.outpaint_count, + self.outpaint_denoise_list, self.outpaint_cfg_list, self.outpaint_steps_list, + self.outpaint_positive_list, self.outpaint_negative_list, self.outpaint_pixels_list,self.outpaint_direction_list) + devices.torch_gc() + + if self.ddetailer_before_upscaler and not self.disable_upscaler: + output_image = self.upscale(p, output_image, + self.scalevalue, self.upscaler_sample, + self.overlap, self.rewidth, self.reheight, self.denoising_strength, + self.upscaler_ckpt, self.upscaler_vae, + self.detailer_mask_blur, self.dino_full_res_inpaint, self.dino_inpaint_padding) + devices.torch_gc() + + if not self.disable_detailer: + output_image = self.dino_detect_detailer(p, output_image, + self.disable_mask_paint_mode, self.inpaint_mask_mode, self.detailer_sample, self.detailer_sam_model, self.detailer_dino_model, + self.dino_full_res_inpaint, self.dino_inpaint_padding, self.detailer_mask_blur, + self.dino_detect_count, + self.dino_detection_ckpt_list, + self.dino_detection_vae_list, + self.dino_detection_prompt_list, + self.dino_detection_positive_list, + self.dino_detection_negative_list, + self.dino_detection_denoise_list, + self.dino_detection_cfg_list, + self.dino_detection_steps_list, + self.dino_detection_spliter_disable_list, + self.dino_detection_spliter_remove_area_list, + self.dino_detection_clip_skip_list) + devices.torch_gc() + + if not self.ddetailer_before_upscaler and not self.disable_upscaler: + output_image = self.upscale(p, output_image, + self.scalevalue, self.upscaler_sample, + self.overlap, self.rewidth, 
self.reheight, self.denoising_strength, + self.upscaler_ckpt, self.upscaler_vae, + self.detailer_mask_blur, self.dino_full_res_inpaint, self.dino_inpaint_padding) + devices.torch_gc() + + if not self.disable_postprocess: + output_image = self.postprocess_target(p, output_image, + self.pp_type_list, + self.pp_saturation_strength_list, + self.pp_sharpening_radius_list, + self.pp_sharpening_percent_list, + self.pp_sharpening_threshold_list, + self.pp_gaussian_radius_list, + self.pp_brightness_strength_list, + self.pp_color_strength_list, + self.pp_contrast_strength_list, + self.pp_hue_strength_list, + self.pp_bilateral_sigmaC_list, + self.pp_bilateral_sigmaS_list, + self.pp_color_tint_type_name_list, + self.pp_color_tint_lut_name_list) + + devices.torch_gc() + + if not self.disable_watermark: + output_image = self.watermark(p, output_image) + + devices.torch_gc() + self.batch_number += 1 + self.restore_script(p) + pp.image = output_image + +def on_ui_settings(): + section = ('ddsd_script', "DDSD") + shared.opts.add_option("save_ddsd_working_on_images", shared.OptionInfo( + False, "Save all images you are working on", gr.Checkbox, {"interactive": True}, section=section)) + shared.opts.add_option("save_ddsd_working_on_images_prefix", shared.OptionInfo( + '', "Save all images you are working on prefix", gr.Textbox, {"interactive": True}, section=section)) + shared.opts.add_option("save_ddsd_working_on_images_suffix", shared.OptionInfo( + 'Working_On', "Save all images you are working on suffix", gr.Textbox, {"interactive": True}, section=section)) + + shared.opts.add_option("outpaint_count", shared.OptionInfo( + 1, "Outpainting Max Count", gr.Slider, {"minimum": 1, "maximum": 20, "step": 1}, section=section)) + + shared.opts.add_option("save_ddsd_working_on_dino_mask_images", shared.OptionInfo( + False, "Save dino mask images you are working on", gr.Checkbox, {"interactive": True}, section=section)) + shared.opts.add_option("save_ddsd_working_on_dino_mask_images_prefix", 
shared.OptionInfo( + '', "Save dino mask images you are working on prefix", gr.Textbox, {"interactive": True}, section=section)) + shared.opts.add_option("save_ddsd_working_on_dino_mask_images_suffix", shared.OptionInfo( + 'Mask', "Save dino mask images you are working on suffix", gr.Textbox, {"interactive": True}, section=section)) + shared.opts.add_option("dino_detect_count", shared.OptionInfo( + 2, "Dino Detect Max Count", gr.Slider, {"minimum": 1, "maximum": 20, "step": 1}, section=section)) + + shared.opts.add_option("save_ddsd_postprocessing_with_and_without", shared.OptionInfo( + False, "Save with and without postprocessing ", gr.Checkbox, {"interactive": True}, section=section)) + shared.opts.add_option("save_ddsd_postprocessing_with_and_without_prefix", shared.OptionInfo( + '', "Save with and without postprocesing prefix", gr.Textbox, {"interactive": True}, section=section)) + shared.opts.add_option("save_ddsd_postprocessing_with_and_without_suffix", shared.OptionInfo( + 'Without_Postprocessing', "Save with and without postprocessing suffix", gr.Textbox, {"interactive": True}, section=section)) + shared.opts.add_option("postprocessing_count", shared.OptionInfo( + 1, "Postprocessing Count", gr.Slider, {"minimum": 1, "maximum": 5, "step": 1}, section=section)) + + shared.opts.add_option("save_ddsd_watermark_with_and_without", shared.OptionInfo( + False, "Save with and without watermark ", gr.Checkbox, {"interactive": True}, section=section)) + shared.opts.add_option("save_ddsd_watermark_with_and_without_prefix", shared.OptionInfo( + '', "Save with and without watermark prefix", gr.Textbox, {"interactive": True}, section=section)) + shared.opts.add_option("save_ddsd_watermark_with_and_without_suffix", shared.OptionInfo( + 'Without_Watermark', "Save with and without watermark suffix", gr.Textbox, {"interactive": True}, section=section)) + shared.opts.add_option("watermark_count", shared.OptionInfo( + 1, "Watermark Count", gr.Slider, {"minimum": 1, "maximum": 
20, "step": 1}, section=section)) + + shared.opts.add_option("preview_masks_images", shared.OptionInfo( + False, "Show the working mask in preview.", gr.Checkbox, {"interactive": True}, section=section)) + shared.opts.add_option("result_masks", shared.OptionInfo( + False, "The mask result is output on the final output.", gr.Checkbox, {"interactive": True}, section=section)) + shared.opts.add_option("mask_type", shared.OptionInfo( + False, "The type of mask is a black and white image.", gr.Checkbox, {"interactive": True}, section=section)) + +modules.script_callbacks.on_ui_settings(on_ui_settings) \ No newline at end of file diff --git a/exhm/detailer/sd-webui-ddsd-orig/scripts/ddsd_bs.py b/exhm/detailer/sd-webui-ddsd-orig/scripts/ddsd_bs.py new file mode 100644 index 0000000000000000000000000000000000000000..d830cdb3792e2a722665f3273905de995637c214 --- /dev/null +++ b/exhm/detailer/sd-webui-ddsd-orig/scripts/ddsd_bs.py @@ -0,0 +1,71 @@ +from __future__ import annotations + +import os +import torch + +import mediapipe as mp +import numpy as np + +from PIL import Image, ImageDraw +from ultralytics import YOLO + +from modules import safe +from modules.shared import cmd_opts +from modules.paths import models_path + +yolo_models_path = os.path.join(models_path, 'yolo') + +def mediapipe_face_detect(image, model_type, confidence): + width, height = image.size + image_np = np.array(image) + + mp_face_detection = mp.solutions.face_detection + with mp_face_detection.FaceDetection(model_selection=model_type, min_detection_confidence=confidence) as face_detector: + predictor = face_detector.process(image_np) + + if predictor.detections is None: return None + + bboxes = [] + for detection in predictor.detections: + + bbox = detection.location_data.relative_bounding_box + x1 = bbox.xmin * width + y1 = bbox.ymin * height + x2 = x1 + bbox.width * width + y2 = y1 + bbox.height * height + bboxes.append([x1,y1,x2,y2]) + + return create_mask_from_bbox(image, bboxes) + +def 
ultralytics_predict(image, model_type, confidence, device): + models = [os.path.join(yolo_models_path,x) for x in os.listdir(yolo_models_path) if (x.endswith('.pt') or x.endswith('.pth')) and os.path.splitext(os.path.basename(x))[0].upper() == model_type] + if len(models) == 0: return None + model = YOLO(models[0]) + predictor = model(image, conf=confidence, show_labels=False, device=device) + bboxes = predictor[0].boxes.xyxy.cpu().numpy() + if bboxes.size == 0: return None + bboxes = bboxes.tolist() + return create_mask_from_bbox(image, bboxes) + +def create_mask_from_bbox(image, bboxes): + mask = Image.new('L', image.size, 0) + draw = ImageDraw.Draw(mask) + for bbox in bboxes: + draw.rectangle(bbox, fill=255) + return np.array(mask) + +def bs_model(image, model_type, confidence): + image = Image.fromarray(image) + orig = torch.load + torch.load = safe.unsafe_torch_load + if model_type == 'FACE_MEDIA_FULL': + mask = mediapipe_face_detect(image, 1, confidence) + elif model_type == 'FACE_MEDIA_SHORT': + mask = mediapipe_face_detect(image, 0, confidence) + else: + device = '' + if getattr(cmd_opts, 'lowvram', False) or getattr(cmd_opts, 'medvram', False): + device = 'cpu' + mask = ultralytics_predict(image, model_type, confidence, device) + torch.load = orig + return mask \ No newline at end of file diff --git a/exhm/detailer/sd-webui-ddsd-orig/scripts/ddsd_dino.py b/exhm/detailer/sd-webui-ddsd-orig/scripts/ddsd_dino.py new file mode 100644 index 0000000000000000000000000000000000000000..12c3577d5cdef89ab284edd2df0689daeb9dd2ba --- /dev/null +++ b/exhm/detailer/sd-webui-ddsd-orig/scripts/ddsd_dino.py @@ -0,0 +1,99 @@ +import os +import gc +import torch +import copy +import cv2 +from collections import OrderedDict + +from modules import shared +from modules.devices import device, torch_gc, cpu + +import groundingdino.datasets.transforms as T +from groundingdino.models import build_model +from groundingdino.util.slconfig import SLConfig +from modules.paths import 
models_path +from groundingdino.util.utils import clean_state_dict + +dino_model_cache = OrderedDict() +grounding_models_dir = os.path.join(models_path, "grounding") + +def dino_model_list(): + return [x for x in os.listdir(grounding_models_dir) if x.endswith('.pth')] + +def dino_config_file_name(dino_model_name:str): + return dino_model_name.replace('.pth','.py') + +def clear_dino_cache(): + dino_model_cache.clear() + gc.collect() + torch_gc() + +def load_dino_model(dino_checkpoint): + print(f"Initializing GroundingDINO {dino_checkpoint}") + if dino_checkpoint in dino_model_cache: + dino = dino_model_cache[dino_checkpoint] + if shared.cmd_opts.lowvram: + dino.to(device=device) + else: + clear_dino_cache() + args = SLConfig.fromfile(os.path.join(grounding_models_dir,dino_config_file_name(dino_checkpoint))) + dino = build_model(args) + checkpoint = torch.load(os.path.join(grounding_models_dir,dino_checkpoint),map_location='cpu') + dino.load_state_dict(clean_state_dict(checkpoint['model']), strict=False) + dino.to(device=device) + dino_model_cache[dino_checkpoint] = dino + dino.eval() + return dino + + +def load_dino_image(image_pil): + transform = T.Compose( + [ + T.RandomResize([800], max_size=1333), + T.ToTensor(), + T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]), + ] + ) + image, _ = transform(image_pil, None) # 3, h, w + return image + + +def get_grounding_output(model, image, caption, box_threshold): + caption = caption.lower() + caption = caption.strip() + if not caption.endswith("."): + caption = caption + "." 
+ image = image.to(device) + with torch.no_grad(): + outputs = model(image[None], captions=[caption]) + if shared.cmd_opts.lowvram: + model.to(cpu) + logits = outputs["pred_logits"].sigmoid()[0] # (nq, 256) + boxes = outputs["pred_boxes"][0] # (nq, 4) + + # filter output + logits_filt = logits.clone() + boxes_filt = boxes.clone() + filt_mask = logits_filt.max(dim=1)[0] > box_threshold + logits_filt = logits_filt[filt_mask] # num_filt, 256 + boxes_filt = boxes_filt[filt_mask] # num_filt, 4 + + return boxes_filt.cpu() + + +def dino_predict_internal(input_image, dino_model_name, text_prompt, box_threshold): + print("Running GroundingDINO Inference") + dino_image = load_dino_image(input_image.convert("RGB")) + dino_model = load_dino_model(dino_model_name) + + boxes_filt = get_grounding_output( + dino_model, dino_image, text_prompt, box_threshold + ) + + H, W = input_image.size[1], input_image.size[0] + for i in range(boxes_filt.size(0)): + boxes_filt[i] = boxes_filt[i] * torch.Tensor([W, H, W, H]) + boxes_filt[i][:2] -= boxes_filt[i][2:] / 2 + boxes_filt[i][2:] += boxes_filt[i][:2] + clear_dino_cache() + return boxes_filt diff --git a/exhm/detailer/sd-webui-ddsd-orig/scripts/ddsd_postprocess.py b/exhm/detailer/sd-webui-ddsd-orig/scripts/ddsd_postprocess.py new file mode 100644 index 0000000000000000000000000000000000000000..8c16aee650eae0c83ebe599ccfb8a08c69d33e9d --- /dev/null +++ b/exhm/detailer/sd-webui-ddsd-orig/scripts/ddsd_postprocess.py @@ -0,0 +1,83 @@ +import os +import numpy as np +import cv2 +from PIL import Image, ImageEnhance, ImageFilter, ImageOps +from pillow_lut import load_cube_file +from scipy.interpolate import UnivariateSpline + +from modules.paths import models_path + +lut_model_dir = os.path.join(models_path, "lut") + +def lut_model_list(): + return [x for x in os.listdir(lut_model_dir) if x.lower().endswith('.cube')] + +def saturation_image(image:Image.Image, strength:float) -> Image.Image: # 채도 조절 + return 
ImageEnhance.Color(image).enhance(strength) +def sharpening_image(image:Image.Image, radius:float, percent:int, threshold:float) -> Image.Image: # 선명도 조절 + return image.filter(ImageFilter.UnsharpMask(radius=radius, percent=percent, threshold=threshold)) +def gaussian_blur_image(image:Image.Image, radius:float) -> Image.Image: # 흐림도 조절 + return image.filter(ImageFilter.GaussianBlur(radius=radius)) +def brightness_image(image:Image.Image, strength:float) -> Image.Image: # 밝기 조절 + return ImageEnhance.Brightness(image).enhance(strength) +def color_image(image:Image.Image, strength:float) -> Image.Image: # 색조 조절 + return ImageEnhance.Color(image).enhance(strength) +def contrast_image(image:Image.Image, strength:float) -> Image.Image: # 대비 조절 + return ImageEnhance.Contrast(image).enhance(strength) +def color_extraction_image(image:Image.Image, lower:tuple[int,int,int], upper:tuple[int,int,int], strength:float) -> Image.Image: # 색상 추출 및 변화 + image_np = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2HSV) + mask = cv2.inRange(image_np, lower, upper) + image_np = image_np.astype(np.float64) + image_np[mask != 0] *= strength + image_np = image_np.astype(np.uint8) + return Image.fromarray(cv2.cvtColor(image_np, cv2.COLOR_HSV2RGB)) +def hue_image(image:Image.Image, strength:float) -> Image.Image: # Hue 조절 + image_np = np.array(image) + image_np = cv2.cvtColor(image_np, cv2.COLOR_RGB2HSV) + image_np[..., 0] = (image_np[..., 0] + strength * 180) % 180 + return Image.fromarray(cv2.cvtColor(image_np, cv2.COLOR_HSV2RGB)) +def inversion_image(image:Image.Image) -> Image.Image: # 반전 + return ImageOps.invert(image) +def bilateral_image(image:Image.Image, sigmaC:int, sigmaS:int) -> Image.Image: # 양방향 필터 + image_np = np.array(image) + return Image.fromarray(cv2.bilateralFilter(image_np, -1, sigmaC, sigmaS)) +def color_tint_lut_image(image:Image.Image, lut_file:str) -> Image.Image: # 색상 조절 + lut = load_cube_file(os.path.join(lut_model_dir, lut_file)) + return image.filter(lut) +def 
color_tint_type_image(image:Image.Image, type:str) -> Image.Image: # 색온도 조절(Warm, Cool) + increase = UnivariateSpline([0,64,128,192,256],[0,70,140,210,256])(range(256)) + decrease = UnivariateSpline([0,64,128,192,256],[0,30,80,120,192])(range(256)) + image_np = np.array(image) + r, g, b = cv2.split(image_np) + r = cv2.LUT(r, increase if type == 'warm' else decrease).astype(np.uint8) + b = cv2.LUT(b, decrease if type == 'warm' else increase).astype(np.uint8) + image_np = cv2.merge((r, g, b)) + h, s, v = cv2.split(cv2.cvtColor(image_np, cv2.COLOR_RGB2HSV)) + s = cv2.LUT(s, increase if type == 'warm' else decrease).astype(np.uint8) + return Image.fromarray(cv2.cvtColor(cv2.merge((h, s, v)), cv2.COLOR_HSV2RGB)) + +def ddsd_postprocess(image:Image.Image, pptype:str, + saturation_strength:float, + sharpening_radius:float, sharpening_percent:int, sharpening_threshold:float, + gaussian_blur_radius:float, + brightness_strength:float, + color_strength:float, + contrast_strength:float, + #color_extraction_lower:tuple[int,int,int], color_extraction_upper:tuple[int,int,int], color_extraction_strength:float, + hue_strength:float, + bilateral_sigmaC:int, bilateral_sigmaS:int, + color_tint_lut_file:str, + color_tint_type_name:str) -> Image.Image: + if pptype == 'saturation': return saturation_image(image, saturation_strength) + if pptype == 'sharpening': return sharpening_image(image, sharpening_radius, sharpening_percent, sharpening_threshold) + if pptype == 'gaussian blur': return gaussian_blur_image(image, gaussian_blur_radius) + if pptype == 'brightness': return brightness_image(image, brightness_strength) + if pptype == 'color': return color_image(image, color_strength) + if pptype == 'contrast': return contrast_image(image, contrast_strength) + #if pptype == 'color extraction': return color_extraction_image(image, color_extraction_lower, color_extraction_upper, color_extraction_strength) + if pptype == 'hue': return hue_image(hue_strength) + if pptype == 'inversion': return 
inversion_image(image) + if pptype == 'bilateral': return bilateral_image(image, bilateral_sigmaC, bilateral_sigmaS) + if pptype == 'color tint(type)': return color_tint_type_image(image, color_tint_type_name) + if pptype == 'color tint(lut)': return color_tint_lut_image(image, color_tint_lut_file) + return image \ No newline at end of file diff --git a/exhm/detailer/sd-webui-ddsd-orig/scripts/ddsd_sam.py b/exhm/detailer/sd-webui-ddsd-orig/scripts/ddsd_sam.py new file mode 100644 index 0000000000000000000000000000000000000000..4f76a769e3539c2b26c6926dd3f35120bb8208c0 --- /dev/null +++ b/exhm/detailer/sd-webui-ddsd-orig/scripts/ddsd_sam.py @@ -0,0 +1,89 @@ +import os +import numpy as np +import torch +import gc +import cv2 + +from modules import shared +from modules.paths import models_path +from modules.safe import unsafe_torch_load, load +from modules.devices import device, torch_gc, cpu + +from PIL import Image +from collections import OrderedDict +from scipy.ndimage import binary_dilation +from segment_anything import SamPredictor, sam_model_registry +from scripts.ddsd_dino import dino_predict_internal, clear_dino_cache + +sam_model_cache = OrderedDict() +sam_model_dir = os.path.join(models_path, "sam") + +def sam_model_list(): + return [x for x in os.listdir(sam_model_dir) if x.endswith('.pth')] + +def load_sam_model(sam_checkpoint): + model_type = '_'.join(sam_checkpoint.split('_')[1:-1]) + sam_checkpoint = os.path.join(sam_model_dir, sam_checkpoint) + torch.load = unsafe_torch_load + sam = sam_model_registry[model_type](checkpoint=sam_checkpoint) + sam.to(device=device) + sam.eval() + torch.load = load + return sam + +def clear_sam_cache(): + sam_model_cache.clear() + gc.collect() + torch_gc() + +def clear_cache(): + clear_sam_cache() + clear_dino_cache() + +def dilate_mask(mask, dilation): + dilation_kernel = np.ones((dilation, dilation), np.uint8) + return cv2.dilate(mask, dilation_kernel) + +def init_sam_model(sam_model_name): + print('Initializing SAM') + 
if sam_model_name in sam_model_cache: + sam = sam_model_cache[sam_model_name] + if(shared.cmd_opts.lowvram): + sam.to(device=device) + return sam + elif sam_model_name in sam_model_list(): + clear_sam_cache() + sam_model_cache[sam_model_name] = load_sam_model(sam_model_name) + return sam_model_cache[sam_model_name] + else: + Exception(f'{sam_model_name} not found, please download model to models/sam') + +def sam_predict(sam_model_name, dino_model_name, image, image_np, image_np_rgb, dino_text, dino_box_threshold, dilation, sam_level): + print('Start SAM Processing') + + assert dino_text, 'Please input dino text' + + boxes = dino_predict_internal(image, dino_model_name, dino_text, dino_box_threshold) + + if boxes.shape[0] < 1: return None + + sam = init_sam_model(sam_model_name) + + print(f'Running SAM Inference {image_np_rgb.shape}') + predictor = SamPredictor(sam) + predictor.set_image(image_np_rgb) + transformed_boxes = predictor.transform.apply_boxes_torch(boxes, image_np.shape[:2]) + masks, _, _ = predictor.predict_torch( + point_coords = None, + point_labels = None, + boxes = transformed_boxes.to(device), + multimask_output = True + ) + + masks = masks.permute(1,0,2,3).cpu().numpy() + + if shared.cmd_opts.lowvram: + sam.to(cpu) + clear_sam_cache() + + return dilate_mask(np.any(masks[sam_level], axis=0).astype(np.uint8) * 255,dilation) \ No newline at end of file diff --git a/exhm/detailer/sd-webui-ddsd-orig/scripts/ddsd_utils.py b/exhm/detailer/sd-webui-ddsd-orig/scripts/ddsd_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..3388a4383fda5a24d6b4d745e073fa9d61418b83 --- /dev/null +++ b/exhm/detailer/sd-webui-ddsd-orig/scripts/ddsd_utils.py @@ -0,0 +1,383 @@ +import os +import re +import numpy as np +import cv2 +import gc +import matplotlib.font_manager +from glob import glob +from PIL import Image, ImageDraw, ImageFont +from scripts.ddsd_sam import sam_predict, clear_cache, dilate_mask +from scripts.ddsd_bs import bs_model +from 
modules.devices import torch_gc +from skimage import measure, exposure + +from modules.paths import models_path +from modules.processing import StableDiffusionProcessingImg2Img + +token_split = re.compile(r"(AND|OR|NOR|XOR|NAND)") +token_first = re.compile(r'\(([^()]+)\)') +token_match = re.compile(r'(\d+)GROUPMASK') +token_file = re.compile(r'\s*<(.*)>\s*') + +ddsd_mask_path = os.path.join(models_path, "ddsdmask") +mask_embed = {} + +def startup(): + global mask_embed + if not os.path.exists(ddsd_mask_path): + os.makedirs(ddsd_mask_path) + with open(os.path.join(ddsd_mask_path, 'put_in_mask_here.txt'),'w') as f: pass + + masks = glob(os.path.join(ddsd_mask_path,'**\\*')) + masks = [(x, *os.path.splitext(os.path.basename(x))) for x in masks if os.path.isfile(x)] + masks = [(x, y) for x, y, z in masks if z in ['.png', '.jpg', '.jpeg', '.webp']] + mask_embed = {y.upper():x for x, y in masks} + +startup() + +def try_convert(data, type, default, min, max): + try: + convert = type(data) + if convert < min: return min + if convert > max: return max + return convert + except (ValueError, TypeError): + return default + +def prompt_spliter(prompt:str, split_token:str, count:int): + spliter = prompt.split(split_token) + while len(spliter) < count: + spliter.append('') + return spliter[:count] + +def combine_masks(mask, combine_masks_option, mask2): + if combine_masks_option == 'AND': return cv2.bitwise_and(mask, mask2) + if combine_masks_option == 'OR': return cv2.bitwise_or(mask, mask2) + if combine_masks_option == 'XOR': return cv2.bitwise_xor(mask, mask2) + if combine_masks_option == 'NOR': return cv2.bitwise_not(cv2.bitwise_or(mask, mask2)) + if combine_masks_option == 'NAND': return cv2.bitwise_not(cv2.bitwise_and(mask,mask2)) + +def dino_detect_from_prompt(prompt:str, detailer_sam_model, detailer_dino_model, init_image, disable_mask_paint_mode, inpaint_mask_mode, image_mask): + clear_cache() + image_np_zero = np.array(init_image.convert('L')) + image_np_zero[:,:] = 0 + 
image_np = np.array(init_image) + image_np_rgb = image_np[:,:,:3].copy() + image_set = (init_image, image_np, image_np_rgb, image_np_zero) + model_set = (detailer_sam_model, detailer_dino_model) + result = dino_prompt_detector(prompt, model_set, image_set) + clear_cache() + if np.array_equal(result, image_np_zero): return None + if disable_mask_paint_mode: return result + if image_mask is None: return result + image_mask = np.array(image_mask.resize((result.shape[1],result.shape[0])).convert('L')) + image_mask = np.resize(image_mask, result.shape) + if inpaint_mask_mode == 'Inner': return cv2.bitwise_and(result, image_mask) + if inpaint_mask_mode == 'Outer': return cv2.bitwise_and(result, cv2.bitwise_not(image_mask)) + return None + +def dino_prompt_token_file(prompt:str, image_np_zero, image_np_rgb): + usage_type, usage, dilation, confidence = prompt_spliter(prompt, ':', 4) + usage_type = usage_type.upper() + usage = usage.upper() + confidence = try_convert(confidence, float, 0.3, 0, 1) + if usage_type == 'AREA': + if usage == 'LEFT': + image_np_zero[:,:image_np_zero.shape[1] // 2] = 255 + image_np_zero[:,image_np_zero.shape[1] // 2:] = 0 + elif usage == 'RIGHT': + image_np_zero[:,:image_np_zero.shape[1] // 2] = 0 + image_np_zero[:,image_np_zero.shape[1] // 2:] = 255 + elif usage == 'TOP': + image_np_zero[:image_np_zero.shape[0] // 2,:] = 255 + image_np_zero[image_np_zero.shape[0] // 2:,:] = 0 + elif usage == 'BOTTOM': + image_np_zero[:image_np_zero.shape[0] // 2,:] = 0 + image_np_zero[image_np_zero.shape[0] // 2:,:] = 255 + elif usage == 'ALL': + image_np_zero[:,:] = 255 + if usage_type == 'FILE': + if usage in mask_embed: + image = Image.open(mask_embed[usage]).convert('L') + h, w = image_np_zero.shape[:2] + image = image.resize((w, h)) + image_np_zero = np.array(image) + if usage_type == 'MODEL': + mask = bs_model(image_np_rgb, usage, confidence) + if mask is None: return image_np_zero + image_np_zero = mask + return dilate_mask(image_np_zero, 
try_convert(dilation, int, 2, 0, 512)) + +def dino_prompt_detector(prompt:str, model_set, image_set): + find = token_first.search(prompt) + result_group = {} + result_count = 0 + while find: + result_group[f'{result_count}GROUPMASK'] = dino_prompt_detector(find.group(1), model_set, image_set) + prompt = prompt.replace(find.group(), f' {result_count}GROUPMASK ') + result_count += 1 + find = token_first.search(prompt) + + spliter = token_split.split(prompt) + + while len(spliter) > 1: + left, operator, right = spliter[:3] + if not isinstance(left, np.ndarray): + match = token_match.match(left.strip()) + if match is None: + match = token_file.match(left) + if match is None: + dino_text, sam_level, dino_box_threshold, dilation = prompt_spliter(left, ':', 4) + left = sam_predict(model_set[0], model_set[1], image_set[0], image_set[1], image_set[2], dino_text, + try_convert(dino_box_threshold.strip(), float, 0.3, 0, 1.0), + try_convert(dilation.strip(), int, 16, 0, 512), + try_convert(sam_level.strip(), int, 0, 0, 2)) + if left is None: left = image_set[3].copy() + else: + left = dino_prompt_token_file(match.group(1), image_set[3].copy(), image_set[2].copy()) + else: + left = result_group[left.strip()] + if not isinstance(right, np.ndarray): + match = token_match.match(right.strip()) + if match is None: + match = token_file.match(right) + if match is None: + dino_text, sam_level, dino_box_threshold, dilation = prompt_spliter(right, ':', 4) + right = sam_predict(model_set[0], model_set[1], image_set[0], image_set[1], image_set[2], dino_text, + try_convert(dino_box_threshold.strip(), float, 0.3, 0, 1.0), + try_convert(dilation.strip(), int, 16, 0, 512), + try_convert(sam_level.strip(), int, 0, 0, 2)) + if right is None: right = image_set[3].copy() + else: + right = dino_prompt_token_file(match.group(1), image_set[3].copy(), image_set[2].copy()) + else: + right = result_group[right.strip()] + spliter[:3] = [combine_masks(left, operator, right)] + gc.collect() + torch_gc() + 
if isinstance(spliter[0], np.ndarray): return spliter[0] + match = token_file.match(spliter[0]) + if match is None: + dino_text, sam_level, dino_box_threshold, dilation = prompt_spliter(spliter[0], ':', 4) + target = sam_predict(model_set[0], model_set[1], image_set[0], image_set[1], image_set[2], dino_text, + try_convert(dino_box_threshold.strip(), float, 0.3, 0, 1.0), + try_convert(dilation.strip(), int, 16, 0, 512), + try_convert(sam_level.strip(), int, 0, 0, 2)) + if target is None: return image_set[3].copy() + else: + target = dino_prompt_token_file(match.group(1), image_set[3].copy(), image_set[2].copy()) + return target + +def mask_spliter_and_remover(mask, area): + gc.collect() + torch_gc() + labels = measure.label(mask) + regions = measure.regionprops(labels) + + for r in regions: + if r.area < area: + for coord in r.coords: + labels[coord[0], coord[1]] = 0 + + num_labels = np.max(labels) + + label_images = [] + for x in range(num_labels): + label_image = np.zeros_like(mask, dtype=np.uint8) + label_image[labels == (x + 1)] = 255 + label_images.append(label_image) + return label_images + +def I2I_Generator_Create(p, i2i_sample, i2i_mask_blur, full_res_inpainting, inpainting_padding, init_image, denoise, cfg, steps, width, height, tiling, scripts, scripts_list, alwaysonscripts_list, script_args, positive, negative, fill = 1): + i2i = StableDiffusionProcessingImg2Img( + init_images = [init_image], + resize_mode = 0, + denoising_strength = 0, + mask = None, + mask_blur= i2i_mask_blur, + inpainting_fill = fill, + inpaint_full_res = full_res_inpainting, + inpaint_full_res_padding= inpainting_padding, + inpainting_mask_invert= 0, + sd_model=p.sd_model, + outpath_samples=p.outpath_samples, + outpath_grids=p.outpath_grids, + restore_faces=p.restore_faces, + prompt='', + negative_prompt='', + styles=p.styles, + seed=p.seed, + subseed=p.subseed, + subseed_strength=p.subseed_strength, + seed_resize_from_h=p.seed_resize_from_h, + 
seed_resize_from_w=p.seed_resize_from_w, + sampler_name=i2i_sample, + n_iter=1, + batch_size=1, + steps=steps, + cfg_scale=cfg, + width=width, + height=height, + tiling=tiling, + ) + i2i.denoising_strength = denoise + i2i.do_not_save_grid = True + i2i.do_not_save_samples = True + i2i.override_settings = {} + i2i.override_settings_restore_afterwards = {} + i2i.scripts = scripts + i2i.scripts.scripts = scripts_list.copy() + i2i.scripts.alwayson_scripts = alwaysonscripts_list.copy() + i2i.script_args = script_args + i2i.prompt = positive + i2i.negative_prompt = negative + i2i.sub_processing = True + + return i2i + +def get_fonts_list(): + fonts, font_paths = [], {} + fonts_list = matplotlib.font_manager.findSystemFonts() + for font in fonts_list: + try: + fonts.append(matplotlib.font_manager.FontProperties(fname=font).get_name()) + font_paths[fonts[-1]] = font + except RuntimeError: + print(f'Skip font file: {font}') + return fonts, font_paths + +def image_apply_watermark(image, watermark_type, watermark_position, watermark_image, watermark_image_size_width, watermark_image_size_height, watermark_text, watermark_text_color, watermark_text_font, watermark_text_size, watermark_padding, watermark_alpha): + gc.collect() + torch_gc() + if watermark_type == 'Text': + font = ImageFont.truetype(watermark_text_font, watermark_text_size) + copy_image = image.copy() + draw = ImageDraw.Draw(copy_image) + text_width, text_height = font.getsize(watermark_text) + left, right, top, bottom = 0 + watermark_padding, image.size[0] - watermark_padding, 0 + watermark_padding, image.size[1] - watermark_padding + if watermark_position == 'Left': position = (left, (top + bottom) // 2 - text_height // 2) + elif watermark_position == 'Left-Top': position = (left, top) + elif watermark_position == 'Top': position = ((left + right) // 2 - text_width // 2, top) + elif watermark_position == 'Right-Top': position = (right - text_width,top) + elif watermark_position == 'Right': position = (right - 
text_width, (top + bottom) // 2 - text_height // 2) + elif watermark_position == 'Right-Bottom': position = (right - text_width, bottom - text_height) + elif watermark_position == 'Bottom': position = ((left + right) // 2 - text_width // 2,bottom - text_height) + elif watermark_position == 'Left-Bottom': position = (left, bottom - text_height) + elif watermark_position == 'Center': position = ((left + right) // 2 - text_width // 2, (top + bottom) // 2 - text_height // 2) + draw.text(position, watermark_text, font=font, fill=tuple(int(watermark_text_color[x:x+2], 16) for x in (1,3,5))) + result = Image.blend(image, copy_image, watermark_alpha) + elif watermark_type == 'Image': + left, right, top, bottom = 0 + watermark_padding, image.size[0] - watermark_padding, 0 + watermark_padding, image.size[1] - watermark_padding + if watermark_position == 'Left': position = (left, (top + bottom) // 2 - watermark_image_size_height // 2) + elif watermark_position == 'Left-Top': position = (left, top) + elif watermark_position == 'Top': position = ((left + right) // 2 - watermark_image_size_width // 2, top) + elif watermark_position == 'Right-Top': position = (right - watermark_image_size_width,top) + elif watermark_position == 'Right': position = (right - watermark_image_size_width, (top + bottom) // 2 - watermark_image_size_height // 2) + elif watermark_position == 'Right-Bottom': position = (right - watermark_image_size_width, bottom - watermark_image_size_height) + elif watermark_position == 'Bottom': position = ((left + right) // 2 - watermark_image_size_width // 2,bottom - watermark_image_size_height) + elif watermark_position == 'Left-Bottom': position = (left, bottom - watermark_image_size_height) + elif watermark_position == 'Center': position = ((left + right) // 2 - watermark_image_size_width // 2, (top + bottom) // 2 - watermark_image_size_height // 2) + copy_np = np.array(image) + copy_np_origin = copy_np.copy() + water_image = cv2.resize(watermark_image.copy(), 
(watermark_image_size_width, watermark_image_size_height)) + mask = np.where(np.all(water_image == [255, 255, 255], axis=-1), 0, 255) + alpha = np.zeros((water_image.shape[0], water_image.shape[1]), dtype=np.uint8) + alpha[:,:] = mask + copy_np_crop = copy_np[position[1]:position[1]+watermark_image_size_height, position[0]:position[0]+watermark_image_size_width, :] + copy_np_crop[alpha.nonzero()] = water_image[alpha.nonzero()] + copy_np[position[1]:position[1]+watermark_image_size_height, position[0]:position[0]+watermark_image_size_width, :] = copy_np_crop + result = Image.fromarray(cv2.addWeighted(copy_np_origin, 1 - watermark_alpha, copy_np, watermark_alpha, 0)) + gc.collect() + torch_gc() + return result + +def matched_noise(image_np, mask_np, noise = 1, color_variation = 0.05): + def _fft2(data): + if data.ndim > 2: + out_fft = np.zeros((data.shape[0], data.shape[1], data.shape[2]), dtype=np.complex128) + for c in range(data.shape[2]): + c_data = data[:,:,c] + out_fft[:,:,c] = np.fft.fft2(np.fft.fftshift(c_data), norm='ortho') + out_fft[:,:,c] = np.fft.ifftshift(out_fft[:,:,c]) + else: + out_fft = np.zeros((data.shape[0], data.shape[1]), dtype=np.complex128) + out_fft[:,:] = np.fft.fft2(np.fft.fftshift(data), norm='ortho') + out_fft[:,:] = np.fft.ifftshift(out_fft[:,:]) + return out_fft + def _ifft2(data): + if data.ndim > 2: + out_ifft = np.zeros((data.shape[0], data.shape[1], data.shape[2]), dtype=np.complex128) + for c in range(data.shape[2]): + c_data = data[:, :, c] + out_ifft[:, :, c] = np.fft.ifft2(np.fft.fftshift(c_data), norm="ortho") + out_ifft[:, :, c] = np.fft.ifftshift(out_ifft[:, :, c]) + else: + out_ifft = np.zeros((data.shape[0], data.shape[1]), dtype=np.complex128) + out_ifft[:, :] = np.fft.ifft2(np.fft.fftshift(data), norm="ortho") + out_ifft[:, :] = np.fft.ifftshift(out_ifft[:, :]) + return out_ifft + def _get_gaussian_window(width, height, std=3.14, mode=0): + window_scale_x = float(width / min(width, height)) + window_scale_y = 
float(height / min(width, height)) + window = np.zeros((width, height)) + x = (np.arange(width) / width * 2. - 1.) * window_scale_x + for y in range(height): + fy = (y / height * 2. - 1.) * window_scale_y + if mode == 0: + window[:, y] = np.exp(-(x ** 2 + fy ** 2) * std) + else: + window[:, y] = (1 / ((x ** 2 + 1.) * (fy ** 2 + 1.))) ** (std / 3.14) + return window + def _get_masked_window_rgb(np_mask_grey, hardness=1.0): + np_mask_rgb = np.zeros((np_mask_grey.shape[0], np_mask_grey.shape[1], 3)) + if hardness != 1.0: + hardened = np_mask_grey[:] ** hardness + else: + hardened = np_mask_grey[:] + for c in range(3): + np_mask_rgb[:, :, c] = hardened[:] + return np_mask_rgb + + width = image_np.shape[0] + height = image_np.shape[1] + channel = image_np.shape[2] + + image_np = image_np[:] * (1.0 - mask_np) + mask_np_grey = (np.sum(mask_np, axis=2) / 3.0) + img_mask = mask_np_grey > 1e-6 + ref_mask = mask_np_grey < 1e-3 + + image_windowed = image_np * (1.0 - _get_masked_window_rgb(mask_np_grey)) + image_windowed /= np.max(image_windowed) + image_windowed += np.average(image_np) * mask_np + + src_fft = _fft2(image_windowed) + src_dist = np.absolute(src_fft) + src_phase = src_fft / src_dist + + rng = np.random.default_rng(0) + + noise_window = _get_gaussian_window(width, height, mode=1) + noise_rgb = rng.random((width,height, channel)) + noise_grey = (np.sum(noise_rgb, axis=2) / 3.0) + noise_rgb *= color_variation + for c in range(channel): + noise_rgb[:,:,c] += (1.0 - color_variation) * noise_grey + + noise_fft = _fft2(noise_rgb) + for c in range(channel): + noise_fft[:,:,c] *= noise_window + noise_rgb = np.real(_ifft2(noise_fft)) + shaped_noise_fft = _fft2(noise_rgb) + shaped_noise_fft[:,:,:] = np.absolute(shaped_noise_fft[:,:,:]) ** 2 * (src_dist ** noise) * src_phase + + brightness_variation = 0 + contrast_adjusted_np = image_np[:] * (brightness_variation + 1.0) - brightness_variation * 2.0 + + shaped_noise = np.real(_ifft2(shaped_noise_fft)) + shaped_noise -= 
np.min(shaped_noise) + shaped_noise /= np.max(shaped_noise) + shaped_noise[img_mask, :] = exposure.match_histograms(shaped_noise[img_mask, :] ** 1.0, contrast_adjusted_np[ref_mask, :], channel_axis = 1) + shaped_noise = image_np[:] * (1.0 - mask_np) + shaped_noise * mask_np + + return np.clip(shaped_noise[:], 0.0, 1.0) \ No newline at end of file diff --git a/exhm/detailer/sd-webui-ddsd/.gitignore b/exhm/detailer/sd-webui-ddsd/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..0a986a396cccd33787225afb7f9ad51afe25f45c --- /dev/null +++ b/exhm/detailer/sd-webui-ddsd/.gitignore @@ -0,0 +1,170 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/#use-with-ide +.pdm.toml + +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. +#.idea +*.pt +*.pth +*.ckpt +*.safetensors +models/control_sd15_scribble.pth +detected_maps/ + +# Ignore all .ddcfg files except for Empty.ddcfg +config/*.ddcfg +!config/Empty.ddcfg \ No newline at end of file diff --git a/exhm/detailer/sd-webui-ddsd/README.md b/exhm/detailer/sd-webui-ddsd/README.md new file mode 100644 index 0000000000000000000000000000000000000000..8b369f0bbf0a071177604a92c951fce2fa4078b6 --- /dev/null +++ b/exhm/detailer/sd-webui-ddsd/README.md @@ -0,0 +1,108 @@ +# sd-webui-ddsd +자동으로 동작하는 후보정 작업 확장. + +## What is +### Outpaint +#### Outpaint How to use +1. 증가시킬 픽셀을 선택 +2. 증가시킬 방향 선택 + 1. 방향이 None이면 미동작 +3. 증가시킬때 사용할 프롬프트 작성(전체 인페인팅시 이용) + 1. 비어있을때 원본 프롬프트 사용 +4. Denoise, CFG, Step 선택 + 1. Step은 최소 원본 Step 2 ~ 3배 이상 적절한 값 요구 +5. 생성! +### Upscale +이미지를 특정 크기로 잘라내어 타일별 업스케일을 하는 도구. 업스케일시 VRAM을 적게 소모. +#### Upscale How to use +1. 크기를 키울때 사용할 upscaler 모델 선택 +2. 크기를 키울 배수 선택 +3. 가로, 세로를 내가 단일로 생성할 수 있는 이미지의 최대 크기로 선택(이미지 생성 속도를 최대한 빠르게 하기 위하여) + 1. 가로 또는 세로중 한개를 0으로 세팅시 업스케일만 동작(세부 구조를 디테일하게하는 인페인팅이 동작하지 않음) +4. before running 체크 + 1. 
체크시 업스케일을 먼저 돌려서 인페인팅의 퀄리티 상승. 단, 인페인팅시 더 많은 VRAM 요구 +5. 생성! +### Detect Detailer +특정 키워드로 이미지를 탐색 후 인페인팅하는 도구. +#### Detect Detailer How to use +0. 인페인팅의 범위 제한(I2I 전용) + 1. Inner 옵션은 I2I의 인페인팅에서 칠한 범위 내부만 이미지를 탐색 + 2. Outer 옵션은 I2I의 인페인팅에서 칠한 범위 외부만 이미지를 탐색 +1. 탐색 키워드 작성 + 1. 탐색할 키워드를 작성(face, person 등등) + 1. 탐색할 키워드는 문장형도 가능(happy face, running dog) + 2. 탐색할 키워드를 .으로 분할 가능(face. arm, face. chest) + 2. 탐색할 키워드에 사용 가능한 추가 옵션 존재 + 1. <area:type>을 이용하여 특정 범위 탐색 가능 + 1. 범위 종류는 left, right, top, bottom, all이 존재 + 2. <file:filename>을 이용하여 특정 파일 탐색 가능 + 1. 특정 파일의 위치는 models/ddsdmask + 3. <model:type>을 이용하여 특정 모델 탐색 가능 + 1. type은 face_media_full, face_media_short와 파일명이 존재 + 2. 파일은 models/yolo에 위치 + 4. <type1:type2:dilation:confidence> 같이 type1과 type2외에 dilation과 confidence도 추가 입력 가능 + 1. confidence는 model 타입에서만 사용되는 값 + 3. 탐색한 범위를 AND, OR, XOR, NAND, NOR 등의 게이트 옵션으로 연산 가능 + 1. face OR (body NAND outfit) -> 괄호안의 body NAND outfit을 먼저 한 후에 face와 OR 연산을 동작 + 2. 괄호는 최대한 적게 이용. 많이 이용시 많은 VRAM 소모. + 3. 동작은 왼쪽에서 오른쪽으로 순차적 동작. + 4. 탐색할 키워드에 옵션으로 여러가지 옵션 조절 가능 + 1. face:0:0.4:4 OR outfit:2:0.5:8 + 2. 순서대로 탐색할 프롬프트, SAM 탐색 레벨(0-2), 민감도(0-1), 팽창값(0-512)을 가짐 + 3. 값을 생략하면 초기값으로 세팅 +2. 긍정 프롬프트 입력 + 1. 인페인팅시 동작시킬 긍정 프롬프트 입력 +3. 부정 프롬프트 입력 + 1. 인페인팅시 동작시킬 부정 프롬프트 입력 +4. Denoising, CFG, Steps, Clip skip, Ckpt, Vae 수정 + 1. 인페인팅시 동작에 영향을 주는 옵션 +5. Split Mask 옵션 체크 + 1. 체크시 마스크가 떨어져 있는것이 존재한다면 따로 인페인팅. + 1. 따로 인페인팅시 퀄리티 상승. 하지만 더 많은 인페인팅을 요구하여 생성속도 하락. +6. Remove Area 옵션 체크 + 1. Split Mask 옵션이 Enable 되어야만 동작 + 2. 분할 인페인팅시 일정 크기 이하의 면적은 인페인팅에서 제외 +6. 생성! +### Postprocessing +최종적으로 생성된 이미지에 가하는 후보정 +#### Postprocessing How to use +1. 가하고자 하는 후보정을 선택 +2. 생성! +### Watermark +이미지 생성 최종본에 자신의 증명을 기입하는 기능 +#### Watermark How to use +1. 기입할 증명의 종류 선택(글자, 이미지) +2. 선택한 종류를 입력 +3. 선택한 종류의 크기와 위치를 지정 +4. Padding으로 해당 위치에서 얼만큼 떨어져 있을지 설정 +5. Alpha로 얼만큼 투명할지 결정 +6. 생성! 
+ +### Video +[![Stable Diffusion - DDSD 확장 기능 (No - Talking)](http://img.youtube.com/vi/9wfZyJhPPho/0.jpg)](https://youtu.be/9wfZyJhPPho) + +## Installation +1. 다운로드 [CUDA](https://developer.nvidia.com/cuda-toolkit-archive)와 [cuDNN](https://developer.nvidia.com/rdp/cudnn-archive) + 1. 자신이 가진 WebUI와 동일한 버전의 `CUDA`와 `cuDNN`버전으로 설치 + 1. 이것은 다운로드를 편하게 하기위한 구글링크. [CUDA 117](https://drive.google.com/file/d/1HRTOLTB44-pRcrwIw9lQak2OC2ohNle3/view?usp=share_link)와 [cuDNN](https://drive.google.com/file/d/1QcgaxUra0WnCWrCLjsWp_QKw1PKcvqpj/view?usp=share_link) + 2. `CUDA` 설치 후 해당 폴더에 `cuDNN` 덮어쓰기 + 3. 일정 버전은 Easy Install을 지원. `CUDA`와 `cuDNN` 불필요. + 1. 지원버전 (torch == 1.13.1+cu117, torch==2.0.0+cu117 , torch==2.0.0+cu118) +2. 확장탭에서 설치 `https://github.com/NeoGraph-K/sd-webui-ddsd` 또는 다운로드 후 `extension/` 에 풀어넣기 +3. WebUI를 완전히 재시작 + +## Credits + +dustysys/[ddetailer](https://github.com/dustysys/ddetailer) + +AUTOMATIC1111/[stable-diffusion-webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui) + +facebookresearch/[Segment Anything](https://github.com/facebookresearch/segment-anything) + +IDEA-Research/[GroundingDINO](https://github.com/IDEA-Research/GroundingDINO) + +IDEA-Research/[Grounded-Segment-Anything](https://github.com/IDEA-Research/Grounded-Segment-Anything) + +continue-revolution/[sd-webui-segment-anything](https://github.com/continue-revolution/sd-webui-segment-anything) + +Bing-su/[adetailer](https://github.com/Bing-su/adetailer) diff --git a/exhm/detailer/sd-webui-ddsd/config/Empty.ddcfg b/exhm/detailer/sd-webui-ddsd/config/Empty.ddcfg new file mode 100644 index 0000000000000000000000000000000000000000..5cb4d37952781472ad74162a889f0e31a0b30008 --- /dev/null +++ b/exhm/detailer/sd-webui-ddsd/config/Empty.ddcfg @@ -0,0 +1 @@ +{"enable_script_names": "dynamic_thresholding;dynamic_prompting", "disable_watermark": true, "disable_postprocess": true, "disable_upscaler": true, "ddetailer_before_upscaler": false, "scalevalue": 2, "upscaler_sample": "Original", 
"overlap": 32, "upscaler_index": "SwinIR_4x", "rewidth": 512, "reheight": 512, "denoising_strength": 0.1, "upscaler_ckpt": "Original", "upscaler_vae": "Original", "disable_detailer": true, "disable_mask_paint_mode": true, "inpaint_mask_mode": "Inner", "detailer_sample": "Original", "detailer_sam_model": "sam_vit_b_01ec64.pth", "detailer_dino_model": "groundingdino_swinb_cogcoor.pth", "dino_full_res_inpaint": true, "dino_inpaint_padding": 0, "detailer_mask_blur": 4, "disable_outpaint": true, "outpaint_sample": "Original", "outpaint_mask_blur": 8, "dino_detect_count": 5, "dino_detection_ckpt_1": "Original", "dino_detection_vae_1": "Original", "dino_detection_prompt_1": "", "dino_detection_positive_1": "", "dino_detection_negative_1": "", "dino_detection_denoise_1": 0.4, "dino_detection_cfg_1": 0, "dino_detection_steps_1": 0, "dino_detection_spliter_disable_1": true, "dino_detection_spliter_remove_area_1": 16, "dino_detection_clip_skip_1": 0, "dino_detection_ckpt_2": "Original", "dino_detection_vae_2": "Original", "dino_detection_prompt_2": "", "dino_detection_positive_2": "", "dino_detection_negative_2": "", "dino_detection_denoise_2": 0.4, "dino_detection_cfg_2": 0, "dino_detection_steps_2": 0, "dino_detection_spliter_disable_2": true, "dino_detection_spliter_remove_area_2": 16, "dino_detection_clip_skip_2": 0, "dino_detection_ckpt_3": "Original", "dino_detection_vae_3": "Original", "dino_detection_prompt_3": "", "dino_detection_positive_3": "", "dino_detection_negative_3": "", "dino_detection_denoise_3": 0.4, "dino_detection_cfg_3": 0, "dino_detection_steps_3": 0, "dino_detection_spliter_disable_3": true, "dino_detection_spliter_remove_area_3": 16, "dino_detection_clip_skip_3": 0, "dino_detection_ckpt_4": "Original", "dino_detection_vae_4": "Original", "dino_detection_prompt_4": "", "dino_detection_positive_4": "", "dino_detection_negative_4": "", "dino_detection_denoise_4": 0.4, "dino_detection_cfg_4": 0, "dino_detection_steps_4": 0, 
"dino_detection_spliter_disable_4": true, "dino_detection_spliter_remove_area_4": 16, "dino_detection_clip_skip_4": 0, "dino_detection_ckpt_5": "Original", "dino_detection_vae_5": "Original", "dino_detection_prompt_5": "", "dino_detection_positive_5": "", "dino_detection_negative_5": "", "dino_detection_denoise_5": 0.4, "dino_detection_cfg_5": 0, "dino_detection_steps_5": 0, "dino_detection_spliter_disable_5": true, "dino_detection_spliter_remove_area_5": 16, "dino_detection_clip_skip_5": 0, "watermark_count": 2, "watermark_type_1": "Text", "watermark_position_1": "Center", "watermark_image_1": null, "watermark_image_size_width_1": 100, "watermark_image_size_height_1": 100, "watermark_text_1": "", "watermark_text_color_1": null, "watermark_text_font_1": "Courier New", "watermark_text_size_1": 50, "watermark_padding_1": 10, "watermark_alpha_1": 0.4, "watermark_type_2": "Text", "watermark_position_2": "Center", "watermark_image_2": null, "watermark_image_size_width_2": 100, "watermark_image_size_height_2": 100, "watermark_text_2": "", "watermark_text_color_2": null, "watermark_text_font_2": "Courier New", "watermark_text_size_2": 50, "watermark_padding_2": 10, "watermark_alpha_2": 0.4, "postprocessing_count": 2, "pp_type_1": "none", "pp_saturation_strength_1": 1.1, "pp_sharpening_radius_1": 2, "pp_sharpening_percent_1": 150, "pp_sharpening_threshold_1": 3, "pp_gaussian_radius_1": 2, "pp_brightness_strength_1": 1.1, "pp_color_strength_1": 1.1, "pp_contrast_strength_1": 1.1, "pp_hue_strength_1": 0, "pp_bilateral_sigmaC_1": 10, "pp_bilateral_sigmaS_1": 10, "pp_color_tint_type_name_1": "warm", "pp_color_tint_lut_name_1": "FGCineBasic.cube", "pp_type_2": "none", "pp_saturation_strength_2": 1.1, "pp_sharpening_radius_2": 2, "pp_sharpening_percent_2": 150, "pp_sharpening_threshold_2": 3, "pp_gaussian_radius_2": 2, "pp_brightness_strength_2": 1.1, "pp_color_strength_2": 1.1, "pp_contrast_strength_2": 1.1, "pp_hue_strength_2": 0, "pp_bilateral_sigmaC_2": 10, 
"pp_bilateral_sigmaS_2": 10, "pp_color_tint_type_name_2": "warm", "pp_color_tint_lut_name_2": "FGCineBasic.cube", "outpaint_count": 4, "outpaint_positive_1": "FGCineBasic.cube", "outpaint_negative_1": "", "outpaint_denoise_1": "", "outpaint_cfg_1": 0.8, "outpaint_steps_1": 0, "outpaint_pixels_1": 80, "outpaint_direction_1": 128, "outpaint_positive_2": "FGCineBasic.cube", "outpaint_negative_2": "", "outpaint_denoise_2": "", "outpaint_cfg_2": 0.8, "outpaint_steps_2": 0, "outpaint_pixels_2": 80, "outpaint_direction_2": 128, "outpaint_positive_3": "", "outpaint_negative_3": "", "outpaint_denoise_3": 0.8, "outpaint_cfg_3": 0, "outpaint_steps_3": 80, "outpaint_pixels_3": 128, "outpaint_direction_3": "None", "outpaint_positive_4": "", "outpaint_negative_4": "", "outpaint_denoise_4": 0.8, "outpaint_cfg_4": 0, "outpaint_steps_4": 80, "outpaint_pixels_4": 128, "outpaint_direction_4": "None"} \ No newline at end of file diff --git a/exhm/detailer/sd-webui-ddsd/install.py b/exhm/detailer/sd-webui-ddsd/install.py new file mode 100644 index 0000000000000000000000000000000000000000..1478dd2d4b2d9b5fefaea741876995c6fda1dcd4 --- /dev/null +++ b/exhm/detailer/sd-webui-ddsd/install.py @@ -0,0 +1,110 @@ +import os +import platform + +import launch + + +def check_system_machine(): + system = platform.system() + machine = platform.machine() + return (system, machine) in [('Windows', 'AMD64'), ('Linux', 'x86_64')] + + +def check_python_version(low: int, high: int): + ver = platform.python_version_tuple() + if int(ver[0]) == 3 and low <= int(ver[1]) <= high: + return ver[0] + ver[1] + return None + + +def install_pycocotools(): + base = 'https://github.com/Bing-su/dddetailer/releases/download/pycocotools/' + urls = { + 'Windows': 'pycocotools-2.0.6-cp{ver}-cp{ver}-win_amd64.whl', + 'Linux': 'pycocotools-2.0.6-cp{ver}-cp{ver}-manylinux_2_17_x86_64.manylinux2014_x86_64.whl', + } + + python_version = check_python_version(8, 11) + if not check_system_machine() or not python_version: + 
launch.run_pip('install pycocotools', 'sd-webui-ddsd requirement: pycocotools') + return + + url = urls[platform.system()].format(ver=python_version) + launch.run_pip(f'install {base + url}', 'sd-webui-ddsd requirement: pycocotools') + + +def install_groundingdino(): + import torch + from packaging.version import parse + + # torch_version: '1.13.1' or '2.0.0' or ... + torch_version = parse(torch.__version__).base_version + # cuda_version: '117' or '118' or 'None' + cuda_version = torch.version.cuda.replace('.', '') + python_version = check_python_version(9, 10) + + system = 'win' if platform.system() == 'Windows' else 'linux' + machine = 'amd64' if platform.machine() == 'AMD64' else 'x86_64' + + if torch_version in ['2.1.0', '2.1.1', '2.1.2'] and cuda_version == '121': + url = 'https://github.com/Bing-su/GroundingDINO/releases/download/v23.9.27/groundingdino-23.9.27+torch2.1.0.cu121-cp{py}-cp{py}-{system}_{machine}.whl' + url = url.format( + py=python_version, + system=system, + machine=machine, + ) + launch.run_pip(f'install {url}', 'sd-webui-ddsd requirement: groundingdino') + return + + if ( + not check_system_machine() + or (torch_version, cuda_version) + not in [('1.13.1', '117'), ('2.0.1', '117'), ('2.0.1', '118'), ('2.1.0', '121')] + or not python_version + ): + launch.run_pip('install git+https://github.com/IDEA-Research/GroundingDINO', 'sd-webui-ddsd requirement: groundingdino') + return + + url = 'https://github.com/Bing-su/GroundingDINO/releases/download/wheel-0.1.0/groundingdino-0.1.0+torch{torch}.cu{cuda}-cp{py}-cp{py}-{system}_{machine}.whl' + url = url.format( + torch=torch_version, + cuda=cuda_version, + py=python_version, + system=system, + machine=machine, + ) + + launch.run_pip(f'install {url}', 'sd-webui-ddsd requirement: groundingdino') + + +current_dir = os.path.dirname(os.path.realpath(__file__)) +req_file = os.path.join(current_dir, 'requirements.txt') + +with open(req_file) as file: + for lib in file: + version = None + lib = lib.strip() + 
lib = 'skimage' if lib == 'scikit-image' else lib + if '==' in lib: + lib, version = [x.strip() for x in lib.split('==')] + if not launch.is_installed(lib): + if lib == 'pycocotools': + install_pycocotools() + elif lib == 'groundingdino': + install_groundingdino() + elif lib == 'skimage': + launch.run_pip( + f'install scikit-image', + f'sd-webui-ddsd requirement: scikit-image' + ) + elif lib == 'pillow_lut': + launch.run_pip( + f'install pillow_lut', + f'sd-webui-ddsd requirement: pillow_lut' + ) + else: + lib = lib if version is None else lib + '==' + version + launch.run_pip( + f'install {lib}', + f'sd-webui-ddsd requirement: {lib}' + ) diff --git a/exhm/detailer/sd-webui-ddsd/requirements.txt b/exhm/detailer/sd-webui-ddsd/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..fc05f0b2fd1c8b896788382c3115265af313a3f6 --- /dev/null +++ b/exhm/detailer/sd-webui-ddsd/requirements.txt @@ -0,0 +1,8 @@ +pycocotools +segment_anything +groundingdino +scipy +scikit-image +pillow_lut +ultralytics==8.0.87 +mediapipe==0.9.3.0 \ No newline at end of file diff --git a/exhm/detailer/sd-webui-ddsd/scripts/__pycache__/ddsd.cpython-310.pyc b/exhm/detailer/sd-webui-ddsd/scripts/__pycache__/ddsd.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..268ec8973835e9d1679d74e13c275f95ed830a5e Binary files /dev/null and b/exhm/detailer/sd-webui-ddsd/scripts/__pycache__/ddsd.cpython-310.pyc differ diff --git a/exhm/detailer/sd-webui-ddsd/scripts/__pycache__/ddsd_bs.cpython-310.pyc b/exhm/detailer/sd-webui-ddsd/scripts/__pycache__/ddsd_bs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6198b0e8229a0c4b01c9bb23c72bc841eaba5ffb Binary files /dev/null and b/exhm/detailer/sd-webui-ddsd/scripts/__pycache__/ddsd_bs.cpython-310.pyc differ diff --git a/exhm/detailer/sd-webui-ddsd/scripts/__pycache__/ddsd_dino.cpython-310.pyc b/exhm/detailer/sd-webui-ddsd/scripts/__pycache__/ddsd_dino.cpython-310.pyc 
new file mode 100644 index 0000000000000000000000000000000000000000..208e67920b5bd32e2f193d43657e0379e3f0cefc Binary files /dev/null and b/exhm/detailer/sd-webui-ddsd/scripts/__pycache__/ddsd_dino.cpython-310.pyc differ diff --git a/exhm/detailer/sd-webui-ddsd/scripts/__pycache__/ddsd_postprocess.cpython-310.pyc b/exhm/detailer/sd-webui-ddsd/scripts/__pycache__/ddsd_postprocess.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..18da6060880f307049bfda4cc86c65b9c4b6c000 Binary files /dev/null and b/exhm/detailer/sd-webui-ddsd/scripts/__pycache__/ddsd_postprocess.cpython-310.pyc differ diff --git a/exhm/detailer/sd-webui-ddsd/scripts/__pycache__/ddsd_sam.cpython-310.pyc b/exhm/detailer/sd-webui-ddsd/scripts/__pycache__/ddsd_sam.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a88a05ba1002b47296ad65c5e20d6424c0783e12 Binary files /dev/null and b/exhm/detailer/sd-webui-ddsd/scripts/__pycache__/ddsd_sam.cpython-310.pyc differ diff --git a/exhm/detailer/sd-webui-ddsd/scripts/__pycache__/ddsd_utils.cpython-310.pyc b/exhm/detailer/sd-webui-ddsd/scripts/__pycache__/ddsd_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a832fe811c1f274fbc1ab85616db62a34cd9a2cf Binary files /dev/null and b/exhm/detailer/sd-webui-ddsd/scripts/__pycache__/ddsd_utils.cpython-310.pyc differ diff --git a/exhm/detailer/sd-webui-ddsd/scripts/ddsd.py b/exhm/detailer/sd-webui-ddsd/scripts/ddsd.py new file mode 100644 index 0000000000000000000000000000000000000000..646a786113ef1c8802d57d0348304f14978d344c --- /dev/null +++ b/exhm/detailer/sd-webui-ddsd/scripts/ddsd.py @@ -0,0 +1,1295 @@ +from json import load as json_read, dumps as json_write +import os +import math +import re + +import gradio as gr +import numpy as np +from PIL import Image, ImageDraw + +from scripts.ddsd_sam import sam_model_list +from scripts.ddsd_dino import dino_model_list +from scripts.ddsd_postprocess import 
lut_model_list, ddsd_postprocess +from scripts.ddsd_utils import dino_detect_from_prompt, mask_spliter_and_remover, I2I_Generator_Create, get_fonts_list, image_apply_watermark, matched_noise + +import modules +from modules import processing, shared, images, devices, modelloader, sd_models, sd_vae +from modules.processing import create_infotext, StableDiffusionProcessingTxt2Img +from modules.shared import opts, state +from modules.sd_models import model_hash +from modules.paths import models_path +from modules.scripts import AlwaysVisible + +from basicsr.utils.download_util import load_file_from_url + +grounding_models_path = os.path.join(models_path, "grounding") +sam_models_path = os.path.join(models_path, "sam") +lut_models_path = os.path.join(models_path, 'lut') +yolo_models_path = os.path.join(models_path, 'yolo') +ddsd_config_path = os.path.join(os.path.dirname(os.path.dirname(__file__)),'config') + +ckpt_model_name_pattern = re.compile('([\\w\\.\\[\\]\\\\\\+\\(\\)/]+)\\s*\\[.*\\]') + +def list_models(model_path, filter): + model_list = modelloader.load_models(model_path=model_path, ext_filter=[filter]) + + def modeltitle(path, shorthash): + abspath = os.path.abspath(path) + + if abspath.startswith(model_path): + name = abspath.replace(model_path, '') + else: + name = os.path.basename(path) + + if name.startswith("\\") or name.startswith("/"): + name = name[1:] + + shortname = os.path.splitext(name.replace("/", "_").replace("\\", "_"))[0] + + return f'{name} [{shorthash}]', shortname + + models = [] + for filename in model_list: + h = model_hash(filename) + title, short_model_name = modeltitle(filename, h) + models.append(title) + + return models + +def startup(): + if (len(list_models(yolo_models_path, '.pth')) == 0) and (len(list_models(yolo_models_path, '.pt')) == 0): + print("No detection yolo models found, downloading...") + load_file_from_url('https://huggingface.co/Bingsu/adetailer/resolve/main/face_yolov8m.pt',yolo_models_path) + 
load_file_from_url('https://huggingface.co/Bingsu/adetailer/resolve/main/face_yolov8n.pt',yolo_models_path) + load_file_from_url('https://huggingface.co/Bingsu/adetailer/resolve/main/face_yolov8s.pt',yolo_models_path) + load_file_from_url('https://huggingface.co/Bingsu/adetailer/resolve/main/face_yolov8n_v2.pt',yolo_models_path) + load_file_from_url('https://huggingface.co/Bingsu/adetailer/resolve/main/hand_yolov8n.pt',yolo_models_path) + load_file_from_url('https://huggingface.co/Bingsu/adetailer/resolve/main/hand_yolov8s.pt',yolo_models_path) + + if (len(list_models(grounding_models_path, '.pth')) == 0): + print("No detection groundingdino models found, downloading...") + load_file_from_url('https://huggingface.co/ShilongLiu/GroundingDINO/resolve/main/groundingdino_swint_ogc.pth',grounding_models_path) + load_file_from_url('https://raw.githubusercontent.com/IDEA-Research/GroundingDINO/main/groundingdino/config/GroundingDINO_SwinT_OGC.py',grounding_models_path, file_name='groundingdino_swint_ogc.py') + #load_file_from_url('https://huggingface.co/ShilongLiu/GroundingDINO/resolve/main/groundingdino_swinb_cogcoor.pth',grounding_models_path) + #load_file_from_url('https://raw.githubusercontent.com/IDEA-Research/GroundingDINO/main/groundingdino/config/GroundingDINO_SwinB.cfg.py',grounding_models_path, file_name='groundingdino_swinb_cogcoor.py') + + + if (len(list_models(sam_models_path, '.pth')) == 0): + print("No detection sam models found, downloading...") + #load_file_from_url('https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth',sam_models_path) + #load_file_from_url('https://dl.fbaipublicfiles.com/segment_anything/sam_vit_l_0b3195.pth',sam_models_path) + load_file_from_url('https://dl.fbaipublicfiles.com/segment_anything/sam_vit_b_01ec64.pth',sam_models_path) + + if (len(list_models(lut_models_path, '.cube')) == 0): # Free use lut files. 
+ print('No detection lut models found, downloading...') + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Arabica%2012.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Ava%20614.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Azrael%2093.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Bourbon%2064.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Byers%2011.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Chemical%20168.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Clayton%2033.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Clouseau%2054.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Cobi%203.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Contrail%2035.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Cubicle%2099.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Django%2025.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Domingo%20145.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/FGCineBasic.cube', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/FGCineBright.cube', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/FGCineCold.cube', 
lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/FGCineDrama.cube', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/FGCineTealOrange1.cube', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/FGCineTealOrange2.cube', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/FGCineVibrant.cube', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/FGCineWarm.cube', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Faded%2047.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Folger%2050.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Fusion%2088.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Hyla%2068.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Korben%20214.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/LBK-K-Tone_33.cube', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Lenox%20340.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Lucky%2064.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/McKinnon%2075.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Milo%205.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Neon%20770.CUBE', lut_models_path) + 
load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Paladin%201875.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Pasadena%2021.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Pitaya%2015.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Reeve%2038.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Remy%2024.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Sprocket%20231.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Teigen%2028.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Trent%2018.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Tweed%2071.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Vireo%2037.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Zed%2032.CUBE', lut_models_path) + load_file_from_url('https://huggingface.co/datasets/NeoGraph/Luts_Cube/resolve/main/Zeke%2039.CUBE', lut_models_path) + +startup() + +def gr_show(visible=True): + return {"visible": visible, "__type__": "update"} + +def gr_list_refresh(choices, value): + return {'choices':choices,'value':value,'__type__':'update'} +def gr_value_refresh(value): + return {'value':value,'__type__':'update'} +class Script(modules.scripts.Script): + def __init__(self): + self.original_scripts = None + self.original_scripts_always = None + _ ,self.font_path = get_fonts_list() + self.ckptname = None + self.vae = None + self.clip_skip = 1 + + def title(self): + return 
"ddetailer + sdupscale" + + def show(self, is_img2img): + return AlwaysVisible + + def ui(self, is_img2img): + pp_types = [ + 'none', + 'saturation','sharpening','gaussian blur','brightness','color','contrast', + #'color extraction', + 'hue', 'inversion', 'bilateral','color tint(type)','color tint(lut)'] + ckpt_list = list(sd_models.checkpoints_list.keys()) + ckpt_list.insert(0, 'Original') + vae_list = list(sd_vae.vae_dict.keys()) + vae_list.insert(0, 'Original') + sample_list = [x.name for x in shared.list_samplers()] + sample_list = [x for x in sample_list if x not in ['PLMS','UniPC','DDIM']] + sample_list.insert(0,"Original") + fonts_list, _ = get_fonts_list() + ddsd_config_list = [x[:-6] for x in os.listdir(ddsd_config_path) if x.endswith('.ddcfg')] + ret = [] + dino_detection_ckpt_list = [] + dino_detection_vae_list = [] + dino_detection_prompt_list = [] + dino_detection_positive_list = [] + dino_detection_negative_list = [] + dino_detection_denoise_list = [] + dino_detection_cfg_list = [] + dino_detection_steps_list = [] + dino_detection_spliter_disable_list = [] + dino_detection_spliter_remove_area_list = [] + dino_detection_clip_skip_list = [] + pp_type_list = [] + pp_saturation_strength_list = [] + pp_sharpening_radius_list = [] + pp_sharpening_percent_list = [] + pp_sharpening_threshold_list = [] + pp_gaussian_radius_list = [] + pp_brightness_strength_list = [] + pp_color_strength_list = [] + pp_contrast_strength_list = [] + pp_hue_strength_list = [] + pp_bilateral_sigmaC_list = [] + pp_bilateral_sigmaS_list = [] + pp_color_tint_type_name_list = [] + pp_color_tint_lut_name_list = [] + watermark_type_list = [] + watermark_position_list = [] + watermark_image_list = [] + watermark_image_size_width_list = [] + watermark_image_size_height_list = [] + watermark_text_list = [] + watermark_text_color_list = [] + watermark_text_font_list = [] + watermark_text_size_list = [] + watermark_padding_list = [] + watermark_alpha_list = [] + outpaint_positive_list = [] + 
outpaint_negative_list = [] + outpaint_denoise_list = [] + outpaint_cfg_list = [] + outpaint_steps_list = [] + outpaint_pixels_list = [] + outpaint_direction_list = [] + dino_tabs = None + watermark_tabs = None + postprocess_tabs = None + outpaint_tabs = None + + with gr.Accordion('DDSD', open=False, elem_id='ddsd_all_option_acc'): + + with gr.Row(): + ddsd_save_path = gr.Textbox(label='Save File Name', visible=True, interactive=True, value='ddsd') + ddsd_save = gr.Button('Save', elem_id='save_button', visible=True, interactive=True) + with gr.Row(): + ddsd_load_path = gr.Dropdown(label='Load File Name', visible=True, interactive=True, choices=ddsd_config_list) + ddsd_load = gr.Button('Load', elem_id='load_button',visible=True, interactive=True) + + with gr.Accordion("Script Option", open = False, elem_id="ddsd_enable_script_acc"): + with gr.Column(): + all_target_info = gr.HTML('

I2I All process target script

') + enable_script_names = gr.Textbox(label="Enable Script(Extension)", elem_id="enable_script_names", value='dynamic_thresholding;dynamic_prompting',show_label=True, lines=1, placeholder="Extension python file name(ex - dynamic_thresholding;dynamic_prompting)") + + with gr.Accordion("Outpainting", open=False, elem_id='ddsd_outpaint_acc'): + with gr.Column(): + outpaint_target_info = gr.HTML('

I2I Outpainting

') + disable_outpaint = gr.Checkbox(label='Disable Outpaint', elem_id='disable_outpaint', value=True, visible=True) + outpaint_sample = gr.Dropdown(label='Outpaint Sampling', elem_id='outpaint_sample', choices=sample_list, value=sample_list[0], visible=False, type="value") + with gr.Tabs(elem_id = 'outpaint_arguments', visible=False) as outpaint_tabs_acc: + for outpaint_index in range(shared.opts.data.get('outpaint_count', 1)): + with gr.Tab(f'Outpaint {outpaint_index + 1} Argument', elem_id=f'outpaint_{outpaint_index+1}_argument_tab'): + outpaint_pixels = gr.Slider(label=f'Outpaint {outpaint_index+1} Pixels', minimum=0, maximum=256, value=64, step=16) + outpaint_direction = gr.Radio(choices=['None', 'Left','Right','Up','Down'], value='None', label=f'Outpaint {outpaint_index+1} Direction') + with gr.Row(): + outpaint_positive = gr.Textbox(label=f'Positive {outpaint_index+1} Prompt', show_label=True, lines=2, placeholder='Outpaint Positive Prompt(Empty is Original)') + outpaint_negative = gr.Textbox(label=f'Negative {outpaint_index+1} Prompt', show_label=True, lines=2, placeholder='Outpaint Negative Prompt(Empty is Original)') + outpaint_denoise = gr.Slider(label=f'Outpaint {outpaint_index+1} Denoise', minimum=0, maximum=1.0, step=0.01, value=0.8) + outpaint_cfg = gr.Slider(label=f'Outpaint {outpaint_index+1} CFG(0 To Original)', minimum=0, maximum=500, step=0.5, value=0) + outpaint_steps = gr.Slider(label=f'Outpaint {outpaint_index+1} Steps(0 To Original)', minimum=0, maximum=150, step=1, value=0) + outpaint_positive_list.append(outpaint_positive) + outpaint_negative_list.append(outpaint_negative) + outpaint_denoise_list.append(outpaint_denoise) + outpaint_cfg_list.append(outpaint_cfg) + outpaint_steps_list.append(outpaint_steps) + outpaint_pixels_list.append(outpaint_pixels) + outpaint_direction_list.append(outpaint_direction) + outpaint_tabs = outpaint_tabs_acc + outpaint_mask_blur = gr.Slider(label='Outpaint Blur', elem_id='outpaint_mask_blur', minimum=0, 
maximum=128, step=4, value=8, visible=False) + + with gr.Accordion("Upscaler", open=False, elem_id="ddsd_upscaler_acc"): + with gr.Column(): + sd_upscale_target_info = gr.HTML('

I2I Upscaler Option

') + disable_upscaler = gr.Checkbox(label='Disable Upscaler', elem_id='disable_upscaler', value=True, visible=True) + ddetailer_before_upscaler = gr.Checkbox(label='Upscaler before running detailer', elem_id='upscaler_before_running_detailer', value=False, visible=False) + with gr.Row(): + upscaler_sample = gr.Dropdown(label='Upscaler Sampling', elem_id='upscaler_sample', choices=sample_list, value=sample_list[0], visible=False, type="value") + upscaler_index = gr.Dropdown(label='Upscaler', elem_id='upscaler_index', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[-1].name, type="index", visible=False) + with gr.Row(): + upscaler_ckpt = gr.Dropdown(label='Upscaler CKPT Model', elem_id=f'upscaler_detect_ckpt', choices=ckpt_list, value=ckpt_list[0], visible=False) + upscaler_vae = gr.Dropdown(label='Upscaler VAE Model', elem_id=f'upscaler_detect_vae', choices=vae_list, value=vae_list[0], visible=False) + scalevalue = gr.Slider(minimum=1, maximum=16, step=0.5, elem_id='upscaler_scalevalue', label='Resize', value=2, visible=False) + overlap = gr.Slider(minimum=0, maximum=256, step=32, elem_id='upscaler_overlap', label='Tile overlap', value=32, visible=False) + with gr.Row(): + rewidth = gr.Slider(minimum=0, maximum=1024, step=64, elem_id='upscaler_rewidth', label='Width(0 to No Inpainting)', value=512, visible=False) + reheight = gr.Slider(minimum=0, maximum=1024, step=64, elem_id='upscaler_reheight', label='Height(0 to No Inpainting)', value=512, visible=False) + denoising_strength = gr.Slider(minimum=0, maximum=1.0, step=0.01, elem_id='upscaler_denoising', label='Denoising strength', value=0.1, visible=False) + + with gr.Accordion("DINO Detect", open=False, elem_id="ddsd_dino_detect_acc"): + with gr.Column(): + ddetailer_target_info = gr.HTML('

I2I Detection Detailer Option

') + disable_detailer = gr.Checkbox(label='Disable Detection Detailer', elem_id='disable_detailer',value=True, visible=True) + disable_mask_paint_mode = gr.Checkbox(label='Disable I2I Mask Paint Mode', value=True, visible=False) + inpaint_mask_mode = gr.Radio(choices=['Inner', 'Outer'], value='Inner', label='Inpaint Mask Paint Mode', visible=False, show_label=True) + detailer_sample = gr.Dropdown(label='Detailer Sampling', elem_id='detailer_sample', choices=sample_list, value=sample_list[0], visible=False, type="value") + with gr.Row(): + detailer_sam_model = gr.Dropdown(label='Detailer SAM Model', elem_id='detailer_sam_model', choices=sam_model_list(), value=sam_model_list()[0], visible=False) + detailer_dino_model = gr.Dropdown(label='Detailer DINO Model', elem_id='detailer_dino_model', choices=dino_model_list(), value=dino_model_list()[0], visible=False) + with gr.Tabs(elem_id = 'dino_detct_arguments', visible=False) as dino_tabs_acc: + for index in range(shared.opts.data.get('dino_detect_count', 2)): + with gr.Tab(f'DINO {index + 1} Argument', elem_id=f'dino_{index + 1}_argument_tab'): + with gr.Row(): + dino_detection_ckpt = gr.Dropdown(label='Detailer CKPT Model', elem_id=f'detailer_detect_ckpt_{index+1}', choices=ckpt_list, value=ckpt_list[0], visible=True) + dino_detection_vae = gr.Dropdown(label='Detailer VAE Model', elem_id=f'detailer_detect_vae_{index+1}', choices=vae_list, value=vae_list[0], visible=True) + dino_detection_prompt = gr.Textbox(label=f"Detect {index + 1} Prompt", elem_id=f"detailer_detect_prompt_{index + 1}", show_label=True, lines=2, placeholder="Detect Token Prompt(ex - face:level(0-2):threshold(0-1):dilation(0-128))", visible=True) + with gr.Row(): + dino_detection_positive = gr.Textbox(label=f"Positive {index + 1} Prompt", elem_id=f"detailer_detect_positive_{index + 1}", show_label=True, lines=2, placeholder="Detect Mask Inpaint Positive(ex - perfect anatomy)", visible=True) + dino_detection_negative = gr.Textbox(label=f"Negative 
{index + 1} Prompt", elem_id=f"detailer_detect_negative_{index + 1}", show_label=True, lines=2, placeholder="Detect Mask Inpaint Negative(ex - nsfw)", visible=True) + dino_detection_denoise = gr.Slider(minimum=0, maximum=1.0, step=0.01, elem_id=f'dino_detect_{index+1}_denoising', label=f'DINO {index + 1} Denoising strength', value=0.4, visible=True) + dino_detection_cfg = gr.Slider(minimum=0, maximum=500, step=0.5, elem_id=f'dino_detect_{index+1}_cfg_scale', label=f'DINO {index + 1} CFG Scale(0 to Origin)', value=0, visible=True) + dino_detection_steps = gr.Slider(minimum=0, maximum=150, step=1, elem_id=f'dino_detect_{index+1}_steps', label=f'DINO {index + 1} Steps(0 to Origin)', value=0, visible=True) + dino_detection_spliter_disable = gr.Checkbox(label=f'Disable DINO {index + 1} Detect Split Mask', value=True, visible=True) + dino_detection_spliter_remove_area = gr.Slider(minimum=0, maximum=800, step=8, elem_id=f'dino_detect_{index+1}_remove_area', label=f'Remove {index + 1} Area', value=16, visible=True) + dino_detection_clip_skip = gr.Slider(minimum=0, maximum=10, step=1, elem_id=f'dino_detect_{index+1}_clip_skip', label=f'Clip skip {index + 1} Inpaint(0 to Origin)', value=0, visible=True) + dino_detection_ckpt_list.append(dino_detection_ckpt) + dino_detection_vae_list.append(dino_detection_vae) + dino_detection_prompt_list.append(dino_detection_prompt) + dino_detection_positive_list.append(dino_detection_positive) + dino_detection_negative_list.append(dino_detection_negative) + dino_detection_denoise_list.append(dino_detection_denoise) + dino_detection_cfg_list.append(dino_detection_cfg) + dino_detection_steps_list.append(dino_detection_steps) + dino_detection_spliter_disable_list.append(dino_detection_spliter_disable) + dino_detection_spliter_remove_area_list.append(dino_detection_spliter_remove_area) + dino_detection_clip_skip_list.append(dino_detection_clip_skip) + dino_tabs = dino_tabs_acc + dino_full_res_inpaint = gr.Checkbox(label='Inpaint at full 
resolution ', elem_id='detailer_full_res', value=True, visible = False) + with gr.Row(): + dino_inpaint_padding = gr.Slider(label='Inpaint at full resolution padding, pixels ', elem_id='detailer_padding', minimum=0, maximum=256, step=4, value=0, visible=False) + detailer_mask_blur = gr.Slider(label='Detailer Blur', elem_id='detailer_mask_blur', minimum=0, maximum=64, step=1, value=4, visible=False) + + with gr.Accordion("Postprocessing", open=False, elem_id='ddsd_post_processing'): + with gr.Column(): + postprocess_info = gr.HTML('

Postprocessing to the final image

') + disable_postprocess = gr.Checkbox(label='Disable PostProcess', elem_id='disable_postprocess',value=True, visible=True) + with gr.Tabs(elem_id = 'ddsd_postprocess_arguments', visible=False) as postprocess_tabs_acc: + for index in range(shared.opts.data.get('postprocessing_count', 1)): + with gr.Tab(f'Postprocessing {index + 1} Argument', elem_id=f'postprocessing_{index + 1}_argument_tab'): + pp_type = gr.Dropdown(label=f'Postprocessing type {index+1}', elem_id=f'postprocessing_{index+1}', choices=pp_types, value=pp_types[0], visible=True) + pp_saturation_strength = gr.Slider(label=f'Saturation strength {index+1}', minimum=0, maximum=3, step=0.01, value=1.1, visible=False) + pp_sharpening_radius = gr.Slider(label=f'Sharpening radius {index+1}', minimum=0, maximum=50, step=1, value=2, visible=False) + pp_sharpening_percent = gr.Slider(label=f'Sharpening percent {index+1}', minimum=0, maximum=300, step=1, value=150, visible=False) + pp_sharpening_threshold = gr.Slider(label=f'Sharpening threshold {index+1}', minimum=0, maximum=10, step=0.01, value=3, visible=False) + pp_gaussian_radius = gr.Slider(label=f'Gaussian Blur radius {index+1}', minimum=0, maximum=50, step=1, value=2, visible=False) + pp_brightness_strength = gr.Slider(label=f'Brightness strength {index+1}', minimum=0, maximum=5, step=0.01, value=1.1, visible=False) + pp_color_strength = gr.Slider(label=f'Color strength {index+1}', minimum=0, maximum=5, step=0.01, value=1.1, visible=False) + pp_contrast_strength = gr.Slider(label=f'Contrast strength {index+1}', minimum=0, maximum=5, step=0.01, value=1.1, visible=False) + pp_hue_strength = gr.Slider(label=f'Hue strength {index+1}', minimum=-1, maximum=1, step=0.01, value=0, visible=False) + pp_bilateral_sigmaC = gr.Slider(label=f'Bilateral sigmaC {index+1}', minimum=0, maximum=100, step=1, value=10, visible=False) + pp_bilateral_sigmaS = gr.Slider(label=f'Bilateral sigmaS {index+1}', minimum=0, maximum=30, step=1, value=10, visible=False) + 
pp_color_tint_type_name = gr.Radio(label=f'Color tint type name {index+1}',choices=['warm', 'cool'], value='warm', visible=False) + pp_color_tint_lut_name = gr.Dropdown(label=f'Color tint lut name {index+1}',choices=lut_model_list(), value=lut_model_list()[0], visible=False) + pp_type_list.append(pp_type) + pp_saturation_strength_list.append(pp_saturation_strength) + pp_sharpening_radius_list.append(pp_sharpening_radius) + pp_sharpening_percent_list.append(pp_sharpening_percent) + pp_sharpening_threshold_list.append(pp_sharpening_threshold) + pp_gaussian_radius_list.append(pp_gaussian_radius) + pp_brightness_strength_list.append(pp_brightness_strength) + pp_color_strength_list.append(pp_color_strength) + pp_contrast_strength_list.append(pp_contrast_strength) + pp_hue_strength_list.append(pp_hue_strength) + pp_bilateral_sigmaC_list.append(pp_bilateral_sigmaC) + pp_bilateral_sigmaS_list.append(pp_bilateral_sigmaS) + pp_color_tint_type_name_list.append(pp_color_tint_type_name) + pp_color_tint_lut_name_list.append(pp_color_tint_lut_name) + def pp_type_change_func(pp_saturation_strength,pp_sharpening_radius,pp_sharpening_percent,pp_sharpening_threshold,pp_gaussian_radius,pp_brightness_strength,pp_color_strength,pp_contrast_strength,pp_hue_strength,pp_bilateral_sigmaC,pp_bilateral_sigmaS,pp_color_tint_type_name,pp_color_tint_lut_name): + saturation_strength, sharpening_radius, sharpening_percent, sharpening_threshold, gaussian_radius, brightness_strength, color_strength, contrast_strength, hue_strength, bilateral_sigmaC, bilateral_sigmaS, color_tint_type_name, color_tint_lut_name = pp_saturation_strength,pp_sharpening_radius,pp_sharpening_percent,pp_sharpening_threshold,pp_gaussian_radius,pp_brightness_strength,pp_color_strength,pp_contrast_strength,pp_hue_strength,pp_bilateral_sigmaC,pp_bilateral_sigmaS,pp_color_tint_type_name,pp_color_tint_lut_name + return lambda data:{ + saturation_strength:gr_show(data == 'saturation'), + sharpening_radius:gr_show(data == 
'sharpening'), + sharpening_percent:gr_show(data == 'sharpening'), + sharpening_threshold:gr_show(data == 'sharpening'), + gaussian_radius:gr_show(data == 'gaussian blur'), + brightness_strength:gr_show(data == 'brightness'), + color_strength:gr_show(data == 'color'), + contrast_strength:gr_show(data == 'contrast'), + hue_strength:gr_show(data == 'hue'), + bilateral_sigmaC:gr_show(data == 'bilateral'), + bilateral_sigmaS:gr_show(data == 'bilateral'), + color_tint_type_name:gr_show(data == 'color tint(type)'), + color_tint_lut_name:gr_show(data == 'color tint(lut)') + } + def pp_type_change_func2(pp_saturation_strength,pp_sharpening_radius,pp_sharpening_percent,pp_sharpening_threshold,pp_gaussian_radius,pp_brightness_strength,pp_color_strength,pp_contrast_strength,pp_hue_strength,pp_bilateral_sigmaC,pp_bilateral_sigmaS,pp_color_tint_type_name,pp_color_tint_lut_name): + saturation_strength, sharpening_radius, sharpening_percent, sharpening_threshold, gaussian_radius, brightness_strength, color_strength, contrast_strength, hue_strength, bilateral_sigmaC, bilateral_sigmaS, color_tint_type_name, color_tint_lut_name = pp_saturation_strength,pp_sharpening_radius,pp_sharpening_percent,pp_sharpening_threshold,pp_gaussian_radius,pp_brightness_strength,pp_color_strength,pp_contrast_strength,pp_hue_strength,pp_bilateral_sigmaC,pp_bilateral_sigmaS,pp_color_tint_type_name,pp_color_tint_lut_name + return [saturation_strength, sharpening_radius, sharpening_percent, sharpening_threshold, gaussian_radius, brightness_strength, color_strength, contrast_strength, hue_strength, bilateral_sigmaC, bilateral_sigmaS, color_tint_type_name, color_tint_lut_name] + pp_type.change( + pp_type_change_func(pp_saturation_strength,pp_sharpening_radius,pp_sharpening_percent,pp_sharpening_threshold,pp_gaussian_radius,pp_brightness_strength,pp_color_strength,pp_contrast_strength,pp_hue_strength,pp_bilateral_sigmaC,pp_bilateral_sigmaS,pp_color_tint_type_name,pp_color_tint_lut_name), + inputs=[pp_type], + 
outputs=pp_type_change_func2(pp_saturation_strength,pp_sharpening_radius,pp_sharpening_percent,pp_sharpening_threshold,pp_gaussian_radius,pp_brightness_strength,pp_color_strength,pp_contrast_strength,pp_hue_strength,pp_bilateral_sigmaC,pp_bilateral_sigmaS,pp_color_tint_type_name,pp_color_tint_lut_name) + ) + postprocess_tabs = postprocess_tabs_acc + + with gr.Accordion("Watermark", open=False, elem_id='ddsd_watermark_option'): + with gr.Column(): + watermark_info = gr.HTML('

Add a watermark to the final saved image

') + disable_watermark = gr.Checkbox(label='Disable Watermark', elem_id='disable_watermark',value=True, visible=True) + with gr.Tabs(elem_id='watermark_tabs', visible=False) as watermark_tabs_acc: + for index in range(shared.opts.data.get('watermark_count', 1)): + with gr.Tab(f'Watermark {index + 1} Argument', elem_id=f'watermark_{index+1}_argument_tab'): + watermark_type = gr.Radio(choices=['Text','Image'], value='Text', label=f'Watermark {index+1} text') + watermark_position = gr.Dropdown(choices=['Left','Left-Top','Top','Right-Top','Right','Right-Bottom','Bottom','Left-Bottom','Center'], value='Center', label=f'Watermark {index+1} Position', elem_id=f'watermark_{index+1}_position') + with gr.Column(): + watermark_image = gr.Image(label=f"Watermark {index+1} Upload image", visible=False) + with gr.Row(): + watermark_image_size_width = gr.Slider(label=f'Watermark {index+1} Width', visible=False, minimum=50, maximum=500, step=10, value=100) + watermark_image_size_height = gr.Slider(label=f'Watermark {index+1} Height', visible=False, minimum=50, maximum=500, step=10, value=100) + with gr.Column(): + watermark_text = gr.Textbox(placeholder='watermark text - ex) Copyright © NeoGraph. 
All Rights Reserved.', visible=True, value='') + with gr.Row(): + watermark_text_color = gr.ColorPicker(label=f'Watermark {index+1} Color') + watermark_text_font = gr.Dropdown(label=f'Watermark {index+1} Fonts', choices=fonts_list, value=fonts_list[0]) + watermark_text_size = gr.Slider(label=f'Watermark {index+1} Size', visible=True, minimum=10, maximum=500, step=1, value=50) + watermark_padding = gr.Slider(label=f'Watermark {index+1} Padding', visible=True, minimum=0, maximum=200, step=1, value=10) + watermark_alpha = gr.Slider(label=f'Watermark {index+1} Alpha', visible=True, minimum=0, maximum=1, step=0.01, value=0.4) + watermark_type_list.append(watermark_type) + watermark_position_list.append(watermark_position) + watermark_image_list.append(watermark_image) + watermark_image_size_width_list.append(watermark_image_size_width) + watermark_image_size_height_list.append(watermark_image_size_height) + watermark_text_list.append(watermark_text) + watermark_text_color_list.append(watermark_text_color) + watermark_text_font_list.append(watermark_text_font) + watermark_text_size_list.append(watermark_text_size) + watermark_padding_list.append(watermark_padding) + watermark_alpha_list.append(watermark_alpha) + def watermark_type_change_func(watermark_image, watermark_image_size_width, watermark_image_size_height, watermark_text, watermark_text_color, watermark_text_font, watermark_text_size): + image, image_size_width, iamge_size_height, text, text_color, text_font, text_size = watermark_image, watermark_image_size_width, watermark_image_size_height, watermark_text, watermark_text_color, watermark_text_font, watermark_text_size + return lambda data:{ + image:gr_show(data == 'Image'), + image_size_width:gr_show(data == 'Image'), + iamge_size_height:gr_show(data == 'Image'), + text:gr_show(data == 'Text'), + text_color:gr_show(data == 'Text'), + text_font:gr_show(data == 'Text'), + text_size:gr_show(data == 'Text') + } + def watermark_type_change_func2(watermark_image, 
watermark_image_size_width, watermark_image_size_height, watermark_text, watermark_text_color, watermark_text_font, watermark_text_size): + image, image_size_width, iamge_size_height, text, text_color, text_font, text_size = watermark_image, watermark_image_size_width, watermark_image_size_height, watermark_text, watermark_text_color, watermark_text_font, watermark_text_size + return [image, image_size_width, iamge_size_height, text, text_color, text_font, text_size] + watermark_type.change( + watermark_type_change_func(watermark_image,watermark_image_size_width,watermark_image_size_height,watermark_text,watermark_text_color,watermark_text_font,watermark_text_size), + inputs=[watermark_type], + outputs=watermark_type_change_func2(watermark_image, watermark_image_size_width, watermark_image_size_height, watermark_text, watermark_text_color, watermark_text_font, watermark_text_size) + ) + watermark_tabs = watermark_tabs_acc + disable_outpaint.change( + lambda disable:{ + outpaint_sample:gr_show(not disable), + outpaint_tabs:gr_show(not disable), + outpaint_mask_blur:gr_show(not disable) + }, + inputs=[disable_outpaint], + outputs=[outpaint_sample, outpaint_tabs, outpaint_mask_blur] + ) + disable_watermark.change( + lambda disable:{ + watermark_tabs:gr_show(not disable) + }, + inputs=[disable_watermark], + outputs=watermark_tabs + ) + disable_postprocess.change( + lambda disable:{ + postprocess_tabs:gr_show(not disable) + }, + inputs=[disable_postprocess], + outputs=postprocess_tabs + ) + disable_upscaler.change( + lambda disable: { + ddetailer_before_upscaler:gr_show(not disable), + upscaler_sample:gr_show(not disable), + upscaler_index:gr_show(not disable), + upscaler_ckpt:gr_show(not disable), + upscaler_vae:gr_show(not disable), + scalevalue:gr_show(not disable), + overlap:gr_show(not disable), + rewidth:gr_show(not disable), + reheight:gr_show(not disable), + denoising_strength:gr_show(not disable), + }, + inputs= [disable_upscaler], + outputs 
=[ddetailer_before_upscaler, upscaler_sample, upscaler_index, upscaler_ckpt, upscaler_vae, scalevalue, overlap, rewidth, reheight, denoising_strength] + ) + + disable_mask_paint_mode.change( + lambda disable:{ + inpaint_mask_mode:gr_show(is_img2img and not disable) + }, + inputs=[disable_mask_paint_mode], + outputs=inpaint_mask_mode + ) + + disable_detailer.change( + lambda disable, in_disable:{ + disable_mask_paint_mode:gr_show(not disable and is_img2img), + inpaint_mask_mode:gr_show(not disable and is_img2img and not in_disable), + detailer_sample:gr_show(not disable), + detailer_sam_model:gr_show(not disable), + detailer_dino_model:gr_show(not disable), + dino_full_res_inpaint:gr_show(not disable), + dino_inpaint_padding:gr_show(not disable), + detailer_mask_blur:gr_show(not disable), + dino_tabs:gr_show(not disable) + }, + inputs=[disable_detailer, disable_mask_paint_mode], + outputs=[ + disable_mask_paint_mode, + inpaint_mask_mode, + detailer_sample, + detailer_sam_model, + detailer_dino_model, + dino_full_res_inpaint, + dino_inpaint_padding, + detailer_mask_blur, + dino_tabs + ] + ) + + ret += [enable_script_names] + ret += [disable_watermark, disable_postprocess] + ret += [disable_upscaler, ddetailer_before_upscaler, scalevalue, upscaler_sample, overlap, upscaler_index, rewidth, reheight, denoising_strength, upscaler_ckpt, upscaler_vae] + ret += [disable_detailer, disable_mask_paint_mode, inpaint_mask_mode, detailer_sample, detailer_sam_model, detailer_dino_model, dino_full_res_inpaint, dino_inpaint_padding, detailer_mask_blur] + ret += [disable_outpaint, outpaint_sample, outpaint_mask_blur] + ret += dino_detection_ckpt_list + \ + dino_detection_vae_list + \ + dino_detection_prompt_list + \ + dino_detection_positive_list + \ + dino_detection_negative_list + \ + dino_detection_denoise_list + \ + dino_detection_cfg_list + \ + dino_detection_steps_list + \ + dino_detection_spliter_disable_list + \ + dino_detection_spliter_remove_area_list + \ + 
dino_detection_clip_skip_list + \ + watermark_type_list + \ + watermark_position_list + \ + watermark_image_list + \ + watermark_image_size_width_list + \ + watermark_image_size_height_list + \ + watermark_text_list + \ + watermark_text_color_list + \ + watermark_text_font_list + \ + watermark_text_size_list + \ + watermark_padding_list + \ + watermark_alpha_list + \ + pp_type_list + \ + pp_saturation_strength_list + \ + pp_sharpening_radius_list + \ + pp_sharpening_percent_list + \ + pp_sharpening_threshold_list + \ + pp_gaussian_radius_list + \ + pp_brightness_strength_list + \ + pp_color_strength_list + \ + pp_contrast_strength_list + \ + pp_hue_strength_list + \ + pp_bilateral_sigmaC_list + \ + pp_bilateral_sigmaS_list + \ + pp_color_tint_type_name_list + \ + pp_color_tint_lut_name_list + \ + outpaint_positive_list + \ + outpaint_negative_list + \ + outpaint_denoise_list + \ + outpaint_cfg_list + \ + outpaint_steps_list + \ + outpaint_pixels_list + \ + outpaint_direction_list + + def ds(*args): + args = list(args) + ddsd_save_path = args[0] + args = args[1:] + enable_script_names,disable_watermark,disable_postprocess,disable_upscaler,ddetailer_before_upscaler,scalevalue,upscaler_sample,overlap,upscaler_index,rewidth,reheight,denoising_strength,upscaler_ckpt,upscaler_vae,disable_detailer,disable_mask_paint_mode,inpaint_mask_mode,detailer_sample,detailer_sam_model,detailer_dino_model,dino_full_res_inpaint,dino_inpaint_padding,detailer_mask_blur,disable_outpaint, outpaint_sample, outpaint_mask_blur = args[:26] + result = {} + result['enable_script_names'] = enable_script_names + result['disable_watermark'] = disable_watermark + result['disable_postprocess'] = disable_postprocess + result['disable_upscaler'] = disable_upscaler + result['ddetailer_before_upscaler'] = ddetailer_before_upscaler + result['scalevalue'] = scalevalue + result['upscaler_sample'] = upscaler_sample + result['overlap'] = overlap + result['upscaler_index'] = 
shared.sd_upscalers[upscaler_index].name + result['rewidth'] = rewidth + result['reheight'] = reheight + result['denoising_strength'] = denoising_strength + result['upscaler_ckpt'] = upscaler_ckpt + result['upscaler_vae'] = upscaler_vae + result['disable_detailer'] = disable_detailer + result['disable_mask_paint_mode'] = disable_mask_paint_mode + result['inpaint_mask_mode'] = inpaint_mask_mode + result['detailer_sample'] = detailer_sample + result['detailer_sam_model'] = detailer_sam_model + result['detailer_dino_model'] = detailer_dino_model + result['dino_full_res_inpaint'] = dino_full_res_inpaint + result['dino_inpaint_padding'] = dino_inpaint_padding + result['detailer_mask_blur'] = detailer_mask_blur + result['disable_outpaint'] = disable_outpaint + result['outpaint_sample'] = outpaint_sample + result['outpaint_mask_blur'] = outpaint_mask_blur + args = args[26:] + result['dino_detect_count'] = shared.opts.data.get('dino_detect_count', 2) + for index in range(result['dino_detect_count']): + result[f'dino_detection_ckpt_{index+1}'] = args[index + result['dino_detect_count'] * 0] + result[f'dino_detection_vae_{index+1}'] = args[index + result['dino_detect_count'] * 1] + result[f'dino_detection_prompt_{index+1}'] = args[index + result['dino_detect_count'] * 2] + result[f'dino_detection_positive_{index+1}'] = args[index + result['dino_detect_count'] * 3] + result[f'dino_detection_negative_{index+1}'] = args[index + result['dino_detect_count'] * 4] + result[f'dino_detection_denoise_{index+1}'] = args[index + result['dino_detect_count'] * 5] + result[f'dino_detection_cfg_{index+1}'] = args[index + result['dino_detect_count'] * 6] + result[f'dino_detection_steps_{index+1}'] = args[index + result['dino_detect_count'] * 7] + result[f'dino_detection_spliter_disable_{index+1}'] = args[index + result['dino_detect_count'] * 8] + result[f'dino_detection_spliter_remove_area_{index+1}'] = args[index + result['dino_detect_count'] * 9] + 
result[f'dino_detection_clip_skip_{index+1}'] = args[index + result['dino_detect_count'] * 10] + args = args[result['dino_detect_count'] * 11:] + result['watermark_count'] = shared.opts.data.get('watermark_count', 1) + for index in range(result['watermark_count']): + result[f'watermark_type_{index+1}'] = args[index + result['watermark_count'] * 0] + result[f'watermark_position_{index+1}'] = args[index + result['watermark_count'] * 1] + result[f'watermark_image_{index+1}'] = None + result[f'watermark_image_size_width_{index+1}'] = args[index + result['watermark_count'] * 3] + result[f'watermark_image_size_height_{index+1}'] = args[index + result['watermark_count'] * 4] + result[f'watermark_text_{index+1}'] = args[index + result['watermark_count'] * 5] + result[f'watermark_text_color_{index+1}'] = args[index + result['watermark_count'] * 6] + result[f'watermark_text_font_{index+1}'] = args[index + result['watermark_count'] * 7] + result[f'watermark_text_size_{index+1}'] = args[index + result['watermark_count'] * 8] + result[f'watermark_padding_{index+1}'] = args[index + result['watermark_count'] * 9] + result[f'watermark_alpha_{index+1}'] = args[index + result['watermark_count'] * 10] + args = args[result['watermark_count'] * 11:] + result['postprocessing_count'] = shared.opts.data.get('postprocessing_count', 1) + for index in range(result['postprocessing_count']): + result[f'pp_type_{index+1}'] = args[index + result['postprocessing_count'] * 0] + result[f'pp_saturation_strength_{index+1}'] = args[index + result['postprocessing_count'] * 1] + result[f'pp_sharpening_radius_{index+1}'] = args[index + result['postprocessing_count'] * 2] + result[f'pp_sharpening_percent_{index+1}'] = args[index + result['postprocessing_count'] * 3] + result[f'pp_sharpening_threshold_{index+1}'] = args[index + result['postprocessing_count'] * 4] + result[f'pp_gaussian_radius_{index+1}'] = args[index + result['postprocessing_count'] * 5] + result[f'pp_brightness_strength_{index+1}'] = 
args[index + result['postprocessing_count'] * 6] + result[f'pp_color_strength_{index+1}'] = args[index + result['postprocessing_count'] * 7] + result[f'pp_contrast_strength_{index+1}'] = args[index + result['postprocessing_count'] * 8] + result[f'pp_hue_strength_{index+1}'] = args[index + result['postprocessing_count'] * 9] + result[f'pp_bilateral_sigmaC_{index+1}'] = args[index + result['postprocessing_count'] * 10] + result[f'pp_bilateral_sigmaS_{index+1}'] = args[index + result['postprocessing_count'] * 11] + result[f'pp_color_tint_type_name_{index+1}'] = args[index + result['postprocessing_count'] * 12] + result[f'pp_color_tint_lut_name_{index+1}'] = args[index + result['postprocessing_count'] * 13] + args = args[result['postprocessing_count'] * 13:] + result['outpaint_count'] = shared.opts.data.get('outpaint_count', 1) + for index in range(result['outpaint_count']): + result[f'outpaint_positive_{index+1}'] = args[index + result['outpaint_count'] * 0] + result[f'outpaint_negative_{index+1}'] = args[index + result['outpaint_count'] * 1] + result[f'outpaint_denoise_{index+1}'] = args[index + result['outpaint_count'] * 2] + result[f'outpaint_cfg_{index+1}'] = args[index + result['outpaint_count'] * 3] + result[f'outpaint_steps_{index+1}'] = args[index + result['outpaint_count'] * 4] + result[f'outpaint_pixels_{index+1}'] = args[index + result['outpaint_count'] * 5] + result[f'outpaint_direction_{index+1}'] = args[index + result['outpaint_count'] * 6] + args = args[result['outpaint_count'] * 6:] + if not os.path.exists(ddsd_config_path): + os.mkdir(ddsd_config_path) + with open(os.path.join(ddsd_config_path, f'{ddsd_save_path}.ddcfg'), 'w', encoding='utf-8') as f: + f.write(json_write(result)) + choices = [x[:-6] for x in os.listdir(ddsd_config_path) if x.endswith('.ddcfg')] + return { + ddsd_load_path:gr_list_refresh(choices, choices[0]) + } + def dl(ddsd_load_path): + with open(os.path.join(ddsd_config_path, f'{ddsd_load_path}.ddcfg'), 'r', encoding='utf-8') as 
f: + result = json_read(f) + results = [result['enable_script_names'],result['disable_watermark'],result['disable_postprocess'],result['disable_upscaler'],result['ddetailer_before_upscaler'],result['scalevalue'],result['upscaler_sample'],result['overlap'],result['upscaler_index'],result['rewidth'],result['reheight'],result['denoising_strength'],result['upscaler_ckpt'],result['upscaler_vae'],result['disable_detailer'],result['disable_mask_paint_mode'],result['inpaint_mask_mode'],result['detailer_sample'],result['detailer_sam_model'],result['detailer_dino_model'],result['dino_full_res_inpaint'],result['dino_inpaint_padding'],result['detailer_mask_blur'],result['disable_outpaint'],result['outpaint_sample'],result['outpaint_mask_blur']] + def result_create(token,file_count,count, default): + data = file_count if file_count < count else count + temp = [] + for index in range(data): + temp.append(result.get(f'{token}_{index+1}',default)) + while len(temp) < count: + temp.append(default) + return temp + results += result_create('dino_detection_ckpt',result['dino_detect_count'], shared.opts.data.get('dino_detect_count', 2), 'Original') + results += result_create('dino_detection_vae',result['dino_detect_count'], shared.opts.data.get('dino_detect_count', 2), 'Original') + results += result_create('dino_detection_prompt',result['dino_detect_count'], shared.opts.data.get('dino_detect_count', 2), '') + results += result_create('dino_detection_positive',result['dino_detect_count'], shared.opts.data.get('dino_detect_count', 2), '') + results += result_create('dino_detection_negative',result['dino_detect_count'], shared.opts.data.get('dino_detect_count', 2), '') + results += result_create('dino_detection_denoise',result['dino_detect_count'], shared.opts.data.get('dino_detect_count', 2), 0.4) + results += result_create('dino_detection_cfg',result['dino_detect_count'], shared.opts.data.get('dino_detect_count', 2), 0) + results += 
result_create('dino_detection_steps',result['dino_detect_count'], shared.opts.data.get('dino_detect_count', 2), 0) + results += result_create('dino_detection_spliter_disable',result['dino_detect_count'], shared.opts.data.get('dino_detect_count', 2), True) + results += result_create('dino_detection_spliter_remove_area',result['dino_detect_count'], shared.opts.data.get('dino_detect_count', 2), 8) + results += result_create('dino_detection_clip_skip',result['dino_detect_count'], shared.opts.data.get('dino_detect_count', 2), 0) + + results += result_create('watermark_type',result['watermark_count'], shared.opts.data.get('watermark_count', 1), 'Text') + results += result_create('watermark_position',result['watermark_count'], shared.opts.data.get('watermark_count', 1), 'Center') + results += result_create('watermark_image',result['watermark_count'], shared.opts.data.get('watermark_count', 1), None) + results += result_create('watermark_image_size_width',result['watermark_count'], shared.opts.data.get('watermark_count', 1), 100) + results += result_create('watermark_image_size_height',result['watermark_count'], shared.opts.data.get('watermark_count', 1), 100) + results += result_create('watermark_text',result['watermark_count'], shared.opts.data.get('watermark_count', 1), '') + results += result_create('watermark_text_color',result['watermark_count'], shared.opts.data.get('watermark_count', 1), None) + results += result_create('watermark_text_font',result['watermark_count'], shared.opts.data.get('watermark_count', 1), 'Arial') + results += result_create('watermark_text_size',result['watermark_count'], shared.opts.data.get('watermark_count', 1), 50) + results += result_create('watermark_padding',result['watermark_count'], shared.opts.data.get('watermark_count', 1), 10) + results += result_create('watermark_alpha',result['watermark_count'], shared.opts.data.get('watermark_count', 1), 0.4) + + results += result_create('pp_type',result['postprocessing_count'], 
shared.opts.data.get('postprocessing_count', 1), 'none') + results += result_create('pp_saturation_strength',result['postprocessing_count'], shared.opts.data.get('postprocessing_count', 1), 1.1) + results += result_create('pp_sharpening_radius',result['postprocessing_count'], shared.opts.data.get('postprocessing_count', 1), 2) + results += result_create('pp_sharpening_percent',result['postprocessing_count'], shared.opts.data.get('postprocessing_count', 1), 100) + results += result_create('pp_sharpening_threshold',result['postprocessing_count'], shared.opts.data.get('postprocessing_count', 1), 1) + results += result_create('pp_gaussian_radius',result['postprocessing_count'], shared.opts.data.get('postprocessing_count', 1), 2) + results += result_create('pp_brightness_strength',result['postprocessing_count'], shared.opts.data.get('postprocessing_count', 1), 1.1) + results += result_create('pp_color_strength',result['postprocessing_count'], shared.opts.data.get('postprocessing_count', 1), 1.1) + results += result_create('pp_contrast_strength',result['postprocessing_count'], shared.opts.data.get('postprocessing_count', 1), 1.1) + results += result_create('pp_hue_strength',result['postprocessing_count'], shared.opts.data.get('postprocessing_count', 1), 0) + results += result_create('pp_bilateral_sigmaC',result['postprocessing_count'], shared.opts.data.get('postprocessing_count', 1), 10) + results += result_create('pp_bilateral_sigmaS',result['postprocessing_count'], shared.opts.data.get('postprocessing_count', 1), 10) + results += result_create('pp_color_tint_type_name',result['postprocessing_count'], shared.opts.data.get('postprocessing_count', 1), 'warm') + results += result_create('pp_color_tint_lut_name',result['postprocessing_count'], shared.opts.data.get('postprocessing_count', 1), 'FGCineBasic.cube') + + results += result_create('outpaint_positive', result['outpaint_count'], shared.opts.data.get('outpaint_count', 1), '') + results += 
result_create('outpaint_negative', result['outpaint_count'], shared.opts.data.get('outpaint_count', 1), '') + results += result_create('outpaint_denoise', result['outpaint_count'], shared.opts.data.get('outpaint_count', 1), 0.8) + results += result_create('outpaint_cfg', result['outpaint_count'], shared.opts.data.get('outpaint_count', 1), 0) + results += result_create('outpaint_steps', result['outpaint_count'], shared.opts.data.get('outpaint_count', 1), 0) + results += result_create('outpaint_pixels', result['outpaint_count'], shared.opts.data.get('outpaint_count', 1), 64) + results += result_create('outpaint_direction', result['outpaint_count'], shared.opts.data.get('outpaint_count', 1), 'None') + return dict(zip(ret, [gr_value_refresh(x) for x in results])) + ddsd_save.click(ds, inputs=[ddsd_save_path]+ret, outputs=[ddsd_load_path]) + ddsd_load.click(dl, inputs=[ddsd_load_path], outputs=ret) + + return ret + + def outpainting(self, p, init_image, + outpaint_sample, outpaint_mask_blur, outpaint_count, + outpaint_denoise_list, outpaint_cfg_list, outpaint_steps_list, outpaint_positive_list, outpaint_negative_list, + outpaint_pixels_list, outpaint_direction_list): + + for outpaint_index in range(outpaint_count): + if outpaint_direction_list[outpaint_index] == 'None': continue + is_horiz = outpaint_direction_list[outpaint_index] in ['Left','Right'] + is_vert = outpaint_direction_list[outpaint_index] in ['Up','Down'] + + target_w = init_image.width + (outpaint_pixels_list[outpaint_index] if is_horiz else 0) + target_h = init_image.height + (outpaint_pixels_list[outpaint_index] if is_vert else 0) + + image = Image.new('RGB', (target_w,target_h)) + image.paste(init_image, + (outpaint_pixels_list[outpaint_index] if outpaint_direction_list[outpaint_index] == 'Left' else 0, + outpaint_pixels_list[outpaint_index] if outpaint_direction_list[outpaint_index] == 'Up' else 0)) + mask = Image.new('L', (target_w, target_h), 'white') + mask_draw = ImageDraw.Draw(mask) + + 
mask_draw.rectangle(( + (outpaint_pixels_list[outpaint_index] + outpaint_mask_blur * 2) if outpaint_direction_list[outpaint_index] == 'Left' else 0, + (outpaint_pixels_list[outpaint_index] + outpaint_mask_blur * 2) if outpaint_direction_list[outpaint_index] == 'Up' else 0, + (mask.width - outpaint_pixels_list[outpaint_index] - outpaint_mask_blur * 2) if outpaint_direction_list[outpaint_index] == 'Right' else target_w, + (mask.height - outpaint_pixels_list[outpaint_index] - outpaint_mask_blur * 2) if outpaint_direction_list[outpaint_index] == 'Down' else target_h + ), fill='black') + + latent_mask = Image.new('L', (target_w, target_h), 'white') + latent_mask_draw = ImageDraw.Draw(latent_mask) + latent_mask_draw.rectangle(( + (outpaint_pixels_list[outpaint_index] + outpaint_mask_blur // 2) if outpaint_direction_list[outpaint_index] == 'Left' else 0, + (outpaint_pixels_list[outpaint_index] + outpaint_mask_blur // 2) if outpaint_direction_list[outpaint_index] == 'Up' else 0, + (mask.width - outpaint_pixels_list[outpaint_index] - outpaint_mask_blur // 2) if outpaint_direction_list[outpaint_index] == 'Right' else target_w, + (mask.height - outpaint_pixels_list[outpaint_index] - outpaint_mask_blur // 2) if outpaint_direction_list[outpaint_index] == 'Down' else target_h + ), fill='black') + + devices.torch_gc() + + pi = I2I_Generator_Create( + p, ('Euler' if p.sampler_name in ['PLMS', 'UniPC', 'DDIM'] else p.sampler_name) if outpaint_sample == 'Original' else outpaint_sample, + outpaint_mask_blur * 2, False, 0, image, + outpaint_denoise_list[outpaint_index], + outpaint_cfg_list[outpaint_index] if outpaint_cfg_list[outpaint_index] > 0 else p.cfg_scale, + outpaint_steps_list[outpaint_index] if outpaint_steps_list[outpaint_index] > 0 else p.steps, + target_w, + target_h, + p.tiling, p.scripts, self.i2i_scripts, self.i2i_scripts_always, p.script_args, + outpaint_positive_list[outpaint_index] if outpaint_positive_list[outpaint_index] else self.target_prompts, + 
outpaint_negative_list[outpaint_index] if outpaint_negative_list[outpaint_index] else self.target_negative_prompts, + 0 + ) + + + pi.image_mask = mask + pi.latent_mask = latent_mask + pi.seed = self.target_seeds + outpaint_index + + state.job_count += 1 + proc = processing.process_images(pi) + + p.extra_generation_params[f'Outpaint {outpaint_index + 1} Direction'] = outpaint_direction_list[outpaint_index] + p.extra_generation_params[f'Outpaint {outpaint_index + 1} Pixels'] = outpaint_pixels_list[outpaint_index] + p.extra_generation_params[f'Outpaint {outpaint_index + 1} Positive'] = proc.all_prompts[0] if outpaint_positive_list[outpaint_index] else "Original" + p.extra_generation_params[f'Outpaint {outpaint_index + 1} Negative'] = proc.all_negative_prompts[0] if outpaint_negative_list[outpaint_index] else "Original" + p.extra_generation_params[f'Outpaint {outpaint_index + 1} Denoising'] = pi.denoising_strength + p.extra_generation_params[f'Outpaint {outpaint_index + 1} CFG Scale'] = pi.cfg_scale + p.extra_generation_params[f'Outpaint {outpaint_index + 1} Steps'] = pi.steps + + init_image = proc.images[0] + + return init_image + + + + def dino_detect_detailer(self, p, init_image, + disable_mask_paint_mode, inpaint_mask_mode, detailer_sample, detailer_sam_model, detailer_dino_model, + dino_full_res_inpaint, dino_inpaint_padding, detailer_mask_blur, + dino_detect_count, + dino_detection_ckpt_list, + dino_detection_vae_list, + dino_detection_prompt_list, + dino_detection_positive_list, + dino_detection_negative_list, + dino_detection_denoise_list, + dino_detection_cfg_list, + dino_detection_steps_list, + dino_detection_spliter_disable_list, + dino_detection_spliter_remove_area_list, + dino_detection_clip_skip_list): + self.image_results.append([]) + def mask_image_suffle(mask, image): + if shared.opts.data.get('mask_type', False): return mask + mask_image = Image.new("RGBA", mask.size, (255,255,255,0)) + mask_image.paste(mask, mask=mask) + mask_image = 
Image.composite(mask, image, mask_image) + return Image.blend(image, mask_image, 0.5) + for detect_index in range(dino_detect_count): + if len(dino_detection_prompt_list[detect_index]) < 1: continue + pi = I2I_Generator_Create( + p, ('Euler' if p.sampler_name in ['PLMS', 'UniPC', 'DDIM'] else p.sampler_name) if detailer_sample == 'Original' else detailer_sample, + detailer_mask_blur, dino_full_res_inpaint, dino_inpaint_padding, init_image, + dino_detection_denoise_list[detect_index], + dino_detection_cfg_list[detect_index] if dino_detection_cfg_list[detect_index] > 0 else p.cfg_scale, + dino_detection_steps_list[detect_index] if dino_detection_steps_list[detect_index] > 0 else p.steps, + p.width, p.height, p.tiling, p.scripts, self.i2i_scripts, self.i2i_scripts_always, p.script_args, + dino_detection_positive_list[detect_index] if dino_detection_positive_list[detect_index] else self.target_prompts, + dino_detection_negative_list[detect_index] if dino_detection_negative_list[detect_index] else self.target_negative_prompts + ) + mask = dino_detect_from_prompt(dino_detection_prompt_list[detect_index], detailer_sam_model, detailer_dino_model, init_image, disable_mask_paint_mode or isinstance(p, StableDiffusionProcessingTxt2Img), inpaint_mask_mode, getattr(p,'image_mask',None)) + if mask is not None: + self.change_ckpt_model(dino_detection_ckpt_list[detect_index] if dino_detection_ckpt_list[detect_index] != 'Original' else self.ckptname) + self.change_vae_model(dino_detection_vae_list[detect_index] if dino_detection_vae_list[detect_index] != 'Original' else self.vae) + opts.CLIP_stop_at_last_layers = dino_detection_clip_skip_list[detect_index] if dino_detection_clip_skip_list[detect_index] else self.clip_skip + if not dino_detection_spliter_disable_list[detect_index]: + mask = mask_spliter_and_remover(mask, dino_detection_spliter_remove_area_list[detect_index]) + for mask_index, mask_split in enumerate(mask): + pi.seed = self.target_seeds + mask_index + detect_index + 
pi.init_images = [init_image] + pi.image_mask = Image.fromarray(mask_split) + if shared.opts.data.get('save_ddsd_working_on_dino_mask_images', False): + images.save_image(mask_image_suffle(pi.image_mask, pi.init_images[0]), p.outpath_samples, + shared.opts.data.get('save_ddsd_working_on_dino_mask_images_prefix', ''), + pi.seed, self.target_prompts, opts.samples_format, + suffix='' if shared.opts.data.get('save_ddsd_working_on_dino_mask_images_suffix', '') == '' else f"-{shared.opts.data.get('save_ddsd_working_on_dino_mask_images_suffix', '')}", + info=create_infotext(p, p.all_prompts, p.all_seeds, p.all_subseeds, None, self.iter_number, self.batch_number), p=p) + state.job_count += 1 + if shared.opts.data.get('preview_masks_images', False): + shared.state.current_image = mask_image_suffle(pi.image_mask, pi.init_images[0]) + if shared.opts.data.get('result_masks', False): + self.image_results[-1].append(mask_image_suffle(pi.image_mask, pi.init_images[0])) + processed = processing.process_images(pi) + init_image = processed.images[0] + if shared.opts.data.get('save_ddsd_working_on_images', False): + images.save_image(init_image, p.outpath_samples, + shared.opts.data.get('save_ddsd_working_on_images_prefix', ''), + pi.seed, self.target_prompts, opts.samples_format, + suffix='' if shared.opts.data.get('save_ddsd_working_on_images_suffix', '') == '' else f"-{shared.opts.data.get('save_ddsd_working_on_images_suffix', '')}", + info=create_infotext(p, p.all_prompts, p.all_seeds, p.all_subseeds, None, self.iter_number, self.batch_number), p=p) + else: + pi.seed = self.target_seeds + detect_index + pi.init_images = [init_image] + pi.image_mask = Image.fromarray(mask) + if shared.opts.data.get('save_ddsd_working_on_dino_mask_images', False): + images.save_image(mask_image_suffle(pi.image_mask, pi.init_images[0]), p.outpath_samples, + shared.opts.data.get('save_ddsd_working_on_dino_mask_images_prefix', ''), + pi.seed, self.target_prompts, opts.samples_format, + suffix='' if 
shared.opts.data.get('save_ddsd_working_on_dino_mask_images_suffix', '') == '' else f"-{shared.opts.data.get('save_ddsd_working_on_dino_mask_images_suffix', '')}", + info=create_infotext(p, p.all_prompts, p.all_seeds, p.all_subseeds, None, self.iter_number, self.batch_number), p=p) + state.job_count += 1 + if shared.opts.data.get('preview_masks_images', False): + shared.state.current_image = mask_image_suffle(pi.image_mask, pi.init_images[0]) + if shared.opts.data.get('result_masks', False): + self.image_results[-1].append(mask_image_suffle(pi.image_mask, pi.init_images[0])) + processed = processing.process_images(pi) + init_image = processed.images[0] + if shared.opts.data.get('save_ddsd_working_on_images', False): + images.save_image(init_image, p.outpath_samples, + shared.opts.data.get('save_ddsd_working_on_images_prefix', ''), + pi.seed, self.target_prompts, opts.samples_format, + suffix='' if shared.opts.data.get('save_ddsd_working_on_images_suffix', '') == '' else f"-{shared.opts.data.get('save_ddsd_working_on_images_suffix', '')}", + info=create_infotext(p, p.all_prompts, p.all_seeds, p.all_subseeds, None, self.iter_number, self.batch_number), p=p) + p.extra_generation_params[f'DINO {detect_index + 1}'] = dino_detection_prompt_list[detect_index] + p.extra_generation_params[f'DINO {detect_index + 1} Positive'] = processed.all_prompts[0] if dino_detection_positive_list[detect_index] else "Original" + p.extra_generation_params[f'DINO {detect_index + 1} Negative'] = processed.all_negative_prompts[0] if dino_detection_negative_list[detect_index] else "Original" + p.extra_generation_params[f'DINO {detect_index + 1} Denoising'] = pi.denoising_strength + p.extra_generation_params[f'DINO {detect_index + 1} CFG Scale'] = pi.cfg_scale + p.extra_generation_params[f'DINO {detect_index + 1} Steps'] = pi.steps + p.extra_generation_params[f'DINO {detect_index + 1} Spliter'] = not dino_detection_spliter_disable_list[detect_index] + p.extra_generation_params[f'DINO 
{detect_index + 1} SplitRemove Area'] = dino_detection_spliter_remove_area_list[detect_index] + p.extra_generation_params[f'DINO {detect_index + 1} Ckpt Model'] = dino_detection_ckpt_list[detect_index] if dino_detection_ckpt_list[detect_index] != 'Original' else self.ckptname + p.extra_generation_params[f'DINO {detect_index + 1} Vae Model'] = dino_detection_vae_list[detect_index] if dino_detection_vae_list[detect_index] != 'Original' else self.vae + p.extra_generation_params[f'DINO {detect_index + 1} Clip Skip'] = dino_detection_clip_skip_list[detect_index] if dino_detection_clip_skip_list[detect_index] else 'Original' + else: + p.extra_generation_params[f'DINO {detect_index + 1}'] = 'Error' + opts.CLIP_stop_at_last_layers = self.clip_skip + return init_image + + def upscale(self, p, init_image, + scalevalue, upscaler_sample, overlap, rewidth, reheight, denoising_strength, upscaler_ckpt, upscaler_vae, + detailer_mask_blur, dino_full_res_inpaint, dino_inpaint_padding): + self.change_ckpt_model(upscaler_ckpt if upscaler_ckpt != 'Original' else self.ckptname) + self.change_vae_model(upscaler_vae if upscaler_vae != 'Original' else self.vae) + pi = I2I_Generator_Create( + p, ('Euler' if p.sampler_name in ['PLMS', 'UniPC', 'DDIM'] else p.sampler_name) if upscaler_sample == 'Original' else upscaler_sample, + detailer_mask_blur, dino_full_res_inpaint, dino_inpaint_padding, init_image, + denoising_strength, p.cfg_scale, p.steps, + rewidth, reheight, p.tiling, p.scripts, self.i2i_scripts, self.i2i_scripts_always, p.script_args, + self.target_prompts, self.target_negative_prompts + ) + p.extra_generation_params[f'Tile upscale value'] = scalevalue + p.extra_generation_params[f'Tile upscale width'] = rewidth + p.extra_generation_params[f'Tile upscale height'] = reheight + p.extra_generation_params[f'Tile upscale overlap'] = overlap + p.extra_generation_params[f'Tile upscale upscaler'] = self.upscaler.name + p.extra_generation_params[f'Tile upscale Ckpt Model'] = upscaler_ckpt 
if upscaler_ckpt != 'Original' else self.ckptname + p.extra_generation_params[f'Tile upscale Vae Model'] = upscaler_vae if upscaler_vae != 'Original' else self.vae + if(self.upscaler.name != "None"): + img = self.upscaler.scaler.upscale(init_image, scalevalue, self.upscaler.data_path) + else: + img = init_image + if rewidth and reheight: + devices.torch_gc() + grid = images.split_grid(img, tile_w=rewidth, tile_h=reheight, overlap=overlap) + work = [] + for y, h, row in grid.tiles: + for tiledata in row: + work.append(tiledata[2]) + + batch_count = math.ceil(len(work)) + state.job = 'Upscaler Batching' + state.job_count += batch_count + + print(f"Tile upscaling will process a total of {len(work)} images tiled as {len(grid.tiles[0][2])}x{len(grid.tiles)} per upscale in a total of {state.job_count} batches (I2I).") + + pi.seed = self.target_seeds + work_results = [] + for i in range(batch_count): + pi.init_images = work[i:(i+1)] + processed = processing.process_images(pi) + + p.seed = processed.seed + 1 + work_results += processed.images + + image_index = 0 + for y, h, row in grid.tiles: + for tiledata in row: + tiledata[2] = work_results[image_index] if image_index < len(work_results) else Image.new("RGB", (rewidth, reheight)) + image_index += 1 + init_image = images.combine_grid(grid) + else: + init_image = img + if shared.opts.data.get('save_ddsd_working_on_images', False): + images.save_image(init_image, p.outpath_samples, + shared.opts.data.get('save_ddsd_working_on_images_prefix', ''), + pi.seed, self.target_prompts, opts.samples_format, + suffix = '' if shared.opts.data.get('save_ddsd_working_on_images_suffix', '') == '' else f"-{shared.opts.data.get('save_ddsd_working_on_images_suffix', '')}", + info=create_infotext(p, p.all_prompts, p.all_seeds, p.all_subseeds, None, self.iter_number, self.batch_number), p=p) + return init_image + + def watermark(self, p, init_image): + if shared.opts.data.get('save_ddsd_watermark_with_and_without', False): + 
images.save_image(init_image, p.outpath_samples, + shared.opts.data.get('save_ddsd_watermark_with_and_without_prefix', ''), + self.target_seeds, self.target_prompts, opts.samples_format, + suffix= '' if shared.opts.data.get('save_ddsd_watermark_with_and_without_suffix', '') == '' else f"-{shared.opts.data.get('save_ddsd_watermark_with_and_without_suffix', '')}", + info=create_infotext(p, p.all_prompts, p.all_seeds, p.all_subseeds, None, self.iter_number, self.batch_number), p=p) + for water_index in range(self.watermark_count): + init_image = image_apply_watermark(init_image, + self.watermark_type_list[water_index], + self.watermark_position_list[water_index], + self.watermark_image_list[water_index], + self.watermark_image_size_width_list[water_index], + self.watermark_image_size_height_list[water_index], + self.watermark_text_list[water_index], + self.watermark_text_color_list[water_index], + self.font_path[self.watermark_text_font_list[water_index]], + self.watermark_text_size_list[water_index], + self.watermark_padding_list[water_index], + self.watermark_alpha_list[water_index]) + return init_image + + def postprocess_target(self, p, init_image, + pp_type_list, + pp_saturation_strength_list, + pp_sharpening_radius_list, pp_sharpening_percent_list, pp_sharpening_threshold_list, + pp_gaussian_radius_list, + pp_brightness_strength_list, + pp_color_strength_list, + pp_contrast_strength_list, + pp_hue_strength_list, + pp_bilateral_sigmaC_list, pp_bilateral_sigmaS_list, + pp_color_tint_type_name_list, + pp_color_tint_lut_name_list): + for pp_index in range(shared.opts.data.get('postprocessing_count', 1)): + if pp_type_list[pp_index] == 'none': continue + if shared.opts.data.get('save_ddsd_postprocessing_with_and_without', False): + images.save_image(init_image, p.outpath_samples, + shared.opts.data.get('save_ddsd_postprocessing_with_and_without_prefix', ''), + self.target_seeds, self.target_prompts, opts.samples_format, + suffix= '' if 
shared.opts.data.get('save_ddsd_postprocessing_with_and_without_suffix', '') == '' else f"-{shared.opts.data.get('save_ddsd_postprocessing_with_and_without_suffix', '')}", + info=create_infotext(p, p.all_prompts, p.all_seeds, p.all_subseeds, None, self.iter_number, self.batch_number), p=p) + init_image = ddsd_postprocess(init_image, pp_type_list[pp_index], pp_saturation_strength_list[pp_index], pp_sharpening_radius_list[pp_index], pp_sharpening_percent_list[pp_index], pp_sharpening_threshold_list[pp_index], pp_gaussian_radius_list[pp_index], pp_brightness_strength_list[pp_index], pp_color_strength_list[pp_index], pp_contrast_strength_list[pp_index], pp_hue_strength_list[pp_index], pp_bilateral_sigmaC_list[pp_index], pp_bilateral_sigmaS_list[pp_index], pp_color_tint_lut_name_list[pp_index], pp_color_tint_type_name_list[pp_index]) + p.extra_generation_params[f'Postprocess {pp_index+1} type'] = pp_type_list[pp_index] + if pp_type_list[pp_index] == 'saturation': + p.extra_generation_params[f'Postprocess {pp_index+1} strength'] = pp_saturation_strength_list[pp_index] + elif pp_type_list[pp_index] == 'sharpening': + p.extra_generation_params[f'Postprocess {pp_index+1} radius'] = pp_sharpening_radius_list[pp_index] + p.extra_generation_params[f'Postprocess {pp_index+1} percent'] = pp_sharpening_percent_list[pp_index] + p.extra_generation_params[f'Postprocess {pp_index+1} threshold'] = pp_sharpening_threshold_list[pp_index] + elif pp_type_list[pp_index] == 'gaussian blur': + p.extra_generation_params[f'Postprocess {pp_index+1} radius'] = pp_gaussian_radius_list[pp_index] + elif pp_type_list[pp_index] == 'brightness': + p.extra_generation_params[f'Postprocess {pp_index+1} strength'] = pp_brightness_strength_list[pp_index] + elif pp_type_list[pp_index] == 'color': + p.extra_generation_params[f'Postprocess {pp_index+1} strength'] = pp_color_strength_list[pp_index] + elif pp_type_list[pp_index] == 'contrast': + p.extra_generation_params[f'Postprocess {pp_index+1} strength'] = 
pp_contrast_strength_list[pp_index] + elif pp_type_list[pp_index] == 'hue': + p.extra_generation_params[f'Postprocess {pp_index+1} strength'] = pp_hue_strength_list[pp_index] + elif pp_type_list[pp_index] == 'bilateral': + p.extra_generation_params[f'Postprocess {pp_index+1} sigma c'] = pp_bilateral_sigmaC_list[pp_index] + p.extra_generation_params[f'Postprocess {pp_index+1} sigma s'] = pp_bilateral_sigmaS_list[pp_index] + elif pp_type_list[pp_index] == 'color tint(type)': + p.extra_generation_params[f'Postprocess {pp_index+1} type'] = pp_color_tint_type_name_list[pp_index] + elif pp_type_list[pp_index] == 'color tint(lut)': + p.extra_generation_params[f'Postprocess {pp_index+1} lut'] = pp_color_tint_lut_name_list[pp_index] + return init_image + + def change_vae_model(self, name:str): + if name is None: return + if name.lower() in ['auto', 'automatic']: modules.sd_vae.reload_vae_weights(shared.sd_model, vae_file=modules.sd_vae.unspecified) + elif name.lower() == 'none': modules.sd_vae.reload_vae_weights(shared.sd_model, vae_file=None) + else: modules.sd_vae.reload_vae_weights(shared.sd_model, vae_file=modules.sd_vae.vae_dict[name]) + + def change_ckpt_model(self, name:str): + if name is None: return + info = modules.sd_models.get_closet_checkpoint_match(name) + if info is None: + raise RuntimeError(f"Unknown checkpoint: {name}") + modules.sd_models.reload_model_weights(shared.sd_model, info) + + def postprocess(self, p, res, *args, **kargs): + if getattr(p, 'sub_processing', False): return + self.change_ckpt_model(self.ckptname) + self.change_vae_model(self.vae) + opts.CLIP_stop_at_last_layers = self.clip_skip + if len(self.image_results) < 1: return + final_count = len(res.images) + if (p.n_iter > 1 or p.batch_size > 1) and final_count != p.n_iter * p.batch_size: + grid = res.images[0] + res.images = res.images[1:] + grid_texts = res.infotexts[0] + res.infotexts = res.infotexts[1:] + images = [[*masks, image] for masks, image in zip(self.image_results,res.images)] 
+ res.images = [image for sub in images for image in sub] + infos = [[info] * (len(masks) + 1) for masks, info in zip(self.image_results, res.infotexts)] + res.infotexts = [info for sub in infos for info in sub] + if (p.n_iter > 1 or p.batch_size > 1) and final_count != p.n_iter * p.batch_size: + res.images = [grid] + res.images + res.infotexts = [grid_texts] + res.infotexts + + def process(self, p, + enable_script_names, + disable_watermark, disable_postprocess, + disable_upscaler, ddetailer_before_upscaler, scalevalue, upscaler_sample, overlap, upscaler_index, rewidth, reheight, denoising_strength, upscaler_ckpt, upscaler_vae, + disable_detailer, disable_mask_paint_mode, inpaint_mask_mode, detailer_sample, detailer_sam_model, detailer_dino_model, + dino_full_res_inpaint, dino_inpaint_padding, detailer_mask_blur, + disable_outpaint, outpaint_sample, outpaint_mask_blur, + *args): + if getattr(p, 'sub_processing', False): return + self.image_results = [] + self.ckptname = shared.opts.data['sd_model_checkpoint'] + self.vae = shared.opts.data['sd_vae'] + self.clip_skip = opts.CLIP_stop_at_last_layers + self.restore_script(p) + self.enable_script_names = enable_script_names + self.disable_watermark = disable_watermark + self.disable_postprocess = disable_postprocess + self.disable_upscaler = disable_upscaler + self.ddetailer_before_upscaler = ddetailer_before_upscaler + self.scalevalue = scalevalue + self.upscaler_sample = upscaler_sample + self.overlap = overlap + self.upscaler_index = upscaler_index + self.rewidth = rewidth + self.reheight = reheight + self.denoising_strength = denoising_strength + self.upscaler_ckpt = upscaler_ckpt + self.upscaler_vae = upscaler_vae + self.disable_detailer = disable_detailer + self.disable_mask_paint_mode = disable_mask_paint_mode + self.inpaint_mask_mode = inpaint_mask_mode + self.detailer_sample = detailer_sample + self.detailer_sam_model = detailer_sam_model + self.detailer_dino_model = detailer_dino_model + 
self.dino_full_res_inpaint = dino_full_res_inpaint + self.dino_inpaint_padding = dino_inpaint_padding + self.detailer_mask_blur = detailer_mask_blur + self.disable_outpaint = disable_outpaint + self.outpaint_sample = outpaint_sample + self.outpaint_mask_blur = outpaint_mask_blur + args_list = [*args] + self.dino_detect_count = shared.opts.data.get('dino_detect_count', 2) + self.dino_detection_ckpt_list = args_list[self.dino_detect_count * 0:self.dino_detect_count * 1] + self.dino_detection_vae_list = args_list[self.dino_detect_count * 1:self.dino_detect_count * 2] + self.dino_detection_prompt_list = args_list[self.dino_detect_count * 2:self.dino_detect_count * 3] + self.dino_detection_positive_list = args_list[self.dino_detect_count * 3:self.dino_detect_count * 4] + self.dino_detection_negative_list = args_list[self.dino_detect_count * 4:self.dino_detect_count * 5] + self.dino_detection_denoise_list = args_list[self.dino_detect_count * 5:self.dino_detect_count * 6] + self.dino_detection_cfg_list = args_list[self.dino_detect_count * 6:self.dino_detect_count * 7] + self.dino_detection_steps_list = args_list[self.dino_detect_count * 7:self.dino_detect_count * 8] + self.dino_detection_spliter_disable_list = args_list[self.dino_detect_count * 8:self.dino_detect_count * 9] + self.dino_detection_spliter_remove_area_list = args_list[self.dino_detect_count * 9:self.dino_detect_count * 10] + self.dino_detection_clip_skip_list = args_list[self.dino_detect_count * 10 : self.dino_detect_count * 11] + self.watermark_count = shared.opts.data.get('watermark_count', 1) + self.watermark_type_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 0:self.dino_detect_count * 11 + self.watermark_count * 1] + self.watermark_position_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 1:self.dino_detect_count * 11 + self.watermark_count * 2] + self.watermark_image_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 
2:self.dino_detect_count * 11 + self.watermark_count * 3] + self.watermark_image_size_width_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 3:self.dino_detect_count * 11 + self.watermark_count * 4] + self.watermark_image_size_height_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 4:self.dino_detect_count * 11 + self.watermark_count * 5] + self.watermark_text_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 5:self.dino_detect_count * 11 + self.watermark_count * 6] + self.watermark_text_color_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 6:self.dino_detect_count * 11 + self.watermark_count * 7] + self.watermark_text_font_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 7:self.dino_detect_count * 11 + self.watermark_count * 8] + self.watermark_text_size_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 8:self.dino_detect_count * 11 + self.watermark_count * 9] + self.watermark_padding_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 9:self.dino_detect_count * 11 + self.watermark_count * 10] + self.watermark_alpha_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 10:self.dino_detect_count * 11 + self.watermark_count * 11] + self.pp_count = shared.opts.data.get('postprocessing_count', 1) + self.pp_type_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 0:self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 1] + self.pp_saturation_strength_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 1:self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 2] + self.pp_sharpening_radius_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 2:self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 3] + self.pp_sharpening_percent_list = 
args_list[self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 3:self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 4] + self.pp_sharpening_threshold_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 4:self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 5] + self.pp_gaussian_radius_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 5:self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 6] + self.pp_brightness_strength_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 6:self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 7] + self.pp_color_strength_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 7:self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 8] + self.pp_contrast_strength_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 8:self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 9] + self.pp_hue_strength_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 9:self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 10] + self.pp_bilateral_sigmaC_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 10:self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 11] + self.pp_bilateral_sigmaS_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 11:self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 12] + self.pp_color_tint_type_name_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 12:self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 13] + self.pp_color_tint_lut_name_list = 
args_list[self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 13:self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 14] + self.outpaint_count = shared.opts.data.get('outpaint_count', 1) + self.outpaint_positive_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 14 + self.outpaint_count * 0:self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 14 + self.outpaint_count * 1] + self.outpaint_negative_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 14 + self.outpaint_count * 1:self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 14 + self.outpaint_count * 2] + self.outpaint_denoise_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 14 + self.outpaint_count * 2:self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 14 + self.outpaint_count * 3] + self.outpaint_cfg_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 14 + self.outpaint_count * 3:self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 14 + self.outpaint_count * 4] + self.outpaint_steps_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 14 + self.outpaint_count * 4:self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 14 + self.outpaint_count * 5] + self.outpaint_pixels_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 14 + self.outpaint_count * 5:self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 14 + self.outpaint_count * 6] + self.outpaint_direction_list = args_list[self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 14 + self.outpaint_count * 6:self.dino_detect_count * 11 + self.watermark_count * 11 + self.pp_count * 14 + self.outpaint_count * 7] + self.script_names_list = 
    def before_process_batch(self, p, *args, **kargs):
        # Record which n_iter batch we are in and reset the per-batch image
        # counter. Skipped for our own nested runs (sub_processing) so the
        # extension does not recurse into itself.
        if getattr(p, 'sub_processing', False): return
        self.iter_number = kargs['batch_number']
        self.batch_number = 0

    def restore_script(self, p):
        # Restore p.scripts to the snapshot taken the first time this runs.
        # The length comparison detects that a nested i2i pass replaced the
        # script lists; NOTE(review): this assumes a changed list always has a
        # different length — confirm against how i2i_scripts is built above.
        if self.original_scripts is None: self.original_scripts = p.scripts.scripts.copy()
        else:
            if len(p.scripts.scripts) != len(self.original_scripts): p.scripts.scripts = self.original_scripts.copy()
        if self.original_scripts_always is None: self.original_scripts_always = p.scripts.alwayson_scripts.copy()
        else:
            if len(p.scripts.alwayson_scripts) != len(self.original_scripts_always): p.scripts.alwayson_scripts = self.original_scripts_always.copy()
        p.scripts.scripts = self.original_scripts.copy()
        p.scripts.alwayson_scripts = self.original_scripts_always.copy()

    def postprocess_image(self, p, pp, *args):
        # Main pipeline hook: runs (in order) outpaint -> optional upscale ->
        # dino detailer -> optional upscale -> postprocess filters -> watermark
        # on the freshly generated image, then writes it back to pp.image.
        # Nested sub-processing runs are ignored to prevent recursion.
        if getattr(p, 'sub_processing', False): return
        devices.torch_gc()
        output_image = pp.image
        # Select the prompt/seed belonging to this image out of the flat
        # all_prompts/all_seeds lists (batch slice, then position in batch).
        self.target_prompts = p.all_prompts[self.iter_number * p.batch_size:(self.iter_number + 1) * p.batch_size][self.batch_number]
        self.target_negative_prompts = p.all_negative_prompts[self.iter_number * p.batch_size:(self.iter_number + 1) * p.batch_size][self.batch_number]
        self.target_seeds = p.all_seeds[self.iter_number * p.batch_size:(self.iter_number + 1) * p.batch_size][self.batch_number]
        # Optionally save the untouched intermediate image before any DDSD step.
        if shared.opts.data.get('save_ddsd_working_on_images', False):
            images.save_image(output_image, p.outpath_samples,
                              shared.opts.data.get('save_ddsd_working_on_images_prefix', ''),
                              self.target_seeds, self.target_prompts, opts.samples_format,
                              suffix= '' if shared.opts.data.get('save_ddsd_working_on_images_suffix', '') == '' else f"-{shared.opts.data.get('save_ddsd_working_on_images_suffix', '')}",
                              info=create_infotext(p, p.all_prompts, p.all_seeds, p.all_subseeds, None, self.iter_number, self.batch_number), p=p)

        if not self.disable_outpaint:
            output_image = self.outpainting(p, output_image,
                                            self.outpaint_sample, self.outpaint_mask_blur, self.outpaint_count,
                                            self.outpaint_denoise_list, self.outpaint_cfg_list, self.outpaint_steps_list,
                                            self.outpaint_positive_list, self.outpaint_negative_list, self.outpaint_pixels_list,self.outpaint_direction_list)
            devices.torch_gc()

        # Upscale may run before or after the detailer depending on the toggle.
        if self.ddetailer_before_upscaler and not self.disable_upscaler:
            output_image = self.upscale(p, output_image,
                                        self.scalevalue, self.upscaler_sample,
                                        self.overlap, self.rewidth, self.reheight, self.denoising_strength,
                                        self.upscaler_ckpt, self.upscaler_vae,
                                        self.detailer_mask_blur, self.dino_full_res_inpaint, self.dino_inpaint_padding)
            devices.torch_gc()

        if not self.disable_detailer:
            output_image = self.dino_detect_detailer(p, output_image,
                                                     self.disable_mask_paint_mode, self.inpaint_mask_mode, self.detailer_sample, self.detailer_sam_model, self.detailer_dino_model,
                                                     self.dino_full_res_inpaint, self.dino_inpaint_padding, self.detailer_mask_blur,
                                                     self.dino_detect_count,
                                                     self.dino_detection_ckpt_list,
                                                     self.dino_detection_vae_list,
                                                     self.dino_detection_prompt_list,
                                                     self.dino_detection_positive_list,
                                                     self.dino_detection_negative_list,
                                                     self.dino_detection_denoise_list,
                                                     self.dino_detection_cfg_list,
                                                     self.dino_detection_steps_list,
                                                     self.dino_detection_spliter_disable_list,
                                                     self.dino_detection_spliter_remove_area_list,
                                                     self.dino_detection_clip_skip_list)
            devices.torch_gc()

        if not self.ddetailer_before_upscaler and not self.disable_upscaler:
            output_image = self.upscale(p, output_image,
                                        self.scalevalue, self.upscaler_sample,
                                        self.overlap, self.rewidth, self.reheight, self.denoising_strength,
                                        self.upscaler_ckpt, self.upscaler_vae,
                                        self.detailer_mask_blur, self.dino_full_res_inpaint, self.dino_inpaint_padding)
            devices.torch_gc()

        if not self.disable_postprocess:
            output_image = self.postprocess_target(p, output_image,
                                                   self.pp_type_list,
                                                   self.pp_saturation_strength_list,
                                                   self.pp_sharpening_radius_list,
                                                   self.pp_sharpening_percent_list,
                                                   self.pp_sharpening_threshold_list,
                                                   self.pp_gaussian_radius_list,
                                                   self.pp_brightness_strength_list,
                                                   self.pp_color_strength_list,
                                                   self.pp_contrast_strength_list,
                                                   self.pp_hue_strength_list,
                                                   self.pp_bilateral_sigmaC_list,
                                                   self.pp_bilateral_sigmaS_list,
                                                   self.pp_color_tint_type_name_list,
                                                   self.pp_color_tint_lut_name_list)

        devices.torch_gc()

        if not self.disable_watermark:
            output_image = self.watermark(p, output_image)

        devices.torch_gc()
        self.batch_number += 1
        self.restore_script(p)
        pp.image = output_image

def on_ui_settings():
    # Registers all DDSD options under their own "DDSD" settings section.
    section = ('ddsd_script', "DDSD")
    shared.opts.add_option("save_ddsd_working_on_images", shared.OptionInfo(
        False, "Save all images you are working on", gr.Checkbox, {"interactive": True}, section=section))
    shared.opts.add_option("save_ddsd_working_on_images_prefix", shared.OptionInfo(
        '', "Save all images you are working on prefix", gr.Textbox, {"interactive": True}, section=section))
    shared.opts.add_option("save_ddsd_working_on_images_suffix", shared.OptionInfo(
        'Working_On', "Save all images you are working on suffix", gr.Textbox, {"interactive": True}, section=section))

    shared.opts.add_option("outpaint_count", shared.OptionInfo(
        1, "Outpainting Max Count", gr.Slider, {"minimum": 1, "maximum": 20, "step": 1}, section=section))

    shared.opts.add_option("save_ddsd_working_on_dino_mask_images", shared.OptionInfo(
        False, "Save dino mask images you are working on", gr.Checkbox, {"interactive": True}, section=section))
    shared.opts.add_option("save_ddsd_working_on_dino_mask_images_prefix", shared.OptionInfo(
        '', "Save dino mask images you are working on prefix", gr.Textbox, {"interactive": True}, section=section))
    shared.opts.add_option("save_ddsd_working_on_dino_mask_images_suffix", shared.OptionInfo(
        'Mask', "Save dino mask images you are working on suffix", gr.Textbox, {"interactive": True}, section=section))
    shared.opts.add_option("dino_detect_count", shared.OptionInfo(
        2, "Dino Detect Max Count", gr.Slider, {"minimum": 1, "maximum": 20, "step": 1}, section=section))

    shared.opts.add_option("save_ddsd_postprocessing_with_and_without", shared.OptionInfo(
        False, "Save with and without postprocessing ", gr.Checkbox, {"interactive": True}, section=section))
    shared.opts.add_option("save_ddsd_postprocessing_with_and_without_prefix", shared.OptionInfo(
        '', "Save with and without postprocesing prefix", gr.Textbox, {"interactive": True}, section=section))
    shared.opts.add_option("save_ddsd_postprocessing_with_and_without_suffix", shared.OptionInfo(
        'Without_Postprocessing', "Save with and without postprocessing suffix", gr.Textbox, {"interactive": True}, section=section))
    shared.opts.add_option("postprocessing_count", shared.OptionInfo(
        1, "Postprocessing Count", gr.Slider, {"minimum": 1, "maximum": 5, "step": 1}, section=section))

    shared.opts.add_option("save_ddsd_watermark_with_and_without", shared.OptionInfo(
        False, "Save with and without watermark ", gr.Checkbox, {"interactive": True}, section=section))
    shared.opts.add_option("save_ddsd_watermark_with_and_without_prefix", shared.OptionInfo(
        '', "Save with and without watermark prefix", gr.Textbox, {"interactive": True}, section=section))
    shared.opts.add_option("save_ddsd_watermark_with_and_without_suffix", shared.OptionInfo(
        'Without_Watermark', "Save with and without watermark suffix", gr.Textbox, {"interactive": True}, section=section))
    shared.opts.add_option("watermark_count", shared.OptionInfo(
        1, "Watermark Count", gr.Slider, {"minimum": 1, "maximum": 20, "step": 1}, section=section))

    shared.opts.add_option("preview_masks_images", shared.OptionInfo(
        False, "Show the working mask in preview.", gr.Checkbox, {"interactive": True}, section=section))
    shared.opts.add_option("result_masks", shared.OptionInfo(
        False, "The mask result is output on the final output.", gr.Checkbox, {"interactive": True}, section=section))
    shared.opts.add_option("mask_type", shared.OptionInfo(
        False, "The type of mask is a black and white image.", gr.Checkbox, {"interactive": True}, section=section))

modules.script_callbacks.on_ui_settings(on_ui_settings)
model_type, confidence, device): + models = [os.path.join(yolo_models_path,x) for x in os.listdir(yolo_models_path) if (x.endswith('.pt') or x.endswith('.pth')) and os.path.splitext(os.path.basename(x))[0].upper() == model_type] + if len(models) == 0: return None + model = YOLO(models[0]) + predictor = model(image, conf=confidence, show_labels=False, device=device) + bboxes = predictor[0].boxes.xyxy.cpu().numpy() + if bboxes.size == 0: return None + bboxes = bboxes.tolist() + return create_mask_from_bbox(image, bboxes) + +def create_mask_from_bbox(image, bboxes): + mask = Image.new('L', image.size, 0) + draw = ImageDraw.Draw(mask) + for bbox in bboxes: + draw.rectangle(bbox, fill=255) + return np.array(mask) + +def bs_model(image, model_type, confidence): + image = Image.fromarray(image) + orig = torch.load + torch.load = safe.unsafe_torch_load + if model_type == 'FACE_MEDIA_FULL': + mask = mediapipe_face_detect(image, 1, confidence) + elif model_type == 'FACE_MEDIA_SHORT': + mask = mediapipe_face_detect(image, 0, confidence) + else: + device = '' + if getattr(cmd_opts, 'lowvram', False) or getattr(cmd_opts, 'medvram', False): + device = 'cpu' + mask = ultralytics_predict(image, model_type, confidence, device) + torch.load = orig + return mask \ No newline at end of file diff --git a/exhm/detailer/sd-webui-ddsd/scripts/ddsd_dino.py b/exhm/detailer/sd-webui-ddsd/scripts/ddsd_dino.py new file mode 100644 index 0000000000000000000000000000000000000000..12c3577d5cdef89ab284edd2df0689daeb9dd2ba --- /dev/null +++ b/exhm/detailer/sd-webui-ddsd/scripts/ddsd_dino.py @@ -0,0 +1,99 @@ +import os +import gc +import torch +import copy +import cv2 +from collections import OrderedDict + +from modules import shared +from modules.devices import device, torch_gc, cpu + +import groundingdino.datasets.transforms as T +from groundingdino.models import build_model +from groundingdino.util.slconfig import SLConfig +from modules.paths import models_path +from groundingdino.util.utils 
# Cache of loaded GroundingDINO models, keyed by checkpoint filename.
dino_model_cache = OrderedDict()
grounding_models_dir = os.path.join(models_path, "grounding")

def dino_model_list():
    # Available GroundingDINO checkpoints in models/grounding.
    return [x for x in os.listdir(grounding_models_dir) if x.endswith('.pth')]

def dino_config_file_name(dino_model_name:str):
    # Each checkpoint is expected to ship a same-named .py SLConfig file.
    return dino_model_name.replace('.pth','.py')

def clear_dino_cache():
    # Drop all cached DINO models and reclaim Python + GPU memory.
    dino_model_cache.clear()
    gc.collect()
    torch_gc()

def load_dino_model(dino_checkpoint):
    # Return a cached model (moving it back onto the GPU under --lowvram) or
    # build one from its SLConfig + checkpoint. Only one model is kept: the
    # cache is cleared before loading a new checkpoint.
    print(f"Initializing GroundingDINO {dino_checkpoint}")
    if dino_checkpoint in dino_model_cache:
        dino = dino_model_cache[dino_checkpoint]
        if shared.cmd_opts.lowvram:
            dino.to(device=device)
    else:
        clear_dino_cache()
        args = SLConfig.fromfile(os.path.join(grounding_models_dir,dino_config_file_name(dino_checkpoint)))
        dino = build_model(args)
        checkpoint = torch.load(os.path.join(grounding_models_dir,dino_checkpoint),map_location='cpu')
        dino.load_state_dict(clean_state_dict(checkpoint['model']), strict=False)
        dino.to(device=device)
        dino_model_cache[dino_checkpoint] = dino
    dino.eval()
    return dino


def load_dino_image(image_pil):
    # Apply GroundingDINO's standard eval transform (resize shorter side to
    # 800 capped at 1333, tensor-ify, ImageNet-normalize).
    transform = T.Compose(
        [
            T.RandomResize([800], max_size=1333),
            T.ToTensor(),
            T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ]
    )
    image, _ = transform(image_pil, None)  # 3, h, w
    return image


def get_grounding_output(model, image, caption, box_threshold):
    # Run grounded detection for `caption` and keep boxes whose best logit
    # exceeds box_threshold. Returns cx/cy/w/h boxes, normalized to [0,1].
    caption = caption.lower()
    caption = caption.strip()
    if not caption.endswith("."):
        caption = caption + "."  # DINO expects a trailing period on captions
    image = image.to(device)
    with torch.no_grad():
        outputs = model(image[None], captions=[caption])
    if shared.cmd_opts.lowvram:
        model.to(cpu)  # push the model off-GPU as soon as inference is done
    logits = outputs["pred_logits"].sigmoid()[0]  # (nq, 256)
    boxes = outputs["pred_boxes"][0]  # (nq, 4)

    # filter output
    logits_filt = logits.clone()
    boxes_filt = boxes.clone()
    filt_mask = logits_filt.max(dim=1)[0] > box_threshold
    logits_filt = logits_filt[filt_mask]  # num_filt, 256
    boxes_filt = boxes_filt[filt_mask]  # num_filt, 4

    return boxes_filt.cpu()


def dino_predict_internal(input_image, dino_model_name, text_prompt, box_threshold):
    # Full text->boxes pipeline: returns absolute-pixel x1/y1/x2/y2 boxes for
    # `text_prompt` on `input_image`. Frees the model cache afterwards, so
    # every call pays the model-load cost again (trades speed for VRAM).
    print("Running GroundingDINO Inference")
    dino_image = load_dino_image(input_image.convert("RGB"))
    dino_model = load_dino_model(dino_model_name)

    boxes_filt = get_grounding_output(
        dino_model, dino_image, text_prompt, box_threshold
    )

    H, W = input_image.size[1], input_image.size[0]
    for i in range(boxes_filt.size(0)):
        # scale to pixels, then convert center/size -> corner coordinates
        boxes_filt[i] = boxes_filt[i] * torch.Tensor([W, H, W, H])
        boxes_filt[i][:2] -= boxes_filt[i][2:] / 2
        boxes_filt[i][2:] += boxes_filt[i][:2]
    clear_dino_cache()
    return boxes_filt
def sharpening_image(image:Image.Image, radius:float, percent:int, threshold:float) -> Image.Image:  # sharpness adjustment
    return image.filter(ImageFilter.UnsharpMask(radius=radius, percent=percent, threshold=threshold))
def gaussian_blur_image(image:Image.Image, radius:float) -> Image.Image:  # blur adjustment
    return image.filter(ImageFilter.GaussianBlur(radius=radius))
def brightness_image(image:Image.Image, strength:float) -> Image.Image:  # brightness adjustment
    return ImageEnhance.Brightness(image).enhance(strength)
def color_image(image:Image.Image, strength:float) -> Image.Image:  # color adjustment
    return ImageEnhance.Color(image).enhance(strength)
def contrast_image(image:Image.Image, strength:float) -> Image.Image:  # contrast adjustment
    return ImageEnhance.Contrast(image).enhance(strength)
def color_extraction_image(image:Image.Image, lower:tuple[int,int,int], upper:tuple[int,int,int], strength:float) -> Image.Image:  # extract an HSV range and scale it
    image_np = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2HSV)
    mask = cv2.inRange(image_np, lower, upper)
    image_np = image_np.astype(np.float64)
    image_np[mask != 0] *= strength
    image_np = image_np.astype(np.uint8)
    return Image.fromarray(cv2.cvtColor(image_np, cv2.COLOR_HSV2RGB))
def hue_image(image:Image.Image, strength:float) -> Image.Image:  # hue rotation (strength is a fraction of the 180-unit OpenCV hue wheel)
    image_np = np.array(image)
    image_np = cv2.cvtColor(image_np, cv2.COLOR_RGB2HSV)
    image_np[..., 0] = (image_np[..., 0] + strength * 180) % 180
    return Image.fromarray(cv2.cvtColor(image_np, cv2.COLOR_HSV2RGB))
def inversion_image(image:Image.Image) -> Image.Image:  # negative
    return ImageOps.invert(image)
def bilateral_image(image:Image.Image, sigmaC:int, sigmaS:int) -> Image.Image:  # edge-preserving bilateral filter
    image_np = np.array(image)
    return Image.fromarray(cv2.bilateralFilter(image_np, -1, sigmaC, sigmaS))
def color_tint_lut_image(image:Image.Image, lut_file:str) -> Image.Image:  # apply a .cube LUT from models/lut
    lut = load_cube_file(os.path.join(lut_model_dir, lut_file))
    return image.filter(lut)
def color_tint_type_image(image:Image.Image, type:str) -> Image.Image:  # color-temperature shift ('warm' or cool)
    # Spline LUTs that push channel values up (increase) or down (decrease);
    # warm boosts red and saturation, cool boosts blue.
    increase = UnivariateSpline([0,64,128,192,256],[0,70,140,210,256])(range(256))
    decrease = UnivariateSpline([0,64,128,192,256],[0,30,80,120,192])(range(256))
    image_np = np.array(image)
    r, g, b = cv2.split(image_np)
    r = cv2.LUT(r, increase if type == 'warm' else decrease).astype(np.uint8)
    b = cv2.LUT(b, decrease if type == 'warm' else increase).astype(np.uint8)
    image_np = cv2.merge((r, g, b))
    h, s, v = cv2.split(cv2.cvtColor(image_np, cv2.COLOR_RGB2HSV))
    s = cv2.LUT(s, increase if type == 'warm' else decrease).astype(np.uint8)
    return Image.fromarray(cv2.cvtColor(cv2.merge((h, s, v)), cv2.COLOR_HSV2RGB))

def ddsd_postprocess(image:Image.Image, pptype:str,
                     saturation_strength:float,
                     sharpening_radius:float, sharpening_percent:int, sharpening_threshold:float,
                     gaussian_blur_radius:float,
                     brightness_strength:float,
                     color_strength:float,
                     contrast_strength:float,
                     #color_extraction_lower:tuple[int,int,int], color_extraction_upper:tuple[int,int,int], color_extraction_strength:float,
                     hue_strength:float,
                     bilateral_sigmaC:int, bilateral_sigmaS:int,
                     color_tint_lut_file:str,
                     color_tint_type_name:str) -> Image.Image:
    """Dispatch a single named postprocess filter; unknown names are a no-op."""
    if pptype == 'saturation': return saturation_image(image, saturation_strength)
    if pptype == 'sharpening': return sharpening_image(image, sharpening_radius, sharpening_percent, sharpening_threshold)
    if pptype == 'gaussian blur': return gaussian_blur_image(image, gaussian_blur_radius)
    if pptype == 'brightness': return brightness_image(image, brightness_strength)
    if pptype == 'color': return color_image(image, color_strength)
    if pptype == 'contrast': return contrast_image(image, contrast_strength)
    #if pptype == 'color extraction': return color_extraction_image(image, color_extraction_lower, color_extraction_upper, color_extraction_strength)
    # BUGFIX: original called hue_image(hue_strength) without the image,
    # raising TypeError whenever the 'hue' filter was selected.
    if pptype == 'hue': return hue_image(image, hue_strength)
    if pptype == 'inversion': return inversion_image(image)
    if pptype == 'bilateral': return bilateral_image(image, bilateral_sigmaC, bilateral_sigmaS)
    if pptype == 'color tint(type)': return color_tint_type_image(image, color_tint_type_name)
    if pptype == 'color tint(lut)': return color_tint_lut_image(image, color_tint_lut_file)
    return image
# Cache of loaded SAM models, keyed by checkpoint filename.
sam_model_cache = OrderedDict()
sam_model_dir = os.path.join(models_path, "sam")

def sam_model_list():
    """SAM checkpoints available in models/sam."""
    return [x for x in os.listdir(sam_model_dir) if x.endswith('.pth')]

def load_sam_model(sam_checkpoint):
    """Build a SAM model from a checkpoint named like sam_<type>_<hash>.pth.

    The registry key is the middle part of the filename. torch.load is
    temporarily swapped to webui's unrestricted loader for the duration.
    """
    model_type = '_'.join(sam_checkpoint.split('_')[1:-1])
    sam_checkpoint = os.path.join(sam_model_dir, sam_checkpoint)
    torch.load = unsafe_torch_load
    try:
        sam = sam_model_registry[model_type](checkpoint=sam_checkpoint)
        sam.to(device=device)
        sam.eval()
    finally:
        # Restore the safe loader even if model construction fails.
        torch.load = load
    return sam

def clear_sam_cache():
    """Drop cached SAM models and reclaim Python + GPU memory."""
    sam_model_cache.clear()
    gc.collect()
    torch_gc()

def clear_cache():
    """Free both the SAM and GroundingDINO caches."""
    clear_sam_cache()
    clear_dino_cache()

def dilate_mask(mask, dilation):
    """Grow a uint8 mask with a square kernel of side `dilation`."""
    dilation_kernel = np.ones((dilation, dilation), np.uint8)
    return cv2.dilate(mask, dilation_kernel)

def init_sam_model(sam_model_name):
    """Return a (cached) SAM model for `sam_model_name`, loading on demand.

    Raises if the checkpoint is not present in models/sam.
    """
    print('Initializing SAM')
    if sam_model_name in sam_model_cache:
        sam = sam_model_cache[sam_model_name]
        if(shared.cmd_opts.lowvram):
            sam.to(device=device)
        return sam
    elif sam_model_name in sam_model_list():
        clear_sam_cache()
        sam_model_cache[sam_model_name] = load_sam_model(sam_model_name)
        return sam_model_cache[sam_model_name]
    else:
        # BUGFIX: the original constructed this Exception without raising it,
        # silently returning None and crashing later inside SamPredictor.
        raise Exception(f'{sam_model_name} not found, please download model to models/sam')

def sam_predict(sam_model_name, dino_model_name, image, image_np, image_np_rgb, dino_text, dino_box_threshold, dilation, sam_level):
    """GroundingDINO text prompt -> boxes -> SAM masks -> dilated uint8 mask.

    Returns None when DINO finds no boxes; otherwise a 0/255 mask combining
    all detections at the requested SAM multimask level (0-2).
    """
    print('Start SAM Processing')

    assert dino_text, 'Please input dino text'

    boxes = dino_predict_internal(image, dino_model_name, dino_text, dino_box_threshold)

    if boxes.shape[0] < 1: return None

    sam = init_sam_model(sam_model_name)

    print(f'Running SAM Inference {image_np_rgb.shape}')
    predictor = SamPredictor(sam)
    predictor.set_image(image_np_rgb)
    transformed_boxes = predictor.transform.apply_boxes_torch(boxes, image_np.shape[:2])
    masks, _, _ = predictor.predict_torch(
        point_coords = None,
        point_labels = None,
        boxes = transformed_boxes.to(device),
        multimask_output = True
    )

    # (boxes, levels, H, W) -> (levels, boxes, H, W) so masks[sam_level]
    # stacks one mask per detected box.
    masks = masks.permute(1,0,2,3).cpu().numpy()

    if shared.cmd_opts.lowvram:
        sam.to(cpu)
    clear_sam_cache()

    return dilate_mask(np.any(masks[sam_level], axis=0).astype(np.uint8) * 255, dilation)
# Tokens of the mask-combination DSL used in detection prompts.
token_split = re.compile(r"(AND|OR|NOR|XOR|NAND)")
token_first = re.compile(r'\(([^()]+)\)')
token_match = re.compile(r'(\d+)GROUPMASK')
token_file = re.compile(r'\s*<(.*)>\s*')

# Maps UPPERCASED mask-file basename -> absolute path (filled by startup()).
mask_embed = {}

def startup():
    """Create the ddsd mask folder on first run and index its mask images."""
    global mask_embed
    if not os.path.exists(ddsd_mask_path):
        os.makedirs(ddsd_mask_path)
        with open(os.path.join(ddsd_mask_path, 'put_in_mask_here.txt'),'w') as f: pass

    # BUGFIX: the original pattern '**\\*' was both non-recursive (glob treats
    # '**' like '*' unless recursive=True) and Windows-only ('\\' separator),
    # so masks were missed on POSIX and in nested folders.
    masks = glob(os.path.join(ddsd_mask_path, '**', '*'), recursive=True)
    masks = [(x, *os.path.splitext(os.path.basename(x))) for x in masks if os.path.isfile(x)]
    masks = [(x, y) for x, y, z in masks if z in ['.png', '.jpg', '.jpeg', '.webp']]
    mask_embed = {y.upper():x for x, y in masks}

def try_convert(data, type, default, min, max):
    """Convert `data` with `type`, clamping to [min, max]; `default` on failure."""
    try:
        convert = type(data)
        if convert < min: return min
        if convert > max: return max
        return convert
    except (ValueError, TypeError):
        return default

def prompt_spliter(prompt:str, split_token:str, count:int):
    """Split `prompt` on `split_token`, padding/truncating to exactly `count` parts."""
    spliter = prompt.split(split_token)
    while len(spliter) < count:
        spliter.append('')
    return spliter[:count]

def combine_masks(mask, combine_masks_option, mask2):
    """Combine two uint8 masks with a boolean operator from the DSL."""
    if combine_masks_option == 'AND': return cv2.bitwise_and(mask, mask2)
    if combine_masks_option == 'OR': return cv2.bitwise_or(mask, mask2)
    if combine_masks_option == 'XOR': return cv2.bitwise_xor(mask, mask2)
    if combine_masks_option == 'NOR': return cv2.bitwise_not(cv2.bitwise_or(mask, mask2))
    if combine_masks_option == 'NAND': return cv2.bitwise_not(cv2.bitwise_and(mask,mask2))

def dino_detect_from_prompt(prompt:str, detailer_sam_model, detailer_dino_model, init_image, disable_mask_paint_mode, inpaint_mask_mode, image_mask):
    """Evaluate a mask-DSL prompt on `init_image`, optionally intersected with
    the user's painted mask (Inner/Outer). Returns a uint8 mask or None when
    the result is empty."""
    clear_cache()
    image_np_zero = np.array(init_image.convert('L'))
    image_np_zero[:,:] = 0
    image_np = np.array(init_image)
    image_np_rgb = image_np[:,:,:3].copy()
    image_set = (init_image, image_np, image_np_rgb, image_np_zero)
    model_set = (detailer_sam_model, detailer_dino_model)
    result = dino_prompt_detector(prompt, model_set, image_set)
    clear_cache()
    if np.array_equal(result, image_np_zero): return None
    if disable_mask_paint_mode: return result
    if image_mask is None: return result
    image_mask = np.array(image_mask.resize((result.shape[1],result.shape[0])).convert('L'))
    image_mask = np.resize(image_mask, result.shape)
    if inpaint_mask_mode == 'Inner': return cv2.bitwise_and(result, image_mask)
    if inpaint_mask_mode == 'Outer': return cv2.bitwise_and(result, cv2.bitwise_not(image_mask))
    return None

def dino_prompt_token_file(prompt:str, image_np_zero, image_np_rgb):
    """Resolve a <TYPE:NAME:dilation:confidence> DSL token into a mask.

    AREA fills a half/whole of the frame, FILE loads a registered mask image,
    MODEL runs a bs_model detector; the result is dilated before returning.
    """
    usage_type, usage, dilation, confidence = prompt_spliter(prompt, ':', 4)
    usage_type = usage_type.upper()
    usage = usage.upper()
    confidence = try_convert(confidence, float, 0.3, 0, 1)
    if usage_type == 'AREA':
        if usage == 'LEFT':
            image_np_zero[:,:image_np_zero.shape[1] // 2] = 255
            image_np_zero[:,image_np_zero.shape[1] // 2:] = 0
        elif usage == 'RIGHT':
            image_np_zero[:,:image_np_zero.shape[1] // 2] = 0
            image_np_zero[:,image_np_zero.shape[1] // 2:] = 255
        elif usage == 'TOP':
            image_np_zero[:image_np_zero.shape[0] // 2,:] = 255
            image_np_zero[image_np_zero.shape[0] // 2:,:] = 0
        elif usage == 'BOTTOM':
            image_np_zero[:image_np_zero.shape[0] // 2,:] = 0
            image_np_zero[image_np_zero.shape[0] // 2:,:] = 255
        elif usage == 'ALL':
            image_np_zero[:,:] = 255
    if usage_type == 'FILE':
        if usage in mask_embed:
            image = Image.open(mask_embed[usage]).convert('L')
            h, w = image_np_zero.shape[:2]
            image = image.resize((w, h))
            image_np_zero = np.array(image)
    if usage_type == 'MODEL':
        mask = bs_model(image_np_rgb, usage, confidence)
        if mask is None: return image_np_zero
        image_np_zero = mask
    return dilate_mask(image_np_zero, try_convert(dilation, int, 2, 0, 512))
def dino_prompt_detector(prompt:str, model_set, image_set):
    # Recursively evaluate the mask DSL:
    #   - parenthesized sub-expressions are evaluated first and replaced by
    #     'NGROUPMASK' placeholders (result_group stores their masks),
    #   - the remainder is split on AND/OR/NOR/XOR/NAND and folded
    #     left-to-right, resolving each operand to a mask via either a stored
    #     group, a <...> file/area/model token, or a SAM+DINO text query
    #     ('text:sam_level:box_threshold:dilation').
    # model_set = (sam_model_name, dino_model_name);
    # image_set = (pil_image, np_image, np_rgb, np_zero_mask).
    find = token_first.search(prompt)
    result_group = {}
    result_count = 0
    while find:
        result_group[f'{result_count}GROUPMASK'] = dino_prompt_detector(find.group(1), model_set, image_set)
        prompt = prompt.replace(find.group(), f' {result_count}GROUPMASK ')
        result_count += 1
        find = token_first.search(prompt)

    spliter = token_split.split(prompt)

    while len(spliter) > 1:
        left, operator, right = spliter[:3]
        if not isinstance(left, np.ndarray):
            match = token_match.match(left.strip())
            if match is None:
                match = token_file.match(left)
                if match is None:
                    # Plain SAM/DINO query token.
                    dino_text, sam_level, dino_box_threshold, dilation = prompt_spliter(left, ':', 4)
                    left = sam_predict(model_set[0], model_set[1], image_set[0], image_set[1], image_set[2], dino_text,
                                       try_convert(dino_box_threshold.strip(), float, 0.3, 0, 1.0),
                                       try_convert(dilation.strip(), int, 16, 0, 512),
                                       try_convert(sam_level.strip(), int, 0, 0, 2))
                    if left is None: left = image_set[3].copy()  # no detection -> empty mask
                else:
                    left = dino_prompt_token_file(match.group(1), image_set[3].copy(), image_set[2].copy())
            else:
                left = result_group[left.strip()]
        if not isinstance(right, np.ndarray):
            match = token_match.match(right.strip())
            if match is None:
                match = token_file.match(right)
                if match is None:
                    dino_text, sam_level, dino_box_threshold, dilation = prompt_spliter(right, ':', 4)
                    right = sam_predict(model_set[0], model_set[1], image_set[0], image_set[1], image_set[2], dino_text,
                                        try_convert(dino_box_threshold.strip(), float, 0.3, 0, 1.0),
                                        try_convert(dilation.strip(), int, 16, 0, 512),
                                        try_convert(sam_level.strip(), int, 0, 0, 2))
                    if right is None: right = image_set[3].copy()
                else:
                    right = dino_prompt_token_file(match.group(1), image_set[3].copy(), image_set[2].copy())
            else:
                right = result_group[right.strip()]
        # Fold the pair into one mask and continue left-to-right.
        spliter[:3] = [combine_masks(left, operator, right)]
        gc.collect()
        torch_gc()
    # Single-operand prompt (no boolean operators).
    if isinstance(spliter[0], np.ndarray): return spliter[0]
    match = token_file.match(spliter[0])
    if match is None:
        dino_text, sam_level, dino_box_threshold, dilation = prompt_spliter(spliter[0], ':', 4)
        target = sam_predict(model_set[0], model_set[1], image_set[0], image_set[1], image_set[2], dino_text,
                             try_convert(dino_box_threshold.strip(), float, 0.3, 0, 1.0),
                             try_convert(dilation.strip(), int, 16, 0, 512),
                             try_convert(sam_level.strip(), int, 0, 0, 2))
        if target is None: return image_set[3].copy()
    else:
        target = dino_prompt_token_file(match.group(1), image_set[3].copy(), image_set[2].copy())
    return target

def mask_spliter_and_remover(mask, area):
    # Split a combined mask into one image per connected component, zeroing
    # out components smaller than `area` pixels first.
    # NOTE(review): removed components still produce an (all-zero) entry in
    # the returned list, since num_labels counts the original label ids —
    # confirm downstream tolerates blank masks.
    gc.collect()
    torch_gc()
    labels = measure.label(mask)
    regions = measure.regionprops(labels)

    for r in regions:
        if r.area < area:
            for coord in r.coords:
                labels[coord[0], coord[1]] = 0

    num_labels = np.max(labels)

    label_images = []
    for x in range(num_labels):
        label_image = np.zeros_like(mask, dtype=np.uint8)
        label_image[labels == (x + 1)] = 255
        label_images.append(label_image)
    return label_images
def get_fonts_list():
    # Enumerate system fonts via matplotlib; returns (display names, name->path).
    fonts, font_paths = [], {}
    fonts_list = matplotlib.font_manager.findSystemFonts()
    for font in fonts_list:
        try:
            fonts.append(matplotlib.font_manager.FontProperties(fname=font).get_name())
            font_paths[fonts[-1]] = font
        except RuntimeError:
            # Unparseable font files are skipped rather than aborting the scan.
            print(f'Skip font file: {font}')
    return fonts, font_paths

def image_apply_watermark(image, watermark_type, watermark_position, watermark_image, watermark_image_size_width, watermark_image_size_height, watermark_text, watermark_text_color, watermark_text_font, watermark_text_size, watermark_padding, watermark_alpha):
    # Stamp either a text or image watermark at one of nine anchor positions,
    # inset by watermark_padding and blended with watermark_alpha.
    gc.collect()
    torch_gc()
    if watermark_type == 'Text':
        # NOTE(review): font.getsize was removed in Pillow 10 — confirm the
        # pinned Pillow version still provides it (textbbox is the successor).
        font = ImageFont.truetype(watermark_text_font, watermark_text_size)
        copy_image = image.copy()
        draw = ImageDraw.Draw(copy_image)
        text_width, text_height = font.getsize(watermark_text)
        left, right, top, bottom = 0 + watermark_padding, image.size[0] - watermark_padding, 0 + watermark_padding, image.size[1] - watermark_padding
        if watermark_position == 'Left': position = (left, (top + bottom) // 2 - text_height // 2)
        elif watermark_position == 'Left-Top': position = (left, top)
        elif watermark_position == 'Top': position = ((left + right) // 2 - text_width // 2, top)
        elif watermark_position == 'Right-Top': position = (right - text_width,top)
        elif watermark_position == 'Right': position = (right - text_width, (top + bottom) // 2 - text_height // 2)
        elif watermark_position == 'Right-Bottom': position = (right - text_width, bottom - text_height)
        elif watermark_position == 'Bottom': position = ((left + right) // 2 - text_width // 2,bottom - text_height)
        elif watermark_position == 'Left-Bottom': position = (left, bottom - text_height)
        elif watermark_position == 'Center': position = ((left + right) // 2 - text_width // 2, (top + bottom) // 2 - text_height // 2)
        # '#RRGGBB' -> (r, g, b)
        draw.text(position, watermark_text, font=font, fill=tuple(int(watermark_text_color[x:x+2], 16) for x in (1,3,5)))
        result = Image.blend(image, copy_image, watermark_alpha)
    elif watermark_type == 'Image':
        left, right, top, bottom = 0 + watermark_padding, image.size[0] - watermark_padding, 0 + watermark_padding, image.size[1] - watermark_padding
        if watermark_position == 'Left': position = (left, (top + bottom) // 2 - watermark_image_size_height // 2)
        elif watermark_position == 'Left-Top': position = (left, top)
        elif watermark_position == 'Top': position = ((left + right) // 2 - watermark_image_size_width // 2, top)
        elif watermark_position == 'Right-Top': position = (right - watermark_image_size_width,top)
        elif watermark_position == 'Right': position = (right - watermark_image_size_width, (top + bottom) // 2 - watermark_image_size_height // 2)
        elif watermark_position == 'Right-Bottom': position = (right - watermark_image_size_width, bottom - watermark_image_size_height)
        elif watermark_position == 'Bottom': position = ((left + right) // 2 - watermark_image_size_width // 2,bottom - watermark_image_size_height)
        elif watermark_position == 'Left-Bottom': position = (left, bottom - watermark_image_size_height)
        elif watermark_position == 'Center': position = ((left + right) // 2 - watermark_image_size_width // 2, (top + bottom) // 2 - watermark_image_size_height // 2)
        copy_np = np.array(image)
        copy_np_origin = copy_np.copy()
        water_image = cv2.resize(watermark_image.copy(), (watermark_image_size_width, watermark_image_size_height))
        # Pure-white watermark pixels are treated as transparent background.
        mask = np.where(np.all(water_image == [255, 255, 255], axis=-1), 0, 255)
        alpha = np.zeros((water_image.shape[0], water_image.shape[1]), dtype=np.uint8)
        alpha[:,:] = mask
        # Paste the non-white watermark pixels into the target crop region.
        copy_np_crop = copy_np[position[1]:position[1]+watermark_image_size_height, position[0]:position[0]+watermark_image_size_width, :]
        copy_np_crop[alpha.nonzero()] = water_image[alpha.nonzero()]
        copy_np[position[1]:position[1]+watermark_image_size_height, position[0]:position[0]+watermark_image_size_width, :] = copy_np_crop
        result = Image.fromarray(cv2.addWeighted(copy_np_origin, 1 - watermark_alpha, copy_np, watermark_alpha, 0))
    gc.collect()
    torch_gc()
    return result
(np.arange(width) / width * 2. - 1.) * window_scale_x + for y in range(height): + fy = (y / height * 2. - 1.) * window_scale_y + if mode == 0: + window[:, y] = np.exp(-(x ** 2 + fy ** 2) * std) + else: + window[:, y] = (1 / ((x ** 2 + 1.) * (fy ** 2 + 1.))) ** (std / 3.14) + return window + def _get_masked_window_rgb(np_mask_grey, hardness=1.0): + np_mask_rgb = np.zeros((np_mask_grey.shape[0], np_mask_grey.shape[1], 3)) + if hardness != 1.0: + hardened = np_mask_grey[:] ** hardness + else: + hardened = np_mask_grey[:] + for c in range(3): + np_mask_rgb[:, :, c] = hardened[:] + return np_mask_rgb + + width = image_np.shape[0] + height = image_np.shape[1] + channel = image_np.shape[2] + + image_np = image_np[:] * (1.0 - mask_np) + mask_np_grey = (np.sum(mask_np, axis=2) / 3.0) + img_mask = mask_np_grey > 1e-6 + ref_mask = mask_np_grey < 1e-3 + + image_windowed = image_np * (1.0 - _get_masked_window_rgb(mask_np_grey)) + image_windowed /= np.max(image_windowed) + image_windowed += np.average(image_np) * mask_np + + src_fft = _fft2(image_windowed) + src_dist = np.absolute(src_fft) + src_phase = src_fft / src_dist + + rng = np.random.default_rng(0) + + noise_window = _get_gaussian_window(width, height, mode=1) + noise_rgb = rng.random((width,height, channel)) + noise_grey = (np.sum(noise_rgb, axis=2) / 3.0) + noise_rgb *= color_variation + for c in range(channel): + noise_rgb[:,:,c] += (1.0 - color_variation) * noise_grey + + noise_fft = _fft2(noise_rgb) + for c in range(channel): + noise_fft[:,:,c] *= noise_window + noise_rgb = np.real(_ifft2(noise_fft)) + shaped_noise_fft = _fft2(noise_rgb) + shaped_noise_fft[:,:,:] = np.absolute(shaped_noise_fft[:,:,:]) ** 2 * (src_dist ** noise) * src_phase + + brightness_variation = 0 + contrast_adjusted_np = image_np[:] * (brightness_variation + 1.0) - brightness_variation * 2.0 + + shaped_noise = np.real(_ifft2(shaped_noise_fft)) + shaped_noise -= np.min(shaped_noise) + shaped_noise /= np.max(shaped_noise) + shaped_noise[img_mask, 
:] = exposure.match_histograms(shaped_noise[img_mask, :] ** 1.0, contrast_adjusted_np[ref_mask, :], channel_axis = 1) + shaped_noise = image_np[:] * (1.0 - mask_np) + shaped_noise * mask_np + + return np.clip(shaped_noise[:], 0.0, 1.0) \ No newline at end of file diff --git a/exhm/detailer/stable-diffusion-webui-eyemask/.gitignore b/exhm/detailer/stable-diffusion-webui-eyemask/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..23ccfb92242660c6e2ad6d9722d6dca2551f5f36 --- /dev/null +++ b/exhm/detailer/stable-diffusion-webui-eyemask/.gitignore @@ -0,0 +1,3 @@ +.idea +__pycache__/ +/outputs/ diff --git a/exhm/detailer/stable-diffusion-webui-eyemask/LICENSE b/exhm/detailer/stable-diffusion-webui-eyemask/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..a5102d8de6021c9d36e072b7a6d1294e5e082d30 --- /dev/null +++ b/exhm/detailer/stable-diffusion-webui-eyemask/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 Ilian Iliev + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/exhm/detailer/stable-diffusion-webui-eyemask/README.md b/exhm/detailer/stable-diffusion-webui-eyemask/README.md new file mode 100644 index 0000000000000000000000000000000000000000..d0ee99bd715da7c70a61230f00c61a8baebd52c7 --- /dev/null +++ b/exhm/detailer/stable-diffusion-webui-eyemask/README.md @@ -0,0 +1,76 @@ +

+ + + +

+ +# stable-diffusion-webui-eyemask + +This extension is for AUTOMATIC1111's [Stable Diffusion web UI](https://github.com/AUTOMATIC1111/stable-diffusion-webui) + +### Capabilities + +* Create mask for eyes / face / body and redraw by multiple params +* Wildcards for mask prompt and original prompt +* Using of different model just for masked area redraw +* Mask padding in px or percents +* Mask previews +* Separate embedded version of the script (enabled from settings) +* Batch mode support +* Custom API routes (including serving static files) + +Put all wildcards files in */wildcards* dir. + +### Usage + +Demo video: [https://www.youtube.com/watch?v=Q5PIFd7XsjM](https://www.youtube.com/watch?v=Q5PIFd7XsjM) + +* Enable the extension and enter your eye mask prompt +* Enter your original prompt +* Select mask type +* Select a model for mask redraw if you want other model +* Select mask padding if you want bigger area for redraw +* Select mask preview if you want to check the mask +* You can use placeholders in mask prompt and original prompt: + - Go to the */extensions/stable-diffusion-webui-eyemask/wildcards* dir and create a text file (for example *my-wildcard.txt*) + - Write your wildcards on new lines in this file + - Use this wildcard in the mask prompt or original prompt as **\_\_my-wildcard\_\_** and a random line will be used + - If you want to use each line in order, the wildcard file name must end with **_each** + - You can use multiple wildcards in one prompt + - You can use wildcards in the mask prompt and the original prompt at the same time + +### Install + +Use *Install from URL* option with this repo url. + +### Requirements +- dlib==19.24.0 +- setuptools +- cmake + +All requirements will be installed on first use. +You may need to install [cmake](https://cmake.org/download/) manually. + +### Examples + + +### Mask types + +1. Eyes dlib +2. Face dlib +3. Face depthmask +4. Body depthmask + + + +### Contributing + +Feel free to submit PRs to develop! + +

+ ...and you can always buy me a :beer:!

+ + Donate with PayPal + +

+ diff --git a/exhm/detailer/stable-diffusion-webui-eyemask/install.py b/exhm/detailer/stable-diffusion-webui-eyemask/install.py new file mode 100644 index 0000000000000000000000000000000000000000..b093452dd113f43c526e7e7d17d30244bed30ce0 --- /dev/null +++ b/exhm/detailer/stable-diffusion-webui-eyemask/install.py @@ -0,0 +1,78 @@ +import os +import sys + +from launch import is_installed, run, git_clone +from modules.paths import models_path +from modules.sd_models import model_hash +from modules import modelloader +from basicsr.utils.download_util import load_file_from_url + +include_mmdet = False +dd_models_path = os.path.join(models_path, "mmdet") + + +def list_models(model_path): + model_list = modelloader.load_models(model_path=model_path, ext_filter=[".pth"]) + + def modeltitle(path, shorthash): + abspath = os.path.abspath(path) + + if abspath.startswith(model_path): + name = abspath.replace(model_path, '') + else: + name = os.path.basename(path) + + if name.startswith("\\") or name.startswith("/"): + name = name[1:] + + shortname = os.path.splitext(name.replace("/", "_").replace("\\", "_"))[0] + + return f'{name} [{shorthash}]', shortname + + models = [] + for filename in model_list: + h = model_hash(filename) + title, short_model_name = modeltitle(filename, h) + models.append(title) + + return models + +python = sys.executable + +if include_mmdet: + run(f'"{python}" -m pip install lightning-utilities==0.11.2', desc=None, errdesc=f"Couldn't install lightning-utilities") + run(f'"{python}" -m pip install pytorch-lightning==2.2.4', desc=None, errdesc=f"Couldn't install pytorch-lightning") + +if not is_installed("cmake"): + run(f'"{python}" -m pip install cmake', desc="Installing cmake", errdesc="Couldn't install cmake") + +if not is_installed("setuptools"): + run(f'"{python}" -m pip install setuptools', desc="Installing setuptools", errdesc="Couldn't install setuptools") + +if not is_installed("dlib"): + try: + run(f'"{python}" -m pip install dlib==19.24.0', 
desc="Installing dlib", errdesc="Couldn't install dlib") + except Exception as e: + print(e) + print("----------------------------------------------") + print("Failed building wheel for dlib") + print("ERROR: CMake must be installed to build dlib") + print("Install cmake from https://cmake.org/download/") + print("----------------------------------------------") + + +if include_mmdet and not is_installed("mmdet"): + run(f'"{python}" -m pip install -U openmim==0.3.3', desc=None, errdesc="Couldn't install openmim") + run(f'"{python}" -m mim install mmcv-full==1.7.1', desc=f"Installing mmcv-full", errdesc=f"Couldn't install mmcv-full") + run(f'"{python}" -m pip install mmdet==2.27.0', desc=f"Installing mmdet", errdesc=f"Couldn't install mmdet") + +if include_mmdet and (len(list_models(dd_models_path)) == 0): + print("No detection models found, downloading...") + bbox_path = os.path.join(dd_models_path, "bbox") + segm_path = os.path.join(dd_models_path, "segm") + load_file_from_url("https://huggingface.co/dustysys/ddetailer/resolve/main/mmdet/bbox/mmdet_anime-face_yolov3.pth", bbox_path) + load_file_from_url("https://huggingface.co/dustysys/ddetailer/raw/main/mmdet/bbox/mmdet_anime-face_yolov3.py", bbox_path) + load_file_from_url("https://huggingface.co/dustysys/ddetailer/resolve/main/mmdet/segm/mmdet_dd-person_mask2former.pth", segm_path) + load_file_from_url("https://huggingface.co/dustysys/ddetailer/raw/main/mmdet/segm/mmdet_dd-person_mask2former.py", segm_path) + +git_clone("https://github.com/isl-org/MiDaS.git", "repositories/midas", "midas") diff --git a/exhm/detailer/stable-diffusion-webui-eyemask/javascript/dialog.js b/exhm/detailer/stable-diffusion-webui-eyemask/javascript/dialog.js new file mode 100644 index 0000000000000000000000000000000000000000..64aa113acca606ccdfe7d9d189319b954b869b4d --- /dev/null +++ b/exhm/detailer/stable-diffusion-webui-eyemask/javascript/dialog.js @@ -0,0 +1,210 @@ + +var dialog = (function () { + + var DIALOG_FADE_IN_SPEED = 250; + 
var DIALOG_FADE_OUT_SPEED = 150; + + var $mainContainer = null; + + var onClose = null; + + function getContainer() { + if (! $mainContainer) { + $mainContainer = $('#dialogs-container'); + } + return $mainContainer; + } + + var mainLayout = ( + '' + ); + + function getContent(opt) { + return mainLayout.format(opt.id); + } + + function getButton(btnOpt) { + var template = ''; + var id = btnOpt.id ? ' id="' + btnOpt.id + '-modal-btn" ' : ''; + var className = btnOpt.className ? btnOpt.className : ''; + var text = btnOpt.text; + return template.format(id, className, text); + } + + function close() { + getContainer() + .removeClass('open') + .find('div.modal').fadeOut(DIALOG_FADE_OUT_SPEED); + + if (onClose && onClose.call) { + onClose(); + } + } + + function onFooterBtnClick(data) { + if (data.action && data.action.call) { + data.action.call(this, data.params || []); + } + if (! data.dontClose) { + close(); + } + } + + function show(opt) { + + onClose = null; + close(); + + if ((typeof opt).toLowerCase() === 'string') { + opt = { + content: opt + }; + } + + if (! opt.id) { + opt.id = +new Date; + } + + onClose = (opt.onClose && opt.onClose.call) ? opt.onClose : null; + + var dialogId = '#{0}-modal'.format(opt.id); + + if (! getContainer().find(dialogId).length) { + getContainer().append(getContent(opt)); + } + + $dialog = $(dialogId); + + if (opt.big || opt.imageSrc) { + var windowHeight = $(window).height(); + var minHeight = parseInt(windowHeight * 0.8) - 100; + var top = parseInt(windowHeight * 0.08); + $dialog.css('top', top + 'px'); + if (! 
opt.dontStretch) { + $dialog.find('.modal-content').css('min-height', minHeight); + } + $dialog.addClass('modal-big'); + + if (opt.imageSrc) { + $dialog.find('.modal-content').css({ + 'background-image': 'url(' + opt.imageSrc + ')', + 'background-size': 'contain', + 'background-repeat' : 'no-repeat', + 'background-position': 'center center' + }); + } + } + + if (opt.maxWidth) { + $dialog.css('max-width', opt.maxWidth); + } + + if (opt.width) { + $dialog.css('width', opt.width); + } + + if (opt.top) { + $dialog.css('top', opt.top); + } + + $dialog + .find('.modal-title').html(opt.title).end() + .find('.modal-content').html(opt.content || opt.text || '').end() + .bindClick(function (e) { + e.stopPropagation(); + }) + .fadeIn(DIALOG_FADE_IN_SPEED); + + getContainer().addClass('open'); + + if (! opt.title) { + $dialog + .find('.modal-header').hide().end() + .find('.modal-content').css('padding', '30px 20px') + } + + if (opt.removeWrapper) { + $dialog.find('.modal-content *').first().unwrap(); + } + $footer = $dialog.find('.modal-footer').first().empty(); + + if (! opt.disableOverlay) { + getContainer().bindClick(close); + } else { + getContainer().off('click'); + } + + if (! opt.buttons || ! opt.buttons.length) { + opt.buttons = [{ + id: 'modal-close-button', + text: 'Ok' + }]; + } + + for (var i = 0, len = opt.buttons.length; i < len; i++) { + if (! 
opt.buttons[i].id) { + opt.buttons[i].id = Math.floor(Math.random() * 1000000); + } + $footer + .append(getButton(opt.buttons[i])) + .find('#' + opt.buttons[i].id + '-modal-btn') + .bindClick(onFooterBtnClick, [opt.buttons[i]]); + } + + return $dialog; + } + + function showConfirm(text, action) { + this.show({ + id: 'main-confirmation', + title: 'Confirmation', + content: text, + buttons: [ + { + text: 'Yes', + className: 'danger', + action: action + }, { + text: 'No' + } + ] + }); + } + + function showImage(src, title, buttons, maxWidth) { + this.show({ + id: 'image-dialog', + imageSrc: '/sdapi/v1/eyemask/v1/static/images/' + src, + big: true, + maxWidth: maxWidth || null, + title: title || null, + buttons: buttons || null + }); + } + + function showError(message) { + this.show({ + id: 'error-dialog', + title: 'Error', + content: message, + }); + } + + function hide() { + close(); + } + + return { + show: show, + hide: hide, + confirm: showConfirm, + image: showImage, + error: showError, + }; +})(); \ No newline at end of file diff --git a/exhm/detailer/stable-diffusion-webui-eyemask/javascript/eyemask.js b/exhm/detailer/stable-diffusion-webui-eyemask/javascript/eyemask.js new file mode 100644 index 0000000000000000000000000000000000000000..bde458f7d47226f78b0e0b1ae0a1c4f8ba32883d --- /dev/null +++ b/exhm/detailer/stable-diffusion-webui-eyemask/javascript/eyemask.js @@ -0,0 +1,209 @@ + +document.addEventListener('DOMContentLoaded', function() { + + window.BODY_SELECTOR = '.mx-auto.container' + + toastr.options = { + target: window.BODY_SELECTOR, + timeOut: 3500 + }; + + onUiLoaded(EyeMaskController.load); +}); + + +const EyeMaskController = (function () { + + let container = null; + let config = {}; + + const LS_PREFIX = 'em-save-'; + const TABS = ['txt2img', 'img2img']; + + const emTitles = { + '\u21A9\uFE0F': 'Load current image eyemask params', + 'Redraw original': 'Change seed after each batch', + 'Include mask': 'Include mask image in result' + }; + + 
function getContainer() { + if (!container) { + container = gradioApp().getElementById('eye-mask-container'); + } + return container + } + + function getApiUrl(path) { + return '/sdapi/v1/eyemask/v1' + path; + } + + function initRoot() { + window.gradioRoot = gradioApp().querySelector('.gradio-container'); + window.$gradioRoot = $(window.gradioRoot); + $gradioRoot.append('
'); + } + + function loadTitles() { + gradioApp().querySelectorAll('span, button, select, p').forEach(function(elem) { + if (elem) { + let tooltip = emTitles[elem.textContent] || emTitles[elem.value]; + if (tooltip) { + elem.title = tooltip; + } + } + }); + } + + function getConfig() { + $.ajax({ + url: getApiUrl('/config.json'), + dataType: 'json', + async: false, + cache: false, + success: function(data) { + config = data; + } + }); + } + + function getAllIds(id) { + let result = []; + result.push('em-{0}-txt2img'.format(id)); + result.push('em-{0}-img2img'.format(id)); + result.push('em-emb-{0}-txt2img'.format(id)); + result.push('em-emb-{0}-img2img'.format(id)); + return result; + } + + function getSelector(selector) { + return (` + #em-${selector}-txt2img, + #em-${selector}-img2img, + #em-emb-${selector}-txt2img, + #em-emb-${selector}-img2img + `); + } + + function loadPlaceHolders() { + if (config.em_save_prompts) { + ['txt2img_prompt', 'img2img_prompt'].forEach(handleSavedInput); + } + if (config.em_save_neg_prompts) { + ['txt2img_neg_prompt', 'img2img_neg_prompt'].forEach(handleSavedInput); + } + if (config.em_save_em_prompts) { + getAllIds('prompt').forEach(handleSavedInput); + } + if (config.em_save_em_neg_prompts) { + getAllIds('negative-prompt').forEach(handleSavedInput); + } + if (config.em_save_settings) { + [ + 'enabled', + 'count', + 'mask-type', + 'mask-padding', + 'mask-steps', + 'mask-blur', + 'denoising-strength', + 'full-res-padding', + 'cfg', + 'width', + 'height', + 'include-mask', + 'padding-in-px', + 'redraw-original', + 'use-other-model', + 'mask-model' + ].forEach(function (id) { + getAllIds(id).forEach(handleSavedInput); + }); + } + if (config.em_save_last_script) { + TABS.forEach(loadLastScript) + } + } + + function handleSavedInput(id) { + + let $el = $('#{0} textarea, #{0} select, #{0} input'.format(id)); + let event = 'change input'; + + if (! 
$el.length) { + return; + } + + let value = localStorage.getItem(LS_PREFIX + id); + + if (value) { + switch ($el[0].type) { + case 'checkbox': + $el.prop('checked', value === 'true').triggerEvent(event); + break; + case 'radio': + $el.filter(':checked').prop('checked', false); + $el.filter('[value="{0}"]'.format(value)).prop('checked', true).triggerEvent(event); + break; + default: + $el.val(value).triggerEvent(event); + } + } + + $el.on(event,function () { + let value = this.value; + if (this.type && this.type === 'checkbox') { + value = this.checked; + } + localStorage.setItem(LS_PREFIX + id, value); + }); + + if (id.indexOf('emb-enabled') > -1 && value === 'true') { + $('#' + id.replace('em-emb-enabled-', '') + '_script_container .cursor-pointer').triggerEvent('click'); + } + + if (id.indexOf('emb-use-other-model') > -1) { + setTimeout(function () { + $el.triggerEvent(event); + }, 0); + } + } + + function loadLastScript(tab) { + + let $select = $('#{0}_script_container #script_list select'.format(tab)); + let value = localStorage.getItem(LS_PREFIX + 'last-script-' + tab); + + $select.on('change', function () { + localStorage.setItem(LS_PREFIX + 'last-script-' + tab, this.value); + }); + + if (value) { + setTimeout(function () { + $select.val(value).triggerEvent('change'); + },0); + } + } + + function bindEvents() { + $(gradioApp()).on('click', getSelector('eye-info-button'), function () { + dialog.image('mask-types.jpg', 'Mask Types', null, '80%'); + }); + } + + function onFirstLoad() { + getConfig(); + initRoot(); + loadTitles(); + loadPlaceHolders(); + bindEvents(); + } + + function load() { + container = getContainer(); + onFirstLoad(); + } + + return { + load + }; +}()); diff --git a/exhm/detailer/stable-diffusion-webui-eyemask/javascript/helpers.js b/exhm/detailer/stable-diffusion-webui-eyemask/javascript/helpers.js new file mode 100644 index 0000000000000000000000000000000000000000..e9587c56387301fc2e18cf14a1c3244afbc064a0 --- /dev/null +++ 
b/exhm/detailer/stable-diffusion-webui-eyemask/javascript/helpers.js @@ -0,0 +1,35 @@ + +function log(m) { + console.log(m); +} + +String.prototype.format = function() { + var args = arguments; + return this.replace(/{(\d+)}/g, function(match, number) { + return typeof args[number] != 'undefined' ? args[number] : match; + }); +}; + +document.addEventListener('DOMContentLoaded', function() { + + $.fn.bindClick = function (func, args) { + if (args) { + return this.off('click').on('click', function () { + func.apply(this, args); + }); + } else { + return this.off('click').on('click', func); + } + }; + + $.fn.triggerEvent = function (event) { + if (! this.length) { + return this; + } + let el = this[0]; + event.split(' ').forEach(function (evt) { + el.dispatchEvent(new Event(evt.trim())); + }); + return this; + }; +}); \ No newline at end of file diff --git a/exhm/detailer/stable-diffusion-webui-eyemask/javascript/jquery-3.6.3.min.js b/exhm/detailer/stable-diffusion-webui-eyemask/javascript/jquery-3.6.3.min.js new file mode 100644 index 0000000000000000000000000000000000000000..7ec500648d3a6f0c89c90d17d768da7573507c10 --- /dev/null +++ b/exhm/detailer/stable-diffusion-webui-eyemask/javascript/jquery-3.6.3.min.js @@ -0,0 +1,10996 @@ +/*! + * jQuery JavaScript Library v3.6.3 + * https://jquery.com/ + * + * Includes Sizzle.js + * https://sizzlejs.com/ + * + * Copyright OpenJS Foundation and other contributors + * Released under the MIT license + * https://jquery.org/license + * + * Date: 2022-12-20T21:28Z + */ +( function( global, factory ) { + + "use strict"; + + if ( typeof module === "object" && typeof module.exports === "object" ) { + + // For CommonJS and CommonJS-like environments where a proper `window` + // is present, execute the factory and get jQuery. + // For environments that do not have a `window` with a `document` + // (such as Node.js), expose a factory as module.exports. + // This accentuates the need for the creation of a real `window`. + // e.g. 
var jQuery = require("jquery")(window); + // See ticket trac-14549 for more info. + module.exports = global.document ? + factory( global, true ) : + function( w ) { + if ( !w.document ) { + throw new Error( "jQuery requires a window with a document" ); + } + return factory( w ); + }; + } else { + factory( global ); + } + +// Pass this if window is not defined yet +} )( typeof window !== "undefined" ? window : this, function( window, noGlobal ) { + +// Edge <= 12 - 13+, Firefox <=18 - 45+, IE 10 - 11, Safari 5.1 - 9+, iOS 6 - 9.1 +// throw exceptions when non-strict code (e.g., ASP.NET 4.5) accesses strict mode +// arguments.callee.caller (trac-13335). But as of jQuery 3.0 (2016), strict mode should be common +// enough that all such attempts are guarded in a try block. + "use strict"; + + var arr = []; + + var getProto = Object.getPrototypeOf; + + var slice = arr.slice; + + var flat = arr.flat ? function( array ) { + return arr.flat.call( array ); + } : function( array ) { + return arr.concat.apply( [], array ); + }; + + + var push = arr.push; + + var indexOf = arr.indexOf; + + var class2type = {}; + + var toString = class2type.toString; + + var hasOwn = class2type.hasOwnProperty; + + var fnToString = hasOwn.toString; + + var ObjectFunctionString = fnToString.call( Object ); + + var support = {}; + + var isFunction = function isFunction( obj ) { + + // Support: Chrome <=57, Firefox <=52 + // In some browsers, typeof returns "function" for HTML elements + // (i.e., `typeof document.createElement( "object" ) === "function"`). + // We don't want to classify *any* DOM node as a function. + // Support: QtWeb <=3.8.5, WebKit <=534.34, wkhtmltopdf tool <=0.12.5 + // Plus for old WebKit, typeof returns "function" for HTML collections + // (e.g., `typeof document.getElementsByTagName("div") === "function"`). 
(gh-4756) + return typeof obj === "function" && typeof obj.nodeType !== "number" && + typeof obj.item !== "function"; + }; + + + var isWindow = function isWindow( obj ) { + return obj != null && obj === obj.window; + }; + + + var document = window.document; + + + + var preservedScriptAttributes = { + type: true, + src: true, + nonce: true, + noModule: true + }; + + function DOMEval( code, node, doc ) { + doc = doc || document; + + var i, val, + script = doc.createElement( "script" ); + + script.text = code; + if ( node ) { + for ( i in preservedScriptAttributes ) { + + // Support: Firefox 64+, Edge 18+ + // Some browsers don't support the "nonce" property on scripts. + // On the other hand, just using `getAttribute` is not enough as + // the `nonce` attribute is reset to an empty string whenever it + // becomes browsing-context connected. + // See https://github.com/whatwg/html/issues/2369 + // See https://html.spec.whatwg.org/#nonce-attributes + // The `node.getAttribute` check was added for the sake of + // `jQuery.globalEval` so that it can fake a nonce-containing node + // via an object. + val = node[ i ] || node.getAttribute && node.getAttribute( i ); + if ( val ) { + script.setAttribute( i, val ); + } + } + } + doc.head.appendChild( script ).parentNode.removeChild( script ); + } + + + function toType( obj ) { + if ( obj == null ) { + return obj + ""; + } + + // Support: Android <=2.3 only (functionish RegExp) + return typeof obj === "object" || typeof obj === "function" ? 
+ class2type[ toString.call( obj ) ] || "object" : + typeof obj; + } + /* global Symbol */ +// Defining this global in .eslintrc.json would create a danger of using the global +// unguarded in another place, it seems safer to define global only for this module + + + + var + version = "3.6.3", + + // Define a local copy of jQuery + jQuery = function( selector, context ) { + + // The jQuery object is actually just the init constructor 'enhanced' + // Need init if jQuery is called (just allow error to be thrown if not included) + return new jQuery.fn.init( selector, context ); + }; + + jQuery.fn = jQuery.prototype = { + + // The current version of jQuery being used + jquery: version, + + constructor: jQuery, + + // The default length of a jQuery object is 0 + length: 0, + + toArray: function() { + return slice.call( this ); + }, + + // Get the Nth element in the matched element set OR + // Get the whole matched element set as a clean array + get: function( num ) { + + // Return all the elements in a clean array + if ( num == null ) { + return slice.call( this ); + } + + // Return just the one element from the set + return num < 0 ? this[ num + this.length ] : this[ num ]; + }, + + // Take an array of elements and push it onto the stack + // (returning the new matched element set) + pushStack: function( elems ) { + + // Build a new jQuery matched element set + var ret = jQuery.merge( this.constructor(), elems ); + + // Add the old object onto the stack (as a reference) + ret.prevObject = this; + + // Return the newly-formed element set + return ret; + }, + + // Execute a callback for every element in the matched set. 
+ each: function( callback ) { + return jQuery.each( this, callback ); + }, + + map: function( callback ) { + return this.pushStack( jQuery.map( this, function( elem, i ) { + return callback.call( elem, i, elem ); + } ) ); + }, + + slice: function() { + return this.pushStack( slice.apply( this, arguments ) ); + }, + + first: function() { + return this.eq( 0 ); + }, + + last: function() { + return this.eq( -1 ); + }, + + even: function() { + return this.pushStack( jQuery.grep( this, function( _elem, i ) { + return ( i + 1 ) % 2; + } ) ); + }, + + odd: function() { + return this.pushStack( jQuery.grep( this, function( _elem, i ) { + return i % 2; + } ) ); + }, + + eq: function( i ) { + var len = this.length, + j = +i + ( i < 0 ? len : 0 ); + return this.pushStack( j >= 0 && j < len ? [ this[ j ] ] : [] ); + }, + + end: function() { + return this.prevObject || this.constructor(); + }, + + // For internal use only. + // Behaves like an Array's method, not like a jQuery method. + push: push, + sort: arr.sort, + splice: arr.splice + }; + + jQuery.extend = jQuery.fn.extend = function() { + var options, name, src, copy, copyIsArray, clone, + target = arguments[ 0 ] || {}, + i = 1, + length = arguments.length, + deep = false; + + // Handle a deep copy situation + if ( typeof target === "boolean" ) { + deep = target; + + // Skip the boolean and the target + target = arguments[ i ] || {}; + i++; + } + + // Handle case when target is a string or something (possible in deep copy) + if ( typeof target !== "object" && !isFunction( target ) ) { + target = {}; + } + + // Extend jQuery itself if only one argument is passed + if ( i === length ) { + target = this; + i--; + } + + for ( ; i < length; i++ ) { + + // Only deal with non-null/undefined values + if ( ( options = arguments[ i ] ) != null ) { + + // Extend the base object + for ( name in options ) { + copy = options[ name ]; + + // Prevent Object.prototype pollution + // Prevent never-ending loop + if ( name === "__proto__" 
|| target === copy ) { + continue; + } + + // Recurse if we're merging plain objects or arrays + if ( deep && copy && ( jQuery.isPlainObject( copy ) || + ( copyIsArray = Array.isArray( copy ) ) ) ) { + src = target[ name ]; + + // Ensure proper type for the source value + if ( copyIsArray && !Array.isArray( src ) ) { + clone = []; + } else if ( !copyIsArray && !jQuery.isPlainObject( src ) ) { + clone = {}; + } else { + clone = src; + } + copyIsArray = false; + + // Never move original objects, clone them + target[ name ] = jQuery.extend( deep, clone, copy ); + + // Don't bring in undefined values + } else if ( copy !== undefined ) { + target[ name ] = copy; + } + } + } + } + + // Return the modified object + return target; + }; + + jQuery.extend( { + + // Unique for each copy of jQuery on the page + expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ), + + // Assume jQuery is ready without the ready module + isReady: true, + + error: function( msg ) { + throw new Error( msg ); + }, + + noop: function() {}, + + isPlainObject: function( obj ) { + var proto, Ctor; + + // Detect obvious negatives + // Use toString instead of jQuery.type to catch host objects + if ( !obj || toString.call( obj ) !== "[object Object]" ) { + return false; + } + + proto = getProto( obj ); + + // Objects with no prototype (e.g., `Object.create( null )`) are plain + if ( !proto ) { + return true; + } + + // Objects with prototype are plain iff they were constructed by a global Object function + Ctor = hasOwn.call( proto, "constructor" ) && proto.constructor; + return typeof Ctor === "function" && fnToString.call( Ctor ) === ObjectFunctionString; + }, + + isEmptyObject: function( obj ) { + var name; + + for ( name in obj ) { + return false; + } + return true; + }, + + // Evaluates a script in a provided context; falls back to the global one + // if not specified. 
+ globalEval: function( code, options, doc ) { + DOMEval( code, { nonce: options && options.nonce }, doc ); + }, + + each: function( obj, callback ) { + var length, i = 0; + + if ( isArrayLike( obj ) ) { + length = obj.length; + for ( ; i < length; i++ ) { + if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { + break; + } + } + } else { + for ( i in obj ) { + if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { + break; + } + } + } + + return obj; + }, + + // results is for internal usage only + makeArray: function( arr, results ) { + var ret = results || []; + + if ( arr != null ) { + if ( isArrayLike( Object( arr ) ) ) { + jQuery.merge( ret, + typeof arr === "string" ? + [ arr ] : arr + ); + } else { + push.call( ret, arr ); + } + } + + return ret; + }, + + inArray: function( elem, arr, i ) { + return arr == null ? -1 : indexOf.call( arr, elem, i ); + }, + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + merge: function( first, second ) { + var len = +second.length, + j = 0, + i = first.length; + + for ( ; j < len; j++ ) { + first[ i++ ] = second[ j ]; + } + + first.length = i; + + return first; + }, + + grep: function( elems, callback, invert ) { + var callbackInverse, + matches = [], + i = 0, + length = elems.length, + callbackExpect = !invert; + + // Go through the array, only saving the items + // that pass the validator function + for ( ; i < length; i++ ) { + callbackInverse = !callback( elems[ i ], i ); + if ( callbackInverse !== callbackExpect ) { + matches.push( elems[ i ] ); + } + } + + return matches; + }, + + // arg is for internal usage only + map: function( elems, callback, arg ) { + var length, value, + i = 0, + ret = []; + + // Go through the array, translating each of the items to their new values + if ( isArrayLike( elems ) ) { + length = elems.length; + for ( ; i < length; i++ ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret.push( value ); + } + 
} + + // Go through every key on the object, + } else { + for ( i in elems ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret.push( value ); + } + } + } + + // Flatten any nested arrays + return flat( ret ); + }, + + // A global GUID counter for objects + guid: 1, + + // jQuery.support is not used in Core but other projects attach their + // properties to it so it needs to exist. + support: support + } ); + + if ( typeof Symbol === "function" ) { + jQuery.fn[ Symbol.iterator ] = arr[ Symbol.iterator ]; + } + +// Populate the class2type map + jQuery.each( "Boolean Number String Function Array Date RegExp Object Error Symbol".split( " " ), + function( _i, name ) { + class2type[ "[object " + name + "]" ] = name.toLowerCase(); + } ); + + function isArrayLike( obj ) { + + // Support: real iOS 8.2 only (not reproducible in simulator) + // `in` check used to prevent JIT error (gh-2145) + // hasOwn isn't used here due to false negatives + // regarding Nodelist length in IE + var length = !!obj && "length" in obj && obj.length, + type = toType( obj ); + + if ( isFunction( obj ) || isWindow( obj ) ) { + return false; + } + + return type === "array" || length === 0 || + typeof length === "number" && length > 0 && ( length - 1 ) in obj; + } + var Sizzle = + /*! 
+ * Sizzle CSS Selector Engine v2.3.9 + * https://sizzlejs.com/ + * + * Copyright JS Foundation and other contributors + * Released under the MIT license + * https://js.foundation/ + * + * Date: 2022-12-19 + */ + ( function( window ) { + var i, + support, + Expr, + getText, + isXML, + tokenize, + compile, + select, + outermostContext, + sortInput, + hasDuplicate, + + // Local document vars + setDocument, + document, + docElem, + documentIsHTML, + rbuggyQSA, + rbuggyMatches, + matches, + contains, + + // Instance-specific data + expando = "sizzle" + 1 * new Date(), + preferredDoc = window.document, + dirruns = 0, + done = 0, + classCache = createCache(), + tokenCache = createCache(), + compilerCache = createCache(), + nonnativeSelectorCache = createCache(), + sortOrder = function( a, b ) { + if ( a === b ) { + hasDuplicate = true; + } + return 0; + }, + + // Instance methods + hasOwn = ( {} ).hasOwnProperty, + arr = [], + pop = arr.pop, + pushNative = arr.push, + push = arr.push, + slice = arr.slice, + + // Use a stripped-down indexOf as it's faster than native + // https://jsperf.com/thor-indexof-vs-for/5 + indexOf = function( list, elem ) { + var i = 0, + len = list.length; + for ( ; i < len; i++ ) { + if ( list[ i ] === elem ) { + return i; + } + } + return -1; + }, + + booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|" + + "ismap|loop|multiple|open|readonly|required|scoped", + + // Regular expressions + + // http://www.w3.org/TR/css3-selectors/#whitespace + whitespace = "[\\x20\\t\\r\\n\\f]", + + // https://www.w3.org/TR/css-syntax-3/#ident-token-diagram + identifier = "(?:\\\\[\\da-fA-F]{1,6}" + whitespace + + "?|\\\\[^\\r\\n\\f]|[\\w-]|[^\0-\\x7f])+", + + // Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors + attributes = "\\[" + whitespace + "*(" + identifier + ")(?:" + whitespace + + + // Operator (capture 2) + "*([*^$|!~]?=)" + whitespace + + + // "Attribute values must be CSS identifiers [capture 
5] + // or strings [capture 3 or capture 4]" + "*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" + identifier + "))|)" + + whitespace + "*\\]", + + pseudos = ":(" + identifier + ")(?:\\((" + + + // To reduce the number of selectors needing tokenize in the preFilter, prefer arguments: + // 1. quoted (capture 3; capture 4 or capture 5) + "('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" + + + // 2. simple (capture 6) + "((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" + + + // 3. anything else (capture 2) + ".*" + + ")\\)|)", + + // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter + rwhitespace = new RegExp( whitespace + "+", "g" ), + rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + + whitespace + "+$", "g" ), + + rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), + rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + + "*" ), + rdescend = new RegExp( whitespace + "|>" ), + + rpseudo = new RegExp( pseudos ), + ridentifier = new RegExp( "^" + identifier + "$" ), + + matchExpr = { + "ID": new RegExp( "^#(" + identifier + ")" ), + "CLASS": new RegExp( "^\\.(" + identifier + ")" ), + "TAG": new RegExp( "^(" + identifier + "|[*])" ), + "ATTR": new RegExp( "^" + attributes ), + "PSEUDO": new RegExp( "^" + pseudos ), + "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + + whitespace + "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + + whitespace + "*(\\d+)|))" + whitespace + "*\\)|)", "i" ), + "bool": new RegExp( "^(?:" + booleans + ")$", "i" ), + + // For use in libraries implementing .is() + // We use this for POS matching in `select` + "needsContext": new RegExp( "^" + whitespace + + "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + whitespace + + "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" ) + }, + + rhtml = /HTML$/i, + rinputs = /^(?:input|select|textarea|button)$/i, 
+ rheader = /^h\d$/i, + + rnative = /^[^{]+\{\s*\[native \w/, + + // Easily-parseable/retrievable ID or TAG or CLASS selectors + rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/, + + rsibling = /[+~]/, + + // CSS escapes + // http://www.w3.org/TR/CSS21/syndata.html#escaped-characters + runescape = new RegExp( "\\\\[\\da-fA-F]{1,6}" + whitespace + "?|\\\\([^\\r\\n\\f])", "g" ), + funescape = function( escape, nonHex ) { + var high = "0x" + escape.slice( 1 ) - 0x10000; + + return nonHex ? + + // Strip the backslash prefix from a non-hex escape sequence + nonHex : + + // Replace a hexadecimal escape sequence with the encoded Unicode code point + // Support: IE <=11+ + // For values outside the Basic Multilingual Plane (BMP), manually construct a + // surrogate pair + high < 0 ? + String.fromCharCode( high + 0x10000 ) : + String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 ); + }, + + // CSS string/identifier serialization + // https://drafts.csswg.org/cssom/#common-serializing-idioms + rcssescape = /([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g, + fcssescape = function( ch, asCodePoint ) { + if ( asCodePoint ) { + + // U+0000 NULL becomes U+FFFD REPLACEMENT CHARACTER + if ( ch === "\0" ) { + return "\uFFFD"; + } + + // Control characters and (dependent upon position) numbers get escaped as code points + return ch.slice( 0, -1 ) + "\\" + + ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " "; + } + + // Other potentially-special ASCII characters get backslash-escaped + return "\\" + ch; + }, + + // Used for iframes + // See setDocument() + // Removing the function wrapper causes a "Permission Denied" + // error in IE + unloadHandler = function() { + setDocument(); + }, + + inDisabledFieldset = addCombinator( + function( elem ) { + return elem.disabled === true && elem.nodeName.toLowerCase() === "fieldset"; + }, + { dir: "parentNode", next: "legend" } + ); + +// Optimize for push.apply( _, NodeList ) + try { + push.apply( + ( arr = slice.call( 
preferredDoc.childNodes ) ), + preferredDoc.childNodes + ); + + // Support: Android<4.0 + // Detect silently failing push.apply + // eslint-disable-next-line no-unused-expressions + arr[ preferredDoc.childNodes.length ].nodeType; + } catch ( e ) { + push = { apply: arr.length ? + + // Leverage slice if possible + function( target, els ) { + pushNative.apply( target, slice.call( els ) ); + } : + + // Support: IE<9 + // Otherwise append directly + function( target, els ) { + var j = target.length, + i = 0; + + // Can't trust NodeList.length + while ( ( target[ j++ ] = els[ i++ ] ) ) {} + target.length = j - 1; + } + }; + } + + function Sizzle( selector, context, results, seed ) { + var m, i, elem, nid, match, groups, newSelector, + newContext = context && context.ownerDocument, + + // nodeType defaults to 9, since context defaults to document + nodeType = context ? context.nodeType : 9; + + results = results || []; + + // Return early from calls with invalid selector or context + if ( typeof selector !== "string" || !selector || + nodeType !== 1 && nodeType !== 9 && nodeType !== 11 ) { + + return results; + } + + // Try to shortcut find operations (as opposed to filters) in HTML documents + if ( !seed ) { + setDocument( context ); + context = context || document; + + if ( documentIsHTML ) { + + // If the selector is sufficiently simple, try using a "get*By*" DOM method + // (excepting DocumentFragment context, where the methods don't exist) + if ( nodeType !== 11 && ( match = rquickExpr.exec( selector ) ) ) { + + // ID selector + if ( ( m = match[ 1 ] ) ) { + + // Document context + if ( nodeType === 9 ) { + if ( ( elem = context.getElementById( m ) ) ) { + + // Support: IE, Opera, Webkit + // TODO: identify versions + // getElementById can match elements by name instead of ID + if ( elem.id === m ) { + results.push( elem ); + return results; + } + } else { + return results; + } + + // Element context + } else { + + // Support: IE, Opera, Webkit + // TODO: identify 
versions + // getElementById can match elements by name instead of ID + if ( newContext && ( elem = newContext.getElementById( m ) ) && + contains( context, elem ) && + elem.id === m ) { + + results.push( elem ); + return results; + } + } + + // Type selector + } else if ( match[ 2 ] ) { + push.apply( results, context.getElementsByTagName( selector ) ); + return results; + + // Class selector + } else if ( ( m = match[ 3 ] ) && support.getElementsByClassName && + context.getElementsByClassName ) { + + push.apply( results, context.getElementsByClassName( m ) ); + return results; + } + } + + // Take advantage of querySelectorAll + if ( support.qsa && + !nonnativeSelectorCache[ selector + " " ] && + ( !rbuggyQSA || !rbuggyQSA.test( selector ) ) && + + // Support: IE 8 only + // Exclude object elements + ( nodeType !== 1 || context.nodeName.toLowerCase() !== "object" ) ) { + + newSelector = selector; + newContext = context; + + // qSA considers elements outside a scoping root when evaluating child or + // descendant combinators, which is not what we want. + // In such cases, we work around the behavior by prefixing every selector in the + // list with an ID selector referencing the scope context. + // The technique has to be used as well when a leading combinator is used + // as such selectors are not recognized by querySelectorAll. + // Thanks to Andrew Dupont for this technique. + if ( nodeType === 1 && + ( rdescend.test( selector ) || rcombinators.test( selector ) ) ) { + + // Expand context for sibling selectors + newContext = rsibling.test( selector ) && testContext( context.parentNode ) || + context; + + // We can use :scope instead of the ID hack if the browser + // supports it & if we're not changing the context. 
+ if ( newContext !== context || !support.scope ) { + + // Capture the context ID, setting it first if necessary + if ( ( nid = context.getAttribute( "id" ) ) ) { + nid = nid.replace( rcssescape, fcssescape ); + } else { + context.setAttribute( "id", ( nid = expando ) ); + } + } + + // Prefix every selector in the list + groups = tokenize( selector ); + i = groups.length; + while ( i-- ) { + groups[ i ] = ( nid ? "#" + nid : ":scope" ) + " " + + toSelector( groups[ i ] ); + } + newSelector = groups.join( "," ); + } + + try { + + // `qSA` may not throw for unrecognized parts using forgiving parsing: + // https://drafts.csswg.org/selectors/#forgiving-selector + // like the `:has()` pseudo-class: + // https://drafts.csswg.org/selectors/#relational + // `CSS.supports` is still expected to return `false` then: + // https://drafts.csswg.org/css-conditional-4/#typedef-supports-selector-fn + // https://drafts.csswg.org/css-conditional-4/#dfn-support-selector + if ( support.cssSupportsSelector && + + // eslint-disable-next-line no-undef + !CSS.supports( "selector(:is(" + newSelector + "))" ) ) { + + // Support: IE 11+ + // Throw to get to the same code path as an error directly in qSA. + // Note: once we only support browser supporting + // `CSS.supports('selector(...)')`, we can most likely drop + // the `try-catch`. IE doesn't implement the API. 
+ throw new Error(); + } + + push.apply( results, + newContext.querySelectorAll( newSelector ) + ); + return results; + } catch ( qsaError ) { + nonnativeSelectorCache( selector, true ); + } finally { + if ( nid === expando ) { + context.removeAttribute( "id" ); + } + } + } + } + } + + // All others + return select( selector.replace( rtrim, "$1" ), context, results, seed ); + } + + /** + * Create key-value caches of limited size + * @returns {function(string, object)} Returns the Object data after storing it on itself with + * property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength) + * deleting the oldest entry + */ + function createCache() { + var keys = []; + + function cache( key, value ) { + + // Use (key + " ") to avoid collision with native prototype properties (see Issue #157) + if ( keys.push( key + " " ) > Expr.cacheLength ) { + + // Only keep the most recent entries + delete cache[ keys.shift() ]; + } + return ( cache[ key + " " ] = value ); + } + return cache; + } + + /** + * Mark a function for special use by Sizzle + * @param {Function} fn The function to mark + */ + function markFunction( fn ) { + fn[ expando ] = true; + return fn; + } + + /** + * Support testing using an element + * @param {Function} fn Passed the created element and returns a boolean result + */ + function assert( fn ) { + var el = document.createElement( "fieldset" ); + + try { + return !!fn( el ); + } catch ( e ) { + return false; + } finally { + + // Remove from its parent by default + if ( el.parentNode ) { + el.parentNode.removeChild( el ); + } + + // release memory in IE + el = null; + } + } + + /** + * Adds the same handler for all of the specified attrs + * @param {String} attrs Pipe-separated list of attributes + * @param {Function} handler The method that will be applied + */ + function addHandle( attrs, handler ) { + var arr = attrs.split( "|" ), + i = arr.length; + + while ( i-- ) { + Expr.attrHandle[ arr[ i ] ] = handler; + } + } + + 
/** + * Checks document order of two siblings + * @param {Element} a + * @param {Element} b + * @returns {Number} Returns less than 0 if a precedes b, greater than 0 if a follows b + */ + function siblingCheck( a, b ) { + var cur = b && a, + diff = cur && a.nodeType === 1 && b.nodeType === 1 && + a.sourceIndex - b.sourceIndex; + + // Use IE sourceIndex if available on both nodes + if ( diff ) { + return diff; + } + + // Check if b follows a + if ( cur ) { + while ( ( cur = cur.nextSibling ) ) { + if ( cur === b ) { + return -1; + } + } + } + + return a ? 1 : -1; + } + + /** + * Returns a function to use in pseudos for input types + * @param {String} type + */ + function createInputPseudo( type ) { + return function( elem ) { + var name = elem.nodeName.toLowerCase(); + return name === "input" && elem.type === type; + }; + } + + /** + * Returns a function to use in pseudos for buttons + * @param {String} type + */ + function createButtonPseudo( type ) { + return function( elem ) { + var name = elem.nodeName.toLowerCase(); + return ( name === "input" || name === "button" ) && elem.type === type; + }; + } + + /** + * Returns a function to use in pseudos for :enabled/:disabled + * @param {Boolean} disabled true for :disabled; false for :enabled + */ + function createDisabledPseudo( disabled ) { + + // Known :disabled false positives: fieldset[disabled] > legend:nth-of-type(n+2) :can-disable + return function( elem ) { + + // Only certain elements can match :enabled or :disabled + // https://html.spec.whatwg.org/multipage/scripting.html#selector-enabled + // https://html.spec.whatwg.org/multipage/scripting.html#selector-disabled + if ( "form" in elem ) { + + // Check for inherited disabledness on relevant non-disabled elements: + // * listed form-associated elements in a disabled fieldset + // https://html.spec.whatwg.org/multipage/forms.html#category-listed + // https://html.spec.whatwg.org/multipage/forms.html#concept-fe-disabled + // * option elements in a disabled 
optgroup + // https://html.spec.whatwg.org/multipage/forms.html#concept-option-disabled + // All such elements have a "form" property. + if ( elem.parentNode && elem.disabled === false ) { + + // Option elements defer to a parent optgroup if present + if ( "label" in elem ) { + if ( "label" in elem.parentNode ) { + return elem.parentNode.disabled === disabled; + } else { + return elem.disabled === disabled; + } + } + + // Support: IE 6 - 11 + // Use the isDisabled shortcut property to check for disabled fieldset ancestors + return elem.isDisabled === disabled || + + // Where there is no isDisabled, check manually + /* jshint -W018 */ + elem.isDisabled !== !disabled && + inDisabledFieldset( elem ) === disabled; + } + + return elem.disabled === disabled; + + // Try to winnow out elements that can't be disabled before trusting the disabled property. + // Some victims get caught in our net (label, legend, menu, track), but it shouldn't + // even exist on them, let alone have a boolean value. 
+ } else if ( "label" in elem ) { + return elem.disabled === disabled; + } + + // Remaining elements are neither :enabled nor :disabled + return false; + }; + } + + /** + * Returns a function to use in pseudos for positionals + * @param {Function} fn + */ + function createPositionalPseudo( fn ) { + return markFunction( function( argument ) { + argument = +argument; + return markFunction( function( seed, matches ) { + var j, + matchIndexes = fn( [], seed.length, argument ), + i = matchIndexes.length; + + // Match elements found at the specified indexes + while ( i-- ) { + if ( seed[ ( j = matchIndexes[ i ] ) ] ) { + seed[ j ] = !( matches[ j ] = seed[ j ] ); + } + } + } ); + } ); + } + + /** + * Checks a node for validity as a Sizzle context + * @param {Element|Object=} context + * @returns {Element|Object|Boolean} The input node if acceptable, otherwise a falsy value + */ + function testContext( context ) { + return context && typeof context.getElementsByTagName !== "undefined" && context; + } + +// Expose support vars for convenience + support = Sizzle.support = {}; + + /** + * Detects XML nodes + * @param {Element|Object} elem An element or a document + * @returns {Boolean} True iff elem is a non-HTML XML node + */ + isXML = Sizzle.isXML = function( elem ) { + var namespace = elem && elem.namespaceURI, + docElem = elem && ( elem.ownerDocument || elem ).documentElement; + + // Support: IE <=8 + // Assume HTML when documentElement doesn't yet exist, such as inside loading iframes + // https://bugs.jquery.com/ticket/4833 + return !rhtml.test( namespace || docElem && docElem.nodeName || "HTML" ); + }; + + /** + * Sets document-related variables once based on the current document + * @param {Element|Object} [doc] An element or document object to use to set the document + * @returns {Object} Returns the current document + */ + setDocument = Sizzle.setDocument = function( node ) { + var hasCompare, subWindow, + doc = node ? 
node.ownerDocument || node : preferredDoc; + + // Return early if doc is invalid or already selected + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( doc == document || doc.nodeType !== 9 || !doc.documentElement ) { + return document; + } + + // Update global variables + document = doc; + docElem = document.documentElement; + documentIsHTML = !isXML( document ); + + // Support: IE 9 - 11+, Edge 12 - 18+ + // Accessing iframe documents after unload throws "permission denied" errors (jQuery #13936) + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( preferredDoc != document && + ( subWindow = document.defaultView ) && subWindow.top !== subWindow ) { + + // Support: IE 11, Edge + if ( subWindow.addEventListener ) { + subWindow.addEventListener( "unload", unloadHandler, false ); + + // Support: IE 9 - 10 only + } else if ( subWindow.attachEvent ) { + subWindow.attachEvent( "onunload", unloadHandler ); + } + } + + // Support: IE 8 - 11+, Edge 12 - 18+, Chrome <=16 - 25 only, Firefox <=3.6 - 31 only, + // Safari 4 - 5 only, Opera <=11.6 - 12.x only + // IE/Edge & older browsers don't support the :scope pseudo-class. + // Support: Safari 6.0 only + // Safari 6.0 supports :scope but it's an alias of :root there. + support.scope = assert( function( el ) { + docElem.appendChild( el ).appendChild( document.createElement( "div" ) ); + return typeof el.querySelectorAll !== "undefined" && + !el.querySelectorAll( ":scope fieldset div" ).length; + } ); + + // Support: Chrome 105+, Firefox 104+, Safari 15.4+ + // Make sure forgiving mode is not used in `CSS.supports( "selector(...)" )`. 
+ // + // `:is()` uses a forgiving selector list as an argument and is widely + // implemented, so it's a good one to test against. + support.cssSupportsSelector = assert( function() { + /* eslint-disable no-undef */ + + return CSS.supports( "selector(*)" ) && + + // Support: Firefox 78-81 only + // In old Firefox, `:is()` didn't use forgiving parsing. In that case, + // fail this test as there's no selector to test against that. + // `CSS.supports` uses unforgiving parsing + document.querySelectorAll( ":is(:jqfake)" ) && + + // `*` is needed as Safari & newer Chrome implemented something in between + // for `:has()` - it throws in `qSA` if it only contains an unsupported + // argument but multiple ones, one of which is supported, are fine. + // We want to play safe in case `:is()` gets the same treatment. + !CSS.supports( "selector(:is(*,:jqfake))" ); + + /* eslint-enable */ + } ); + + /* Attributes + ---------------------------------------------------------------------- */ + + // Support: IE<8 + // Verify that getAttribute really returns attributes and not properties + // (excepting IE8 booleans) + support.attributes = assert( function( el ) { + el.className = "i"; + return !el.getAttribute( "className" ); + } ); + + /* getElement(s)By* + ---------------------------------------------------------------------- */ + + // Check if getElementsByTagName("*") returns only elements + support.getElementsByTagName = assert( function( el ) { + el.appendChild( document.createComment( "" ) ); + return !el.getElementsByTagName( "*" ).length; + } ); + + // Support: IE<9 + support.getElementsByClassName = rnative.test( document.getElementsByClassName ); + + // Support: IE<10 + // Check if getElementById returns elements by name + // The broken getElementById methods don't pick up programmatically-set names, + // so use a roundabout getElementsByName test + support.getById = assert( function( el ) { + docElem.appendChild( el ).id = expando; + return !document.getElementsByName || 
!document.getElementsByName( expando ).length; + } ); + + // ID filter and find + if ( support.getById ) { + Expr.filter[ "ID" ] = function( id ) { + var attrId = id.replace( runescape, funescape ); + return function( elem ) { + return elem.getAttribute( "id" ) === attrId; + }; + }; + Expr.find[ "ID" ] = function( id, context ) { + if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { + var elem = context.getElementById( id ); + return elem ? [ elem ] : []; + } + }; + } else { + Expr.filter[ "ID" ] = function( id ) { + var attrId = id.replace( runescape, funescape ); + return function( elem ) { + var node = typeof elem.getAttributeNode !== "undefined" && + elem.getAttributeNode( "id" ); + return node && node.value === attrId; + }; + }; + + // Support: IE 6 - 7 only + // getElementById is not reliable as a find shortcut + Expr.find[ "ID" ] = function( id, context ) { + if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { + var node, i, elems, + elem = context.getElementById( id ); + + if ( elem ) { + + // Verify the id attribute + node = elem.getAttributeNode( "id" ); + if ( node && node.value === id ) { + return [ elem ]; + } + + // Fall back on getElementsByName + elems = context.getElementsByName( id ); + i = 0; + while ( ( elem = elems[ i++ ] ) ) { + node = elem.getAttributeNode( "id" ); + if ( node && node.value === id ) { + return [ elem ]; + } + } + } + + return []; + } + }; + } + + // Tag + Expr.find[ "TAG" ] = support.getElementsByTagName ? 
+ function( tag, context ) { + if ( typeof context.getElementsByTagName !== "undefined" ) { + return context.getElementsByTagName( tag ); + + // DocumentFragment nodes don't have gEBTN + } else if ( support.qsa ) { + return context.querySelectorAll( tag ); + } + } : + + function( tag, context ) { + var elem, + tmp = [], + i = 0, + + // By happy coincidence, a (broken) gEBTN appears on DocumentFragment nodes too + results = context.getElementsByTagName( tag ); + + // Filter out possible comments + if ( tag === "*" ) { + while ( ( elem = results[ i++ ] ) ) { + if ( elem.nodeType === 1 ) { + tmp.push( elem ); + } + } + + return tmp; + } + return results; + }; + + // Class + Expr.find[ "CLASS" ] = support.getElementsByClassName && function( className, context ) { + if ( typeof context.getElementsByClassName !== "undefined" && documentIsHTML ) { + return context.getElementsByClassName( className ); + } + }; + + /* QSA/matchesSelector + ---------------------------------------------------------------------- */ + + // QSA and matchesSelector support + + // matchesSelector(:active) reports false when true (IE9/Opera 11.5) + rbuggyMatches = []; + + // qSa(:focus) reports false when true (Chrome 21) + // We allow this because of a bug in IE8/9 that throws an error + // whenever `document.activeElement` is accessed on an iframe + // So, we allow :focus to pass through QSA all the time to avoid the IE error + // See https://bugs.jquery.com/ticket/13378 + rbuggyQSA = []; + + if ( ( support.qsa = rnative.test( document.querySelectorAll ) ) ) { + + // Build QSA regex + // Regex strategy adopted from Diego Perini + assert( function( el ) { + + var input; + + // Select is set to empty string on purpose + // This is to test IE's treatment of not explicitly + // setting a boolean content attribute, + // since its presence should be enough + // https://bugs.jquery.com/ticket/12359 + docElem.appendChild( el ).innerHTML = "" + + ""; + + // Support: IE8, Opera 11-12.16 + // Nothing should 
be selected when empty strings follow ^= or $= or *= + // The test attribute must be unknown in Opera but "safe" for WinRT + // https://msdn.microsoft.com/en-us/library/ie/hh465388.aspx#attribute_section + if ( el.querySelectorAll( "[msallowcapture^='']" ).length ) { + rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:''|\"\")" ); + } + + // Support: IE8 + // Boolean attributes and "value" are not treated correctly + if ( !el.querySelectorAll( "[selected]" ).length ) { + rbuggyQSA.push( "\\[" + whitespace + "*(?:value|" + booleans + ")" ); + } + + // Support: Chrome<29, Android<4.4, Safari<7.0+, iOS<7.0+, PhantomJS<1.9.8+ + if ( !el.querySelectorAll( "[id~=" + expando + "-]" ).length ) { + rbuggyQSA.push( "~=" ); + } + + // Support: IE 11+, Edge 15 - 18+ + // IE 11/Edge don't find elements on a `[name='']` query in some cases. + // Adding a temporary attribute to the document before the selection works + // around the issue. + // Interestingly, IE 10 & older don't seem to have the issue. + input = document.createElement( "input" ); + input.setAttribute( "name", "" ); + el.appendChild( input ); + if ( !el.querySelectorAll( "[name='']" ).length ) { + rbuggyQSA.push( "\\[" + whitespace + "*name" + whitespace + "*=" + + whitespace + "*(?:''|\"\")" ); + } + + // Webkit/Opera - :checked should return selected option elements + // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked + // IE8 throws error here and will not see later tests + if ( !el.querySelectorAll( ":checked" ).length ) { + rbuggyQSA.push( ":checked" ); + } + + // Support: Safari 8+, iOS 8+ + // https://bugs.webkit.org/show_bug.cgi?id=136851 + // In-page `selector#id sibling-combinator selector` fails + if ( !el.querySelectorAll( "a#" + expando + "+*" ).length ) { + rbuggyQSA.push( ".#.+[+~]" ); + } + + // Support: Firefox <=3.6 - 5 only + // Old Firefox doesn't throw on a badly-escaped identifier. 
+ el.querySelectorAll( "\\\f" ); + rbuggyQSA.push( "[\\r\\n\\f]" ); + } ); + + assert( function( el ) { + el.innerHTML = "" + + ""; + + // Support: Windows 8 Native Apps + // The type and name attributes are restricted during .innerHTML assignment + var input = document.createElement( "input" ); + input.setAttribute( "type", "hidden" ); + el.appendChild( input ).setAttribute( "name", "D" ); + + // Support: IE8 + // Enforce case-sensitivity of name attribute + if ( el.querySelectorAll( "[name=d]" ).length ) { + rbuggyQSA.push( "name" + whitespace + "*[*^$|!~]?=" ); + } + + // FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled) + // IE8 throws error here and will not see later tests + if ( el.querySelectorAll( ":enabled" ).length !== 2 ) { + rbuggyQSA.push( ":enabled", ":disabled" ); + } + + // Support: IE9-11+ + // IE's :disabled selector does not pick up the children of disabled fieldsets + docElem.appendChild( el ).disabled = true; + if ( el.querySelectorAll( ":disabled" ).length !== 2 ) { + rbuggyQSA.push( ":enabled", ":disabled" ); + } + + // Support: Opera 10 - 11 only + // Opera 10-11 does not throw on post-comma invalid pseudos + el.querySelectorAll( "*,:x" ); + rbuggyQSA.push( ",.*:" ); + } ); + } + + if ( ( support.matchesSelector = rnative.test( ( matches = docElem.matches || + docElem.webkitMatchesSelector || + docElem.mozMatchesSelector || + docElem.oMatchesSelector || + docElem.msMatchesSelector ) ) ) ) { + + assert( function( el ) { + + // Check to see if it's possible to do matchesSelector + // on a disconnected node (IE 9) + support.disconnectedMatch = matches.call( el, "*" ); + + // This should fail with an exception + // Gecko does not error, returns false instead + matches.call( el, "[s!='']:x" ); + rbuggyMatches.push( "!=", pseudos ); + } ); + } + + if ( !support.cssSupportsSelector ) { + + // Support: Chrome 105+, Safari 15.4+ + // `:has()` uses a forgiving selector list as an argument so our regular + // 
`try-catch` mechanism fails to catch `:has()` with arguments not supported + // natively like `:has(:contains("Foo"))`. Where supported & spec-compliant, + // we now use `CSS.supports("selector(:is(SELECTOR_TO_BE_TESTED))")`, but + // outside that we mark `:has` as buggy. + rbuggyQSA.push( ":has" ); + } + + rbuggyQSA = rbuggyQSA.length && new RegExp( rbuggyQSA.join( "|" ) ); + rbuggyMatches = rbuggyMatches.length && new RegExp( rbuggyMatches.join( "|" ) ); + + /* Contains + ---------------------------------------------------------------------- */ + hasCompare = rnative.test( docElem.compareDocumentPosition ); + + // Element contains another + // Purposefully self-exclusive + // As in, an element does not contain itself + contains = hasCompare || rnative.test( docElem.contains ) ? + function( a, b ) { + + // Support: IE <9 only + // IE doesn't have `contains` on `document` so we need to check for + // `documentElement` presence. + // We need to fall back to `a` when `documentElement` is missing + // as `ownerDocument` of elements within `