diff --git a/.eslintignore b/.eslintignore new file mode 100644 index 0000000000000000000000000000000000000000..1cfd9487674ff4db01a4285097f5eae74010b2ae --- /dev/null +++ b/.eslintignore @@ -0,0 +1,4 @@ +extensions +extensions-disabled +repositories +venv \ No newline at end of file diff --git a/.eslintrc.js b/.eslintrc.js new file mode 100644 index 0000000000000000000000000000000000000000..f33aca09fa022638e45e8737386402711e464656 --- /dev/null +++ b/.eslintrc.js @@ -0,0 +1,91 @@ +/* global module */ +module.exports = { + env: { + browser: true, + es2021: true, + }, + extends: "eslint:recommended", + parserOptions: { + ecmaVersion: "latest", + }, + rules: { + "arrow-spacing": "error", + "block-spacing": "error", + "brace-style": "error", + "comma-dangle": ["error", "only-multiline"], + "comma-spacing": "error", + "comma-style": ["error", "last"], + "curly": ["error", "multi-line", "consistent"], + "eol-last": "error", + "func-call-spacing": "error", + "function-call-argument-newline": ["error", "consistent"], + "function-paren-newline": ["error", "consistent"], + "indent": ["error", 4], + "key-spacing": "error", + "keyword-spacing": "error", + "linebreak-style": ["error", "unix"], + "no-extra-semi": "error", + "no-mixed-spaces-and-tabs": "error", + "no-multi-spaces": "error", + "no-redeclare": ["error", {builtinGlobals: false}], + "no-trailing-spaces": "error", + "no-unused-vars": "off", + "no-whitespace-before-property": "error", + "object-curly-newline": ["error", {consistent: true, multiline: true}], + "object-curly-spacing": ["error", "never"], + "operator-linebreak": ["error", "after"], + "quote-props": ["error", "consistent-as-needed"], + "semi": ["error", "always"], + "semi-spacing": "error", + "semi-style": ["error", "last"], + "space-before-blocks": "error", + "space-before-function-paren": ["error", "never"], + "space-in-parens": ["error", "never"], + "space-infix-ops": "error", + "space-unary-ops": "error", + "switch-colon-spacing": "error", + "template-curly-spacing": ["error", "never"], + "unicode-bom": "error", + }, + globals: { + //script.js + gradioApp: "readonly", + executeCallbacks: "readonly", + onAfterUiUpdate: "readonly", + onOptionsChanged: "readonly", + onUiLoaded: "readonly", + onUiUpdate: "readonly", + uiCurrentTab: "writable", + uiElementInSight: "readonly", + uiElementIsVisible: "readonly", + //ui.js + opts: "writable", + all_gallery_buttons: "readonly", + selected_gallery_button: "readonly", + selected_gallery_index: "readonly", + switch_to_txt2img: "readonly", + switch_to_img2img_tab: "readonly", + switch_to_img2img: "readonly", + switch_to_sketch: "readonly", + switch_to_inpaint: "readonly", + switch_to_inpaint_sketch: "readonly", + switch_to_extras: "readonly", + get_tab_index: "readonly", + create_submit_args: "readonly", + restart_reload: "readonly", + updateInput: "readonly", + //extraNetworks.js + requestGet: "readonly", + popup: "readonly", + // from python + localization: "readonly", + // progressbar.js + randomId: "readonly", + requestProgress: "readonly", + // imageviewer.js + modalPrevImage: "readonly", + modalNextImage: "readonly", + // token-counters.js + setupTokenCounters: "readonly", + } +}; diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs new file mode 100644 index 0000000000000000000000000000000000000000..4104da632b8fcacf3a6f52eba093e63059749725 --- /dev/null +++ b/.git-blame-ignore-revs @@ -0,0 +1,2 @@ +# Apply ESLint +9c54b78d9dde5601e916f308d9a9d6953ec39430 \ No newline at end of file diff --git a/.gitattributes b/.gitattributes
index a6344aac8c09253b3b630fb776ae94478aa0275b..a77c378a15038f82a4d9c159b2dae2eb6d617efe 100644 --- a/.gitattributes +++ b/.gitattributes @@ -33,3 +33,32 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text *.zip filter=lfs diff=lfs merge=lfs -text *.zst filter=lfs diff=lfs merge=lfs -text *tfevents* filter=lfs diff=lfs merge=lfs -text +extensions/Stable-Diffusion-Webui-Civitai-Helper/img/all_in_one.png filter=lfs diff=lfs merge=lfs -text +models/Lora/141887\?type=Model filter=lfs diff=lfs merge=lfs -text +repositories/BLIP/BLIP.gif filter=lfs diff=lfs merge=lfs -text +repositories/generative-models/assets/sdxl_report.pdf filter=lfs diff=lfs merge=lfs -text +repositories/stable-diffusion-stability-ai/assets/stable-inpainting/merged-leopards.png filter=lfs diff=lfs merge=lfs -text +repositories/stable-diffusion-stability-ai/assets/stable-samples/depth2img/d2i.gif filter=lfs diff=lfs merge=lfs -text +repositories/stable-diffusion-stability-ai/assets/stable-samples/depth2img/depth2img01.png filter=lfs diff=lfs merge=lfs -text +repositories/stable-diffusion-stability-ai/assets/stable-samples/depth2img/depth2img02.png filter=lfs diff=lfs merge=lfs -text +repositories/stable-diffusion-stability-ai/assets/stable-samples/depth2img/merged-0000.png filter=lfs diff=lfs merge=lfs -text +repositories/stable-diffusion-stability-ai/assets/stable-samples/depth2img/merged-0004.png filter=lfs diff=lfs merge=lfs -text +repositories/stable-diffusion-stability-ai/assets/stable-samples/depth2img/merged-0005.png filter=lfs diff=lfs merge=lfs -text +repositories/stable-diffusion-stability-ai/assets/stable-samples/img2img/upscaling-in.png filter=lfs diff=lfs merge=lfs -text +repositories/stable-diffusion-stability-ai/assets/stable-samples/img2img/upscaling-out.png filter=lfs diff=lfs merge=lfs -text +repositories/stable-diffusion-stability-ai/assets/stable-samples/stable-unclip/unclip-variations.png filter=lfs diff=lfs merge=lfs -text +repositories/stable-diffusion-stability-ai/assets/stable-samples/stable-unclip/unclip-variations_noise.png filter=lfs diff=lfs merge=lfs -text +repositories/stable-diffusion-stability-ai/assets/stable-samples/txt2img/768/merged-0001.png filter=lfs diff=lfs merge=lfs -text +repositories/stable-diffusion-stability-ai/assets/stable-samples/txt2img/768/merged-0002.png filter=lfs diff=lfs merge=lfs -text +repositories/stable-diffusion-stability-ai/assets/stable-samples/txt2img/768/merged-0003.png filter=lfs diff=lfs merge=lfs -text +repositories/stable-diffusion-stability-ai/assets/stable-samples/txt2img/768/merged-0004.png filter=lfs diff=lfs merge=lfs -text +repositories/stable-diffusion-stability-ai/assets/stable-samples/txt2img/768/merged-0005.png filter=lfs diff=lfs merge=lfs -text +repositories/stable-diffusion-stability-ai/assets/stable-samples/txt2img/768/merged-0006.png filter=lfs diff=lfs merge=lfs -text +repositories/stable-diffusion-stability-ai/assets/stable-samples/txt2img/merged-0001.png filter=lfs diff=lfs merge=lfs -text +repositories/stable-diffusion-stability-ai/assets/stable-samples/txt2img/merged-0003.png filter=lfs diff=lfs merge=lfs -text +repositories/stable-diffusion-stability-ai/assets/stable-samples/txt2img/merged-0005.png filter=lfs diff=lfs merge=lfs -text +repositories/stable-diffusion-stability-ai/assets/stable-samples/txt2img/merged-0006.png filter=lfs diff=lfs merge=lfs -text +repositories/stable-diffusion-stability-ai/assets/stable-samples/txt2img/merged-0007.png filter=lfs diff=lfs merge=lfs -text 
+repositories/stable-diffusion-stability-ai/assets/stable-samples/upscaling/merged-dog.png filter=lfs diff=lfs merge=lfs -text +repositories/stable-diffusion-stability-ai/assets/stable-samples/upscaling/sampled-bear-x4.png filter=lfs diff=lfs merge=lfs -text +repositories/stable-diffusion-stability-ai/assets/stable-samples/upscaling/snow-leopard-x4.png filter=lfs diff=lfs merge=lfs -text diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml new file mode 100644 index 0000000000000000000000000000000000000000..d80b24e2bde995177bf61022701548c32796b0df --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -0,0 +1,138 @@ +name: Bug Report +description: You think something is broken in the UI +title: "[Bug]: " +labels: ["bug-report"] + +body: + - type: checkboxes + attributes: + label: Is there an existing issue for this? + description: Please search to see if an issue already exists for the bug you encountered, and that it hasn't been fixed in a recent build/commit. + options: + - label: I have searched the existing issues and checked the recent builds/commits + required: true + - type: markdown + attributes: + value: | + *Please fill this form with as much information as possible; don't forget to fill in "What OS..." and "What browsers", and provide screenshots if possible* + - type: textarea + id: what-did + attributes: + label: What happened? + description: Tell us what happened in a very clear and simple way + validations: + required: true + - type: textarea + id: steps + attributes: + label: Steps to reproduce the problem + description: Please provide us with precise step-by-step information on how to reproduce the bug + value: | + 1. Go to .... + 2. Press .... + 3. ... + validations: + required: true + - type: textarea + id: what-should + attributes: + label: What should have happened? + description: Tell us what you think the normal behavior should be + validations: + required: true + - type: input + id: commit + attributes: + label: Version or Commit where the problem happens + description: "Which webui version or commit are you running? (Do not write *Latest Version/repo/commit*, as this means nothing and will have changed by the time we read your issue. Rather, copy the **Version: v1.2.3** link at the bottom of the UI, or from the cmd/terminal if you can't launch it.)" + validations: + required: true + - type: dropdown + id: py-version + attributes: + label: What Python version are you running on? + multiple: false + options: + - Python 3.10.x + - Python 3.11.x (above, not supported yet) + - Python 3.9.x (below, not recommended) + - type: dropdown + id: platforms + attributes: + label: What platforms do you use to access the UI? + multiple: true + options: + - Windows + - Linux + - MacOS + - iOS + - Android + - Other/Cloud + - type: dropdown + id: device + attributes: + label: What device are you running WebUI on?
+ multiple: true + options: + - Nvidia GPUs (RTX 20 above) + - Nvidia GPUs (GTX 16 below) + - AMD GPUs (RX 6000 above) + - AMD GPUs (RX 5000 below) + - CPU + - Other GPUs + - type: dropdown + id: cross_attention_opt + attributes: + label: Cross attention optimization + description: What cross attention optimization are you using? (Settings -> Optimizations -> Cross attention optimization) + multiple: false + options: + - Automatic + - xformers + - sdp-no-mem + - sdp + - Doggettx + - V1 + - InvokeAI + - "None " + validations: + required: true + - type: dropdown + id: browsers + attributes: + label: What browsers do you use to access the UI? + multiple: true + options: + - Mozilla Firefox + - Google Chrome + - Brave + - Apple Safari + - Microsoft Edge + - type: textarea + id: cmdargs + attributes: + label: Command Line Arguments + description: Are you using any launching parameters/command line arguments (modified webui-user.bat/.sh)? If yes, please write them below. Write "No" otherwise. + render: Shell + validations: + required: true + - type: textarea + id: extensions + attributes: + label: List of extensions + description: Are you using any extensions other than built-ins? If yes, provide a list; you can copy it from the "Extensions" tab. Write "No" otherwise. + validations: + required: true + - type: textarea + id: logs + attributes: + label: Console logs + description: Please provide **full** cmd/terminal logs from the moment you started the UI to the end, after your bug happened. If it's very long, provide a link to pastebin or a similar service. + render: Shell + validations: + required: true + - type: textarea + id: misc + attributes: + label: Additional information + description: Please provide us with any relevant additional info or context. diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 0000000000000000000000000000000000000000..f58c94a9be6847193a971ac67aa83e9a6d75c0ae --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,5 @@ +blank_issues_enabled: false +contact_links: + - name: WebUI Community Support + url: https://github.com/AUTOMATIC1111/stable-diffusion-webui/discussions + about: Please ask and answer questions here. diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml new file mode 100644 index 0000000000000000000000000000000000000000..35a887408c1a0cb7d5bbf0a8444d0903a708be75 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.yml @@ -0,0 +1,40 @@ +name: Feature request +description: Suggest an idea for this project +title: "[Feature Request]: " +labels: ["enhancement"] + +body: + - type: checkboxes + attributes: + label: Is there an existing issue for this? + description: Please search to see if an issue already exists for the feature you want, and that it's not implemented in a recent build/commit. + options: + - label: I have searched the existing issues and checked the recent builds/commits + required: true + - type: markdown + attributes: + value: | + *Please fill this form with as much information as possible; provide screenshots and/or illustrations of the feature if possible* + - type: textarea + id: feature + attributes: + label: What would your feature do?
+ description: Tell us about your feature in a very clear and simple way, and what problem it would solve + validations: + required: true + - type: textarea + id: workflow + attributes: + label: Proposed workflow + description: Please provide us with step-by-step information on how you'd like the feature to be accessed and used + value: | + 1. Go to .... + 2. Press .... + 3. ... + validations: + required: true + - type: textarea + id: misc + attributes: + label: Additional information + description: Add any other context or screenshots about the feature request here. diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 0000000000000000000000000000000000000000..c9fcda2e2790861c7bf4aa4cb37e01545c48fb95 --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,15 @@ +## Description + + +* a simple description of what you're trying to accomplish + +* a summary of changes in code + +* which issues it fixes, if any + + +## Screenshots/videos: + + +## Checklist: + + +- [ ] I have read the [contributing wiki page](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Contributing) + +- [ ] I have performed a self-review of my own code + +- [ ] My code follows the [style guidelines](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Contributing#code-style) + +- [ ] My code passes [tests](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Tests) diff --git a/.github/workflows/on_pull_request.yaml b/.github/workflows/on_pull_request.yaml new file mode 100644 index 0000000000000000000000000000000000000000..78e608ee945831e36ab832636e9a7ed9e180c462 --- /dev/null +++ b/.github/workflows/on_pull_request.yaml @@ -0,0 +1,38 @@ +name: Linter + +on: + - push + - pull_request + +jobs: + lint-python: + name: ruff + runs-on: ubuntu-latest + if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name + steps: + - name: Checkout Code + uses: actions/checkout@v3 + - uses: actions/setup-python@v4 + with: + python-version: 3.11 + # NB: there's no cache: pip here since we're not installing anything + # from the requirements.txt file(s) in the repository; it's faster + # not to have GHA download an (at the time of writing) 4 GB cache + # of PyTorch and other dependencies. + - name: Install Ruff + run: pip install ruff==0.0.272 + - name: Run Ruff + run: ruff .
+ lint-js: + name: eslint + runs-on: ubuntu-latest + if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name + steps: + - name: Checkout Code + uses: actions/checkout@v3 + - name: Install Node.js + uses: actions/setup-node@v3 + with: + node-version: 18 + - run: npm i --ci + - run: npm run lint diff --git a/.github/workflows/run_tests.yaml b/.github/workflows/run_tests.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3dafaf8dcfcd14fd7a7ca3385806efad5550b871 --- /dev/null +++ b/.github/workflows/run_tests.yaml @@ -0,0 +1,73 @@ +name: Tests + +on: + - push + - pull_request + +jobs: + test: + name: tests on CPU with empty model + runs-on: ubuntu-latest + if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name + steps: + - name: Checkout Code + uses: actions/checkout@v3 + - name: Set up Python 3.10 + uses: actions/setup-python@v4 + with: + python-version: 3.10.6 + cache: pip + cache-dependency-path: | + **/requirements*txt + launch.py + - name: Install test dependencies + run: pip install wait-for-it -r requirements-test.txt + env: + PIP_DISABLE_PIP_VERSION_CHECK: "1" + PIP_PROGRESS_BAR: "off" + - name: Setup environment + run: python launch.py --skip-torch-cuda-test --exit + env: + PIP_DISABLE_PIP_VERSION_CHECK: "1" + PIP_PROGRESS_BAR: "off" + TORCH_INDEX_URL: https://download.pytorch.org/whl/cpu + WEBUI_LAUNCH_LIVE_OUTPUT: "1" + PYTHONUNBUFFERED: "1" + - name: Start test server + run: > + python -m coverage run + --data-file=.coverage.server + launch.py + --skip-prepare-environment + --skip-torch-cuda-test + --test-server + --do-not-download-clip + --no-half + --disable-opt-split-attention + --use-cpu all + --api-server-stop + 2>&1 | tee output.txt & + - name: Run tests + run: | + wait-for-it --service 127.0.0.1:7860 -t 600 + python -m pytest -vv --junitxml=test/results.xml --cov . 
--cov-report=xml --verify-base-url test + - name: Kill test server + if: always() + run: curl -vv -XPOST http://127.0.0.1:7860/sdapi/v1/server-stop && sleep 10 + - name: Show coverage + run: | + python -m coverage combine .coverage* + python -m coverage report -i + python -m coverage html -i + - name: Upload main app output + uses: actions/upload-artifact@v3 + if: always() + with: + name: output + path: output.txt + - name: Upload coverage HTML + uses: actions/upload-artifact@v3 + if: always() + with: + name: htmlcov + path: htmlcov diff --git a/.github/workflows/update_space.yml b/.github/workflows/update_space.yml new file mode 100644 index 0000000000000000000000000000000000000000..67dbc84e4e59320a7c98b94460eb976e5cd2984f --- /dev/null +++ b/.github/workflows/update_space.yml @@ -0,0 +1,28 @@ +name: Run Python script + +on: + push: + branches: + - main + +jobs: + build: + runs-on: ubuntu-latest + + steps: + - name: Checkout + uses: actions/checkout@v2 + + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: '3.9' + + - name: Install Gradio + run: python -m pip install gradio + + - name: Log in to Hugging Face + run: python -c 'import huggingface_hub; huggingface_hub.login(token="${{ secrets.hf_token }}")' + + - name: Deploy to Spaces + run: gradio deploy diff --git a/.github/workflows/warns_merge_master.yml b/.github/workflows/warns_merge_master.yml new file mode 100644 index 0000000000000000000000000000000000000000..ae2aab6ba8ce5684755b5fb4083267111bcd23cd --- /dev/null +++ b/.github/workflows/warns_merge_master.yml @@ -0,0 +1,19 @@ +name: Pull requests can't target master branch + +"on": + pull_request: + types: + - opened + - synchronize + - reopened + branches: + - master + +jobs: + check: + runs-on: ubuntu-latest + steps: + - name: Warn about merge into master + run: | + echo -e "::warning::This pull request merges directly into the \"master\" branch; normally, development happens on the \"dev\" branch."
+ exit 1 diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..09734267ff5c4d51c2f9f1c85f6f8bf2cc225fb9 --- /dev/null +++ b/.gitignore @@ -0,0 +1,39 @@ +__pycache__ +*.ckpt +*.safetensors +*.pth +/ESRGAN/* +/SwinIR/* +/repositories +/venv +/tmp +/model.ckpt +/models/**/* +/GFPGANv1.3.pth +/gfpgan/weights/*.pth +/ui-config.json +/outputs +/config.json +/log +/webui.settings.bat +/embeddings +/styles.csv +/params.txt +/styles.csv.bak +/webui-user.bat +/webui-user.sh +/interrogate +/user.css +/.idea +notification.mp3 +/SwinIR +/textual_inversion +.vscode +/extensions +/test/stdout.txt +/test/stderr.txt +/cache.json* +/config_states/ +/node_modules +/package-lock.json +/.coverage* diff --git a/.ipynb_checkpoints/webui-user-checkpoint.sh b/.ipynb_checkpoints/webui-user-checkpoint.sh new file mode 100644 index 0000000000000000000000000000000000000000..bbc02bf1e0fa472879ec3697b5208a995d35eeb2 --- /dev/null +++ b/.ipynb_checkpoints/webui-user-checkpoint.sh @@ -0,0 +1,48 @@ +# #!/bin/bash +######################################################### +# Uncomment and change the variables below to your need:# +######################################################### + +# Install directory without trailing slash +install_dir="/workspace" + +# Name of the subdirectory +#clone_dir="stable-diffusion-webui" + +# Commandline arguments for webui.py, for example: export COMMANDLINE_ARGS="--medvram --opt-split-attention" +export COMMANDLINE_ARGS="--xformers --port 3000 --listen --enable-insecure-extension-access --no-half-vae --share" +#export XFORMERS_PACKAGE="xformers==0.0.17.dev447" + +# python3 executable +#python_cmd="python3" + +# git executable +#export GIT="git" + +# python3 venv without trailing slash (defaults to ${install_dir}/${clone_dir}/venv) +venv_dir="/workspace/venv" + +# script to launch to start the app +#export LAUNCH_SCRIPT="launch.py" + +# install command for torch +# export TORCH_COMMAND="pip install torch torchvision --index-url https://download.pytorch.org/whl/cu118" + +# Requirements file to use for stable-diffusion-webui +#export REQS_FILE="./extensions/sd_dreambooth_extension/requirements.txt" + +# Fixed git repos +#export K_DIFFUSION_PACKAGE="" +#export GFPGAN_PACKAGE="" + +# Fixed git commits +#export STABLE_DIFFUSION_COMMIT_HASH="" +#export TAMING_TRANSFORMERS_COMMIT_HASH="" +#export CODEFORMER_COMMIT_HASH="" +#export BLIP_COMMIT_HASH="" + +# Uncomment to enable accelerated launch +#export ACCELERATE="True" + +########################################### + diff --git a/.pylintrc b/.pylintrc new file mode 100644 index 0000000000000000000000000000000000000000..53254e5dcfd871c8c0f0f4dec9dceeb1ba967eda --- /dev/null +++ b/.pylintrc @@ -0,0 +1,3 @@ +# See https://pylint.pycqa.org/en/latest/user_guide/messages/message_control.html +[MESSAGES CONTROL] +disable=C,R,W,E,I diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000000000000000000000000000000000000..920c29130b8c983f13662a7733111735af01cc0d --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,352 @@ +## 1.5.1 + +### Minor: + * support parsing text encoder blocks in some new LoRAs + * delete scale checker script due to user demand + +### Extensions and API: + * add postprocess_batch_list script callback + +### Bug Fixes: + * fix TI training for SD1 + * fix reload altclip model error + * prepend the pythonpath instead of overriding it + * fix typo in SD_WEBUI_RESTARTING + * if txt2img/img2img raises an exception, finally call state.end() + * fix composable diffusion 
weight parsing + * restyle Startup profile for dark theme users + * fix webui not launching with --nowebui + * catch exception for non git extensions + * fix some options missing from /sdapi/v1/options + * fix for extension update status always saying "unknown" + * fix display of extra network cards that have `<>` in the name + * update lora extension to work with python 3.8 + + +## 1.5.0 + +### Features: + * SD XL support + * user metadata system for custom networks + * extended Lora metadata editor: set activation text, default weight, view tags, training info + * Lora extension rework to include other types of networks (all that were previously handled by LyCORIS extension) + * show github stars for extensions + * img2img batch mode can read extra stuff from png info + * img2img batch works with subdirectories + * hotkeys to move prompt elements: alt+left/right + * restyle time taken/VRAM display + * add textual inversion hashes to infotext + * optimization: cache git extension repo information + * move generate button next to the generated picture for mobile clients + * hide cards for networks of incompatible Stable Diffusion version in Lora extra networks interface + * skip installing packages with pip if they are all already installed - startup speedup of about 2 seconds + +### Minor: + * checkbox to check/uncheck all extensions in the Installed tab + * add gradio user to infotext and to filename patterns + * allow gif for extra network previews + * add options to change colors in grid + * use natural sort for items in extra networks + * Mac: use empty_cache() from torch 2 to clear VRAM + * added automatic support for installing the right libraries for Navi3 (AMD) + * add option SWIN_torch_compile to accelerate SwinIR upscale + * suppress printing TI embedding info at start to console by default + * speedup extra networks listing + * added `[none]` filename token. + * removed thumbs extra networks view mode (use settings tab to change width/height/scale to get thumbs) + * add always_discard_next_to_last_sigma option to XYZ plot + * automatically switch to 32-bit float VAE if the generated picture has NaNs without the need for `--no-half-vae` commandline flag.
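The automatic 32-bit VAE switch in the last Minor item above boils down to a decode-and-retry pattern. A minimal sketch of that idea, assuming hypothetical `vae_half`/`vae_full` decode callables rather than the webui's actual internals:

```python
import torch

def decode_with_nan_fallback(vae_half, vae_full, latents):
    # Try the fast half-precision VAE first.
    image = vae_half(latents.half())
    # If the decoded image contains NaNs, retry once in float32 --
    # the step the changelog entry automates instead of requiring
    # the --no-half-vae flag.
    if torch.isnan(image).any():
        image = vae_full(latents.float())
    return image
```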
+ +### Extensions and API: + * api endpoints: /sdapi/v1/server-kill, /sdapi/v1/server-restart, /sdapi/v1/server-stop + * allow Script to have custom metaclass + * add model-exists status check to /sdapi/v1/options + * rename --add-stop-route to --api-server-stop + * add `before_hr` script callback + * add callback `after_extra_networks_activate` + * disable rich exception output in console for API by default, use WEBUI_RICH_EXCEPTIONS env var to enable + * return http 404 when thumb file not found + * allow replacing extensions index with environment variable + +### Bug Fixes: + * fix: catch errors when retrieving extension index #11290 + * fix very slow loading speed of .safetensors files when reading from network drives + * API cache cleanup + * fix UnicodeEncodeError when writing to file in CLIP Interrogator batch mode + * fix warning of 'has_mps' deprecated from PyTorch + * fix problem with extra network saving images as previews losing generation info + * fix throwing exception when trying to resize image with I;16 mode + * fix for #11534: canvas zoom and pan extension hijacking shortcut keys + * fixed launch script to be runnable from any directory + * don't add "Seed Resize: -1x-1" to API image metadata + * correctly remove end parenthesis with ctrl+up/down + * fix --subpath on newer gradio versions + * fix: check that fill size is non-zero when resizing (fixes #11425) + * use submit and blur for quick settings textbox + * save img2img batch with images.save_image() + * prevent running preload.py for disabled extensions + * fix: previously, model name was added together with directory name to infotext and to [model_name] filename pattern; directory name is now not included + + +## 1.4.1 + +### Bug Fixes: + * add queue lock for refresh-checkpoints + +## 1.4.0 + +### Features: + * zoom controls for inpainting + * run basic torch calculation at startup in parallel to reduce the performance impact of first generation + * option to pad prompt/neg prompt to the same length + * remove taming_transformers dependency + * custom k-diffusion scheduler settings + * add an option to show selected settings in main txt2img/img2img UI + * sysinfo tab in settings + * infer styles from prompts when pasting params into the UI + * an option to control the behavior of the above + +### Minor: + * bump Gradio to 3.32.0 + * bump xformers to 0.0.20 + * Add option to disable token counters + * tooltip fixes & optimizations + * make it possible to configure filename for the zip download + * `[vae_filename]` pattern for filenames + * Revert discarding penultimate sigma for DPM-Solver++(2M) SDE + * change UI reorder setting to multiselect + * read version info from CHANGELOG.md if git version info is not available + * link footer API to Wiki when API is not active + * persistent conds cache (opt-in optimization) + +### Extensions: + * After installing extensions, webui properly restarts the process rather than reloading the UI + * Added VAE listing to web API via /sdapi/v1/sd-vae + * custom unet support + * Add onAfterUiUpdate callback + * refactor EmbeddingDatabase.register_embedding() to allow unregistering + * add before_process callback for scripts + * add ability for alwayson scripts to specify section and let user reorder those sections + +### Bug Fixes: + * Fix dragging text to prompt + * fix incorrect quoting for infotext values with colon in them + * fix "hires.
fix" prompt sharing same labels with txt2img_prompt + * Fix s_min_uncond default type int + * Fix for #10643 (Inpainting mask sometimes not working) + * fix bad styling for thumbs view in extra networks #10639 + * fix for empty list of optimizations #10605 + * small fixes to prepare_tcmalloc for Debian/Ubuntu compatibility + * fix --ui-debug-mode exit + * patch GitPython to not use leaky persistent processes + * fix duplicate Cross attention optimization after UI reload + * torch.cuda.is_available() check for SdOptimizationXformers + * fix hires fix using wrong conds in second pass if using Loras. + * handle exception when parsing generation parameters from png info + * fix upcast attention dtype error + * forcing Torch Version to 1.13.1 for RX 5000 series GPUs + * split mask blur into X and Y components, patch Outpainting MK2 accordingly + * don't die when a LoRA is a broken symlink + * allow activation of Generate Forever during generation + + +## 1.3.2 + +### Bug Fixes: + * fix files served out of tmp directory even if they are saved to disk + * fix postprocessing overwriting parameters + +## 1.3.1 + +### Features: + * revert default cross attention optimization to Doggettx + +### Bug Fixes: + * fix bug: LoRA don't apply on dropdown list sd_lora + * fix png info always added even if setting is not enabled + * fix some fields not applying in xyz plot + * fix "hires. fix" prompt sharing same labels with txt2img_prompt + * fix lora hashes not being added properly to infotex if there is only one lora + * fix --use-cpu failing to work properly at startup + * make --disable-opt-split-attention command line option work again + +## 1.3.0 + +### Features: + * add UI to edit defaults + * token merging (via dbolya/tomesd) + * settings tab rework: add a lot of additional explanations and links + * load extensions' Git metadata in parallel to loading the main program to save a ton of time during startup + * update extensions table: show branch, show date in separate column, and show version from tags if available + * TAESD - another option for cheap live previews + * allow choosing sampler and prompts for second pass of hires fix - hidden by default, enabled in settings + * calculate hashes for Lora + * add lora hashes to infotext + * when pasting infotext, use infotext's lora hashes to find local loras for `` entries whose hashes match loras the user has + * select cross attention optimization from UI + +### Minor: + * bump Gradio to 3.31.0 + * bump PyTorch to 2.0.1 for macOS and Linux AMD + * allow setting defaults for elements in extensions' tabs + * allow selecting file type for live previews + * show "Loading..." 
for extra networks when displaying for the first time + * suppress ENSD infotext for samplers that don't use it + * clientside optimizations + * add options to show/hide hidden files and dirs in extra networks, and to not list models/files in hidden directories + * allow whitespace in styles.csv + * add option to reorder tabs + * move some functionality (swap resolution and set seed to -1) to client + * option to specify editor height for img2img + * button to copy image resolution into img2img width/height sliders + * switch from pyngrok to ngrok-py + * lazy-load images in extra networks UI + * set "Navigate image viewer with gamepad" option to false by default, by request + * change upscalers to download models into user-specified directory (from commandline args) rather than the default models/<...> + * allow hiding buttons in ui-config.json + +### Extensions: + * add /sdapi/v1/script-info api + * use Ruff to lint Python code + * use ESLint to lint JavaScript code + * add/modify CFG callbacks for Self-Attention Guidance extension + * add command and endpoint for graceful server stopping + * add some locals (prompts/seeds/etc) from processing function into the Processing class as fields + * rework quoting for infotext items that have commas in them to use JSON (should be backwards compatible except for cases where it didn't work previously) + * add /sdapi/v1/refresh-loras api checkpoint post request + * tests overhaul + +### Bug Fixes: + * fix an issue preventing the program from starting if the user specifies a bad Gradio theme + * fix broken prompts from file script + * fix symlink scanning for extra networks + * fix --data-dir ignored when launching via webui-user.bat COMMANDLINE_ARGS + * allow web UI to be run fully offline + * fix inability to run with --freeze-settings + * fix inability to merge checkpoint without adding metadata + * fix extra networks' save preview image not adding infotext for jpeg/webm + * remove blinking effect from text in hires fix and scale resolution preview + * make links to `http://<...>.git` extensions work in the extension tab + * fix bug with webui hanging at startup due to hanging git process + + +## 1.2.1 + +### Features: + * add an option to always refer to LoRA by filenames + +### Bug Fixes: + * never refer to LoRA by an alias if multiple LoRAs have same alias or the alias is called none + * fix upscalers disappearing after the user reloads UI + * allow bf16 in safe unpickler (resolves problems with loading some LoRAs) + * allow web UI to be run fully offline + * fix localizations not working + * fix error for LoRAs: `'LatentDiffusion' object has no attribute 'lora_layer_mapping'` + +## 1.2.0 + +### Features: + * do not wait for Stable Diffusion model to load at startup + * add filename patterns: `[denoising]` + * directory hiding for extra networks: dirs starting with `.` will hide their cards on extra network tabs unless specifically searched for + * LoRA: for the `<...>` text in prompt, use name of LoRA that is in the metadata of the file, if present, instead of filename (both can be used to activate LoRA) + * LoRA: read infotext params from kohya-ss's extension parameters if they are present and if that extension is not active + * LoRA: fix some LoRAs not working (ones that have 3x3 convolution layer) + * LoRA: add an option to use old method of applying LoRAs (producing same results as with kohya-ss) + * add version to infotext, footer and console output when starting + * add links to wiki for filename pattern settings + * add extended info for
quicksettings setting and use multiselect input instead of a text field + +### Minor: + * bump Gradio to 3.29.0 + * bump PyTorch to 2.0.1 + * `--subpath` option for gradio for use with reverse proxy + * Linux/macOS: use existing virtualenv if already active (the VIRTUAL_ENV environment variable) + * do not apply localizations if there are none (possible frontend optimization) + * add extra `None` option for VAE in XYZ plot + * print error to console when batch processing in img2img fails + * create HTML for extra network pages only on demand + * allow directories starting with `.` to still list their models for LoRA, checkpoints, etc + * put infotext options into their own category in settings tab + * do not show licenses page when user selects Show all pages in settings + +### Extensions: + * tooltip localization support + * add API method to get LoRA models with prompt + +### Bug Fixes: + * re-add `/docs` endpoint + * fix gamepad navigation + * make the lightbox fullscreen image function properly + * fix squished thumbnails in extras tab + * keep "search" filter for extra networks when user refreshes the tab (previously it showed everything after you refreshed) + * fix webui showing the same image if you configure the generation to always save results into same file + * fix bug with upscalers not working properly + * fix MPS on PyTorch 2.0.1, Intel Macs + * make it so that custom context menu from contextMenu.js only disappears after the user's click, ignoring non-user click events + * prevent Reload UI button/link from reloading the page when it's not yet ready + * fix prompts from file script failing to read contents from a drag/drop file + + +## 1.1.1 +### Bug Fixes: + * fix an error that prevents running webui on PyTorch<2.0 without --disable-safe-unpickle + +## 1.1.0 +### Features: + * switch to PyTorch 2.0.0 (except for AMD GPUs) + * visual improvements to custom code scripts + * add filename patterns: `[clip_skip]`, `[hasprompt<>]`, `[batch_number]`, `[generation_number]` + * add support for saving init images in img2img, and record their hashes in infotext for reproducibility + * automatically select current word when adjusting weight with ctrl+up/down + * add dropdowns for X/Y/Z plot + * add setting: Stable Diffusion/Random number generator source: makes it possible to make images generated from a given manual seed consistent across different GPUs + * support Gradio's theme API + * use TCMalloc on Linux by default; possible fix for memory leaks + * add optimization option to remove negative conditioning at low sigma values #9177 + * embed model merge metadata in .safetensors file + * extension settings backup/restore feature #9169 + * add "resize by" and "resize to" tabs to img2img + * add option "keep original size" to textual inversion images preprocess + * image viewer scrolling via analog stick + * button to restore the progress from a lost session / tab reload + +### Minor: + * bump Gradio to 3.28.1 + * change "scale to" to sliders in Extras tab + * add labels to tool buttons to make it possible to hide them + * add tiled inference support for ScuNET + * add branch support for extension installation + * change Linux installation script to install into current directory rather than `/home/username` + * sort textual inversion embeddings by name (case-insensitive) + * allow styles.csv to be symlinked or mounted in docker + * remove the "do not add watermark to images" option + * make selected tab configurable with UI config + * make the extra networks UI fixed height and scrollable +
* add `disable_tls_verify` arg for use with self-signed certs + +### Extensions: + * add reload callback + * add `is_hr_pass` field for processing + +### Bug Fixes: + * fix broken batch image processing on 'Extras/Batch Process' tab + * add "None" option to extra networks dropdowns + * fix FileExistsError for CLIP Interrogator + * fix /sdapi/v1/txt2img endpoint not working on Linux #9319 + * fix disappearing live previews and progressbar during slow tasks + * fix fullscreen image view not working properly in some cases + * prevent alwayson_scripts args param resizing script_arg list when they are inserted in it + * fix prompt schedule for second order samplers + * fix image mask/composite for weird resolutions #9628 + * use correct images for previews when using AND (see #9491) + * one broken image in img2img batch won't stop all processing + * fix image orientation bug in train/preprocess + * fix Ngrok recreating tunnels every reload + * fix `--realesrgan-models-path` and `--ldsr-models-path` not working + * fix `--skip-install` not working + * use SAMPLE file format in Outpainting Mk2 & Poorman + * do not fail all LoRAs if some have failed to load when making a picture + +## 1.0.0 + * everything diff --git a/CODEOWNERS b/CODEOWNERS new file mode 100644 index 0000000000000000000000000000000000000000..2c937f6f1e519f864d15d5233e1fb86c6cdfac2f --- /dev/null +++ b/CODEOWNERS @@ -0,0 +1,12 @@ +* @AUTOMATIC1111 + +# if you were managing a localization and were removed from this file, this is because +# the intended way to do localizations now is via extensions. See: +# https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Developing-extensions +# Make a repo with your localization and since you are still listed as a collaborator +# you can add it to the wiki page yourself. This change is because some people complained +# the git commit log is cluttered with things unrelated to almost everyone and +# because I believe this is the best overall for the project to handle localizations almost +# entirely without my oversight. + + diff --git a/LICENSE.txt b/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..211d32e752cb61bd056436e8f7a806f12a626bb7 --- /dev/null +++ b/LICENSE.txt @@ -0,0 +1,663 @@ + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (c) 2023 AUTOMATIC1111 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. 
+ + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. 
If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. 
+ + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. 
+ + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. 
+
+  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+  17. Interpretation of Sections 15 and 16.
+
+  If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU Affero General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+    GNU Affero General Public License for more details.
+
+    You should have received a copy of the GNU Affero General Public License
+    along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If your software can interact with users remotely through a computer
+network, you should also make sure that it provides a way for users to
+get its source. For example, if your program is a web application, its
+interface could display a "Source" link that leads users to an archive
+of the code. There are many ways you could offer source, and different
+solutions will be better for different programs; see section 13 for the
+specific requirements.
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU AGPL, see
+<https://www.gnu.org/licenses/>.
diff --git a/README.md b/README.md
index cf4a34773d513a66efcc8987ff0800f16eae8a9b..d7fbaf1bab212256af33e440d264920d759cd6be 100644
--- a/README.md
+++ b/README.md
@@ -1,12 +1,179 @@
 ---
-title: SD Webui
-emoji: 👁
-colorFrom: blue
-colorTo: red
+title: SD_webui
+app_file: webui.py
 sdk: gradio
 sdk_version: 3.40.1
-app_file: app.py
-pinned: false
 ---
+# Stable Diffusion web UI
+A browser interface based on the Gradio library for Stable Diffusion.
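Gradio's model is simple: Python callbacks bound to browser components, served from the Python process itself. The sketch below is a minimal, self-contained illustration of that pattern only — the `generate` stub and the component labels are placeholders made up for this example, not code from this repository:

```python
import gradio as gr

def generate(prompt: str) -> str:
    # Placeholder: the real webui would run Stable Diffusion here.
    return f"Would generate an image for: {prompt!r}"

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    button = gr.Button("Generate")
    output = gr.Textbox(label="Result")
    # Clicking the button invokes the Python callback on the server.
    button.click(fn=generate, inputs=prompt, outputs=output)

demo.launch()  # serves the interface to the browser
```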
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+![](screenshot.png)
+
+## Features
+[Detailed feature showcase with images](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features):
+- Original txt2img and img2img modes
+- One-click install and run script (but you still must install Python and git)
+- Outpainting
+- Inpainting
+- Color Sketch
+- Prompt Matrix
+- Stable Diffusion Upscale
+- Attention, specify parts of text that the model should pay more attention to (see the parsing sketch after this feature list)
+    - a man in a `((tuxedo))` - will pay more attention to tuxedo
+    - a man in a `(tuxedo:1.21)` - alternative syntax
+    - select text and press `Ctrl+Up` or `Ctrl+Down` (or `Command+Up` or `Command+Down` if you're on macOS) to automatically adjust attention to selected text (code contributed by an anonymous user)
+- Loopback, run img2img processing multiple times
+- X/Y/Z plot, a way to draw a 3-dimensional plot of images with different parameters
+- Textual Inversion
+    - have as many embeddings as you want and use any names you like for them
+    - use multiple embeddings with different numbers of vectors per token
+    - works with half-precision floating point numbers
+    - train embeddings on 8GB (also reports of 6GB working)
+- Extras tab with:
+    - GFPGAN, neural network that fixes faces
+    - CodeFormer, face restoration tool as an alternative to GFPGAN
+    - RealESRGAN, neural network upscaler
+    - ESRGAN, neural network upscaler with a lot of third party models
+    - SwinIR and Swin2SR ([see here](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/2092)), neural network upscalers
+    - LDSR, Latent diffusion super resolution upscaling
+- Resizing aspect ratio options
+- Sampling method selection
+    - Adjust sampler eta values (noise multiplier)
+    - More advanced noise setting options
+- Interrupt processing at any time
+- 4GB video card support (also reports of 2GB working)
+- Correct seeds for batches
+- Live prompt token length validation
+- Generation parameters
+    - parameters you used to generate images are saved with that image
+    - in PNG chunks for PNG, in EXIF for JPEG
+    - can drag the image to the PNG info tab to restore generation parameters and automatically copy them into the UI
+    - can be disabled in settings
+    - drag and drop an image/text-parameters to promptbox
+- Read Generation Parameters Button, loads parameters in promptbox to UI
+- Settings page
+- Running arbitrary Python code from UI (must run with `--allow-code` to enable)
+- Mouseover hints for most UI elements
+- Possible to change defaults/min/max/step values for UI elements via text config
+- Tiling support, a checkbox to create images that can be tiled like textures
+- Progress bar and live image generation preview
+    - Can use a separate neural network to produce previews with almost no VRAM or compute requirements
+- Negative prompt, an extra text field that allows you to list what you don't want to see in the generated image
+- Styles, a way to save part of a prompt and easily apply it via a dropdown later
+- Variations, a way to generate the same image but with tiny differences
+- Seed resizing, a way to generate the same image but at a slightly different resolution
+- CLIP interrogator, a button that tries to guess the prompt from an image
+- Prompt Editing, a way to change the prompt mid-generation, say to start making a watermelon and switch to an anime girl midway
+- Batch Processing, process a group of files using img2img
+- Img2img Alternative, reverse Euler method of cross attention control
+- Highres Fix, a
convenience option to produce high-resolution pictures in one click without the usual distortions
+- Reloading checkpoints on the fly
+- Checkpoint Merger, a tab that allows you to merge up to 3 checkpoints into one
+- [Custom scripts](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Scripts) with many extensions from the community
+- [Composable-Diffusion](https://energy-based-model.github.io/Compositional-Visual-Generation-with-Composable-Diffusion-Models/), a way to use multiple prompts at once
+    - separate prompts using uppercase `AND`
+    - also supports weights for prompts: `a cat :1.2 AND a dog AND a penguin :2.2`
+- No token limit for prompts (original stable diffusion lets you use up to 75 tokens)
+- DeepDanbooru integration, creates danbooru-style tags for anime prompts
+- [xformers](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Xformers), major speed increase for select cards (add `--xformers` to commandline args)
+- via extension: [History tab](https://github.com/yfszzx/stable-diffusion-webui-images-browser): view, direct and delete images conveniently within the UI
+- Generate forever option
+- Training tab
+    - hypernetworks and embeddings options
+    - Preprocessing images: cropping, mirroring, autotagging using BLIP or deepdanbooru (for anime)
+- Clip skip
+- Hypernetworks
+- Loras (same as Hypernetworks but more pretty)
+- A separate UI where you can choose, with preview, which embeddings, hypernetworks or Loras to add to your prompt
+- Can select to load a different VAE from the settings screen
+- Estimated completion time in progress bar
+- API (see the client example after the Documentation section below)
+- Support for dedicated [inpainting model](https://github.com/runwayml/stable-diffusion#inpainting-with-stable-diffusion) by RunwayML
+- via extension: [Aesthetic Gradients](https://github.com/AUTOMATIC1111/stable-diffusion-webui-aesthetic-gradients), a way to generate images with a specific aesthetic by using CLIP image embeds (implementation of [https://github.com/vicgalle/stable-diffusion-aesthetic-gradients](https://github.com/vicgalle/stable-diffusion-aesthetic-gradients))
+- [Stable Diffusion 2.0](https://github.com/Stability-AI/stablediffusion) support - see [wiki](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#stable-diffusion-20) for instructions
+- [Alt-Diffusion](https://arxiv.org/abs/2211.06679) support - see [wiki](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#alt-diffusion) for instructions
+- Now without any bad letters!
+- Load checkpoints in safetensors format
+- Eased resolution restriction: the generated image's dimensions must be a multiple of 8 rather than 64
+- Now with a license!
+- Reorder elements in the UI from settings screen
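To make the attention syntax from the list above concrete, here is a deliberately simplified parser. It handles only the two forms shown there — `((word))`, where each parenthesis pair multiplies the weight by 1.1, and an explicit `(word:weight)` — whereas the webui's real parser also supports nesting, `[...]` de-emphasis, and escapes. Treat this as an illustration, not the project's implementation:

```python
import re

# Toy parser for the two attention forms shown in the feature list.
def parse_attention(prompt: str) -> list[tuple[str, float]]:
    tokens = []
    pattern = re.compile(
        r"\((?P<body>[^()]+):(?P<weight>[\d.]+)\)"  # (text:weight)
        r"|(?P<parens>\(+)(?P<emph>[^()]+)\)+"      # ((text)) -> 1.1 per paren
        r"|(?P<plain>[^()]+)"                       # unweighted text
    )
    for m in pattern.finditer(prompt):
        if m.group("body"):
            tokens.append((m.group("body"), float(m.group("weight"))))
        elif m.group("emph"):
            tokens.append((m.group("emph"), 1.1 ** len(m.group("parens"))))
        elif m.group("plain").strip():
            tokens.append((m.group("plain").strip(), 1.0))
    return tokens

print(parse_attention("a man in a ((tuxedo))"))
# -> [('a man in a', 1.0), ('tuxedo', 1.2100000000000002)]  i.e. 1.1**2
print(parse_attention("a man in a (tuxedo:1.21)"))
# -> [('a man in a', 1.0), ('tuxedo', 1.21)]
```

The takeaway is only that each parenthesis level multiplies emphasis by 1.1 and an explicit `:weight` overrides it; the resulting per-token weights then scale the text conditioning.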
+
+## Installation and Running
+Make sure the required [dependencies](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Dependencies) are met and follow the instructions available for both [NVidia](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-NVidia-GPUs) (recommended) and [AMD](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-AMD-GPUs) GPUs.
+
+Alternatively, use online services (like Google Colab):
+
+- [List of Online Services](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Online-Services)
+
+### Installation on Windows 10/11 with NVidia-GPUs using release package
+1. Download `sd.webui.zip` from [v1.0.0-pre](https://github.com/AUTOMATIC1111/stable-diffusion-webui/releases/tag/v1.0.0-pre) and extract its contents.
+2. Run `update.bat`.
+3. Run `run.bat`.
+> For more details see [Install-and-Run-on-NVidia-GPUs](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-NVidia-GPUs)
+
+### Automatic Installation on Windows
+1. Install [Python 3.10.6](https://www.python.org/downloads/release/python-3106/) (newer versions of Python do not support torch), checking "Add Python to PATH".
+2. Install [git](https://git-scm.com/download/win).
+3. Download the stable-diffusion-webui repository, for example by running `git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui.git`.
+4. Run `webui-user.bat` from Windows Explorer as a normal, non-administrator user.
+
+### Automatic Installation on Linux
+1. Install the dependencies:
+```bash
+# Debian-based:
+sudo apt install wget git python3 python3-venv
+# Red Hat-based:
+sudo dnf install wget git python3
+# Arch-based:
+sudo pacman -S wget git python3
+```
+2. Navigate to the directory you would like the webui to be installed in and execute the following command:
+```bash
+bash <(wget -qO- https://raw.githubusercontent.com/AUTOMATIC1111/stable-diffusion-webui/master/webui.sh)
+```
+3. Run `webui.sh`.
+4. Check `webui-user.sh` for options.
+
+### Installation on Apple Silicon
+
+Find the instructions [here](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Installation-on-Apple-Silicon).
+
+## Contributing
+Here's how to add code to this repo: [Contributing](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Contributing)
+
+## Documentation
+
+The documentation was moved from this README over to the project's [wiki](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki).
+
+For the purposes of getting Google and other search engines to crawl the wiki, here's a link to the (not for humans) [crawlable wiki](https://github-wiki-see.page/m/AUTOMATIC1111/stable-diffusion-webui/wiki).
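As promised next to the API entry in the feature list: a minimal client sketch against the built-in REST API, assuming the server was started with `--api` and is listening on the default `127.0.0.1:7860`. The endpoint and fields follow the project's API wiki page; any parameter omitted from the payload falls back to the server's defaults, which is why such a small request works:

```python
import base64

import requests  # third-party: pip install requests

# Minimal txt2img call against a locally running webui started with `--api`.
payload = {"prompt": "a man in a (tuxedo:1.21)", "steps": 20}
response = requests.post("http://127.0.0.1:7860/sdapi/v1/txt2img", json=payload)
response.raise_for_status()

# The server returns generated images as base64-encoded PNGs.
for i, image_b64 in enumerate(response.json()["images"]):
    with open(f"output_{i}.png", "wb") as f:
        f.write(base64.b64decode(image_b64))
```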
+
+## Credits
+Licenses for borrowed code can be found in the `Settings -> Licenses` screen, and also in the `html/licenses.html` file.
+
+- Stable Diffusion - https://github.com/CompVis/stable-diffusion, https://github.com/CompVis/taming-transformers
+- k-diffusion - https://github.com/crowsonkb/k-diffusion.git
+- GFPGAN - https://github.com/TencentARC/GFPGAN.git
+- CodeFormer - https://github.com/sczhou/CodeFormer
+- ESRGAN - https://github.com/xinntao/ESRGAN
+- SwinIR - https://github.com/JingyunLiang/SwinIR
+- Swin2SR - https://github.com/mv-lab/swin2sr
+- LDSR - https://github.com/Hafiidz/latent-diffusion
+- MiDaS - https://github.com/isl-org/MiDaS
+- Ideas for optimizations - https://github.com/basujindal/stable-diffusion
+- Cross Attention layer optimization - Doggettx - https://github.com/Doggettx/stable-diffusion, original idea for prompt editing.
+- Cross Attention layer optimization - InvokeAI, lstein - https://github.com/invoke-ai/InvokeAI (originally http://github.com/lstein/stable-diffusion)
+- Sub-quadratic Cross Attention layer optimization - Alex Birch (https://github.com/Birch-san/diffusers/pull/1), Amin Rezaei (https://github.com/AminRezaei0x443/memory-efficient-attention)
+- Textual Inversion - Rinon Gal - https://github.com/rinongal/textual_inversion (we're not using his code, but we are using his ideas).
+- Idea for SD upscale - https://github.com/jquesnelle/txt2imghd
+- Noise generation for outpainting mk2 - https://github.com/parlance-zz/g-diffuser-bot
+- CLIP interrogator idea and borrowing some code - https://github.com/pharmapsychotic/clip-interrogator
+- Idea for Composable Diffusion - https://github.com/energy-based-model/Compositional-Visual-Generation-with-Composable-Diffusion-Models-PyTorch
+- xformers - https://github.com/facebookresearch/xformers
+- DeepDanbooru - interrogator for anime diffusers https://github.com/KichangKim/DeepDanbooru
+- Sampling in float32 precision from a float16 UNet - marunine for the idea, Birch-san for the example Diffusers implementation (https://github.com/Birch-san/diffusers-play/tree/92feee6)
+- Instruct pix2pix - Tim Brooks (star), Aleksander Holynski (star), Alexei A. Efros (no star) - https://github.com/timothybrooks/instruct-pix2pix
+- Security advice - RyotaK
+- UniPC sampler - Wenliang Zhao - https://github.com/wl-zhao/UniPC
+- TAESD - Ollin Boer Bohan - https://github.com/madebyollin/taesd
+- LyCORIS - KohakuBlueleaf
+- Initial Gradio script - posted on 4chan by an Anonymous user. Thank you Anonymous user.
+- (You)
diff --git a/__pycache__/launch.cpython-310.pyc b/__pycache__/launch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..422be298a3ebf852b63d04a7b9f3966470612a0c
Binary files /dev/null and b/__pycache__/launch.cpython-310.pyc differ
diff --git a/__pycache__/relauncher.cpython-310.pyc b/__pycache__/relauncher.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b22a1632b06ad8d983ae3f88520cab409711d63b
Binary files /dev/null and b/__pycache__/relauncher.cpython-310.pyc differ
diff --git a/__pycache__/webui.cpython-310.pyc b/__pycache__/webui.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ba95f5f652db39ac727ba0f4e3d0d8c8846b1c58
Binary files /dev/null and b/__pycache__/webui.cpython-310.pyc differ
diff --git a/cache.json b/cache.json
new file mode 100644
index 0000000000000000000000000000000000000000..de6402bd07c19e290d368601ea0c14ef4a053625
--- /dev/null
+++ b/cache.json
@@ -0,0 +1,219 @@
+{
+    "hashes": {
+        "checkpoint/v1-5-pruned-emaonly.safetensors": {
+            "mtime": 1692314737.0,
+            "sha256": "6ce0161689b3853acaa03779ec93eafe75a02f4ced659bee03f50797806fa2fa"
+        },
+        "checkpoint/sd_xl_base_1.0.safetensors": {
+            "mtime": 1690358783.0,
+            "sha256": "31e35c80fc4829d14f90153f4c74cd59c90b779f6afe05a74cd6120b893f7e5b"
+        }
+    },
+    "safetensors-metadata": {
+        "lora/sd_xl_offset_example-lora_1.0": {
+            "mtime": 1690358783.0,
+            "value": {
+                "ss_adaptive_noise_scale": "None",
+                "ss_base_model_version": "sdxl_base_v0-9",
+                "ss_cache_latents": "True",
+                "ss_caption_dropout_every_n_epochs": "0",
+                "ss_caption_dropout_rate": "0.0",
+                "ss_caption_tag_dropout_rate": "0.0",
+                "ss_clip_skip": "None",
+                "ss_dataset_dirs": {
+                    "": {
+                        "n_repeats": 1,
+                        "img_count": 7412
+                    }
+                },
+                "ss_datasets": "[{\"is_dreambooth\": true, \"batch_size_per_device\": 1, \"num_train_images\": 7412, \"num_reg_images\": 0, \"resolution\": [1024, 1024], \"enable_bucket\": false, \"min_bucket_reso\": null, \"max_bucket_reso\": null, \"tag_frequency\": {\"\": {\"contrast\": 7412}}, \"bucket_info\": null, \"subsets\": [{\"img_count\": 7412, \"num_repeats\": 1, \"color_aug\": false, \"flip_aug\": false, \"random_crop\": false, \"shuffle_caption\": false, \"keep_tokens\": 1, \"image_dir\": \"\", \"class_tokens\": \"contrast\", \"is_reg\": false}]}]",
+                "ss_epoch": "4",
"ss_face_crop_aug_range": "None", + "ss_full_fp16": "False", + "ss_gradient_accumulation_steps": "1", + "ss_gradient_checkpointing": "False", + "ss_learning_rate": "0.0009", + "ss_lowram": "False", + "ss_lr_scheduler": "cosine_with_restarts", + "ss_lr_warmup_steps": "100", + "ss_max_grad_norm": "1.0", + "ss_max_token_length": "None", + "ss_max_train_steps": "7750", + "ss_min_snr_gamma": "None", + "ss_mixed_precision": "fp16", + "ss_multires_noise_discount": "0.3", + "ss_multires_noise_iterations": "None", + "ss_network_alpha": "1", + "ss_network_args": { + "conv_dim": "8" + }, + "ss_network_dim": "8", + "ss_network_dropout": "None", + "ss_network_module": "networks.lora", + "ss_new_sd_model_hash": "a0f13b7eb4f4807f6863db3da874cb01e3cd0d5e2c481b6b01b8ea4a3139542c", + "ss_noise_offset": "0.2", + "ss_num_batches_per_epoch": "155", + "ss_num_epochs": "50", + "ss_num_reg_images": "0", + "ss_num_train_images": "7412", + "ss_optimizer": "transformers.optimization.Adafactor(scale_parameter=False,relative_step=False,warmup_init=False)", + "ss_output_name": "offset_0.2", + "ss_prior_loss_weight": "1.0", + "ss_scale_weight_norms": "None", + "ss_sd_model_hash": "b1facb5b", + "ss_sd_model_name": "SDXL_1-0.safetensors", + "ss_sd_scripts_commit_hash": "71a6d49d0663fbdeacab11c1050c33384695122b", + "ss_seed": "42", + "ss_session_id": "2452006521", + "ss_steps": "620", + "ss_tag_frequency": { + "": { + "contrast": 7412 + } + }, + "ss_text_encoder_lr": "None", + "ss_training_comment": "None", + "ss_training_finished_at": "1689972654.070616", + "ss_training_started_at": "1689970447.43792", + "ss_unet_lr": "None", + "ss_v2": "False", + "sshs_legacy_hash": "fec84cf7", + "sshs_model_hash": "8e3e833226b356a1bb9688b472e8e36315cd8a656f1a2d576bc13edd251392dd", + "modelspec.sai_model_spec": "1.0.0", + "modelspec.architecture": "stable-diffusion-xl-v1-base/lora", + "modelspec.title": "SDXL 1.0 Official Offset Example LoRA", + "modelspec.author": "StabilityAI", + "modelspec.description": "This is an example LoRA for SDXL 1.0 (Base) that adds Offset Noise to the model, trained by KaliYuga for StabilityAI. When applied, it will extend the image's contrast (range of brightness to darkness), which is particularly popular for producing very dark or nighttime images. At low percentages, it improves contrast and perceived image quality; at higher percentages it can be a powerful tool to produce perfect inky black. This small file (50 megabytes) demonstrates the power of LoRA on SDXL and produces a clear visual upgrade to the base model without needing to replace the 6.5 gigabyte full model. The LoRA was heavily trained with the keyword `contrasts`, which can be used alter the high-contrast effect of offset noise.", + "modelspec.usage_hint": "Recommended strength: 50% (0.5). 
The keyword `contrasts` may alter the effect.", + "modelspec.date": "2023-07-26", + "modelspec.resolution": "1024x1024", + "modelspec.prediction_type": "epsilon", + "modelspec.license": "CreativeML Open RAIL++-M License", + "modelspec.thumbnail": "data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAEAAQADASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwDwWnKhdwo/H6UDrWt4f0p9V1SC2ztSWQKW+tPYlanqnwo8KJqVzceIZ4Alsn7uzjPA4GC5rB+JupLdav8AY7RPMdODK3Un0UdhXa+PvEf/AAgnhaw0HRBsu7lNiY6qnQn6kmovAHwseVE1fXQXnk+YRn39awlK70OmENLvRHJeEvCd7Dpcl7Gp+2SDCZ4wD3rPvfB2rIztK6gg8lBX0TLpEVvBsjRUXGOB0rjteiSKN/61z1ZVIanXRjSnokeFXOkXcB2yncPfrWVNbOjEsp+orvdUkDztheM1gTwj5tw4PSqp1W9yauHitjmSh6r+dMIGcMOa0rmBQTjjFUGdgdrcjtXSnc4pRsyPb3B/GlByeRg/zowPx74pwAYY3D8aokXG36U11wfan8AYZgRRjcuO46GhMTRAaSnMKaBmrJCinYpvegBRQRQOtOpAMopSKSgQUClxSUDFphFO5pKAG0UUoFAxKKXFJQAUUUUAWbeIzzpGP4jz9K9I+FGjteeJI7mWPKx8ovp6n8K4zw7btJPcSpjzViZIs9nYYz+Ar3v4X6Gml6XJckZIAiRj3xyx/E1Mn0HFdTCvbCLxN+0Ba2s/z22nwCQqehK84/MivcSyQxAKAMdq8L8H3PnfG3U5ieTEw/WvbDGW5J61z0JXT/rzOvERtZf12KlxIZgwXkVy2t6Y0kJdsjiuzCxoPmxiuQ8YeJ9M0a0ae8lAA4WNfvMfanVimtRUZNS0PMrvQ5POmbGFByCelcvqIhjDKLiFmHYNUGu+ML7xLfbEJtbIHAjQ849zVeeLSoIQFj3PjkseprBUuXc6pVufYyblyWI6iqEsJIJXr6VflWNjmMYHtUew11J2OKSuZ8f7w+Wx2yD7p/pRuIJV1AYVPcw8bgMEd6jJ+0pnpKo5H94etWmZNW0GlgoyEBoWZfTFNjDZw3HvTmiB5HUUxD5FDAMOhqFRU0J3IyGohxxVIli0w0+kbpQIbTx0pg608dKAEIpuOafTT1oAXFNNOppoABSGlFBFMBtFFFIYUmKWigBKSnUUAbmhTfZpops8ebhh68V9C+FL3d4fSNTgCMsPoa+brQMBg8KxBHPevofwLaSXHh2K5YH94uwemAMVjWv0NabXU4vwpI1p8ZEB6Tq6/XvXuupata2EIEk0asegLV4V4gtptG8VWOoQt5MuSglP8OeM1Q1PStW1TUGZdQlumUZJDVyUalo+f+R6FSlztPoe4yX7TQs0bBhtzkd6+ePHV695r0yyu7BDgKx+7XpHw61u4XXYdB1NSVmBWN26hgM4/GuQ+K+hLZeMJTDgJKucD1q4ttqTJlFRTgkZPgDSLDUtZeXUpYYbO3TexlcKpP1NUvGmo6KdRki0xVljVsBo1wp9wT1rY8M+Dm1YJcJfGAwL8sarneT1JzxVgfB3Vb28BE0cMHqzbm962XLuzB86XKjgtGtp7+68uOMkZ5A7VtX+l/YvlbqRmvUrLwTY+E9Ocq2+XHzO3U15x4muxJduF6Vk6jlOyNoUlGndnNTAEEVmBGEmVPI6Vfds55quoIfNdEdDkmrsazBjgjDdx60oUgllHHcU9YRJKzsc+gpElVJzG/PqadybW3FjUCQMOhqu/Dn61eMexsjoapyriQ1UWRJDaGxim5xSE5qiAHWn0ylBoAdTaGpKAAdaDSDrSmmAClptOoAaelJTjSd6QCUUtJQNBRRRQB0HhvTW1O/SzjXdJN95u0cfc/U19X+HdLi0/QLO1RMBUGB+FfPnwn0xr7xDKgH7mIK1w/r6LX0pYSCcvIP9WgwKmWoHmnxR0USaY8iryhJyO1eZeFPFkXh6WaS+zKSu1Ex1PbmvojxPp632i3IYZzGTXy/baSNT1KWw3YKSkCuLlUJvm23PQpSlOCUd9jrPAF9d+KPi3pt19yCB3nZVHAAU4H61r/GFiPEKSY+XoDiu/wDAXhTTvCtiLjaBI0eXkI5rzD4peJLW91aSK2P+jA4aR+m72qpPmSsi4K0pXd9N/Mh8EaiI9QWFHGW/hr262YRwBm9K+XPtPkGO7tHCtGQQ6Hqa9p8M+KJtU0eMz8SBcMPWpvyalOPtA8aar8siqeMV4hqcpknYk969J8WXDM7nPFeZ3aFpCfelR1bkysQuWCiijSFakK4pCODXUcDRXa5McBAXk96rRrubJ6mnzD92KSE8itFsZSbbLsT7oyjdR0qGbqDQpxJTpFymaUdxPYqnrSU8jimVoZhRRRSAU0lFFAAOtFFFABSUvekPWgYtAFApaAGmkpe1JQNBRRRQB7r4TgOg+GrKytYgdY1MrI4HXnpn2x/WvbrC0Flp0Fnu3OFBkb1Pc15V8OdPa41G58Q3jZEY8i2T0A716pay+YpYn5m6+1Zp6lNEHiS6jsfD17cyHCpGcflXynpWp/Y/FK3UpwkkmT+de9fE7VJLjSn0+0OQPvmvn/XNOIhE6KRj
hvauecoym4v0OyipQgprvc+nbPUIbvRU27XSSPBHYjFcN4h+FEV3pv2rS5mL9Wt5juU/Q9a4v4d+Mp49ulXchO37hJ6ivWfEPjT+xNItVsohPe3J2RRgZ5+lZrR8s+h0JSlZ0tn/AFqeL3nw/v8AR50+2RLFCfmUFuK0YdSi0u38tJAD2INaWteGvFHiCafWdeuv7PgwPklfLKPRVHAFcBLpsLXG0TTSRjuxxu/CnKN/iZryOCvFG7dapJqYDHBXnkdD7GsS5t1BbA4rTs4EtrRwAFQ/pUF0oJ45BFTHR6ETvJamFJDz0qpcfINo6mtO6kWFcDluwrLZSxLN1NdMTimuhVnX93UMPXPpVuRcqaht7cyvsDAc1qtjnktQU5k4qZzhcVLLaG0kBPpVrQtA1PxRqsel6Vb+dcuCxJOEjQdWdv4VHr+HWmiWZJqI16hqHwxt9J0HUWGrw6jqlvGsn2eGNgGwfmVCfvdR07A15k2SST1qk09iWmnZjKKU0lAgooooAKKKKACkpaSgYUvakpe1MBDSUppDSAKKKKBnu/hDWzBpljbbtqyLvPuSa9IbVGtdPLJyxHX0rwjRLgXAtYI84g8pE29WfPIr6C/skG0jRxwRgiudxkm7GvNF2ucjrFlNcac90oLKOWPqK4rxBoot9OeQr8ksW8fUV7fa6eknh+e3df4GWvMfiW8emeGbaL/loItv58VyyotWkdVKtf3TxdY3tJUuYSQ6HIIr1bwxqNjr0trdTki7tlIUg8qfWvOkg36OZSPu4qnZarcaJqaXMB4B+ZfUVpOPPtui6dT2Xoz1rV9D8TeINRMEc/m27YBb2HTNV7z4eS6YhaafeduSWNeh+DfEFlqnh2G9t3XLj5h3B9K5rxxrymKWNG+bGAKza9y7ep0KrKc7W0RT+Fgsk8UahYXKxzNJbEwrIu5Tg/N19qn8Z/Cp5LiW+8NhQsgJl09jjHvEfX/ZP4V5vp+s3Gkaxb6hbKr3cThkBcqfccdiOOa+lbC/h1bTYLqMqfMUEqrBtp7jIJHH1rqpRUoWfQ4K8nCq3HqfK914X1WMNJ9meVQSCVHII7EdQfasKZWiZkdSrKcMCMEV9S+K/CR1mBrvT5fsurR8+YOFnX0YevvWLo3gS6vlZvENlZYX7rBQWPPf8hVqPQydTS581sRtOapvIUbKNg19axfDTwnLcNONIidnP335z6kDpVhvAPhR7Y2g0a0FvC2dzJyGPfNaKNjJzufKemW+pa9qFrptlbPc3Nw4SJFHU+57AdSewr6P0TwzY+A/DDadDIslzNh766UY85/QE/wL0A+p711K2eieGbbZYW9vbyOMMyqA230FYMmoabqjOBfR8cbOVx269/xpaN2Q0nbmZwlxqe7VnRWELKpeI+pHYfhn868h1qGK31y9ggBEKSkID2HX+teoeK/Dul2shvINet7Zw2dsz4z9ABn9K878VWL2GsyQyMrSKFV3U5BIA/oRSgnF2fU1rOM4proYTU2nNTa0ZzBRRR3pAJS0tJQOwneiiimAUopKUUABptKaSkAoopRRQB7z8K/CqM1vfzKJPKlLkdg2P6V7FJKxu40A/dopJNUvCfhu28KeHILNGMkxG+WRjku561fk2rueUhUAySaiRSHiUW+kyzP8qtk8+lfPHxH1h9b1ArGcwxttAHtXonjPxcbphp9mSIB95x3rym/ljW4LdVBrknUvJJdDtpUrRbl1KbAQaOtufvMRmsLUIdrLmtG7uvMmDdvT0qzJ4e1TVBBLZWzzRyHAdRkA+/tThdMc2nFmZofijU/DE7/YXDROctE3Qn1puoeMb7Ubl3dBGZDzzmu/0D4O32phZ792twY2DRkfMkgPH1BFcT4o8E6noGutpzQPOSpeN41PzqOTj6V0ezW7Rz+2klyxZW0b7JLqKS6lHJdRxsHaMyrFFgH+Nj29hya+m/A+uWWq6X5VmIY44+FS3iKRgegJ5P5Cvl2PSZVeP+0bS+jib5kCRffx15NfR/wkYS+HzJb6TDplop2pCiksx/vF2+9n24rSNrWMp3bud+sYVtx5PpT/AC98bAn73U+1B+bOKgmn2WgcjBMbUEjInRFaVeFH7uNfYdaxk1RStxZyj7rldw6SD1H54pb26Wx0xxy8sdsQoHXOMk/nXKWFxNdeENPvGzHJJuMmDj5iegzRe+gJaXI/EFnJdxp5FyrXEQIhlY9F757EgdPc+1ctZ6P4yV2/s2KG2c/xSsPLKjpkmrusfaYH8tzsiOMeX0Ue57//AF6itb68cnbO72wBwwJbB9B7+9QkovY1u3Hc5u41vWjq/wDZPiXTdLv45Ac4iCSDtlWGMc1yOtqNRvtUlj3Abw67+ckDmup8exXt5qlnPawKoii2kxrgn2NczCX89pHyGfhwe1OTCK0OVIyKbnFWbmIw3UsTDBDH8qrMK0MRAc0oFNFSCkNBjimmnGk7UITG0UUUwClFJQOtAAaTvT6bjmkAtFFITQB9rTarbafYi7vpAvHyqTyfpXmPivxrdanJHb2p8q3duQp5x71l+LtUutWmaWJz9mhzGoB6H1rnNFZ7vWmgkOcR8ZriqVG9EdtKmlZs05Jzc7kQfKvBPrWDqWnzpACkbHe2AccV2Xh7Q5Ly5lt/MVSHwc16Xp3hW2it1SaFWA6gjj61FGlKWvQ0rV4w93qeYeB/hvf3F9DfXsSeXG3MbjOa9p0vw/YaTE0drAkSEltijgE9cVpWttHbW4SNQAB2qOd9nQ4Nd8YqK0OCUnJ3YpRVzwBVOaC2nmV5YkYp91mXJBPFQzX+3huM96oXWqJHHuZwOeAKHIXLc0nsrW/kS3kgieNecFQQBWi/kWkSooWNeiqorO8POZ4prg9GO1fpV+8QmCVkAMmOM0XugSsczrPjzSdFuryK4hvGt7KNGvLuOLMdvv8Aug8gknI4GTWvj+0NMgntpVeF0V43Xo6HByPwrzjxH4V1nxBc3Ma/aZtJurmG6cW0iiWOSNdpRlYjcpwCDngjpXofhuG7tfDqLqCrFLvdhCH3+UueELfxEDqRxmkmmNpo8v8AFniW5s9futOTaWkJUMW4QDg/mK4ofEm4itItOs9Lint9MDSTPcXGwSYPb8xgDmtH4hW0+m+IbjVLhGEM25oCf4j6YrkPCfgPxD4mhlkg0vz7K4yfOMyxhGz1BNF7MdtDe/4WFd69IIpdPjgaXoIRvBH4+1d1pE1nHaxefKqTbeFkUofoAateF/hjZeG5Vv8AUpI7i8VNkMEXKQr3yx+8x7ngelct4/8AEwutQ+wxJGY4Dgjrz9aLvdhpsi94l1qB7V4kTcCOW4rzF2JmY56mr9xdmVVnt1244lhJyPqKz2+aTcOh5qJO5UdDH1kYu1PPKisw10GsQNLaLKnIXqK54MG6GtIvQiS1GkU8UhFFNkoD1pRShaSgY0ikp5FNIoEJRnmkNIvWgCQUu2lUVPHFmk3YaVyuy1GauvCcVUdcGhO4NWPWvEEGreH/ABDc2ccLT2s7blwM9a1NC8PXz6kjfZXRyvJPbNWbD4h6fqYha+i23Ue
N6gZ/KvVfCd5Ya1ZC9syskLcKwGOlL2MWveWqBVpL4XuTaH4as7FBO1uouCPmfua25BtGFqfGBgVBL0NNJJWQNt6sLeYsu1hgiqWoSPnCAGl+0pbh3dgAB3pkkH2xg7M6RKMkLwW/+tSbGlc5nWRqclo6WlqZrgcqFOB+JPSsPTNG1q91NYdUaKGH7zJHKHLe1dRrupnyvsOm5M5GGKqWCj14qTw7oFzZRia4dlLDJVuXY+rensB0rmu51LLY63BU6XM3q9jprO3S1tUhQAKo4FI0y+YYyeT0qZWzH9Kz7ohW3EH1yO1dZxlqK1SElgACaz9X1G0060c3EypGiktk4rRtphPbRyhw4I6g5ryf4zWdxqVpY2Gn7/Ou7tI2YyYVGPAJ9Ae/uKTdtSoRTdmzh21Q+KvEdxBcyJcWccheMH7rR57V6X4dvtP0LThZWrBYUHmBBzsB/pXm50CPwhZ3Ftbi4bUXiAkvlwSOcNtDHisS6ur25mV0e7iJIIO9BjjH3V65/nmhPl1YP3tj0nxN8QbEwS21nNM8zIQHiwqqffPNeVOUG93Yu7nLO1R3kxgAAEckoG44IyPw/wAis5bi5eUb4lUnpu5//XUybYkrGh5LSr+6Vgo71WlIVjGnLfxGtyysri8RUafYWXKrjaD+Paq9xpiQCRcFJEOCH6qfQ/40co0zMcBrdkPcVyU6GOZl64PUd66ieXZnjleorntRKNc70/iGacRyK6Pzg1LtzzVenpKV68iqILAX5aYRzUqurpxTCKAGUEUGl7UARMKEFKelCUCJ41yRWraWu8ZxVC1Xc4FdZpVnvQnHasKsrG1ONzJmsiq9Kx7mHaTxXfzWK7MEVyeqW3luwxUU56l1IaXO5+G+ged4qjM8fmKgO5SOOa+k7Gzt7G1WK3iWJB/CowK8z+EFqH0uS5Nm6lnP79yPnHbHsK9TPTjivQrW5rI83C8zppyGPIR1OKrXNyqxE5+g7k0+aMN/Hg1nR/JqUaynjnb6E1zydkdcVdkdvp3m3H2y/O7acxw5+Vfc+prH8S+IxHFLa27kSyjZlTyM1j/EXxdc6fJDp9gCGYh3kHcelReB7Rda1v7XcHcYVDqCep9cen1rjnNyfs4dep6VOkoQ9rU2XQ7PwjoR0nTEadme4kALljk10RxmlIwMDiq0pdFLL82O1dkIqEVFHnTm5ycpbsmPAOKoXjIYmDbcY5LdKii1q1lwDIqknbgnoemK5Txd4stNDtvNnkUICeD3Ipt2JSbdg1Tx7Y+GrwWflvcnhp0U/NGD0/HA6e4rP8ReFIPHtvHq1rq0gtAuViiG1g455PXINedWdjrd5a6l4kuLNWS9Jl8pz8+zsR6fSsXS/Hl7oz79LvDERndFIMq3swrGNa8mmtDrlh+WKknqbetrqVmiR3dwl+F3IGlGJFHpuH3vesbzJlPFvBGXU4YL0I/x6VX1Lxvqur3ESJawPcP8iLGhZnY+g9c1u3PgrUrXSI73Xr4q7KS1vFgBP9nI6n1qpVFHUzhRlN2OXbThel2s0K3KNuNtIcAn0B/hb0I4P166ei2EVyA8Mu1lP76CVRwfcdAc9xjPcA1lSXtvbXsU9tvzJD5Tl23ZIOM+/b9ajmuZLqY3KSeW5BWRgeo6HPuOPqMGrU09SJUmnY9AktB9iU28ZWeLOYz1I749a5XXL/dcKzEb3hCSH1IOAT+GKr2vii7s7ZrG/cyMgxDcdx6BvUe9c/fX0mpXfnSfLIRyB3rRyi1oYqMovUju7skEryyjAPqKx5H3tkdKtXT4G2qfXmoRYUUUUxCgkHIOKkSbs1RUUATsRTQaYrcYNKDQAppUpppyUCL9iP3orudIwI8e1cLZnEgrstJlwg+lctY6aWxsz42VyWuKPMaukmm+SuW1qbLtzWdPcuex9YeFtMj0nw/a2sagbUGcdzWjPKQCFBJqvpM6zaXCyHgoMGrLofLIHBPU16EtW7nDBJRSRlXE08YPQ+mDWBruqSwabLcxcTwDzAPcVsXMRVvkV5DknOa5zxDEx0+bKk/KcjuK56l7HRStzJkXiAWuq6Lb6lEFMckYkVseozWz8OtDbT7O4v5G3Ndbdny42qO1edeBtXW5sLvw3LJmS2cyW+erRk8j8DXuWnhU0+BE6IgXHpXPQp/vXJ9DsxFVqioLZks0qxIWY4FY0+oSyuy2yb1A5csFUfia17iFJEO4Z9jXHa39rtg0VnHZpCQWke4cgRj1wOoP4V2M89HGeNYTHI2o2OqK17G4kMEXMXHqR1P6VwuixXvj3X0k1FibS2YkoehOcnNdjqV6uya0udZgkuGUo9lY23lLH7k9Sa8t07xDfeHb25s7VS0juVAXqawnd3SOqlypps9Q+Ivi+DStMTRdNCid48SyDoi9OPc14W0E+palHBZwPLPKQkccYyzGu4tvCl5rzm51G7YySHJii5OT2LV6R4e8N6N4JiaZIl+1uuHnblh7D0H86iNRR1W/4G9Sm5JR6fiR+APANt4RsxqeqiJ9UZcls5EI/ug+vqa5Dx741TULme0s8mLoeeAf7w/qKm8e+OJLhTYWspRf4yD2rzrTtK1HxHcGDT4XaMH95Mfuj6mqS5tWQ3ye7HcqRvLdyxW9uC77iSfTkV1UumW+j6a7TPmWY5x6H/8AUfyrWg0jTvCdkXkZXuccn3rita1dr2csxJH8K54ppuT02IsoK8tyDzNxKu2VA+UnqR2qI8ybgcHsarI0zAsVJQ9DjpUqtmtbWML33I70fvORtJGcVVq3dkNGuT8wPH0qpVrYzaswpaKKYgpKWigBOlOPrTacv3cUAGacpqOlBoAuW74YGum026Cgc1yUbYNadrc7QMmsqkbmlOVjqJ7sbetczqM2925qeW9ymM1lXEu4mopwsypy0PsDw9qdsNOggjdSFG3IPcV0wAZQexFfM3h/xAdO1u1nvbyWOxiByqc7u4GPc19H6ReLqOk290n3ZUDAZ6V6Fenyu55uEre0jZ9B00OeSxAHpXNa5CkcUjxsz4HzoRyB611VxkoQvHua5+fTcbrkTSZIzyQB+Vc01dHbF2Z87+I9SXRvFEGqWK+VNBKC23gOueQR9K+k/C+qR6npSTROGR8Mv0Iryb4h+H9Kv9NnuZU8m6iUklAOaxvhl48g0FoNMn8ySNjtFxM+1U9lHU/U4HtWMF7yaOiTvFp/I+jZX2LwNzdh61yPiHTTeR/ZGnSKa8Ro5bk8lU6sFH0GBnpXVWtxFeWyXETB0dchgc5rL1LZEXlEJlkVCwGP84rexzJnnuqrp3h+2NvaW0iyH5muSN3n8ctk8EjuOD6V4prcf2PX5bkBB5vzoyHgj29K9u15NQa1lDWxjki5cW43GDIyAydHUjuOeCK8j8V24ntRMfK8xPmV4jlGHt/geaymtTam9DofA+s2kUjXN9KESMcA1Q8a+ORqGsSjT2xahAFbPfua89S8dV2huD2qN2LKSeB/Os1S7m7xGmm5u+GNBm8Y+I1tGdlt0HmTvnnb/ia9U8UXWn+CdCttPsIgiCMhcY5YHqfUkGvOvCOpjw
/G9xGwWV+prP8AFPiF9buRLIWKp3J6n2oa5nYcZKMbvcoarrE17IZZmJz0XNW/CvhO68SXfmyZjs0OZJTxn2FQeHtDfW75ZLjK2yn5vf2rste8S2+laeNN0zamFx8vAFU3b3YmaXN70jA8VjTbW+W000/uY1+ZT2Pc5rkHlJdthwueKdc3DTyMxYnJyT61DWkY2RjUnzPQOpyaWkxS1ZmFFFFABRRRQAUL1xRQOCKADFFTMgPNREYNACqamR8d6r0uaVgLLS+9QO2abk0UWC509nd718mX7ynvXunwU1/7RYXWjTMQ8chli3vksv8AFgdgDivH/E2hPp063cC5ifkMO1dN8F7or8RbXzHx59vKij1OAfy4Nd1VNRcWeThpRlJVIdT6NuCOFPRjiuT8Rao1tFgkKAMHnj6H0rq2IZ8N3yVHuK838eq8SFoZFE0qsAjeo6VxSdlc9aKvJIw5hpviuG+0rU0kCIQEmhfbIvGQfcexrxe6in8P6/PYsY5JbWTb5iRrlx1ByQccV6rcSQax4bV7Wb7LqAj2pOnDK47N614jI832mX7Q7NNvIkZjkk55yazpqxrVdz6i+EfjA+IrW5spXZpbaNW6ZVR0wCODXeamzR2spRC77cqg7kc/0rwH9n5yvjC8UD71m27LnP3l6L0/HrX0HelhC5VcttIGe1bSMEeU+J7jWtUnt5bHNvZvlGmx8649exGcivMdZtxby3EMs6zFs72QYBPevX/FMWrNHGlpOtvAZyN6jkI6jGR0IzkH657V5L4i0r+yZGhcSCXJBy25PbB9Kzkro1g7M88CBZ2AzgHvU4UELk9+lJKoWRs9c1GXOc+lLca0NNEL7VJ+UVes9DiuhJNPIu0cKua503G0cswHsamg1MQMrAMdpDAHpmpcX0KU431Ou1C9TRNMFpbYEhGCR2rh7qdpHIJJJ+8at6lrMmpXHnPGqt6L0zWYTk5PU04RtuFSd9EFLRRWhiFFFFABRRRQAUUUUAFBooPSgC0OUFQOOanU/ulNQydTQBHSgUlPAoASin7aaRSA9Q0bVVv9MbTdRG4OuI3PrXNWU1x4X8UwXMe4PbTrKvONwU5x9D0p2g38M6LDKQrDofeus1vQDqOkpfIM3EABOP4lr1WvaQTW589GTw9Zxasn/Vz17R/iLpfibTWu7HjULUCSSwZgHPqF/vVwfxJ8Q2GpGxu7a7iW26Sb2w8ZPYr17H8q8n1Mm2dJIC0b4yGU4IP1rCnlkmlaSV2d2OSzHJNcFWmk7Hs4es5xUjp73V7gOZbC58uKRORjmQg/exXJsT5jFiSSck+prprS08600oNgBoJGJ+hNc9IuZzgA8050lGKsTSrucpKT2/zf+R638AQ3/CZSFXKxm1kymSdxyOcdh2r6JutxjbHp3r5y+D7PpPjbRDcjDalFL5YJwQu04Y+xIwo49a+jZHBWQgghTj8RUTVmjWnLmTfmcRrLzBxGZgu9FkAZchXBIXPsQCDXifimVTqbJCzLGTxEWJC89BnsDXqXj68a20+QxuVnLIqsOoIJOa8Sv7qSfUTLKPm3biPT6VEnpY1jvcytStnikJK8ZrNyetdFq9xb3cYkjI+YZyD1rnSdpx1rKDujWasyJx3pnTinuCOvHtTK0MgxRRRTAKKKKACiiigAooooAKKKKACiiigCVHxFj0qNmyaSkNABUsfNRVInFAFjb8tRMMGnbzimMalDY5ZHgl3oSDmvQvCPjFRttbtvk6Ant/8AWrn/ABH4dNg5urU+ZaOchh2rmwHjcMhIYeldilOhKzPOcaWLpqSep6d4s8PxpAby32tA/wAw287c/wBK80nQo5Haus8P+LSsJ0/UPmhcY5rP1bSCJHltj5kJORj0rSqlUjzwMMM5Yebp1PkbGlLnSNOlCgmOxnPP1Nc3p9n9v1KC1HBnlSL6bmA/rXWafCV8EtKeGW28vnsC5JrF8EgTeNNLXji6RufY5omvgTFRlrVkuj/zHeIdRd/Hd3cWkrwJaTCG1CHG0R/KoHoBjJNfRvgjxxa+K/CM9yJM3lggW8O3buYgneB6Ng18pahKZNTvXJzulfH4sc1r+GfEOp+HLt7nTp9hkjMUkbDckqnqGHf+nauZvmkz0oWhBeh6/wDEvU4z4Z0nUoo9y3O2Zc8YGc4P1/rXjWoXgRXlxtaTPlqP4Qa6HXfHkmv6VYWl1YpG9kpQ+U2I3APy/L2xXD3c7XM/mOcsRg1nKPc0jJPYiSR0GAx2+lTxtujbsQQTj0NV6kgG52XIGVPX86FuEtgkA7dO9MIx9KkIMgyOnZQM4prqR1X8zQwT6DKKKKRQUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUopKcKAFzTSadTSKAPVbq1ey+HV69yeWYbM15YruxVQCzHgADkmum8SeMbjWLNNMjULaxt26sa3vA9pYaTE2ozQQvew/NJcXX+qth22j+J67qi9tUUYPRHkUG8LRcqi1k9jBm8LjRLKG+8RiSFpuYbJOJHHqx/hFUpdatVIFtp7QKOm2Un881P411eXVNflMmofb1iYhbjGA/wBB2Fc95hHasZzUHywOunSdSKnU1b6bHoUV4154DuJmXbuIG1OgwTWH4EZU8aaXI33RdJ/hWjZSFfhvMxI5fHT3NUfBFq0us28wGfLcSfka3leUoehw0rU6dbtdmBq8Zg1m+jPVLhwc/wC8aIZAFGBW58RbIWXjjUgv3Jn85fo3Nc3E+OK5Je7No9OPv00ydxyevJqvIoDtjpzVwMhUc8kiorlBHOVPbP8AOiSCD1sVcHGakgfy7iJz0DDP0pJDwKjPSo2ZtutS4fkLR5+VHIpkwynH1NJKwZ5CDwwDDH05pm4tHiqbM0upHRRRUGoUUUtACUUtFACUUUUAFFFFABRQaKACiiigApwptKDQA6kNGaSgD//Z", + "modelspec.hash_sha256": "0x8e3e833226b356a1bb9688b472e8e36315cd8a656f1a2d576bc13edd251392dd" + } + }, + "lora/szdnpp_lora_v1": { + "mtime": 1692334745.0, + "value": { + "ss_network_dim": "128", + "ss_num_epochs": "10", + "ss_reg_dataset_dirs": {}, + "ss_face_crop_aug_range": "None", + "ss_sd_model_name": "runwayml/stable-diffusion-v1-5", + "ss_max_token_length": "None", + "ss_multires_noise_discount": "0.3", + "ss_max_train_steps": "3200", + "ss_caption_dropout_every_n_epochs": "0", + "ss_seed": "420420420", + "ss_learning_rate": "None", + "ss_training_comment": "sszdnpp", + "ss_num_train_images": "320", + 
"ss_dataset_dirs": { + "10_szdnpp woman": { + "n_repeats": 10, + "img_count": 32 + } + }, + "sshs_model_hash": "f4edac6000fcbc1d33ab59c681605d77da2e0fcf656893791abcd2adc6277d53", + "modelspec.implementation": "diffusers", + "ss_steps": "3200", + "ss_tag_frequency": { + "10_szdnpp woman": { + "szdnpp woman": 32 + }, + "4_woman": {}, + "6_woman": {} + }, + "ss_keep_tokens": "0", + "modelspec.resolution": "768x768", + "ss_min_bucket_reso": "128", + "ss_bucket_no_upscale": "True", + "ss_color_aug": "False", + "ss_cache_latents": "True", + "ss_unet_lr": "None", + "ss_session_id": "243271588", + "modelspec.sai_model_spec": "1.0.0", + "ss_network_alpha": "64.0", + "ss_gradient_accumulation_steps": "1", + "modelspec.title": "szdnpp_lora_v1", + "ss_num_reg_images": "0", + "ss_lr_warmup_steps": "0", + "ss_network_dropout": "None", + "ss_random_crop": "False", + "ss_epoch": "10", + "ss_prior_loss_weight": "1.0", + "ss_text_encoder_lr": "None", + "ss_lr_scheduler": "adafactor:0.0001", + "sshs_legacy_hash": "d7f0f78b", + "ss_full_fp16": "False", + "ss_flip_aug": "False", + "ss_max_bucket_reso": "2048", + "ss_bucket_info": { + "buckets": { + "0": { + "resolution": [ + 512, + 512 + ], + "count": 320 + } + }, + "mean_img_ar_error": 0.0 + }, + "ss_adaptive_noise_scale": "None", + "ss_max_grad_norm": "1.0", + "ss_zero_terminal_snr": "False", + "ss_optimizer": "transformers.optimization.Adafactor(relative_step=True)", + "ss_num_batches_per_epoch": "320", + "ss_sd_scripts_commit_hash": "ee37a5311d3591caffa8c7ebe798ec2bb870ed87", + "ss_min_snr_gamma": "None", + "ss_batch_size_per_device": "1", + "ss_gradient_checkpointing": "False", + "ss_network_module": "networks.lora", + "modelspec.prediction_type": "epsilon", + "ss_scale_weight_norms": "None", + "ss_multires_noise_iterations": "None", + "ss_output_name": "szdnpp_lora_v1", + "ss_total_batch_size": "1", + "ss_shuffle_caption": "False", + "modelspec.date": "2023-08-18T04:58:07", + "ss_caption_tag_dropout_rate": "0.0", + "ss_clip_skip": "None", + "ss_resolution": "(768, 768)", + "ss_caption_dropout_rate": "0.0", + "ss_v2": "False", + "ss_noise_offset": "0.0", + "ss_enable_bucket": "True", + "ss_mixed_precision": "fp16", + "ss_lowram": "False", + "ss_training_started_at": "1692333214.5576196", + "modelspec.architecture": "stable-diffusion-v1/lora", + "ss_base_model_version": "sd_v1", + "ss_training_finished_at": "1692334687.9210682" + } + } + }, + "extensions-git": { + "Stable-Diffusion-Webui-Civitai-Helper": { + "mtime": 1692335674.0, + "value": { + "remote": "https://github.com/butaixianran/Stable-Diffusion-Webui-Civitai-Helper.git", + "commit_date": 1684842802, + "branch": "main", + "commit_hash": "920ca3267f789a72e3225c99f069bf0be0db795d", + "version": "920ca326" + } + }, + "Hypernetwork-MonkeyPatch-Extension": { + "mtime": 1692335697.0, + "value": { + "remote": "https://github.com/aria1th/Hypernetwork-MonkeyPatch-Extension.git", + "commit_date": 1690546730, + "branch": "main", + "commit_hash": "bd47167526e59c9cf3fdd8e8de364dbae96550df", + "version": "bd471675" + } + } + } +} \ No newline at end of file diff --git a/config.json b/config.json new file mode 100644 index 0000000000000000000000000000000000000000..e260c5e86f9609ecfaf5937b89760da5fa042c3e --- /dev/null +++ b/config.json @@ -0,0 +1,234 @@ +{ + "samples_save": true, + "samples_format": "png", + "samples_filename_pattern": "", + "save_images_add_number": true, + "grid_save": true, + "grid_format": "png", + "grid_extended_filename": false, + "grid_only_if_multiple": true, + 
"grid_prevent_empty_spots": false, + "grid_zip_filename_pattern": "", + "n_rows": -1, + "font": "", + "grid_text_active_color": "#000000", + "grid_text_inactive_color": "#999999", + "grid_background_color": "#ffffff", + "enable_pnginfo": true, + "save_txt": false, + "save_images_before_face_restoration": false, + "save_images_before_highres_fix": false, + "save_images_before_color_correction": false, + "save_mask": false, + "save_mask_composite": false, + "jpeg_quality": 80, + "webp_lossless": false, + "export_for_4chan": true, + "img_downscale_threshold": 4.0, + "target_side_length": 4000, + "img_max_size_mp": 200, + "use_original_name_batch": true, + "use_upscaler_name_as_suffix": false, + "save_selected_only": true, + "save_init_img": false, + "temp_dir": "", + "clean_temp_dir_at_start": false, + "outdir_samples": "", + "outdir_txt2img_samples": "outputs/txt2img-images", + "outdir_img2img_samples": "outputs/img2img-images", + "outdir_extras_samples": "outputs/extras-images", + "outdir_grids": "", + "outdir_txt2img_grids": "outputs/txt2img-grids", + "outdir_img2img_grids": "outputs/img2img-grids", + "outdir_save": "log/images", + "outdir_init_images": "outputs/init-images", + "save_to_dirs": true, + "grid_save_to_dirs": true, + "use_save_to_dirs_for_ui": false, + "directories_filename_pattern": "[date]", + "directories_max_prompt_words": 8, + "ESRGAN_tile": 192, + "ESRGAN_tile_overlap": 8, + "realesrgan_enabled_models": [ + "R-ESRGAN 4x+", + "R-ESRGAN 4x+ Anime6B" + ], + "upscaler_for_img2img": null, + "face_restoration_model": "CodeFormer", + "code_former_weight": 0.5, + "face_restoration_unload": false, + "show_warnings": false, + "memmon_poll_rate": 8, + "samples_log_stdout": false, + "multiple_tqdm": true, + "print_hypernet_extra": false, + "list_hidden_files": true, + "disable_mmap_load_safetensors": false, + "unload_models_when_training": false, + "pin_memory": false, + "save_optimizer_state": false, + "save_training_settings_to_txt": true, + "dataset_filename_word_regex": "", + "dataset_filename_join_string": " ", + "training_image_repeats_per_epoch": 1, + "training_write_csv_every": 500, + "training_xattention_optimizations": false, + "training_enable_tensorboard": false, + "training_tensorboard_save_images": false, + "training_tensorboard_flush_every": 120, + "sd_model_checkpoint": "v1-5-pruned-emaonly.safetensors [6ce0161689]", + "sd_checkpoint_cache": 0, + "sd_vae_checkpoint_cache": 0, + "sd_vae": "Automatic", + "sd_vae_as_default": true, + "sd_unet": "Automatic", + "inpainting_mask_weight": 1.0, + "initial_noise_multiplier": 1.0, + "img2img_color_correction": false, + "img2img_fix_steps": false, + "img2img_background_color": "#ffffff", + "enable_quantization": false, + "enable_emphasis": true, + "enable_batch_seeds": true, + "comma_padding_backtrack": 20, + "CLIP_stop_at_last_layers": 1, + "upcast_attn": false, + "auto_vae_precision": true, + "randn_source": "GPU", + "sdxl_crop_top": 0, + "sdxl_crop_left": 0, + "sdxl_refiner_low_aesthetic_score": 2.5, + "sdxl_refiner_high_aesthetic_score": 6.0, + "cross_attention_optimization": "Automatic", + "s_min_uncond": 0.0, + "token_merging_ratio": 0.0, + "token_merging_ratio_img2img": 0.0, + "token_merging_ratio_hr": 0.0, + "pad_cond_uncond": false, + "experimental_persistent_cond_cache": false, + "use_old_emphasis_implementation": false, + "use_old_karras_scheduler_sigmas": false, + "no_dpmpp_sde_batch_determinism": false, + "use_old_hires_fix_width_height": false, + "dont_fix_second_order_samplers_schedule": false, + 
"hires_fix_use_firstpass_conds": false, + "interrogate_keep_models_in_memory": false, + "interrogate_return_ranks": false, + "interrogate_clip_num_beams": 1, + "interrogate_clip_min_length": 24, + "interrogate_clip_max_length": 48, + "interrogate_clip_dict_limit": 1500, + "interrogate_clip_skip_categories": [], + "interrogate_deepbooru_score_threshold": 0.5, + "deepbooru_sort_alpha": true, + "deepbooru_use_spaces": true, + "deepbooru_escape": true, + "deepbooru_filter_tags": "", + "extra_networks_show_hidden_directories": true, + "extra_networks_hidden_models": "When searched", + "extra_networks_default_multiplier": 1.0, + "extra_networks_card_width": 0, + "extra_networks_card_height": 0, + "extra_networks_card_text_scale": 1.0, + "extra_networks_card_show_desc": true, + "extra_networks_add_text_separator": " ", + "ui_extra_networks_tab_reorder": "", + "textual_inversion_print_at_load": false, + "textual_inversion_add_hashes_to_infotext": true, + "sd_hypernetwork": "None", + "localization": "None", + "gradio_theme": "Default", + "img2img_editor_height": 720, + "return_grid": true, + "return_mask": false, + "return_mask_composite": false, + "do_not_show_images": false, + "send_seed": true, + "send_size": true, + "js_modal_lightbox": true, + "js_modal_lightbox_initially_zoomed": true, + "js_modal_lightbox_gamepad": false, + "js_modal_lightbox_gamepad_repeat": 250, + "show_progress_in_title": true, + "samplers_in_dropdown": true, + "dimensions_and_batch_together": true, + "keyedit_precision_attention": 0.1, + "keyedit_precision_extra": 0.05, + "keyedit_delimiters": ".,\\/!?%^*;:{}=`~()", + "keyedit_move": true, + "quicksettings_list": [ + "sd_model_checkpoint", + "sd_vae", + "lora_preferred_name" + ], + "ui_tab_order": [], + "hidden_tabs": [], + "ui_reorder_list": [], + "hires_fix_show_sampler": false, + "hires_fix_show_prompts": false, + "disable_token_counters": false, + "add_model_hash_to_info": true, + "add_model_name_to_info": true, + "add_user_name_to_info": false, + "add_version_to_infotext": true, + "disable_weights_auto_swap": true, + "infotext_styles": "Apply if any", + "show_progressbar": true, + "live_previews_enable": true, + "live_previews_image_format": "png", + "show_progress_grid": true, + "show_progress_every_n_steps": 10, + "show_progress_type": "Approx NN", + "live_preview_content": "Prompt", + "live_preview_refresh_period": 1000, + "hide_samplers": [], + "eta_ddim": 0.0, + "eta_ancestral": 1.0, + "ddim_discretize": "uniform", + "s_churn": 0.0, + "s_tmin": 0.0, + "s_noise": 1.0, + "k_sched_type": "Automatic", + "sigma_min": 0.0, + "sigma_max": 0.0, + "rho": 0.0, + "eta_noise_seed_delta": 0, + "always_discard_next_to_last_sigma": false, + "uni_pc_variant": "bh1", + "uni_pc_skip_type": "time_uniform", + "uni_pc_order": 3, + "uni_pc_lower_order_final": true, + "postprocessing_enable_in_main_ui": [], + "postprocessing_operation_order": [], + "upscaling_max_images_in_cache": 5, + "disabled_extensions": [], + "disable_all_extensions": "none", + "restore_config_state_file": "", + "sd_checkpoint_hash": "6ce0161689b3853acaa03779ec93eafe75a02f4ced659bee03f50797806fa2fa", + "ldsr_steps": 100, + "ldsr_cached": false, + "SCUNET_tile": 256, + "SCUNET_tile_overlap": 8, + "SWIN_tile": 192, + "SWIN_tile_overlap": 8, + "SWIN_torch_compile": false, + "lora_functional": false, + "sd_lora": "None", + "lora_preferred_name": "Alias from file", + "lora_add_hashes_to_infotext": true, + "lora_show_all": false, + "lora_hide_unknown_for_versions": [], + "extra_options": [], + 
"extra_options_accordion": false, + "canvas_hotkey_zoom": "Alt", + "canvas_hotkey_adjust": "Ctrl", + "canvas_hotkey_move": "F", + "canvas_hotkey_fullscreen": "S", + "canvas_hotkey_reset": "R", + "canvas_hotkey_overlap": "O", + "canvas_show_tooltip": true, + "canvas_blur_prompt": false, + "canvas_disabled_functions": [ + "Overlap" + ] +} \ No newline at end of file diff --git a/configs/alt-diffusion-inference.yaml b/configs/alt-diffusion-inference.yaml new file mode 100644 index 0000000000000000000000000000000000000000..cfbee72d71bfd7deed2075e423ca51bd1da0521c --- /dev/null +++ b/configs/alt-diffusion-inference.yaml @@ -0,0 +1,72 @@ +model: + base_learning_rate: 1.0e-04 + target: ldm.models.diffusion.ddpm.LatentDiffusion + params: + linear_start: 0.00085 + linear_end: 0.0120 + num_timesteps_cond: 1 + log_every_t: 200 + timesteps: 1000 + first_stage_key: "jpg" + cond_stage_key: "txt" + image_size: 64 + channels: 4 + cond_stage_trainable: false # Note: different from the one we trained before + conditioning_key: crossattn + monitor: val/loss_simple_ema + scale_factor: 0.18215 + use_ema: False + + scheduler_config: # 10000 warmup steps + target: ldm.lr_scheduler.LambdaLinearScheduler + params: + warm_up_steps: [ 10000 ] + cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases + f_start: [ 1.e-6 ] + f_max: [ 1. ] + f_min: [ 1. ] + + unet_config: + target: ldm.modules.diffusionmodules.openaimodel.UNetModel + params: + image_size: 32 # unused + in_channels: 4 + out_channels: 4 + model_channels: 320 + attention_resolutions: [ 4, 2, 1 ] + num_res_blocks: 2 + channel_mult: [ 1, 2, 4, 4 ] + num_heads: 8 + use_spatial_transformer: True + transformer_depth: 1 + context_dim: 768 + use_checkpoint: True + legacy: False + + first_stage_config: + target: ldm.models.autoencoder.AutoencoderKL + params: + embed_dim: 4 + monitor: val/rec_loss + ddconfig: + double_z: true + z_channels: 4 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: + - 1 + - 2 + - 4 + - 4 + num_res_blocks: 2 + attn_resolutions: [] + dropout: 0.0 + lossconfig: + target: torch.nn.Identity + + cond_stage_config: + target: modules.xlmr.BertSeriesModelWithTransformation + params: + name: "XLMR-Large" \ No newline at end of file diff --git a/configs/instruct-pix2pix.yaml b/configs/instruct-pix2pix.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4e896879dd7ac5697b89cb323ec43eb41c03596c --- /dev/null +++ b/configs/instruct-pix2pix.yaml @@ -0,0 +1,98 @@ +# File modified by authors of InstructPix2Pix from original (https://github.com/CompVis/stable-diffusion). +# See more details in LICENSE. + +model: + base_learning_rate: 1.0e-04 + target: modules.models.diffusion.ddpm_edit.LatentDiffusion + params: + linear_start: 0.00085 + linear_end: 0.0120 + num_timesteps_cond: 1 + log_every_t: 200 + timesteps: 1000 + first_stage_key: edited + cond_stage_key: edit + # image_size: 64 + # image_size: 32 + image_size: 16 + channels: 4 + cond_stage_trainable: false # Note: different from the one we trained before + conditioning_key: hybrid + monitor: val/loss_simple_ema + scale_factor: 0.18215 + use_ema: false + + scheduler_config: # 10000 warmup steps + target: ldm.lr_scheduler.LambdaLinearScheduler + params: + warm_up_steps: [ 0 ] + cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases + f_start: [ 1.e-6 ] + f_max: [ 1. ] + f_min: [ 1. 
] + + unet_config: + target: ldm.modules.diffusionmodules.openaimodel.UNetModel + params: + image_size: 32 # unused + in_channels: 8 + out_channels: 4 + model_channels: 320 + attention_resolutions: [ 4, 2, 1 ] + num_res_blocks: 2 + channel_mult: [ 1, 2, 4, 4 ] + num_heads: 8 + use_spatial_transformer: True + transformer_depth: 1 + context_dim: 768 + use_checkpoint: True + legacy: False + + first_stage_config: + target: ldm.models.autoencoder.AutoencoderKL + params: + embed_dim: 4 + monitor: val/rec_loss + ddconfig: + double_z: true + z_channels: 4 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: + - 1 + - 2 + - 4 + - 4 + num_res_blocks: 2 + attn_resolutions: [] + dropout: 0.0 + lossconfig: + target: torch.nn.Identity + + cond_stage_config: + target: ldm.modules.encoders.modules.FrozenCLIPEmbedder + +data: + target: main.DataModuleFromConfig + params: + batch_size: 128 + num_workers: 1 + wrap: false + validation: + target: edit_dataset.EditDataset + params: + path: data/clip-filtered-dataset + cache_dir: data/ + cache_name: data_10k + split: val + min_text_sim: 0.2 + min_image_sim: 0.75 + min_direction_sim: 0.2 + max_samples_per_prompt: 1 + min_resize_res: 512 + max_resize_res: 512 + crop_res: 512 + output_as_edit: False + real_input: True diff --git a/configs/v1-inference.yaml b/configs/v1-inference.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d4effe569e897369918625f9d8be5603a0e6a0d6 --- /dev/null +++ b/configs/v1-inference.yaml @@ -0,0 +1,70 @@ +model: + base_learning_rate: 1.0e-04 + target: ldm.models.diffusion.ddpm.LatentDiffusion + params: + linear_start: 0.00085 + linear_end: 0.0120 + num_timesteps_cond: 1 + log_every_t: 200 + timesteps: 1000 + first_stage_key: "jpg" + cond_stage_key: "txt" + image_size: 64 + channels: 4 + cond_stage_trainable: false # Note: different from the one we trained before + conditioning_key: crossattn + monitor: val/loss_simple_ema + scale_factor: 0.18215 + use_ema: False + + scheduler_config: # 10000 warmup steps + target: ldm.lr_scheduler.LambdaLinearScheduler + params: + warm_up_steps: [ 10000 ] + cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases + f_start: [ 1.e-6 ] + f_max: [ 1. ] + f_min: [ 1. 
] + + unet_config: + target: ldm.modules.diffusionmodules.openaimodel.UNetModel + params: + image_size: 32 # unused + in_channels: 4 + out_channels: 4 + model_channels: 320 + attention_resolutions: [ 4, 2, 1 ] + num_res_blocks: 2 + channel_mult: [ 1, 2, 4, 4 ] + num_heads: 8 + use_spatial_transformer: True + transformer_depth: 1 + context_dim: 768 + use_checkpoint: True + legacy: False + + first_stage_config: + target: ldm.models.autoencoder.AutoencoderKL + params: + embed_dim: 4 + monitor: val/rec_loss + ddconfig: + double_z: true + z_channels: 4 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: + - 1 + - 2 + - 4 + - 4 + num_res_blocks: 2 + attn_resolutions: [] + dropout: 0.0 + lossconfig: + target: torch.nn.Identity + + cond_stage_config: + target: ldm.modules.encoders.modules.FrozenCLIPEmbedder diff --git a/configs/v1-inpainting-inference.yaml b/configs/v1-inpainting-inference.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f9eec37d24bce33ce92320a782d16ae72308190a --- /dev/null +++ b/configs/v1-inpainting-inference.yaml @@ -0,0 +1,70 @@ +model: + base_learning_rate: 7.5e-05 + target: ldm.models.diffusion.ddpm.LatentInpaintDiffusion + params: + linear_start: 0.00085 + linear_end: 0.0120 + num_timesteps_cond: 1 + log_every_t: 200 + timesteps: 1000 + first_stage_key: "jpg" + cond_stage_key: "txt" + image_size: 64 + channels: 4 + cond_stage_trainable: false # Note: different from the one we trained before + conditioning_key: hybrid # important + monitor: val/loss_simple_ema + scale_factor: 0.18215 + finetune_keys: null + + scheduler_config: # 10000 warmup steps + target: ldm.lr_scheduler.LambdaLinearScheduler + params: + warm_up_steps: [ 2500 ] # NOTE for resuming. use 10000 if starting from scratch + cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases + f_start: [ 1.e-6 ] + f_max: [ 1. ] + f_min: [ 1. 
] + + unet_config: + target: ldm.modules.diffusionmodules.openaimodel.UNetModel + params: + image_size: 32 # unused + in_channels: 9 # 4 data + 4 downscaled image + 1 mask + out_channels: 4 + model_channels: 320 + attention_resolutions: [ 4, 2, 1 ] + num_res_blocks: 2 + channel_mult: [ 1, 2, 4, 4 ] + num_heads: 8 + use_spatial_transformer: True + transformer_depth: 1 + context_dim: 768 + use_checkpoint: True + legacy: False + + first_stage_config: + target: ldm.models.autoencoder.AutoencoderKL + params: + embed_dim: 4 + monitor: val/rec_loss + ddconfig: + double_z: true + z_channels: 4 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: + - 1 + - 2 + - 4 + - 4 + num_res_blocks: 2 + attn_resolutions: [] + dropout: 0.0 + lossconfig: + target: torch.nn.Identity + + cond_stage_config: + target: ldm.modules.encoders.modules.FrozenCLIPEmbedder diff --git a/embeddings/Place Textual Inversion embeddings here.txt b/embeddings/Place Textual Inversion embeddings here.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/environment-wsl2.yaml b/environment-wsl2.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0c4ae6809997ec38e7cf62cf0f71360b8cb61a7e --- /dev/null +++ b/environment-wsl2.yaml @@ -0,0 +1,11 @@ +name: automatic +channels: + - pytorch + - defaults +dependencies: + - python=3.10 + - pip=23.0 + - cudatoolkit=11.8 + - pytorch=2.0 + - torchvision=0.15 + - numpy=1.23 diff --git a/extensions-builtin/LDSR/__pycache__/ldsr_model_arch.cpython-310.pyc b/extensions-builtin/LDSR/__pycache__/ldsr_model_arch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d1ebcc701ef4badd8e5623bd7fd7de969487c8b8 Binary files /dev/null and b/extensions-builtin/LDSR/__pycache__/ldsr_model_arch.cpython-310.pyc differ diff --git a/extensions-builtin/LDSR/__pycache__/preload.cpython-310.pyc b/extensions-builtin/LDSR/__pycache__/preload.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a43957dd7bb8e9fefe33f40d0a3ad8040701a8de Binary files /dev/null and b/extensions-builtin/LDSR/__pycache__/preload.cpython-310.pyc differ diff --git a/extensions-builtin/LDSR/__pycache__/sd_hijack_autoencoder.cpython-310.pyc b/extensions-builtin/LDSR/__pycache__/sd_hijack_autoencoder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..907916c4f7ed165dcbcb7e4a86fc6e73db840df5 Binary files /dev/null and b/extensions-builtin/LDSR/__pycache__/sd_hijack_autoencoder.cpython-310.pyc differ diff --git a/extensions-builtin/LDSR/__pycache__/sd_hijack_ddpm_v1.cpython-310.pyc b/extensions-builtin/LDSR/__pycache__/sd_hijack_ddpm_v1.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1daf9986a01f4d7135883e417516d2e7ad414060 Binary files /dev/null and b/extensions-builtin/LDSR/__pycache__/sd_hijack_ddpm_v1.cpython-310.pyc differ diff --git a/extensions-builtin/LDSR/__pycache__/vqvae_quantize.cpython-310.pyc b/extensions-builtin/LDSR/__pycache__/vqvae_quantize.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..89362b181af17fec1a9a2d8b23917a71f3262c9e Binary files /dev/null and b/extensions-builtin/LDSR/__pycache__/vqvae_quantize.cpython-310.pyc differ diff --git a/extensions-builtin/LDSR/ldsr_model_arch.py b/extensions-builtin/LDSR/ldsr_model_arch.py new file mode 100644 index 0000000000000000000000000000000000000000..7cac36ce55ae295c6d0e444a93ea12bf8cfe893c --- /dev/null +++ 
b/extensions-builtin/LDSR/ldsr_model_arch.py @@ -0,0 +1,250 @@ +import os +import gc +import time + +import numpy as np +import torch +import torchvision +from PIL import Image +from einops import rearrange, repeat +from omegaconf import OmegaConf +import safetensors.torch + +from ldm.models.diffusion.ddim import DDIMSampler +from ldm.util import instantiate_from_config, ismap +from modules import shared, sd_hijack, devices + +cached_ldsr_model: torch.nn.Module = None + + +# Create LDSR Class +class LDSR: + def load_model_from_config(self, half_attention): + global cached_ldsr_model + + if shared.opts.ldsr_cached and cached_ldsr_model is not None: + print("Loading model from cache") + model: torch.nn.Module = cached_ldsr_model + else: + print(f"Loading model from {self.modelPath}") + _, extension = os.path.splitext(self.modelPath) + if extension.lower() == ".safetensors": + pl_sd = safetensors.torch.load_file(self.modelPath, device="cpu") + else: + pl_sd = torch.load(self.modelPath, map_location="cpu") + sd = pl_sd["state_dict"] if "state_dict" in pl_sd else pl_sd + config = OmegaConf.load(self.yamlPath) + config.model.target = "ldm.models.diffusion.ddpm.LatentDiffusionV1" + model: torch.nn.Module = instantiate_from_config(config.model) + model.load_state_dict(sd, strict=False) + model = model.to(shared.device) + if half_attention: + model = model.half() + if shared.cmd_opts.opt_channelslast: + model = model.to(memory_format=torch.channels_last) + + sd_hijack.model_hijack.hijack(model) # apply optimization + model.eval() + + if shared.opts.ldsr_cached: + cached_ldsr_model = model + + return {"model": model} + + def __init__(self, model_path, yaml_path): + self.modelPath = model_path + self.yamlPath = yaml_path + + @staticmethod + def run(model, selected_path, custom_steps, eta): + example = get_cond(selected_path) + + n_runs = 1 + guider = None + ckwargs = None + ddim_use_x0_pred = False + temperature = 1. 
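+ # below: inputs of at least 128 px per side are processed as overlapping 128x128 tiles (stride 64) that are blended back together, keeping memory use bounded on large images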
+ custom_shape = None + + height, width = example["image"].shape[1:3] + split_input = height >= 128 and width >= 128 + + if split_input: + ks = 128 + stride = 64 + vqf = 4 # first-stage (VQ) downscaling factor + model.split_input_params = {"ks": (ks, ks), "stride": (stride, stride), + "vqf": vqf, + "patch_distributed_vq": True, + "tie_braker": False, + "clip_max_weight": 0.5, + "clip_min_weight": 0.01, + "clip_max_tie_weight": 0.5, + "clip_min_tie_weight": 0.01} + else: + if hasattr(model, "split_input_params"): + delattr(model, "split_input_params") + + x_t = None + logs = None + for _ in range(n_runs): + if custom_shape is not None: + x_t = torch.randn(1, custom_shape[1], custom_shape[2], custom_shape[3]).to(model.device) + x_t = repeat(x_t, '1 c h w -> b c h w', b=custom_shape[0]) + + logs = make_convolutional_sample(example, model, + custom_steps=custom_steps, + eta=eta, quantize_x0=False, + custom_shape=custom_shape, + temperature=temperature, noise_dropout=0., + corrector=guider, corrector_kwargs=ckwargs, x_T=x_t, + ddim_use_x0_pred=ddim_use_x0_pred + ) + return logs + + def super_resolution(self, image, steps=100, target_scale=2, half_attention=False): + model = self.load_model_from_config(half_attention) + + # Run settings + diffusion_steps = int(steps) + eta = 1.0 + + gc.collect() + devices.torch_gc() + + im_og = image + width_og, height_og = im_og.size + # LDSR always upscales by a fixed factor of 4; to reach other target scales, the input is first downsampled by target_scale / 4 + down_sample_rate = target_scale / 4 + wd = width_og * down_sample_rate + hd = height_og * down_sample_rate + width_downsampled_pre = int(np.ceil(wd)) + height_downsampled_pre = int(np.ceil(hd)) + + if down_sample_rate != 1: + print( + f'Downsampling from [{width_og}, {height_og}] to [{width_downsampled_pre}, {height_downsampled_pre}]') + im_og = im_og.resize((width_downsampled_pre, height_downsampled_pre), Image.LANCZOS) + else: + print(f"Down sample rate is 1 from {target_scale} / 4 (not downsampling)") + + # pad width and height to multiples of 64, pads with the edge values of image to avoid artifacts + pad_w, pad_h = np.max(((2, 2), np.ceil(np.array(im_og.size) / 64).astype(int)), axis=0) * 64 - im_og.size + im_padded = Image.fromarray(np.pad(np.array(im_og), ((0, pad_h), (0, pad_w), (0, 0)), mode='edge')) + + logs = self.run(model["model"], im_padded, diffusion_steps, eta) + + sample = logs["sample"] + sample = sample.detach().cpu() + sample = torch.clamp(sample, -1., 1.) + sample = (sample + 1.) / 2. * 255 + sample = sample.numpy().astype(np.uint8) + sample = np.transpose(sample, (0, 2, 3, 1)) + a = Image.fromarray(sample[0]) + + # remove padding + a = a.crop((0, 0) + tuple(np.array(im_og.size) * 4)) + + del model + gc.collect() + devices.torch_gc() + + return a + + +def get_cond(selected_path): + example = {} + up_f = 4 + c = selected_path.convert('RGB') + c = torch.unsqueeze(torchvision.transforms.ToTensor()(c), 0) + c_up = torchvision.transforms.functional.resize(c, size=[up_f * c.shape[2], up_f * c.shape[3]], + antialias=True) + c_up = rearrange(c_up, '1 c h w -> 1 h w c') + c = rearrange(c, '1 c h w -> 1 h w c') + c = 2. * c - 1. 
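+ # ToTensor yields values in [0, 1]; shift the low-res conditioning image into the [-1, 1] range the diffusion model expects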
+ + c = c.to(shared.device) + example["LR_image"] = c + example["image"] = c_up + + return example + + +@torch.no_grad() +def convsample_ddim(model, cond, steps, shape, eta=1.0, callback=None, normals_sequence=None, + mask=None, x0=None, quantize_x0=False, temperature=1., score_corrector=None, + corrector_kwargs=None, x_t=None + ): + ddim = DDIMSampler(model) + bs = shape[0] + shape = shape[1:] + print(f"Sampling with eta = {eta}; steps: {steps}") + samples, intermediates = ddim.sample(steps, batch_size=bs, shape=shape, conditioning=cond, callback=callback, + normals_sequence=normals_sequence, quantize_x0=quantize_x0, eta=eta, + mask=mask, x0=x0, temperature=temperature, verbose=False, + score_corrector=score_corrector, + corrector_kwargs=corrector_kwargs, x_t=x_t) + + return samples, intermediates + + +@torch.no_grad() +def make_convolutional_sample(batch, model, custom_steps=None, eta=1.0, quantize_x0=False, custom_shape=None, temperature=1., noise_dropout=0., corrector=None, + corrector_kwargs=None, x_T=None, ddim_use_x0_pred=False): + log = {} + + z, c, x, xrec, xc = model.get_input(batch, model.first_stage_key, + return_first_stage_outputs=True, + force_c_encode=not (hasattr(model, 'split_input_params') + and model.cond_stage_key == 'coordinates_bbox'), + return_original_cond=True) + + if custom_shape is not None: + z = torch.randn(custom_shape) + print(f"Generating {custom_shape[0]} samples of shape {custom_shape[1:]}") + + z0 = None + + log["input"] = x + log["reconstruction"] = xrec + + if ismap(xc): + log["original_conditioning"] = model.to_rgb(xc) + if hasattr(model, 'cond_stage_key'): + log[model.cond_stage_key] = model.to_rgb(xc) + + else: + log["original_conditioning"] = xc if xc is not None else torch.zeros_like(x) + if model.cond_stage_model: + log[model.cond_stage_key] = xc if xc is not None else torch.zeros_like(x) + if model.cond_stage_key == 'class_label': + log[model.cond_stage_key] = xc[model.cond_stage_key] + + with model.ema_scope("Plotting"): + t0 = time.time() + + sample, intermediates = convsample_ddim(model, c, steps=custom_steps, shape=z.shape, + eta=eta, + quantize_x0=quantize_x0, mask=None, x0=z0, + temperature=temperature, score_corrector=corrector, corrector_kwargs=corrector_kwargs, + x_t=x_T) + t1 = time.time() + + if ddim_use_x0_pred: + sample = intermediates['pred_x0'][-1] + + x_sample = model.decode_first_stage(sample) + + try: + x_sample_noquant = model.decode_first_stage(sample, force_not_quantize=True) + log["sample_noquant"] = x_sample_noquant + log["sample_diff"] = torch.abs(x_sample_noquant - x_sample) + except Exception: + pass + + log["sample"] = x_sample + log["time"] = t1 - t0 + + return log diff --git a/extensions-builtin/LDSR/preload.py b/extensions-builtin/LDSR/preload.py new file mode 100644 index 0000000000000000000000000000000000000000..cfd478d545ed12ef74e73fa40b6defe0156859da --- /dev/null +++ b/extensions-builtin/LDSR/preload.py @@ -0,0 +1,6 @@ +import os +from modules import paths + + +def preload(parser): + parser.add_argument("--ldsr-models-path", type=str, help="Path to directory with LDSR model file(s).", default=os.path.join(paths.models_path, 'LDSR')) diff --git a/extensions-builtin/LDSR/scripts/__pycache__/ldsr_model.cpython-310.pyc b/extensions-builtin/LDSR/scripts/__pycache__/ldsr_model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9a831549e75df9287da89f03f0ecbe960c4260f3 Binary files /dev/null and b/extensions-builtin/LDSR/scripts/__pycache__/ldsr_model.cpython-310.pyc differ diff --git 
a/extensions-builtin/LDSR/scripts/ldsr_model.py b/extensions-builtin/LDSR/scripts/ldsr_model.py new file mode 100644 index 0000000000000000000000000000000000000000..bd78decea1ec9fc66d61d66ee64457458a290f72 --- /dev/null +++ b/extensions-builtin/LDSR/scripts/ldsr_model.py @@ -0,0 +1,68 @@ +import os + +from modules.modelloader import load_file_from_url +from modules.upscaler import Upscaler, UpscalerData +from ldsr_model_arch import LDSR +from modules import shared, script_callbacks, errors +import sd_hijack_autoencoder # noqa: F401 +import sd_hijack_ddpm_v1 # noqa: F401 + + +class UpscalerLDSR(Upscaler): + def __init__(self, user_path): + self.name = "LDSR" + self.user_path = user_path + self.model_url = "https://heibox.uni-heidelberg.de/f/578df07c8fc04ffbadf3/?dl=1" + self.yaml_url = "https://heibox.uni-heidelberg.de/f/31a76b13ea27482981b4/?dl=1" + super().__init__() + scaler_data = UpscalerData("LDSR", None, self) + self.scalers = [scaler_data] + + def load_model(self, path: str): + # Remove incorrect project.yaml file if too big + yaml_path = os.path.join(self.model_path, "project.yaml") + old_model_path = os.path.join(self.model_path, "model.pth") + new_model_path = os.path.join(self.model_path, "model.ckpt") + + local_model_paths = self.find_models(ext_filter=[".ckpt", ".safetensors"]) + local_ckpt_path = next(iter([local_model for local_model in local_model_paths if local_model.endswith("model.ckpt")]), None) + local_safetensors_path = next(iter([local_model for local_model in local_model_paths if local_model.endswith("model.safetensors")]), None) + local_yaml_path = next(iter([local_model for local_model in local_model_paths if local_model.endswith("project.yaml")]), None) + + if os.path.exists(yaml_path): + statinfo = os.stat(yaml_path) + if statinfo.st_size >= 10485760: + print("Removing invalid LDSR YAML file.") + os.remove(yaml_path) + + if os.path.exists(old_model_path): + print("Renaming model from model.pth to model.ckpt") + os.rename(old_model_path, new_model_path) + + if local_safetensors_path is not None and os.path.exists(local_safetensors_path): + model = local_safetensors_path + else: + model = local_ckpt_path or load_file_from_url(self.model_url, model_dir=self.model_download_path, file_name="model.ckpt") + + yaml = local_yaml_path or load_file_from_url(self.yaml_url, model_dir=self.model_download_path, file_name="project.yaml") + + return LDSR(model, yaml) + + def do_upscale(self, img, path): + try: + ldsr = self.load_model(path) + except Exception: + errors.report(f"Failed loading LDSR model {path}", exc_info=True) + return img + ddim_steps = shared.opts.ldsr_steps + return ldsr.super_resolution(img, ddim_steps, self.scale) + + +def on_ui_settings(): + import gradio as gr + + shared.opts.add_option("ldsr_steps", shared.OptionInfo(100, "LDSR processing steps. 
Lower = faster", gr.Slider, {"minimum": 1, "maximum": 200, "step": 1}, section=('upscaling', "Upscaling"))) + shared.opts.add_option("ldsr_cached", shared.OptionInfo(False, "Cache LDSR model in memory", gr.Checkbox, {"interactive": True}, section=('upscaling', "Upscaling"))) + + +script_callbacks.on_ui_settings(on_ui_settings) diff --git a/extensions-builtin/LDSR/sd_hijack_autoencoder.py b/extensions-builtin/LDSR/sd_hijack_autoencoder.py new file mode 100644 index 0000000000000000000000000000000000000000..c29d274da825d2500b77a2022db3421b40b18886 --- /dev/null +++ b/extensions-builtin/LDSR/sd_hijack_autoencoder.py @@ -0,0 +1,293 @@ +# The content of this file comes from the ldm/models/autoencoder.py file of the compvis/stable-diffusion repo +# The VQModel & VQModelInterface were subsequently removed from ldm/models/autoencoder.py when we moved to the stability-ai/stablediffusion repo +# As the LDSR upscaler relies on VQModel & VQModelInterface, the hijack aims to put them back into the ldm.models.autoencoder +import numpy as np +import torch +import pytorch_lightning as pl +import torch.nn.functional as F +from contextlib import contextmanager + +from torch.optim.lr_scheduler import LambdaLR + +from ldm.modules.ema import LitEma +from vqvae_quantize import VectorQuantizer2 as VectorQuantizer +from ldm.modules.diffusionmodules.model import Encoder, Decoder +from ldm.util import instantiate_from_config + +import ldm.models.autoencoder +from packaging import version + +class VQModel(pl.LightningModule): + def __init__(self, + ddconfig, + lossconfig, + n_embed, + embed_dim, + ckpt_path=None, + ignore_keys=None, + image_key="image", + colorize_nlabels=None, + monitor=None, + batch_resize_range=None, + scheduler_config=None, + lr_g_factor=1.0, + remap=None, + sane_index_shape=False, # tell vector quantizer to return indices as bhw + use_ema=False + ): + super().__init__() + self.embed_dim = embed_dim + self.n_embed = n_embed + self.image_key = image_key + self.encoder = Encoder(**ddconfig) + self.decoder = Decoder(**ddconfig) + self.loss = instantiate_from_config(lossconfig) + self.quantize = VectorQuantizer(n_embed, embed_dim, beta=0.25, + remap=remap, + sane_index_shape=sane_index_shape) + self.quant_conv = torch.nn.Conv2d(ddconfig["z_channels"], embed_dim, 1) + self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1) + if colorize_nlabels is not None: + assert type(colorize_nlabels)==int + self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1)) + if monitor is not None: + self.monitor = monitor + self.batch_resize_range = batch_resize_range + if self.batch_resize_range is not None: + print(f"{self.__class__.__name__}: Using per-batch resizing in range {batch_resize_range}.") + + self.use_ema = use_ema + if self.use_ema: + self.model_ema = LitEma(self) + print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") + + if ckpt_path is not None: + self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys or []) + self.scheduler_config = scheduler_config + self.lr_g_factor = lr_g_factor + + @contextmanager + def ema_scope(self, context=None): + if self.use_ema: + self.model_ema.store(self.parameters()) + self.model_ema.copy_to(self) + if context is not None: + print(f"{context}: Switched to EMA weights") + try: + yield None + finally: + if self.use_ema: + self.model_ema.restore(self.parameters()) + if context is not None: + print(f"{context}: Restored training weights") + + def init_from_ckpt(self, path, ignore_keys=None): + sd = torch.load(path, 
map_location="cpu")["state_dict"] + keys = list(sd.keys()) + for k in keys: + for ik in ignore_keys or []: + if k.startswith(ik): + print("Deleting key {} from state_dict.".format(k)) + del sd[k] + missing, unexpected = self.load_state_dict(sd, strict=False) + print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") + if missing: + print(f"Missing Keys: {missing}") + if unexpected: + print(f"Unexpected Keys: {unexpected}") + + def on_train_batch_end(self, *args, **kwargs): + if self.use_ema: + self.model_ema(self) + + def encode(self, x): + h = self.encoder(x) + h = self.quant_conv(h) + quant, emb_loss, info = self.quantize(h) + return quant, emb_loss, info + + def encode_to_prequant(self, x): + h = self.encoder(x) + h = self.quant_conv(h) + return h + + def decode(self, quant): + quant = self.post_quant_conv(quant) + dec = self.decoder(quant) + return dec + + def decode_code(self, code_b): + quant_b = self.quantize.embed_code(code_b) + dec = self.decode(quant_b) + return dec + + def forward(self, input, return_pred_indices=False): + quant, diff, (_,_,ind) = self.encode(input) + dec = self.decode(quant) + if return_pred_indices: + return dec, diff, ind + return dec, diff + + def get_input(self, batch, k): + x = batch[k] + if len(x.shape) == 3: + x = x[..., None] + x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float() + if self.batch_resize_range is not None: + lower_size = self.batch_resize_range[0] + upper_size = self.batch_resize_range[1] + if self.global_step <= 4: + # do the first few batches with max size to avoid later oom + new_resize = upper_size + else: + new_resize = np.random.choice(np.arange(lower_size, upper_size+16, 16)) + if new_resize != x.shape[2]: + x = F.interpolate(x, size=new_resize, mode="bicubic") + x = x.detach() + return x + + def training_step(self, batch, batch_idx, optimizer_idx): + # https://github.com/pytorch/pytorch/issues/37142 + # try not to fool the heuristics + x = self.get_input(batch, self.image_key) + xrec, qloss, ind = self(x, return_pred_indices=True) + + if optimizer_idx == 0: + # autoencode + aeloss, log_dict_ae = self.loss(qloss, x, xrec, optimizer_idx, self.global_step, + last_layer=self.get_last_layer(), split="train", + predicted_indices=ind) + + self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True) + return aeloss + + if optimizer_idx == 1: + # discriminator + discloss, log_dict_disc = self.loss(qloss, x, xrec, optimizer_idx, self.global_step, + last_layer=self.get_last_layer(), split="train") + self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=True) + return discloss + + def validation_step(self, batch, batch_idx): + log_dict = self._validation_step(batch, batch_idx) + with self.ema_scope(): + self._validation_step(batch, batch_idx, suffix="_ema") + return log_dict + + def _validation_step(self, batch, batch_idx, suffix=""): + x = self.get_input(batch, self.image_key) + xrec, qloss, ind = self(x, return_pred_indices=True) + aeloss, log_dict_ae = self.loss(qloss, x, xrec, 0, + self.global_step, + last_layer=self.get_last_layer(), + split="val"+suffix, + predicted_indices=ind + ) + + discloss, log_dict_disc = self.loss(qloss, x, xrec, 1, + self.global_step, + last_layer=self.get_last_layer(), + split="val"+suffix, + predicted_indices=ind + ) + rec_loss = log_dict_ae[f"val{suffix}/rec_loss"] + self.log(f"val{suffix}/rec_loss", rec_loss, + prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True) + 
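# also log the aggregate autoencoder objective alongside the raw reconstruction loss + 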
self.log(f"val{suffix}/aeloss", aeloss, + prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True) + if version.parse(pl.__version__) >= version.parse('1.4.0'): + del log_dict_ae[f"val{suffix}/rec_loss"] + self.log_dict(log_dict_ae) + self.log_dict(log_dict_disc) + return self.log_dict + + def configure_optimizers(self): + lr_d = self.learning_rate + lr_g = self.lr_g_factor*self.learning_rate + print("lr_d", lr_d) + print("lr_g", lr_g) + opt_ae = torch.optim.Adam(list(self.encoder.parameters())+ + list(self.decoder.parameters())+ + list(self.quantize.parameters())+ + list(self.quant_conv.parameters())+ + list(self.post_quant_conv.parameters()), + lr=lr_g, betas=(0.5, 0.9)) + opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(), + lr=lr_d, betas=(0.5, 0.9)) + + if self.scheduler_config is not None: + scheduler = instantiate_from_config(self.scheduler_config) + + print("Setting up LambdaLR scheduler...") + scheduler = [ + { + 'scheduler': LambdaLR(opt_ae, lr_lambda=scheduler.schedule), + 'interval': 'step', + 'frequency': 1 + }, + { + 'scheduler': LambdaLR(opt_disc, lr_lambda=scheduler.schedule), + 'interval': 'step', + 'frequency': 1 + }, + ] + return [opt_ae, opt_disc], scheduler + return [opt_ae, opt_disc], [] + + def get_last_layer(self): + return self.decoder.conv_out.weight + + def log_images(self, batch, only_inputs=False, plot_ema=False, **kwargs): + log = {} + x = self.get_input(batch, self.image_key) + x = x.to(self.device) + if only_inputs: + log["inputs"] = x + return log + xrec, _ = self(x) + if x.shape[1] > 3: + # colorize with random projection + assert xrec.shape[1] > 3 + x = self.to_rgb(x) + xrec = self.to_rgb(xrec) + log["inputs"] = x + log["reconstructions"] = xrec + if plot_ema: + with self.ema_scope(): + xrec_ema, _ = self(x) + if x.shape[1] > 3: + xrec_ema = self.to_rgb(xrec_ema) + log["reconstructions_ema"] = xrec_ema + return log + + def to_rgb(self, x): + assert self.image_key == "segmentation" + if not hasattr(self, "colorize"): + self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x)) + x = F.conv2d(x, weight=self.colorize) + x = 2.*(x-x.min())/(x.max()-x.min()) - 1. 
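+ # min-max normalize the randomly projected image into [-1, 1] for display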
+ return x + + +class VQModelInterface(VQModel): + def __init__(self, embed_dim, *args, **kwargs): + super().__init__(*args, embed_dim=embed_dim, **kwargs) + self.embed_dim = embed_dim + + def encode(self, x): + h = self.encoder(x) + h = self.quant_conv(h) + return h + + def decode(self, h, force_not_quantize=False): + # also go through quantization layer + if not force_not_quantize: + quant, emb_loss, info = self.quantize(h) + else: + quant = h + quant = self.post_quant_conv(quant) + dec = self.decoder(quant) + return dec + +ldm.models.autoencoder.VQModel = VQModel +ldm.models.autoencoder.VQModelInterface = VQModelInterface diff --git a/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py b/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py new file mode 100644 index 0000000000000000000000000000000000000000..04adc5eb2cfe9aa1d5f75e5653624456c5e37a47 --- /dev/null +++ b/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py @@ -0,0 +1,1443 @@ +# This script is copied from the compvis/stable-diffusion repo (aka the SD V1 repo) +# Original filename: ldm/models/diffusion/ddpm.py +# Its purpose is to reinstate the old DDPM logic, which works with VQ first stages, whereas the V2 one doesn't +# Some models such as LDSR require a VQ first stage to work correctly +# The classes are suffixed with "V1" and added back to the "ldm.models.diffusion.ddpm" module + +import torch +import torch.nn as nn +import numpy as np +import pytorch_lightning as pl +from torch.optim.lr_scheduler import LambdaLR +from einops import rearrange, repeat +from contextlib import contextmanager +from functools import partial +from tqdm import tqdm +from torchvision.utils import make_grid +from pytorch_lightning.utilities.distributed import rank_zero_only + +from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config +from ldm.modules.ema import LitEma +from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution +from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL +from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like +from ldm.models.diffusion.ddim import DDIMSampler + +import ldm.models.diffusion.ddpm + +__conditioning_keys__ = {'concat': 'c_concat', + 'crossattn': 'c_crossattn', + 'adm': 'y'} + + +def disabled_train(self, mode=True): + """Overwrite model.train with this function to make sure train/eval mode + does not change anymore.""" + return self + + +def uniform_on_device(r1, r2, shape, device): + return (r1 - r2) * torch.rand(*shape, device=device) + r2 + + +class DDPMV1(pl.LightningModule): + # classic DDPM with Gaussian diffusion, in image space + def __init__(self, + unet_config, + timesteps=1000, + beta_schedule="linear", + loss_type="l2", + ckpt_path=None, + ignore_keys=None, + load_only_unet=False, + monitor="val/loss", + use_ema=True, + first_stage_key="image", + image_size=256, + channels=3, + log_every_t=100, + clip_denoised=True, + linear_start=1e-4, + linear_end=2e-2, + cosine_s=8e-3, + given_betas=None, + original_elbo_weight=0., + v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta + l_simple_weight=1., + conditioning_key=None, + parameterization="eps", # all assuming fixed variance schedules + scheduler_config=None, + use_positional_encodings=False, + learn_logvar=False, + logvar_init=0., + ): + super().__init__() + assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"' + self.parameterization = parameterization + 
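# "eps" trains the model to predict the added noise; "x0" trains it to predict the clean image directly + 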
print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") + self.cond_stage_model = None + self.clip_denoised = clip_denoised + self.log_every_t = log_every_t + self.first_stage_key = first_stage_key + self.image_size = image_size # try conv? + self.channels = channels + self.use_positional_encodings = use_positional_encodings + self.model = DiffusionWrapperV1(unet_config, conditioning_key) + count_params(self.model, verbose=True) + self.use_ema = use_ema + if self.use_ema: + self.model_ema = LitEma(self.model) + print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") + + self.use_scheduler = scheduler_config is not None + if self.use_scheduler: + self.scheduler_config = scheduler_config + + self.v_posterior = v_posterior + self.original_elbo_weight = original_elbo_weight + self.l_simple_weight = l_simple_weight + + if monitor is not None: + self.monitor = monitor + if ckpt_path is not None: + self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys or [], only_model=load_only_unet) + + self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, + linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) + + self.loss_type = loss_type + + self.learn_logvar = learn_logvar + self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) + if self.learn_logvar: + self.logvar = nn.Parameter(self.logvar, requires_grad=True) + + + def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, + linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): + if exists(given_betas): + betas = given_betas + else: + betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, + cosine_s=cosine_s) + alphas = 1. - betas + alphas_cumprod = np.cumprod(alphas, axis=0) + alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) + + timesteps, = betas.shape + self.num_timesteps = int(timesteps) + self.linear_start = linear_start + self.linear_end = linear_end + assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' + + to_torch = partial(torch.tensor, dtype=torch.float32) + + self.register_buffer('betas', to_torch(betas)) + self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) + self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) + + # calculations for diffusion q(x_t | x_{t-1}) and others + self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) + self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) + self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) + self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) + self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) + + # calculations for posterior q(x_{t-1} | x_t, x_0) + posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( + 1. - alphas_cumprod) + self.v_posterior * betas + # above: equal to 1. / (1. / (1. 
- alpha_cumprod_tm1) + alpha_t / beta_t) + self.register_buffer('posterior_variance', to_torch(posterior_variance)) + # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain + self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) + self.register_buffer('posterior_mean_coef1', to_torch( + betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) + self.register_buffer('posterior_mean_coef2', to_torch( + (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) + + if self.parameterization == "eps": + lvlb_weights = self.betas ** 2 / ( + 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) + elif self.parameterization == "x0": + lvlb_weights = 0.5 * torch.sqrt(torch.Tensor(alphas_cumprod)) / (2. * (1 - torch.Tensor(alphas_cumprod))) + else: + raise NotImplementedError("mu not supported") + # TODO how to choose this term + lvlb_weights[0] = lvlb_weights[1] + self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) + assert not torch.isnan(self.lvlb_weights).any() + + @contextmanager + def ema_scope(self, context=None): + if self.use_ema: + self.model_ema.store(self.model.parameters()) + self.model_ema.copy_to(self.model) + if context is not None: + print(f"{context}: Switched to EMA weights") + try: + yield None + finally: + if self.use_ema: + self.model_ema.restore(self.model.parameters()) + if context is not None: + print(f"{context}: Restored training weights") + + def init_from_ckpt(self, path, ignore_keys=None, only_model=False): + sd = torch.load(path, map_location="cpu") + if "state_dict" in list(sd.keys()): + sd = sd["state_dict"] + keys = list(sd.keys()) + for k in keys: + for ik in ignore_keys or []: + if k.startswith(ik): + print("Deleting key {} from state_dict.".format(k)) + del sd[k] + missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( + sd, strict=False) + print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") + if missing: + print(f"Missing Keys: {missing}") + if unexpected: + print(f"Unexpected Keys: {unexpected}") + + def q_mean_variance(self, x_start, t): + """ + Get the distribution q(x_t | x_0). + :param x_start: the [N x C x ...] tensor of noiseless inputs. + :param t: the number of diffusion steps (minus 1). Here, 0 means one step. + :return: A tuple (mean, variance, log_variance), all of x_start's shape. 
+ """ + mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) + variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) + log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) + return mean, variance, log_variance + + def predict_start_from_noise(self, x_t, t, noise): + return ( + extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - + extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise + ) + + def q_posterior(self, x_start, x_t, t): + posterior_mean = ( + extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t + ) + posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) + posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) + return posterior_mean, posterior_variance, posterior_log_variance_clipped + + def p_mean_variance(self, x, t, clip_denoised: bool): + model_out = self.model(x, t) + if self.parameterization == "eps": + x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) + elif self.parameterization == "x0": + x_recon = model_out + if clip_denoised: + x_recon.clamp_(-1., 1.) + + model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) + return model_mean, posterior_variance, posterior_log_variance + + @torch.no_grad() + def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): + b, *_, device = *x.shape, x.device + model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) + noise = noise_like(x.shape, device, repeat_noise) + # no noise when t == 0 + nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) + return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise + + @torch.no_grad() + def p_sample_loop(self, shape, return_intermediates=False): + device = self.betas.device + b = shape[0] + img = torch.randn(shape, device=device) + intermediates = [img] + for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): + img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), + clip_denoised=self.clip_denoised) + if i % self.log_every_t == 0 or i == self.num_timesteps - 1: + intermediates.append(img) + if return_intermediates: + return img, intermediates + return img + + @torch.no_grad() + def sample(self, batch_size=16, return_intermediates=False): + image_size = self.image_size + channels = self.channels + return self.p_sample_loop((batch_size, channels, image_size, image_size), + return_intermediates=return_intermediates) + + def q_sample(self, x_start, t, noise=None): + noise = default(noise, lambda: torch.randn_like(x_start)) + return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) + + def get_loss(self, pred, target, mean=True): + if self.loss_type == 'l1': + loss = (target - pred).abs() + if mean: + loss = loss.mean() + elif self.loss_type == 'l2': + if mean: + loss = torch.nn.functional.mse_loss(target, pred) + else: + loss = torch.nn.functional.mse_loss(target, pred, reduction='none') + else: + raise NotImplementedError(f"unknown loss type '{self.loss_type}'") + + return loss + + def p_losses(self, x_start, t, noise=None): + noise = default(noise, lambda: torch.randn_like(x_start)) + 
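# forward-diffuse x_start to step t in closed form: x_t = sqrt(alphas_cumprod_t) * x_0 + sqrt(1 - alphas_cumprod_t) * noise + 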
x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) + model_out = self.model(x_noisy, t) + + loss_dict = {} + if self.parameterization == "eps": + target = noise + elif self.parameterization == "x0": + target = x_start + else: + raise NotImplementedError(f"Parameterization {self.parameterization} not yet supported") + + loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) + + log_prefix = 'train' if self.training else 'val' + + loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) + loss_simple = loss.mean() * self.l_simple_weight + + loss_vlb = (self.lvlb_weights[t] * loss).mean() + loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) + + loss = loss_simple + self.original_elbo_weight * loss_vlb + + loss_dict.update({f'{log_prefix}/loss': loss}) + + return loss, loss_dict + + def forward(self, x, *args, **kwargs): + # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size + # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' + t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() + return self.p_losses(x, t, *args, **kwargs) + + def get_input(self, batch, k): + x = batch[k] + if len(x.shape) == 3: + x = x[..., None] + x = rearrange(x, 'b h w c -> b c h w') + x = x.to(memory_format=torch.contiguous_format).float() + return x + + def shared_step(self, batch): + x = self.get_input(batch, self.first_stage_key) + loss, loss_dict = self(x) + return loss, loss_dict + + def training_step(self, batch, batch_idx): + loss, loss_dict = self.shared_step(batch) + + self.log_dict(loss_dict, prog_bar=True, + logger=True, on_step=True, on_epoch=True) + + self.log("global_step", self.global_step, + prog_bar=True, logger=True, on_step=True, on_epoch=False) + + if self.use_scheduler: + lr = self.optimizers().param_groups[0]['lr'] + self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) + + return loss + + @torch.no_grad() + def validation_step(self, batch, batch_idx): + _, loss_dict_no_ema = self.shared_step(batch) + with self.ema_scope(): + _, loss_dict_ema = self.shared_step(batch) + loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} + self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) + self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) + + def on_train_batch_end(self, *args, **kwargs): + if self.use_ema: + self.model_ema(self.model) + + def _get_rows_from_list(self, samples): + n_imgs_per_row = len(samples) + denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') + denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') + denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) + return denoise_grid + + @torch.no_grad() + def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): + log = {} + x = self.get_input(batch, self.first_stage_key) + N = min(x.shape[0], N) + n_row = min(x.shape[0], n_row) + x = x.to(self.device)[:N] + log["inputs"] = x + + # get diffusion row + diffusion_row = [] + x_start = x[:n_row] + + for t in range(self.num_timesteps): + if t % self.log_every_t == 0 or t == self.num_timesteps - 1: + t = repeat(torch.tensor([t]), '1 -> b', b=n_row) + t = t.to(self.device).long() + noise = torch.randn_like(x_start) + x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) + diffusion_row.append(x_noisy) + + log["diffusion_row"] = self._get_rows_from_list(diffusion_row) + + if sample: + # get denoise row + with 
self.ema_scope("Plotting"): + samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) + + log["samples"] = samples + log["denoise_row"] = self._get_rows_from_list(denoise_row) + + if return_keys: + if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: + return log + else: + return {key: log[key] for key in return_keys} + return log + + def configure_optimizers(self): + lr = self.learning_rate + params = list(self.model.parameters()) + if self.learn_logvar: + params = params + [self.logvar] + opt = torch.optim.AdamW(params, lr=lr) + return opt + + +class LatentDiffusionV1(DDPMV1): + """main class""" + def __init__(self, + first_stage_config, + cond_stage_config, + num_timesteps_cond=None, + cond_stage_key="image", + cond_stage_trainable=False, + concat_mode=True, + cond_stage_forward=None, + conditioning_key=None, + scale_factor=1.0, + scale_by_std=False, + *args, **kwargs): + self.num_timesteps_cond = default(num_timesteps_cond, 1) + self.scale_by_std = scale_by_std + assert self.num_timesteps_cond <= kwargs['timesteps'] + # for backwards compatibility after implementation of DiffusionWrapper + if conditioning_key is None: + conditioning_key = 'concat' if concat_mode else 'crossattn' + if cond_stage_config == '__is_unconditional__': + conditioning_key = None + ckpt_path = kwargs.pop("ckpt_path", None) + ignore_keys = kwargs.pop("ignore_keys", []) + super().__init__(*args, conditioning_key=conditioning_key, **kwargs) + self.concat_mode = concat_mode + self.cond_stage_trainable = cond_stage_trainable + self.cond_stage_key = cond_stage_key + try: + self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 + except Exception: + self.num_downs = 0 + if not scale_by_std: + self.scale_factor = scale_factor + else: + self.register_buffer('scale_factor', torch.tensor(scale_factor)) + self.instantiate_first_stage(first_stage_config) + self.instantiate_cond_stage(cond_stage_config) + self.cond_stage_forward = cond_stage_forward + self.clip_denoised = False + self.bbox_tokenizer = None + + self.restarted_from_ckpt = False + if ckpt_path is not None: + self.init_from_ckpt(ckpt_path, ignore_keys) + self.restarted_from_ckpt = True + + def make_cond_schedule(self, ): + self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long) + ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() + self.cond_ids[:self.num_timesteps_cond] = ids + + @rank_zero_only + @torch.no_grad() + def on_train_batch_start(self, batch, batch_idx, dataloader_idx): + # only for very first batch + if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt: + assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously' + # set rescale weight to 1./std of encodings + print("### USING STD-RESCALING ###") + x = super().get_input(batch, self.first_stage_key) + x = x.to(self.device) + encoder_posterior = self.encode_first_stage(x) + z = self.get_first_stage_encoding(encoder_posterior).detach() + del self.scale_factor + self.register_buffer('scale_factor', 1. 
/ z.flatten().std()) + print(f"setting self.scale_factor to {self.scale_factor}") + print("### USING STD-RESCALING ###") + + def register_schedule(self, + given_betas=None, beta_schedule="linear", timesteps=1000, + linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): + super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) + + self.shorten_cond_schedule = self.num_timesteps_cond > 1 + if self.shorten_cond_schedule: + self.make_cond_schedule() + + def instantiate_first_stage(self, config): + model = instantiate_from_config(config) + self.first_stage_model = model.eval() + self.first_stage_model.train = disabled_train + for param in self.first_stage_model.parameters(): + param.requires_grad = False + + def instantiate_cond_stage(self, config): + if not self.cond_stage_trainable: + if config == "__is_first_stage__": + print("Using first stage also as cond stage.") + self.cond_stage_model = self.first_stage_model + elif config == "__is_unconditional__": + print(f"Training {self.__class__.__name__} as an unconditional model.") + self.cond_stage_model = None + # self.be_unconditional = True + else: + model = instantiate_from_config(config) + self.cond_stage_model = model.eval() + self.cond_stage_model.train = disabled_train + for param in self.cond_stage_model.parameters(): + param.requires_grad = False + else: + assert config != '__is_first_stage__' + assert config != '__is_unconditional__' + model = instantiate_from_config(config) + self.cond_stage_model = model + + def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False): + denoise_row = [] + for zd in tqdm(samples, desc=desc): + denoise_row.append(self.decode_first_stage(zd.to(self.device), + force_not_quantize=force_no_decoder_quantization)) + n_imgs_per_row = len(denoise_row) + denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W + denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') + denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') + denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) + return denoise_grid + + def get_first_stage_encoding(self, encoder_posterior): + if isinstance(encoder_posterior, DiagonalGaussianDistribution): + z = encoder_posterior.sample() + elif isinstance(encoder_posterior, torch.Tensor): + z = encoder_posterior + else: + raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented") + return self.scale_factor * z + + def get_learned_conditioning(self, c): + if self.cond_stage_forward is None: + if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode): + c = self.cond_stage_model.encode(c) + if isinstance(c, DiagonalGaussianDistribution): + c = c.mode() + else: + c = self.cond_stage_model(c) + else: + assert hasattr(self.cond_stage_model, self.cond_stage_forward) + c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) + return c + + def meshgrid(self, h, w): + y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) + x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) + + arr = torch.cat([y, x], dim=-1) + return arr + + def delta_border(self, h, w): + """ + :param h: height + :param w: width + :return: normalized distance to image border, + with min distance = 0 at border and max dist = 0.5 at image center + """ + lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) + arr = self.meshgrid(h, w) / lower_right_corner + dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] + dist_right_down = 
torch.min(1 - arr, dim=-1, keepdims=True)[0] + edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0] + return edge_dist + + def get_weighting(self, h, w, Ly, Lx, device): + weighting = self.delta_border(h, w) + weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"], + self.split_input_params["clip_max_weight"], ) + weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) + + if self.split_input_params["tie_braker"]: + L_weighting = self.delta_border(Ly, Lx) + L_weighting = torch.clip(L_weighting, + self.split_input_params["clip_min_tie_weight"], + self.split_input_params["clip_max_tie_weight"]) + + L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) + weighting = weighting * L_weighting + return weighting + + def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code + """ + :param x: img of size (bs, c, h, w) + :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) + """ + bs, nc, h, w = x.shape + + # number of crops in image + Ly = (h - kernel_size[0]) // stride[0] + 1 + Lx = (w - kernel_size[1]) // stride[1] + 1 + + if uf == 1 and df == 1: + fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) + unfold = torch.nn.Unfold(**fold_params) + + fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) + + weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype) + normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap + weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) + + elif uf > 1 and df == 1: + fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) + unfold = torch.nn.Unfold(**fold_params) + + fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), + dilation=1, padding=0, + stride=(stride[0] * uf, stride[1] * uf)) + fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2) + + weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype) + normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap + weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)) + + elif df > 1 and uf == 1: + fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) + unfold = torch.nn.Unfold(**fold_params) + + fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df), + dilation=1, padding=0, + stride=(stride[0] // df, stride[1] // df)) + fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2) + + weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype) + normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap + weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)) + + else: + raise NotImplementedError + + return fold, unfold, normalization, weighting + + @torch.no_grad() + def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, + cond_key=None, return_original_cond=False, bs=None): + x = super().get_input(batch, k) + if bs is not None: + x = x[:bs] + x = x.to(self.device) + encoder_posterior = self.encode_first_stage(x) + z = self.get_first_stage_encoding(encoder_posterior).detach() + + if self.model.conditioning_key is not None: + if cond_key is None: + cond_key = 
self.cond_stage_key + if cond_key != self.first_stage_key: + if cond_key in ['caption', 'coordinates_bbox']: + xc = batch[cond_key] + elif cond_key == 'class_label': + xc = batch + else: + xc = super().get_input(batch, cond_key).to(self.device) + else: + xc = x + if not self.cond_stage_trainable or force_c_encode: + if isinstance(xc, dict) or isinstance(xc, list): + # import pudb; pudb.set_trace() + c = self.get_learned_conditioning(xc) + else: + c = self.get_learned_conditioning(xc.to(self.device)) + else: + c = xc + if bs is not None: + c = c[:bs] + + if self.use_positional_encodings: + pos_x, pos_y = self.compute_latent_shifts(batch) + ckey = __conditioning_keys__[self.model.conditioning_key] + c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y} + + else: + c = None + xc = None + if self.use_positional_encodings: + pos_x, pos_y = self.compute_latent_shifts(batch) + c = {'pos_x': pos_x, 'pos_y': pos_y} + out = [z, c] + if return_first_stage_outputs: + xrec = self.decode_first_stage(z) + out.extend([x, xrec]) + if return_original_cond: + out.append(xc) + return out + + @torch.no_grad() + def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): + if predict_cids: + if z.dim() == 4: + z = torch.argmax(z.exp(), dim=1).long() + z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) + z = rearrange(z, 'b h w c -> b c h w').contiguous() + + z = 1. / self.scale_factor * z + + if hasattr(self, "split_input_params"): + if self.split_input_params["patch_distributed_vq"]: + ks = self.split_input_params["ks"] # eg. (128, 128) + stride = self.split_input_params["stride"] # eg. (64, 64) + uf = self.split_input_params["vqf"] + bs, nc, h, w = z.shape + if ks[0] > h or ks[1] > w: + ks = (min(ks[0], h), min(ks[1], w)) + print("reducing Kernel") + + if stride[0] > h or stride[1] > w: + stride = (min(stride[0], h), min(stride[1], w)) + print("reducing stride") + + fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf) + + z = unfold(z) # (bn, nc * prod(**ks), L) + # 1. Reshape to img shape + z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) + + # 2. apply model loop over last dim + if isinstance(self.first_stage_model, VQModelInterface): + output_list = [self.first_stage_model.decode(z[:, :, :, :, i], + force_not_quantize=predict_cids or force_not_quantize) + for i in range(z.shape[-1])] + else: + + output_list = [self.first_stage_model.decode(z[:, :, :, :, i]) + for i in range(z.shape[-1])] + + o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) + o = o * weighting + # Reverse 1. 
reshape to img shape + o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) + # stitch crops together + decoded = fold(o) + decoded = decoded / normalization # norm is shape (1, 1, h, w) + return decoded + else: + if isinstance(self.first_stage_model, VQModelInterface): + return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) + else: + return self.first_stage_model.decode(z) + + else: + if isinstance(self.first_stage_model, VQModelInterface): + return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) + else: + return self.first_stage_model.decode(z) + + # same as above but without decorator + def differentiable_decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): + if predict_cids: + if z.dim() == 4: + z = torch.argmax(z.exp(), dim=1).long() + z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) + z = rearrange(z, 'b h w c -> b c h w').contiguous() + + z = 1. / self.scale_factor * z + + if hasattr(self, "split_input_params"): + if self.split_input_params["patch_distributed_vq"]: + ks = self.split_input_params["ks"] # eg. (128, 128) + stride = self.split_input_params["stride"] # eg. (64, 64) + uf = self.split_input_params["vqf"] + bs, nc, h, w = z.shape + if ks[0] > h or ks[1] > w: + ks = (min(ks[0], h), min(ks[1], w)) + print("reducing Kernel") + + if stride[0] > h or stride[1] > w: + stride = (min(stride[0], h), min(stride[1], w)) + print("reducing stride") + + fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf) + + z = unfold(z) # (bn, nc * prod(**ks), L) + # 1. Reshape to img shape + z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) + + # 2. apply model loop over last dim + if isinstance(self.first_stage_model, VQModelInterface): + output_list = [self.first_stage_model.decode(z[:, :, :, :, i], + force_not_quantize=predict_cids or force_not_quantize) + for i in range(z.shape[-1])] + else: + + output_list = [self.first_stage_model.decode(z[:, :, :, :, i]) + for i in range(z.shape[-1])] + + o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) + o = o * weighting + # Reverse 1. reshape to img shape + o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) + # stitch crops together + decoded = fold(o) + decoded = decoded / normalization # norm is shape (1, 1, h, w) + return decoded + else: + if isinstance(self.first_stage_model, VQModelInterface): + return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) + else: + return self.first_stage_model.decode(z) + + else: + if isinstance(self.first_stage_model, VQModelInterface): + return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) + else: + return self.first_stage_model.decode(z) + + @torch.no_grad() + def encode_first_stage(self, x): + if hasattr(self, "split_input_params"): + if self.split_input_params["patch_distributed_vq"]: + ks = self.split_input_params["ks"] # eg. (128, 128) + stride = self.split_input_params["stride"] # eg. 
(64, 64) + df = self.split_input_params["vqf"] + self.split_input_params['original_image_size'] = x.shape[-2:] + bs, nc, h, w = x.shape + if ks[0] > h or ks[1] > w: + ks = (min(ks[0], h), min(ks[1], w)) + print("reducing Kernel") + + if stride[0] > h or stride[1] > w: + stride = (min(stride[0], h), min(stride[1], w)) + print("reducing stride") + + fold, unfold, normalization, weighting = self.get_fold_unfold(x, ks, stride, df=df) + z = unfold(x) # (bn, nc * prod(**ks), L) + # Reshape to img shape + z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) + + output_list = [self.first_stage_model.encode(z[:, :, :, :, i]) + for i in range(z.shape[-1])] + + o = torch.stack(output_list, axis=-1) + o = o * weighting + + # Reverse reshape to img shape + o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) + # stitch crops together + decoded = fold(o) + decoded = decoded / normalization + return decoded + + else: + return self.first_stage_model.encode(x) + else: + return self.first_stage_model.encode(x) + + def shared_step(self, batch, **kwargs): + x, c = self.get_input(batch, self.first_stage_key) + loss = self(x, c) + return loss + + def forward(self, x, c, *args, **kwargs): + t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() + if self.model.conditioning_key is not None: + assert c is not None + if self.cond_stage_trainable: + c = self.get_learned_conditioning(c) + if self.shorten_cond_schedule: # TODO: drop this option + tc = self.cond_ids[t].to(self.device) + c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) + return self.p_losses(x, c, t, *args, **kwargs) + + def apply_model(self, x_noisy, t, cond, return_ids=False): + + if isinstance(cond, dict): + # hybrid case, cond is expected to be a dict + pass + else: + if not isinstance(cond, list): + cond = [cond] + key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn' + cond = {key: cond} + + if hasattr(self, "split_input_params"): + assert len(cond) == 1 # todo can only deal with one conditioning atm + assert not return_ids + ks = self.split_input_params["ks"] # eg. (128, 128) + stride = self.split_input_params["stride"] # eg. (64, 64) + + h, w = x_noisy.shape[-2:] + + fold, unfold, normalization, weighting = self.get_fold_unfold(x_noisy, ks, stride) + + z = unfold(x_noisy) # (bn, nc * prod(**ks), L) + # Reshape to img shape + z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) + z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])] + + if self.cond_stage_key in ["image", "LR_image", "segmentation", + 'bbox_img'] and self.model.conditioning_key: # todo check for completeness + c_key = next(iter(cond.keys())) # get key + c = next(iter(cond.values())) # get value + assert (len(c) == 1) # todo extend to list with more than one elem + c = c[0] # get element + + c = unfold(c) + c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1])) # (bn, nc, ks[0], ks[1], L ) + + cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])] + + elif self.cond_stage_key == 'coordinates_bbox': + assert 'original_image_size' in self.split_input_params, 'BoundingBoxRescaling is missing original_image_size' + + # assuming padding of unfold is always 0 and its dilation is always 1 + n_patches_per_row = int((w - ks[0]) / stride[0] + 1) + full_img_h, full_img_w = self.split_input_params['original_image_size'] + # as we are operating on latents, we need the factor from the original image size to the + # spatial latent size to properly rescale the crops for regenerating the bbox annotations + num_downs = self.first_stage_model.encoder.num_resolutions - 1 + rescale_latent = 2 ** (num_downs) + + # get top left positions of patches as conforming for the bbox tokenizer, therefore we + # need to rescale the tl patch coordinates to be in between (0,1) + tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w, + rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h) + for patch_nr in range(z.shape[-1])] + + # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w) + patch_limits = [(x_tl, y_tl, + rescale_latent * ks[0] / full_img_w, + rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates] + # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates] + + # tokenize crop coordinates for the bounding boxes of the respective patches + patch_limits_tknzd = [torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to(self.device) + for bbox in patch_limits] # list of length l with tensors of shape (1, 2) + print(patch_limits_tknzd[0].shape) + # cut tknzd crop position from conditioning + assert isinstance(cond, dict), 'cond must be dict to be fed into model' + cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device) + print(cut_cond.shape) + + adapted_cond = torch.stack([torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd]) + adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n') + print(adapted_cond.shape) + adapted_cond = self.get_learned_conditioning(adapted_cond) + print(adapted_cond.shape) + adapted_cond = rearrange(adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1]) + print(adapted_cond.shape) + + cond_list = [{'c_crossattn': [e]} for e in adapted_cond] + + else: + cond_list = [cond for i in range(z.shape[-1])] # Todo make this more efficient + + # apply model by loop over crops + output_list = [self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1])] + assert not isinstance(output_list[0], + tuple) # todo can't deal with multiple model outputs; check this never happens + + o = torch.stack(output_list, axis=-1) + o = o * weighting + # Reverse reshape to img shape + o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) + # stitch crops together + x_recon = fold(o) / normalization + + else: + x_recon = self.model(x_noisy, t, **cond) + + if isinstance(x_recon, tuple) and not return_ids: + return x_recon[0] + else: + return x_recon
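[editor's note: apply_model above first normalizes whatever conditioning it receives into the dict form consumed by DiffusionWrapperV1 further down. A condensed sketch of that plumbing; tensor shapes here are assumed examples, not taken from this diff:]

import torch

def wrap_cond(cond, conditioning_key):
    if isinstance(cond, dict):        # hybrid case: already {'c_concat': [...], 'c_crossattn': [...]}
        return cond
    if not isinstance(cond, list):
        cond = [cond]
    key = 'c_concat' if conditioning_key == 'concat' else 'c_crossattn'
    return {key: cond}

x = torch.randn(2, 4, 32, 32)                 # noisy latent
c = torch.randn(2, 77, 768)                   # e.g. text-encoder embeddings
cond = wrap_cond(c, 'crossattn')
xc = torch.cat([x] + cond.get('c_concat', []), dim=1)    # 'concat' -> channel concat
context = torch.cat(cond['c_crossattn'], dim=1) if 'c_crossattn' in cond else None
print(xc.shape, context.shape)                # the wrapper then calls unet(xc, t, context=context)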
+ + def _predict_eps_from_xstart(self, x_t, t, pred_xstart): + return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ + extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) + + def _prior_bpd(self, x_start): + """ + Get the prior KL term for the variational lower-bound, measured in + bits-per-dim. + This term can't be optimized, as it only depends on the encoder. + :param x_start: the [N x C x ...] tensor of inputs. + :return: a batch of [N] KL values (in bits), one per batch element. + """ + batch_size = x_start.shape[0] + t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) + qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) + kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0) + return mean_flat(kl_prior) / np.log(2.0) + + def p_losses(self, x_start, cond, t, noise=None): + noise = default(noise, lambda: torch.randn_like(x_start)) + x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) + model_output = self.apply_model(x_noisy, t, cond) + + loss_dict = {} + prefix = 'train' if self.training else 'val' + + if self.parameterization == "x0": + target = x_start + elif self.parameterization == "eps": + target = noise + else: + raise NotImplementedError() + + loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3]) + loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()}) + + logvar_t = self.logvar[t].to(self.device) + loss = loss_simple / torch.exp(logvar_t) + logvar_t + # loss = loss_simple / torch.exp(self.logvar) + self.logvar + if self.learn_logvar: + loss_dict.update({f'{prefix}/loss_gamma': loss.mean()}) + loss_dict.update({'logvar': self.logvar.data.mean()}) + + loss = self.l_simple_weight * loss.mean() + + loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3)) + loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() + loss_dict.update({f'{prefix}/loss_vlb': loss_vlb}) + loss += (self.original_elbo_weight * loss_vlb) + loss_dict.update({f'{prefix}/loss': loss}) + + return loss, loss_dict + + def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False, + return_x0=False, score_corrector=None, corrector_kwargs=None): + t_in = t + model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) + + if score_corrector is not None: + assert self.parameterization == "eps" + model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs) + + if return_codebook_ids: + model_out, logits = model_out + + if self.parameterization == "eps": + x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) + elif self.parameterization == "x0": + x_recon = model_out + else: + raise NotImplementedError() + + if clip_denoised: + x_recon.clamp_(-1., 1.)
+ if quantize_denoised: + x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) + model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) + if return_codebook_ids: + return model_mean, posterior_variance, posterior_log_variance, logits + elif return_x0: + return model_mean, posterior_variance, posterior_log_variance, x_recon + else: + return model_mean, posterior_variance, posterior_log_variance + + @torch.no_grad() + def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False, + return_codebook_ids=False, quantize_denoised=False, return_x0=False, + temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None): + b, *_, device = *x.shape, x.device + outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised, + return_codebook_ids=return_codebook_ids, + quantize_denoised=quantize_denoised, + return_x0=return_x0, + score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) + if return_codebook_ids: + raise DeprecationWarning("Support dropped.") + model_mean, _, model_log_variance, logits = outputs + elif return_x0: + model_mean, _, model_log_variance, x0 = outputs + else: + model_mean, _, model_log_variance = outputs + + noise = noise_like(x.shape, device, repeat_noise) * temperature + if noise_dropout > 0.: + noise = torch.nn.functional.dropout(noise, p=noise_dropout) + # no noise when t == 0 + nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) + + if return_codebook_ids: + return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1) + if return_x0: + return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0 + else: + return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise + + @torch.no_grad() + def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False, + img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0., + score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None, + log_every_t=None): + if not log_every_t: + log_every_t = self.log_every_t + timesteps = self.num_timesteps + if batch_size is not None: + b = batch_size if batch_size is not None else shape[0] + shape = [batch_size] + list(shape) + else: + b = batch_size = shape[0] + if x_T is None: + img = torch.randn(shape, device=self.device) + else: + img = x_T + intermediates = [] + if cond is not None: + if isinstance(cond, dict): + cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else + [x[:batch_size] for x in cond[key]] for key in cond} + else: + cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] + + if start_T is not None: + timesteps = min(timesteps, start_T) + iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation', + total=timesteps) if verbose else reversed( + range(0, timesteps)) + if type(temperature) == float: + temperature = [temperature] * timesteps + + for i in iterator: + ts = torch.full((b,), i, device=self.device, dtype=torch.long) + if self.shorten_cond_schedule: + assert self.model.conditioning_key != 'hybrid' + tc = self.cond_ids[ts].to(cond.device) + cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) + + img, x0_partial = self.p_sample(img, cond, ts, + clip_denoised=self.clip_denoised, + quantize_denoised=quantize_denoised, return_x0=True, + temperature=temperature[i], noise_dropout=noise_dropout, + 
score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) + if mask is not None: + assert x0 is not None + img_orig = self.q_sample(x0, ts) + img = img_orig * mask + (1. - mask) * img + + if i % log_every_t == 0 or i == timesteps - 1: + intermediates.append(x0_partial) + if callback: + callback(i) + if img_callback: + img_callback(img, i) + return img, intermediates + + @torch.no_grad() + def p_sample_loop(self, cond, shape, return_intermediates=False, + x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, + mask=None, x0=None, img_callback=None, start_T=None, + log_every_t=None): + + if not log_every_t: + log_every_t = self.log_every_t + device = self.betas.device + b = shape[0] + if x_T is None: + img = torch.randn(shape, device=device) + else: + img = x_T + + intermediates = [img] + if timesteps is None: + timesteps = self.num_timesteps + + if start_T is not None: + timesteps = min(timesteps, start_T) + iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( + range(0, timesteps)) + + if mask is not None: + assert x0 is not None + assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match + + for i in iterator: + ts = torch.full((b,), i, device=device, dtype=torch.long) + if self.shorten_cond_schedule: + assert self.model.conditioning_key != 'hybrid' + tc = self.cond_ids[ts].to(cond.device) + cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) + + img = self.p_sample(img, cond, ts, + clip_denoised=self.clip_denoised, + quantize_denoised=quantize_denoised) + if mask is not None: + img_orig = self.q_sample(x0, ts) + img = img_orig * mask + (1. - mask) * img + + if i % log_every_t == 0 or i == timesteps - 1: + intermediates.append(img) + if callback: + callback(i) + if img_callback: + img_callback(img, i) + + if return_intermediates: + return img, intermediates + return img + + @torch.no_grad() + def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, + verbose=True, timesteps=None, quantize_denoised=False, + mask=None, x0=None, shape=None,**kwargs): + if shape is None: + shape = (batch_size, self.channels, self.image_size, self.image_size) + if cond is not None: + if isinstance(cond, dict): + cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else + [x[:batch_size] for x in cond[key]] for key in cond} + else: + cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] + return self.p_sample_loop(cond, + shape, + return_intermediates=return_intermediates, x_T=x_T, + verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, + mask=mask, x0=x0) + + @torch.no_grad() + def sample_log(self,cond,batch_size,ddim, ddim_steps,**kwargs): + + if ddim: + ddim_sampler = DDIMSampler(self) + shape = (self.channels, self.image_size, self.image_size) + samples, intermediates =ddim_sampler.sample(ddim_steps,batch_size, + shape,cond,verbose=False,**kwargs) + + else: + samples, intermediates = self.sample(cond=cond, batch_size=batch_size, + return_intermediates=True,**kwargs) + + return samples, intermediates + + + @torch.no_grad() + def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None, + quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, + plot_diffusion_rows=True, **kwargs): + + use_ddim = ddim_steps is not None + + log = {} + z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, + 
return_first_stage_outputs=True, + force_c_encode=True, + return_original_cond=True, + bs=N) + N = min(x.shape[0], N) + n_row = min(x.shape[0], n_row) + log["inputs"] = x + log["reconstruction"] = xrec + if self.model.conditioning_key is not None: + if hasattr(self.cond_stage_model, "decode"): + xc = self.cond_stage_model.decode(c) + log["conditioning"] = xc + elif self.cond_stage_key in ["caption"]: + xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["caption"]) + log["conditioning"] = xc + elif self.cond_stage_key == 'class_label': + xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"]) + log['conditioning'] = xc + elif isimage(xc): + log["conditioning"] = xc + if ismap(xc): + log["original_conditioning"] = self.to_rgb(xc) + + if plot_diffusion_rows: + # get diffusion row + diffusion_row = [] + z_start = z[:n_row] + for t in range(self.num_timesteps): + if t % self.log_every_t == 0 or t == self.num_timesteps - 1: + t = repeat(torch.tensor([t]), '1 -> b', b=n_row) + t = t.to(self.device).long() + noise = torch.randn_like(z_start) + z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) + diffusion_row.append(self.decode_first_stage(z_noisy)) + + diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W + diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w') + diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w') + diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) + log["diffusion_row"] = diffusion_grid + + if sample: + # get denoise row + with self.ema_scope("Plotting"): + samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, + ddim_steps=ddim_steps,eta=ddim_eta) + # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) + x_samples = self.decode_first_stage(samples) + log["samples"] = x_samples + if plot_denoise_rows: + denoise_grid = self._get_denoise_row_from_list(z_denoise_row) + log["denoise_row"] = denoise_grid + + if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance( + self.first_stage_model, IdentityFirstStage): + # also display when quantizing x0 while sampling + with self.ema_scope("Plotting Quantized Denoised"): + samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, + ddim_steps=ddim_steps,eta=ddim_eta, + quantize_denoised=True) + # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True, + # quantize_denoised=True) + x_samples = self.decode_first_stage(samples.to(self.device)) + log["samples_x0_quantized"] = x_samples + + if inpaint: + # make a simple center square + h, w = z.shape[2], z.shape[3] + mask = torch.ones(N, h, w).to(self.device) + # zeros will be filled in + mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0. + mask = mask[:, None, ...] 
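[editor's note: the mask just built keeps the border (ones) and regenerates the center square (zeros); during sampling, p_sample_loop blends the noised original with the current sample at every step as img_orig * mask + (1 - mask) * img. A toy restatement with assumed sizes:]

import torch

N, h, w = 2, 64, 64
mask = torch.ones(N, h, w)
mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0.   # center square is regenerated
mask = mask[:, None, ...]                            # (N, 1, h, w), broadcasts over channels

img = torch.randn(N, 4, h, w)        # current denoising state
img_orig = torch.randn(N, 4, h, w)   # stands in for q_sample(x0, ts)
img = img_orig * mask + (1. - mask) * img            # keep context, fill in the hole
print(img.shape)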
+ with self.ema_scope("Plotting Inpaint"): + + samples, _ = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, eta=ddim_eta, + ddim_steps=ddim_steps, x0=z[:N], mask=mask) + x_samples = self.decode_first_stage(samples.to(self.device)) + log["samples_inpainting"] = x_samples + log["mask"] = mask + + # outpaint + with self.ema_scope("Plotting Outpaint"): + samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,eta=ddim_eta, + ddim_steps=ddim_steps, x0=z[:N], mask=mask) + x_samples = self.decode_first_stage(samples.to(self.device)) + log["samples_outpainting"] = x_samples + + if plot_progressive_rows: + with self.ema_scope("Plotting Progressives"): + img, progressives = self.progressive_denoising(c, + shape=(self.channels, self.image_size, self.image_size), + batch_size=N) + prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation") + log["progressive_row"] = prog_row + + if return_keys: + if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: + return log + else: + return {key: log[key] for key in return_keys} + return log + + def configure_optimizers(self): + lr = self.learning_rate + params = list(self.model.parameters()) + if self.cond_stage_trainable: + print(f"{self.__class__.__name__}: Also optimizing conditioner params!") + params = params + list(self.cond_stage_model.parameters()) + if self.learn_logvar: + print('Diffusion model optimizing logvar') + params.append(self.logvar) + opt = torch.optim.AdamW(params, lr=lr) + if self.use_scheduler: + assert 'target' in self.scheduler_config + scheduler = instantiate_from_config(self.scheduler_config) + + print("Setting up LambdaLR scheduler...") + scheduler = [ + { + 'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule), + 'interval': 'step', + 'frequency': 1 + }] + return [opt], scheduler + return opt + + @torch.no_grad() + def to_rgb(self, x): + x = x.float() + if not hasattr(self, "colorize"): + self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x) + x = nn.functional.conv2d(x, weight=self.colorize) + x = 2. * (x - x.min()) / (x.max() - x.min()) - 1. 
+ return x + + +class DiffusionWrapperV1(pl.LightningModule): + def __init__(self, diff_model_config, conditioning_key): + super().__init__() + self.diffusion_model = instantiate_from_config(diff_model_config) + self.conditioning_key = conditioning_key + assert self.conditioning_key in [None, 'concat', 'crossattn', 'hybrid', 'adm'] + + def forward(self, x, t, c_concat: list = None, c_crossattn: list = None): + if self.conditioning_key is None: + out = self.diffusion_model(x, t) + elif self.conditioning_key == 'concat': + xc = torch.cat([x] + c_concat, dim=1) + out = self.diffusion_model(xc, t) + elif self.conditioning_key == 'crossattn': + cc = torch.cat(c_crossattn, 1) + out = self.diffusion_model(x, t, context=cc) + elif self.conditioning_key == 'hybrid': + xc = torch.cat([x] + c_concat, dim=1) + cc = torch.cat(c_crossattn, 1) + out = self.diffusion_model(xc, t, context=cc) + elif self.conditioning_key == 'adm': + cc = c_crossattn[0] + out = self.diffusion_model(x, t, y=cc) + else: + raise NotImplementedError() + + return out + + +class Layout2ImgDiffusionV1(LatentDiffusionV1): + # TODO: move all layout-specific hacks to this class + def __init__(self, cond_stage_key, *args, **kwargs): + assert cond_stage_key == 'coordinates_bbox', 'Layout2ImgDiffusion only for cond_stage_key="coordinates_bbox"' + super().__init__(*args, cond_stage_key=cond_stage_key, **kwargs) + + def log_images(self, batch, N=8, *args, **kwargs): + logs = super().log_images(*args, batch=batch, N=N, **kwargs) + + key = 'train' if self.training else 'validation' + dset = self.trainer.datamodule.datasets[key] + mapper = dset.conditional_builders[self.cond_stage_key] + + bbox_imgs = [] + map_fn = lambda catno: dset.get_textual_label(dset.get_category_id(catno)) + for tknzd_bbox in batch[self.cond_stage_key][:N]: + bboximg = mapper.plot(tknzd_bbox.detach().cpu(), map_fn, (256, 256)) + bbox_imgs.append(bboximg) + + cond_img = torch.stack(bbox_imgs, dim=0) + logs['bbox_image'] = cond_img + return logs + +ldm.models.diffusion.ddpm.DDPMV1 = DDPMV1 +ldm.models.diffusion.ddpm.LatentDiffusionV1 = LatentDiffusionV1 +ldm.models.diffusion.ddpm.DiffusionWrapperV1 = DiffusionWrapperV1 +ldm.models.diffusion.ddpm.Layout2ImgDiffusionV1 = Layout2ImgDiffusionV1 diff --git a/extensions-builtin/LDSR/vqvae_quantize.py b/extensions-builtin/LDSR/vqvae_quantize.py new file mode 100644 index 0000000000000000000000000000000000000000..dd14b8fda5ce25a8cea8b70eb1d387b9c46c80d8 --- /dev/null +++ b/extensions-builtin/LDSR/vqvae_quantize.py @@ -0,0 +1,147 @@ +# Vendored from https://raw.githubusercontent.com/CompVis/taming-transformers/24268930bf1dce879235a7fddd0b2355b84d7ea6/taming/modules/vqvae/quantize.py, +# where the license is as follows: +# +# Copyright (c) 2020 Patrick Esser and Robin Rombach and Björn Ommer +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE +# OR OTHER DEALINGS IN THE SOFTWARE./ + +import torch +import torch.nn as nn +import numpy as np +from einops import rearrange + + +class VectorQuantizer2(nn.Module): + """ + Improved version over VectorQuantizer, can be used as a drop-in replacement. Mostly + avoids costly matrix multiplications and allows for post-hoc remapping of indices. + """ + + # NOTE: due to a bug the beta term was applied to the wrong term. for + # backwards compatibility we use the buggy version by default, but you can + # specify legacy=False to fix it. + def __init__(self, n_e, e_dim, beta, remap=None, unknown_index="random", + sane_index_shape=False, legacy=True): + super().__init__() + self.n_e = n_e + self.e_dim = e_dim + self.beta = beta + self.legacy = legacy + + self.embedding = nn.Embedding(self.n_e, self.e_dim) + self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e) + + self.remap = remap + if self.remap is not None: + self.register_buffer("used", torch.tensor(np.load(self.remap))) + self.re_embed = self.used.shape[0] + self.unknown_index = unknown_index # "random" or "extra" or integer + if self.unknown_index == "extra": + self.unknown_index = self.re_embed + self.re_embed = self.re_embed + 1 + print(f"Remapping {self.n_e} indices to {self.re_embed} indices. " + f"Using {self.unknown_index} for unknown indices.") + else: + self.re_embed = n_e + + self.sane_index_shape = sane_index_shape + + def remap_to_used(self, inds): + ishape = inds.shape + assert len(ishape) > 1 + inds = inds.reshape(ishape[0], -1) + used = self.used.to(inds) + match = (inds[:, :, None] == used[None, None, ...]).long() + new = match.argmax(-1) + unknown = match.sum(2) < 1 + if self.unknown_index == "random": + new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device) + else: + new[unknown] = self.unknown_index + return new.reshape(ishape) + + def unmap_to_all(self, inds): + ishape = inds.shape + assert len(ishape) > 1 + inds = inds.reshape(ishape[0], -1) + used = self.used.to(inds) + if self.re_embed > self.used.shape[0]: # extra token + inds[inds >= self.used.shape[0]] = 0 # simply set to zero + back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds) + return back.reshape(ishape) + + def forward(self, z, temp=None, rescale_logits=False, return_logits=False): + assert temp is None or temp == 1.0, "Only for interface compatible with Gumbel" + assert rescale_logits is False, "Only for interface compatible with Gumbel" + assert return_logits is False, "Only for interface compatible with Gumbel" + # reshape z -> (batch, height, width, channel) and flatten + z = rearrange(z, 'b c h w -> b h w c').contiguous() + z_flattened = z.view(-1, self.e_dim) + # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z + + d = torch.sum(z_flattened ** 2, dim=1, keepdim=True) + \ + torch.sum(self.embedding.weight ** 2, dim=1) - 2 * \ + torch.einsum('bd,dn->bn', z_flattened, rearrange(self.embedding.weight, 'n d -> d n')) + + min_encoding_indices = torch.argmin(d, dim=1) + z_q = self.embedding(min_encoding_indices).view(z.shape) + 
perplexity = None + min_encodings = None + + # compute loss for embedding + if not self.legacy: + loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + \ + torch.mean((z_q - z.detach()) ** 2) + else: + loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * \ + torch.mean((z_q - z.detach()) ** 2) + + # preserve gradients + z_q = z + (z_q - z).detach() + + # reshape back to match original input shape + z_q = rearrange(z_q, 'b h w c -> b c h w').contiguous() + + if self.remap is not None: + min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1) # add batch axis + min_encoding_indices = self.remap_to_used(min_encoding_indices) + min_encoding_indices = min_encoding_indices.reshape(-1, 1) # flatten + + if self.sane_index_shape: + min_encoding_indices = min_encoding_indices.reshape( + z_q.shape[0], z_q.shape[2], z_q.shape[3]) + + return z_q, loss, (perplexity, min_encodings, min_encoding_indices) + + def get_codebook_entry(self, indices, shape): + # shape specifying (batch, height, width, channel) + if self.remap is not None: + indices = indices.reshape(shape[0], -1) # add batch axis + indices = self.unmap_to_all(indices) + indices = indices.reshape(-1) # flatten again + + # get quantized latent vectors + z_q = self.embedding(indices) + + if shape is not None: + z_q = z_q.view(shape) + # reshape back to match original input shape + z_q = z_q.permute(0, 3, 1, 2).contiguous() + + return z_q
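[editor's note: a minimal usage sketch for the vendored quantizer above; the module path and shapes are assumed for illustration:]

import torch
from vqvae_quantize import VectorQuantizer2  # assumed local import path

quantizer = VectorQuantizer2(n_e=512, e_dim=4, beta=0.25)
z = torch.randn(1, 4, 8, 8, requires_grad=True)   # (b, c, h, w) latent
z_q, loss, (_, _, indices) = quantizer(z)

assert z_q.shape == z.shape
# the straight-through estimator (z_q = z + (z_q - z).detach()) lets gradients
# reach z even though the nearest-codebook argmin is non-differentiable:
(z_q.sum() + loss).backward()
print(indices.shape)   # one codebook index per spatial position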
diff --git a/extensions-builtin/Lora/extra_networks_lora.py b/extensions-builtin/Lora/extra_networks_lora.py new file mode 100644 index 0000000000000000000000000000000000000000..3532a18a1acdc6d5b71248e5de0250d0dee40815 --- /dev/null +++ b/extensions-builtin/Lora/extra_networks_lora.py @@ -0,0 +1,59 @@ +from modules import extra_networks, shared +import networks + + +class ExtraNetworkLora(extra_networks.ExtraNetwork): + def __init__(self): + super().__init__('lora') + + def activate(self, p, params_list): + additional = shared.opts.sd_lora + + if additional != "None" and additional in networks.available_networks and not any(x for x in params_list if x.items[0] == additional): + p.all_prompts = [x + f"<lora:{additional}:{shared.opts.extra_networks_default_multiplier}>" for x in p.all_prompts] + params_list.append(extra_networks.ExtraNetworkParams(items=[additional, shared.opts.extra_networks_default_multiplier])) + + names = [] + te_multipliers = [] + unet_multipliers = [] + dyn_dims = [] + for params in params_list: + assert params.items + + names.append(params.positional[0]) + + te_multiplier = 
float(params.positional[1]) if len(params.positional) > 1 else 1.0 + te_multiplier = float(params.named.get("te", te_multiplier)) + + unet_multiplier = float(params.positional[2]) if len(params.positional) > 2 else te_multiplier + unet_multiplier = float(params.named.get("unet", unet_multiplier)) + + dyn_dim = int(params.positional[3]) if len(params.positional) > 3 else None + dyn_dim = int(params.named["dyn"]) if "dyn" in params.named else dyn_dim + + te_multipliers.append(te_multiplier) + unet_multipliers.append(unet_multiplier) + dyn_dims.append(dyn_dim) + + networks.load_networks(names, te_multipliers, unet_multipliers, dyn_dims) + + if shared.opts.lora_add_hashes_to_infotext: + network_hashes = [] + for item in networks.loaded_networks: + shorthash = item.network_on_disk.shorthash + if not shorthash: + continue + + alias = item.mentioned_name + if not alias: + continue + + alias = alias.replace(":", "").replace(",", "") + + network_hashes.append(f"{alias}: {shorthash}") + + if network_hashes: + p.extra_generation_params["Lora hashes"] = ", ".join(network_hashes) + + def deactivate(self, p): + pass diff --git a/extensions-builtin/Lora/lora.py b/extensions-builtin/Lora/lora.py new file mode 100644 index 0000000000000000000000000000000000000000..6186538e956e39c843a2a22a77c5ab53fdfec3c7 --- /dev/null +++ b/extensions-builtin/Lora/lora.py @@ -0,0 +1,9 @@ +import networks + +list_available_loras = networks.list_available_networks + +available_loras = networks.available_networks +available_lora_aliases = networks.available_network_aliases +available_lora_hash_lookup = networks.available_network_hash_lookup +forbidden_lora_aliases = networks.forbidden_network_aliases +loaded_loras = networks.loaded_networks diff --git a/extensions-builtin/Lora/lyco_helpers.py b/extensions-builtin/Lora/lyco_helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..f2f42e83a0188cc8650ea79def7f95df0e9bac34 --- /dev/null +++ b/extensions-builtin/Lora/lyco_helpers.py @@ -0,0 +1,21 @@ +import torch + + +def make_weight_cp(t, wa, wb): + temp = torch.einsum('i j k l, j r -> i r k l', t, wb) + return torch.einsum('i j k l, i r -> r j k l', temp, wa) + + +def rebuild_conventional(up, down, shape, dyn_dim=None): + up = up.reshape(up.size(0), -1) + down = down.reshape(down.size(0), -1) + if dyn_dim is not None: + up = up[:, :dyn_dim] + down = down[:dyn_dim, :] + return (up @ down).reshape(shape) + + +def rebuild_cp_decomposition(up, down, mid): + up = up.reshape(up.size(0), -1) + down = down.reshape(down.size(0), -1) + return torch.einsum('n m k l, i n, m j -> i j k l', mid, up, down) diff --git a/extensions-builtin/Lora/network.py b/extensions-builtin/Lora/network.py new file mode 100644 index 0000000000000000000000000000000000000000..a24bb802c77df49d7e6b6ba43d3db4c25479ab47 --- /dev/null +++ b/extensions-builtin/Lora/network.py @@ -0,0 +1,155 @@ +from __future__ import annotations +import os +from collections import namedtuple +import enum + +from modules import sd_models, cache, errors, hashes, shared + +NetworkWeights = namedtuple('NetworkWeights', ['network_key', 'sd_key', 'w', 'sd_module']) + +metadata_tags_order = {"ss_sd_model_name": 1, "ss_resolution": 2, "ss_clip_skip": 3, "ss_num_train_images": 10, "ss_tag_frequency": 20} + + +class SdVersion(enum.Enum): + Unknown = 1 + SD1 = 2 + SD2 = 3 + SDXL = 4 + + +class NetworkOnDisk: + def __init__(self, name, filename): + self.name = name + self.filename = filename + self.metadata = {} + self.is_safetensors = os.path.splitext(filename)[1].lower() 
== ".safetensors" + + def read_metadata(): + metadata = sd_models.read_metadata_from_safetensors(filename) + metadata.pop('ssmd_cover_images', None) # those are cover images, and they are too big to display in UI as text + + return metadata + + if self.is_safetensors: + try: + self.metadata = cache.cached_data_for_file('safetensors-metadata', "lora/" + self.name, filename, read_metadata) + except Exception as e: + errors.display(e, f"reading lora {filename}") + + if self.metadata: + m = {} + for k, v in sorted(self.metadata.items(), key=lambda x: metadata_tags_order.get(x[0], 999)): + m[k] = v + + self.metadata = m + + self.alias = self.metadata.get('ss_output_name', self.name) + + self.hash = None + self.shorthash = None + self.set_hash( + self.metadata.get('sshs_model_hash') or + hashes.sha256_from_cache(self.filename, "lora/" + self.name, use_addnet_hash=self.is_safetensors) or + '' + ) + + self.sd_version = self.detect_version() + + def detect_version(self): + if str(self.metadata.get('ss_base_model_version', "")).startswith("sdxl_"): + return SdVersion.SDXL + elif str(self.metadata.get('ss_v2', "")) == "True": + return SdVersion.SD2 + elif len(self.metadata): + return SdVersion.SD1 + + return SdVersion.Unknown + + def set_hash(self, v): + self.hash = v + self.shorthash = self.hash[0:12] + + if self.shorthash: + import networks + networks.available_network_hash_lookup[self.shorthash] = self + + def read_hash(self): + if not self.hash: + self.set_hash(hashes.sha256(self.filename, "lora/" + self.name, use_addnet_hash=self.is_safetensors) or '') + + def get_alias(self): + import networks + if shared.opts.lora_preferred_name == "Filename" or self.alias.lower() in networks.forbidden_network_aliases: + return self.name + else: + return self.alias + + +class Network: # LoraModule + def __init__(self, name, network_on_disk: NetworkOnDisk): + self.name = name + self.network_on_disk = network_on_disk + self.te_multiplier = 1.0 + self.unet_multiplier = 1.0 + self.dyn_dim = None + self.modules = {} + self.mtime = None + + self.mentioned_name = None + """the text that was used to add the network to prompt - can be either name or an alias""" + + +class ModuleType: + def create_module(self, net: Network, weights: NetworkWeights) -> Network | None: + return None + + +class NetworkModule: + def __init__(self, net: Network, weights: NetworkWeights): + self.network = net + self.network_key = weights.network_key + self.sd_key = weights.sd_key + self.sd_module = weights.sd_module + + if hasattr(self.sd_module, 'weight'): + self.shape = self.sd_module.weight.shape + + self.dim = None + self.bias = weights.w.get("bias") + self.alpha = weights.w["alpha"].item() if "alpha" in weights.w else None + self.scale = weights.w["scale"].item() if "scale" in weights.w else None + + def multiplier(self): + if 'transformer' in self.sd_key[:20]: + return self.network.te_multiplier + else: + return self.network.unet_multiplier + + def calc_scale(self): + if self.scale is not None: + return self.scale + if self.dim is not None and self.alpha is not None: + return self.alpha / self.dim + + return 1.0 + + def finalize_updown(self, updown, orig_weight, output_shape): + if self.bias is not None: + updown = updown.reshape(self.bias.shape) + updown += self.bias.to(orig_weight.device, dtype=orig_weight.dtype) + updown = updown.reshape(output_shape) + + if len(output_shape) == 4: + updown = updown.reshape(output_shape) + + if orig_weight.size().numel() == updown.size().numel(): + updown = updown.reshape(orig_weight.shape) + + 
return updown * self.calc_scale() * self.multiplier() + + def calc_updown(self, target): + raise NotImplementedError() + + def forward(self, x, y): + raise NotImplementedError() + diff --git a/extensions-builtin/Lora/network_full.py b/extensions-builtin/Lora/network_full.py new file mode 100644 index 0000000000000000000000000000000000000000..dc197585291f04f8ffe50ce8f4fb681761061bdd --- /dev/null +++ b/extensions-builtin/Lora/network_full.py @@ -0,0 +1,22 @@ +import network + + +class ModuleTypeFull(network.ModuleType): + def create_module(self, net: network.Network, weights: network.NetworkWeights): + if all(x in weights.w for x in ["diff"]): + return NetworkModuleFull(net, weights) + + return None + + +class NetworkModuleFull(network.NetworkModule): + def __init__(self, net: network.Network, weights: network.NetworkWeights): + super().__init__(net, weights) + + self.weight = weights.w.get("diff") + + def calc_updown(self, orig_weight): + output_shape = self.weight.shape + updown = self.weight.to(orig_weight.device, dtype=orig_weight.dtype) + + return self.finalize_updown(updown, orig_weight, output_shape) diff --git a/extensions-builtin/Lora/network_hada.py b/extensions-builtin/Lora/network_hada.py new file mode 100644 index 0000000000000000000000000000000000000000..b62e88840866f2801b5bafa657cfd9b0377054b7 --- /dev/null +++ b/extensions-builtin/Lora/network_hada.py @@ -0,0 +1,55 @@ +import lyco_helpers +import network + + +class ModuleTypeHada(network.ModuleType): + def create_module(self, net: network.Network, weights: network.NetworkWeights): + if all(x in weights.w for x in ["hada_w1_a", "hada_w1_b", "hada_w2_a", "hada_w2_b"]): + return NetworkModuleHada(net, weights) + + return None + + +class NetworkModuleHada(network.NetworkModule): + def __init__(self, net: network.Network, weights: network.NetworkWeights): + super().__init__(net, weights) + + if hasattr(self.sd_module, 'weight'): + self.shape = self.sd_module.weight.shape + + self.w1a = weights.w["hada_w1_a"] + self.w1b = weights.w["hada_w1_b"] + self.dim = self.w1b.shape[0] + self.w2a = weights.w["hada_w2_a"] + self.w2b = weights.w["hada_w2_b"] + + self.t1 = weights.w.get("hada_t1") + self.t2 = weights.w.get("hada_t2") + + def calc_updown(self, orig_weight): + w1a = self.w1a.to(orig_weight.device, dtype=orig_weight.dtype) + w1b = self.w1b.to(orig_weight.device, dtype=orig_weight.dtype) + w2a = self.w2a.to(orig_weight.device, dtype=orig_weight.dtype) + w2b = self.w2b.to(orig_weight.device, dtype=orig_weight.dtype) + + output_shape = [w1a.size(0), w1b.size(1)] + + if self.t1 is not None: + output_shape = [w1a.size(1), w1b.size(1)] + t1 = self.t1.to(orig_weight.device, dtype=orig_weight.dtype) + updown1 = lyco_helpers.make_weight_cp(t1, w1a, w1b) + output_shape += t1.shape[2:] + else: + if len(w1b.shape) == 4: + output_shape += w1b.shape[2:] + updown1 = lyco_helpers.rebuild_conventional(w1a, w1b, output_shape) + + if self.t2 is not None: + t2 = self.t2.to(orig_weight.device, dtype=orig_weight.dtype) + updown2 = lyco_helpers.make_weight_cp(t2, w2a, w2b) + else: + updown2 = lyco_helpers.rebuild_conventional(w2a, w2b, output_shape) + + updown = updown1 * updown2 + + return self.finalize_updown(updown, orig_weight, output_shape) diff --git a/extensions-builtin/Lora/network_ia3.py b/extensions-builtin/Lora/network_ia3.py new file mode 100644 index 0000000000000000000000000000000000000000..ddf5d68983c3b8d57ad3d58b293e6bc462d52159 --- /dev/null +++ b/extensions-builtin/Lora/network_ia3.py @@ -0,0 +1,30 @@ +import network + + +class 
ModuleTypeIa3(network.ModuleType): + def create_module(self, net: network.Network, weights: network.NetworkWeights): + if all(x in weights.w for x in ["weight"]): + return NetworkModuleIa3(net, weights) + + return None + + +class NetworkModuleIa3(network.NetworkModule): + def __init__(self, net: network.Network, weights: network.NetworkWeights): + super().__init__(net, weights) + + self.w = weights.w["weight"] + self.on_input = weights.w["on_input"].item() + + def calc_updown(self, orig_weight): + w = self.w.to(orig_weight.device, dtype=orig_weight.dtype) + + output_shape = [w.size(0), orig_weight.size(1)] + if self.on_input: + output_shape.reverse() + else: + w = w.reshape(-1, 1) + + updown = orig_weight * w + + return self.finalize_updown(updown, orig_weight, output_shape) diff --git a/extensions-builtin/Lora/network_lokr.py b/extensions-builtin/Lora/network_lokr.py new file mode 100644 index 0000000000000000000000000000000000000000..87fbafa1b406de73cc394a3a0c9068da4119b0d8 --- /dev/null +++ b/extensions-builtin/Lora/network_lokr.py @@ -0,0 +1,64 @@ +import torch + +import lyco_helpers +import network + + +class ModuleTypeLokr(network.ModuleType): + def create_module(self, net: network.Network, weights: network.NetworkWeights): + has_1 = "lokr_w1" in weights.w or ("lokr_w1_a" in weights.w and "lokr_w1_b" in weights.w) + has_2 = "lokr_w2" in weights.w or ("lokr_w2_a" in weights.w and "lokr_w2_b" in weights.w) + if has_1 and has_2: + return NetworkModuleLokr(net, weights) + + return None + + +def make_kron(orig_shape, w1, w2): + if len(w2.shape) == 4: + w1 = w1.unsqueeze(2).unsqueeze(2) + w2 = w2.contiguous() + return torch.kron(w1, w2).reshape(orig_shape) + + +class NetworkModuleLokr(network.NetworkModule): + def __init__(self, net: network.Network, weights: network.NetworkWeights): + super().__init__(net, weights) + + self.w1 = weights.w.get("lokr_w1") + self.w1a = weights.w.get("lokr_w1_a") + self.w1b = weights.w.get("lokr_w1_b") + self.dim = self.w1b.shape[0] if self.w1b is not None else self.dim + self.w2 = weights.w.get("lokr_w2") + self.w2a = weights.w.get("lokr_w2_a") + self.w2b = weights.w.get("lokr_w2_b") + self.dim = self.w2b.shape[0] if self.w2b is not None else self.dim + self.t2 = weights.w.get("lokr_t2") + + def calc_updown(self, orig_weight): + if self.w1 is not None: + w1 = self.w1.to(orig_weight.device, dtype=orig_weight.dtype) + else: + w1a = self.w1a.to(orig_weight.device, dtype=orig_weight.dtype) + w1b = self.w1b.to(orig_weight.device, dtype=orig_weight.dtype) + w1 = w1a @ w1b + + if self.w2 is not None: + w2 = self.w2.to(orig_weight.device, dtype=orig_weight.dtype) + elif self.t2 is None: + w2a = self.w2a.to(orig_weight.device, dtype=orig_weight.dtype) + w2b = self.w2b.to(orig_weight.device, dtype=orig_weight.dtype) + w2 = w2a @ w2b + else: + t2 = self.t2.to(orig_weight.device, dtype=orig_weight.dtype) + w2a = self.w2a.to(orig_weight.device, dtype=orig_weight.dtype) + w2b = self.w2b.to(orig_weight.device, dtype=orig_weight.dtype) + w2 = lyco_helpers.make_weight_cp(t2, w2a, w2b) + + output_shape = [w1.size(0) * w2.size(0), w1.size(1) * w2.size(1)] + if len(orig_weight.shape) == 4: + output_shape = orig_weight.shape + + updown = make_kron(output_shape, w1, w2) + + return self.finalize_updown(updown, orig_weight, output_shape) diff --git a/extensions-builtin/Lora/network_lora.py b/extensions-builtin/Lora/network_lora.py new file mode 100644 index 0000000000000000000000000000000000000000..cb63807a09a6883fa636822ebc01753e2cb4848f --- /dev/null +++ 
b/extensions-builtin/Lora/network_lora.py @@ -0,0 +1,86 @@ +import torch + +import lyco_helpers +import network +from modules import devices + + +class ModuleTypeLora(network.ModuleType): + def create_module(self, net: network.Network, weights: network.NetworkWeights): + if all(x in weights.w for x in ["lora_up.weight", "lora_down.weight"]): + return NetworkModuleLora(net, weights) + + return None + + +class NetworkModuleLora(network.NetworkModule): + def __init__(self, net: network.Network, weights: network.NetworkWeights): + super().__init__(net, weights) + + self.up_model = self.create_module(weights.w, "lora_up.weight") + self.down_model = self.create_module(weights.w, "lora_down.weight") + self.mid_model = self.create_module(weights.w, "lora_mid.weight", none_ok=True) + + self.dim = weights.w["lora_down.weight"].shape[0] + + def create_module(self, weights, key, none_ok=False): + weight = weights.get(key) + + if weight is None and none_ok: + return None + + is_linear = type(self.sd_module) in [torch.nn.Linear, torch.nn.modules.linear.NonDynamicallyQuantizableLinear, torch.nn.MultiheadAttention] + is_conv = type(self.sd_module) in [torch.nn.Conv2d] + + if is_linear: + weight = weight.reshape(weight.shape[0], -1) + module = torch.nn.Linear(weight.shape[1], weight.shape[0], bias=False) + elif is_conv and key == "lora_down.weight" or key == "dyn_up": + if len(weight.shape) == 2: + weight = weight.reshape(weight.shape[0], -1, 1, 1) + + if weight.shape[2] != 1 or weight.shape[3] != 1: + module = torch.nn.Conv2d(weight.shape[1], weight.shape[0], self.sd_module.kernel_size, self.sd_module.stride, self.sd_module.padding, bias=False) + else: + module = torch.nn.Conv2d(weight.shape[1], weight.shape[0], (1, 1), bias=False) + elif is_conv and key == "lora_mid.weight": + module = torch.nn.Conv2d(weight.shape[1], weight.shape[0], self.sd_module.kernel_size, self.sd_module.stride, self.sd_module.padding, bias=False) + elif is_conv and key == "lora_up.weight" or key == "dyn_down": + module = torch.nn.Conv2d(weight.shape[1], weight.shape[0], (1, 1), bias=False) + else: + raise AssertionError(f'Lora layer {self.network_key} matched a layer with unsupported type: {type(self.sd_module).__name__}') + + with torch.no_grad(): + if weight.shape != module.weight.shape: + weight = weight.reshape(module.weight.shape) + module.weight.copy_(weight) + + module.to(device=devices.cpu, dtype=devices.dtype) + module.weight.requires_grad_(False) + + return module + + def calc_updown(self, orig_weight): + up = self.up_model.weight.to(orig_weight.device, dtype=orig_weight.dtype) + down = self.down_model.weight.to(orig_weight.device, dtype=orig_weight.dtype) + + output_shape = [up.size(0), down.size(1)] + if self.mid_model is not None: + # cp-decomposition + mid = self.mid_model.weight.to(orig_weight.device, dtype=orig_weight.dtype) + updown = lyco_helpers.rebuild_cp_decomposition(up, down, mid) + output_shape += mid.shape[2:] + else: + if len(down.shape) == 4: + output_shape += down.shape[2:] + updown = lyco_helpers.rebuild_conventional(up, down, output_shape, self.network.dyn_dim) + + return self.finalize_updown(updown, orig_weight, output_shape) + + def forward(self, x, y): + self.up_model.to(device=devices.device) + self.down_model.to(device=devices.device) + + return y + self.up_model(self.down_model(x)) * self.multiplier() * self.calc_scale() + + diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py new file mode 100644 index 
0000000000000000000000000000000000000000..73884006798d829f51cbcc446e0aa8d3506a5838 --- /dev/null +++ b/extensions-builtin/Lora/networks.py @@ -0,0 +1,468 @@ +import os +import re + +import network +import network_lora +import network_hada +import network_ia3 +import network_lokr +import network_full + +import torch +from typing import Union + +from modules import shared, devices, sd_models, errors, scripts, sd_hijack + +module_types = [ + network_lora.ModuleTypeLora(), + network_hada.ModuleTypeHada(), + network_ia3.ModuleTypeIa3(), + network_lokr.ModuleTypeLokr(), + network_full.ModuleTypeFull(), +] + + +re_digits = re.compile(r"\d+") +re_x_proj = re.compile(r"(.*)_([qkv]_proj)$") +re_compiled = {} + +suffix_conversion = { + "attentions": {}, + "resnets": { + "conv1": "in_layers_2", + "conv2": "out_layers_3", + "time_emb_proj": "emb_layers_1", + "conv_shortcut": "skip_connection", + } +} + + +def convert_diffusers_name_to_compvis(key, is_sd2): + def match(match_list, regex_text): + regex = re_compiled.get(regex_text) + if regex is None: + regex = re.compile(regex_text) + re_compiled[regex_text] = regex + + r = re.match(regex, key) + if not r: + return False + + match_list.clear() + match_list.extend([int(x) if re.match(re_digits, x) else x for x in r.groups()]) + return True + + m = [] + + if match(m, r"lora_unet_conv_in(.*)"): + return f'diffusion_model_input_blocks_0_0{m[0]}' + + if match(m, r"lora_unet_conv_out(.*)"): + return f'diffusion_model_out_2{m[0]}' + + if match(m, r"lora_unet_time_embedding_linear_(\d+)(.*)"): + return f"diffusion_model_time_embed_{m[0] * 2 - 2}{m[1]}" + + if match(m, r"lora_unet_down_blocks_(\d+)_(attentions|resnets)_(\d+)_(.+)"): + suffix = suffix_conversion.get(m[1], {}).get(m[3], m[3]) + return f"diffusion_model_input_blocks_{1 + m[0] * 3 + m[2]}_{1 if m[1] == 'attentions' else 0}_{suffix}" + + if match(m, r"lora_unet_mid_block_(attentions|resnets)_(\d+)_(.+)"): + suffix = suffix_conversion.get(m[0], {}).get(m[2], m[2]) + return f"diffusion_model_middle_block_{1 if m[0] == 'attentions' else m[1] * 2}_{suffix}" + + if match(m, r"lora_unet_up_blocks_(\d+)_(attentions|resnets)_(\d+)_(.+)"): + suffix = suffix_conversion.get(m[1], {}).get(m[3], m[3]) + return f"diffusion_model_output_blocks_{m[0] * 3 + m[2]}_{1 if m[1] == 'attentions' else 0}_{suffix}" + + if match(m, r"lora_unet_down_blocks_(\d+)_downsamplers_0_conv"): + return f"diffusion_model_input_blocks_{3 + m[0] * 3}_0_op" + + if match(m, r"lora_unet_up_blocks_(\d+)_upsamplers_0_conv"): + return f"diffusion_model_output_blocks_{2 + m[0] * 3}_{2 if m[0]>0 else 1}_conv" + + if match(m, r"lora_te_text_model_encoder_layers_(\d+)_(.+)"): + if is_sd2: + if 'mlp_fc1' in m[1]: + return f"model_transformer_resblocks_{m[0]}_{m[1].replace('mlp_fc1', 'mlp_c_fc')}" + elif 'mlp_fc2' in m[1]: + return f"model_transformer_resblocks_{m[0]}_{m[1].replace('mlp_fc2', 'mlp_c_proj')}" + else: + return f"model_transformer_resblocks_{m[0]}_{m[1].replace('self_attn', 'attn')}" + + return f"transformer_text_model_encoder_layers_{m[0]}_{m[1]}" + + if match(m, r"lora_te2_text_model_encoder_layers_(\d+)_(.+)"): + if 'mlp_fc1' in m[1]: + return f"1_model_transformer_resblocks_{m[0]}_{m[1].replace('mlp_fc1', 'mlp_c_fc')}" + elif 'mlp_fc2' in m[1]: + return f"1_model_transformer_resblocks_{m[0]}_{m[1].replace('mlp_fc2', 'mlp_c_proj')}" + else: + return f"1_model_transformer_resblocks_{m[0]}_{m[1].replace('self_attn', 'attn')}" + + return key + + +def assign_network_names_to_compvis_modules(sd_model): + network_layer_mapping = {} + + if 
shared.sd_model.is_sdxl: + for i, embedder in enumerate(shared.sd_model.conditioner.embedders): + if not hasattr(embedder, 'wrapped'): + continue + + for name, module in embedder.wrapped.named_modules(): + network_name = f'{i}_{name.replace(".", "_")}' + network_layer_mapping[network_name] = module + module.network_layer_name = network_name + else: + for name, module in shared.sd_model.cond_stage_model.wrapped.named_modules(): + network_name = name.replace(".", "_") + network_layer_mapping[network_name] = module + module.network_layer_name = network_name + + for name, module in shared.sd_model.model.named_modules(): + network_name = name.replace(".", "_") + network_layer_mapping[network_name] = module + module.network_layer_name = network_name + + sd_model.network_layer_mapping = network_layer_mapping + + +def load_network(name, network_on_disk): + net = network.Network(name, network_on_disk) + net.mtime = os.path.getmtime(network_on_disk.filename) + + sd = sd_models.read_state_dict(network_on_disk.filename) + + # this should not be needed but is here as an emergency fix for an unknown error people are experiencing in 1.2.0 + if not hasattr(shared.sd_model, 'network_layer_mapping'): + assign_network_names_to_compvis_modules(shared.sd_model) + + keys_failed_to_match = {} + is_sd2 = 'model_transformer_resblocks' in shared.sd_model.network_layer_mapping + + matched_networks = {} + + for key_network, weight in sd.items(): + key_network_without_network_parts, network_part = key_network.split(".", 1) + + key = convert_diffusers_name_to_compvis(key_network_without_network_parts, is_sd2) + sd_module = shared.sd_model.network_layer_mapping.get(key, None) + + if sd_module is None: + m = re_x_proj.match(key) + if m: + sd_module = shared.sd_model.network_layer_mapping.get(m.group(1), None) + + # SDXL loras seem to already have correct compvis keys, so only need to replace "lora_unet" with "diffusion_model" + if sd_module is None and "lora_unet" in key_network_without_network_parts: + key = key_network_without_network_parts.replace("lora_unet", "diffusion_model") + sd_module = shared.sd_model.network_layer_mapping.get(key, None) + elif sd_module is None and "lora_te1_text_model" in key_network_without_network_parts: + key = key_network_without_network_parts.replace("lora_te1_text_model", "0_transformer_text_model") + sd_module = shared.sd_model.network_layer_mapping.get(key, None) + + # some SD1 Loras also have correct compvis keys + if sd_module is None: + key = key_network_without_network_parts.replace("lora_te1_text_model", "transformer_text_model") + sd_module = shared.sd_model.network_layer_mapping.get(key, None) + + if sd_module is None: + keys_failed_to_match[key_network] = key + continue + + if key not in matched_networks: + matched_networks[key] = network.NetworkWeights(network_key=key_network, sd_key=key, w={}, sd_module=sd_module) + + matched_networks[key].w[network_part] = weight + + for key, weights in matched_networks.items(): + net_module = None + for nettype in module_types: + net_module = nettype.create_module(net, weights) + if net_module is not None: + break + + if net_module is None: + raise AssertionError(f"Could not find a module type (out of {', '.join([x.__class__.__name__ for x in module_types])}) that would accept those keys: {', '.join(weights.w)}") + + net.modules[key] = net_module + + if keys_failed_to_match: + print(f"Failed to match keys when loading network {network_on_disk.filename}: {keys_failed_to_match}") + + return net + + +def load_networks(names, 
te_multipliers=None, unet_multipliers=None, dyn_dims=None):
+    already_loaded = {}
+
+    for net in loaded_networks:
+        if net.name in names:
+            already_loaded[net.name] = net
+
+    loaded_networks.clear()
+
+    networks_on_disk = [available_network_aliases.get(name, None) for name in names]
+    if any(x is None for x in networks_on_disk):
+        list_available_networks()
+
+        networks_on_disk = [available_network_aliases.get(name, None) for name in names]
+
+    failed_to_load_networks = []
+
+    for i, name in enumerate(names):
+        net = already_loaded.get(name, None)
+
+        network_on_disk = networks_on_disk[i]
+
+        if network_on_disk is not None:
+            if net is None or os.path.getmtime(network_on_disk.filename) > net.mtime:
+                try:
+                    net = load_network(name, network_on_disk)
+                except Exception as e:
+                    errors.display(e, f"loading network {network_on_disk.filename}")
+                    continue
+
+            net.mentioned_name = name
+
+            network_on_disk.read_hash()
+
+        if net is None:
+            failed_to_load_networks.append(name)
+            print(f"Couldn't find network with name {name}")
+            continue
+
+        net.te_multiplier = te_multipliers[i] if te_multipliers else 1.0
+        net.unet_multiplier = unet_multipliers[i] if unet_multipliers else 1.0
+        net.dyn_dim = dyn_dims[i] if dyn_dims else 1.0
+        loaded_networks.append(net)
+
+    if failed_to_load_networks:
+        sd_hijack.model_hijack.comments.append("Failed to find networks: " + ", ".join(failed_to_load_networks))
+
+
+def network_restore_weights_from_backup(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.MultiheadAttention]):
+    weights_backup = getattr(self, "network_weights_backup", None)
+
+    if weights_backup is None:
+        return
+
+    if isinstance(self, torch.nn.MultiheadAttention):
+        self.in_proj_weight.copy_(weights_backup[0])
+        self.out_proj.weight.copy_(weights_backup[1])
+    else:
+        self.weight.copy_(weights_backup)
+
+
+def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.MultiheadAttention]):
+    """
+    Applies the currently selected set of networks to the weights of torch layer self.
+    If weights already have this particular set of networks applied, does nothing.
+    If not, restores original weights from backup and alters weights according to networks.
+    """
+
+    network_layer_name = getattr(self, 'network_layer_name', None)
+    if network_layer_name is None:
+        return
+
+    current_names = getattr(self, "network_current_names", ())
+    wanted_names = tuple((x.name, x.te_multiplier, x.unet_multiplier, x.dyn_dim) for x in loaded_networks)
+
+    weights_backup = getattr(self, "network_weights_backup", None)
+    if weights_backup is None:
+        if isinstance(self, torch.nn.MultiheadAttention):
+            weights_backup = (self.in_proj_weight.to(devices.cpu, copy=True), self.out_proj.weight.to(devices.cpu, copy=True))
+        else:
+            weights_backup = self.weight.to(devices.cpu, copy=True)
+
+        self.network_weights_backup = weights_backup
+
+    if current_names != wanted_names:
+        network_restore_weights_from_backup(self)
+
+        for net in loaded_networks:
+            module = net.modules.get(network_layer_name, None)
+            if module is not None and hasattr(self, 'weight'):
+                with torch.no_grad():
+                    updown = module.calc_updown(self.weight)
+
+                    if len(self.weight.shape) == 4 and self.weight.shape[1] == 9:
+                        # inpainting model.
zero pad updown to make channel[1] 4 to 9 + updown = torch.nn.functional.pad(updown, (0, 0, 0, 0, 0, 5)) + + self.weight += updown + continue + + module_q = net.modules.get(network_layer_name + "_q_proj", None) + module_k = net.modules.get(network_layer_name + "_k_proj", None) + module_v = net.modules.get(network_layer_name + "_v_proj", None) + module_out = net.modules.get(network_layer_name + "_out_proj", None) + + if isinstance(self, torch.nn.MultiheadAttention) and module_q and module_k and module_v and module_out: + with torch.no_grad(): + updown_q = module_q.calc_updown(self.in_proj_weight) + updown_k = module_k.calc_updown(self.in_proj_weight) + updown_v = module_v.calc_updown(self.in_proj_weight) + updown_qkv = torch.vstack([updown_q, updown_k, updown_v]) + updown_out = module_out.calc_updown(self.out_proj.weight) + + self.in_proj_weight += updown_qkv + self.out_proj.weight += updown_out + continue + + if module is None: + continue + + print(f'failed to calculate network weights for layer {network_layer_name}') + + self.network_current_names = wanted_names + + +def network_forward(module, input, original_forward): + """ + Old way of applying Lora by executing operations during layer's forward. + Stacking many loras this way results in big performance degradation. + """ + + if len(loaded_networks) == 0: + return original_forward(module, input) + + input = devices.cond_cast_unet(input) + + network_restore_weights_from_backup(module) + network_reset_cached_weight(module) + + y = original_forward(module, input) + + network_layer_name = getattr(module, 'network_layer_name', None) + for lora in loaded_networks: + module = lora.modules.get(network_layer_name, None) + if module is None: + continue + + y = module.forward(y, input) + + return y + + +def network_reset_cached_weight(self: Union[torch.nn.Conv2d, torch.nn.Linear]): + self.network_current_names = () + self.network_weights_backup = None + + +def network_Linear_forward(self, input): + if shared.opts.lora_functional: + return network_forward(self, input, torch.nn.Linear_forward_before_network) + + network_apply_weights(self) + + return torch.nn.Linear_forward_before_network(self, input) + + +def network_Linear_load_state_dict(self, *args, **kwargs): + network_reset_cached_weight(self) + + return torch.nn.Linear_load_state_dict_before_network(self, *args, **kwargs) + + +def network_Conv2d_forward(self, input): + if shared.opts.lora_functional: + return network_forward(self, input, torch.nn.Conv2d_forward_before_network) + + network_apply_weights(self) + + return torch.nn.Conv2d_forward_before_network(self, input) + + +def network_Conv2d_load_state_dict(self, *args, **kwargs): + network_reset_cached_weight(self) + + return torch.nn.Conv2d_load_state_dict_before_network(self, *args, **kwargs) + + +def network_MultiheadAttention_forward(self, *args, **kwargs): + network_apply_weights(self) + + return torch.nn.MultiheadAttention_forward_before_network(self, *args, **kwargs) + + +def network_MultiheadAttention_load_state_dict(self, *args, **kwargs): + network_reset_cached_weight(self) + + return torch.nn.MultiheadAttention_load_state_dict_before_network(self, *args, **kwargs) + + +def list_available_networks(): + available_networks.clear() + available_network_aliases.clear() + forbidden_network_aliases.clear() + available_network_hash_lookup.clear() + forbidden_network_aliases.update({"none": 1, "Addams": 1}) + + os.makedirs(shared.cmd_opts.lora_dir, exist_ok=True) + + candidates = list(shared.walk_files(shared.cmd_opts.lora_dir, 
allowed_extensions=[".pt", ".ckpt", ".safetensors"])) + candidates += list(shared.walk_files(shared.cmd_opts.lyco_dir_backcompat, allowed_extensions=[".pt", ".ckpt", ".safetensors"])) + for filename in candidates: + if os.path.isdir(filename): + continue + + name = os.path.splitext(os.path.basename(filename))[0] + try: + entry = network.NetworkOnDisk(name, filename) + except OSError: # should catch FileNotFoundError and PermissionError etc. + errors.report(f"Failed to load network {name} from {filename}", exc_info=True) + continue + + available_networks[name] = entry + + if entry.alias in available_network_aliases: + forbidden_network_aliases[entry.alias.lower()] = 1 + + available_network_aliases[name] = entry + available_network_aliases[entry.alias] = entry + + +re_network_name = re.compile(r"(.*)\s*\([0-9a-fA-F]+\)") + + +def infotext_pasted(infotext, params): + if "AddNet Module 1" in [x[1] for x in scripts.scripts_txt2img.infotext_fields]: + return # if the other extension is active, it will handle those fields, no need to do anything + + added = [] + + for k in params: + if not k.startswith("AddNet Model "): + continue + + num = k[13:] + + if params.get("AddNet Module " + num) != "LoRA": + continue + + name = params.get("AddNet Model " + num) + if name is None: + continue + + m = re_network_name.match(name) + if m: + name = m.group(1) + + multiplier = params.get("AddNet Weight A " + num, "1.0") + + added.append(f"") + + if added: + params["Prompt"] += "\n" + "".join(added) + + +available_networks = {} +available_network_aliases = {} +loaded_networks = [] +available_network_hash_lookup = {} +forbidden_network_aliases = {} + +list_available_networks() diff --git a/extensions-builtin/Lora/preload.py b/extensions-builtin/Lora/preload.py new file mode 100644 index 0000000000000000000000000000000000000000..1f85bc5338d77df91e60f35ebb4ce11d2573f01f --- /dev/null +++ b/extensions-builtin/Lora/preload.py @@ -0,0 +1,7 @@ +import os +from modules import paths + + +def preload(parser): + parser.add_argument("--lora-dir", type=str, help="Path to directory with Lora networks.", default=os.path.join(paths.models_path, 'Lora')) + parser.add_argument("--lyco-dir-backcompat", type=str, help="Path to directory with LyCORIS networks (for backawards compatibility; can also use --lyco-dir).", default=os.path.join(paths.models_path, 'LyCORIS')) diff --git a/extensions-builtin/Lora/scripts/__pycache__/lora_script.cpython-310.pyc b/extensions-builtin/Lora/scripts/__pycache__/lora_script.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c5883231cdd33efb9c5af03c9714c12e7bce9266 Binary files /dev/null and b/extensions-builtin/Lora/scripts/__pycache__/lora_script.cpython-310.pyc differ diff --git a/extensions-builtin/Lora/scripts/lora_script.py b/extensions-builtin/Lora/scripts/lora_script.py new file mode 100644 index 0000000000000000000000000000000000000000..e7b3cd06a88b39e4f177610ff370f6b7dd7ce972 --- /dev/null +++ b/extensions-builtin/Lora/scripts/lora_script.py @@ -0,0 +1,123 @@ +import re + +import torch +import gradio as gr +from fastapi import FastAPI + +import network +import networks +import lora # noqa:F401 +import extra_networks_lora +import ui_extra_networks_lora +from modules import script_callbacks, ui_extra_networks, extra_networks, shared + +def unload(): + torch.nn.Linear.forward = torch.nn.Linear_forward_before_network + torch.nn.Linear._load_from_state_dict = torch.nn.Linear_load_state_dict_before_network + torch.nn.Conv2d.forward = 
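For context, `infotext_pasted` translates legacy AddNet infotext fields into the `<lora:name:multiplier>` prompt syntax used by this extension. A runnable sketch of the name handling, with a made-up model entry (all values here are invented):

```python
import re

re_network_name = re.compile(r"(.*)\s*\([0-9a-fA-F]+\)")  # same pattern as above

name = "myLora(abcd1234)"   # hypothetical "AddNet Model 1" value
multiplier = "0.8"          # hypothetical "AddNet Weight A 1" value

m = re_network_name.match(name)
if m:
    name = m.group(1)       # strips the trailing "(hash)" suffix

assert f"<lora:{name}:{multiplier}>" == "<lora:myLora:0.8>"
```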
diff --git a/extensions-builtin/Lora/preload.py b/extensions-builtin/Lora/preload.py
new file mode 100644
index 0000000000000000000000000000000000000000..1f85bc5338d77df91e60f35ebb4ce11d2573f01f
--- /dev/null
+++ b/extensions-builtin/Lora/preload.py
@@ -0,0 +1,7 @@
+import os
+from modules import paths
+
+
+def preload(parser):
+    parser.add_argument("--lora-dir", type=str, help="Path to directory with Lora networks.", default=os.path.join(paths.models_path, 'Lora'))
+    parser.add_argument("--lyco-dir-backcompat", type=str, help="Path to directory with LyCORIS networks (for backwards compatibility; can also use --lyco-dir).", default=os.path.join(paths.models_path, 'LyCORIS'))
diff --git a/extensions-builtin/Lora/scripts/__pycache__/lora_script.cpython-310.pyc b/extensions-builtin/Lora/scripts/__pycache__/lora_script.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c5883231cdd33efb9c5af03c9714c12e7bce9266
Binary files /dev/null and b/extensions-builtin/Lora/scripts/__pycache__/lora_script.cpython-310.pyc differ
diff --git a/extensions-builtin/Lora/scripts/lora_script.py b/extensions-builtin/Lora/scripts/lora_script.py
new file mode 100644
index 0000000000000000000000000000000000000000..e7b3cd06a88b39e4f177610ff370f6b7dd7ce972
--- /dev/null
+++ b/extensions-builtin/Lora/scripts/lora_script.py
@@ -0,0 +1,123 @@
+import re
+
+import torch
+import gradio as gr
+from fastapi import FastAPI
+
+import network
+import networks
+import lora  # noqa:F401
+import extra_networks_lora
+import ui_extra_networks_lora
+from modules import script_callbacks, ui_extra_networks, extra_networks, shared
+
+
+def unload():
+    torch.nn.Linear.forward = torch.nn.Linear_forward_before_network
+    torch.nn.Linear._load_from_state_dict = torch.nn.Linear_load_state_dict_before_network
+    torch.nn.Conv2d.forward = torch.nn.Conv2d_forward_before_network
+    torch.nn.Conv2d._load_from_state_dict = torch.nn.Conv2d_load_state_dict_before_network
+    torch.nn.MultiheadAttention.forward = torch.nn.MultiheadAttention_forward_before_network
+    torch.nn.MultiheadAttention._load_from_state_dict = torch.nn.MultiheadAttention_load_state_dict_before_network
+
+
+def before_ui():
+    ui_extra_networks.register_page(ui_extra_networks_lora.ExtraNetworksPageLora())
+
+    extra_network = extra_networks_lora.ExtraNetworkLora()
+    extra_networks.register_extra_network(extra_network)
+    extra_networks.register_extra_network_alias(extra_network, "lyco")
+
+
+if not hasattr(torch.nn, 'Linear_forward_before_network'):
+    torch.nn.Linear_forward_before_network = torch.nn.Linear.forward
+
+if not hasattr(torch.nn, 'Linear_load_state_dict_before_network'):
+    torch.nn.Linear_load_state_dict_before_network = torch.nn.Linear._load_from_state_dict
+
+if not hasattr(torch.nn, 'Conv2d_forward_before_network'):
+    torch.nn.Conv2d_forward_before_network = torch.nn.Conv2d.forward
+
+if not hasattr(torch.nn, 'Conv2d_load_state_dict_before_network'):
+    torch.nn.Conv2d_load_state_dict_before_network = torch.nn.Conv2d._load_from_state_dict
+
+if not hasattr(torch.nn, 'MultiheadAttention_forward_before_network'):
+    torch.nn.MultiheadAttention_forward_before_network = torch.nn.MultiheadAttention.forward
+
+if not hasattr(torch.nn, 'MultiheadAttention_load_state_dict_before_network'):
+    torch.nn.MultiheadAttention_load_state_dict_before_network = torch.nn.MultiheadAttention._load_from_state_dict
+
+torch.nn.Linear.forward = networks.network_Linear_forward
+torch.nn.Linear._load_from_state_dict = networks.network_Linear_load_state_dict
+torch.nn.Conv2d.forward = networks.network_Conv2d_forward
+torch.nn.Conv2d._load_from_state_dict = networks.network_Conv2d_load_state_dict
+torch.nn.MultiheadAttention.forward = networks.network_MultiheadAttention_forward
+torch.nn.MultiheadAttention._load_from_state_dict = networks.network_MultiheadAttention_load_state_dict
+
+script_callbacks.on_model_loaded(networks.assign_network_names_to_compvis_modules)
+script_callbacks.on_script_unloaded(unload)
+script_callbacks.on_before_ui(before_ui)
+script_callbacks.on_infotext_pasted(networks.infotext_pasted)
+
+
+shared.options_templates.update(shared.options_section(('extra_networks', "Extra Networks"), {
+    "sd_lora": shared.OptionInfo("None", "Add network to prompt", gr.Dropdown, lambda: {"choices": ["None", *networks.available_networks]}, refresh=networks.list_available_networks),
+    "lora_preferred_name": shared.OptionInfo("Alias from file", "When adding to prompt, refer to Lora by", gr.Radio, {"choices": ["Alias from file", "Filename"]}),
+    "lora_add_hashes_to_infotext": shared.OptionInfo(True, "Add Lora hashes to infotext"),
+    "lora_show_all": shared.OptionInfo(False, "Always show all networks on the Lora page").info("otherwise, networks detected as incompatible with the current version of Stable Diffusion will be hidden"),
+    "lora_hide_unknown_for_versions": shared.OptionInfo([], "Hide networks of unknown versions for model versions", gr.CheckboxGroup, {"choices": ["SD1", "SD2", "SDXL"]}),
+}))
+
+
+shared.options_templates.update(shared.options_section(('compatibility', "Compatibility"), {
+    "lora_functional": shared.OptionInfo(False, "Lora/Networks: use old method that takes longer when you have multiple Loras active and produces same results as kohya-ss/sd-webui-additional-networks extension"),
+}))
+
+
+def create_lora_json(obj: network.NetworkOnDisk):
+    return {
+        "name": obj.name,
+        "alias": obj.alias,
+        "path": obj.filename,
+        "metadata": obj.metadata,
+    }
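`create_lora_json` fixes the per-entry shape of the `/sdapi/v1/loras` response registered just below. A plausible response value, with invented field contents, would look like this:

```python
# Hypothetical value returned by GET /sdapi/v1/loras; the keys mirror
# create_lora_json above, the values are invented for illustration.
example_response = [
    {
        "name": "myLora",
        "alias": "myLora",
        "path": "/path/to/models/Lora/myLora.safetensors",
        "metadata": {"ss_network_module": "networks.lora"},
    },
]
```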
+
+
+def api_networks(_: gr.Blocks, app: FastAPI):
+    @app.get("/sdapi/v1/loras")
+    async def get_loras():
+        return [create_lora_json(obj) for obj in networks.available_networks.values()]
+
+    @app.post("/sdapi/v1/refresh-loras")
+    async def refresh_loras():
+        return networks.list_available_networks()
+
+
+script_callbacks.on_app_started(api_networks)
+
+re_lora = re.compile("<lora:([^:]+):")
+
+
+def infotext_pasted(infotext, d):
+    hashes = d.get("Lora hashes")
+    if not hashes:
+        return
+
+    hashes = [x.strip().split(':', 1) for x in hashes.split(",")]
+    hashes = {x[0].strip().replace(",", ""): x[1].strip() for x in hashes}
+
+    def network_replacement(m):
+        alias = m.group(1)
+        shorthash = hashes.get(alias)
+        if shorthash is None:
+            return m.group(0)
+
+        network_on_disk = networks.available_network_hash_lookup.get(shorthash)
+        if network_on_disk is None:
+            return m.group(0)
+
+        return f'<lora:{network_on_disk.get_alias()}:'
+
+    d["Prompt"] = re.sub(re_lora, network_replacement, d["Prompt"])
+
+
+script_callbacks.on_infotext_pasted(infotext_pasted)
diff --git a/extensions-builtin/Lora/ui_edit_user_metadata.py b/extensions-builtin/Lora/ui_edit_user_metadata.py
new file mode 100644
--- /dev/null
+++ b/extensions-builtin/Lora/ui_edit_user_metadata.py
+import datetime
+import html
+import random
+
+import gradio as gr
+import re
+
+from modules import ui_extra_networks_user_metadata
+
+
+def is_non_comma_tagset(tags):
+    average_tag_length = sum(len(x) for x in tags.keys()) / len(tags)
+
+    return average_tag_length >= 16
+
+
+re_word = re.compile(r"[-_\w']+")
+re_comma = re.compile(r" *, *")
+
+
+def build_tags(metadata):
+    tags = {}
+
+    for _, tags_dict in metadata.get("ss_tag_frequency", {}).items():
+        for tag, tag_count in tags_dict.items():
+            tag = tag.strip()
+            tags[tag] = tags.get(tag, 0) + int(tag_count)
+
+    if tags and is_non_comma_tagset(tags):
+        new_tags = {}
+
+        for text, text_count in tags.items():
+            for word in re.findall(re_word, text):
+                if len(word) < 3:
+                    continue
+
+                new_tags[word] = new_tags.get(word, 0) + text_count
+
+        tags = new_tags
+
+    ordered_tags = sorted(tags.keys(), key=tags.get, reverse=True)
+
+    return [(tag, tags[tag]) for tag in ordered_tags]
+
+
+class LoraUserMetadataEditor(ui_extra_networks_user_metadata.UserMetadataEditor):
+    def __init__(self, ui, tabname, page):
+        super().__init__(ui, tabname, page)
+
+        self.select_sd_version = None
+
+        self.taginfo = None
+        self.edit_activation_text = None
+        self.slider_preferred_weight = None
+        self.edit_notes = None
+
+    def save_lora_user_metadata(self, name, desc, sd_version, activation_text, preferred_weight, notes):
+        user_metadata = self.get_user_metadata(name)
+        user_metadata["description"] = desc
+        user_metadata["sd version"] = sd_version
+        user_metadata["activation text"] = activation_text
+        user_metadata["preferred weight"] = preferred_weight
+        user_metadata["notes"] = notes
+
+        self.write_user_metadata(name, user_metadata)
+
+    def get_metadata_table(self, name):
+        table = super().get_metadata_table(name)
+        item = self.page.items.get(name, {})
+        metadata = item.get("metadata") or {}
+
+        keys = {
+            'ss_sd_model_name': "Model:",
+            'ss_clip_skip': "Clip skip:",
+            'ss_network_module': "Kohya module:",
+        }
+
+        for key, label in keys.items():
+            value = metadata.get(key, None)
+            if value is not None and str(value) != "None":
+                table.append((label, html.escape(value)))
+
+        ss_training_started_at = metadata.get('ss_training_started_at')
+        if ss_training_started_at:
+            table.append(("Date trained:", datetime.datetime.utcfromtimestamp(float(ss_training_started_at)).strftime('%Y-%m-%d %H:%M')))
+
+        ss_bucket_info = metadata.get("ss_bucket_info")
+        if ss_bucket_info and "buckets" in ss_bucket_info:
+            resolutions = {}
+            for _, bucket in ss_bucket_info["buckets"].items():
+                resolution = bucket["resolution"]
+                resolution = f'{resolution[1]}x{resolution[0]}'
+
+                resolutions[resolution] = resolutions.get(resolution, 0) + int(bucket["count"])
+
+            resolutions_list = sorted(resolutions.keys(), key=resolutions.get, reverse=True)
+            resolutions_text = html.escape(", ".join(resolutions_list[0:4]))
+            if len(resolutions) > 4:
+                resolutions_text += ", ..."
+                resolutions_text = f"<span title='{html.escape(', '.join(resolutions_list))}'>{resolutions_text}</span>"
+
+            table.append(('Resolutions:' if len(resolutions_list) > 1 else 'Resolution:', resolutions_text))
+
+        image_count = 0
+        for _, params in metadata.get("ss_dataset_dirs", {}).items():
+            image_count += int(params.get("img_count", 0))
+
+        if image_count:
+            table.append(("Dataset size:", image_count))
+
+        return table
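To make the tag handling above concrete, here is a runnable miniature of the aggregation step in `build_tags`, using made-up kohya-style `ss_tag_frequency` metadata (the word-splitting fallback for non-comma tagsets is omitted because it does not trigger here):

```python
# Mirrors the merge-and-sort logic of build_tags on invented metadata.
metadata = {
    "ss_tag_frequency": {
        "10_concept": {"1girl": "25", "solo": "10"},  # hypothetical dataset dirs
        "5_style": {"1girl": "5"},
    }
}

tags = {}
for _, tags_dict in metadata.get("ss_tag_frequency", {}).items():
    for tag, tag_count in tags_dict.items():
        tag = tag.strip()
        tags[tag] = tags.get(tag, 0) + int(tag_count)

ordered_tags = sorted(tags.keys(), key=tags.get, reverse=True)
assert [(t, tags[t]) for t in ordered_tags] == [("1girl", 30), ("solo", 10)]
```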
+ resolutions_text = f"{resolutions_text}" + + table.append(('Resolutions:' if len(resolutions_list) > 1 else 'Resolution:', resolutions_text)) + + image_count = 0 + for _, params in metadata.get("ss_dataset_dirs", {}).items(): + image_count += int(params.get("img_count", 0)) + + if image_count: + table.append(("Dataset size:", image_count)) + + return table + + def put_values_into_components(self, name): + user_metadata = self.get_user_metadata(name) + values = super().put_values_into_components(name) + + item = self.page.items.get(name, {}) + metadata = item.get("metadata") or {} + + tags = build_tags(metadata) + gradio_tags = [(tag, str(count)) for tag, count in tags[0:24]] + + return [ + *values[0:5], + item.get("sd_version", "Unknown"), + gr.HighlightedText.update(value=gradio_tags, visible=True if tags else False), + user_metadata.get('activation text', ''), + float(user_metadata.get('preferred weight', 0.0)), + gr.update(visible=True if tags else False), + gr.update(value=self.generate_random_prompt_from_tags(tags), visible=True if tags else False), + ] + + def generate_random_prompt(self, name): + item = self.page.items.get(name, {}) + metadata = item.get("metadata") or {} + tags = build_tags(metadata) + + return self.generate_random_prompt_from_tags(tags) + + def generate_random_prompt_from_tags(self, tags): + max_count = None + res = [] + for tag, count in tags: + if not max_count: + max_count = count + + v = random.random() * max_count + if count > v: + res.append(tag) + + return ", ".join(sorted(res)) + + def create_extra_default_items_in_left_column(self): + + # this would be a lot better as gr.Radio but I can't make it work + self.select_sd_version = gr.Dropdown(['SD1', 'SD2', 'SDXL', 'Unknown'], value='Unknown', label='Stable Diffusion version', interactive=True) + + def create_editor(self): + self.create_default_editor_elems() + + self.taginfo = gr.HighlightedText(label="Training dataset tags") + self.edit_activation_text = gr.Text(label='Activation text', info="Will be added to prompt along with Lora") + self.slider_preferred_weight = gr.Slider(label='Preferred weight', info="Set to 0 to disable", minimum=0.0, maximum=2.0, step=0.01) + + with gr.Row() as row_random_prompt: + with gr.Column(scale=8): + random_prompt = gr.Textbox(label='Random prompt', lines=4, max_lines=4, interactive=False) + + with gr.Column(scale=1, min_width=120): + generate_random_prompt = gr.Button('Generate').style(full_width=True, size="lg") + + self.edit_notes = gr.TextArea(label='Notes', lines=4) + + generate_random_prompt.click(fn=self.generate_random_prompt, inputs=[self.edit_name_input], outputs=[random_prompt], show_progress=False) + + def select_tag(activation_text, evt: gr.SelectData): + tag = evt.value[0] + + words = re.split(re_comma, activation_text) + if tag in words: + words = [x for x in words if x != tag and x.strip()] + return ", ".join(words) + + return activation_text + ", " + tag if activation_text else tag + + self.taginfo.select(fn=select_tag, inputs=[self.edit_activation_text], outputs=[self.edit_activation_text], show_progress=False) + + self.create_default_buttons() + + viewed_components = [ + self.edit_name, + self.edit_description, + self.html_filedata, + self.html_preview, + self.edit_notes, + self.select_sd_version, + self.taginfo, + self.edit_activation_text, + self.slider_preferred_weight, + row_random_prompt, + random_prompt, + ] + + self.button_edit\ + .click(fn=self.put_values_into_components, inputs=[self.edit_name_input], outputs=viewed_components)\ + 
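The chained `.click(...).then(...)` wiring above follows a common Gradio two-step dialog idiom: first populate every viewed component, then flip the hidden container visible so the user never sees half-filled fields. A minimal standalone sketch of the same pattern (component names here are invented):

```python
import gradio as gr

# Minimal sketch of the populate-then-reveal idiom used by button_edit above.
with gr.Blocks() as demo:
    name = gr.Textbox(label="name")
    open_btn = gr.Button("Edit")
    with gr.Column(visible=False) as box:   # hidden dialog container
        details = gr.Textbox(label="details")

    def populate(n):
        return f"details for {n}"           # step 1: fill the fields

    open_btn.click(fn=populate, inputs=[name], outputs=[details])\
        .then(fn=lambda: gr.update(visible=True), inputs=[], outputs=[box])  # step 2: reveal
```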
+
+        edited_components = [
+            self.edit_description,
+            self.select_sd_version,
+            self.edit_activation_text,
+            self.slider_preferred_weight,
+            self.edit_notes,
+        ]
+
+        self.setup_save_handler(self.button_save, self.save_lora_user_metadata, edited_components)
diff --git a/extensions-builtin/Lora/ui_extra_networks_lora.py b/extensions-builtin/Lora/ui_extra_networks_lora.py
new file mode 100644
index 0000000000000000000000000000000000000000..c9e8ecc32e86edbe2d6153112e4db3e3c3f5d4e9
--- /dev/null
+++ b/extensions-builtin/Lora/ui_extra_networks_lora.py
@@ -0,0 +1,78 @@
+import os
+
+import network
+import networks
+
+from modules import shared, ui_extra_networks
+from modules.ui_extra_networks import quote_js
+from ui_edit_user_metadata import LoraUserMetadataEditor
+
+
+class ExtraNetworksPageLora(ui_extra_networks.ExtraNetworksPage):
+    def __init__(self):
+        super().__init__('Lora')
+
+    def refresh(self):
+        networks.list_available_networks()
+
+    def create_item(self, name, index=None, enable_filter=True):
+        lora_on_disk = networks.available_networks.get(name)
+
+        path, ext = os.path.splitext(lora_on_disk.filename)
+
+        alias = lora_on_disk.get_alias()
+
+        item = {
+            "name": name,
+            "filename": lora_on_disk.filename,
+            "preview": self.find_preview(path),
+            "description": self.find_description(path),
+            "search_term": self.search_terms_from_path(lora_on_disk.filename),
+            "local_preview": f"{path}.{shared.opts.samples_format}",
+            "metadata": lora_on_disk.metadata,
+            "sort_keys": {'default': index, **self.get_sort_keys(lora_on_disk.filename)},
+            "sd_version": lora_on_disk.sd_version.name,
+        }
+
+        self.read_user_metadata(item)
+        activation_text = item["user_metadata"].get("activation text")
+        preferred_weight = item["user_metadata"].get("preferred weight", 0.0)
+        item["prompt"] = quote_js(f"<lora:{alias}:") + " + opts.extra_networks_default_multiplier + " + quote_js(">")
+
+        if activation_text:
+            item["prompt"] += " + " + quote_js(" " + activation_text)
+
+        sd_version = item["user_metadata"].get("sd version")
+        if sd_version in network.SdVersion.__members__:
+            item["sd_version"] = sd_version
+            sd_version = network.SdVersion[sd_version]
+        else:
+            sd_version = lora_on_disk.sd_version
+
+        if shared.opts.lora_show_all or not enable_filter:
+            pass
+        elif sd_version == network.SdVersion.Unknown:
+            model_version = network.SdVersion.SDXL if shared.sd_model.is_sdxl else network.SdVersion.SD2 if shared.sd_model.is_sd2 else network.SdVersion.SD1
+            if model_version.name in shared.opts.lora_hide_unknown_for_versions:
+                return None
+        elif shared.sd_model.is_sdxl and sd_version != network.SdVersion.SDXL:
+            return None
+        elif shared.sd_model.is_sd2 and sd_version != network.SdVersion.SD2:
+            return None
+        elif shared.sd_model.is_sd1 and sd_version != network.SdVersion.SD1:
+            return None
+
+        return item
+
+    def list_items(self):
+        for index, name in enumerate(networks.available_networks):
+            item = self.create_item(name, index)
+
+            if item is not None:
+                yield item
+
+    def allowed_directories_for_previews(self):
+        return [shared.cmd_opts.lora_dir, shared.cmd_opts.lyco_dir_backcompat]
+
+    def create_user_metadata_editor(self, ui, tabname):
+        return LoraUserMetadataEditor(ui, tabname, self)
diff --git a/extensions-builtin/ScuNET/__pycache__/preload.cpython-310.pyc b/extensions-builtin/ScuNET/__pycache__/preload.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..907f912a1618ed51d130bb70167daf7183b9f738
Binary files /dev/null and b/extensions-builtin/ScuNET/__pycache__/preload.cpython-310.pyc
differ diff --git a/extensions-builtin/ScuNET/__pycache__/scunet_model_arch.cpython-310.pyc b/extensions-builtin/ScuNET/__pycache__/scunet_model_arch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e84447fbe0f2d9b207510bef1666cd5ea9137b20 Binary files /dev/null and b/extensions-builtin/ScuNET/__pycache__/scunet_model_arch.cpython-310.pyc differ diff --git a/extensions-builtin/ScuNET/preload.py b/extensions-builtin/ScuNET/preload.py new file mode 100644 index 0000000000000000000000000000000000000000..4ce82b1d4349b24192b1915d022ed4fda9f31e5c --- /dev/null +++ b/extensions-builtin/ScuNET/preload.py @@ -0,0 +1,6 @@ +import os +from modules import paths + + +def preload(parser): + parser.add_argument("--scunet-models-path", type=str, help="Path to directory with ScuNET model file(s).", default=os.path.join(paths.models_path, 'ScuNET')) diff --git a/extensions-builtin/ScuNET/scripts/__pycache__/scunet_model.cpython-310.pyc b/extensions-builtin/ScuNET/scripts/__pycache__/scunet_model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..240e95bb6a5cbd1c58a9d7abce27f287eea9316b Binary files /dev/null and b/extensions-builtin/ScuNET/scripts/__pycache__/scunet_model.cpython-310.pyc differ diff --git a/extensions-builtin/ScuNET/scripts/scunet_model.py b/extensions-builtin/ScuNET/scripts/scunet_model.py new file mode 100644 index 0000000000000000000000000000000000000000..167d2f64b8e8ef1c506d89026e5d2ac8687d8098 --- /dev/null +++ b/extensions-builtin/ScuNET/scripts/scunet_model.py @@ -0,0 +1,144 @@ +import sys + +import PIL.Image +import numpy as np +import torch +from tqdm import tqdm + +import modules.upscaler +from modules import devices, modelloader, script_callbacks, errors +from scunet_model_arch import SCUNet + +from modules.modelloader import load_file_from_url +from modules.shared import opts + + +class UpscalerScuNET(modules.upscaler.Upscaler): + def __init__(self, dirname): + self.name = "ScuNET" + self.model_name = "ScuNET GAN" + self.model_name2 = "ScuNET PSNR" + self.model_url = "https://github.com/cszn/KAIR/releases/download/v1.0/scunet_color_real_gan.pth" + self.model_url2 = "https://github.com/cszn/KAIR/releases/download/v1.0/scunet_color_real_psnr.pth" + self.user_path = dirname + super().__init__() + model_paths = self.find_models(ext_filter=[".pth"]) + scalers = [] + add_model2 = True + for file in model_paths: + if file.startswith("http"): + name = self.model_name + else: + name = modelloader.friendly_name(file) + if name == self.model_name2 or file == self.model_url2: + add_model2 = False + try: + scaler_data = modules.upscaler.UpscalerData(name, file, self, 4) + scalers.append(scaler_data) + except Exception: + errors.report(f"Error loading ScuNET model: {file}", exc_info=True) + if add_model2: + scaler_data2 = modules.upscaler.UpscalerData(self.model_name2, self.model_url2, self) + scalers.append(scaler_data2) + self.scalers = scalers + + @staticmethod + @torch.no_grad() + def tiled_inference(img, model): + # test the image tile by tile + h, w = img.shape[2:] + tile = opts.SCUNET_tile + tile_overlap = opts.SCUNET_tile_overlap + if tile == 0: + return model(img) + + device = devices.get_device_for('scunet') + assert tile % 8 == 0, "tile size should be a multiple of window_size" + sf = 1 + + stride = tile - tile_overlap + h_idx_list = list(range(0, h - tile, stride)) + [h - tile] + w_idx_list = list(range(0, w - tile, stride)) + [w - tile] + E = torch.zeros(1, 3, h * sf, w * sf, dtype=img.dtype, device=device) + W 
= torch.zeros_like(E, dtype=devices.dtype, device=device) + + with tqdm(total=len(h_idx_list) * len(w_idx_list), desc="ScuNET tiles") as pbar: + for h_idx in h_idx_list: + + for w_idx in w_idx_list: + + in_patch = img[..., h_idx: h_idx + tile, w_idx: w_idx + tile] + + out_patch = model(in_patch) + out_patch_mask = torch.ones_like(out_patch) + + E[ + ..., h_idx * sf: (h_idx + tile) * sf, w_idx * sf: (w_idx + tile) * sf + ].add_(out_patch) + W[ + ..., h_idx * sf: (h_idx + tile) * sf, w_idx * sf: (w_idx + tile) * sf + ].add_(out_patch_mask) + pbar.update(1) + output = E.div_(W) + + return output + + def do_upscale(self, img: PIL.Image.Image, selected_file): + + devices.torch_gc() + + try: + model = self.load_model(selected_file) + except Exception as e: + print(f"ScuNET: Unable to load model from {selected_file}: {e}", file=sys.stderr) + return img + + device = devices.get_device_for('scunet') + tile = opts.SCUNET_tile + h, w = img.height, img.width + np_img = np.array(img) + np_img = np_img[:, :, ::-1] # RGB to BGR + np_img = np_img.transpose((2, 0, 1)) / 255 # HWC to CHW + torch_img = torch.from_numpy(np_img).float().unsqueeze(0).to(device) # type: ignore + + if tile > h or tile > w: + _img = torch.zeros(1, 3, max(h, tile), max(w, tile), dtype=torch_img.dtype, device=torch_img.device) + _img[:, :, :h, :w] = torch_img # pad image + torch_img = _img + + torch_output = self.tiled_inference(torch_img, model).squeeze(0) + torch_output = torch_output[:, :h * 1, :w * 1] # remove padding, if any + np_output: np.ndarray = torch_output.float().cpu().clamp_(0, 1).numpy() + del torch_img, torch_output + devices.torch_gc() + + output = np_output.transpose((1, 2, 0)) # CHW to HWC + output = output[:, :, ::-1] # BGR to RGB + return PIL.Image.fromarray((output * 255).astype(np.uint8)) + + def load_model(self, path: str): + device = devices.get_device_for('scunet') + if path.startswith("http"): + # TODO: this doesn't use `path` at all? 
+ filename = load_file_from_url(self.model_url, model_dir=self.model_download_path, file_name=f"{self.name}.pth") + else: + filename = path + model = SCUNet(in_nc=3, config=[4, 4, 4, 4, 4, 4, 4], dim=64) + model.load_state_dict(torch.load(filename), strict=True) + model.eval() + for _, v in model.named_parameters(): + v.requires_grad = False + model = model.to(device) + + return model + + +def on_ui_settings(): + import gradio as gr + from modules import shared + + shared.opts.add_option("SCUNET_tile", shared.OptionInfo(256, "Tile size for SCUNET upscalers.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}, section=('upscaling', "Upscaling")).info("0 = no tiling")) + shared.opts.add_option("SCUNET_tile_overlap", shared.OptionInfo(8, "Tile overlap for SCUNET upscalers.", gr.Slider, {"minimum": 0, "maximum": 64, "step": 1}, section=('upscaling', "Upscaling")).info("Low values = visible seam")) + + +script_callbacks.on_ui_settings(on_ui_settings) diff --git a/extensions-builtin/ScuNET/scunet_model_arch.py b/extensions-builtin/ScuNET/scunet_model_arch.py new file mode 100644 index 0000000000000000000000000000000000000000..b51a880629baa492ffcbebe682bcf101f06699a6 --- /dev/null +++ b/extensions-builtin/ScuNET/scunet_model_arch.py @@ -0,0 +1,268 @@ +# -*- coding: utf-8 -*- +import numpy as np +import torch +import torch.nn as nn +from einops import rearrange +from einops.layers.torch import Rearrange +from timm.models.layers import trunc_normal_, DropPath + + +class WMSA(nn.Module): + """ Self-attention module in Swin Transformer + """ + + def __init__(self, input_dim, output_dim, head_dim, window_size, type): + super(WMSA, self).__init__() + self.input_dim = input_dim + self.output_dim = output_dim + self.head_dim = head_dim + self.scale = self.head_dim ** -0.5 + self.n_heads = input_dim // head_dim + self.window_size = window_size + self.type = type + self.embedding_layer = nn.Linear(self.input_dim, 3 * self.input_dim, bias=True) + + self.relative_position_params = nn.Parameter( + torch.zeros((2 * window_size - 1) * (2 * window_size - 1), self.n_heads)) + + self.linear = nn.Linear(self.input_dim, self.output_dim) + + trunc_normal_(self.relative_position_params, std=.02) + self.relative_position_params = torch.nn.Parameter( + self.relative_position_params.view(2 * window_size - 1, 2 * window_size - 1, self.n_heads).transpose(1, + 2).transpose( + 0, 1)) + + def generate_mask(self, h, w, p, shift): + """ generating the mask of SW-MSA + Args: + shift: shift parameters in CyclicShift. + Returns: + attn_mask: should be (1 1 w p p), + """ + # supporting square. + attn_mask = torch.zeros(h, w, p, p, p, p, dtype=torch.bool, device=self.relative_position_params.device) + if self.type == 'W': + return attn_mask + + s = p - shift + attn_mask[-1, :, :s, :, s:, :] = True + attn_mask[-1, :, s:, :, :s, :] = True + attn_mask[:, -1, :, :s, :, s:] = True + attn_mask[:, -1, :, s:, :, :s] = True + attn_mask = rearrange(attn_mask, 'w1 w2 p1 p2 p3 p4 -> 1 1 (w1 w2) (p1 p2) (p3 p4)') + return attn_mask + + def forward(self, x): + """ Forward pass of Window Multi-head Self-attention module. 
+ Args: + x: input tensor with shape of [b h w c]; + attn_mask: attention mask, fill -inf where the value is True; + Returns: + output: tensor shape [b h w c] + """ + if self.type != 'W': + x = torch.roll(x, shifts=(-(self.window_size // 2), -(self.window_size // 2)), dims=(1, 2)) + + x = rearrange(x, 'b (w1 p1) (w2 p2) c -> b w1 w2 p1 p2 c', p1=self.window_size, p2=self.window_size) + h_windows = x.size(1) + w_windows = x.size(2) + # square validation + # assert h_windows == w_windows + + x = rearrange(x, 'b w1 w2 p1 p2 c -> b (w1 w2) (p1 p2) c', p1=self.window_size, p2=self.window_size) + qkv = self.embedding_layer(x) + q, k, v = rearrange(qkv, 'b nw np (threeh c) -> threeh b nw np c', c=self.head_dim).chunk(3, dim=0) + sim = torch.einsum('hbwpc,hbwqc->hbwpq', q, k) * self.scale + # Adding learnable relative embedding + sim = sim + rearrange(self.relative_embedding(), 'h p q -> h 1 1 p q') + # Using Attn Mask to distinguish different subwindows. + if self.type != 'W': + attn_mask = self.generate_mask(h_windows, w_windows, self.window_size, shift=self.window_size // 2) + sim = sim.masked_fill_(attn_mask, float("-inf")) + + probs = nn.functional.softmax(sim, dim=-1) + output = torch.einsum('hbwij,hbwjc->hbwic', probs, v) + output = rearrange(output, 'h b w p c -> b w p (h c)') + output = self.linear(output) + output = rearrange(output, 'b (w1 w2) (p1 p2) c -> b (w1 p1) (w2 p2) c', w1=h_windows, p1=self.window_size) + + if self.type != 'W': + output = torch.roll(output, shifts=(self.window_size // 2, self.window_size // 2), dims=(1, 2)) + + return output + + def relative_embedding(self): + cord = torch.tensor(np.array([[i, j] for i in range(self.window_size) for j in range(self.window_size)])) + relation = cord[:, None, :] - cord[None, :, :] + self.window_size - 1 + # negative is allowed + return self.relative_position_params[:, relation[:, :, 0].long(), relation[:, :, 1].long()] + + +class Block(nn.Module): + def __init__(self, input_dim, output_dim, head_dim, window_size, drop_path, type='W', input_resolution=None): + """ SwinTransformer Block + """ + super(Block, self).__init__() + self.input_dim = input_dim + self.output_dim = output_dim + assert type in ['W', 'SW'] + self.type = type + if input_resolution <= window_size: + self.type = 'W' + + self.ln1 = nn.LayerNorm(input_dim) + self.msa = WMSA(input_dim, input_dim, head_dim, window_size, self.type) + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + self.ln2 = nn.LayerNorm(input_dim) + self.mlp = nn.Sequential( + nn.Linear(input_dim, 4 * input_dim), + nn.GELU(), + nn.Linear(4 * input_dim, output_dim), + ) + + def forward(self, x): + x = x + self.drop_path(self.msa(self.ln1(x))) + x = x + self.drop_path(self.mlp(self.ln2(x))) + return x + + +class ConvTransBlock(nn.Module): + def __init__(self, conv_dim, trans_dim, head_dim, window_size, drop_path, type='W', input_resolution=None): + """ SwinTransformer and Conv Block + """ + super(ConvTransBlock, self).__init__() + self.conv_dim = conv_dim + self.trans_dim = trans_dim + self.head_dim = head_dim + self.window_size = window_size + self.drop_path = drop_path + self.type = type + self.input_resolution = input_resolution + + assert self.type in ['W', 'SW'] + if self.input_resolution <= self.window_size: + self.type = 'W' + + self.trans_block = Block(self.trans_dim, self.trans_dim, self.head_dim, self.window_size, self.drop_path, + self.type, self.input_resolution) + self.conv1_1 = nn.Conv2d(self.conv_dim + self.trans_dim, self.conv_dim + self.trans_dim, 1, 1, 0, bias=True) + self.conv1_2 = nn.Conv2d(self.conv_dim + self.trans_dim, self.conv_dim + self.trans_dim, 1, 1, 0, bias=True) + + self.conv_block = nn.Sequential( + nn.Conv2d(self.conv_dim, self.conv_dim, 3, 1, 1, bias=False), + nn.ReLU(True), + nn.Conv2d(self.conv_dim, self.conv_dim, 3, 1, 1, bias=False) + ) + + def forward(self, x): + conv_x, trans_x = torch.split(self.conv1_1(x), (self.conv_dim, self.trans_dim), dim=1) + conv_x = self.conv_block(conv_x) + conv_x + trans_x = Rearrange('b c h w -> b h w c')(trans_x) + trans_x = self.trans_block(trans_x) + trans_x = Rearrange('b h w c -> b c h w')(trans_x) + res = self.conv1_2(torch.cat((conv_x, trans_x), dim=1)) + x = x + res + + return x + + +class SCUNet(nn.Module): + # def __init__(self, in_nc=3, config=[2, 2, 2, 2, 2, 2, 2], dim=64, drop_path_rate=0.0, input_resolution=256): + def __init__(self, in_nc=3, config=None, dim=64, drop_path_rate=0.0, input_resolution=256): + super(SCUNet, self).__init__() + if config is None: + config = [2, 2, 2, 2, 2, 2, 2] + self.config = config + self.dim = dim + self.head_dim = 32 + self.window_size = 8 + + # drop path rate for each layer + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(config))] + + self.m_head = [nn.Conv2d(in_nc, dim, 3, 1, 1, bias=False)] + + begin = 0 + self.m_down1 = [ConvTransBlock(dim // 2, dim // 2, self.head_dim, self.window_size, dpr[i + begin], + 'W' if not i % 2 else 'SW', input_resolution) + for i in range(config[0])] + \ + [nn.Conv2d(dim, 2 * dim, 2, 2, 0, bias=False)] + + begin += config[0] + self.m_down2 = [ConvTransBlock(dim, dim, self.head_dim, self.window_size, dpr[i + begin], + 'W' if not i % 2 else 'SW', input_resolution // 2) + for i in range(config[1])] + \ + [nn.Conv2d(2 * dim, 4 * dim, 2, 2, 0, bias=False)] + + begin += config[1] + self.m_down3 = [ConvTransBlock(2 * dim, 2 * dim, self.head_dim, self.window_size, dpr[i + begin], + 'W' if not i % 2 else 'SW', input_resolution // 4) + for i in range(config[2])] + \ + [nn.Conv2d(4 * dim, 8 * dim, 2, 2, 0, bias=False)] + + begin += config[2] + self.m_body = [ConvTransBlock(4 * dim, 4 * dim, self.head_dim, self.window_size, dpr[i + begin], + 'W' if not i % 2 else 'SW', input_resolution // 8) + for i in range(config[3])] + + begin += config[3] + self.m_up3 = [nn.ConvTranspose2d(8 * dim, 4 * dim, 2, 2, 0, bias=False), ] + \ + [ConvTransBlock(2 * dim, 2 * dim, self.head_dim, self.window_size, dpr[i + begin], + 'W' if not i % 2 
else 'SW', input_resolution // 4) + for i in range(config[4])] + + begin += config[4] + self.m_up2 = [nn.ConvTranspose2d(4 * dim, 2 * dim, 2, 2, 0, bias=False), ] + \ + [ConvTransBlock(dim, dim, self.head_dim, self.window_size, dpr[i + begin], + 'W' if not i % 2 else 'SW', input_resolution // 2) + for i in range(config[5])] + + begin += config[5] + self.m_up1 = [nn.ConvTranspose2d(2 * dim, dim, 2, 2, 0, bias=False), ] + \ + [ConvTransBlock(dim // 2, dim // 2, self.head_dim, self.window_size, dpr[i + begin], + 'W' if not i % 2 else 'SW', input_resolution) + for i in range(config[6])] + + self.m_tail = [nn.Conv2d(dim, in_nc, 3, 1, 1, bias=False)] + + self.m_head = nn.Sequential(*self.m_head) + self.m_down1 = nn.Sequential(*self.m_down1) + self.m_down2 = nn.Sequential(*self.m_down2) + self.m_down3 = nn.Sequential(*self.m_down3) + self.m_body = nn.Sequential(*self.m_body) + self.m_up3 = nn.Sequential(*self.m_up3) + self.m_up2 = nn.Sequential(*self.m_up2) + self.m_up1 = nn.Sequential(*self.m_up1) + self.m_tail = nn.Sequential(*self.m_tail) + # self.apply(self._init_weights) + + def forward(self, x0): + + h, w = x0.size()[-2:] + paddingBottom = int(np.ceil(h / 64) * 64 - h) + paddingRight = int(np.ceil(w / 64) * 64 - w) + x0 = nn.ReplicationPad2d((0, paddingRight, 0, paddingBottom))(x0) + + x1 = self.m_head(x0) + x2 = self.m_down1(x1) + x3 = self.m_down2(x2) + x4 = self.m_down3(x3) + x = self.m_body(x4) + x = self.m_up3(x + x4) + x = self.m_up2(x + x3) + x = self.m_up1(x + x2) + x = self.m_tail(x + x1) + + x = x[..., :h, :w] + + return x + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) diff --git a/extensions-builtin/SwinIR/__pycache__/preload.cpython-310.pyc b/extensions-builtin/SwinIR/__pycache__/preload.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dbed2814dcb3661c55e9915d72d9cca7e8c1413e Binary files /dev/null and b/extensions-builtin/SwinIR/__pycache__/preload.cpython-310.pyc differ diff --git a/extensions-builtin/SwinIR/__pycache__/swinir_model_arch.cpython-310.pyc b/extensions-builtin/SwinIR/__pycache__/swinir_model_arch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f841b68962d693a7bfa50d9cc9c236cbeda70c86 Binary files /dev/null and b/extensions-builtin/SwinIR/__pycache__/swinir_model_arch.cpython-310.pyc differ diff --git a/extensions-builtin/SwinIR/__pycache__/swinir_model_arch_v2.cpython-310.pyc b/extensions-builtin/SwinIR/__pycache__/swinir_model_arch_v2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2855db6c62df908bf0b2ebba363c4b1ee07c4a30 Binary files /dev/null and b/extensions-builtin/SwinIR/__pycache__/swinir_model_arch_v2.cpython-310.pyc differ diff --git a/extensions-builtin/SwinIR/preload.py b/extensions-builtin/SwinIR/preload.py new file mode 100644 index 0000000000000000000000000000000000000000..e912c6402bc80faa797cf2e95183101fb9a10286 --- /dev/null +++ b/extensions-builtin/SwinIR/preload.py @@ -0,0 +1,6 @@ +import os +from modules import paths + + +def preload(parser): + parser.add_argument("--swinir-models-path", type=str, help="Path to directory with SwinIR model file(s).", default=os.path.join(paths.models_path, 'SwinIR')) diff --git a/extensions-builtin/SwinIR/scripts/__pycache__/swinir_model.cpython-310.pyc 
b/extensions-builtin/SwinIR/scripts/__pycache__/swinir_model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..584a1a192f814ed7022488f26742ba935fef083b Binary files /dev/null and b/extensions-builtin/SwinIR/scripts/__pycache__/swinir_model.cpython-310.pyc differ diff --git a/extensions-builtin/SwinIR/scripts/swinir_model.py b/extensions-builtin/SwinIR/scripts/swinir_model.py new file mode 100644 index 0000000000000000000000000000000000000000..ae0d0e6a8ea04f3054c1e8e5baefd2f76b57f246 --- /dev/null +++ b/extensions-builtin/SwinIR/scripts/swinir_model.py @@ -0,0 +1,192 @@ +import sys +import platform + +import numpy as np +import torch +from PIL import Image +from tqdm import tqdm + +from modules import modelloader, devices, script_callbacks, shared +from modules.shared import opts, state +from swinir_model_arch import SwinIR +from swinir_model_arch_v2 import Swin2SR +from modules.upscaler import Upscaler, UpscalerData + +SWINIR_MODEL_URL = "https://github.com/JingyunLiang/SwinIR/releases/download/v0.0/003_realSR_BSRGAN_DFOWMFC_s64w8_SwinIR-L_x4_GAN.pth" + +device_swinir = devices.get_device_for('swinir') + + +class UpscalerSwinIR(Upscaler): + def __init__(self, dirname): + self._cached_model = None # keep the model when SWIN_torch_compile is on to prevent re-compile every runs + self._cached_model_config = None # to clear '_cached_model' when changing model (v1/v2) or settings + self.name = "SwinIR" + self.model_url = SWINIR_MODEL_URL + self.model_name = "SwinIR 4x" + self.user_path = dirname + super().__init__() + scalers = [] + model_files = self.find_models(ext_filter=[".pt", ".pth"]) + for model in model_files: + if model.startswith("http"): + name = self.model_name + else: + name = modelloader.friendly_name(model) + model_data = UpscalerData(name, model, self) + scalers.append(model_data) + self.scalers = scalers + + def do_upscale(self, img, model_file): + use_compile = hasattr(opts, 'SWIN_torch_compile') and opts.SWIN_torch_compile \ + and int(torch.__version__.split('.')[0]) >= 2 and platform.system() != "Windows" + current_config = (model_file, opts.SWIN_tile) + + if use_compile and self._cached_model_config == current_config: + model = self._cached_model + else: + self._cached_model = None + try: + model = self.load_model(model_file) + except Exception as e: + print(f"Failed loading SwinIR model {model_file}: {e}", file=sys.stderr) + return img + model = model.to(device_swinir, dtype=devices.dtype) + if use_compile: + model = torch.compile(model) + self._cached_model = model + self._cached_model_config = current_config + img = upscale(img, model) + devices.torch_gc() + return img + + def load_model(self, path, scale=4): + if path.startswith("http"): + filename = modelloader.load_file_from_url( + url=path, + model_dir=self.model_download_path, + file_name=f"{self.model_name.replace(' ', '_')}.pth", + ) + else: + filename = path + if filename.endswith(".v2.pth"): + model = Swin2SR( + upscale=scale, + in_chans=3, + img_size=64, + window_size=8, + img_range=1.0, + depths=[6, 6, 6, 6, 6, 6], + embed_dim=180, + num_heads=[6, 6, 6, 6, 6, 6], + mlp_ratio=2, + upsampler="nearest+conv", + resi_connection="1conv", + ) + params = None + else: + model = SwinIR( + upscale=scale, + in_chans=3, + img_size=64, + window_size=8, + img_range=1.0, + depths=[6, 6, 6, 6, 6, 6, 6, 6, 6], + embed_dim=240, + num_heads=[8, 8, 8, 8, 8, 8, 8, 8, 8], + mlp_ratio=2, + upsampler="nearest+conv", + resi_connection="3conv", + ) + params = "params_ema" + + pretrained_model = 
torch.load(filename)
+        if params is not None:
+            model.load_state_dict(pretrained_model[params], strict=True)
+        else:
+            model.load_state_dict(pretrained_model, strict=True)
+        return model
+
+
+def upscale(
+    img,
+    model,
+    tile=None,
+    tile_overlap=None,
+    window_size=8,
+    scale=4,
+):
+    tile = tile or opts.SWIN_tile
+    tile_overlap = tile_overlap or opts.SWIN_tile_overlap
+
+    img = np.array(img)
+    img = img[:, :, ::-1]  # RGB to BGR
+    img = np.moveaxis(img, 2, 0) / 255  # HWC to CHW
+    img = torch.from_numpy(img).float()
+    img = img.unsqueeze(0).to(device_swinir, dtype=devices.dtype)
+    with torch.no_grad(), devices.autocast():
+        _, _, h_old, w_old = img.size()
+        h_pad = (h_old // window_size + 1) * window_size - h_old
+        w_pad = (w_old // window_size + 1) * window_size - w_old
+        img = torch.cat([img, torch.flip(img, [2])], 2)[:, :, : h_old + h_pad, :]
+        img = torch.cat([img, torch.flip(img, [3])], 3)[:, :, :, : w_old + w_pad]
+        output = inference(img, model, tile, tile_overlap, window_size, scale)
+        output = output[..., : h_old * scale, : w_old * scale]
+        output = output.data.squeeze().float().cpu().clamp_(0, 1).numpy()
+        if output.ndim == 3:
+            output = np.transpose(
+                output[[2, 1, 0], :, :], (1, 2, 0)
+            )  # CHW-BGR to HWC-RGB
+        output = (output * 255.0).round().astype(np.uint8)  # float32 to uint8
+    return Image.fromarray(output, "RGB")
+
+
+def inference(img, model, tile, tile_overlap, window_size, scale):
+    # test the image tile by tile
+    b, c, h, w = img.size()
+    tile = min(tile, h, w)
+    assert tile % window_size == 0, "tile size should be a multiple of window_size"
+    sf = scale
+
+    stride = tile - tile_overlap
+    h_idx_list = list(range(0, h - tile, stride)) + [h - tile]
+    w_idx_list = list(range(0, w - tile, stride)) + [w - tile]
+    E = torch.zeros(b, c, h * sf, w * sf, dtype=devices.dtype, device=device_swinir).type_as(img)
+    W = torch.zeros_like(E, dtype=devices.dtype, device=device_swinir)
+
+    with tqdm(total=len(h_idx_list) * len(w_idx_list), desc="SwinIR tiles") as pbar:
+        for h_idx in h_idx_list:
+            if state.interrupted or state.skipped:
+                break
+
+            for w_idx in w_idx_list:
+                if state.interrupted or state.skipped:
+                    break
+
+                in_patch = img[..., h_idx: h_idx + tile, w_idx: w_idx + tile]
+                out_patch = model(in_patch)
+                out_patch_mask = torch.ones_like(out_patch)
+
+                E[
+                    ..., h_idx * sf: (h_idx + tile) * sf, w_idx * sf: (w_idx + tile) * sf
+                ].add_(out_patch)
+                W[
+                    ..., h_idx * sf: (h_idx + tile) * sf, w_idx * sf: (w_idx + tile) * sf
+                ].add_(out_patch_mask)
+                pbar.update(1)
+    output = E.div_(W)
+
+    return output
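The `E`/`W` accumulation in `inference` implements overlap-averaged tiling: tile outputs are summed into `E`, a hit-count mask into `W`, and `E.div_(W)` averages wherever tiles overlap. A one-dimensional toy version of the same bookkeeping (illustrative only):

```python
import torch

signal_len, tile, stride = 8, 4, 2
x = torch.arange(float(signal_len))

E = torch.zeros(signal_len)
W = torch.zeros(signal_len)
for start in list(range(0, signal_len - tile, stride)) + [signal_len - tile]:
    E[start:start + tile] += x[start:start + tile]  # identity "model" for the demo
    W[start:start + tile] += 1                      # how many tiles touched each sample

assert torch.allclose(E / W, x)  # averaging the overlaps reproduces the signal
```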
+
+
+def on_ui_settings():
+    import gradio as gr
+
+    shared.opts.add_option("SWIN_tile", shared.OptionInfo(192, "Tile size for all SwinIR.", gr.Slider, {"minimum": 16, "maximum": 512, "step": 16}, section=('upscaling', "Upscaling")))
+    shared.opts.add_option("SWIN_tile_overlap", shared.OptionInfo(8, "Tile overlap, in pixels for SwinIR. Low values = visible seam.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}, section=('upscaling', "Upscaling")))
+    if int(torch.__version__.split('.')[0]) >= 2 and platform.system() != "Windows":  # torch.compile() requires pytorch 2.0 or above, and does not work on Windows
+        shared.opts.add_option("SWIN_torch_compile", shared.OptionInfo(False, "Use torch.compile to accelerate SwinIR.", gr.Checkbox, {"interactive": True}, section=('upscaling', "Upscaling")).info("Takes longer on first run"))
+
+
+script_callbacks.on_ui_settings(on_ui_settings)
diff --git a/extensions-builtin/SwinIR/swinir_model_arch.py b/extensions-builtin/SwinIR/swinir_model_arch.py
new file mode 100644
index 0000000000000000000000000000000000000000..93b9327473a6e77c3a3dc6a7743e932c9083a996
--- /dev/null
+++ b/extensions-builtin/SwinIR/swinir_model_arch.py
@@ -0,0 +1,867 @@
+# -----------------------------------------------------------------------------------
+# SwinIR: Image Restoration Using Swin Transformer, https://arxiv.org/abs/2108.10257
+# Originally Written by Ze Liu, Modified by Jingyun Liang.
+# -----------------------------------------------------------------------------------
+
+import math
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import torch.utils.checkpoint as checkpoint
+from timm.models.layers import DropPath, to_2tuple, trunc_normal_
+
+
+class Mlp(nn.Module):
+    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
+        super().__init__()
+        out_features = out_features or in_features
+        hidden_features = hidden_features or in_features
+        self.fc1 = nn.Linear(in_features, hidden_features)
+        self.act = act_layer()
+        self.fc2 = nn.Linear(hidden_features, out_features)
+        self.drop = nn.Dropout(drop)
+
+    def forward(self, x):
+        x = self.fc1(x)
+        x = self.act(x)
+        x = self.drop(x)
+        x = self.fc2(x)
+        x = self.drop(x)
+        return x
+
+
+def window_partition(x, window_size):
+    """
+    Args:
+        x: (B, H, W, C)
+        window_size (int): window size
+
+    Returns:
+        windows: (num_windows*B, window_size, window_size, C)
+    """
+    B, H, W, C = x.shape
+    x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
+    windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
+    return windows
+
+
+def window_reverse(windows, window_size, H, W):
+    """
+    Args:
+        windows: (num_windows*B, window_size, window_size, C)
+        window_size (int): Window size
+        H (int): Height of image
+        W (int): Width of image
+
+    Returns:
+        x: (B, H, W, C)
+    """
+    B = int(windows.shape[0] / (H * W / window_size / window_size))
+    x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
+    x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
+    return x
+
+
+class WindowAttention(nn.Module):
+    r""" Window based multi-head self attention (W-MSA) module with relative position bias.
+    It supports both shifted and non-shifted windows.
+
+    Args:
+        dim (int): Number of input channels.
+        window_size (tuple[int]): The height and width of the window.
+        num_heads (int): Number of attention heads.
+        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
+        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
+        attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
+        proj_drop (float, optional): Dropout ratio of output.
Default: 0.0 + """ + + def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.): + + super().__init__() + self.dim = dim + self.window_size = window_size # Wh, Ww + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = qk_scale or head_dim ** -0.5 + + # define a parameter table of relative position bias + self.relative_position_bias_table = nn.Parameter( + torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH + + # get pair-wise relative position index for each token inside the window + coords_h = torch.arange(self.window_size[0]) + coords_w = torch.arange(self.window_size[1]) + coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww + coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww + relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww + relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 + relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0 + relative_coords[:, :, 1] += self.window_size[1] - 1 + relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 + relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww + self.register_buffer("relative_position_index", relative_position_index) + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + + self.proj_drop = nn.Dropout(proj_drop) + + trunc_normal_(self.relative_position_bias_table, std=.02) + self.softmax = nn.Softmax(dim=-1) + + def forward(self, x, mask=None): + """ + Args: + x: input features with shape of (num_windows*B, N, C) + mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None + """ + B_, N, C = x.shape + qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) + + q = q * self.scale + attn = (q @ k.transpose(-2, -1)) + + relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view( + self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH + relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww + attn = attn + relative_position_bias.unsqueeze(0) + + if mask is not None: + nW = mask.shape[0] + attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) + attn = attn.view(-1, self.num_heads, N, N) + attn = self.softmax(attn) + else: + attn = self.softmax(attn) + + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B_, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + def extra_repr(self) -> str: + return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}' + + def flops(self, N): + # calculate flops for 1 window with token length of N + flops = 0 + # qkv = self.qkv(x) + flops += N * self.dim * 3 * self.dim + # attn = (q @ k.transpose(-2, -1)) + flops += self.num_heads * N * (self.dim // self.num_heads) * N + # x = (attn @ v) + flops += self.num_heads * N * N * (self.dim // self.num_heads) + # x = self.proj(x) + flops += N * self.dim * self.dim + return flops + + +class SwinTransformerBlock(nn.Module): + r""" Swin Transformer Block. + + Args: + dim (int): Number of input channels. + input_resolution (tuple[int]): Input resolution. 
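The `relative_position_index` buffer built in `WindowAttention.__init__` above is easiest to see on a tiny window: every token pair's (dy, dx) offset is shifted to be non-negative and flattened row-major into an index over the (2*Wh-1)*(2*Ww-1) bias table. A worked sketch for a 2x2 window (`indexing="ij"` is passed explicitly for newer PyTorch, where the original code relies on the old default):

```python
import torch

Wh = Ww = 2
coords = torch.stack(torch.meshgrid([torch.arange(Wh), torch.arange(Ww)], indexing="ij"))
coords_flatten = torch.flatten(coords, 1)                      # 2, Wh*Ww
rel = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # 2, 4, 4 pairwise offsets
rel = rel.permute(1, 2, 0).contiguous()
rel[:, :, 0] += Wh - 1       # shift row offsets from [-1, 1] to [0, 2]
rel[:, :, 1] += Ww - 1
rel[:, :, 0] *= 2 * Ww - 1   # row-major flattening over the (2*Wh-1, 2*Ww-1) grid
index = rel.sum(-1)          # 4x4 table of indices into the bias table
assert index.min() == 0 and index.max() == (2 * Wh - 1) * (2 * Ww - 1) - 1
```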
+ num_heads (int): Number of attention heads. + window_size (int): Window size. + shift_size (int): Shift size for SW-MSA. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. + drop (float, optional): Dropout rate. Default: 0.0 + attn_drop (float, optional): Attention dropout rate. Default: 0.0 + drop_path (float, optional): Stochastic depth rate. Default: 0.0 + act_layer (nn.Module, optional): Activation layer. Default: nn.GELU + norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm + """ + + def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0, + mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0., + act_layer=nn.GELU, norm_layer=nn.LayerNorm): + super().__init__() + self.dim = dim + self.input_resolution = input_resolution + self.num_heads = num_heads + self.window_size = window_size + self.shift_size = shift_size + self.mlp_ratio = mlp_ratio + if min(self.input_resolution) <= self.window_size: + # if window size is larger than input resolution, we don't partition windows + self.shift_size = 0 + self.window_size = min(self.input_resolution) + assert 0 <= self.shift_size < self.window_size, "shift_size must be in [0, window_size)" + + self.norm1 = norm_layer(dim) + self.attn = WindowAttention( + dim, window_size=to_2tuple(self.window_size), num_heads=num_heads, + qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) + + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + if self.shift_size > 0: + attn_mask = self.calculate_mask(self.input_resolution) + else: + attn_mask = None + + self.register_buffer("attn_mask", attn_mask) + + def calculate_mask(self, x_size): + # calculate attention mask for SW-MSA + H, W = x_size + img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1 + h_slices = (slice(0, -self.window_size), + slice(-self.window_size, -self.shift_size), + slice(-self.shift_size, None)) + w_slices = (slice(0, -self.window_size), + slice(-self.window_size, -self.shift_size), + slice(-self.shift_size, None)) + cnt = 0 + for h in h_slices: + for w in w_slices: + img_mask[:, h, w, :] = cnt + cnt += 1 + + mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1 + mask_windows = mask_windows.view(-1, self.window_size * self.window_size) + attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) + attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) + + return attn_mask + + def forward(self, x, x_size): + H, W = x_size + B, L, C = x.shape + # assert L == H * W, "input feature has wrong size" + + shortcut = x + x = self.norm1(x) + x = x.view(B, H, W, C) + + # cyclic shift + if self.shift_size > 0: + shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) + else: + shifted_x = x + + # partition windows + x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C + x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C + + # W-MSA/SW-MSA (to be compatible with testing on images whose shapes are a multiple of the window size)
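`calculate_mask` above implements SW-MSA masking: it labels nine regions of the (shifted) image and forbids attention between tokens whose labels differ by assigning -100 logits. A standalone sketch for H = W = 8, window_size = 4, shift_size = 2, with the window partitioning inlined:

```python
import torch

H, W, ws, shift = 8, 8, 4, 2
img_mask = torch.zeros((1, H, W, 1))
slices = (slice(0, -ws), slice(-ws, -shift), slice(-shift, None))
cnt = 0
for h in slices:
    for w in slices:
        img_mask[:, h, w, :] = cnt  # nine region labels, as in calculate_mask
        cnt += 1

# inline window_partition on the label map
mw = img_mask.view(1, H // ws, ws, W // ws, ws, 1)
mw = mw.permute(0, 1, 3, 2, 4, 5).reshape(-1, ws * ws)   # (4 windows, 16 tokens)
attn_mask = mw.unsqueeze(1) - mw.unsqueeze(2)            # pairwise label differences
attn_mask = attn_mask.masked_fill(attn_mask != 0, -100.0)
print((attn_mask.view(4, -1) == -100.0).any(dim=1))      # tensor([False, True, True, True])
```

Only the top-left window is label-uniform; the other three straddle a shift boundary, and their -100 entries vanish after the softmax.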
+ if self.input_resolution == x_size: + attn_windows = self.attn(x_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C + else: + attn_windows = self.attn(x_windows, mask=self.calculate_mask(x_size).to(x.device)) + + # merge windows + attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) + shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C + + # reverse cyclic shift + if self.shift_size > 0: + x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) + else: + x = shifted_x + x = x.view(B, H * W, C) + + # FFN + x = shortcut + self.drop_path(x) + x = x + self.drop_path(self.mlp(self.norm2(x))) + + return x + + def extra_repr(self) -> str: + return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \ + f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}" + + def flops(self): + flops = 0 + H, W = self.input_resolution + # norm1 + flops += self.dim * H * W + # W-MSA/SW-MSA + nW = H * W / self.window_size / self.window_size + flops += nW * self.attn.flops(self.window_size * self.window_size) + # mlp + flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio + # norm2 + flops += self.dim * H * W + return flops + + +class PatchMerging(nn.Module): + r""" Patch Merging Layer. + + Args: + input_resolution (tuple[int]): Resolution of input feature. + dim (int): Number of input channels. + norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm + """ + + def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm): + super().__init__() + self.input_resolution = input_resolution + self.dim = dim + self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False) + self.norm = norm_layer(4 * dim) + + def forward(self, x): + """ + x: B, H*W, C + """ + H, W = self.input_resolution + B, L, C = x.shape + assert L == H * W, "input feature has wrong size" + assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) is not even." + + x = x.view(B, H, W, C) + + x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C + x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C + x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C + x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C + x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C + x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C + + x = self.norm(x) + x = self.reduction(x) + + return x + + def extra_repr(self) -> str: + return f"input_resolution={self.input_resolution}, dim={self.dim}" + + def flops(self): + H, W = self.input_resolution + flops = H * W * self.dim + flops += (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim + return flops + + +class BasicLayer(nn.Module): + """ A basic Swin Transformer layer for one stage. + + Args: + dim (int): Number of input channels. + input_resolution (tuple[int]): Input resolution. + depth (int): Number of blocks. + num_heads (int): Number of attention heads. + window_size (int): Local window size. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. + drop (float, optional): Dropout rate. Default: 0.0 + attn_drop (float, optional): Attention dropout rate. Default: 0.0 + drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0 + norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm + downsample (nn.Module | None, optional): Downsample layer at the end of the layer.
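`PatchMerging` above is the classification-style downsampling stage; the SR model in this file never enables it (`downsample=None` when `SwinIR` builds its RSTBs below), but its contract is worth pinning down: it interleaves the four pixel phases into channels, then normalizes and reduces, mapping (B, H*W, C) to (B, H/2*W/2, 2C). A shape sketch:

```python
import torch
import torch.nn as nn

B, H, W, C = 2, 8, 8, 96
x = torch.randn(B, H * W, C).view(B, H, W, C)
x0 = x[:, 0::2, 0::2, :]  # even rows, even cols
x1 = x[:, 1::2, 0::2, :]  # odd rows, even cols
x2 = x[:, 0::2, 1::2, :]  # even rows, odd cols
x3 = x[:, 1::2, 1::2, :]  # odd rows, odd cols
merged = torch.cat([x0, x1, x2, x3], -1).view(B, -1, 4 * C)  # B, H/2*W/2, 4C
reduced = nn.Linear(4 * C, 2 * C, bias=False)(nn.LayerNorm(4 * C)(merged))
assert reduced.shape == (B, (H // 2) * (W // 2), 2 * C)
```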
Default: None + use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False. + """ + + def __init__(self, dim, input_resolution, depth, num_heads, window_size, + mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., + drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False): + + super().__init__() + self.dim = dim + self.input_resolution = input_resolution + self.depth = depth + self.use_checkpoint = use_checkpoint + + # build blocks + self.blocks = nn.ModuleList([ + SwinTransformerBlock(dim=dim, input_resolution=input_resolution, + num_heads=num_heads, window_size=window_size, + shift_size=0 if (i % 2 == 0) else window_size // 2, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop, attn_drop=attn_drop, + drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, + norm_layer=norm_layer) + for i in range(depth)]) + + # patch merging layer + if downsample is not None: + self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer) + else: + self.downsample = None + + def forward(self, x, x_size): + for blk in self.blocks: + if self.use_checkpoint: + x = checkpoint.checkpoint(blk, x, x_size) + else: + x = blk(x, x_size) + if self.downsample is not None: + x = self.downsample(x) + return x + + def extra_repr(self) -> str: + return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}" + + def flops(self): + flops = 0 + for blk in self.blocks: + flops += blk.flops() + if self.downsample is not None: + flops += self.downsample.flops() + return flops + + +class RSTB(nn.Module): + """Residual Swin Transformer Block (RSTB). + + Args: + dim (int): Number of input channels. + input_resolution (tuple[int]): Input resolution. + depth (int): Number of blocks. + num_heads (int): Number of attention heads. + window_size (int): Local window size. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. + drop (float, optional): Dropout rate. Default: 0.0 + attn_drop (float, optional): Attention dropout rate. Default: 0.0 + drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0 + norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm + downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None + use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False. + img_size: Input image size. + patch_size: Patch size. + resi_connection: The convolutional block before residual connection. 
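The `RSTB.forward` defined below is a dense one-liner: tokens run through the Swin block group, are un-flattened into a (B, C, H, W) map for a 3x3 conv, re-flattened, and added back to the input. A shape-only sketch with the block group replaced by `nn.Identity()` (a stand-in, not the real module):

```python
import torch
import torch.nn as nn

B, C, H, W = 1, 60, 8, 8
conv = nn.Conv2d(C, C, 3, 1, 1)
residual_group = nn.Identity()       # stand-in for the BasicLayer of Swin blocks
x = torch.randn(B, H * W, C)         # token form, as inside the network
feat = residual_group(x).transpose(1, 2).view(B, C, H, W)  # patch_unembed
out = conv(feat).flatten(2).transpose(1, 2) + x            # patch_embed(conv(...)) + x
assert out.shape == x.shape
```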
+ """ + + def __init__(self, dim, input_resolution, depth, num_heads, window_size, + mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., + drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False, + img_size=224, patch_size=4, resi_connection='1conv'): + super(RSTB, self).__init__() + + self.dim = dim + self.input_resolution = input_resolution + + self.residual_group = BasicLayer(dim=dim, + input_resolution=input_resolution, + depth=depth, + num_heads=num_heads, + window_size=window_size, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop, attn_drop=attn_drop, + drop_path=drop_path, + norm_layer=norm_layer, + downsample=downsample, + use_checkpoint=use_checkpoint) + + if resi_connection == '1conv': + self.conv = nn.Conv2d(dim, dim, 3, 1, 1) + elif resi_connection == '3conv': + # to save parameters and memory + self.conv = nn.Sequential(nn.Conv2d(dim, dim // 4, 3, 1, 1), nn.LeakyReLU(negative_slope=0.2, inplace=True), + nn.Conv2d(dim // 4, dim // 4, 1, 1, 0), + nn.LeakyReLU(negative_slope=0.2, inplace=True), + nn.Conv2d(dim // 4, dim, 3, 1, 1)) + + self.patch_embed = PatchEmbed( + img_size=img_size, patch_size=patch_size, in_chans=0, embed_dim=dim, + norm_layer=None) + + self.patch_unembed = PatchUnEmbed( + img_size=img_size, patch_size=patch_size, in_chans=0, embed_dim=dim, + norm_layer=None) + + def forward(self, x, x_size): + return self.patch_embed(self.conv(self.patch_unembed(self.residual_group(x, x_size), x_size))) + x + + def flops(self): + flops = 0 + flops += self.residual_group.flops() + H, W = self.input_resolution + flops += H * W * self.dim * self.dim * 9 + flops += self.patch_embed.flops() + flops += self.patch_unembed.flops() + + return flops + + +class PatchEmbed(nn.Module): + r""" Image to Patch Embedding + + Args: + img_size (int): Image size. Default: 224. + patch_size (int): Patch token size. Default: 4. + in_chans (int): Number of input image channels. Default: 3. + embed_dim (int): Number of linear projection output channels. Default: 96. + norm_layer (nn.Module, optional): Normalization layer. Default: None + """ + + def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]] + self.img_size = img_size + self.patch_size = patch_size + self.patches_resolution = patches_resolution + self.num_patches = patches_resolution[0] * patches_resolution[1] + + self.in_chans = in_chans + self.embed_dim = embed_dim + + if norm_layer is not None: + self.norm = norm_layer(embed_dim) + else: + self.norm = None + + def forward(self, x): + x = x.flatten(2).transpose(1, 2) # B Ph*Pw C + if self.norm is not None: + x = self.norm(x) + return x + + def flops(self): + flops = 0 + H, W = self.img_size + if self.norm is not None: + flops += H * W * self.embed_dim + return flops + + +class PatchUnEmbed(nn.Module): + r""" Image to Patch Unembedding + + Args: + img_size (int): Image size. Default: 224. + patch_size (int): Patch token size. Default: 4. + in_chans (int): Number of input image channels. Default: 3. + embed_dim (int): Number of linear projection output channels. Default: 96. + norm_layer (nn.Module, optional): Normalization layer. 
Default: None + """ + + def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]] + self.img_size = img_size + self.patch_size = patch_size + self.patches_resolution = patches_resolution + self.num_patches = patches_resolution[0] * patches_resolution[1] + + self.in_chans = in_chans + self.embed_dim = embed_dim + + def forward(self, x, x_size): + B, HW, C = x.shape + x = x.transpose(1, 2).view(B, self.embed_dim, x_size[0], x_size[1]) # B Ph*Pw C + return x + + def flops(self): + flops = 0 + return flops + + +class Upsample(nn.Sequential): + """Upsample module. + + Args: + scale (int): Scale factor. Supported scales: 2^n and 3. + num_feat (int): Channel number of intermediate features. + """ + + def __init__(self, scale, num_feat): + m = [] + if (scale & (scale - 1)) == 0: # scale = 2^n + for _ in range(int(math.log(scale, 2))): + m.append(nn.Conv2d(num_feat, 4 * num_feat, 3, 1, 1)) + m.append(nn.PixelShuffle(2)) + elif scale == 3: + m.append(nn.Conv2d(num_feat, 9 * num_feat, 3, 1, 1)) + m.append(nn.PixelShuffle(3)) + else: + raise ValueError(f'scale {scale} is not supported. ' 'Supported scales: 2^n and 3.') + super(Upsample, self).__init__(*m) + + +class UpsampleOneStep(nn.Sequential): + """UpsampleOneStep module (the difference with Upsample is that it always only has 1conv + 1pixelshuffle) + Used in lightweight SR to save parameters. + + Args: + scale (int): Scale factor. Supported scales: 2^n and 3. + num_feat (int): Channel number of intermediate features. + + """ + + def __init__(self, scale, num_feat, num_out_ch, input_resolution=None): + self.num_feat = num_feat + self.input_resolution = input_resolution + m = [] + m.append(nn.Conv2d(num_feat, (scale ** 2) * num_out_ch, 3, 1, 1)) + m.append(nn.PixelShuffle(scale)) + super(UpsampleOneStep, self).__init__(*m) + + def flops(self): + H, W = self.input_resolution + flops = H * W * self.num_feat * 3 * 9 + return flops + + +class SwinIR(nn.Module): + r""" SwinIR + A PyTorch impl of : `SwinIR: Image Restoration Using Swin Transformer`, based on Swin Transformer. + + Args: + img_size (int | tuple(int)): Input image size. Default 64 + patch_size (int | tuple(int)): Patch size. Default: 1 + in_chans (int): Number of input image channels. Default: 3 + embed_dim (int): Patch embedding dimension. Default: 96 + depths (tuple(int)): Depth of each Swin Transformer layer. + num_heads (tuple(int)): Number of attention heads in different layers. + window_size (int): Window size. Default: 7 + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4 + qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True + qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None + drop_rate (float): Dropout rate. Default: 0 + attn_drop_rate (float): Attention dropout rate. Default: 0 + drop_path_rate (float): Stochastic depth rate. Default: 0.1 + norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm. + ape (bool): If True, add absolute position embedding to the patch embedding. Default: False + patch_norm (bool): If True, add normalization after patch embedding. Default: True + use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False + upscale: Upscale factor. 2/3/4/8 for image SR, 1 for denoising and compress artifact reduction + img_range: Image range. 1. 
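The `Upsample` head above reaches any 2**n scale by stacking n (conv -> PixelShuffle(2)) pairs; each shuffle trades 4x channels for 2x resolution, so the channel count is preserved end to end. A sketch for x4:

```python
import math
import torch
import torch.nn as nn

scale, num_feat = 4, 64
m = []
for _ in range(int(math.log(scale, 2))):   # two rounds for x4
    m += [nn.Conv2d(num_feat, 4 * num_feat, 3, 1, 1), nn.PixelShuffle(2)]
up = nn.Sequential(*m)
x = torch.randn(1, num_feat, 16, 16)
assert up(x).shape == (1, num_feat, 64, 64)  # channels kept, 4x spatial
```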
or 255. + upsampler: The reconstruction module. 'pixelshuffle'/'pixelshuffledirect'/'nearest+conv'/None + resi_connection: The convolutional block before residual connection. '1conv'/'3conv' + """ + + def __init__(self, img_size=64, patch_size=1, in_chans=3, + embed_dim=96, depths=(6, 6, 6, 6), num_heads=(6, 6, 6, 6), + window_size=7, mlp_ratio=4., qkv_bias=True, qk_scale=None, + drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1, + norm_layer=nn.LayerNorm, ape=False, patch_norm=True, + use_checkpoint=False, upscale=2, img_range=1., upsampler='', resi_connection='1conv', + **kwargs): + super(SwinIR, self).__init__() + num_in_ch = in_chans + num_out_ch = in_chans + num_feat = 64 + self.img_range = img_range + if in_chans == 3: + rgb_mean = (0.4488, 0.4371, 0.4040) + self.mean = torch.Tensor(rgb_mean).view(1, 3, 1, 1) + else: + self.mean = torch.zeros(1, 1, 1, 1) + self.upscale = upscale + self.upsampler = upsampler + self.window_size = window_size + + ##################################################################################################### + ################################### 1, shallow feature extraction ################################### + self.conv_first = nn.Conv2d(num_in_ch, embed_dim, 3, 1, 1) + + ##################################################################################################### + ################################### 2, deep feature extraction ###################################### + self.num_layers = len(depths) + self.embed_dim = embed_dim + self.ape = ape + self.patch_norm = patch_norm + self.num_features = embed_dim + self.mlp_ratio = mlp_ratio + + # split image into non-overlapping patches + self.patch_embed = PatchEmbed( + img_size=img_size, patch_size=patch_size, in_chans=embed_dim, embed_dim=embed_dim, + norm_layer=norm_layer if self.patch_norm else None) + num_patches = self.patch_embed.num_patches + patches_resolution = self.patch_embed.patches_resolution + self.patches_resolution = patches_resolution + + # merge non-overlapping patches into image + self.patch_unembed = PatchUnEmbed( + img_size=img_size, patch_size=patch_size, in_chans=embed_dim, embed_dim=embed_dim, + norm_layer=norm_layer if self.patch_norm else None) + + # absolute position embedding + if self.ape: + self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim)) + trunc_normal_(self.absolute_pos_embed, std=.02) + + self.pos_drop = nn.Dropout(p=drop_rate) + + # stochastic depth + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule + + # build Residual Swin Transformer blocks (RSTB) + self.layers = nn.ModuleList() + for i_layer in range(self.num_layers): + layer = RSTB(dim=embed_dim, + input_resolution=(patches_resolution[0], + patches_resolution[1]), + depth=depths[i_layer], + num_heads=num_heads[i_layer], + window_size=window_size, + mlp_ratio=self.mlp_ratio, + qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop_rate, attn_drop=attn_drop_rate, + drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], # no impact on SR results + norm_layer=norm_layer, + downsample=None, + use_checkpoint=use_checkpoint, + img_size=img_size, + patch_size=patch_size, + resi_connection=resi_connection + + ) + self.layers.append(layer) + self.norm = norm_layer(self.num_features) + + # build the last conv layer in deep feature extraction + if resi_connection == '1conv': + self.conv_after_body = nn.Conv2d(embed_dim, embed_dim, 3, 1, 1) + elif resi_connection == '3conv': + # to save parameters and memory +
self.conv_after_body = nn.Sequential(nn.Conv2d(embed_dim, embed_dim // 4, 3, 1, 1), + nn.LeakyReLU(negative_slope=0.2, inplace=True), + nn.Conv2d(embed_dim // 4, embed_dim // 4, 1, 1, 0), + nn.LeakyReLU(negative_slope=0.2, inplace=True), + nn.Conv2d(embed_dim // 4, embed_dim, 3, 1, 1)) + + ##################################################################################################### + ################################ 3, high quality image reconstruction ################################ + if self.upsampler == 'pixelshuffle': + # for classical SR + self.conv_before_upsample = nn.Sequential(nn.Conv2d(embed_dim, num_feat, 3, 1, 1), + nn.LeakyReLU(inplace=True)) + self.upsample = Upsample(upscale, num_feat) + self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1) + elif self.upsampler == 'pixelshuffledirect': + # for lightweight SR (to save parameters) + self.upsample = UpsampleOneStep(upscale, embed_dim, num_out_ch, + (patches_resolution[0], patches_resolution[1])) + elif self.upsampler == 'nearest+conv': + # for real-world SR (less artifacts) + self.conv_before_upsample = nn.Sequential(nn.Conv2d(embed_dim, num_feat, 3, 1, 1), + nn.LeakyReLU(inplace=True)) + self.conv_up1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1) + if self.upscale == 4: + self.conv_up2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1) + self.conv_hr = nn.Conv2d(num_feat, num_feat, 3, 1, 1) + self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1) + self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True) + else: + # for image denoising and JPEG compression artifact reduction + self.conv_last = nn.Conv2d(embed_dim, num_out_ch, 3, 1, 1) + + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay(self): + return {'absolute_pos_embed'} + + @torch.jit.ignore + def no_weight_decay_keywords(self): + return {'relative_position_bias_table'} + + def check_image_size(self, x): + _, _, h, w = x.size() + mod_pad_h = (self.window_size - h % self.window_size) % self.window_size + mod_pad_w = (self.window_size - w % self.window_size) % self.window_size + x = F.pad(x, (0, mod_pad_w, 0, mod_pad_h), 'reflect') + return x + + def forward_features(self, x): + x_size = (x.shape[2], x.shape[3]) + x = self.patch_embed(x) + if self.ape: + x = x + self.absolute_pos_embed + x = self.pos_drop(x) + + for layer in self.layers: + x = layer(x, x_size) + + x = self.norm(x) # B L C + x = self.patch_unembed(x, x_size) + + return x + + def forward(self, x): + H, W = x.shape[2:] + x = self.check_image_size(x) + + self.mean = self.mean.type_as(x) + x = (x - self.mean) * self.img_range + + if self.upsampler == 'pixelshuffle': + # for classical SR + x = self.conv_first(x) + x = self.conv_after_body(self.forward_features(x)) + x + x = self.conv_before_upsample(x) + x = self.conv_last(self.upsample(x)) + elif self.upsampler == 'pixelshuffledirect': + # for lightweight SR + x = self.conv_first(x) + x = self.conv_after_body(self.forward_features(x)) + x + x = self.upsample(x) + elif self.upsampler == 'nearest+conv': + # for real-world SR + x = self.conv_first(x) + x = self.conv_after_body(self.forward_features(x)) + x + x = self.conv_before_upsample(x) + x = self.lrelu(self.conv_up1(torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest'))) + if 
self.upscale == 4: + x = self.lrelu(self.conv_up2(torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest'))) + x = self.conv_last(self.lrelu(self.conv_hr(x))) + else: + # for image denoising and JPEG compression artifact reduction + x_first = self.conv_first(x) + res = self.conv_after_body(self.forward_features(x_first)) + x_first + x = x + self.conv_last(res) + + x = x / self.img_range + self.mean + + return x[:, :, :H*self.upscale, :W*self.upscale] + + def flops(self): + flops = 0 + H, W = self.patches_resolution + flops += H * W * 3 * self.embed_dim * 9 + flops += self.patch_embed.flops() + for layer in self.layers: + flops += layer.flops() + flops += H * W * 3 * self.embed_dim * self.embed_dim + flops += self.upsample.flops() + return flops + + +if __name__ == '__main__': + upscale = 4 + window_size = 8 + height = (1024 // upscale // window_size + 1) * window_size + width = (720 // upscale // window_size + 1) * window_size + model = SwinIR(upscale=2, img_size=(height, width), + window_size=window_size, img_range=1., depths=[6, 6, 6, 6], + embed_dim=60, num_heads=[6, 6, 6, 6], mlp_ratio=2, upsampler='pixelshuffledirect') + print(model) + print(height, width, model.flops() / 1e9) + + x = torch.randn((1, 3, height, width)) + x = model(x) + print(x.shape) diff --git a/extensions-builtin/SwinIR/swinir_model_arch_v2.py b/extensions-builtin/SwinIR/swinir_model_arch_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..59219f69a9a7f8365628cb2f4f57f5cd0104147a --- /dev/null +++ b/extensions-builtin/SwinIR/swinir_model_arch_v2.py @@ -0,0 +1,1017 @@ +# ----------------------------------------------------------------------------------- +# Swin2SR: SwinV2 Transformer for Compressed Image Super-Resolution and Restoration, https://arxiv.org/abs/ +# Written by Conde and Choi et al.
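Before the v2 file begins, one contract in `SwinIR.forward` above deserves emphasis: `check_image_size` reflect-pads the input up to the next window_size multiple, and the final `return x[:, :, :H*self.upscale, :W*self.upscale]` crops the result back, so callers never see the padding. A sketch of the arithmetic:

```python
import torch
import torch.nn.functional as F

window_size = 8
x = torch.randn(1, 3, 30, 45)
h, w = x.shape[2:]
mod_pad_h = (window_size - h % window_size) % window_size  # 2 -> padded to 32
mod_pad_w = (window_size - w % window_size) % window_size  # 3 -> padded to 48
x_pad = F.pad(x, (0, mod_pad_w, 0, mod_pad_h), 'reflect')
assert x_pad.shape[2] % window_size == 0 and x_pad.shape[3] % window_size == 0
```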
+# ----------------------------------------------------------------------------------- + +import math +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as checkpoint +from timm.models.layers import DropPath, to_2tuple, trunc_normal_ + + +class Mlp(nn.Module): + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +def window_partition(x, window_size): + """ + Args: + x: (B, H, W, C) + window_size (int): window size + Returns: + windows: (num_windows*B, window_size, window_size, C) + """ + B, H, W, C = x.shape + x = x.view(B, H // window_size, window_size, W // window_size, window_size, C) + windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) + return windows + + +def window_reverse(windows, window_size, H, W): + """ + Args: + windows: (num_windows*B, window_size, window_size, C) + window_size (int): Window size + H (int): Height of image + W (int): Width of image + Returns: + x: (B, H, W, C) + """ + B = int(windows.shape[0] / (H * W / window_size / window_size)) + x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) + return x + +class WindowAttention(nn.Module): + r""" Window based multi-head self attention (W-MSA) module with relative position bias. + It supports both of shifted and non-shifted window. + Args: + dim (int): Number of input channels. + window_size (tuple[int]): The height and width of the window. + num_heads (int): Number of attention heads. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0 + proj_drop (float, optional): Dropout ratio of output. Default: 0.0 + pretrained_window_size (tuple[int]): The height and width of the window in pre-training. 
+ """ + + def __init__(self, dim, window_size, num_heads, qkv_bias=True, attn_drop=0., proj_drop=0., + pretrained_window_size=(0, 0)): + + super().__init__() + self.dim = dim + self.window_size = window_size # Wh, Ww + self.pretrained_window_size = pretrained_window_size + self.num_heads = num_heads + + self.logit_scale = nn.Parameter(torch.log(10 * torch.ones((num_heads, 1, 1))), requires_grad=True) + + # mlp to generate continuous relative position bias + self.cpb_mlp = nn.Sequential(nn.Linear(2, 512, bias=True), + nn.ReLU(inplace=True), + nn.Linear(512, num_heads, bias=False)) + + # get relative_coords_table + relative_coords_h = torch.arange(-(self.window_size[0] - 1), self.window_size[0], dtype=torch.float32) + relative_coords_w = torch.arange(-(self.window_size[1] - 1), self.window_size[1], dtype=torch.float32) + relative_coords_table = torch.stack( + torch.meshgrid([relative_coords_h, + relative_coords_w])).permute(1, 2, 0).contiguous().unsqueeze(0) # 1, 2*Wh-1, 2*Ww-1, 2 + if pretrained_window_size[0] > 0: + relative_coords_table[:, :, :, 0] /= (pretrained_window_size[0] - 1) + relative_coords_table[:, :, :, 1] /= (pretrained_window_size[1] - 1) + else: + relative_coords_table[:, :, :, 0] /= (self.window_size[0] - 1) + relative_coords_table[:, :, :, 1] /= (self.window_size[1] - 1) + relative_coords_table *= 8 # normalize to -8, 8 + relative_coords_table = torch.sign(relative_coords_table) * torch.log2( + torch.abs(relative_coords_table) + 1.0) / np.log2(8) + + self.register_buffer("relative_coords_table", relative_coords_table) + + # get pair-wise relative position index for each token inside the window + coords_h = torch.arange(self.window_size[0]) + coords_w = torch.arange(self.window_size[1]) + coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww + coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww + relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww + relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 + relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0 + relative_coords[:, :, 1] += self.window_size[1] - 1 + relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 + relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww + self.register_buffer("relative_position_index", relative_position_index) + + self.qkv = nn.Linear(dim, dim * 3, bias=False) + if qkv_bias: + self.q_bias = nn.Parameter(torch.zeros(dim)) + self.v_bias = nn.Parameter(torch.zeros(dim)) + else: + self.q_bias = None + self.v_bias = None + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + self.softmax = nn.Softmax(dim=-1) + + def forward(self, x, mask=None): + """ + Args: + x: input features with shape of (num_windows*B, N, C) + mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None + """ + B_, N, C = x.shape + qkv_bias = None + if self.q_bias is not None: + qkv_bias = torch.cat((self.q_bias, torch.zeros_like(self.v_bias, requires_grad=False), self.v_bias)) + qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias) + qkv = qkv.reshape(B_, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) + + # cosine attention + attn = (F.normalize(q, dim=-1) @ F.normalize(k, dim=-1).transpose(-2, -1)) + logit_scale = torch.clamp(self.logit_scale, max=torch.log(torch.tensor(1. 
/ 0.01)).to(self.logit_scale.device)).exp() + attn = attn * logit_scale + + relative_position_bias_table = self.cpb_mlp(self.relative_coords_table).view(-1, self.num_heads) + relative_position_bias = relative_position_bias_table[self.relative_position_index.view(-1)].view( + self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH + relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww + relative_position_bias = 16 * torch.sigmoid(relative_position_bias) + attn = attn + relative_position_bias.unsqueeze(0) + + if mask is not None: + nW = mask.shape[0] + attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) + attn = attn.view(-1, self.num_heads, N, N) + attn = self.softmax(attn) + else: + attn = self.softmax(attn) + + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B_, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + def extra_repr(self) -> str: + return f'dim={self.dim}, window_size={self.window_size}, ' \ + f'pretrained_window_size={self.pretrained_window_size}, num_heads={self.num_heads}' + + def flops(self, N): + # calculate flops for 1 window with token length of N + flops = 0 + # qkv = self.qkv(x) + flops += N * self.dim * 3 * self.dim + # attn = (q @ k.transpose(-2, -1)) + flops += self.num_heads * N * (self.dim // self.num_heads) * N + # x = (attn @ v) + flops += self.num_heads * N * N * (self.dim // self.num_heads) + # x = self.proj(x) + flops += N * self.dim * self.dim + return flops + +class SwinTransformerBlock(nn.Module): + r""" Swin Transformer Block. + Args: + dim (int): Number of input channels. + input_resolution (tuple[int]): Input resolution. + num_heads (int): Number of attention heads. + window_size (int): Window size. + shift_size (int): Shift size for SW-MSA. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + drop (float, optional): Dropout rate. Default: 0.0 + attn_drop (float, optional): Attention dropout rate. Default: 0.0 + drop_path (float, optional): Stochastic depth rate. Default: 0.0 + act_layer (nn.Module, optional): Activation layer. Default: nn.GELU + norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm + pretrained_window_size (int): Window size in pre-training. + """ + + def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0, + mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0., drop_path=0., + act_layer=nn.GELU, norm_layer=nn.LayerNorm, pretrained_window_size=0): + super().__init__() + self.dim = dim + self.input_resolution = input_resolution + self.num_heads = num_heads + self.window_size = window_size + self.shift_size = shift_size + self.mlp_ratio = mlp_ratio + if min(self.input_resolution) <= self.window_size: + # if window size is larger than input resolution, we don't partition windows + self.shift_size = 0 + self.window_size = min(self.input_resolution) + assert 0 <= self.shift_size < self.window_size, "shift_size must be in [0, window_size)" + + self.norm1 = norm_layer(dim) + self.attn = WindowAttention( + dim, window_size=to_2tuple(self.window_size), num_heads=num_heads, + qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop, + pretrained_window_size=to_2tuple(pretrained_window_size)) + + self.drop_path = DropPath(drop_path) if drop_path > 0.
else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + if self.shift_size > 0: + attn_mask = self.calculate_mask(self.input_resolution) + else: + attn_mask = None + + self.register_buffer("attn_mask", attn_mask) + + def calculate_mask(self, x_size): + # calculate attention mask for SW-MSA + H, W = x_size + img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1 + h_slices = (slice(0, -self.window_size), + slice(-self.window_size, -self.shift_size), + slice(-self.shift_size, None)) + w_slices = (slice(0, -self.window_size), + slice(-self.window_size, -self.shift_size), + slice(-self.shift_size, None)) + cnt = 0 + for h in h_slices: + for w in w_slices: + img_mask[:, h, w, :] = cnt + cnt += 1 + + mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1 + mask_windows = mask_windows.view(-1, self.window_size * self.window_size) + attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) + attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) + + return attn_mask + + def forward(self, x, x_size): + H, W = x_size + B, L, C = x.shape + #assert L == H * W, "input feature has wrong size" + + shortcut = x + x = x.view(B, H, W, C) + + # cyclic shift + if self.shift_size > 0: + shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) + else: + shifted_x = x + + # partition windows + x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C + x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C + + # W-MSA/SW-MSA (to be compatible for testing on images whose shapes are the multiple of window size + if self.input_resolution == x_size: + attn_windows = self.attn(x_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C + else: + attn_windows = self.attn(x_windows, mask=self.calculate_mask(x_size).to(x.device)) + + # merge windows + attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) + shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C + + # reverse cyclic shift + if self.shift_size > 0: + x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) + else: + x = shifted_x + x = x.view(B, H * W, C) + x = shortcut + self.drop_path(self.norm1(x)) + + # FFN + x = x + self.drop_path(self.norm2(self.mlp(x))) + + return x + + def extra_repr(self) -> str: + return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \ + f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}" + + def flops(self): + flops = 0 + H, W = self.input_resolution + # norm1 + flops += self.dim * H * W + # W-MSA/SW-MSA + nW = H * W / self.window_size / self.window_size + flops += nW * self.attn.flops(self.window_size * self.window_size) + # mlp + flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio + # norm2 + flops += self.dim * H * W + return flops + +class PatchMerging(nn.Module): + r""" Patch Merging Layer. + Args: + input_resolution (tuple[int]): Resolution of input feature. + dim (int): Number of input channels. + norm_layer (nn.Module, optional): Normalization layer. 
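The v2 `WindowAttention` above changes two things relative to v1: attention logits are cosine similarities scaled by a learned, clamped per-head temperature instead of a fixed head_dim ** -0.5, and the position bias comes from the `cpb_mlp` over log-spaced coordinates rather than a lookup table. A sketch of the cosine-attention part with illustrative sizes:

```python
import math
import torch
import torch.nn.functional as F

B, heads, N, d = 1, 6, 16, 32  # illustrative sizes
q, k = torch.randn(B, heads, N, d), torch.randn(B, heads, N, d)
logit_scale = torch.log(10 * torch.ones(heads, 1, 1))  # learned nn.Parameter above
attn = F.normalize(q, dim=-1) @ F.normalize(k, dim=-1).transpose(-2, -1)
attn = attn * logit_scale.clamp(max=math.log(1. / 0.01)).exp()  # temperature capped at 100
assert attn.shape == (B, heads, N, N)
```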
Default: nn.LayerNorm + """ + + def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm): + super().__init__() + self.input_resolution = input_resolution + self.dim = dim + self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False) + self.norm = norm_layer(2 * dim) + + def forward(self, x): + """ + x: B, H*W, C + """ + H, W = self.input_resolution + B, L, C = x.shape + assert L == H * W, "input feature has wrong size" + assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) are not even." + + x = x.view(B, H, W, C) + + x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C + x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C + x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C + x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C + x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C + x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C + + x = self.reduction(x) + x = self.norm(x) + + return x + + def extra_repr(self) -> str: + return f"input_resolution={self.input_resolution}, dim={self.dim}" + + def flops(self): + H, W = self.input_resolution + flops = (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim + flops += H * W * self.dim // 2 + return flops + +class BasicLayer(nn.Module): + """ A basic Swin Transformer layer for one stage. + Args: + dim (int): Number of input channels. + input_resolution (tuple[int]): Input resolution. + depth (int): Number of blocks. + num_heads (int): Number of attention heads. + window_size (int): Local window size. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + drop (float, optional): Dropout rate. Default: 0.0 + attn_drop (float, optional): Attention dropout rate. Default: 0.0 + drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0 + norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm + downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None + use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False. + pretrained_window_size (int): Local window size in pre-training. 
+ """ + + def __init__(self, dim, input_resolution, depth, num_heads, window_size, + mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0., + drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False, + pretrained_window_size=0): + + super().__init__() + self.dim = dim + self.input_resolution = input_resolution + self.depth = depth + self.use_checkpoint = use_checkpoint + + # build blocks + self.blocks = nn.ModuleList([ + SwinTransformerBlock(dim=dim, input_resolution=input_resolution, + num_heads=num_heads, window_size=window_size, + shift_size=0 if (i % 2 == 0) else window_size // 2, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + drop=drop, attn_drop=attn_drop, + drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, + norm_layer=norm_layer, + pretrained_window_size=pretrained_window_size) + for i in range(depth)]) + + # patch merging layer + if downsample is not None: + self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer) + else: + self.downsample = None + + def forward(self, x, x_size): + for blk in self.blocks: + if self.use_checkpoint: + x = checkpoint.checkpoint(blk, x, x_size) + else: + x = blk(x, x_size) + if self.downsample is not None: + x = self.downsample(x) + return x + + def extra_repr(self) -> str: + return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}" + + def flops(self): + flops = 0 + for blk in self.blocks: + flops += blk.flops() + if self.downsample is not None: + flops += self.downsample.flops() + return flops + + def _init_respostnorm(self): + for blk in self.blocks: + nn.init.constant_(blk.norm1.bias, 0) + nn.init.constant_(blk.norm1.weight, 0) + nn.init.constant_(blk.norm2.bias, 0) + nn.init.constant_(blk.norm2.weight, 0) + +class PatchEmbed(nn.Module): + r""" Image to Patch Embedding + Args: + img_size (int): Image size. Default: 224. + patch_size (int): Patch token size. Default: 4. + in_chans (int): Number of input image channels. Default: 3. + embed_dim (int): Number of linear projection output channels. Default: 96. + norm_layer (nn.Module, optional): Normalization layer. Default: None + """ + + def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]] + self.img_size = img_size + self.patch_size = patch_size + self.patches_resolution = patches_resolution + self.num_patches = patches_resolution[0] * patches_resolution[1] + + self.in_chans = in_chans + self.embed_dim = embed_dim + + self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) + if norm_layer is not None: + self.norm = norm_layer(embed_dim) + else: + self.norm = None + + def forward(self, x): + B, C, H, W = x.shape + # FIXME look at relaxing size constraints + # assert H == self.img_size[0] and W == self.img_size[1], + # f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." + x = self.proj(x).flatten(2).transpose(1, 2) # B Ph*Pw C + if self.norm is not None: + x = self.norm(x) + return x + + def flops(self): + Ho, Wo = self.patches_resolution + flops = Ho * Wo * self.embed_dim * self.in_chans * (self.patch_size[0] * self.patch_size[1]) + if self.norm is not None: + flops += Ho * Wo * self.embed_dim + return flops + +class RSTB(nn.Module): + """Residual Swin Transformer Block (RSTB). + + Args: + dim (int): Number of input channels. 
+ input_resolution (tuple[int]): Input resolution. + depth (int): Number of blocks. + num_heads (int): Number of attention heads. + window_size (int): Local window size. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + drop (float, optional): Dropout rate. Default: 0.0 + attn_drop (float, optional): Attention dropout rate. Default: 0.0 + drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0 + norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm + downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None + use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False. + img_size: Input image size. + patch_size: Patch size. + resi_connection: The convolutional block before residual connection. + """ + + def __init__(self, dim, input_resolution, depth, num_heads, window_size, + mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0., + drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False, + img_size=224, patch_size=4, resi_connection='1conv'): + super(RSTB, self).__init__() + + self.dim = dim + self.input_resolution = input_resolution + + self.residual_group = BasicLayer(dim=dim, + input_resolution=input_resolution, + depth=depth, + num_heads=num_heads, + window_size=window_size, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + drop=drop, attn_drop=attn_drop, + drop_path=drop_path, + norm_layer=norm_layer, + downsample=downsample, + use_checkpoint=use_checkpoint) + + if resi_connection == '1conv': + self.conv = nn.Conv2d(dim, dim, 3, 1, 1) + elif resi_connection == '3conv': + # to save parameters and memory + self.conv = nn.Sequential(nn.Conv2d(dim, dim // 4, 3, 1, 1), nn.LeakyReLU(negative_slope=0.2, inplace=True), + nn.Conv2d(dim // 4, dim // 4, 1, 1, 0), + nn.LeakyReLU(negative_slope=0.2, inplace=True), + nn.Conv2d(dim // 4, dim, 3, 1, 1)) + + self.patch_embed = PatchEmbed( + img_size=img_size, patch_size=patch_size, in_chans=dim, embed_dim=dim, + norm_layer=None) + + self.patch_unembed = PatchUnEmbed( + img_size=img_size, patch_size=patch_size, in_chans=dim, embed_dim=dim, + norm_layer=None) + + def forward(self, x, x_size): + return self.patch_embed(self.conv(self.patch_unembed(self.residual_group(x, x_size), x_size))) + x + + def flops(self): + flops = 0 + flops += self.residual_group.flops() + H, W = self.input_resolution + flops += H * W * self.dim * self.dim * 9 + flops += self.patch_embed.flops() + flops += self.patch_unembed.flops() + + return flops + +class PatchUnEmbed(nn.Module): + r""" Image to Patch Unembedding + + Args: + img_size (int): Image size. Default: 224. + patch_size (int): Patch token size. Default: 4. + in_chans (int): Number of input image channels. Default: 3. + embed_dim (int): Number of linear projection output channels. Default: 96. + norm_layer (nn.Module, optional): Normalization layer. 
Default: None + """ + + def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]] + self.img_size = img_size + self.patch_size = patch_size + self.patches_resolution = patches_resolution + self.num_patches = patches_resolution[0] * patches_resolution[1] + + self.in_chans = in_chans + self.embed_dim = embed_dim + + def forward(self, x, x_size): + B, HW, C = x.shape + x = x.transpose(1, 2).view(B, self.embed_dim, x_size[0], x_size[1]) # B Ph*Pw C + return x + + def flops(self): + flops = 0 + return flops + + +class Upsample(nn.Sequential): + """Upsample module. + + Args: + scale (int): Scale factor. Supported scales: 2^n and 3. + num_feat (int): Channel number of intermediate features. + """ + + def __init__(self, scale, num_feat): + m = [] + if (scale & (scale - 1)) == 0: # scale = 2^n + for _ in range(int(math.log(scale, 2))): + m.append(nn.Conv2d(num_feat, 4 * num_feat, 3, 1, 1)) + m.append(nn.PixelShuffle(2)) + elif scale == 3: + m.append(nn.Conv2d(num_feat, 9 * num_feat, 3, 1, 1)) + m.append(nn.PixelShuffle(3)) + else: + raise ValueError(f'scale {scale} is not supported. ' 'Supported scales: 2^n and 3.') + super(Upsample, self).__init__(*m) + +class Upsample_hf(nn.Sequential): + """Upsample module. + + Args: + scale (int): Scale factor. Supported scales: 2^n and 3. + num_feat (int): Channel number of intermediate features. + """ + + def __init__(self, scale, num_feat): + m = [] + if (scale & (scale - 1)) == 0: # scale = 2^n + for _ in range(int(math.log(scale, 2))): + m.append(nn.Conv2d(num_feat, 4 * num_feat, 3, 1, 1)) + m.append(nn.PixelShuffle(2)) + elif scale == 3: + m.append(nn.Conv2d(num_feat, 9 * num_feat, 3, 1, 1)) + m.append(nn.PixelShuffle(3)) + else: + raise ValueError(f'scale {scale} is not supported. ' 'Supported scales: 2^n and 3.') + super(Upsample_hf, self).__init__(*m) + + +class UpsampleOneStep(nn.Sequential): + """UpsampleOneStep module (the difference with Upsample is that it always only has 1conv + 1pixelshuffle) + Used in lightweight SR to save parameters. + + Args: + scale (int): Scale factor. Supported scales: 2^n and 3. + num_feat (int): Channel number of intermediate features. + + """ + + def __init__(self, scale, num_feat, num_out_ch, input_resolution=None): + self.num_feat = num_feat + self.input_resolution = input_resolution + m = [] + m.append(nn.Conv2d(num_feat, (scale ** 2) * num_out_ch, 3, 1, 1)) + m.append(nn.PixelShuffle(scale)) + super(UpsampleOneStep, self).__init__(*m) + + def flops(self): + H, W = self.input_resolution + flops = H * W * self.num_feat * 3 * 9 + return flops + + + +class Swin2SR(nn.Module): + r""" Swin2SR + A PyTorch impl of : `Swin2SR: SwinV2 Transformer for Compressed Image Super-Resolution and Restoration`. + + Args: + img_size (int | tuple(int)): Input image size. Default 64 + patch_size (int | tuple(int)): Patch size. Default: 1 + in_chans (int): Number of input image channels. Default: 3 + embed_dim (int): Patch embedding dimension. Default: 96 + depths (tuple(int)): Depth of each Swin Transformer layer. + num_heads (tuple(int)): Number of attention heads in different layers. + window_size (int): Window size. Default: 7 + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4 + qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True + drop_rate (float): Dropout rate. 
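`UpsampleOneStep` above earns its "save parameters" billing: a single conv straight to scale**2 * num_out_ch channels replaces the conv -> shuffle -> conv -> shuffle stack of `Upsample` running at num_feat width. A parameter-count sketch for x4 with the lightweight-SR default of embed_dim 60 (sizes are illustrative):

```python
import math
import torch.nn as nn

scale, embed_dim, num_feat, out_ch = 4, 60, 64, 3
one_step = nn.Sequential(nn.Conv2d(embed_dim, scale ** 2 * out_ch, 3, 1, 1),
                         nn.PixelShuffle(scale))
m = []
for _ in range(int(math.log(scale, 2))):
    m += [nn.Conv2d(num_feat, 4 * num_feat, 3, 1, 1), nn.PixelShuffle(2)]
classic = nn.Sequential(*m)
count = lambda mod: sum(p.numel() for p in mod.parameters())
print(count(one_step), count(classic))  # roughly 26k vs 295k weights
```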
Default: 0 + attn_drop_rate (float): Attention dropout rate. Default: 0 + drop_path_rate (float): Stochastic depth rate. Default: 0.1 + norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm. + ape (bool): If True, add absolute position embedding to the patch embedding. Default: False + patch_norm (bool): If True, add normalization after patch embedding. Default: True + use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False + upscale: Upscale factor. 2/3/4/8 for image SR, 1 for denoising and compress artifact reduction + img_range: Image range. 1. or 255. + upsampler: The reconstruction module. 'pixelshuffle'/'pixelshuffledirect'/'nearest+conv'/None + resi_connection: The convolutional block before residual connection. '1conv'/'3conv' + """ + + def __init__(self, img_size=64, patch_size=1, in_chans=3, + embed_dim=96, depths=(6, 6, 6, 6), num_heads=(6, 6, 6, 6), + window_size=7, mlp_ratio=4., qkv_bias=True, + drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1, + norm_layer=nn.LayerNorm, ape=False, patch_norm=True, + use_checkpoint=False, upscale=2, img_range=1., upsampler='', resi_connection='1conv', + **kwargs): + super(Swin2SR, self).__init__() + num_in_ch = in_chans + num_out_ch = in_chans + num_feat = 64 + self.img_range = img_range + if in_chans == 3: + rgb_mean = (0.4488, 0.4371, 0.4040) + self.mean = torch.Tensor(rgb_mean).view(1, 3, 1, 1) + else: + self.mean = torch.zeros(1, 1, 1, 1) + self.upscale = upscale + self.upsampler = upsampler + self.window_size = window_size + + ##################################################################################################### + ################################### 1, shallow feature extraction ################################### + self.conv_first = nn.Conv2d(num_in_ch, embed_dim, 3, 1, 1) + + ##################################################################################################### + ################################### 2, deep feature extraction ###################################### + self.num_layers = len(depths) + self.embed_dim = embed_dim + self.ape = ape + self.patch_norm = patch_norm + self.num_features = embed_dim + self.mlp_ratio = mlp_ratio + + # split image into non-overlapping patches + self.patch_embed = PatchEmbed( + img_size=img_size, patch_size=patch_size, in_chans=embed_dim, embed_dim=embed_dim, + norm_layer=norm_layer if self.patch_norm else None) + num_patches = self.patch_embed.num_patches + patches_resolution = self.patch_embed.patches_resolution + self.patches_resolution = patches_resolution + + # merge non-overlapping patches into image + self.patch_unembed = PatchUnEmbed( + img_size=img_size, patch_size=patch_size, in_chans=embed_dim, embed_dim=embed_dim, + norm_layer=norm_layer if self.patch_norm else None) + + # absolute position embedding + if self.ape: + self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim)) + trunc_normal_(self.absolute_pos_embed, std=.02) + + self.pos_drop = nn.Dropout(p=drop_rate) + + # stochastic depth + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule + + # build Residual Swin Transformer blocks (RSTB) + self.layers = nn.ModuleList() + for i_layer in range(self.num_layers): + layer = RSTB(dim=embed_dim, + input_resolution=(patches_resolution[0], + patches_resolution[1]), + depth=depths[i_layer], + num_heads=num_heads[i_layer], + window_size=window_size, + mlp_ratio=self.mlp_ratio, + qkv_bias=qkv_bias, + drop=drop_rate, attn_drop=attn_drop_rate,
+ drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], # no impact on SR results + norm_layer=norm_layer, + downsample=None, + use_checkpoint=use_checkpoint, + img_size=img_size, + patch_size=patch_size, + resi_connection=resi_connection + + ) + self.layers.append(layer) + + if self.upsampler == 'pixelshuffle_hf': + self.layers_hf = nn.ModuleList() + for i_layer in range(self.num_layers): + layer = RSTB(dim=embed_dim, + input_resolution=(patches_resolution[0], + patches_resolution[1]), + depth=depths[i_layer], + num_heads=num_heads[i_layer], + window_size=window_size, + mlp_ratio=self.mlp_ratio, + qkv_bias=qkv_bias, + drop=drop_rate, attn_drop=attn_drop_rate, + drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], # no impact on SR results + norm_layer=norm_layer, + downsample=None, + use_checkpoint=use_checkpoint, + img_size=img_size, + patch_size=patch_size, + resi_connection=resi_connection + + ) + self.layers_hf.append(layer) + + self.norm = norm_layer(self.num_features) + + # build the last conv layer in deep feature extraction + if resi_connection == '1conv': + self.conv_after_body = nn.Conv2d(embed_dim, embed_dim, 3, 1, 1) + elif resi_connection == '3conv': + # to save parameters and memory + self.conv_after_body = nn.Sequential(nn.Conv2d(embed_dim, embed_dim // 4, 3, 1, 1), + nn.LeakyReLU(negative_slope=0.2, inplace=True), + nn.Conv2d(embed_dim // 4, embed_dim // 4, 1, 1, 0), + nn.LeakyReLU(negative_slope=0.2, inplace=True), + nn.Conv2d(embed_dim // 4, embed_dim, 3, 1, 1)) + + ##################################################################################################### + ################################ 3, high quality image reconstruction ################################ + if self.upsampler == 'pixelshuffle': + # for classical SR + self.conv_before_upsample = nn.Sequential(nn.Conv2d(embed_dim, num_feat, 3, 1, 1), + nn.LeakyReLU(inplace=True)) + self.upsample = Upsample(upscale, num_feat) + self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1) + elif self.upsampler == 'pixelshuffle_aux': + self.conv_bicubic = nn.Conv2d(num_in_ch, num_feat, 3, 1, 1) + self.conv_before_upsample = nn.Sequential( + nn.Conv2d(embed_dim, num_feat, 3, 1, 1), + nn.LeakyReLU(inplace=True)) + self.conv_aux = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1) + self.conv_after_aux = nn.Sequential( + nn.Conv2d(3, num_feat, 3, 1, 1), + nn.LeakyReLU(inplace=True)) + self.upsample = Upsample(upscale, num_feat) + self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1) + + elif self.upsampler == 'pixelshuffle_hf': + self.conv_before_upsample = nn.Sequential(nn.Conv2d(embed_dim, num_feat, 3, 1, 1), + nn.LeakyReLU(inplace=True)) + self.upsample = Upsample(upscale, num_feat) + self.upsample_hf = Upsample_hf(upscale, num_feat) + self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1) + self.conv_first_hf = nn.Sequential(nn.Conv2d(num_feat, embed_dim, 3, 1, 1), + nn.LeakyReLU(inplace=True)) + self.conv_after_body_hf = nn.Conv2d(embed_dim, embed_dim, 3, 1, 1) + self.conv_before_upsample_hf = nn.Sequential( + nn.Conv2d(embed_dim, num_feat, 3, 1, 1), + nn.LeakyReLU(inplace=True)) + self.conv_last_hf = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1) + + elif self.upsampler == 'pixelshuffledirect': + # for lightweight SR (to save parameters) + self.upsample = UpsampleOneStep(upscale, embed_dim, num_out_ch, + (patches_resolution[0], patches_resolution[1])) + elif self.upsampler == 'nearest+conv': + # for real-world SR (less artifacts) + assert self.upscale == 4, 'only support x4 now.' 
+ self.conv_before_upsample = nn.Sequential(nn.Conv2d(embed_dim, num_feat, 3, 1, 1), + nn.LeakyReLU(inplace=True)) + self.conv_up1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1) + self.conv_up2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1) + self.conv_hr = nn.Conv2d(num_feat, num_feat, 3, 1, 1) + self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1) + self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True) + else: + # for image denoising and JPEG compression artifact reduction + self.conv_last = nn.Conv2d(embed_dim, num_out_ch, 3, 1, 1) + + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay(self): + return {'absolute_pos_embed'} + + @torch.jit.ignore + def no_weight_decay_keywords(self): + return {'relative_position_bias_table'} + + def check_image_size(self, x): + _, _, h, w = x.size() + mod_pad_h = (self.window_size - h % self.window_size) % self.window_size + mod_pad_w = (self.window_size - w % self.window_size) % self.window_size + x = F.pad(x, (0, mod_pad_w, 0, mod_pad_h), 'reflect') + return x + + def forward_features(self, x): + x_size = (x.shape[2], x.shape[3]) + x = self.patch_embed(x) + if self.ape: + x = x + self.absolute_pos_embed + x = self.pos_drop(x) + + for layer in self.layers: + x = layer(x, x_size) + + x = self.norm(x) # B L C + x = self.patch_unembed(x, x_size) + + return x + + def forward_features_hf(self, x): + x_size = (x.shape[2], x.shape[3]) + x = self.patch_embed(x) + if self.ape: + x = x + self.absolute_pos_embed + x = self.pos_drop(x) + + for layer in self.layers_hf: + x = layer(x, x_size) + + x = self.norm(x) # B L C + x = self.patch_unembed(x, x_size) + + return x + + def forward(self, x): + H, W = x.shape[2:] + x = self.check_image_size(x) + + self.mean = self.mean.type_as(x) + x = (x - self.mean) * self.img_range + + if self.upsampler == 'pixelshuffle': + # for classical SR + x = self.conv_first(x) + x = self.conv_after_body(self.forward_features(x)) + x + x = self.conv_before_upsample(x) + x = self.conv_last(self.upsample(x)) + elif self.upsampler == 'pixelshuffle_aux': + bicubic = F.interpolate(x, size=(H * self.upscale, W * self.upscale), mode='bicubic', align_corners=False) + bicubic = self.conv_bicubic(bicubic) + x = self.conv_first(x) + x = self.conv_after_body(self.forward_features(x)) + x + x = self.conv_before_upsample(x) + aux = self.conv_aux(x) # b, 3, LR_H, LR_W + x = self.conv_after_aux(aux) + x = self.upsample(x)[:, :, :H * self.upscale, :W * self.upscale] + bicubic[:, :, :H * self.upscale, :W * self.upscale] + x = self.conv_last(x) + aux = aux / self.img_range + self.mean + elif self.upsampler == 'pixelshuffle_hf': + # for classical SR with HF + x = self.conv_first(x) + x = self.conv_after_body(self.forward_features(x)) + x + x_before = self.conv_before_upsample(x) + x_out = self.conv_last(self.upsample(x_before)) + + x_hf = self.conv_first_hf(x_before) + x_hf = self.conv_after_body_hf(self.forward_features_hf(x_hf)) + x_hf + x_hf = self.conv_before_upsample_hf(x_hf) + x_hf = self.conv_last_hf(self.upsample_hf(x_hf)) + x = x_out + x_hf + x_hf = x_hf / self.img_range + self.mean + + elif self.upsampler == 'pixelshuffledirect': + # for lightweight SR + x = self.conv_first(x) + x = self.conv_after_body(self.forward_features(x)) + x + x = 
self.upsample(x) + elif self.upsampler == 'nearest+conv': + # for real-world SR + x = self.conv_first(x) + x = self.conv_after_body(self.forward_features(x)) + x + x = self.conv_before_upsample(x) + x = self.lrelu(self.conv_up1(torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest'))) + x = self.lrelu(self.conv_up2(torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest'))) + x = self.conv_last(self.lrelu(self.conv_hr(x))) + else: + # for image denoising and JPEG compression artifact reduction + x_first = self.conv_first(x) + res = self.conv_after_body(self.forward_features(x_first)) + x_first + x = x + self.conv_last(res) + + x = x / self.img_range + self.mean + if self.upsampler == "pixelshuffle_aux": + return x[:, :, :H*self.upscale, :W*self.upscale], aux + + elif self.upsampler == "pixelshuffle_hf": + x_out = x_out / self.img_range + self.mean + return x_out[:, :, :H*self.upscale, :W*self.upscale], x[:, :, :H*self.upscale, :W*self.upscale], x_hf[:, :, :H*self.upscale, :W*self.upscale] + + else: + return x[:, :, :H*self.upscale, :W*self.upscale] + + def flops(self): + flops = 0 + H, W = self.patches_resolution + flops += H * W * 3 * self.embed_dim * 9 + flops += self.patch_embed.flops() + for layer in self.layers: + flops += layer.flops() + flops += H * W * 3 * self.embed_dim * self.embed_dim + flops += self.upsample.flops() + return flops + + +if __name__ == '__main__': + upscale = 4 + window_size = 8 + height = (1024 // upscale // window_size + 1) * window_size + width = (720 // upscale // window_size + 1) * window_size + model = Swin2SR(upscale=upscale, img_size=(height, width), + window_size=window_size, img_range=1., depths=[6, 6, 6, 6], + embed_dim=60, num_heads=[6, 6, 6, 6], mlp_ratio=2, upsampler='pixelshuffledirect') + print(model) + print(height, width, model.flops() / 1e9) + + x = torch.randn((1, 3, height, width)) + x = model(x) + print(x.shape) diff --git a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js new file mode 100644 index 0000000000000000000000000000000000000000..30199dcd60aa3df4b5440c1dfa0de0319ac1374a --- /dev/null +++ b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js @@ -0,0 +1,776 @@ +onUiLoaded(async() => { + const elementIDs = { + img2imgTabs: "#mode_img2img .tab-nav", + inpaint: "#img2maskimg", + inpaintSketch: "#inpaint_sketch", + rangeGroup: "#img2img_column_size", + sketch: "#img2img_sketch" + }; + const tabNameToElementId = { + "Inpaint sketch": elementIDs.inpaintSketch, + "Inpaint": elementIDs.inpaint, + "Sketch": elementIDs.sketch + }; + + // Helper functions + // Get active tab + function getActiveTab(elements, all = false) { + const tabs = elements.img2imgTabs.querySelectorAll("button"); + + if (all) return tabs; + + for (let tab of tabs) { + if (tab.classList.contains("selected")) { + return tab; + } + } + } + + // Get tab ID + function getTabId(elements) { + const activeTab = getActiveTab(elements); + return tabNameToElementId[activeTab.innerText]; + } + + // Wait until opts loaded + async function waitForOpts() { + for (;;) { + if (window.opts && Object.keys(window.opts).length) { + return window.opts; + } + await new Promise(resolve => setTimeout(resolve, 100)); + } + } + + // Function for defining the "Ctrl", "Shift" and "Alt" keys + function isModifierKey(event, key) { + switch (key) { + case "Ctrl": + return event.ctrlKey; + case "Shift": + return event.shiftKey; + case "Alt": + return event.altKey; + default: + return false; + } + } + + // Check if 
hotkey is valid + function isValidHotkey(value) { + const specialKeys = ["Ctrl", "Alt", "Shift", "Disable"]; + return ( + (typeof value === "string" && + value.length === 1 && + /[a-z]/i.test(value)) || + specialKeys.includes(value) + ); + } + + // Normalize hotkey + function normalizeHotkey(hotkey) { + return hotkey.length === 1 ? "Key" + hotkey.toUpperCase() : hotkey; + } + + // Format hotkey for display + function formatHotkeyForDisplay(hotkey) { + return hotkey.startsWith("Key") ? hotkey.slice(3) : hotkey; + } + + // Create hotkey configuration with the provided options + function createHotkeyConfig(defaultHotkeysConfig, hotkeysConfigOpts) { + const result = {}; // Resulting hotkey configuration + const usedKeys = new Set(); // Set of used hotkeys + + // Iterate through defaultHotkeysConfig keys + for (const key in defaultHotkeysConfig) { + const userValue = hotkeysConfigOpts[key]; // User-provided hotkey value + const defaultValue = defaultHotkeysConfig[key]; // Default hotkey value + + // Apply appropriate value for undefined, boolean, or object userValue + if ( + userValue === undefined || + typeof userValue === "boolean" || + typeof userValue === "object" || + userValue === "disable" + ) { + result[key] = + userValue === undefined ? defaultValue : userValue; + } else if (isValidHotkey(userValue)) { + const normalizedUserValue = normalizeHotkey(userValue); + + // Check for conflicting hotkeys + if (!usedKeys.has(normalizedUserValue)) { + usedKeys.add(normalizedUserValue); + result[key] = normalizedUserValue; + } else { + console.error( + `Hotkey: ${formatHotkeyForDisplay( + userValue + )} for ${key} is repeated and conflicts with another hotkey. The default hotkey is used: ${formatHotkeyForDisplay( + defaultValue + )}` + ); + result[key] = defaultValue; + } + } else { + console.error( + `Hotkey: ${formatHotkeyForDisplay( + userValue + )} for ${key} is not valid. The default hotkey is used: ${formatHotkeyForDisplay( + defaultValue + )}` + ); + result[key] = defaultValue; + } + } + + return result; + } + + // Disables functions in the config object based on the provided list of function names + function disableFunctions(config, disabledFunctions) { + // Bind the hasOwnProperty method to the functionMap object to avoid errors + const hasOwnProperty = + Object.prototype.hasOwnProperty.bind(functionMap); + + // Loop through the disabledFunctions array and disable the corresponding functions in the config object + disabledFunctions.forEach(funcName => { + if (hasOwnProperty(funcName)) { + const key = functionMap[funcName]; + config[key] = "disable"; + } + }); + + // Return the updated config object + return config; + } + + /** + * The restoreImgRedMask function displays a red mask around an image to indicate the aspect ratio. + * If the image display property is set to 'none', the mask breaks. To fix this, the function + * temporarily sets the display property to 'block' and then hides the mask again after 400 milliseconds + * to avoid breaking the canvas. Additionally, the function adjusts the mask to work correctly on + * very long images. 
+ */ + function restoreImgRedMask(elements) { + const mainTabId = getTabId(elements); + + if (!mainTabId) return; + + const mainTab = gradioApp().querySelector(mainTabId); + const img = mainTab.querySelector("img"); + const imageARPreview = gradioApp().querySelector("#imageARPreview"); + + if (!img || !imageARPreview) return; + + imageARPreview.style.transform = ""; + if (parseFloat(mainTab.style.width) > 865) { + const transformString = mainTab.style.transform; + const scaleMatch = transformString.match( + /scale\(([-+]?[0-9]*\.?[0-9]+)\)/ + ); + let zoom = 1; // default zoom + + if (scaleMatch && scaleMatch[1]) { + zoom = Number(scaleMatch[1]); + } + + imageARPreview.style.transformOrigin = "0 0"; + imageARPreview.style.transform = `scale(${zoom})`; + } + + if (img.style.display !== "none") return; + + img.style.display = "block"; + + setTimeout(() => { + img.style.display = "none"; + }, 400); + } + + const hotkeysConfigOpts = await waitForOpts(); + + // Default config + const defaultHotkeysConfig = { + canvas_hotkey_zoom: "Alt", + canvas_hotkey_adjust: "Ctrl", + canvas_hotkey_reset: "KeyR", + canvas_hotkey_fullscreen: "KeyS", + canvas_hotkey_move: "KeyF", + canvas_hotkey_overlap: "KeyO", + canvas_disabled_functions: [], + canvas_show_tooltip: true, + canvas_blur_prompt: false + }; + + const functionMap = { + "Zoom": "canvas_hotkey_zoom", + "Adjust brush size": "canvas_hotkey_adjust", + "Moving canvas": "canvas_hotkey_move", + "Fullscreen": "canvas_hotkey_fullscreen", + "Reset Zoom": "canvas_hotkey_reset", + "Overlap": "canvas_hotkey_overlap" + }; + + // Loading the configuration from opts + const preHotkeysConfig = createHotkeyConfig( + defaultHotkeysConfig, + hotkeysConfigOpts + ); + + // Disable functions that are not needed by the user + const hotkeysConfig = disableFunctions( + preHotkeysConfig, + preHotkeysConfig.canvas_disabled_functions + ); + + let isMoving = false; + let mouseX, mouseY; + let activeElement; + + const elements = Object.fromEntries( + Object.keys(elementIDs).map(id => [ + id, + gradioApp().querySelector(elementIDs[id]) + ]) + ); + const elemData = {}; + + // Apply functionality to the range inputs. Restore redmask and correct for long images. + const rangeInputs = elements.rangeGroup ? 
+ Array.from(elements.rangeGroup.querySelectorAll("input")) : + [ + gradioApp().querySelector("#img2img_width input[type='range']"), + gradioApp().querySelector("#img2img_height input[type='range']") + ]; + + for (const input of rangeInputs) { + input?.addEventListener("input", () => restoreImgRedMask(elements)); + } + + function applyZoomAndPan(elemId) { + const targetElement = gradioApp().querySelector(elemId); + + if (!targetElement) { + console.log("Element not found"); + return; + } + + targetElement.style.transformOrigin = "0 0"; + + elemData[elemId] = { + zoomLevel: 1, + panX: 0, + panY: 0 + }; + let fullScreenMode = false; + + // Create tooltip + function createTooltip() { + const tooltipElement = + targetElement.querySelector(".image-container"); + const tooltip = document.createElement("div"); + tooltip.className = "canvas-tooltip"; + + // Creating an item of information + const info = document.createElement("i"); + info.className = "canvas-tooltip-info"; + info.textContent = ""; + + // Create a container for the contents of the tooltip + const tooltipContent = document.createElement("div"); + tooltipContent.className = "canvas-tooltip-content"; + + // Define an array with hotkey information and their actions + const hotkeysInfo = [ + { + configKey: "canvas_hotkey_zoom", + action: "Zoom canvas", + keySuffix: " + wheel" + }, + { + configKey: "canvas_hotkey_adjust", + action: "Adjust brush size", + keySuffix: " + wheel" + }, + {configKey: "canvas_hotkey_reset", action: "Reset zoom"}, + { + configKey: "canvas_hotkey_fullscreen", + action: "Fullscreen mode" + }, + {configKey: "canvas_hotkey_move", action: "Move canvas"}, + {configKey: "canvas_hotkey_overlap", action: "Overlap"} + ]; + + // Create hotkeys array with disabled property based on the config values + const hotkeys = hotkeysInfo.map(info => { + const configValue = hotkeysConfig[info.configKey]; + const key = info.keySuffix ? + `${configValue}${info.keySuffix}` : + configValue.charAt(configValue.length - 1); + return { + key, + action: info.action, + disabled: configValue === "disable" + }; + }); + + for (const hotkey of hotkeys) { + if (hotkey.disabled) { + continue; + } + + const p = document.createElement("p"); + p.innerHTML = `${hotkey.key} - ${hotkey.action}`; + tooltipContent.appendChild(p); + } + + // Add information and content elements to the tooltip element + tooltip.appendChild(info); + tooltip.appendChild(tooltipContent); + + // Add a hint element to the target element + tooltipElement.appendChild(tooltip); + } + + // Show tooltip if the setting is enabled + if (hotkeysConfig.canvas_show_tooltip) { + createTooltip(); + } + + // Testing showed that the img tag interferes with zooming and creates white canvases. This hack works around the problem almost entirely and has no effect on the rest of the webui. 
+ function fixCanvas() { + const activeTab = getActiveTab(elements).textContent.trim(); + + if (activeTab !== "img2img") { + const img = targetElement.querySelector(`${elemId} img`); + + if (img && img.style.display !== "none") { + img.style.display = "none"; + img.style.visibility = "hidden"; + } + } + } + + // Reset the zoom level and pan position of the target element to their initial values + function resetZoom() { + elemData[elemId] = { + zoomLevel: 1, + panX: 0, + panY: 0 + }; + + fixCanvas(); + targetElement.style.transform = `scale(${elemData[elemId].zoomLevel}) translate(${elemData[elemId].panX}px, ${elemData[elemId].panY}px)`; + + const canvas = gradioApp().querySelector( + `${elemId} canvas[key="interface"]` + ); + + toggleOverlap("off"); + fullScreenMode = false; + + if ( + canvas && + parseFloat(canvas.style.width) > 865 && + parseFloat(targetElement.style.width) > 865 + ) { + fitToElement(); + return; + } + + targetElement.style.width = ""; + if (canvas) { + targetElement.style.height = canvas.style.height; + } + } + + // Toggle the zIndex of the target element between two values, allowing it to overlap or be overlapped by other elements + function toggleOverlap(forced = "") { + const zIndex1 = "0"; + const zIndex2 = "998"; + + targetElement.style.zIndex = + targetElement.style.zIndex !== zIndex2 ? zIndex2 : zIndex1; + + if (forced === "off") { + targetElement.style.zIndex = zIndex1; + } else if (forced === "on") { + targetElement.style.zIndex = zIndex2; + } + } + + // Adjust the brush size based on the deltaY value from a mouse wheel event + function adjustBrushSize( + elemId, + deltaY, + withoutValue = false, + percentage = 5 + ) { + const input = + gradioApp().querySelector( + `${elemId} input[aria-label='Brush radius']` + ) || + gradioApp().querySelector( + `${elemId} button[aria-label="Use brush"]` + ); + + if (input) { + input.click(); + if (!withoutValue) { + const maxValue = + parseFloat(input.getAttribute("max")) || 100; + const changeAmount = maxValue * (percentage / 100); + const newValue = + parseFloat(input.value) + + (deltaY > 0 ? 
-changeAmount : changeAmount); + input.value = Math.min(Math.max(newValue, 0), maxValue); + input.dispatchEvent(new Event("change")); + } + } + } + + // Reset zoom when uploading a new image + const fileInput = gradioApp().querySelector( + `${elemId} input[type="file"][accept="image/*"].svelte-116rqfv` + ); + fileInput?.addEventListener("click", resetZoom); + + // Update the zoom level and pan position of the target element based on the values of the zoomLevel, panX and panY variables + function updateZoom(newZoomLevel, mouseX, mouseY) { + newZoomLevel = Math.max(0.5, Math.min(newZoomLevel, 15)); + + elemData[elemId].panX += + mouseX - (mouseX * newZoomLevel) / elemData[elemId].zoomLevel; + elemData[elemId].panY += + mouseY - (mouseY * newZoomLevel) / elemData[elemId].zoomLevel; + + targetElement.style.transformOrigin = "0 0"; + targetElement.style.transform = `translate(${elemData[elemId].panX}px, ${elemData[elemId].panY}px) scale(${newZoomLevel})`; + + toggleOverlap("on"); + return newZoomLevel; + } + + // Change the zoom level based on user interaction + function changeZoomLevel(operation, e) { + if (isModifierKey(e, hotkeysConfig.canvas_hotkey_zoom)) { + e.preventDefault(); + + let zoomPosX, zoomPosY; + let delta = 0.2; + if (elemData[elemId].zoomLevel > 7) { + delta = 0.9; + } else if (elemData[elemId].zoomLevel > 2) { + delta = 0.6; + } + + zoomPosX = e.clientX; + zoomPosY = e.clientY; + + fullScreenMode = false; + elemData[elemId].zoomLevel = updateZoom( + elemData[elemId].zoomLevel + + (operation === "+" ? delta : -delta), + zoomPosX - targetElement.getBoundingClientRect().left, + zoomPosY - targetElement.getBoundingClientRect().top + ); + } + } + + /** + * This function fits the target element to its parent element by calculating + * the required scale and offsets. It also updates the global variables + * zoomLevel, panX, and panY to reflect the new state. 
+ */ + + function fitToElement() { + //Reset Zoom + targetElement.style.transform = `translate(${0}px, ${0}px) scale(${1})`; + + // Get element and screen dimensions + const elementWidth = targetElement.offsetWidth; + const elementHeight = targetElement.offsetHeight; + const parentElement = targetElement.parentElement; + const screenWidth = parentElement.clientWidth; + const screenHeight = parentElement.clientHeight; + + // Get element's coordinates relative to the parent element + const elementRect = targetElement.getBoundingClientRect(); + const parentRect = parentElement.getBoundingClientRect(); + const elementX = elementRect.x - parentRect.x; + + // Calculate scale and offsets + const scaleX = screenWidth / elementWidth; + const scaleY = screenHeight / elementHeight; + const scale = Math.min(scaleX, scaleY); + + const transformOrigin = + window.getComputedStyle(targetElement).transformOrigin; + const [originX, originY] = transformOrigin.split(" "); + const originXValue = parseFloat(originX); + const originYValue = parseFloat(originY); + + const offsetX = + (screenWidth - elementWidth * scale) / 2 - + originXValue * (1 - scale); + const offsetY = + (screenHeight - elementHeight * scale) / 2.5 - + originYValue * (1 - scale); + + // Apply scale and offsets to the element + targetElement.style.transform = `translate(${offsetX}px, ${offsetY}px) scale(${scale})`; + + // Update global variables + elemData[elemId].zoomLevel = scale; + elemData[elemId].panX = offsetX; + elemData[elemId].panY = offsetY; + + fullScreenMode = false; + toggleOverlap("off"); + } + + /** + * This function fits the target element to the screen by calculating + * the required scale and offsets. It also updates the global variables + * zoomLevel, panX, and panY to reflect the new state. 
+ */ + + // Fullscreen mode + function fitToScreen() { + const canvas = gradioApp().querySelector( + `${elemId} canvas[key="interface"]` + ); + + if (!canvas) return; + + if (canvas.offsetWidth > 862) { + targetElement.style.width = canvas.offsetWidth + "px"; + } + + if (fullScreenMode) { + resetZoom(); + fullScreenMode = false; + return; + } + + //Reset Zoom + targetElement.style.transform = `translate(${0}px, ${0}px) scale(${1})`; + + // Get scrollbar width to right-align the image + const scrollbarWidth = + window.innerWidth - document.documentElement.clientWidth; + + // Get element and screen dimensions + const elementWidth = targetElement.offsetWidth; + const elementHeight = targetElement.offsetHeight; + const screenWidth = window.innerWidth - scrollbarWidth; + const screenHeight = window.innerHeight; + + // Get element's coordinates relative to the page + const elementRect = targetElement.getBoundingClientRect(); + const elementY = elementRect.y; + const elementX = elementRect.x; + + // Calculate scale and offsets + const scaleX = screenWidth / elementWidth; + const scaleY = screenHeight / elementHeight; + const scale = Math.min(scaleX, scaleY); + + // Get the current transformOrigin + const computedStyle = window.getComputedStyle(targetElement); + const transformOrigin = computedStyle.transformOrigin; + const [originX, originY] = transformOrigin.split(" "); + const originXValue = parseFloat(originX); + const originYValue = parseFloat(originY); + + // Calculate offsets with respect to the transformOrigin + const offsetX = + (screenWidth - elementWidth * scale) / 2 - + elementX - + originXValue * (1 - scale); + const offsetY = + (screenHeight - elementHeight * scale) / 2 - + elementY - + originYValue * (1 - scale); + + // Apply scale and offsets to the element + targetElement.style.transform = `translate(${offsetX}px, ${offsetY}px) scale(${scale})`; + + // Update global variables + elemData[elemId].zoomLevel = scale; + elemData[elemId].panX = offsetX; + elemData[elemId].panY = offsetY; + + fullScreenMode = true; + toggleOverlap("on"); + } + + // Handle keydown events + function handleKeyDown(event) { + // Disable key locks to make pasting from the buffer work correctly + if ((event.ctrlKey && event.code === 'KeyV') || (event.ctrlKey && event.code === 'KeyC') || event.code === "F5") { + return; + } + + // before activating shortcut, ensure user is not actively typing in an input field + if (!hotkeysConfig.canvas_blur_prompt) { + if (event.target.nodeName === 'TEXTAREA' || event.target.nodeName === 'INPUT') { + return; + } + } + + + const hotkeyActions = { + [hotkeysConfig.canvas_hotkey_reset]: resetZoom, + [hotkeysConfig.canvas_hotkey_overlap]: toggleOverlap, + [hotkeysConfig.canvas_hotkey_fullscreen]: fitToScreen + }; + + const action = hotkeyActions[event.code]; + if (action) { + event.preventDefault(); + action(event); + } + + if ( + isModifierKey(event, hotkeysConfig.canvas_hotkey_zoom) || + isModifierKey(event, hotkeysConfig.canvas_hotkey_adjust) + ) { + event.preventDefault(); + } + } + + // Get Mouse position + function getMousePosition(e) { + mouseX = e.offsetX; + mouseY = e.offsetY; + } + + targetElement.addEventListener("mousemove", getMousePosition); + + // Handle events only inside the targetElement + let isKeyDownHandlerAttached = false; + + function handleMouseMove() { + if (!isKeyDownHandlerAttached) { + document.addEventListener("keydown", handleKeyDown); + isKeyDownHandlerAttached = true; + + activeElement = elemId; + } + } + + function handleMouseLeave() { + if 
(isKeyDownHandlerAttached) { + document.removeEventListener("keydown", handleKeyDown); + isKeyDownHandlerAttached = false; + + activeElement = null; + } + } + + // Add mouse event handlers + targetElement.addEventListener("mousemove", handleMouseMove); + targetElement.addEventListener("mouseleave", handleMouseLeave); + + // Reset zoom when clicking on another tab + elements.img2imgTabs.addEventListener("click", resetZoom); + elements.img2imgTabs.addEventListener("click", () => { + // targetElement.style.width = ""; + if (parseInt(targetElement.style.width) > 865) { + setTimeout(fitToElement, 0); + } + }); + + targetElement.addEventListener("wheel", e => { + // change zoom level + const operation = e.deltaY > 0 ? "-" : "+"; + changeZoomLevel(operation, e); + + // Handle brush size adjustment with the modifier key pressed + if (isModifierKey(e, hotkeysConfig.canvas_hotkey_adjust)) { + e.preventDefault(); + + // Increase or decrease brush size based on scroll direction + adjustBrushSize(elemId, e.deltaY); + } + }); + + // Handle the move event for pan functionality. Updates the panX and panY variables and applies the new transform to the target element. + function handleMoveKeyDown(e) { + + // Disable key locks to make pasting from the buffer work correctly + if ((e.ctrlKey && e.code === 'KeyV') || (e.ctrlKey && e.code === 'KeyC') || e.code === "F5") { + return; + } + + // before activating shortcut, ensure user is not actively typing in an input field + if (!hotkeysConfig.canvas_blur_prompt) { + if (e.target.nodeName === 'TEXTAREA' || e.target.nodeName === 'INPUT') { + return; + } + } + + + if (e.code === hotkeysConfig.canvas_hotkey_move) { + if (!e.ctrlKey && !e.metaKey && isKeyDownHandlerAttached) { + e.preventDefault(); + document.activeElement.blur(); + isMoving = true; + } + } + } + + function handleMoveKeyUp(e) { + if (e.code === hotkeysConfig.canvas_hotkey_move) { + isMoving = false; + } + } + + document.addEventListener("keydown", handleMoveKeyDown); + document.addEventListener("keyup", handleMoveKeyUp); + + // Detect zoom level and update the pan speed. 
+ function updatePanPosition(movementX, movementY) { + let panSpeed = 2; + + if (elemData[elemId].zoomLevel > 8) { + panSpeed = 3.5; + } + + elemData[elemId].panX += movementX * panSpeed; + elemData[elemId].panY += movementY * panSpeed; + + // Delayed redraw of an element + requestAnimationFrame(() => { + targetElement.style.transform = `translate(${elemData[elemId].panX}px, ${elemData[elemId].panY}px) scale(${elemData[elemId].zoomLevel})`; + toggleOverlap("on"); + }); + } + + function handleMoveByKey(e) { + if (isMoving && elemId === activeElement) { + updatePanPosition(e.movementX, e.movementY); + targetElement.style.pointerEvents = "none"; + } else { + targetElement.style.pointerEvents = "auto"; + } + } + + // Prevents sticking to the mouse + window.onblur = function() { + isMoving = false; + }; + + gradioApp().addEventListener("mousemove", handleMoveByKey); + } + + applyZoomAndPan(elementIDs.sketch); + applyZoomAndPan(elementIDs.inpaint); + applyZoomAndPan(elementIDs.inpaintSketch); + + // Make the function global so that other extensions can take advantage of this solution + window.applyZoomAndPan = applyZoomAndPan; +}); diff --git a/extensions-builtin/canvas-zoom-and-pan/scripts/__pycache__/hotkey_config.cpython-310.pyc b/extensions-builtin/canvas-zoom-and-pan/scripts/__pycache__/hotkey_config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c196759f210788c8d476a168cdfd1650bc1a6f59 Binary files /dev/null and b/extensions-builtin/canvas-zoom-and-pan/scripts/__pycache__/hotkey_config.cpython-310.pyc differ diff --git a/extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py b/extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py new file mode 100644 index 0000000000000000000000000000000000000000..380176ce26ccecbdfa1a64791543f3061eba64ed --- /dev/null +++ b/extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py @@ -0,0 +1,14 @@ +import gradio as gr +from modules import shared + +shared.options_templates.update(shared.options_section(('canvas_hotkey', "Canvas Hotkeys"), { + "canvas_hotkey_zoom": shared.OptionInfo("Alt", "Zoom canvas", gr.Radio, {"choices": ["Shift","Ctrl", "Alt"]}).info("If you choose 'Shift', you cannot scroll horizontally; 'Alt' can cause a little trouble in Firefox"), + "canvas_hotkey_adjust": shared.OptionInfo("Ctrl", "Adjust brush size", gr.Radio, {"choices": ["Shift","Ctrl", "Alt"]}).info("If you choose 'Shift', you cannot scroll horizontally; 'Alt' can cause a little trouble in Firefox"), + "canvas_hotkey_move": shared.OptionInfo("F", "Moving the canvas").info("To work correctly in Firefox, turn off 'Automatically search the page text when typing' in the browser settings"), + "canvas_hotkey_fullscreen": shared.OptionInfo("S", "Fullscreen mode, maximizes the picture so that it fits into the screen and stretches it to its full width"), + "canvas_hotkey_reset": shared.OptionInfo("R", "Reset zoom and canvas position"), + "canvas_hotkey_overlap": shared.OptionInfo("O", "Toggle overlap").info("Technical button, needed for testing"), + "canvas_show_tooltip": shared.OptionInfo(True, "Enable tooltip on the canvas"), + "canvas_blur_prompt": shared.OptionInfo(False, "Take the focus off the prompt when working with a canvas"), + "canvas_disabled_functions": shared.OptionInfo(["Overlap"], "Disable functions that you don't use", gr.CheckboxGroup, {"choices": ["Zoom","Adjust brush size", "Moving canvas","Fullscreen","Reset Zoom","Overlap"]}), +})) diff --git a/extensions-builtin/canvas-zoom-and-pan/style.css 
b/extensions-builtin/canvas-zoom-and-pan/style.css new file mode 100644 index 0000000000000000000000000000000000000000..6bcc9570c45cf9b2ac426dd5981d78dcb0ac72d0 --- /dev/null +++ b/extensions-builtin/canvas-zoom-and-pan/style.css @@ -0,0 +1,63 @@ +.canvas-tooltip-info { + position: absolute; + top: 10px; + left: 10px; + cursor: help; + background-color: rgba(0, 0, 0, 0.3); + width: 20px; + height: 20px; + border-radius: 50%; + display: flex; + align-items: center; + justify-content: center; + flex-direction: column; + + z-index: 100; +} + +.canvas-tooltip-info::after { + content: ''; + display: block; + width: 2px; + height: 7px; + background-color: white; + margin-top: 2px; +} + +.canvas-tooltip-info::before { + content: ''; + display: block; + width: 2px; + height: 2px; + background-color: white; +} + +.canvas-tooltip-content { + display: none; + background-color: #f9f9f9; + color: #333; + border: 1px solid #ddd; + padding: 15px; + position: absolute; + top: 40px; + left: 10px; + width: 250px; + font-size: 16px; + opacity: 0; + border-radius: 8px; + box-shadow: 0px 8px 16px 0px rgba(0,0,0,0.2); + + z-index: 100; +} + +.canvas-tooltip:hover .canvas-tooltip-content { + display: block; + animation: fadeIn 0.5s; + opacity: 1; +} + +@keyframes fadeIn { + from {opacity: 0;} + to {opacity: 1;} +} + diff --git a/extensions-builtin/extra-options-section/scripts/__pycache__/extra_options_section.cpython-310.pyc b/extensions-builtin/extra-options-section/scripts/__pycache__/extra_options_section.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..52424fc2db094c9e0b04a76db679d38e3293c0d4 Binary files /dev/null and b/extensions-builtin/extra-options-section/scripts/__pycache__/extra_options_section.cpython-310.pyc differ diff --git a/extensions-builtin/extra-options-section/scripts/extra_options_section.py b/extensions-builtin/extra-options-section/scripts/extra_options_section.py new file mode 100644 index 0000000000000000000000000000000000000000..6823ff1109bd3a388c8d226996ebc650261d738e --- /dev/null +++ b/extensions-builtin/extra-options-section/scripts/extra_options_section.py @@ -0,0 +1,48 @@ +import gradio as gr +from modules import scripts, shared, ui_components, ui_settings +from modules.ui_components import FormColumn + + +class ExtraOptionsSection(scripts.Script): + section = "extra_options" + + def __init__(self): + self.comps = None + self.setting_names = None + + def title(self): + return "Extra options" + + def show(self, is_img2img): + return scripts.AlwaysVisible + + def ui(self, is_img2img): + self.comps = [] + self.setting_names = [] + + with gr.Blocks() as interface: + with gr.Accordion("Options", open=False) if shared.opts.extra_options_accordion and shared.opts.extra_options else gr.Group(), gr.Row(): + for setting_name in shared.opts.extra_options: + with FormColumn(): + comp = ui_settings.create_setting_component(setting_name) + + self.comps.append(comp) + self.setting_names.append(setting_name) + + def get_settings_values(): + return [ui_settings.get_value_for_setting(key) for key in self.setting_names] + + interface.load(fn=get_settings_values, inputs=[], outputs=self.comps, queue=False, show_progress=False) + + return self.comps + + def before_process(self, p, *args): + for name, value in zip(self.setting_names, args): + if name not in p.override_settings: + p.override_settings[name] = value + + +shared.options_templates.update(shared.options_section(('ui', "User interface"), { + "extra_options": shared.OptionInfo([], "Options in main UI", 
ui_components.DropdownMulti, lambda: {"choices": list(shared.opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that also appear in txt2img/img2img interfaces").needs_restart(), + "extra_options_accordion": shared.OptionInfo(False, "Place options in main UI into an accordion") +})) diff --git a/extensions-builtin/mobile/javascript/mobile.js b/extensions-builtin/mobile/javascript/mobile.js new file mode 100644 index 0000000000000000000000000000000000000000..12cae4b75764779f7da3e424a959f966c06a8648 --- /dev/null +++ b/extensions-builtin/mobile/javascript/mobile.js @@ -0,0 +1,26 @@ +var isSetupForMobile = false; + +function isMobile() { + for (var tab of ["txt2img", "img2img"]) { + var imageTab = gradioApp().getElementById(tab + '_results'); + if (imageTab && imageTab.offsetParent && imageTab.offsetLeft == 0) { + return true; + } + } + + return false; +} + +function reportWindowSize() { + var currentlyMobile = isMobile(); + if (currentlyMobile == isSetupForMobile) return; + isSetupForMobile = currentlyMobile; + + for (var tab of ["txt2img", "img2img"]) { + var button = gradioApp().getElementById(tab + '_generate_box'); + var target = gradioApp().getElementById(currentlyMobile ? tab + '_results' : tab + '_actions_column'); + target.insertBefore(button, target.firstElementChild); + } +} + +window.addEventListener("resize", reportWindowSize); diff --git a/extensions-builtin/prompt-bracket-checker/javascript/prompt-bracket-checker.js b/extensions-builtin/prompt-bracket-checker/javascript/prompt-bracket-checker.js new file mode 100644 index 0000000000000000000000000000000000000000..114cf94ccbf69b473757f2fc46443a39723a9269 --- /dev/null +++ b/extensions-builtin/prompt-bracket-checker/javascript/prompt-bracket-checker.js @@ -0,0 +1,42 @@ +// Stable Diffusion WebUI - Bracket checker +// By Hingashi no Florin/Bwin4L & @akx +// Counts open and closed brackets (round, square, curly) in the prompt and negative prompt text boxes in the txt2img and img2img tabs. +// If there's a mismatch, the keyword counter turns red and if you hover on it, a tooltip tells you what's wrong. 
+ +function checkBrackets(textArea, counterElt) { + var counts = {}; + (textArea.value.match(/[(){}[\]]/g) || []).forEach(bracket => { + counts[bracket] = (counts[bracket] || 0) + 1; + }); + var errors = []; + + function checkPair(open, close, kind) { + if (counts[open] !== counts[close]) { + errors.push( + `${open}...${close} - Detected ${counts[open] || 0} opening and ${counts[close] || 0} closing ${kind}.` + ); + } + } + + checkPair('(', ')', 'round brackets'); + checkPair('[', ']', 'square brackets'); + checkPair('{', '}', 'curly brackets'); + counterElt.title = errors.join('\n'); + counterElt.classList.toggle('error', errors.length !== 0); +} + +function setupBracketChecking(id_prompt, id_counter) { + var textarea = gradioApp().querySelector("#" + id_prompt + " > label > textarea"); + var counter = gradioApp().getElementById(id_counter); + + if (textarea && counter) { + textarea.addEventListener("input", () => checkBrackets(textarea, counter)); + } +} + +onUiLoaded(function() { + setupBracketChecking('txt2img_prompt', 'txt2img_token_counter'); + setupBracketChecking('txt2img_neg_prompt', 'txt2img_negative_token_counter'); + setupBracketChecking('img2img_prompt', 'img2img_token_counter'); + setupBracketChecking('img2img_neg_prompt', 'img2img_negative_token_counter'); +}); diff --git a/extensions/Hypernetwork-MonkeyPatch-Extension/.gitignore b/extensions/Hypernetwork-MonkeyPatch-Extension/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..fd20fddf874731c364880a33eb9acd43c1512365 --- /dev/null +++ b/extensions/Hypernetwork-MonkeyPatch-Extension/.gitignore @@ -0,0 +1,2 @@ + +*.pyc diff --git a/extensions/Hypernetwork-MonkeyPatch-Extension/README.md b/extensions/Hypernetwork-MonkeyPatch-Extension/README.md new file mode 100644 index 0000000000000000000000000000000000000000..2ff3a1898d9481e0ff24ac42a985013d756921ba --- /dev/null +++ b/extensions/Hypernetwork-MonkeyPatch-Extension/README.md @@ -0,0 +1,83 @@ +# Hypernetwork-MonkeyPatch-Extension +Extension that patches Hypernetwork structures and training +![image](https://user-images.githubusercontent.com/35677394/210898033-44da3cdb-a501-4cb3-a176-07ff8548d699.png) + +![image](https://user-images.githubusercontent.com/35677394/203494809-9874c123-fca7-4d14-9995-63dc8772c920.png) + +For Hypernetwork structure, see https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/4334 + +For Variable Dropout, see https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/4288 + + +### The Train_Beta (now train_gamma) tab allows more options with improved training. +![image](https://user-images.githubusercontent.com/35677394/203494907-68e0ef39-4d8c-42de-ba2e-65590375c435.png) + +### Features + +**No-Crop Training** +![image](https://user-images.githubusercontent.com/35677394/203495373-cef04677-cdd6-43b0-ba42-d7c0f3d5a78f.png) +You can train without cropping images. +This feature is now implemented in the original webui too! :partying_face: + +**Fix OSError while training** + +**Unload Optimizer while generating Previews** + +**Tensorboard integration and Tuning** + +**Residual-Block based Hypernetwork (in beta test)** + + +### Create_Beta_hypernetwork allows creating beta hypernetworks. + +Beta hypernetworks can contain additional information and specified dropout structures. They will load without the extension too, but the dropout structure won't be loaded, so training won't work as in the original. Generating images should work identically. 
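+
+For illustration, here is a minimal sketch of how a layer structure and a matching per-layer dropout structure could fit together. The helper name and the concrete rates below are assumptions made for this example, not the extension's actual API or saved format:
+
+```python
+import torch.nn as nn
+
+def build_hypernetwork_mlp(dim, layer_structure, dropout_structure):
+    """Sketch: build one hypernetwork MLP from relative layer sizes and per-layer dropout rates."""
+    sizes = [int(dim * m) for m in layer_structure]
+    modules = []
+    for i in range(len(sizes) - 1):
+        modules.append(nn.Linear(sizes[i], sizes[i + 1]))
+        if i < len(sizes) - 2:  # no activation or dropout after the final linear layer
+            modules.append(nn.ReLU())
+            if dropout_structure[i + 1] > 0:
+                modules.append(nn.Dropout(p=dropout_structure[i + 1]))
+    return nn.Sequential(*modules)
+
+# e.g. a 1 -> 2 -> 1 structure at dim=768, with dropout only on the hidden layer
+net = build_hypernetwork_mlp(768, [1, 2, 1], [0.0, 0.3, 0.0])
+```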
+ +This extension also overrides how webui loads and finds hypernetworks, to support variable dropout rates and other additions. +Thus, a hypernetwork created with a variable dropout rate might not work correctly in the original webui. + +At least it should now load without problems, except that you cannot use a variable dropout rate in the original webui. If you have a problem with loading hypernetworks, please create an issue. I can submit a PR to the original repository to load these beta-typed hypernetworks correctly. + +### Training features are in the train_gamma tab. +![image](https://user-images.githubusercontent.com/35677394/204087550-94b8e7fb-70cb-4157-96bc-e022340901c9.png) + +If you're unsure about the options, just enable every checkbox and keep the default values. + + +### CosineAnnealingWarmupRestarts +![image](https://user-images.githubusercontent.com/35677394/204087530-b7938e7e-ebe5-4326-b5cd-25480645a11b.png) + +This also fixes some CUDA memory issues. Currently both Beta and Gamma training are working very well, as far as I can tell. + + +### Hyperparameter Tuning +![image](https://user-images.githubusercontent.com/35677394/212574147-22a32b03-6544-4aee-9ac7-fdefd2b7ee56.png) +Now you can save hypernetwork generation / training settings and load them in the train_tuning tab. This allows combining hypernetwork structures and training setups to find the best configuration. + +### CLIP change test tab +![image](https://user-images.githubusercontent.com/35677394/212574217-3dd08007-e33f-4179-96e9-5a90bccd4907.png) +Now you can select the CLIP model. Its effect is significant, but whether it is better is unknown. + + +## Residual hypernetwork? +The ResNet concept of returning x + f(x) in layers is available as an option (a minimal sketch appears at the end of this README). The original webui does not support this, so you cannot load such a network without the extension. +Unlike the expanding type (1 -> 2 -> 1), a shrinking type (1 -> 0.1 -> 1) network will lose information in the initial phase. In this case, we need to additionally train a transformation that compresses and decompresses it. This is currently only in code; it is not offered in the UI by default. + +## D-Adaptation +Currently D-Adaptation is available for hypernetwork training. You can use it by enabling the advanced AdamW parameter option and checking the checkbox. +The recommended LR is 1.0; only change it if required. Other features are not tested with this feature. +The code is based on: +https://github.com/facebookresearch/dadaptation + +### Planned features +Training option loading and tuning for textual inversion + +D-Adaptation for textual inversion + +Adan and more optimizer options + +Matching updates from the D-Adaptation repository + + +### Some personal research + +We cannot apply convolution to attention. It does do something, but the hypernetwork here only affects attention, and that is different from the 'attention map', which is already a decoded form (image BW vectors) of attention (latent space). The same goes for SENet, unfortunately. 
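+
+To make the residual option above concrete, here is a minimal sketch of an x + f(x) module. The class name and sizes are hypothetical; the extension's real classes differ, and this only illustrates the skip connection:
+
+```python
+import torch
+import torch.nn as nn
+
+class ResidualHypernetworkModule(nn.Module):
+    """Sketch: wrap a small MLP f so the module returns x + f(x), ResNet-style."""
+    def __init__(self, dim, hidden_mult=2):
+        super().__init__()
+        self.net = nn.Sequential(
+            nn.Linear(dim, int(dim * hidden_mult)),
+            nn.ReLU(),
+            nn.Linear(int(dim * hidden_mult), dim),
+        )
+
+    def forward(self, x):
+        # identity plus a learned correction, as in ResNet blocks
+        return x + self.net(x)
+
+module = ResidualHypernetworkModule(768)
+out = module(torch.randn(1, 77, 768))  # output shape matches input
+```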
diff --git a/extensions/Hypernetwork-MonkeyPatch-Extension/patches/__pycache__/clip_hijack.cpython-310.pyc b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/__pycache__/clip_hijack.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..afcc62353e3a0cb5b0bdaa7b25ccffc114a83fcb Binary files /dev/null and b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/__pycache__/clip_hijack.cpython-310.pyc differ diff --git a/extensions/Hypernetwork-MonkeyPatch-Extension/patches/__pycache__/ddpm_hijack.cpython-310.pyc b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/__pycache__/ddpm_hijack.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5692537d638f3975001d66e9b98ceb051e002613 Binary files /dev/null and b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/__pycache__/ddpm_hijack.cpython-310.pyc differ diff --git a/extensions/Hypernetwork-MonkeyPatch-Extension/patches/__pycache__/hnutil.cpython-310.pyc b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/__pycache__/hnutil.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4dab4b5ed78bd92a23bf1a28f746e9d4f20cd2ca Binary files /dev/null and b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/__pycache__/hnutil.cpython-310.pyc differ diff --git a/extensions/Hypernetwork-MonkeyPatch-Extension/patches/__pycache__/hypernetwork.cpython-310.pyc b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/__pycache__/hypernetwork.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7f69adf0839747415df844ea31eb30c48f38106f Binary files /dev/null and b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/__pycache__/hypernetwork.cpython-310.pyc differ diff --git a/extensions/Hypernetwork-MonkeyPatch-Extension/patches/__pycache__/scheduler.cpython-310.pyc b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/__pycache__/scheduler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5c51a9f186b7bf2549b6e916e394a00ea92bedfe Binary files /dev/null and b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/__pycache__/scheduler.cpython-310.pyc differ diff --git a/extensions/Hypernetwork-MonkeyPatch-Extension/patches/__pycache__/shared.cpython-310.pyc b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/__pycache__/shared.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f192dbea1004b71aaf5b8b313a90d7219925e014 Binary files /dev/null and b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/__pycache__/shared.cpython-310.pyc differ diff --git a/extensions/Hypernetwork-MonkeyPatch-Extension/patches/__pycache__/tbutils.cpython-310.pyc b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/__pycache__/tbutils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4eb4af8414dc822147f2282b32c92150a76db7ca Binary files /dev/null and b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/__pycache__/tbutils.cpython-310.pyc differ diff --git a/extensions/Hypernetwork-MonkeyPatch-Extension/patches/__pycache__/textual_inversion.cpython-310.pyc b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/__pycache__/textual_inversion.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ca9c0cfad0ff58344072d2c5af26cf6752154867 Binary files /dev/null and b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/__pycache__/textual_inversion.cpython-310.pyc differ diff --git 
a/extensions/Hypernetwork-MonkeyPatch-Extension/patches/__pycache__/ui.cpython-310.pyc b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/__pycache__/ui.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..19f70ff03023e73929e9ba0c8fad23403d3af819 Binary files /dev/null and b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/__pycache__/ui.cpython-310.pyc differ diff --git a/extensions/Hypernetwork-MonkeyPatch-Extension/patches/clip_hijack.py b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/clip_hijack.py new file mode 100644 index 0000000000000000000000000000000000000000..a826ca4c8287bc536c89b556605104c75522b60e --- /dev/null +++ b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/clip_hijack.py @@ -0,0 +1,71 @@ +from modules import sd_hijack_clip, sd_hijack, shared +from modules.sd_hijack import StableDiffusionModelHijack, EmbeddingsWithFixes, apply_optimizations +try: + from modules.sd_hijack import fix_checkpoint + def clear_any_hijacks(): + StableDiffusionModelHijack.hijack = default_hijack +except (ModuleNotFoundError, ImportError): + from modules.sd_hijack_checkpoint import add, remove + def fix_checkpoint(): + add() + + def clear_any_hijacks(): + remove() + StableDiffusionModelHijack.hijack = default_hijack + + +import ldm.modules.encoders.modules + +default_hijack = StableDiffusionModelHijack.hijack + +def trigger_sd_hijack(enabled, pretrained_key): + clear_any_hijacks() + if not enabled or pretrained_key == '': + pretrained_key = 'openai/clip-vit-large-patch14' + StableDiffusionModelHijack.hijack = create_lambda(pretrained_key) + print("Hijacked clip text model!") + sd_hijack.model_hijack.undo_hijack(shared.sd_model) + sd_hijack.model_hijack.hijack(shared.sd_model) + if not enabled: + StableDiffusionModelHijack.hijack = default_hijack + + + + +def create_lambda(model): + def hijack_lambda(self, m): + if type(m.cond_stage_model) == ldm.modules.encoders.modules.FrozenCLIPEmbedder: + from transformers import CLIPTextModel, CLIPTokenizer + print(f"Changing CLIP model to {model}") + try: + m.cond_stage_model.transformer = CLIPTextModel.from_pretrained( + model).to(m.cond_stage_model.transformer.device) + m.cond_stage_model.transformer.requires_grad_(False) + m.cond_stage_model.tokenizer = CLIPTokenizer.from_pretrained( + model) + except Exception: + print(f"Cannot initialize from given model key {model}!") + + model_embeddings = m.cond_stage_model.transformer.text_model.embeddings + model_embeddings.token_embedding = EmbeddingsWithFixes(model_embeddings.token_embedding, self) + m.cond_stage_model = sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords(m.cond_stage_model, self) + + self.optimization_method = apply_optimizations() + + self.clip = m.cond_stage_model + + fix_checkpoint() + + + def flatten(el): + flattened = [flatten(children) for children in el.children()] + res = [el] + for c in flattened: + res += c + return res + + self.layers = flatten(m) + else: + print("CLIP change can only be applied to the FrozenCLIPEmbedder class") + return default_hijack(self, m) + return hijack_lambda diff --git a/extensions/Hypernetwork-MonkeyPatch-Extension/patches/dataset.py b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..8f656a1854509bd820d5ed84e6afa618ade626ba --- /dev/null +++ b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/dataset.py @@ -0,0 +1,130 @@ +import os +import random +import re + +import PIL +import torch +import tqdm +import numpy as np +from PIL import 
Image +from .hnutil import get_closest +from torch.utils.data import Dataset +from torchvision import transforms + +from modules import shared, devices +from modules.textual_inversion.dataset import DatasetEntry, re_numbers_at_start + + +class PersonalizedBase(Dataset): + def __init__(self, data_root, width, height, repeats, flip_p=0.5, placeholder_token="*", model=None, device=None, template_file=None, include_cond=False, batch_size=1): + re_word = re.compile(shared.opts.dataset_filename_word_regex) if len(shared.opts.dataset_filename_word_regex) > 0 else None + + self.placeholder_token = placeholder_token + + self.batch_size = batch_size + self.width = width + self.height = height + self.flip = transforms.RandomHorizontalFlip(p=flip_p) + + self.dataset = [] + + with open(template_file, "r") as file: + lines = [x.strip() for x in file.readlines()] + + self.lines = lines + + assert data_root, 'dataset directory not specified' + assert os.path.isdir(data_root), "Dataset directory doesn't exist" + assert os.listdir(data_root), "Dataset directory is empty" + + cond_model = shared.sd_model.cond_stage_model + + self.image_paths = [os.path.join(data_root, file_path) for file_path in os.listdir(data_root)] * batch_size + print("Preparing dataset...") + for path in tqdm.tqdm(self.image_paths): + try: + image = Image.open(path).convert('RGB') + w, h = image.size + r = max(1, w / self.width, h / self.height) # divide by this + amp = min(self.width / w, self.height / h) # if amp < 1, then ignore, else, multiply. + if amp > 1: + w, h = w * amp, h * amp + w, h = int(w/r), int(h/r) + w, h = get_closest(w), get_closest(h) + image = image.resize((w,h), PIL.Image.LANCZOS) + + except Exception: + continue + + text_filename = os.path.splitext(path)[0] + ".txt" + filename = os.path.basename(path) + + if os.path.exists(text_filename): + with open(text_filename, "r", encoding="utf8") as file: + filename_text = file.read() + else: + filename_text = os.path.splitext(filename)[0] + filename_text = re.sub(re_numbers_at_start, '', filename_text) + if re_word: + tokens = re_word.findall(filename_text) + filename_text = (shared.opts.dataset_filename_join_string or "").join(tokens) + + npimage = np.array(image).astype(np.uint8) + npimage = (npimage / 127.5 - 1.0).astype(np.float32) + + torchdata = torch.from_numpy(npimage).to(device=device, dtype=torch.float32) + torchdata = torch.moveaxis(torchdata, 2, 0) + + init_latent = model.get_first_stage_encoding(model.encode_first_stage(torchdata.unsqueeze(dim=0))).squeeze() + init_latent = init_latent.to(devices.cpu) + + entry = DatasetEntry(filename=path, filename_text=filename_text, latent=init_latent) + + if include_cond: + entry.cond_text = self.create_text(filename_text) + entry.cond = cond_model([entry.cond_text]).to(devices.cpu).squeeze(0) + + self.dataset.append(entry) + + assert len(self.dataset) > 0, "No images have been found in the dataset." 
+ self.length = len(self.dataset) * repeats // batch_size + + self.dataset_length = len(self.dataset) + self.indexes = None + self.random = np.random.default_rng(42) + self.shuffle() + + def shuffle(self): + self.indexes = self.random.permutation(self.dataset_length) + + def create_text(self, filename_text): + text = random.choice(self.lines) + text = text.replace("[name]", self.placeholder_token) + tags = filename_text.split(',') + if shared.opts.tag_drop_out != 0: + tags = [t for t in tags if random.random() > shared.opts.tag_drop_out] + if shared.opts.shuffle_tags: + random.shuffle(tags) + text = text.replace("[filewords]", ','.join(tags)) + return text + + def __len__(self): + return self.length + + def __getitem__(self, i): + res = [] + + for j in range(self.batch_size): + position = i * self.batch_size + j + if position % len(self.indexes) == 0: + self.shuffle() + + index = self.indexes[position % len(self.indexes)] + entry = self.dataset[index] + + if entry.cond is None: + entry.cond_text = self.create_text(entry.filename_text) + + res.append(entry) + + return res \ No newline at end of file diff --git a/extensions/Hypernetwork-MonkeyPatch-Extension/patches/ddpm_hijack.py b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/ddpm_hijack.py new file mode 100644 index 0000000000000000000000000000000000000000..8dcb22c7d881d85f59ea0aa893fea6d6af6b4602 --- /dev/null +++ b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/ddpm_hijack.py @@ -0,0 +1,71 @@ +import torch +import ldm.models.diffusion.ddpm +from modules import shared + + +class Scheduler: + """ Proportional Noise Step Scheduler""" + def __init__(self, cycle_step=128, repeat=True): + self.disabled = True + self.cycle_step = int(cycle_step) + self.repeat = repeat + self.run_assertion() + + def __call__(self, value, step): + if self.disabled: + return value + if self.repeat: + step %= self.cycle_step + return max(1, int(value * step / self.cycle_step)) + else: + return value if step >= self.cycle_step else max(1, int(value * step / self.cycle_step)) + + def run_assertion(self): + assert type(self.cycle_step) is int + assert type(self.repeat) is bool + assert not self.repeat or self.cycle_step > 0 + + def set(self, cycle_step=-1, repeat=-1, disabled=True): + self.disabled = disabled + if cycle_step >= 0: + self.cycle_step = int(cycle_step) + if repeat != -1: + self.repeat = repeat + self.run_assertion() + + +training_scheduler = Scheduler(cycle_step=-1, repeat=False) + + +def get_current(value, step=None): + if step is None: + if hasattr(shared, 'accessible_hypernetwork'): + hypernetwork = shared.accessible_hypernetwork + else: + return value + if hasattr(hypernetwork, 'step') and hypernetwork.training and hypernetwork.step is not None: + return training_scheduler(value, hypernetwork.step) + return value + return max(1, training_scheduler(value, step)) + + +def set_scheduler(cycle_step, repeat, enabled=False): + global training_scheduler + training_scheduler.set(cycle_step, repeat, not enabled) + + +def forward(self, x, c, *args, **kwargs): + t = torch.randint(0, get_current(self.num_timesteps), (x.shape[0],), device=self.device).long() + if self.model.conditioning_key is not None: + assert c is not None + if self.cond_stage_trainable: + c = self.get_learned_conditioning(c) + if self.shorten_cond_schedule: # TODO: drop this option + tc = self.cond_ids[t].to(self.device) + c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) + return self.p_losses(x, c, t, *args, **kwargs) + + + + 
+ldm.models.diffusion.ddpm.LatentDiffusion.forward = forward
diff --git a/extensions/Hypernetwork-MonkeyPatch-Extension/patches/external_pr/__pycache__/dataset.cpython-310.pyc b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/external_pr/__pycache__/dataset.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..829d6d6262d2c435b603142cfee44392b1f48859
Binary files /dev/null and b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/external_pr/__pycache__/dataset.cpython-310.pyc differ
diff --git a/extensions/Hypernetwork-MonkeyPatch-Extension/patches/external_pr/__pycache__/hypernetwork.cpython-310.pyc b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/external_pr/__pycache__/hypernetwork.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..668f34336e220f162448e2c821a1d1d491118dff
Binary files /dev/null and b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/external_pr/__pycache__/hypernetwork.cpython-310.pyc differ
diff --git a/extensions/Hypernetwork-MonkeyPatch-Extension/patches/external_pr/__pycache__/sd_hijack_checkpoint.cpython-310.pyc b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/external_pr/__pycache__/sd_hijack_checkpoint.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7a6fdc0719e361ff2644069c7614897f72cd3550
Binary files /dev/null and b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/external_pr/__pycache__/sd_hijack_checkpoint.cpython-310.pyc differ
diff --git a/extensions/Hypernetwork-MonkeyPatch-Extension/patches/external_pr/__pycache__/textual_inversion.cpython-310.pyc b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/external_pr/__pycache__/textual_inversion.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2860d3edb072b973becedad1a5cd043fe5b4bd19
Binary files /dev/null and b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/external_pr/__pycache__/textual_inversion.cpython-310.pyc differ
diff --git a/extensions/Hypernetwork-MonkeyPatch-Extension/patches/external_pr/__pycache__/ui.cpython-310.pyc b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/external_pr/__pycache__/ui.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..99a128340651700bc6197bd2fec2dbb6bfac6d75
Binary files /dev/null and b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/external_pr/__pycache__/ui.cpython-310.pyc differ
diff --git a/extensions/Hypernetwork-MonkeyPatch-Extension/patches/external_pr/dadapt_test/install.py b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/external_pr/dadapt_test/install.py
new file mode 100644
index 0000000000000000000000000000000000000000..362e4534144524dc2ebcfbf237d7b82c28f005c0
--- /dev/null
+++ b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/external_pr/dadapt_test/install.py
@@ -0,0 +1,39 @@
+
+
+def install_or_import() -> bool:
+    try:
+        import pip
+        try:
+            import dadaptation
+        except (ModuleNotFoundError, ImportError):
+            print("Trying to install dadaptation...")
+            pip.main(['install', 'dadaptation'])
+            return True
+    except (ModuleNotFoundError, ImportError):
+        print("Cannot find pip!")
+        return False
+    return True
+
+
+def get_dadapt_adam(optimizer_name=None):
+    if install_or_import():
+        if optimizer_name is None or optimizer_name in ['DAdaptAdamW', 'AdamW', 'DAdaptAdam', 'Adam']:  # Adam-dadapt implementation
+            try:
+                from dadaptation.dadapt_adam import DAdaptAdam
+                return DAdaptAdam
+            except (ModuleNotFoundError, ImportError):
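+                # Each branch pairs one base optimizer with its D-Adaptation
+                # counterpart; any failed import falls through to the plain
+                # torch.optim.AdamW returned at the bottom of this function.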
print('Cannot use DAdaptAdam!') + elif optimizer_name == 'DAdaptSGD': + try: + from dadaptation.dadapt_sgd import DAdaptSGD + return DAdaptSGD + except (ModuleNotFoundError, ImportError): + print('Cannot use DAdaptSGD!') + elif optimizer_name == 'DAdaptAdagrad': + try: + from dadaptation.dadapt_adagrad import DAdaptAdaGrad + return DAdaptAdaGrad + except (ModuleNotFoundError, ImportError): + print('Cannot use DAdaptAdaGrad!') + from torch.optim import AdamW + return AdamW diff --git a/extensions/Hypernetwork-MonkeyPatch-Extension/patches/external_pr/dataset.py b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/external_pr/dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..a426dd52b4acc3cbf86ffe78b7880be74cf0172a --- /dev/null +++ b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/external_pr/dataset.py @@ -0,0 +1,265 @@ +# source:https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/4886/files + +import os +import sys + +import numpy as np +import PIL +import torch +from PIL import Image +from torch.utils.data import Dataset, DataLoader, Sampler +from torchvision import transforms + +from ..hnutil import get_closest +from collections import defaultdict +from random import Random +import tqdm +from modules import devices, shared +import re + +from ldm.modules.distributions.distributions import DiagonalGaussianDistribution + +re_numbers_at_start = re.compile(r"^[-\d]+\s*") + +random_state_manager = Random(None) +shuffle = random_state_manager.shuffle +choice = random_state_manager.choice +choices = random_state_manager.choices +randrange = random_state_manager.randrange + + +def set_rng(seed=None): + random_state_manager.seed(seed) + + +class DatasetEntry: + def __init__(self, filename=None, filename_text=None, latent_dist=None, latent_sample=None, cond=None, + cond_text=None, pixel_values=None, weight=None): + self.filename = filename + self.filename_text = filename_text + self.latent_dist = latent_dist + self.latent_sample = latent_sample + self.cond = cond + self.cond_text = cond_text + self.pixel_values = pixel_values + self.weight = weight + + +class PersonalizedBase(Dataset): + def __init__(self, data_root, width, height, repeats, flip_p=0.5, placeholder_token="*", model=None, + cond_model=None, device=None, template_file=None, include_cond=False, batch_size=1, gradient_step=1, + shuffle_tags=False, tag_drop_out=0, latent_sampling_method='once', latent_sampling_std=-1, manual_seed=-1, use_weight=False): + re_word = re.compile(shared.opts.dataset_filename_word_regex) if len( + shared.opts.dataset_filename_word_regex) > 0 else None + if manual_seed == -1: + seed = randrange(sys.maxsize) + set_rng(seed) # reset forked RNG state when we create dataset. 
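+            # Logging the generated seed keeps unseeded runs reproducible:
+            # passing the printed value back in as manual_seed replays the
+            # same tag shuffling, tag drop-out and random latent sampling.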
+ print(f"Dataset seed was set to f{seed}") + else: + set_rng(manual_seed) + print(f"Dataset seed was set to f{manual_seed}") + self.placeholder_token = placeholder_token + + self.width = width + self.height = height + self.flip = transforms.RandomHorizontalFlip(p=flip_p) + + self.dataset = [] + + with open(template_file, "r") as file: + lines = [x.strip() for x in file.readlines()] + + self.lines = lines + + assert data_root, 'dataset directory not specified' + assert os.path.isdir(data_root), "Dataset directory doesn't exist" + assert os.listdir(data_root), "Dataset directory is empty" + + self.image_paths = [os.path.join(data_root, file_path) for file_path in os.listdir(data_root)] # We assert batch size > 1 can work, by having multiple same-size images + # But note that we can't stack tensors with other size. so it's not working now. + self.shuffle_tags = shuffle_tags + self.tag_drop_out = tag_drop_out + groups = defaultdict(list) + + print("Preparing dataset...") + _i = 0 + for path in tqdm.tqdm(self.image_paths): + if shared.state.interrupted: + raise Exception("inturrupted") + try: # apply variable size here + image = Image.open(path).convert('RGB') + w, h = image.size + r = max(1, w / self.width, h / self.height) # divide by this + amp = min(self.width / w, self.height / h) # if amp < 1, then ignore, else, multiply. + if amp > 1: + w, h = w * amp, h * amp + w, h = int(w/r), int(h/r) + w, h = get_closest(w), get_closest(h) + image = image.resize((w,h), PIL.Image.LANCZOS) + except Exception: + continue + + text_filename = os.path.splitext(path)[0] + ".txt" + filename = os.path.basename(path) + + if os.path.exists(text_filename): + with open(text_filename, "r", encoding="utf8") as file: + filename_text = file.read() + else: + filename_text = os.path.splitext(filename)[0] + filename_text = re.sub(re_numbers_at_start, '', filename_text) + if re_word: + tokens = re_word.findall(filename_text) + filename_text = (shared.opts.dataset_filename_join_string or "").join(tokens) + + npimage = np.array(image).astype(np.uint8) + npimage = (npimage / 127.5 - 1.0).astype(np.float32) + + torchdata = torch.from_numpy(npimage).permute(2, 0, 1).to(device=device, dtype=torch.float32) + + with torch.autocast("cuda"): + latent_dist = model.encode_first_stage(torchdata.unsqueeze(dim=0)) + latent_sample = model.get_first_stage_encoding(latent_dist).squeeze().to(devices.cpu) + weight = torch.ones_like(latent_sample) + if latent_sampling_method == "once" or ( + latent_sampling_method == "deterministic" and not isinstance(latent_dist, + DiagonalGaussianDistribution)): + latent_sampling_method = "once" + entry = DatasetEntry(filename=path, filename_text=filename_text, latent_sample=latent_sample) + elif latent_sampling_method == "deterministic": + # Works only for DiagonalGaussianDistribution + latent_dist.std = 0 + entry = DatasetEntry(filename=path, filename_text=filename_text, latent_sample=latent_sample) + elif latent_sampling_method == "random": + if latent_sampling_std != -1: + assert latent_sampling_std > 0, f"Cannnot apply negative standard deviation {latent_sampling_std}" + print(f"Applying patch, clipping std from {torch.max(latent_dist.std).item()} to {latent_sampling_std}...") + latent_dist.std.clip_(latent_sampling_std) + entry = DatasetEntry(filename=path, filename_text=filename_text, latent_dist=latent_dist) + else: + raise RuntimeError("Entry was undefined because of undefined latent sampling method!") + alpha_channel = None + if use_weight and 'A' in image.getbands(): + alpha_channel = 
image.getchannel('A') + if use_weight and alpha_channel is not None: + channels, *latent_size = latent_sample.shape + weight_img = alpha_channel.resize(latent_size) + npweight = np.array(weight_img).astype(np.float32) + #Repeat for every channel in the latent sample + weight = torch.tensor([npweight] * channels).reshape([channels] + latent_size) + #Normalize the weight to a minimum of 0 and a mean of 1, that way the loss will be comparable to default. + weight -= weight.min() + weight /= weight.mean() + elif use_weight: + #If an image does not have a alpha channel, add a ones weight map anyway so we can stack it later + weight = torch.ones_like(latent_sample) + entry.weight = weight + if not (self.tag_drop_out != 0 or self.shuffle_tags): + entry.cond_text = self.create_text(filename_text) + + if include_cond and not (self.tag_drop_out != 0 or self.shuffle_tags): + with torch.autocast("cuda"): + entry.cond = cond_model([entry.cond_text]).to(devices.cpu).squeeze(0) + groups[image.size].append(_i) #record indexes of images in dataset into group. When we pull batch, try using single group to make torch.stack work. + _i += 1 + self.dataset.append(entry) + del torchdata + del latent_dist + del latent_sample + self.groups = list(groups.values()) + self.length = len(self.dataset) + assert self.length > 0, "No images have been found in the dataset." + self.batch_size = min(batch_size, self.length) + self.gradient_step = min(gradient_step, self.length // self.batch_size) + self.latent_sampling_method = latent_sampling_method + + def create_text(self, filename_text): + text = choice(self.lines) + tags = filename_text.split(',') + if self.tag_drop_out != 0: + tags = [t for t in tags if random_state_manager.random() > self.tag_drop_out] + if self.shuffle_tags: + shuffle(tags) + text = text.replace("[filewords]", ','.join(tags)) + text = text.replace("[name]", self.placeholder_token) + return text + + def __len__(self): + return self.length + + def __getitem__(self, i): + entry = self.dataset[i] + if self.tag_drop_out != 0 or self.shuffle_tags: + entry.cond_text = self.create_text(entry.filename_text) + if self.latent_sampling_method == "random": + entry.latent_sample = shared.sd_model.get_first_stage_encoding(entry.latent_dist).to(devices.cpu) + if entry.weight is None: + entry.weight = torch.ones_like(entry.latent_sample) + return entry + +class GroupedBatchSampler(Sampler): + # See https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/6620 + def __init__(self, data_source: PersonalizedBase, batch_size: int): + n = len(data_source) + self.groups = data_source.groups + self.len = n_batch = n // batch_size + expected = [len(g) / n * n_batch * batch_size for g in data_source.groups] + self.base = [int(e) // batch_size for e in expected] + self.n_rand_batches = n_batch - sum(self.base) + self.probs = [e % batch_size/self.n_rand_batches/batch_size if self.n_rand_batches > 0 else 0 for e in expected] + self.batch_size = batch_size + + + def __len__(self): + return self.len + + def __iter__(self): + b = self.batch_size + batches = [] + for g in self.groups: + shuffle(g) + batches.extend(g[i*b:(i+1)*b] for i in range(len(g) // b)) + for _ in range(self.n_rand_batches): + rand_group = choices(self.groups, self.probs)[0] + batches.append(choices(rand_group, k=b)) + shuffle(batches) + yield from batches + +class PersonalizedDataLoader(DataLoader): + def __init__(self, dataset, latent_sampling_method="once", batch_size=1, pin_memory=False): + super(PersonalizedDataLoader, self).__init__(dataset, 
batch_sampler=GroupedBatchSampler(dataset, batch_size), pin_memory=pin_memory) + if latent_sampling_method == "random": + self.collate_fn = collate_wrapper_random + else: + self.collate_fn = collate_wrapper + + +class BatchLoader: + def __init__(self, data): + self.cond_text = [entry.cond_text for entry in data] + self.cond = [entry.cond for entry in data] + self.latent_sample = torch.stack([entry.latent_sample for entry in data]).squeeze(1) + self.weight = torch.stack([entry.weight for entry in data]).squeeze(1) + self.filename = [entry.filename for entry in data] + # self.emb_index = [entry.emb_index for entry in data] + # print(self.latent_sample.device) + + def pin_memory(self): + self.latent_sample = self.latent_sample.pin_memory() + return self + + +def collate_wrapper(batch): + return BatchLoader(batch) + + +class BatchLoaderRandom(BatchLoader): + def __init__(self, data): + super().__init__(data) + + def pin_memory(self): + return self + + +def collate_wrapper_random(batch): + return BatchLoaderRandom(batch) diff --git a/extensions/Hypernetwork-MonkeyPatch-Extension/patches/external_pr/hypernetwork.py b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/external_pr/hypernetwork.py new file mode 100644 index 0000000000000000000000000000000000000000..8de307a84418169bdff683b19d931a65d01c0f7b --- /dev/null +++ b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/external_pr/hypernetwork.py @@ -0,0 +1,1163 @@ +import datetime +import gc +import html +import json +import os +import sys +import time +import traceback +from collections import defaultdict, deque + +import torch +import tqdm + +from modules import shared, sd_models, devices, processing, sd_samplers +from modules.hypernetworks.hypernetwork import optimizer_dict, stack_conds, save_hypernetwork +from modules.textual_inversion import textual_inversion +from modules.textual_inversion.learn_schedule import LearnRateScheduler +from ..tbutils import tensorboard_setup, tensorboard_add, tensorboard_add_image, tensorboard_log_hyperparameter +from .textual_inversion import validate_train_inputs, write_loss +from ..hypernetwork import Hypernetwork, load_hypernetwork +from . 
import sd_hijack_checkpoint
+from ..hnutil import optim_to
+from ..ui import create_hypernetwork_load
+from ..scheduler import CosineAnnealingWarmUpRestarts
+from .dataset import PersonalizedBase, PersonalizedDataLoader
+from ..ddpm_hijack import set_scheduler
+
+
+def get_lr_from_optimizer(optimizer: torch.optim.Optimizer):
+    return optimizer.param_groups[0].get('d', 1) * optimizer.param_groups[0].get('lr', 1)
+
+
+def set_accessible(obj):
+    setattr(shared, 'accessible_hypernetwork', obj)
+    if hasattr(shared, 'loaded_hypernetworks'):
+        shared.loaded_hypernetworks.clear()
+        shared.loaded_hypernetworks = [obj]
+
+
+def remove_accessible():
+    delattr(shared, 'accessible_hypernetwork')
+    if hasattr(shared, 'loaded_hypernetworks'):
+        shared.loaded_hypernetworks.clear()
+
+def get_training_option(filename):
+    print(filename)
+    if os.path.exists(os.path.join(shared.cmd_opts.hypernetwork_dir, filename)) and os.path.isfile(os.path.join(shared.cmd_opts.hypernetwork_dir, filename)):
+        filename = os.path.join(shared.cmd_opts.hypernetwork_dir, filename)
+    elif os.path.exists(filename) and os.path.isfile(filename):
+        pass  # use the path exactly as given
+    elif os.path.exists(os.path.join(shared.cmd_opts.hypernetwork_dir, filename + '.json')) and os.path.isfile(os.path.join(shared.cmd_opts.hypernetwork_dir, filename + '.json')):
+        filename = os.path.join(shared.cmd_opts.hypernetwork_dir, filename + '.json')
+    else:
+        return False
+    print(f"Loading setting from {filename}!")
+    with open(filename, 'r') as file:
+        obj = json.load(file)
+    return obj
+
+
+def prepare_training_hypernetwork(hypernetwork_name, learn_rate=0.1, use_adamw_parameter=False, use_dadaptation=False, dadapt_growth_factor=-1, **adamW_kwarg_dict):
+    """Return the hypernetwork object bound to its freshly built optimizer."""
+    hypernetwork = load_hypernetwork(hypernetwork_name)
+    assert hypernetwork is not None, f"Cannot load {hypernetwork_name}!"
+    hypernetwork.to(devices.device)
+    if not isinstance(hypernetwork, Hypernetwork):
+        raise RuntimeError("Cannot perform training for Hypernetwork structure pipeline!")
+    set_accessible(hypernetwork)
+    weights = hypernetwork.weights(True)
+    hypernetwork_name = hypernetwork_name.rsplit('(', 1)[0]
+    filename = os.path.join(shared.cmd_opts.hypernetwork_dir, f'{hypernetwork_name}.pt')
+    # Here we use the optimizer from the saved HN, or one specified as a UI option.
+    if hypernetwork.optimizer_name == 'DAdaptAdamW':
+        use_dadaptation = True
+    optimizer = None
+    optimizer_name = 'AdamW'
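+    # Selection logic below, in order: a saved 'DAdaptAdamW' name (or the UI
+    # flag) opts into D-Adaptation when the package imports cleanly; other
+    # saved names fall back to the matching torch optimizer in optimizer_dict;
+    # plain AdamW is the final default. Note that with D-Adaptation the
+    # nominal lr is just a multiplier on the adapted step size 'd', which is
+    # why get_lr_from_optimizer reports d * lr.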
+ if hypernetwork.optimizer_name in optimizer_dict: + if use_adamw_parameter: + if hypernetwork.optimizer_name != 'AdamW' and hypernetwork.optimizer_name != 'DAdaptAdamW': + raise NotImplementedError(f"Cannot use adamW paramters for optimizer {hypernetwork.optimizer_name}!") + if use_dadaptation: + from .dadapt_test.install import get_dadapt_adam + optim_class = get_dadapt_adam(hypernetwork.optimizer_name) + if optim_class != torch.optim.AdamW: + print('Optimizer class is ' + str(optim_class)) + optimizer = optim_class(params=weights, lr=learn_rate, decouple=True, growth_rate = float('inf') if dadapt_growth_factor < 0 else dadapt_growth_factor, **adamW_kwarg_dict) + hypernetwork.optimizer_name = 'DAdaptAdamW' + else: + optimizer = torch.optim.AdamW(params=weights, lr=learn_rate, **adamW_kwarg_dict) + else: + optimizer = torch.optim.AdamW(params=weights, lr=learn_rate, **adamW_kwarg_dict) + else: + optimizer = optimizer_dict[hypernetwork.optimizer_name](params=weights, lr=learn_rate) + optimizer_name = hypernetwork.optimizer_name + else: + print(f"Optimizer type {hypernetwork.optimizer_name} is not defined!") + if use_dadaptation: + from .dadapt_test.install import get_dadapt_adam + optim_class = get_dadapt_adam(hypernetwork.optimizer_name) + if optim_class != torch.optim.AdamW: + optimizer = optim_class(params=weights, lr=learn_rate, decouple=True, growth_rate = float('inf') if dadapt_growth_factor < 0 else dadapt_growth_factor, **adamW_kwarg_dict) + optimizer_name = 'DAdaptAdamW' + hypernetwork.optimizer_name = 'DAdaptAdamW' + if optimizer is None: + optimizer = torch.optim.AdamW(params=weights, lr=learn_rate, **adamW_kwarg_dict) + optimizer_name = 'AdamW' + if hypernetwork.optimizer_state_dict: # This line must be changed if Optimizer type can be different from saved optimizer. + try: + optimizer.load_state_dict(hypernetwork.optimizer_state_dict) + optim_to(optimizer, devices.device) + print('Loaded optimizer successfully!') + except RuntimeError as e: + print("Cannot resume from saved optimizer!") + print(e) + + return hypernetwork, optimizer, weights, optimizer_name + +def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradient_step, data_root, log_directory, + training_width, training_height, steps, shuffle_tags, tag_drop_out, latent_sampling_method, + create_image_every, save_hypernetwork_every, template_file, preview_from_txt2img, preview_prompt, + preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, + preview_width, preview_height, + use_beta_scheduler=False, beta_repeat_epoch=4000, epoch_mult=1, warmup=10, min_lr=1e-7, + gamma_rate=1, save_when_converge=False, create_when_converge=False, + move_optimizer=True, + use_adamw_parameter=False, adamw_weight_decay=0.01, adamw_beta_1=0.9, adamw_beta_2=0.99, + adamw_eps=1e-8, + use_grad_opts=False, gradient_clip_opt='None', optional_gradient_clip_value=1e01, + optional_gradient_norm_type=2, latent_sampling_std=-1, + noise_training_scheduler_enabled=False, noise_training_scheduler_repeat=False, noise_training_scheduler_cycle=128, + load_training_options='', loss_opt='loss_simple', use_dadaptation=False, dadapt_growth_factor=-1, use_weight=False + ): + # images allows training previews to have infotext. Importing it at the top causes a circular import problem. 
+ from modules import images + if load_training_options != '': + dump: dict = get_training_option(load_training_options) + if dump and dump is not None: + print(f"Loading from {load_training_options}") + learn_rate = dump['learn_rate'] + batch_size = dump['batch_size'] + gradient_step = dump['gradient_step'] + training_width = dump['training_width'] + training_height = dump['training_height'] + steps = dump['steps'] + shuffle_tags = dump['shuffle_tags'] + tag_drop_out = dump['tag_drop_out'] + save_when_converge = dump['save_when_converge'] + create_when_converge = dump['create_when_converge'] + latent_sampling_method = dump['latent_sampling_method'] + template_file = dump['template_file'] + use_beta_scheduler = dump['use_beta_scheduler'] + beta_repeat_epoch = dump['beta_repeat_epoch'] + epoch_mult = dump['epoch_mult'] + warmup = dump['warmup'] + min_lr = dump['min_lr'] + gamma_rate = dump['gamma_rate'] + use_adamw_parameter = dump['use_beta_adamW_checkbox'] + adamw_weight_decay = dump['adamw_weight_decay'] + adamw_beta_1 = dump['adamw_beta_1'] + adamw_beta_2 = dump['adamw_beta_2'] + adamw_eps = dump['adamw_eps'] + use_grad_opts = dump['show_gradient_clip_checkbox'] + gradient_clip_opt = dump['gradient_clip_opt'] + optional_gradient_clip_value = dump['optional_gradient_clip_value'] + optional_gradient_norm_type = dump['optional_gradient_norm_type'] + latent_sampling_std = dump.get('latent_sampling_std', -1) + noise_training_scheduler_enabled = dump.get('noise_training_scheduler_enabled', False) + noise_training_scheduler_repeat = dump.get('noise_training_scheduler_repeat', False) + noise_training_scheduler_cycle = dump.get('noise_training_scheduler_cycle', 128) + loss_opt = dump.get('loss_opt', 'loss_simple') + use_dadaptation = dump.get('use_dadaptation', False) + dadapt_growth_factor = dump.get('dadapt_growth_factor', -1) + use_weight = dump.get('use_weight', False) + try: + if use_adamw_parameter: + adamw_weight_decay, adamw_beta_1, adamw_beta_2, adamw_eps = [float(x) for x in + [adamw_weight_decay, adamw_beta_1, + adamw_beta_2, adamw_eps]] + assert 0 <= adamw_weight_decay, "Weight decay paramter should be larger or equal than zero!" + assert (all(0 <= x <= 1 for x in [adamw_beta_1, adamw_beta_2, + adamw_eps])), "Cannot use negative or >1 number for adamW parameters!" + adamW_kwarg_dict = { + 'weight_decay': adamw_weight_decay, + 'betas': (adamw_beta_1, adamw_beta_2), + 'eps': adamw_eps + } + print('Using custom AdamW parameters') + else: + adamW_kwarg_dict = { + 'weight_decay': 0.01, + 'betas': (0.9, 0.99), + 'eps': 1e-8 + } + if use_beta_scheduler: + print("Using Beta Scheduler") + beta_repeat_epoch = int(float(beta_repeat_epoch)) + assert beta_repeat_epoch > 0, f"Cannot use too small cycle {beta_repeat_epoch}!" + min_lr = float(min_lr) + assert min_lr < 1, f"Cannot use minimum lr with {min_lr}!" + gamma_rate = float(gamma_rate) + print(f"Using learn rate decay(per cycle) of {gamma_rate}") + assert 0 <= gamma_rate <= 1, f"Cannot use gamma rate with {gamma_rate}!" + epoch_mult = float(epoch_mult) + assert 1 <= epoch_mult, "Cannot use epoch multiplier smaller than 1!" + warmup = int(float(warmup)) + assert warmup >= 1, "Warmup epoch should be larger than 0!" 
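+            # Assuming the usual warm-restart behaviour of the imported
+            # CosineAnnealingWarmUpRestarts: with the values validated here
+            # the LR climbs from min_lr for `warmup` steps at the start of
+            # each `beta_repeat_epoch`-step cycle, cosine-decays back toward
+            # min_lr, and gamma_rate < 1 would shrink the peak every cycle.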
+ print(f"Save when converges : {save_when_converge}") + print(f"Generate image when converges : {create_when_converge}") + else: + beta_repeat_epoch = 4000 + epoch_mult = 1 + warmup = 10 + min_lr = 1e-7 + gamma_rate = 1 + save_when_converge = False + create_when_converge = False + except ValueError as e: + raise RuntimeError("Cannot use advanced LR scheduler settings! "+ str(e)) + if noise_training_scheduler_enabled: + set_scheduler(noise_training_scheduler_cycle, noise_training_scheduler_repeat, True) + print(f"Noise training scheduler is now ready for {noise_training_scheduler_cycle}, {noise_training_scheduler_repeat}!") + else: + set_scheduler(-1, False, False) + if use_grad_opts and gradient_clip_opt != "None": + try: + optional_gradient_clip_value = float(optional_gradient_clip_value) + except ValueError: + raise RuntimeError(f"Cannot convert invalid gradient clipping value {optional_gradient_clip_value})") + if gradient_clip_opt == "Norm": + try: + grad_norm = int(float(optional_gradient_norm_type)) + except ValueError: + raise RuntimeError(f"Cannot convert invalid gradient norm type {optional_gradient_norm_type})") + assert grad_norm >= 0, f"P-norm cannot be calculated from negative number {grad_norm}" + print( + f"Using gradient clipping by Norm, norm type {optional_gradient_norm_type}, norm limit {optional_gradient_clip_value}") + + def gradient_clipping(arg1): + torch.nn.utils.clip_grad_norm_(arg1, optional_gradient_clip_value, optional_gradient_norm_type) + return + else: + print(f"Using gradient clipping by Value, limit {optional_gradient_clip_value}") + + def gradient_clipping(arg1): + torch.nn.utils.clip_grad_value_(arg1, optional_gradient_clip_value) + return + else: + def gradient_clipping(arg1): + return + save_hypernetwork_every = save_hypernetwork_every or 0 + create_image_every = create_image_every or 0 + if not os.path.isfile(template_file): + template_file = textual_inversion.textual_inversion_templates.get(template_file, None) + if template_file is not None: + template_file = template_file.path + else: + raise AssertionError(f"Cannot find {template_file}!") + validate_train_inputs(hypernetwork_name, learn_rate, batch_size, gradient_step, data_root, template_file, steps, save_hypernetwork_every, create_image_every, log_directory, name="hypernetwork") + shared.state.job = "train-hypernetwork" + shared.state.textinfo = "Initializing hypernetwork training..." 
+ shared.state.job_count = steps + tmp_scheduler = LearnRateScheduler(learn_rate, steps, 0) + hypernetwork, optimizer, weights, optimizer_name = prepare_training_hypernetwork(hypernetwork_name, tmp_scheduler.learn_rate, use_adamw_parameter, use_dadaptation,dadapt_growth_factor, **adamW_kwarg_dict) + + hypernetwork_name = hypernetwork_name.rsplit('(', 1)[0] + filename = os.path.join(shared.cmd_opts.hypernetwork_dir, f'{hypernetwork_name}.pt') + + log_directory = os.path.join(log_directory, datetime.datetime.now().strftime("%Y-%m-%d"), hypernetwork_name) + unload = shared.opts.unload_models_when_training + + if save_hypernetwork_every > 0 or save_when_converge: + hypernetwork_dir = os.path.join(log_directory, "hypernetworks") + os.makedirs(hypernetwork_dir, exist_ok=True) + else: + hypernetwork_dir = None + + if create_image_every > 0 or create_when_converge: + images_dir = os.path.join(log_directory, "images") + os.makedirs(images_dir, exist_ok=True) + else: + images_dir = None + + checkpoint = sd_models.select_checkpoint() + + initial_step = hypernetwork.step or 0 + if initial_step >= steps: + shared.state.textinfo = f"Model has already been trained beyond specified max steps" + return hypernetwork, filename + + scheduler = LearnRateScheduler(learn_rate, steps, initial_step) + if shared.opts.training_enable_tensorboard: + print("Tensorboard logging enabled") + tensorboard_writer = tensorboard_setup(log_directory) + else: + tensorboard_writer = None + # dataset loading may take a while, so input validations and early returns should be done before this + shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..." + detach_grad = shared.opts.disable_ema # test code that removes EMA + if detach_grad: + print("Disabling training for staged models!") + shared.sd_model.cond_stage_model.requires_grad_(False) + shared.sd_model.first_stage_model.requires_grad_(False) + torch.cuda.empty_cache() + pin_memory = shared.opts.pin_memory + + ds = PersonalizedBase(data_root=data_root, width=training_width, + height=training_height, + repeats=shared.opts.training_image_repeats_per_epoch, + placeholder_token=hypernetwork_name, model=shared.sd_model, + cond_model=shared.sd_model.cond_stage_model, + device=devices.device, template_file=template_file, + include_cond=True, batch_size=batch_size, + gradient_step=gradient_step, shuffle_tags=shuffle_tags, + tag_drop_out=tag_drop_out, + latent_sampling_method=latent_sampling_method, + latent_sampling_std=latent_sampling_std, + use_weight=use_weight) + + latent_sampling_method = ds.latent_sampling_method + + dl = PersonalizedDataLoader(ds, latent_sampling_method=latent_sampling_method, + batch_size=ds.batch_size, pin_memory=pin_memory) + old_parallel_processing_allowed = shared.parallel_processing_allowed + + if unload: + shared.parallel_processing_allowed = False + shared.sd_model.cond_stage_model.to(devices.cpu) + shared.sd_model.first_stage_model.to(devices.cpu) + + if use_beta_scheduler: + scheduler_beta = CosineAnnealingWarmUpRestarts(optimizer=optimizer, first_cycle_steps=beta_repeat_epoch, + cycle_mult=epoch_mult, max_lr=scheduler.learn_rate, + warmup_steps=warmup, min_lr=min_lr, gamma=gamma_rate) + scheduler_beta.last_epoch = hypernetwork.step - 1 + else: + scheduler_beta = None + for pg in optimizer.param_groups: + pg['lr'] = scheduler.learn_rate + scaler = torch.cuda.amp.GradScaler() + + batch_size = ds.batch_size + gradient_step = ds.gradient_step + # n steps = batch_size * gradient_step * n image processed + steps_per_epoch = len(ds) // 
batch_size // gradient_step + max_steps_per_epoch = len(ds) // batch_size - (len(ds) // batch_size) % gradient_step + loss_step = 0 + _loss_step = 0 # internal + # size = len(ds.indexes) + loss_dict = defaultdict(lambda: deque(maxlen=1024)) + # losses = torch.zeros((size,)) + # previous_mean_losses = [0] + # previous_mean_loss = 0 + # print("Mean loss of {} elements".format(size)) + + steps_without_grad = 0 + + last_saved_file = "" + last_saved_image = "" + forced_filename = "" + if hasattr(sd_hijack_checkpoint, 'add'): + sd_hijack_checkpoint.add() + pbar = tqdm.tqdm(total=steps - initial_step) + try: + for i in range((steps - initial_step) * gradient_step): + if scheduler.finished or hypernetwork.step > steps: + break + if shared.state.interrupted: + break + for j, batch in enumerate(dl): + # works as a drop_last=True for gradient accumulation + if j == max_steps_per_epoch: + break + if use_beta_scheduler: + scheduler_beta.step(hypernetwork.step) + else: + scheduler.apply(optimizer, hypernetwork.step) + if scheduler.finished: + break + if shared.state.interrupted: + break + + with torch.autocast("cuda"): + x = batch.latent_sample.to(devices.device, non_blocking=pin_memory) + if use_weight: + w = batch.weight.to(devices.device, non_blocking=pin_memory) + if tag_drop_out != 0 or shuffle_tags: + shared.sd_model.cond_stage_model.to(devices.device) + c = shared.sd_model.cond_stage_model(batch.cond_text).to(devices.device, + non_blocking=pin_memory) + shared.sd_model.cond_stage_model.to(devices.cpu) + else: + c = stack_conds(batch.cond).to(devices.device, non_blocking=pin_memory) + if use_weight: + loss = shared.sd_model.weighted_forward(x, c, w)[0] + else: + _, losses = shared.sd_model.forward(x, c) + loss = losses['val/' + loss_opt] + for filenames in batch.filename: + loss_dict[filenames].append(loss.detach().item()) + loss /= gradient_step + assert not torch.isnan(loss), "Loss is NaN" + del x + del c + + _loss_step += loss.item() + scaler.scale(loss).backward() + batch.latent_sample.to(devices.cpu) + # go back until we reach gradient accumulation steps + if (j + 1) % gradient_step != 0: + continue + gradient_clipping(weights) + # print(f"grad:{weights[0].grad.detach().cpu().abs().mean().item():.7f}") + # scaler.unscale_(optimizer) + # print(f"grad:{weights[0].grad.detach().cpu().abs().mean().item():.15f}") + # torch.nn.utils.clip_grad_norm_(weights, max_norm=1.0) + # print(f"grad:{weights[0].grad.detach().cpu().abs().mean().item():.15f}") + try: + scaler.step(optimizer) + except AssertionError: + optimizer.param_groups[0]['capturable'] = True + scaler.step(optimizer) + scaler.update() + hypernetwork.step += 1 + pbar.update() + optimizer.zero_grad(set_to_none=True) + loss_step = _loss_step + _loss_step = 0 + + steps_done = hypernetwork.step + 1 + + epoch_num = hypernetwork.step // steps_per_epoch + epoch_step = hypernetwork.step % steps_per_epoch + + description = f"Training hypernetwork [Epoch {epoch_num}: {epoch_step + 1}/{steps_per_epoch}]loss: {loss_step:.7f}" + pbar.set_description(description) + if hypernetwork_dir is not None and ( + (use_beta_scheduler and scheduler_beta.is_EOC(hypernetwork.step) and save_when_converge) or ( + save_hypernetwork_every > 0 and steps_done % save_hypernetwork_every == 0)): + # Before saving, change name to match current checkpoint. 
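+                    # (The optimizer state is attached only for this save and
+                    # dereferenced right below; AdamW keeps two moment tensors
+                    # per parameter, so leaving it attached would roughly
+                    # triple the checkpoint size.)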
+ hypernetwork_name_every = f'{hypernetwork_name}-{steps_done}' + last_saved_file = os.path.join(hypernetwork_dir, f'{hypernetwork_name_every}.pt') + hypernetwork.optimizer_name = optimizer_name + if shared.opts.save_optimizer_state: + hypernetwork.optimizer_state_dict = optimizer.state_dict() + save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, last_saved_file) + hypernetwork.optimizer_state_dict = None # dereference it after saving, to save memory. + + write_loss(log_directory, "hypernetwork_loss.csv", hypernetwork.step, steps_per_epoch, + { + "loss": f"{loss_step:.7f}", + "learn_rate": get_lr_from_optimizer(optimizer) + }) + if shared.opts.training_enable_tensorboard: + epoch_num = hypernetwork.step // len(ds) + epoch_step = hypernetwork.step - (epoch_num * len(ds)) + 1 + mean_loss = sum(sum(x) for x in loss_dict.values()) / sum(len(x) for x in loss_dict.values()) + tensorboard_add(tensorboard_writer, loss=mean_loss, global_step=hypernetwork.step, step=epoch_step, + learn_rate=scheduler.learn_rate if not use_beta_scheduler else + get_lr_from_optimizer(optimizer), epoch_num=epoch_num) + if images_dir is not None and ( + use_beta_scheduler and scheduler_beta.is_EOC(hypernetwork.step) and create_when_converge) or ( + create_image_every > 0 and steps_done % create_image_every == 0): + set_scheduler(-1, False, False) + forced_filename = f'{hypernetwork_name}-{steps_done}' + last_saved_image = os.path.join(images_dir, forced_filename) + rng_state = torch.get_rng_state() + cuda_rng_state = None + if torch.cuda.is_available(): + cuda_rng_state = torch.cuda.get_rng_state_all() + hypernetwork.eval() + if move_optimizer: + optim_to(optimizer, devices.cpu) + gc.collect() + shared.sd_model.cond_stage_model.to(devices.device) + shared.sd_model.first_stage_model.to(devices.device) + + p = processing.StableDiffusionProcessingTxt2Img( + sd_model=shared.sd_model, + do_not_save_grid=True, + do_not_save_samples=True, + ) + if hasattr(p, 'disable_extra_networks'): + p.disable_extra_networks = True + is_patched = True + else: + is_patched = False + if preview_from_txt2img: + p.prompt = preview_prompt + (hypernetwork.extra_name() if not is_patched else "") + print(p.prompt) + p.negative_prompt = preview_negative_prompt + p.steps = preview_steps + p.sampler_name = sd_samplers.samplers[preview_sampler_index].name + p.cfg_scale = preview_cfg_scale + p.seed = preview_seed + p.width = preview_width + p.height = preview_height + else: + p.prompt = batch.cond_text[0] + (hypernetwork.extra_name() if not is_patched else "") + p.steps = 20 + p.width = training_width + p.height = training_height + + preview_text = p.prompt + + processed = processing.process_images(p) + image = processed.images[0] if len(processed.images) > 0 else None + if shared.opts.training_enable_tensorboard and shared.opts.training_tensorboard_save_images: + tensorboard_add_image(tensorboard_writer, f"Validation at epoch {epoch_num}", image, + hypernetwork.step) + + if unload: + shared.sd_model.cond_stage_model.to(devices.cpu) + shared.sd_model.first_stage_model.to(devices.cpu) + torch.set_rng_state(rng_state) + if torch.cuda.is_available(): + torch.cuda.set_rng_state_all(cuda_rng_state) + hypernetwork.train() + if move_optimizer: + optim_to(optimizer, devices.device) + if noise_training_scheduler_enabled: + set_scheduler(noise_training_scheduler_cycle, noise_training_scheduler_repeat, True) + if image is not None: + if hasattr(shared.state, 'assign_current_image'): + shared.state.assign_current_image(image) + else: + 
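+                            # Older webui builds have no assign_current_image();
+                            # fall back to setting the preview attribute directly.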
shared.state.current_image = image + last_saved_image, last_text_info = images.save_image(image, images_dir, "", p.seed, p.prompt, + shared.opts.samples_format, + processed.infotexts[0], p=p, + forced_filename=forced_filename, + save_to_dirs=False) + last_saved_image += f", prompt: {preview_text}" + set_accessible(hypernetwork) + + shared.state.job_no = hypernetwork.step + + shared.state.textinfo = f""" +

+<p>
+Loss: {loss_step:.7f}<br/>
+Step: {steps_done}<br/>
+Last prompt: {html.escape(batch.cond_text[0])}<br/>
+Last saved hypernetwork: {html.escape(last_saved_file)}<br/>
+Last saved image: {html.escape(last_saved_image)}<br/>
+</p>

+""" + except Exception: + print(traceback.format_exc(), file=sys.stderr) + finally: + pbar.leave = False + pbar.close() + if hypernetwork is not None: + hypernetwork.eval() + shared.parallel_processing_allowed = old_parallel_processing_allowed + if hasattr(sd_hijack_checkpoint, 'remove'): + sd_hijack_checkpoint.remove() + set_scheduler(-1, False, False) + remove_accessible() + gc.collect() + torch.cuda.empty_cache() + filename = os.path.join(shared.cmd_opts.hypernetwork_dir, f'{hypernetwork_name}.pt') + hypernetwork.optimizer_name = optimizer_name + if shared.opts.save_optimizer_state: + hypernetwork.optimizer_state_dict = optimizer.state_dict() + save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, filename) + del optimizer + hypernetwork.optimizer_state_dict = None # dereference it after saving, to save memory. + shared.sd_model.cond_stage_model.to(devices.device) + shared.sd_model.first_stage_model.to(devices.device) + + return hypernetwork, filename + + +def internal_clean_training(hypernetwork_name, data_root, log_directory, + create_image_every, save_hypernetwork_every, + preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, + preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height, + move_optimizer=True, + load_hypernetworks_option='', load_training_options='', manual_dataset_seed=-1, + setting_tuple=None): + # images allows training previews to have infotext. Importing it at the top causes a circular import problem. + from modules import images + base_hypernetwork_name = hypernetwork_name + manual_seed = int(manual_dataset_seed) + if setting_tuple is not None: + setting_suffix = f"_{setting_tuple[0]}_{setting_tuple[1]}" + else: + setting_suffix = time.strftime('%Y%m%d%H%M%S') + if load_hypernetworks_option != '': + dump_hyper: dict = get_training_option(load_hypernetworks_option) + hypernetwork_name = hypernetwork_name + setting_suffix + enable_sizes = dump_hyper['enable_sizes'] + overwrite_old = dump_hyper['overwrite_old'] + layer_structure = dump_hyper['layer_structure'] + activation_func = dump_hyper['activation_func'] + weight_init = dump_hyper['weight_init'] + add_layer_norm = dump_hyper['add_layer_norm'] + use_dropout = dump_hyper['use_dropout'] + dropout_structure = dump_hyper['dropout_structure'] + optional_info = dump_hyper['optional_info'] + weight_init_seed = dump_hyper['weight_init_seed'] + normal_std = dump_hyper['normal_std'] + skip_connection = dump_hyper['skip_connection'] + hypernetwork = create_hypernetwork_load(hypernetwork_name, enable_sizes, overwrite_old, layer_structure, + activation_func, weight_init, add_layer_norm, use_dropout, + dropout_structure, optional_info, weight_init_seed, normal_std, + skip_connection) + else: + hypernetwork = load_hypernetwork(hypernetwork_name) + hypernetwork_name = hypernetwork_name.rsplit('(', 1)[0] + setting_suffix + hypernetwork.save(os.path.join(shared.cmd_opts.hypernetwork_dir, f"{hypernetwork_name}.pt")) + shared.reload_hypernetworks() + hypernetwork = load_hypernetwork(hypernetwork_name) + if load_training_options != '': + dump: dict = get_training_option(load_training_options) + if dump and dump is not None: + learn_rate = dump['learn_rate'] + batch_size = dump['batch_size'] + gradient_step = dump['gradient_step'] + training_width = dump['training_width'] + training_height = dump['training_height'] + steps = dump['steps'] + shuffle_tags = dump['shuffle_tags'] + tag_drop_out = dump['tag_drop_out'] + save_when_converge = dump['save_when_converge'] + 
create_when_converge = dump['create_when_converge'] + latent_sampling_method = dump['latent_sampling_method'] + template_file = dump['template_file'] + use_beta_scheduler = dump['use_beta_scheduler'] + beta_repeat_epoch = dump['beta_repeat_epoch'] + epoch_mult = dump['epoch_mult'] + warmup = dump['warmup'] + min_lr = dump['min_lr'] + gamma_rate = dump['gamma_rate'] + use_adamw_parameter = dump['use_beta_adamW_checkbox'] + adamw_weight_decay = dump['adamw_weight_decay'] + adamw_beta_1 = dump['adamw_beta_1'] + adamw_beta_2 = dump['adamw_beta_2'] + adamw_eps = dump['adamw_eps'] + use_grad_opts = dump['show_gradient_clip_checkbox'] + gradient_clip_opt = dump['gradient_clip_opt'] + optional_gradient_clip_value = dump['optional_gradient_clip_value'] + optional_gradient_norm_type = dump['optional_gradient_norm_type'] + latent_sampling_std = dump.get('latent_sampling_std', -1) + noise_training_scheduler_enabled = dump.get('noise_training_scheduler_enabled', False) + noise_training_scheduler_repeat = dump.get('noise_training_scheduler_repeat', False) + noise_training_scheduler_cycle = dump.get('noise_training_scheduler_cycle', 128) + loss_opt = dump.get('loss_opt', 'loss_simple') + use_dadaptation = dump.get('use_dadaptation', False) + dadapt_growth_factor = dump.get('dadapt_growth_factor', -1) + use_weight = dump.get('use_weight', False) + else: + raise RuntimeError(f"Cannot load from {load_training_options}!") + else: + raise RuntimeError(f"Cannot load from {load_training_options}!") + try: + if use_adamw_parameter: + adamw_weight_decay, adamw_beta_1, adamw_beta_2, adamw_eps = [float(x) for x in + [adamw_weight_decay, adamw_beta_1, + adamw_beta_2, adamw_eps]] + assert 0 <= adamw_weight_decay, "Weight decay paramter should be larger or equal than zero!" + assert (all(0 <= x <= 1 for x in [adamw_beta_1, adamw_beta_2, + adamw_eps])), "Cannot use negative or >1 number for adamW parameters!" + adamW_kwarg_dict = { + 'weight_decay': adamw_weight_decay, + 'betas': (adamw_beta_1, adamw_beta_2), + 'eps': adamw_eps + } + print('Using custom AdamW parameters') + else: + adamW_kwarg_dict = { + 'weight_decay': 0.01, + 'betas': (0.9, 0.99), + 'eps': 1e-8 + } + if use_beta_scheduler: + print("Using Beta Scheduler") + beta_repeat_epoch = int(float(beta_repeat_epoch)) + assert beta_repeat_epoch > 0, f"Cannot use too small cycle {beta_repeat_epoch}!" + min_lr = float(min_lr) + assert min_lr < 1, f"Cannot use minimum lr with {min_lr}!" + gamma_rate = float(gamma_rate) + print(f"Using learn rate decay(per cycle) of {gamma_rate}") + assert 0 <= gamma_rate <= 1, f"Cannot use gamma rate with {gamma_rate}!" + epoch_mult = float(epoch_mult) + assert 1 <= epoch_mult, "Cannot use epoch multiplier smaller than 1!" + warmup = int(float(warmup)) + assert warmup >= 1, "Warmup epoch should be larger than 0!" 
+ print(f"Save when converges : {save_when_converge}") + print(f"Generate image when converges : {create_when_converge}") + else: + beta_repeat_epoch = 4000 + epoch_mult = 1 + warmup = 10 + min_lr = 1e-7 + gamma_rate = 1 + save_when_converge = False + create_when_converge = False + except ValueError: + raise RuntimeError("Cannot use advanced LR scheduler settings!") + if use_grad_opts and gradient_clip_opt != "None": + try: + optional_gradient_clip_value = float(optional_gradient_clip_value) + except ValueError: + raise RuntimeError(f"Cannot convert invalid gradient clipping value {optional_gradient_clip_value})") + if gradient_clip_opt == "Norm": + try: + grad_norm = int(float(optional_gradient_norm_type)) + except ValueError: + raise RuntimeError(f"Cannot convert invalid gradient norm type {optional_gradient_norm_type})") + assert grad_norm >= 0, f"P-norm cannot be calculated from negative number {grad_norm}" + print( + f"Using gradient clipping by Norm, norm type {optional_gradient_norm_type}, norm limit {optional_gradient_clip_value}") + + def gradient_clipping(arg1): + torch.nn.utils.clip_grad_norm_(arg1, optional_gradient_clip_value, optional_gradient_norm_type) + return + else: + print(f"Using gradient clipping by Value, limit {optional_gradient_clip_value}") + + def gradient_clipping(arg1): + torch.nn.utils.clip_grad_value_(arg1, optional_gradient_clip_value) + return + else: + def gradient_clipping(arg1): + return + if noise_training_scheduler_enabled: + set_scheduler(noise_training_scheduler_cycle, noise_training_scheduler_repeat, True) + print(f"Noise training scheduler is now ready for {noise_training_scheduler_cycle}, {noise_training_scheduler_repeat}!") + else: + set_scheduler(-1, False, False) + save_hypernetwork_every = save_hypernetwork_every or 0 + create_image_every = create_image_every or 0 + if not os.path.isfile(template_file): + template_file = textual_inversion.textual_inversion_templates.get(template_file, None) + if template_file is not None: + template_file = template_file.path + else: + raise AssertionError(f"Cannot find {template_file}!") + validate_train_inputs(hypernetwork_name, learn_rate, batch_size, gradient_step, data_root, template_file, steps, save_hypernetwork_every, create_image_every, log_directory, name="hypernetwork") + hypernetwork.to(devices.device) + assert hypernetwork is not None, f"Cannot load {hypernetwork_name}!" + if not isinstance(hypernetwork, Hypernetwork): + raise RuntimeError("Cannot perform training for Hypernetwork structure pipeline!") + set_accessible(hypernetwork) + shared.state.job = "train-hypernetwork" + shared.state.textinfo = "Initializing hypernetwork training..." 
+ shared.state.job_count = steps + + hypernetwork_name = hypernetwork_name.rsplit('(', 1)[0] + filename = os.path.join(shared.cmd_opts.hypernetwork_dir, f'{hypernetwork_name}.pt') + base_log_directory = log_directory + log_directory = os.path.join(log_directory, datetime.datetime.now().strftime("%Y-%m-%d"), hypernetwork_name) + unload = shared.opts.unload_models_when_training + + if save_hypernetwork_every > 0 or save_when_converge: + hypernetwork_dir = os.path.join(log_directory, "hypernetworks") + os.makedirs(hypernetwork_dir, exist_ok=True) + else: + hypernetwork_dir = None + + if create_image_every > 0 or create_when_converge: + images_dir = os.path.join(log_directory, "images") + os.makedirs(images_dir, exist_ok=True) + else: + images_dir = None + + checkpoint = sd_models.select_checkpoint() + + initial_step = hypernetwork.step or 0 + if initial_step >= steps: + shared.state.textinfo = f"Model has already been trained beyond specified max steps" + return hypernetwork, filename + + scheduler = LearnRateScheduler(learn_rate, steps, initial_step) + if shared.opts.training_enable_tensorboard: + print("Tensorboard logging enabled") + tensorboard_writer = tensorboard_setup(os.path.join(base_log_directory, base_hypernetwork_name)) + + else: + tensorboard_writer = None + # dataset loading may take a while, so input validations and early returns should be done before this + shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..." + detach_grad = shared.opts.disable_ema # test code that removes EMA + if detach_grad: + print("Disabling training for staged models!") + shared.sd_model.cond_stage_model.requires_grad_(False) + shared.sd_model.first_stage_model.requires_grad_(False) + torch.cuda.empty_cache() + pin_memory = shared.opts.pin_memory + ds = PersonalizedBase(data_root=data_root, width=training_width, + height=training_height, + repeats=shared.opts.training_image_repeats_per_epoch, + placeholder_token=hypernetwork_name, model=shared.sd_model, + cond_model=shared.sd_model.cond_stage_model, + device=devices.device, template_file=template_file, + include_cond=True, batch_size=batch_size, + gradient_step=gradient_step, shuffle_tags=shuffle_tags, + tag_drop_out=tag_drop_out, + latent_sampling_method=latent_sampling_method, + latent_sampling_std=latent_sampling_std, + manual_seed=manual_seed, + use_weight=use_weight) + + latent_sampling_method = ds.latent_sampling_method + + dl = PersonalizedDataLoader(ds, latent_sampling_method=latent_sampling_method, + batch_size=ds.batch_size, pin_memory=pin_memory) + old_parallel_processing_allowed = shared.parallel_processing_allowed + + if unload: + shared.parallel_processing_allowed = False + shared.sd_model.cond_stage_model.to(devices.cpu) + shared.sd_model.first_stage_model.to(devices.cpu) + + weights = hypernetwork.weights(True) + optimizer_name = hypernetwork.optimizer_name + if hypernetwork.optimizer_name == 'DAdaptAdamW': + use_dadaptation = True + optimizer = None + # Here we use optimizer from saved HN, or we can specify as UI option. 
+ if hypernetwork.optimizer_name in optimizer_dict: + if use_adamw_parameter: + if hypernetwork.optimizer_name != 'AdamW' and hypernetwork.optimizer_name != 'DAdaptAdamW': + raise RuntimeError(f"Cannot use adamW paramters for optimizer {hypernetwork.optimizer_name}!") + if use_dadaptation: + from .dadapt_test.install import get_dadapt_adam + optim_class = get_dadapt_adam(hypernetwork.optimizer_name) + if optim_class != torch.optim.AdamW: + optimizer = optim_class(params=weights, lr=scheduler.learn_rate, growth_rate = float('inf') if dadapt_growth_factor < 0 else dadapt_growth_factor, decouple=True, **adamW_kwarg_dict) + else: + optimizer = torch.optim.AdamW(params=weights, lr=scheduler.learn_rate, **adamW_kwarg_dict) + else: + optimizer = torch.optim.AdamW(params=weights, lr=scheduler.learn_rate, **adamW_kwarg_dict) + else: + optimizer = optimizer_dict[hypernetwork.optimizer_name](params=weights, lr=scheduler.learn_rate) + optimizer_name = hypernetwork.optimizer_name + else: + print(f"Optimizer type {hypernetwork.optimizer_name} is not defined!") + if use_dadaptation: + from .dadapt_test.install import get_dadapt_adam + optim_class = get_dadapt_adam(hypernetwork.optimizer_name) + if optim_class != torch.optim.AdamW: + optimizer = optim_class(params=weights, lr=scheduler.learn_rate, growth_rate = float('inf') if dadapt_growth_factor < 0 else dadapt_growth_factor, decouple=True, **adamW_kwarg_dict) + optimizer_name = 'DAdaptAdamW' + if optimizer is None: + optimizer = torch.optim.AdamW(params=weights, lr=scheduler.learn_rate, **adamW_kwarg_dict) + optimizer_name = 'AdamW' + + + + if hypernetwork.optimizer_state_dict: # This line must be changed if Optimizer type can be different from saved optimizer. + try: + optimizer.load_state_dict(hypernetwork.optimizer_state_dict) + except RuntimeError as e: + print("Cannot resume from saved optimizer!") + print(e) + optim_to(optimizer, devices.device) + if use_beta_scheduler: + scheduler_beta = CosineAnnealingWarmUpRestarts(optimizer=optimizer, first_cycle_steps=beta_repeat_epoch, + cycle_mult=epoch_mult, max_lr=scheduler.learn_rate, + warmup_steps=warmup, min_lr=min_lr, gamma=gamma_rate) + scheduler_beta.last_epoch = hypernetwork.step - 1 + else: + scheduler_beta = None + for pg in optimizer.param_groups: + pg['lr'] = scheduler.learn_rate + scaler = torch.cuda.amp.GradScaler() + + batch_size = ds.batch_size + gradient_step = ds.gradient_step + # n steps = batch_size * gradient_step * n image processed + steps_per_epoch = len(ds) // batch_size // gradient_step + max_steps_per_epoch = len(ds) // batch_size - (len(ds) // batch_size) % gradient_step + loss_step = 0 + _loss_step = 0 # internal + # size = len(ds.indexes) + loss_dict = defaultdict(lambda: deque(maxlen=1024)) + # losses = torch.zeros((size,)) + # previous_mean_losses = [0] + # previous_mean_loss = 0 + # print("Mean loss of {} elements".format(size)) + + steps_without_grad = 0 + + last_saved_file = "" + last_saved_image = "" + forced_filename = "" + if hasattr(sd_hijack_checkpoint, 'add'): + sd_hijack_checkpoint.add() + pbar = tqdm.tqdm(total=steps - initial_step) + try: + for i in range((steps - initial_step) * gradient_step): + if scheduler.finished or hypernetwork.step > steps: + break + if shared.state.interrupted: + break + for j, batch in enumerate(dl): + # works as a drop_last=True for gradient accumulation + if j == max_steps_per_epoch: + break + if use_beta_scheduler: + scheduler_beta.step(hypernetwork.step) + else: + scheduler.apply(optimizer, hypernetwork.step) + if 
scheduler.finished: + break + if shared.state.interrupted: + break + + with torch.autocast("cuda"): + x = batch.latent_sample.to(devices.device, non_blocking=pin_memory) + if use_weight: + w = batch.weight.to(devices.device, non_blocking=pin_memory) + if tag_drop_out != 0 or shuffle_tags: + shared.sd_model.cond_stage_model.to(devices.device) + c = shared.sd_model.cond_stage_model(batch.cond_text).to(devices.device, + non_blocking=pin_memory) + shared.sd_model.cond_stage_model.to(devices.cpu) + else: + c = stack_conds(batch.cond).to(devices.device, non_blocking=pin_memory) + if use_weight: + loss = shared.sd_model.weighted_forward(x, c, w)[0] + else: + _, losses = shared.sd_model.forward(x, c) + loss = losses['val/' + loss_opt] + for filenames in batch.filename: + loss_dict[filenames].append(loss.detach().item()) + loss /= gradient_step + del x + del c + + _loss_step += loss.item() + scaler.scale(loss).backward() + batch.latent_sample.to(devices.cpu) + # go back until we reach gradient accumulation steps + if (j + 1) % gradient_step != 0: + continue + gradient_clipping(weights) + # print(f"grad:{weights[0].grad.detach().cpu().abs().mean().item():.7f}") + # scaler.unscale_(optimizer) + # print(f"grad:{weights[0].grad.detach().cpu().abs().mean().item():.15f}") + # torch.nn.utils.clip_grad_norm_(weights, max_norm=1.0) + # print(f"grad:{weights[0].grad.detach().cpu().abs().mean().item():.15f}") + try: + scaler.step(optimizer) + except AssertionError: + optimizer.param_groups[0]['capturable'] = True + scaler.step(optimizer) + scaler.update() + hypernetwork.step += 1 + pbar.update() + optimizer.zero_grad(set_to_none=True) + loss_step = _loss_step + _loss_step = 0 + + steps_done = hypernetwork.step + 1 + + epoch_num = hypernetwork.step // steps_per_epoch + epoch_step = hypernetwork.step % steps_per_epoch + + description = f"Training hypernetwork [Epoch {epoch_num}: {epoch_step + 1}/{steps_per_epoch}]loss: {loss_step:.7f}" + pbar.set_description(description) + if hypernetwork_dir is not None and ( + (use_beta_scheduler and scheduler_beta.is_EOC(hypernetwork.step) and save_when_converge) or ( + save_hypernetwork_every > 0 and steps_done % save_hypernetwork_every == 0)): + # Before saving, change name to match current checkpoint. + hypernetwork_name_every = f'{hypernetwork_name}-{steps_done}' + last_saved_file = os.path.join(hypernetwork_dir, f'{hypernetwork_name_every}.pt') + hypernetwork.optimizer_name = optimizer_name + if shared.opts.save_optimizer_state: + hypernetwork.optimizer_state_dict = optimizer.state_dict() + save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, last_saved_file) + hypernetwork.optimizer_state_dict = None # dereference it after saving, to save memory. 
+ + write_loss(log_directory, "hypernetwork_loss.csv", hypernetwork.step, steps_per_epoch, + { + "loss": f"{loss_step:.7f}", + "learn_rate": get_lr_from_optimizer(optimizer) + }) + if shared.opts.training_enable_tensorboard: + epoch_num = hypernetwork.step // len(ds) + epoch_step = hypernetwork.step - (epoch_num * len(ds)) + 1 + mean_loss = sum(sum(x) for x in loss_dict.values()) / sum(len(x) for x in loss_dict.values()) + tensorboard_add(tensorboard_writer, loss=mean_loss, global_step=hypernetwork.step, step=epoch_step, + learn_rate=scheduler.learn_rate if not use_beta_scheduler else + get_lr_from_optimizer(optimizer), epoch_num=epoch_num, base_name=hypernetwork_name) + if images_dir is not None and ( + use_beta_scheduler and scheduler_beta.is_EOC(hypernetwork.step) and create_when_converge) or ( + create_image_every > 0 and steps_done % create_image_every == 0): + set_scheduler(-1, False, False) + forced_filename = f'{hypernetwork_name}-{steps_done}' + last_saved_image = os.path.join(images_dir, forced_filename) + rng_state = torch.get_rng_state() + cuda_rng_state = None + if torch.cuda.is_available(): + cuda_rng_state = torch.cuda.get_rng_state_all() + hypernetwork.eval() + if move_optimizer: + optim_to(optimizer, devices.cpu) + shared.sd_model.cond_stage_model.to(devices.device) + shared.sd_model.first_stage_model.to(devices.device) + + p = processing.StableDiffusionProcessingTxt2Img( + sd_model=shared.sd_model, + do_not_save_grid=True, + do_not_save_samples=True, + ) + if hasattr(p, 'disable_extra_networks'): + p.disable_extra_networks = True + is_patched = True + else: + is_patched = False + if preview_from_txt2img: + p.prompt = preview_prompt + (hypernetwork.extra_name() if not is_patched else "") + p.negative_prompt = preview_negative_prompt + p.steps = preview_steps + p.sampler_name = sd_samplers.samplers[preview_sampler_index].name + p.cfg_scale = preview_cfg_scale + p.seed = preview_seed + p.width = preview_width + p.height = preview_height + else: + p.prompt = batch.cond_text[0] + (hypernetwork.extra_name() if not is_patched else "") + p.steps = 20 + p.width = training_width + p.height = training_height + + preview_text = p.prompt + + processed = processing.process_images(p) + image = processed.images[0] if len(processed.images) > 0 else None + if shared.opts.training_enable_tensorboard and shared.opts.training_tensorboard_save_images: + tensorboard_add_image(tensorboard_writer, f"Validation at epoch {epoch_num}", image, + hypernetwork.step, base_name=hypernetwork_name) + + if unload: + shared.sd_model.cond_stage_model.to(devices.cpu) + shared.sd_model.first_stage_model.to(devices.cpu) + torch.set_rng_state(rng_state) + if torch.cuda.is_available(): + torch.cuda.set_rng_state_all(cuda_rng_state) + hypernetwork.train() + if move_optimizer: + optim_to(optimizer, devices.device) + if noise_training_scheduler_enabled: + set_scheduler(noise_training_scheduler_cycle, noise_training_scheduler_repeat, True) + if image is not None: + if hasattr(shared.state, 'assign_current_image'): + shared.state.assign_current_image(image) + else: + shared.state.current_image = image + last_saved_image, last_text_info = images.save_image(image, images_dir, "", p.seed, p.prompt, + shared.opts.samples_format, + processed.infotexts[0], p=p, + forced_filename=forced_filename, + save_to_dirs=False) + last_saved_image += f", prompt: {preview_text}" + set_accessible(hypernetwork) + + shared.state.job_no = hypernetwork.step + + shared.state.textinfo = f""" +

+<p>
+Loss: {loss_step:.7f}<br/>
+Step: {steps_done}<br/>
+Last prompt: {html.escape(batch.cond_text[0])}<br/>
+Last saved hypernetwork: {html.escape(last_saved_file)}<br/>
+Last saved image: {html.escape(last_saved_image)}<br/>
+</p>

+""" + except Exception: + if pbar is not None: + pbar.set_description(traceback.format_exc()) + shared.state.textinfo = traceback.format_exc() + print(traceback.format_exc(), file=sys.stderr) + finally: + pbar.leave = False + pbar.close() + hypernetwork.eval() + set_scheduler(-1, False, False) + shared.parallel_processing_allowed = old_parallel_processing_allowed + remove_accessible() + if hasattr(sd_hijack_checkpoint, 'remove'): + sd_hijack_checkpoint.remove() + if shared.opts.training_enable_tensorboard: + mean_loss = sum(sum(x) for x in loss_dict.values()) / sum(len(x) for x in loss_dict.values()) if sum(len(x) for x in loss_dict.values()) > 0 else 0 + tensorboard_log_hyperparameter(tensorboard_writer, lr=learn_rate, + GA_steps=gradient_step, + batch_size=batch_size, + layer_structure=hypernetwork.layer_structure, + activation=hypernetwork.activation_func, + weight_init=hypernetwork.weight_init, + dropout_structure=hypernetwork.dropout_structure, + max_steps=steps, + latent_sampling_method=latent_sampling_method, + template=template_file, + CosineAnnealing=use_beta_scheduler, + beta_repeat_epoch=beta_repeat_epoch, + epoch_mult=epoch_mult, + warmup=warmup, + min_lr=min_lr, + gamma_rate=gamma_rate, + adamW_opts=use_adamw_parameter, + adamW_decay=adamw_weight_decay, + adamW_beta_1=adamw_beta_1, + adamW_beta_2=adamw_beta_2, + adamW_eps=adamw_eps, + gradient_clip=gradient_clip_opt, + gradient_clip_value=optional_gradient_clip_value, + gradient_clip_norm_type=optional_gradient_norm_type, + loss=mean_loss, + base_hypernetwork_name=hypernetwork_name + ) + filename = os.path.join(shared.cmd_opts.hypernetwork_dir, f'{hypernetwork_name}.pt') + hypernetwork.optimizer_name = optimizer_name + if shared.opts.save_optimizer_state: + hypernetwork.optimizer_state_dict = optimizer.state_dict() + save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, filename) + del optimizer + hypernetwork.optimizer_state_dict = None # dereference it after saving, to save memory. + shared.sd_model.cond_stage_model.to(devices.device) + shared.sd_model.first_stage_model.to(devices.device) + gc.collect() + torch.cuda.empty_cache() + return hypernetwork, filename + + +def train_hypernetwork_tuning(id_task, hypernetwork_name, data_root, log_directory, + create_image_every, save_hypernetwork_every, preview_from_txt2img, preview_prompt, + preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, + preview_seed, + preview_width, preview_height, + move_optimizer=True, + optional_new_hypernetwork_name='', load_hypernetworks_options='', + load_training_options='', manual_dataset_seed=-1): + load_hypernetworks_options = load_hypernetworks_options.split(',') + load_training_options = load_training_options.split(',') + # images allows training previews to have infotext. Importing it at the top causes a circular import problem. 
+ for _i, load_hypernetworks_option in enumerate(load_hypernetworks_options): + load_hypernetworks_option = load_hypernetworks_option.strip(' ') + if load_hypernetworks_option != '' and get_training_option(load_hypernetworks_option) is False: + print(f"Cannot load from {load_hypernetworks_option}!") + continue + for _j, load_training_option in enumerate(load_training_options): + load_training_option = load_training_option.strip(' ') + if get_training_option(load_training_option) is False: + print(f"Cannot load from {load_training_option}!") + continue + internal_clean_training( + hypernetwork_name if load_hypernetworks_option == '' else optional_new_hypernetwork_name, + data_root, + log_directory, + create_image_every, + save_hypernetwork_every, + preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, + preview_cfg_scale, preview_seed, preview_width, preview_height, + move_optimizer, + load_hypernetworks_option, load_training_option, manual_dataset_seed, setting_tuple=(_i, _j)) + if shared.state.interrupted: + return None, None diff --git a/extensions/Hypernetwork-MonkeyPatch-Extension/patches/external_pr/sd_hijack_checkpoint.py b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/external_pr/sd_hijack_checkpoint.py new file mode 100644 index 0000000000000000000000000000000000000000..efdcd7a641523d713eaf8bfea51941bed55bfb8c --- /dev/null +++ b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/external_pr/sd_hijack_checkpoint.py @@ -0,0 +1,22 @@ +from torch.utils.checkpoint import checkpoint + + +def BasicTransformerBlock_forward(self, x, context=None): + return checkpoint(self._forward, x, context) + +def AttentionBlock_forward(self, x): + return checkpoint(self._forward, x) + +def ResBlock_forward(self, x, emb): + return checkpoint(self._forward, x, emb) + + +try: + import ldm.modules.attention + import ldm.modules.diffusionmodules.model + import ldm.modules.diffusionmodules.openaimodel + ldm.modules.attention.BasicTransformerBlock.forward = BasicTransformerBlock_forward + ldm.modules.diffusionmodules.openaimodel.ResBlock.forward = ResBlock_forward + ldm.modules.diffusionmodules.openaimodel.AttentionBlock.forward = AttentionBlock_forward +except: + pass diff --git a/extensions/Hypernetwork-MonkeyPatch-Extension/patches/external_pr/textual_inversion.py b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/external_pr/textual_inversion.py new file mode 100644 index 0000000000000000000000000000000000000000..5decdcd7fb0bcfefc972aa16e9f906cfcad3eb0f --- /dev/null +++ b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/external_pr/textual_inversion.py @@ -0,0 +1,504 @@ +import csv +import datetime +import gc +import html +import os +import sys +import traceback + +import torch +import tqdm +from PIL import PngImagePlugin + +from modules import shared, devices, sd_models, images, processing, sd_samplers, sd_hijack, sd_hijack_checkpoint +from modules.textual_inversion.image_embedding import caption_image_overlay, insert_image_data_embed, embedding_to_b64 +from modules.textual_inversion.learn_schedule import LearnRateScheduler +from modules.textual_inversion.textual_inversion import save_embedding +from .dataset import PersonalizedBase, PersonalizedDataLoader +from ..hnutil import optim_to +from ..scheduler import CosineAnnealingWarmUpRestarts +from ..tbutils import tensorboard_setup, tensorboard_add_image + +# apply OsError avoid here +delayed_values = {} + + +def write_loss(log_directory, filename, step, epoch_len, values): + if 
shared.opts.training_write_csv_every == 0: + return + + if step % shared.opts.training_write_csv_every != 0: + return + write_csv_header = False if os.path.exists(os.path.join(log_directory, filename)) else True + try: + with open(os.path.join(log_directory, filename), "a+", newline='') as fout: + csv_writer = csv.DictWriter(fout, fieldnames=["step", "epoch", "epoch_step", *(values.keys())]) + + if write_csv_header: + csv_writer.writeheader() + if log_directory + filename in delayed_values: + delayed = delayed_values[log_directory + filename] + for step, epoch, epoch_step, values in delayed: + csv_writer.writerow({ + "step": step, + "epoch": epoch, + "epoch_step": epoch_step, + **values, + }) + delayed.clear() + epoch, epoch_step = divmod(step - 1, epoch_len) + csv_writer.writerow({ + "step": step, + "epoch": epoch, + "epoch_step": epoch_step, + **values, + }) + except OSError: + epoch, epoch_step = divmod(step - 1, epoch_len) + if log_directory + filename in delayed_values: + delayed_values[log_directory + filename].append((step, epoch, epoch_step, values)) + else: + delayed_values[log_directory + filename] = [(step, epoch, epoch_step, values)] + + +def validate_train_inputs(model_name, learn_rate, batch_size, gradient_step, data_root, template_file, steps, + save_model_every, create_image_every, log_directory, name="embedding"): + assert model_name, f"{name} not selected" + assert learn_rate, "Learning rate is empty or 0" + assert isinstance(batch_size, int), "Batch size must be integer" + assert batch_size > 0, "Batch size must be positive" + assert isinstance(gradient_step, int), "Gradient accumulation step must be integer" + assert gradient_step > 0, "Gradient accumulation step must be positive" + assert data_root, "Dataset directory is empty" + assert os.path.isdir(data_root), "Dataset directory doesn't exist" + assert os.listdir(data_root), "Dataset directory is empty" + assert template_file, "Prompt template file is empty" + assert os.path.isfile(template_file), "Prompt template file doesn't exist" + assert steps, "Max steps is empty or 0" + assert isinstance(steps, int), "Max steps must be integer" + assert steps > 0, "Max steps must be positive" + assert isinstance(save_model_every, int), "Save {name} must be integer" + assert save_model_every >= 0, "Save {name} must be positive or 0" + assert isinstance(create_image_every, int), "Create image must be integer" + assert create_image_every >= 0, "Create image must be positive or 0" + if save_model_every or create_image_every: + assert log_directory, "Log directory is empty" + + +def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_step, data_root, log_directory, + training_width, + training_height, steps, shuffle_tags, tag_drop_out, latent_sampling_method, create_image_every, + save_embedding_every, template_file, save_image_with_stored_embedding, preview_from_txt2img, + preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, + preview_seed, preview_width, preview_height, + use_beta_scheduler=False, beta_repeat_epoch=4000, epoch_mult=1, warmup=10, min_lr=1e-7, + gamma_rate=1, save_when_converge=False, create_when_converge=False, + move_optimizer=True, + use_adamw_parameter=False, adamw_weight_decay=0.01, adamw_beta_1=0.9, adamw_beta_2=0.99, + adamw_eps=1e-8, + use_grad_opts=False, gradient_clip_opt='None', optional_gradient_clip_value=1e01, + optional_gradient_norm_type=2, latent_sampling_std=-1, use_weight=False + ): + save_embedding_every = save_embedding_every or 
0 + create_image_every = create_image_every or 0 + validate_train_inputs(embedding_name, learn_rate, batch_size, gradient_step, data_root, template_file, steps, + save_embedding_every, create_image_every, log_directory, name="embedding") + try: + if use_adamw_parameter: + adamw_weight_decay, adamw_beta_1, adamw_beta_2, adamw_eps = [float(x) for x in + [adamw_weight_decay, adamw_beta_1, + adamw_beta_2, adamw_eps]] + assert 0 <= adamw_weight_decay, "Weight decay paramter should be larger or equal than zero!" + assert (all(0 <= x <= 1 for x in [adamw_beta_1, adamw_beta_2, + adamw_eps])), "Cannot use negative or >1 number for adamW parameters!" + adamW_kwarg_dict = { + 'weight_decay': adamw_weight_decay, + 'betas': (adamw_beta_1, adamw_beta_2), + 'eps': adamw_eps + } + print('Using custom AdamW parameters') + else: + adamW_kwarg_dict = { + 'weight_decay': 0.01, + 'betas': (0.9, 0.99), + 'eps': 1e-8 + } + if use_beta_scheduler: + print("Using Beta Scheduler") + beta_repeat_epoch = int(beta_repeat_epoch) + assert beta_repeat_epoch > 0, f"Cannot use too small cycle {beta_repeat_epoch}!" + min_lr = float(min_lr) + assert min_lr < 1, f"Cannot use minimum lr with {min_lr}!" + gamma_rate = float(gamma_rate) + print(f"Using learn rate decay(per cycle) of {gamma_rate}") + assert 0 <= gamma_rate <= 1, f"Cannot use gamma rate with {gamma_rate}!" + epoch_mult = float(epoch_mult) + assert 1 <= epoch_mult, "Cannot use epoch multiplier smaller than 1!" + warmup = int(warmup) + assert warmup >= 1, "Warmup epoch should be larger than 0!" + print(f"Save when converges : {save_when_converge}") + print(f"Generate image when converges : {create_when_converge}") + else: + beta_repeat_epoch = 4000 + epoch_mult = 1 + warmup = 10 + min_lr = 1e-7 + gamma_rate = 1 + save_when_converge = False + create_when_converge = False + except ValueError: + raise RuntimeError("Cannot use advanced LR scheduler settings!") + if use_grad_opts and gradient_clip_opt != "None": + try: + optional_gradient_clip_value = float(optional_gradient_clip_value) + except ValueError: + raise RuntimeError(f"Cannot convert invalid gradient clipping value {optional_gradient_clip_value})") + if gradient_clip_opt == "Norm": + try: + grad_norm = int(optional_gradient_norm_type) + except ValueError: + raise RuntimeError(f"Cannot convert invalid gradient norm type {optional_gradient_norm_type})") + assert grad_norm >= 0, f"P-norm cannot be calculated from negative number {grad_norm}" + + def gradient_clipping(arg1): + torch.nn.utils.clip_grad_norm_(arg1, optional_gradient_clip_value, optional_gradient_norm_type) + return + else: + def gradient_clipping(arg1): + torch.nn.utils.clip_grad_value_(arg1, optional_gradient_clip_value) + return + else: + def gradient_clipping(arg1): + return + # Function gradient clipping is inplace(_) operation. + shared.state.job = "train-embedding" + shared.state.textinfo = "Initializing textual inversion training..." 
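+    # All artifacts from this run land under <log_directory>/<YYYY-MM-DD>/<embedding_name>/,
+    # split into "embeddings", "images" and "image_embeddings" subdirectories depending on
+    # which periodic-save options below are enabled.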
+ shared.state.job_count = steps + + filename = os.path.join(shared.cmd_opts.embeddings_dir, f'{embedding_name}.pt') + + log_directory = os.path.join(log_directory, datetime.datetime.now().strftime("%Y-%m-%d"), embedding_name) + unload = shared.opts.unload_models_when_training + + if save_embedding_every > 0 or save_when_converge: + embedding_dir = os.path.join(log_directory, "embeddings") + os.makedirs(embedding_dir, exist_ok=True) + else: + embedding_dir = None + + if create_image_every > 0 or create_when_converge: + images_dir = os.path.join(log_directory, "images") + os.makedirs(images_dir, exist_ok=True) + else: + images_dir = None + + if (create_image_every > 0 or create_when_converge) and save_image_with_stored_embedding: + images_embeds_dir = os.path.join(log_directory, "image_embeddings") + os.makedirs(images_embeds_dir, exist_ok=True) + else: + images_embeds_dir = None + + hijack = sd_hijack.model_hijack + + embedding = hijack.embedding_db.word_embeddings[embedding_name] + checkpoint = sd_models.select_checkpoint() + + initial_step = embedding.step or 0 + if initial_step >= steps: + shared.state.textinfo = f"Model has already been trained beyond specified max steps" + return embedding, filename + scheduler = LearnRateScheduler(learn_rate, steps, initial_step) + + # dataset loading may take a while, so input validations and early returns should be done before this + shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..." + old_parallel_processing_allowed = shared.parallel_processing_allowed + + tensorboard_writer = None + if shared.opts.training_enable_tensorboard: + print("Tensorboard logging enabled") + tensorboard_writer = tensorboard_setup(log_directory) + + pin_memory = shared.opts.pin_memory + detach_grad = shared.opts.disable_ema # test code that removes EMA + if detach_grad: + print("Disabling training for staged models!") + shared.sd_model.cond_stage_model.requires_grad_(False) + shared.sd_model.first_stage_model.requires_grad_(False) + torch.cuda.empty_cache() + ds = PersonalizedBase(data_root=data_root, width=training_width, + height=training_height, + repeats=shared.opts.training_image_repeats_per_epoch, + placeholder_token=embedding_name, model=shared.sd_model, + cond_model=shared.sd_model.cond_stage_model, + device=devices.device, template_file=template_file, + batch_size=batch_size, gradient_step=gradient_step, + shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, + latent_sampling_method=latent_sampling_method, + latent_sampling_std=latent_sampling_std, use_weight=use_weight) + + latent_sampling_method = ds.latent_sampling_method + + dl = PersonalizedDataLoader(ds, latent_sampling_method=latent_sampling_method, + batch_size=ds.batch_size, pin_memory=pin_memory) + if unload: + shared.parallel_processing_allowed = False + shared.sd_model.first_stage_model.to(devices.cpu) + + embedding.vec.requires_grad_(True) + optimizer_name = 'AdamW' # hardcoded optimizer name now + if use_adamw_parameter: + optimizer = torch.optim.AdamW(params=[embedding.vec], lr=scheduler.learn_rate, **adamW_kwarg_dict) + else: + optimizer = torch.optim.AdamW(params=[embedding.vec], lr=scheduler.learn_rate, weight_decay=0.0) + + if os.path.exists( + filename + '.optim'): # This line must be changed if Optimizer type can be different from saved optimizer. 
+ try: + optimizer_saved_dict = torch.load(filename + '.optim', map_location='cpu') + if embedding.checksum() == optimizer_saved_dict.get('hash', None): + optimizer_state_dict = optimizer_saved_dict.get('optimizer_state_dict', None) + if optimizer_state_dict is not None: + optimizer.load_state_dict(optimizer_state_dict) + print("Loaded existing optimizer from checkpoint") + except RuntimeError as e: + print("Cannot resume from saved optimizer!") + print(e) + else: + print("No saved optimizer exists in checkpoint") + if move_optimizer: + optim_to(optimizer, devices.device) + if use_beta_scheduler: + scheduler_beta = CosineAnnealingWarmUpRestarts(optimizer=optimizer, first_cycle_steps=beta_repeat_epoch, + cycle_mult=epoch_mult, max_lr=scheduler.learn_rate, + warmup_steps=warmup, min_lr=min_lr, gamma=gamma_rate) + scheduler_beta.last_epoch = embedding.step - 1 + else: + scheduler_beta = None + for pg in optimizer.param_groups: + pg['lr'] = scheduler.learn_rate + + scaler = torch.cuda.amp.GradScaler() + + batch_size = ds.batch_size + gradient_step = ds.gradient_step + # n steps = batch_size * gradient_step * n image processed + steps_per_epoch = len(ds) // batch_size // gradient_step + max_steps_per_epoch = len(ds) // batch_size - (len(ds) // batch_size) % gradient_step + loss_step = 0 + _loss_step = 0 # internal + + last_saved_file = "" + last_saved_image = "" + forced_filename = "" + embedding_yet_to_be_embedded = False + + is_training_inpainting_model = shared.sd_model.model.conditioning_key in {'hybrid', 'concat'} + img_c = None + + pbar = tqdm.tqdm(total=steps - initial_step) + if hasattr(sd_hijack_checkpoint, 'add'): + sd_hijack_checkpoint.add() + try: + for i in range((steps - initial_step) * gradient_step): + if scheduler.finished: + break + if shared.state.interrupted: + break + for j, batch in enumerate(dl): + # works as a drop_last=True for gradient accumulation + if j == max_steps_per_epoch: + break + if use_beta_scheduler: + scheduler_beta.step(embedding.step) + else: + scheduler.apply(optimizer, embedding.step) + if scheduler.finished: + break + if shared.state.interrupted: + break + + with devices.autocast(): + x = batch.latent_sample.to(devices.device, non_blocking=pin_memory) + if use_weight: + w = batch.weight.to(devices.device, non_blocking=pin_memory) + shared.sd_model.cond_stage_model.to(devices.device) + c = shared.sd_model.cond_stage_model(batch.cond_text) + if is_training_inpainting_model: + if img_c is None: + img_c = processing.txt2img_image_conditioning(shared.sd_model, c, training_width, + training_height) + + cond = {"c_concat": [img_c], "c_crossattn": [c]} + else: + cond = c + if use_weight: + loss = shared.sd_model.weighted_forward(x, c, w)[0] / gradient_step + del w + else: + loss = shared.sd_model.forward(x, cond)[0] / gradient_step + del x + _loss_step += loss.item() + scaler.scale(loss).backward() + # go back until we reach gradient accumulation steps + if (j + 1) % gradient_step != 0: + continue + gradient_clipping(embedding.vec) + try: + scaler.step(optimizer) + except AssertionError: + raise RuntimeError("This error happens because None of the template used embedding's text!") + scaler.update() + embedding.step += 1 + pbar.update() + optimizer.zero_grad(set_to_none=True) + loss_step = _loss_step + _loss_step = 0 + + steps_done = embedding.step + 1 + + epoch_num = embedding.step // steps_per_epoch + epoch_step = embedding.step % steps_per_epoch + + pbar.set_description(f"[Epoch {epoch_num}: {epoch_step + 1}/{steps_per_epoch}]loss: {loss_step:.7f}") + if 
embedding_dir is not None and ( + (use_beta_scheduler and scheduler_beta.is_EOC(embedding.step) and save_when_converge) or ( + save_embedding_every > 0 and steps_done % save_embedding_every == 0)): + # Before saving, change name to match current checkpoint. + embedding_name_every = f'{embedding_name}-{steps_done}' + last_saved_file = os.path.join(embedding_dir, f'{embedding_name_every}.pt') + # if shared.opts.save_optimizer_state: + # embedding.optimizer_state_dict = optimizer.state_dict() + save_embedding(embedding, optimizer, checkpoint, embedding_name_every, last_saved_file, + remove_cached_checksum=True) + embedding_yet_to_be_embedded = True + + write_loss(log_directory, "textual_inversion_loss.csv", embedding.step, steps_per_epoch, { + "loss": f"{loss_step:.7f}", + "learn_rate": scheduler.learn_rate + }) + + if images_dir is not None and ( + (use_beta_scheduler and scheduler_beta.is_EOC(embedding.step) and create_when_converge) or ( + create_image_every > 0 and steps_done % create_image_every == 0)): + forced_filename = f'{embedding_name}-{steps_done}' + last_saved_image = os.path.join(images_dir, forced_filename) + rng_state = torch.get_rng_state() + cuda_rng_state = None + if torch.cuda.is_available(): + cuda_rng_state = torch.cuda.get_rng_state_all() + if move_optimizer: + optim_to(optimizer, devices.cpu) + gc.collect() + shared.sd_model.first_stage_model.to(devices.device) + + p = processing.StableDiffusionProcessingTxt2Img( + sd_model=shared.sd_model, + do_not_save_grid=True, + do_not_save_samples=True, + do_not_reload_embeddings=True, + ) + + if preview_from_txt2img: + p.prompt = preview_prompt + p.negative_prompt = preview_negative_prompt + p.steps = preview_steps + p.sampler_name = sd_samplers.samplers[preview_sampler_index].name + p.cfg_scale = preview_cfg_scale + p.seed = preview_seed + p.width = preview_width + p.height = preview_height + else: + p.prompt = batch.cond_text[0] + p.steps = 20 + p.width = training_width + p.height = training_height + + preview_text = p.prompt + if hasattr(p, 'disable_extra_networks'): + p.disable_extra_networks = True + is_patched = True + else: + is_patched = False + processed = processing.process_images(p) + image = processed.images[0] if len(processed.images) > 0 else None + + if move_optimizer: + optim_to(optimizer, devices.device) + if image is not None: + if hasattr(shared.state, 'assign_current_image'): + shared.state.assign_current_image(image) + else: + shared.state.current_image = image + last_saved_image, last_text_info = images.save_image(image, images_dir, "", p.seed, p.prompt, + shared.opts.samples_format, + processed.infotexts[0], p=p, + forced_filename=forced_filename, + save_to_dirs=False) + last_saved_image += f", prompt: {preview_text}" + if shared.opts.training_enable_tensorboard and shared.opts.training_tensorboard_save_images: + tensorboard_add_image(tensorboard_writer, f"Validation at epoch {epoch_num}", image, + embedding.step) + + if save_image_with_stored_embedding and os.path.exists( + last_saved_file) and embedding_yet_to_be_embedded: + + last_saved_image_chunks = os.path.join(images_embeds_dir, f'{embedding_name}-{steps_done}.png') + + info = PngImagePlugin.PngInfo() + data = torch.load(last_saved_file) + info.add_text("sd-ti-embedding", embedding_to_b64(data)) + + title = "<{}>".format(data.get('name', '???')) + + try: + vectorSize = list(data['string_to_param'].values())[0].shape[0] + except Exception as e: + vectorSize = '?' 
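+                        # The embedding is stored twice in the preview being built here: as the
+                        # base64 "sd-ti-embedding" tEXt chunk added above, and pixel-embedded via
+                        # insert_image_data_embed() below, so the captioned PNG doubles as a
+                        # loadable copy of the embedding itself.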
+ + checkpoint = sd_models.select_checkpoint() + footer_left = checkpoint.model_name + footer_mid = '[{}]'.format( + checkpoint.shorthash if hasattr(checkpoint, 'shorthash') else checkpoint.hash) + footer_right = '{}v {}s'.format(vectorSize, steps_done) + + captioned_image = caption_image_overlay(image, title, footer_left, footer_mid, footer_right) + captioned_image = insert_image_data_embed(captioned_image, data) + + captioned_image.save(last_saved_image_chunks, "PNG", pnginfo=info) + embedding_yet_to_be_embedded = False + if unload: + shared.sd_model.first_stage_model.to(devices.cpu) + torch.set_rng_state(rng_state) + if torch.cuda.is_available(): + torch.cuda.set_rng_state_all(cuda_rng_state) + last_saved_image, last_text_info = images.save_image(image, images_dir, "", p.seed, p.prompt, + shared.opts.samples_format, + processed.infotexts[0], p=p, + forced_filename=forced_filename, + save_to_dirs=False) + last_saved_image += f", prompt: {preview_text}" + + shared.state.job_no = embedding.step + + shared.state.textinfo = f""" +

+<p>
+Loss: {loss_step:.7f}<br/>
+Step: {steps_done}<br/>
+Last prompt: {html.escape(batch.cond_text[0])}<br/>
+Last saved embedding: {html.escape(last_saved_file)}<br/>
+Last saved image: {html.escape(last_saved_image)}<br/>
+</p>

+""" + filename = os.path.join(shared.cmd_opts.embeddings_dir, f'{embedding_name}.pt') + save_embedding(embedding, optimizer, checkpoint, embedding_name, filename, remove_cached_checksum=True) + except Exception: + print(traceback.format_exc(), file=sys.stderr) + pass + finally: + pbar.leave = False + pbar.close() + shared.sd_model.first_stage_model.to(devices.device) + shared.parallel_processing_allowed = old_parallel_processing_allowed + if hasattr(sd_hijack_checkpoint, 'remove'): + sd_hijack_checkpoint.remove() + return embedding, filename diff --git a/extensions/Hypernetwork-MonkeyPatch-Extension/patches/external_pr/ui.py b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/external_pr/ui.py new file mode 100644 index 0000000000000000000000000000000000000000..d9d1acdea2ad9a017b43406c0077f1fd3fc0f9c8 --- /dev/null +++ b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/external_pr/ui.py @@ -0,0 +1,480 @@ +import gc +import html +import json +import os +import random + +from modules import shared, sd_hijack, devices +from modules.call_queue import wrap_gradio_call +from modules.paths import script_path +from modules.ui import create_refresh_button, gr_show +from webui import wrap_gradio_gpu_call +from .textual_inversion import train_embedding as train_embedding_external +from .hypernetwork import train_hypernetwork as train_hypernetwork_external, train_hypernetwork_tuning +import gradio as gr + + +def train_hypernetwork_ui(*args): + initial_hypernetwork = None + if hasattr(shared, 'loaded_hypernetwork'): + initial_hypernetwork = shared.loaded_hypernetwork + else: + shared.loaded_hypernetworks = [] + assert not shared.cmd_opts.lowvram, 'Training models with lowvram is not possible' + + try: + sd_hijack.undo_optimizations() + + hypernetwork, filename = train_hypernetwork_external(*args) + + res = f""" +Training {'interrupted' if shared.state.interrupted else 'finished'} at {hypernetwork.step} steps. +Hypernetwork saved to {html.escape(filename)} +""" + return res, "" + except Exception: + raise + finally: + if hasattr(shared, 'loaded_hypernetwork'): + shared.loaded_hypernetwork = initial_hypernetwork + else: + shared.loaded_hypernetworks = [] + # check hypernetwork is bounded then delete it + if locals().get('hypernetwork', None) is not None: + del hypernetwork + gc.collect() + shared.sd_model.cond_stage_model.to(devices.device) + shared.sd_model.first_stage_model.to(devices.device) + sd_hijack.apply_optimizations() + + +def train_hypernetwork_ui_tuning(*args): + initial_hypernetwork = None + if hasattr(shared, 'loaded_hypernetwork'): + initial_hypernetwork = shared.loaded_hypernetwork + else: + shared.loaded_hypernetworks = [] + + assert not shared.cmd_opts.lowvram, 'Training models with lowvram is not possible' + + try: + sd_hijack.undo_optimizations() + + train_hypernetwork_tuning(*args) + + res = f""" +Training {'interrupted' if shared.state.interrupted else 'finished'}. 
+""" + return res, "" + except Exception: + raise + finally: + if hasattr(shared, 'loaded_hypernetwork'): + shared.loaded_hypernetwork = initial_hypernetwork + else: + shared.loaded_hypernetworks = [] + shared.sd_model.cond_stage_model.to(devices.device) + shared.sd_model.first_stage_model.to(devices.device) + sd_hijack.apply_optimizations() + + +def save_training_setting(*args): + save_file_name, learn_rate, batch_size, gradient_step, training_width, \ + training_height, steps, shuffle_tags, tag_drop_out, latent_sampling_method, \ + template_file, use_beta_scheduler, beta_repeat_epoch, epoch_mult, warmup, min_lr, \ + gamma_rate, use_beta_adamW_checkbox, save_when_converge, create_when_converge, \ + adamw_weight_decay, adamw_beta_1, adamw_beta_2, adamw_eps, show_gradient_clip_checkbox, \ + gradient_clip_opt, optional_gradient_clip_value, optional_gradient_norm_type, latent_sampling_std,\ + noise_training_scheduler_enabled, noise_training_scheduler_repeat, noise_training_scheduler_cycle, loss_opt, use_dadaptation, dadapt_growth_factor, use_weight = args + dumped_locals = locals() + dumped_locals.pop('args') + filename = (str(random.randint(0, 1024)) if save_file_name == '' else save_file_name) + '_train_' + '.json' + filename = os.path.join(shared.cmd_opts.hypernetwork_dir, filename) + with open(filename, 'w') as file: + print(dumped_locals) + json.dump(dumped_locals, file) + print(f"File saved as {filename}") + return filename, "" + + +def save_hypernetwork_setting(*args): + save_file_name, enable_sizes, overwrite_old, layer_structure, activation_func, weight_init, add_layer_norm, use_dropout, dropout_structure, optional_info, weight_init_seed, normal_std, skip_connection = args + dumped_locals = locals() + dumped_locals.pop('args') + filename = (str(random.randint(0, 1024)) if save_file_name == '' else save_file_name) + '_hypernetwork_' + '.json' + filename = os.path.join(shared.cmd_opts.hypernetwork_dir, filename) + with open(filename, 'w') as file: + print(dumped_locals) + json.dump(dumped_locals, file) + print(f"File saved as {filename}") + return filename, "" + + +def on_train_gamma_tab(params=None): + dummy_component = gr.Label(visible=False) + with gr.Tab(label="Train_Gamma") as train_gamma: + gr.HTML( + value="
Train an embedding or Hypernetwork; you must specify a directory [wiki]
") + with gr.Row(): + train_embedding_name = gr.Dropdown(label='Embedding', elem_id="train_embedding", choices=sorted( + sd_hijack.model_hijack.embedding_db.word_embeddings.keys())) + create_refresh_button(train_embedding_name, + sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings, lambda: { + "choices": sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())}, + "refresh_train_embedding_name") + with gr.Row(): + train_hypernetwork_name = gr.Dropdown(label='Hypernetwork', elem_id="train_hypernetwork", + choices=[x for x in shared.hypernetworks.keys()]) + create_refresh_button(train_hypernetwork_name, shared.reload_hypernetworks, + lambda: {"choices": sorted([x for x in shared.hypernetworks.keys()])}, + "refresh_train_hypernetwork_name") + with gr.Row(): + embedding_learn_rate = gr.Textbox(label='Embedding Learning rate', + placeholder="Embedding Learning rate", value="0.005") + hypernetwork_learn_rate = gr.Textbox(label='Hypernetwork Learning rate', + placeholder="Hypernetwork Learning rate", value="0.00004") + use_beta_scheduler_checkbox = gr.Checkbox( + label='Show advanced learn rate scheduler options') + use_beta_adamW_checkbox = gr.Checkbox( + label='Show advanced adamW parameter options)') + show_gradient_clip_checkbox = gr.Checkbox( + label='Show Gradient Clipping Options(for both)') + show_noise_options = gr.Checkbox( + label='Show Noise Scheduler Options(for both)') + with gr.Row(visible=False) as adamW_options: + use_dadaptation = gr.Checkbox(label="Uses D-Adaptation(LR Free) AdamW. Recommended LR is 1.0 at base") + adamw_weight_decay = gr.Textbox(label="AdamW weight decay parameter", placeholder="default = 0.01", + value="0.01") + adamw_beta_1 = gr.Textbox(label="AdamW beta1 parameter", placeholder="default = 0.9", value="0.9") + adamw_beta_2 = gr.Textbox(label="AdamW beta2 parameter", placeholder="default = 0.99", value="0.99") + adamw_eps = gr.Textbox(label="AdamW epsilon parameter", placeholder="default = 1e-8", value="1e-8") + with gr.Row(visible=False) as dadapt_growth_options: + dadapt_growth_factor = gr.Number(value=-1, label='Growth factor limiting, use value like 1.02 or leave it as -1') + with gr.Row(visible=False) as beta_scheduler_options: + use_beta_scheduler = gr.Checkbox(label='Use CosineAnnealingWarmupRestarts Scheduler') + beta_repeat_epoch = gr.Textbox(label='Steps for cycle', placeholder="Cycles every nth Step", value="64") + epoch_mult = gr.Textbox(label='Step multiplier per cycle', placeholder="Step length multiplier every cycle", + value="1") + warmup = gr.Textbox(label='Warmup step per cycle', placeholder="CosineAnnealing lr increase step", + value="5") + min_lr = gr.Textbox(label='Minimum learning rate', + placeholder="restricts decay value, but does not restrict gamma rate decay", + value="6e-7") + gamma_rate = gr.Textbox(label='Decays learning rate every cycle', + placeholder="Value should be in (0-1]", value="1") + with gr.Row(visible=False) as beta_scheduler_options2: + save_converge_opt = gr.Checkbox(label="Saves when every cycle finishes") + generate_converge_opt = gr.Checkbox(label="Generates image when every cycle finishes") + with gr.Row(visible=False) as gradient_clip_options: + gradient_clip_opt = gr.Radio(label="Gradient Clipping Options", choices=["None", "limit", "norm"]) + optional_gradient_clip_value = gr.Textbox(label="Limiting value", value="1e-1") + optional_gradient_norm_type = gr.Textbox(label="Norm type", value="2") + with gr.Row(visible=False) as noise_scheduler_options: + 
noise_training_scheduler_enabled = gr.Checkbox(label="Use Noise training scheduler(test)") + noise_training_scheduler_repeat = gr.Checkbox(label="Restarts noise scheduler, or linear") + noise_training_scheduler_cycle = gr.Number(label="Restarts noise scheduler every nth epoch") + use_weight = gr.Checkbox(label="Uses image alpha(transparency) channel for adjusting loss") + # change by feedback + use_dadaptation.change( + fn=lambda show: gr_show(show), + inputs=[use_dadaptation], + outputs=[dadapt_growth_options] + ) + show_noise_options.change( + fn = lambda show:gr_show(show), + inputs = [show_noise_options], + outputs = [noise_scheduler_options] + ) + use_beta_adamW_checkbox.change( + fn=lambda show: gr_show(show), + inputs=[use_beta_adamW_checkbox], + outputs=[adamW_options], + ) + use_beta_scheduler_checkbox.change( + fn=lambda show: gr_show(show), + inputs=[use_beta_scheduler_checkbox], + outputs=[beta_scheduler_options], + ) + use_beta_scheduler_checkbox.change( + fn=lambda show: gr_show(show), + inputs=[use_beta_scheduler_checkbox], + outputs=[beta_scheduler_options2], + ) + show_gradient_clip_checkbox.change( + fn=lambda show: gr_show(show), + inputs=[show_gradient_clip_checkbox], + outputs=[gradient_clip_options], + ) + move_optim_when_generate = gr.Checkbox(label="Unload Optimizer when generating preview(hypernetwork)", + value=True) + batch_size = gr.Number(label='Batch size', value=1, precision=0) + gradient_step = gr.Number(label='Gradient accumulation steps', value=1, precision=0) + dataset_directory = gr.Textbox(label='Dataset directory', placeholder="Path to directory with input images") + log_directory = gr.Textbox(label='Log directory', placeholder="Path to directory where to write outputs", + value="textual_inversion") + template_file = gr.Textbox(label='Prompt template file', + value=os.path.join(script_path, "textual_inversion_templates", + "style_filewords.txt")) + training_width = gr.Slider(minimum=64, maximum=2048, step=64, label="Width", value=512) + training_height = gr.Slider(minimum=64, maximum=2048, step=64, label="Height", value=512) + steps = gr.Number(label='Max steps', value=100000, precision=0) + create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', + value=500, precision=0) + save_embedding_every = gr.Number( + label='Save a copy of embedding to log directory every N steps, 0 to disable', value=500, precision=0) + save_image_with_stored_embedding = gr.Checkbox(label='Save images with embedding in PNG chunks', value=True) + preview_from_txt2img = gr.Checkbox( + label='Read parameters (prompt, etc...) from txt2img tab when making previews', value=False) + with gr.Row(): + shuffle_tags = gr.Checkbox(label="Shuffle tags by ',' when creating prompts.", value=False) + tag_drop_out = gr.Slider(minimum=0, maximum=1, step=0.1, label="Drop out tags when creating prompts.", + value=0) + with gr.Row(): + latent_sampling_method = gr.Radio(label='Choose latent sampling method', value="once", + choices=['once', 'deterministic', 'random']) + latent_sampling_std_value = gr.Number(label="Standard deviation for sampling", value=-1) + with gr.Row(): + loss_opt = gr.Radio(label="loss type", value="loss", + choices=['loss', 'loss_simple', 'loss_vlb']) + with gr.Row(): + save_training_option = gr.Button(value="Save training setting") + save_file_name = gr.Textbox(label="File name to save setting as", value="") + load_training_option = gr.Textbox( + label="Load training option from saved json file. 
This will override settings above", value="") + with gr.Row(): + interrupt_training = gr.Button(value="Interrupt") + train_hypernetwork = gr.Button(value="Train Hypernetwork", variant='primary') + train_embedding = gr.Button(value="Train Embedding", variant='primary') + ti_output = gr.Text(elem_id="ti_output3", value="", show_label=False) + ti_outcome = gr.HTML(elem_id="ti_error3", value="") + + # Full path to .json or simple names are recommended. + save_training_option.click( + fn=wrap_gradio_call(save_training_setting), + inputs=[ + save_file_name, + hypernetwork_learn_rate, + batch_size, + gradient_step, + training_width, + training_height, + steps, + shuffle_tags, + tag_drop_out, + latent_sampling_method, + template_file, + use_beta_scheduler, + beta_repeat_epoch, + epoch_mult, + warmup, + min_lr, + gamma_rate, + use_beta_adamW_checkbox, + save_converge_opt, + generate_converge_opt, + adamw_weight_decay, + adamw_beta_1, + adamw_beta_2, + adamw_eps, + show_gradient_clip_checkbox, + gradient_clip_opt, + optional_gradient_clip_value, + optional_gradient_norm_type, + latent_sampling_std_value, + noise_training_scheduler_enabled, + noise_training_scheduler_repeat, + noise_training_scheduler_cycle, + loss_opt, + use_dadaptation, + dadapt_growth_factor, + use_weight + ], + outputs=[ + ti_output, + ti_outcome, + ] + ) + train_embedding.click( + fn=wrap_gradio_gpu_call(train_embedding_external, extra_outputs=[gr.update()]), + _js="start_training_textual_inversion", + inputs=[ + dummy_component, + train_embedding_name, + embedding_learn_rate, + batch_size, + gradient_step, + dataset_directory, + log_directory, + training_width, + training_height, + steps, + shuffle_tags, + tag_drop_out, + latent_sampling_method, + create_image_every, + save_embedding_every, + template_file, + save_image_with_stored_embedding, + preview_from_txt2img, + *params.txt2img_preview_params, + use_beta_scheduler, + beta_repeat_epoch, + epoch_mult, + warmup, + min_lr, + gamma_rate, + save_converge_opt, + generate_converge_opt, + move_optim_when_generate, + use_beta_adamW_checkbox, + adamw_weight_decay, + adamw_beta_1, + adamw_beta_2, + adamw_eps, + show_gradient_clip_checkbox, + gradient_clip_opt, + optional_gradient_clip_value, + optional_gradient_norm_type, + latent_sampling_std_value, + use_weight + ], + outputs=[ + ti_output, + ti_outcome, + ] + ) + + train_hypernetwork.click( + fn=wrap_gradio_gpu_call(train_hypernetwork_ui, extra_outputs=[gr.update()]), + _js="start_training_textual_inversion", + inputs=[ + dummy_component, + train_hypernetwork_name, + hypernetwork_learn_rate, + batch_size, + gradient_step, + dataset_directory, + log_directory, + training_width, + training_height, + steps, + shuffle_tags, + tag_drop_out, + latent_sampling_method, + create_image_every, + save_embedding_every, + template_file, + preview_from_txt2img, + *params.txt2img_preview_params, + use_beta_scheduler, + beta_repeat_epoch, + epoch_mult, + warmup, + min_lr, + gamma_rate, + save_converge_opt, + generate_converge_opt, + move_optim_when_generate, + use_beta_adamW_checkbox, + adamw_weight_decay, + adamw_beta_1, + adamw_beta_2, + adamw_eps, + show_gradient_clip_checkbox, + gradient_clip_opt, + optional_gradient_clip_value, + optional_gradient_norm_type, + latent_sampling_std_value, + noise_training_scheduler_enabled, + noise_training_scheduler_repeat, + noise_training_scheduler_cycle, + load_training_option, + loss_opt, + use_dadaptation, + dadapt_growth_factor, + use_weight + ], + outputs=[ + ti_output, + ti_outcome, + ] + ) + + 
interrupt_training.click( + fn=lambda: shared.state.interrupt(), + inputs=[], + outputs=[], + ) + return [(train_gamma, "Train Gamma", "train_gamma")] + + +def on_train_tuning(params=None): + dummy_component = gr.Label(visible=False) + with gr.Tab(label="Train_Tuning") as train_tuning: + gr.HTML( + value="
Train Hypernetwork; you must specify a directory [wiki]
") + with gr.Row(): + train_hypernetwork_name = gr.Dropdown(label='Hypernetwork', elem_id="train_hypernetwork", + choices=[x for x in shared.hypernetworks.keys()]) + create_refresh_button(train_hypernetwork_name, shared.reload_hypernetworks, + lambda: {"choices": sorted([x for x in shared.hypernetworks.keys()])}, + "refresh_train_hypernetwork_name") + optional_new_hypernetwork_name = gr.Textbox( + label="Hypernetwork name to create, leave it empty to use selected", value="") + with gr.Row(): + load_hypernetworks_option = gr.Textbox( + label="Load Hypernetwork creation option from saved json file", + placeholder=". filename cannot have ',' inside, and files should be splitted by ','.", value="") + with gr.Row(): + load_training_options = gr.Textbox( + label="Load training option(s) from saved json file", + placeholder=". filename cannot have ',' inside, and files should be splitted by ','.", value="") + move_optim_when_generate = gr.Checkbox(label="Unload Optimizer when generating preview(hypernetwork)", + value=True) + dataset_directory = gr.Textbox(label='Dataset directory', placeholder="Path to directory with input images") + log_directory = gr.Textbox(label='Log directory', placeholder="Path to directory where to write outputs", + value="textual_inversion") + create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', + value=500, precision=0) + save_model_every = gr.Number( + label='Save a copy of model to log directory every N steps, 0 to disable', value=500, precision=0) + preview_from_txt2img = gr.Checkbox( + label='Read parameters (prompt, etc...) from txt2img tab when making previews', value=False) + manual_dataset_seed = gr.Number( + label="Manual dataset seed", value=-1, precision=0 + ) + with gr.Row(): + interrupt_training = gr.Button(value="Interrupt") + train_hypernetwork = gr.Button(value="Train Hypernetwork", variant='primary') + ti_output = gr.Text(elem_id="ti_output4", value="", show_label=False) + ti_outcome = gr.HTML(elem_id="ti_error4", value="") + train_hypernetwork.click( + fn=wrap_gradio_gpu_call(train_hypernetwork_ui_tuning, extra_outputs=[gr.update()]), + _js="start_training_textual_inversion", + inputs=[ + dummy_component, + train_hypernetwork_name, + dataset_directory, + log_directory, + create_image_every, + save_model_every, + preview_from_txt2img, + *params.txt2img_preview_params, + move_optim_when_generate, + optional_new_hypernetwork_name, + load_hypernetworks_option, + load_training_options, + manual_dataset_seed + ], + outputs=[ + ti_output, + ti_outcome, + ] + ) + + interrupt_training.click( + fn=lambda: shared.state.interrupt(), + inputs=[], + outputs=[], + ) + return [(train_tuning, "Train Tuning", "train_tuning")] diff --git a/extensions/Hypernetwork-MonkeyPatch-Extension/patches/hashes_backup.py b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/hashes_backup.py new file mode 100644 index 0000000000000000000000000000000000000000..1ace37f796d1c875caecedc0c9de59af3e8a1e78 --- /dev/null +++ b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/hashes_backup.py @@ -0,0 +1,84 @@ +import hashlib +import json +import os.path + +import filelock + +# This is full copy of modules/hashes. This will be only loaded if compatibility issue happens due to version mismatch. 
+cache_filename = "cache.json" +cache_data = None +blksize = 1 << 20 + +def dump_cache(): + with filelock.FileLock(cache_filename+".lock"): + with open(cache_filename, "w", encoding="utf8") as file: + json.dump(cache_data, file, indent=4) + + +def cache(subsection): + global cache_data + + if cache_data is None: + with filelock.FileLock(cache_filename+".lock"): + if not os.path.isfile(cache_filename): + cache_data = {} + else: + with open(cache_filename, "r", encoding="utf8") as file: + cache_data = json.load(file) + + s = cache_data.get(subsection, {}) + cache_data[subsection] = s + + return s + + +def calculate_sha256(filename): + hash_sha256 = hashlib.sha256() + global blksize + with open(filename, "rb") as f: + for chunk in iter(lambda: f.read(blksize), b""): + hash_sha256.update(chunk) + + return hash_sha256.hexdigest() + + +def sha256_from_cache(filename, title): + hashes = cache("hashes") + ondisk_mtime = os.path.getmtime(filename) + + if title not in hashes: + return None + + cached_sha256 = hashes[title].get("sha256", None) + cached_mtime = hashes[title].get("mtime", 0) + + if ondisk_mtime > cached_mtime or cached_sha256 is None: + return None + + return cached_sha256 + + +def sha256(filename, title): + hashes = cache("hashes") + + sha256_value = sha256_from_cache(filename, title) + if sha256_value is not None: + return sha256_value + + print(f"Calculating sha256 for {filename}: ", end='') + sha256_value = calculate_sha256(filename) + print(f"{sha256_value}") + + hashes[title] = { + "mtime": os.path.getmtime(filename), + "sha256": sha256_value, + } + + dump_cache() + + return sha256_value + + + + + diff --git a/extensions/Hypernetwork-MonkeyPatch-Extension/patches/hnutil.py b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/hnutil.py new file mode 100644 index 0000000000000000000000000000000000000000..2e21c54473718c1e509cfe8cdc31168eac776deb --- /dev/null +++ b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/hnutil.py @@ -0,0 +1,46 @@ +import torch + +import modules.shared + + +def find_self(self): + for k, v in modules.shared.hypernetworks.items(): + if v == self: + return k + return None + + +def optim_to(optim:torch.optim.Optimizer, device="cpu"): + def inplace_move(obj: torch.Tensor, target): + if hasattr(obj, 'data'): + obj.data = obj.data.to(target) + if hasattr(obj, '_grad') and obj._grad is not None: + obj._grad.data = obj._grad.data.to(target) + if isinstance(optim, torch.optim.Optimizer): + for param in optim.state.values(): + if isinstance(param, torch.Tensor): + inplace_move(param, device) + elif isinstance(param, dict): + for subparams in param.values(): + inplace_move(subparams, device) + torch.cuda.empty_cache() + + +def parse_dropout_structure(layer_structure, use_dropout, last_layer_dropout): + if layer_structure is None: + layer_structure = [1, 2, 1] + if not use_dropout: + return [0] * len(layer_structure) + dropout_values = [0] + dropout_values.extend([0.3] * (len(layer_structure) - 3)) + if last_layer_dropout: + dropout_values.append(0.3) + else: + dropout_values.append(0) + dropout_values.append(0) + return dropout_values + + +def get_closest(val): + i, j = divmod(val,64) + return i*64 + (j!=0) * 64 \ No newline at end of file diff --git a/extensions/Hypernetwork-MonkeyPatch-Extension/patches/hypernetwork.py b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/hypernetwork.py new file mode 100644 index 0000000000000000000000000000000000000000..87483d8d991925e2ce781f4ac5c2711fdda374f7 --- /dev/null +++ 
b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/hypernetwork.py @@ -0,0 +1,572 @@ +import glob +import inspect +import os +import sys +import traceback + +import torch +from torch.nn.init import normal_, xavier_uniform_, zeros_, xavier_normal_, kaiming_uniform_, kaiming_normal_ + +try: + from modules.hashes import sha256 +except (ImportError, ModuleNotFoundError): + print("modules.hashes is not found, will use backup module from extension!") + from .hashes_backup import sha256 + +import modules.hypernetworks.hypernetwork +from modules import devices, shared, sd_models +from .hnutil import parse_dropout_structure, find_self +from .shared import version_flag + +def init_weight(layer, weight_init="Normal", normal_std=0.01, activation_func="relu"): + w, b = layer.weight.data, layer.bias.data + if weight_init == "Normal" or type(layer) == torch.nn.LayerNorm: + normal_(w, mean=0.0, std=normal_std) + normal_(b, mean=0.0, std=0) + elif weight_init == 'XavierUniform': + xavier_uniform_(w) + zeros_(b) + elif weight_init == 'XavierNormal': + xavier_normal_(w) + zeros_(b) + elif weight_init == 'KaimingUniform': + kaiming_uniform_(w, nonlinearity='leaky_relu' if 'leakyrelu' == activation_func else 'relu') + zeros_(b) + elif weight_init == 'KaimingNormal': + kaiming_normal_(w, nonlinearity='leaky_relu' if 'leakyrelu' == activation_func else 'relu') + zeros_(b) + else: + raise KeyError(f"Key {weight_init} is not defined as initialization!") + + +class ResBlock(torch.nn.Module): + """Residual Block""" + def __init__(self, n_inputs, n_outputs, activation_func, weight_init, add_layer_norm, dropout_p, normal_std, device=None, state_dict=None, **kwargs): + super().__init__() + self.n_outputs = n_outputs + self.upsample_layer = None + self.upsample = kwargs.get("upsample_model", None) + if self.upsample == "Linear": + self.upsample_layer = torch.nn.Linear(n_inputs, n_outputs, bias=False) + linears = [torch.nn.Linear(n_inputs, n_outputs)] + init_weight(linears[0], weight_init, normal_std, activation_func) + if add_layer_norm: + linears.append(torch.nn.LayerNorm(n_outputs)) + init_weight(linears[1], weight_init, normal_std, activation_func) + if dropout_p > 0: + linears.append(torch.nn.Dropout(p=dropout_p)) + if activation_func == "linear" or activation_func is None: + pass + elif activation_func in HypernetworkModule.activation_dict: + linears.append(HypernetworkModule.activation_dict[activation_func]()) + else: + raise RuntimeError(f'hypernetwork uses an unsupported activation function: {activation_func}') + self.linear = torch.nn.Sequential(*linears) + if state_dict is not None: + self.load_state_dict(state_dict) + if device is not None: + self.to(device) + + def trainables(self, train=False): + layer_structure = [] + for layer in self.linear: + if train: + layer.train() + else: + layer.eval() + if type(layer) == torch.nn.Linear or type(layer) == torch.nn.LayerNorm: + layer_structure += [layer.weight, layer.bias] + return layer_structure + + def forward(self, x, **kwargs): + if self.upsample_layer is None: + interpolated = torch.nn.functional.interpolate(x, size=self.n_outputs, mode="nearest-exact") + else: + interpolated = self.upsample_layer(x) + return interpolated + self.linear(x) + + + +class HypernetworkModule(torch.nn.Module): + multiplier = 1.0 + activation_dict = { + "linear": torch.nn.Identity, + "relu": torch.nn.ReLU, + "leakyrelu": torch.nn.LeakyReLU, + "elu": torch.nn.ELU, + "swish": torch.nn.Hardswish, + "tanh": torch.nn.Tanh, + "sigmoid": torch.nn.Sigmoid, + } + 
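+    # The hand-written table above is extended just below with every activation class that
+    # torch.nn.modules.activation exports, keyed by lowercased class name, so names such
+    # as "gelu" or "mish" also resolve without being listed explicitly.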
activation_dict.update({cls_name.lower(): cls_obj for cls_name, cls_obj in inspect.getmembers(torch.nn.modules.activation) if inspect.isclass(cls_obj) and cls_obj.__module__ == 'torch.nn.modules.activation'}) + + def __init__(self, dim, state_dict=None, layer_structure=None, activation_func=None, weight_init='Normal', + add_layer_norm=False, activate_output=False, dropout_structure=None, device=None, generation_seed=None, normal_std=0.01, **kwargs): + super().__init__() + self.skip_connection = skip_connection = kwargs.get('skip_connection', False) + upsample_linear = kwargs.get('upsample_linear', None) + assert layer_structure is not None, "layer_structure must not be None" + assert layer_structure[0] == 1, "Multiplier Sequence should start with size 1!" + assert layer_structure[-1] == 1, "Multiplier Sequence should end with size 1!" + # instead of throwing error, maybe try warning. first value is always not used. + if not (skip_connection or dropout_structure is None or dropout_structure[0] == dropout_structure[-1] == 0): + print("Dropout sequence does not starts or ends with zero.") + # assert skip_connection or dropout_structure is None or dropout_structure[0] == dropout_structure[-1] == 0, "Dropout Sequence should start and end with probability 0!" + assert dropout_structure is None or len(dropout_structure) == len(layer_structure), "Dropout Sequence should match length with layer structure!" + + linears = [] + if skip_connection: + if generation_seed is not None: + torch.manual_seed(generation_seed) + for i in range(len(layer_structure) - 1): + if skip_connection: + n_inputs, n_outputs = int(dim * layer_structure[i]), int(dim * layer_structure[i+1]) + dropout_p = dropout_structure[i+1] + if activation_func is None: + activation_func = "linear" + linears.append(ResBlock(n_inputs, n_outputs, activation_func, weight_init, add_layer_norm, dropout_p, normal_std, device, upsample_model=upsample_linear)) + continue + + # Add a fully-connected layer + linears.append(torch.nn.Linear(int(dim * layer_structure[i]), int(dim * layer_structure[i+1]))) + + # Add an activation func except last layer + if activation_func == "linear" or activation_func is None or (i >= len(layer_structure) - 2 and not activate_output): + pass + elif activation_func in self.activation_dict: + linears.append(self.activation_dict[activation_func]()) + else: + raise RuntimeError(f'hypernetwork uses an unsupported activation function: {activation_func}') + + # Add layer normalization + if add_layer_norm: + linears.append(torch.nn.LayerNorm(int(dim * layer_structure[i+1]))) + + # Everything should be now parsed into dropout structure, and applied here. + # Since we only have dropouts after layers, dropout structure should start with 0 and end with 0. + if dropout_structure is not None and dropout_structure[i+1] > 0: + assert 0 < dropout_structure[i+1] < 1, "Dropout probability should be 0 or float between 0 and 1!" + linears.append(torch.nn.Dropout(p=dropout_structure[i+1])) + # Code explanation : [1, 2, 1] -> dropout is missing when last_layer_dropout is false. [1, 2, 2, 1] -> [0, 0.3, 0, 0], when its True, [0, 0.3, 0.3, 0]. 
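+        # With skip_connection=True every stage built above is a ResBlock, and forward()
+        # (further below) applies the multiplier to the residual only,
+        # x + multiplier * (resnet(x) - x), so a multiplier of 0 leaves the input untouched.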
+ + self.linear = torch.nn.Sequential(*linears) + + if state_dict is not None: + self.fix_old_state_dict(state_dict) + self.load_state_dict(state_dict) + elif not skip_connection: + if generation_seed is not None: + torch.manual_seed(generation_seed) + for layer in self.linear: + if type(layer) == torch.nn.Linear or type(layer) == torch.nn.LayerNorm: + w, b = layer.weight.data, layer.bias.data + if weight_init == "Normal" or type(layer) == torch.nn.LayerNorm: + normal_(w, mean=0.0, std=normal_std) + normal_(b, mean=0.0, std=0) + elif weight_init == 'XavierUniform': + xavier_uniform_(w) + zeros_(b) + elif weight_init == 'XavierNormal': + xavier_normal_(w) + zeros_(b) + elif weight_init == 'KaimingUniform': + kaiming_uniform_(w, nonlinearity='leaky_relu' if 'leakyrelu' == activation_func else 'relu') + zeros_(b) + elif weight_init == 'KaimingNormal': + kaiming_normal_(w, nonlinearity='leaky_relu' if 'leakyrelu' == activation_func else 'relu') + zeros_(b) + else: + raise KeyError(f"Key {weight_init} is not defined as initialization!") + if device is None: + self.to(devices.device) + else: + self.to(device) + + + def fix_old_state_dict(self, state_dict): + changes = { + 'linear1.bias': 'linear.0.bias', + 'linear1.weight': 'linear.0.weight', + 'linear2.bias': 'linear.1.bias', + 'linear2.weight': 'linear.1.weight', + } + + for fr, to in changes.items(): + x = state_dict.get(fr, None) + if x is None: + continue + + del state_dict[fr] + state_dict[to] = x + + def forward(self, x, multiplier=None): + if self.skip_connection: + if self.training: + return self.linear(x) + else: + resnet_result = self.linear(x) + residual = resnet_result - x + if multiplier is None or not isinstance(multiplier, (int, float)): + multiplier = self.multiplier if not version_flag else HypernetworkModule.multiplier + return x + multiplier * residual # interpolate + if multiplier is None or not isinstance(multiplier, (int, float)): + return x + self.linear(x) * ((self.multiplier if not version_flag else HypernetworkModule.multiplier) if not self.training else 1) + return x + self.linear(x) * multiplier + + def trainables(self, train=False): + layer_structure = [] + self.train(train) + for layer in self.linear: + if train: + layer.train() + else: + layer.eval() + if type(layer) == torch.nn.Linear or type(layer) == torch.nn.LayerNorm: + layer_structure += [layer.weight, layer.bias] + elif type(layer) == ResBlock: + layer_structure += layer.trainables(train) + return layer_structure + + def set_train(self,mode=True): + self.train(mode) + for layer in self.linear: + if mode: + layer.train(mode) + else: + layer.eval() + + +class Hypernetwork: + filename = None + name = None + + def __init__(self, name=None, enable_sizes=None, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False, activate_output=False, **kwargs): + self.filename = None + self.name = name + self.layers = {} + self.step = 0 + self.sd_checkpoint = None + self.sd_checkpoint_name = None + self.layer_structure = layer_structure + self.activation_func = activation_func + self.weight_init = weight_init + self.add_layer_norm = add_layer_norm + self.use_dropout = use_dropout + self.activate_output = activate_output + self.last_layer_dropout = kwargs['last_layer_dropout'] if 'last_layer_dropout' in kwargs else True + self.optimizer_name = None + self.optimizer_state_dict = None + self.dropout_structure = kwargs['dropout_structure'] if 'dropout_structure' in kwargs and use_dropout else None + self.optional_info = 
kwargs.get('optional_info', None) + self.skip_connection = kwargs.get('skip_connection', False) + self.upsample_linear = kwargs.get('upsample_linear', None) + self.training = False + generation_seed = kwargs.get('generation_seed', None) + normal_std = kwargs.get('normal_std', 0.01) + if self.dropout_structure is None: + self.dropout_structure = parse_dropout_structure(self.layer_structure, self.use_dropout, self.last_layer_dropout) + + for size in enable_sizes or []: + self.layers[size] = ( + HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.weight_init, + self.add_layer_norm, self.activate_output, dropout_structure=self.dropout_structure, generation_seed=generation_seed, normal_std=normal_std, skip_connection=self.skip_connection, + upsample_linear=self.upsample_linear), + HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.weight_init, + self.add_layer_norm, self.activate_output, dropout_structure=self.dropout_structure, generation_seed=generation_seed, normal_std=normal_std, skip_connection=self.skip_connection, + upsample_linear=self.upsample_linear), + ) + self.eval() + + def weights(self, train=False): + self.training = train + res = [] + for k, layers in self.layers.items(): + for layer in layers: + res += layer.trainables(train) + return res + + def eval(self): + self.training = False + for k, layers in self.layers.items(): + for layer in layers: + layer.eval() + layer.set_train(False) + + def train(self, mode=True): + self.training = mode + for k, layers in self.layers.items(): + for layer in layers: + layer.set_train(mode) + + def detach_grad(self): + for k, layers in self.layers.items(): + for layer in layers: + layer.requires_grad_(False) + + def shorthash(self): + sha256v = sha256(self.filename, f'hypernet/{self.name}') + return sha256v[0:10] + + def extra_name(self): + if version_flag: + return "" + found = find_self(self) + if found is not None: + return f" <hypernet:{found}>" + return f" <hypernet:{self.name}>" + + def save(self, filename): + state_dict = {} + optimizer_saved_dict = {} + + for k, v in self.layers.items(): + state_dict[k] = (v[0].state_dict(), v[1].state_dict()) + + state_dict['step'] = self.step + state_dict['name'] = self.name + state_dict['layer_structure'] = self.layer_structure + state_dict['activation_func'] = self.activation_func + state_dict['is_layer_norm'] = self.add_layer_norm + state_dict['weight_initialization'] = self.weight_init + state_dict['sd_checkpoint'] = self.sd_checkpoint + state_dict['sd_checkpoint_name'] = self.sd_checkpoint_name + state_dict['activate_output'] = self.activate_output + state_dict['use_dropout'] = self.use_dropout + state_dict['dropout_structure'] = self.dropout_structure + state_dict['last_layer_dropout'] = (self.dropout_structure[-2] != 0) if self.dropout_structure is not None else self.last_layer_dropout + state_dict['optional_info'] = self.optional_info if self.optional_info else None + state_dict['skip_connection'] = self.skip_connection + state_dict['upsample_linear'] = self.upsample_linear + + if self.optimizer_name is not None: + optimizer_saved_dict['optimizer_name'] = self.optimizer_name + + torch.save(state_dict, filename) + if shared.opts.save_optimizer_state and self.optimizer_state_dict: + optimizer_saved_dict['hash'] = self.shorthash() # this is necessary + optimizer_saved_dict['optimizer_state_dict'] = self.optimizer_state_dict + torch.save(optimizer_saved_dict, filename + '.optim') + + def load(self, filename): + self.filename = filename + if self.name is None: + self.name = 
os.path.splitext(os.path.basename(filename))[0] + + state_dict = torch.load(filename, map_location='cpu') + + self.layer_structure = state_dict.get('layer_structure', [1, 2, 1]) + print(self.layer_structure) + optional_info = state_dict.get('optional_info', None) + if optional_info is not None: + self.optional_info = optional_info + self.activation_func = state_dict.get('activation_func', None) + self.weight_init = state_dict.get('weight_initialization', 'Normal') + self.add_layer_norm = state_dict.get('is_layer_norm', False) + self.dropout_structure = state_dict.get('dropout_structure', None) + self.use_dropout = True if self.dropout_structure is not None and any(self.dropout_structure) else state_dict.get('use_dropout', False) + self.activate_output = state_dict.get('activate_output', True) + self.last_layer_dropout = state_dict.get('last_layer_dropout', False) # Silent fix for HNs before 4918eb6 + self.skip_connection = state_dict.get('skip_connection', False) + self.upsample_linear = state_dict.get('upsample_linear', False) + # Dropout structure should have the same length as the layer structure; every value should be in [0, 1), and the last value must be 0. + if self.dropout_structure is None: + self.dropout_structure = parse_dropout_structure(self.layer_structure, self.use_dropout, self.last_layer_dropout) + if hasattr(shared.opts, 'print_hypernet_extra') and shared.opts.print_hypernet_extra: + if optional_info is not None: + print(f"INFO:\n {optional_info}\n") + print(f"Activation function is {self.activation_func}") + print(f"Weight initialization is {self.weight_init}") + print(f"Layer norm is set to {self.add_layer_norm}") + print(f"Dropout usage is set to {self.use_dropout}") + print(f"Activate last layer is set to {self.activate_output}") + print(f"Dropout structure is set to {self.dropout_structure}") + optimizer_saved_dict = torch.load(self.filename + '.optim', map_location='cpu') if os.path.exists(self.filename + '.optim') else {} + self.optimizer_name = state_dict.get('optimizer_name', 'AdamW') + + if optimizer_saved_dict.get('hash', None) == self.shorthash() or optimizer_saved_dict.get('hash', None) == sd_models.model_hash(filename): + self.optimizer_state_dict = optimizer_saved_dict.get('optimizer_state_dict', None) + else: + self.optimizer_state_dict = None + if self.optimizer_state_dict: + self.optimizer_name = optimizer_saved_dict.get('optimizer_name', 'AdamW') + print("Loaded existing optimizer from checkpoint") + print(f"Optimizer name is {self.optimizer_name}") + else: + print("No saved optimizer exists in checkpoint") + + for size, sd in state_dict.items(): + if type(size) == int: + self.layers[size] = ( + HypernetworkModule(size, sd[0], self.layer_structure, self.activation_func, self.weight_init, + self.add_layer_norm, self.activate_output, self.dropout_structure, skip_connection=self.skip_connection, upsample_linear=self.upsample_linear), + HypernetworkModule(size, sd[1], self.layer_structure, self.activation_func, self.weight_init, + self.add_layer_norm, self.activate_output, self.dropout_structure, skip_connection=self.skip_connection, upsample_linear=self.upsample_linear), + ) + + self.name = state_dict.get('name', self.name) + self.step = state_dict.get('step', 0) + self.sd_checkpoint = state_dict.get('sd_checkpoint', None) + self.sd_checkpoint_name = state_dict.get('sd_checkpoint_name', None) + self.eval() + + def to(self, device): + for k, layers in self.layers.items(): + for layer in layers: + layer.to(device) + + return self + + def set_multiplier(self, 
multiplier): + for k, layers in self.layers.items(): + for layer in layers: + layer.multiplier = multiplier + + return self + + def __call__(self, context, *args, **kwargs): + return self.forward(context, *args, **kwargs) + + def forward(self, context, context_v=None, layer=None): + context_layers = self.layers.get(context.shape[2], None) + if context_v is None: + context_v = context + if context_layers is None: + return context, context_v + if layer is not None and hasattr(layer, 'hyper_k') and hasattr(layer, 'hyper_v'): + layer.hyper_k = context_layers[0] + layer.hyper_v = context_layers[1] + transform_k, transform_v = context_layers[0](context), context_layers[1](context_v) + return transform_k, transform_v + + +def list_hypernetworks(path): + res = {} + for filename in sorted(glob.iglob(os.path.join(path, '**/*.pt'), recursive=True)): + name = os.path.splitext(os.path.basename(filename))[0] + base_name = name + idx = 0 + while name in res: + idx += 1 + name = f"{base_name}({idx})" # avoid compounding suffixes like "name(1)(2)" + # Prevent a hypothetical "None.pt" from being listed. + if name != "None": + res[name] = filename + for filename in glob.iglob(os.path.join(path, '**/*.hns'), recursive=True): + name = os.path.splitext(os.path.basename(filename))[0] + if name != "None": + res[name] = filename + return res + +def find_closest_first(keyset, target): + for keys in keyset: + if target == keys.rsplit('(', 1)[0]: + return keys + return None + + + +def load_hypernetwork(filename): + hypernetwork = None + path = shared.hypernetworks.get(filename, None) + if path is None: + filename = find_closest_first(shared.hypernetworks.keys(), filename) + path = shared.hypernetworks.get(filename, None) + print(path) + # Prevent any file named "None.pt" from being loaded. + if path is not None and filename != "None": + print(f"Loading hypernetwork {filename}") + if path.endswith(".pt"): + try: + hypernetwork = Hypernetwork() + hypernetwork.load(path) + if hasattr(shared, 'loaded_hypernetwork'): + shared.loaded_hypernetwork = hypernetwork + else: + return hypernetwork + + except Exception: + print(f"Error loading hypernetwork {path}", file=sys.stderr) + print(traceback.format_exc(), file=sys.stderr) + elif path.endswith(".hns"): + # Load Hypernetwork processing + try: + from .hypernetworks import load as load_hns + if hasattr(shared, 'loaded_hypernetwork'): + shared.loaded_hypernetwork = load_hns(path) + else: + hypernetwork = load_hns(path) + print(f"Loaded Hypernetwork Structure {path}") + return hypernetwork + except Exception: + print(f"Error loading hypernetwork processing file {path}", file=sys.stderr) + print(traceback.format_exc(), file=sys.stderr) + else: + print(f"Tried to load unknown file extension: {filename}") + else: + if hasattr(shared, 'loaded_hypernetwork'): + if shared.loaded_hypernetwork is not None: + print("Unloading hypernetwork") + shared.loaded_hypernetwork = None + return hypernetwork + + +def apply_hypernetwork(hypernetwork, context, layer=None): + if hypernetwork is None: + return context, context + if isinstance(hypernetwork, Hypernetwork): + hypernetwork_layers = (hypernetwork.layers if hypernetwork is not None else {}).get(context.shape[2], None) + if hypernetwork_layers is None: + return context, context + if layer is not None: + layer.hyper_k = hypernetwork_layers[0] + layer.hyper_v = hypernetwork_layers[1] + + context_k = devices.cond_cast_unet(hypernetwork_layers[0](devices.cond_cast_float(context))) + context_v = devices.cond_cast_unet(hypernetwork_layers[1](devices.cond_cast_float(context))) + return context_k, context_v + 
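To make the dispatch above concrete: `Hypernetwork.forward` selects a module pair by the conditioning width (`context.shape[2]`) and returns same-shaped key/value tensors, passing the input through untouched when no pair is registered for that width. A hypothetical invocation, runnable only inside the webui process (construction touches `modules.devices` and the class-level defaults defined earlier in this file):

```python
import torch

# Hypothetical instance; 768 is the SD1.x CLIP conditioning width.
hn = Hypernetwork(name="example", enable_sizes=[768],
                  layer_structure=[1, 2, 1], weight_init="Normal")
context = torch.randn(2, 77, 768)   # [batch, tokens, dim] conditioning
k, v = hn(context)                  # dispatches to the 768-wide module pair
assert k.shape == v.shape == context.shape

unmatched = torch.randn(2, 77, 1024)
k2, v2 = hn(unmatched)              # no pair for width 1024: passthrough
assert k2 is unmatched and v2 is unmatched
```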
context_k, context_v = hypernetwork(context, layer=layer) + return context_k, context_v + + +def apply_single_hypernetwork(hypernetwork, context_k, context_v, layer=None): + if hypernetwork is None: + return context_k, context_v + if isinstance(hypernetwork, Hypernetwork): + hypernetwork_layers = (hypernetwork.layers if hypernetwork is not None else {}).get(context_k.shape[2], None) + if hypernetwork_layers is None: + return context_k, context_v + if layer is not None: + layer.hyper_k = hypernetwork_layers[0] + layer.hyper_v = hypernetwork_layers[1] + + context_k = devices.cond_cast_unet(hypernetwork_layers[0](devices.cond_cast_float(context_k))) + context_v = devices.cond_cast_unet(hypernetwork_layers[1](devices.cond_cast_float(context_v))) + return context_k, context_v + context_k, context_v = hypernetwork(context_k, context_v, layer=layer) + return context_k, context_v + + +def apply_strength(value=None): + HypernetworkModule.multiplier = value if value is not None else shared.opts.sd_hypernetwork_strength + + +def apply_hypernetwork_strength(p, x, xs): + apply_strength(x) + + +modules.hypernetworks.hypernetwork.list_hypernetworks = list_hypernetworks +modules.hypernetworks.hypernetwork.load_hypernetwork = load_hypernetwork +if hasattr(modules.hypernetworks.hypernetwork, 'apply_hypernetwork'): + modules.hypernetworks.hypernetwork.apply_hypernetwork = apply_hypernetwork +else: + modules.hypernetworks.hypernetwork.apply_single_hypernetwork = apply_single_hypernetwork +if hasattr(modules.hypernetworks.hypernetwork, 'apply_strength'): + modules.hypernetworks.hypernetwork.apply_strength = apply_strength +modules.hypernetworks.hypernetwork.Hypernetwork = Hypernetwork +modules.hypernetworks.hypernetwork.HypernetworkModule = HypernetworkModule +try: + import scripts.xy_grid + if hasattr(scripts.xy_grid, 'apply_hypernetwork_strength'): + scripts.xy_grid.apply_hypernetwork_strength = apply_hypernetwork_strength +except (ModuleNotFoundError, ImportError): + pass + diff --git a/extensions/Hypernetwork-MonkeyPatch-Extension/patches/hypernetworks.py b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/hypernetworks.py new file mode 100644 index 0000000000000000000000000000000000000000..350fbb8e9ea9f4802d53d0c447cfbe8c3c8f2655 --- /dev/null +++ b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/hypernetworks.py @@ -0,0 +1,329 @@ +import ast +import os.path + +import torch + +from modules import devices, shared +from .hnutil import find_self +from .shared import version_flag + +lazy_load = False # when this is enabled, HNs are moved to the GPU only when required and parked on the CPU otherwise. +if not hasattr(devices, 'cond_cast_unet'): + raise RuntimeError("Cannot find cond_cast_unet attribute, please update your webui version!") + + +class DynamicDict(dict): # Brief dict that dynamically unloads Hypernetworks if required. + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.current = None + self.current_key = None + self.hash = None + self.dict = {**kwargs} + + def prepare(self, key, value): + # Compare against the stored key; self.current holds the Hypernetwork object itself, + # so comparing the key against it directly would always mismatch. + if lazy_load and self.current is not None and ( + key != self.current_key): # or filename is identical, but somehow hash is changed? + self.current.to('cpu') + self.current = value + self.current_key = key + if self.current is not None: + self.current.to(devices.device) + + def __getitem__(self, item): + value = self.dict[item] + self.prepare(item, value) + return value + + def __setitem__(self, key, value): + if key in self.dict: + return + self.dict[key] = value + + def __contains__(self, item): + return item in self.dict + + +available_opts = DynamicDict() # string -> HN itself.
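A short sketch of the behaviour `DynamicDict` is there for: with `lazy_load` off, `__getitem__` simply moves whatever it returns onto `devices.device`; once `lazy_load` is flipped on (which `SingularForward` below does after a failed load), the previously fetched hypernetwork is parked on the CPU before the next one is moved to the GPU, so at most one stays resident. `hn_a` and `hn_b` are hypothetical loaded `Hypernetwork` instances:

```python
# Illustrative only; assumes hn_a and hn_b are loaded Hypernetwork objects.
available_opts["model_a"] = hn_a    # cached; __setitem__ ignores re-assignment
available_opts["model_b"] = hn_b

_ = available_opts["model_a"]       # hn_a -> devices.device
# ... suppose lazy_load has been set to True in the meantime ...
_ = available_opts["model_b"]       # hn_a -> 'cpu', then hn_b -> devices.device
```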
+ + +# Behavior definition. +# [[], [], []] -> sequential processing +# [{"A" : 0.8, "B" : 0.1}] -> parallel processing with a weighted sum; in this case, A = 8/9 effect, B = 1/9 effect. +# [("A", 0.2), ("B", 0.4)] -> a tuple is used to specify strength. +# [{"A", "B", "C"}] -> parallel, with equal weights (set) +# ["A", "B", []] -> sequential processing +# [{"A":0.6}, "B", "C"] -> sequential; a dict with a single value is treated as a strength modification. +# [["A"], {"B"}, "C"] -> singletons are equal to items without covers; nested singletons will not be parsed, because it's inefficient. +# {{'Aa' : 0.2, 'Ab' : 0.8} : 0.8, 'B' : 0.1} (X) -> {"{'Aa' : 0.2, 'Ab' : 0.8}" : 0.8, 'B' : 0.1} (O). When you want complex setups in parallel, you need to cover them with "". You can use backslash too. + + +# Testing parsing function. + +def test_parsing(string=None): + def test(arg): + print(arg) + try: + obj = str(Forward.parse(arg)) + print(obj) + except Exception as e: + print(e) + + if string: + test(string) + else: + for strings in ["[[], [], []]", "[{\"A\" : 0.8, \"B\" : 0.1}]", '[("A", 0.2), ("B", 0.4)]', '[{"A", "B", "C"}]', + '[{"A":0.6}, "B", "C"]', '[["A"], {"B"}, "C"]', + '{"{\'Aa\' : 0.2, \'Ab\' : 0.8}" : 0.8, \'B\' : 0.1}']: + test(strings) + + +class Forward: + def __init__(self, **kwargs): + self.name = "defaultForward" if 'name' not in kwargs else kwargs['name'] + + def __call__(self, *args, **kwargs): + raise NotImplementedError + + def set_multiplier(self, *args, **kwargs): + pass + + def extra_name(self): + if version_flag: + return "" + found = find_self(self) + if found is not None: + return f" <hypernet:{found}>" + return f" <hypernet:{self.name}>" + + @staticmethod + def parse(arg, name=None): + arg = Forward.unpack(arg) + arg = Forward.eval(arg) + if Forward.isSingleTon(arg): + return SingularForward(*Forward.parseSingleTon(arg)) + elif Forward.isParallel(arg): + return ParallelForward(Forward.parseParallel(arg), name=name) + elif Forward.isSequential(arg): + return SequentialForward(Forward.parseSequential(arg), name=name) + raise ValueError(f"Cannot parse {arg} into sequences!") + + @staticmethod + def unpack(arg): # stop using ({({{((a))}})}) please + if len(arg) == 1 and type(arg) in (set, list, tuple): + return Forward.unpack(list(arg)[0]) + if len(arg) == 1 and type(arg) is dict: + key = list(arg.keys())[0] + if arg[key] == 1: + return Forward.unpack(key) + return arg + + @staticmethod + def eval(arg): # from a string like "{...}", parse into the corresponding Python object. + if arg is None: + raise ValueError("None cannot be evaluated!") + try: + newarg = ast.literal_eval(arg) + if type(arg) is str and arg.startswith(("{", "[", "(")) and newarg is not None: + if not newarg: + raise RuntimeError(f"Cannot eval false object {arg}!") + return newarg + except (ValueError, SyntaxError): # literal_eval raises SyntaxError for plain names such as "my hypernet" + return arg + return arg + + @staticmethod + def isSingleTon(arg): # Very strict. This applies strength to HN, which cannot happen in combined networks. Only weighting is allowed in complex process. + if type(arg) is str and not arg.startswith(('[', '(', '{')): # Strict. only accept str + return True + elif type(arg) is dict: # Strict. only accept {str : int/float} - Strength modification can only happen for str.
+ return len(arg) == 1 and all(type(value) in (int, float) for value in arg.values()) and all(type(k) is str for k in arg) + elif type(arg) in (list, set): + return len(arg) == 1 and all(type(x) is str for x in arg) + elif type(arg) is tuple: + return len(arg) == 2 and type(arg[0]) is str and type(arg[1]) in (int, float) + return False + + @staticmethod + def parseSingleTon(sequence): # accepts sequence, returns str, float pair. This is Strict. + if type(sequence) in (list, dict, set): + assert len(sequence) == 1, f"SingularForward only accepts singletons, but given {sequence}!" + key = list(sequence)[0] + if type(sequence) is dict: + assert type(key) is str, f"Strength modification only accepts single Hypernetwork, but given {key}!" + return key, sequence[key] + else: + # key is already the single string element; taking list(key)[0] here would truncate the name to its first character. + return key, 1 + elif type(sequence) is tuple: + assert len(sequence) == 2, f"Tuple with non-couple {sequence} encountered in SingularForward!" + assert type(sequence[0]) is str, f"Strength modification only accepts single Hypernetwork, but given {sequence[0]}!" + assert type(sequence[1]) in (int, float), f"Strength tuple only accepts Numbers, but given {sequence[1]}!" + return sequence[0], sequence[1] + else: + assert type(sequence) is str, f"Strength modification only accepts single Hypernetwork, but given {sequence}!" + return sequence, 1 + + @staticmethod + def isParallel(arg): # Parallel or Sequential processing is not strict; it can have {"string-covered sequence or plain HN name" : weight, ... + if type(arg) in (dict, set) and len(arg) > 1: + # Return plain booleans here; returning (bool, message) tuples would make the result always truthy. parseParallel() raises the detailed errors. + if type(arg) is set: + return all(type(key) is str for key in arg) + else: + arg: dict + return all(type(key) is str for key in arg.keys()) + else: + return False + + @staticmethod + def parseParallel(sequence): # accepts sequence, returns {"Name or sequence" : weight...} + assert len(sequence) > 1, f"Length of sequence {sequence} was not enough for parallel!" + if type(sequence) is set: # only allows hashable types. otherwise it should be supplied as string cover + assert all(type(key) in (str, tuple) for key in sequence), f"All keys should be Hypernetwork Name/Sequence for Set but given :{sequence}" + return {key: 1 / len(sequence) for key in sequence} + elif type(sequence) is dict: + assert all(type(key) in (str, tuple) for key in sequence.keys()), f"All keys should be Hypernetwork Name/Sequence for Dict but given :{sequence}" + assert all(type(value) in (int, float) for value in sequence.values()), f"All values should be int/float for Dict but given :{sequence}" + return sequence + else: + raise ValueError(f"Cannot parse parallel sequence {sequence}!") + + @staticmethod + def isSequential(arg): + if type(arg) is list and len(arg) > 0: + return True + return False + + @staticmethod + def parseSequential(sequence): # accepts sequence, only checks that it's a list, then returns the sequence.
+ if type(sequence) is list and len(sequence) > 0: + return sequence + else: + raise ValueError(f"Cannot parse non-list sequence {sequence}!") + + def shorthash(self): + return '0000000000' + +from .hypernetwork import Hypernetwork + + +def find_non_hash_key(target): + closest = [x for x in shared.hypernetworks if x.rsplit('(', 1)[0] == target or x == target] + if closest: + return shared.hypernetworks[closest[0]] + raise KeyError(f"{target} is not found in Hypernetworks!") + + +class SingularForward(Forward): + + def __init__(self, processor, strength): + assert processor != 'defaultForward', "Cannot use name defaultForward!" + super(SingularForward, self).__init__() + self.name = processor + self.processor = processor + self.strength = strength + # parse. We expect parsing Singletons or a (k, v) pair here, which is the HN name and strength. + hn = Hypernetwork() + try: + hn.load(find_non_hash_key(self.processor)) + except RuntimeError: # most likely a CUDA out-of-memory error; retry with lazy (CPU-offloaded) loading + global lazy_load + lazy_load = True + print("Encountered a CUDA memory error; HNs will be unloaded on demand, speed might go down severely!") + hn.load(find_non_hash_key(self.processor)) + available_opts[self.processor] = hn + # assert self.processor in available_opts, f"Hypernetwork named {processor} is not ready!" + assert 0 <= self.strength <= 1, "Strength must be between 0 and 1!" + print(f"SingularForward <{self.name}, {self.strength}>") + + def __call__(self, context_k, context_v=None, layer=None): + if self.processor in available_opts: + context_layers = available_opts[self.processor].layers.get(context_k.shape[2], None) + if context_v is None: + context_v = context_k + if context_layers is None: + return context_k, context_v + #if layer is not None and hasattr(layer, 'hyper_k') and hasattr(layer, 'hyper_v'): + # layer.hyper_k = context_layers[0], layer.hyper_v = context_layers[1] + return devices.cond_cast_unet(context_layers[0](devices.cond_cast_float(context_k), multiplier=self.strength)),\ + devices.cond_cast_unet(context_layers[1](devices.cond_cast_float(context_v), multiplier=self.strength)) + # define forward_strength, which invokes HNModule with specified strength. + # Note : we share the same HN if it is referenced multiple times, which means you might not be able to train it via this structure.
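To make the string DSL from the behavior-definition comment concrete, here is what `Forward.parse` produces for a few representative inputs. The names are hypothetical: this assumes hypernetworks called "A" and "B" exist in `shared.hypernetworks`, and note that constructing a `SingularForward` immediately loads the underlying file into `available_opts`:

```python
Forward.parse('("A", 0.5)')            # SingularForward: HN "A" at strength 0.5
Forward.parse('["A", "B"]')            # SequentialForward: "A" first, then "B"
Forward.parse('{"A": 0.8, "B": 0.2}')  # ParallelForward; weights are divided by
                                       # their sum, so they stay 0.8 / 0.2 here
Forward.parse('[{"A", "B"}]')          # SequentialForward wrapping a single
                                       # ParallelForward with 0.5 / 0.5 weights
```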
+ raise KeyError(f"Key {self.processor} is not found in cached Hypernetworks!") + + def __str__(self): + return "SingularForward>" + str(self.processor) + + +class ParallelForward(Forward): + + def __init__(self, sequence, name=None): + self.name = "ParallelForwardHypernet" if name is None else name + self.callers = {} + self.weights = {} + super(ParallelForward, self).__init__() + # parse + for keys in sequence: + self.callers[keys] = Forward.parse(keys) + self.weights[keys] = sequence[keys] / sum(sequence.values()) + print(str(self)) + + def __call__(self, context, context_v=None, layer=None): + ctx_k, ctx_v = torch.zeros_like(context, device=context.device), torch.zeros_like(context, + device=context.device) + for key in self.callers: + k, v = self.callers[key](context, context_v, layer=layer) + ctx_k += k * self.weights[key] + ctx_v += v * self.weights[key] + return ctx_k, ctx_v + + def __str__(self): + return "ParallelForward>" + str({str(k): str(v) for (k, v) in self.callers.items()}) + + +class SequentialForward(Forward): + def __init__(self, sequence, name=None): + self.name = "SequentialForwardHypernet" if name is None else name + self.callers = [] + super(SequentialForward, self).__init__() + for keys in sequence: + self.callers.append(Forward.parse(keys)) + print(str(self)) + + def __call__(self, context, context_v=None, layer=None): + if context_v is None: + context_v = context + for keys in self.callers: + context, context_v = keys(context, context_v, layer=layer) + return context, context_v + + def __str__(self): + return "SequentialForward>" + str([str(x) for x in self.callers]) + + +class EmptyForward(Forward): + def __init__(self): + super().__init__() + self.name = None + + def __call__(self, context, context_v=None, layer=None): + if context_v is None: + context_v = context + return context, context_v + + def __str__(self): + return "EmptyForward" + + +def load(filename): + with open(filename, 'r') as file: + return Forward.parse(file.read(), name=os.path.basename(filename)) diff --git a/extensions/Hypernetwork-MonkeyPatch-Extension/patches/scheduler.py b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/scheduler.py new file mode 100644 index 0000000000000000000000000000000000000000..015123f6e7242530ce2b86d542b906afe6ca6923 --- /dev/null +++ b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/scheduler.py @@ -0,0 +1,110 @@ +import math +from torch.optim.lr_scheduler import _LRScheduler + + +class CosineAnnealingWarmUpRestarts(_LRScheduler): + # see https://github.com/katsura-jp/pytorch-cosine-annealing-with-warmup + """ + optimizer (Optimizer): Wrapped optimizer. + first_cycle_steps (int): First cycle step size. + cycle_mult(float): Cycle steps magnification. Default: -1. + max_lr(float): First cycle's max learning rate. Default: 0.1. + min_lr(float): Min learning rate. Default: 0.001. + warmup_steps(int): Linear warmup step size. Default: 0. + gamma(float): Decrease rate of max learning rate by cycle. Default: 1. + last_epoch (int): The index of last epoch. Default: -1. 
+ """ + + def __init__(self, + optimizer, + first_cycle_steps: int, + cycle_mult: float = 1., + max_lr: float = 0.1, + min_lr: float = 0.001, + warmup_steps: int = 0, + gamma: float = 1., + last_epoch: int = -1 + ): + assert warmup_steps < first_cycle_steps + + self.first_cycle_steps = first_cycle_steps # first cycle step size + self.cycle_mult = cycle_mult # cycle steps magnification + self.base_max_lr = max_lr # first max learning rate + self.max_lr = max_lr # max learning rate in the current cycle + self.min_lr = min_lr # min learning rate + self.warmup_steps = warmup_steps # warmup step size + self.gamma = gamma # decrease rate of max learning rate by cycle + + self.cur_cycle_steps = first_cycle_steps # first cycle step size + self.cycle = 0 # cycle count + self.step_in_cycle = last_epoch # step size of the current cycle + + super(CosineAnnealingWarmUpRestarts, self).__init__(optimizer, last_epoch) + + # set learning rate min_lr + self.init_lr() + + def init_lr(self): + self.base_lrs = [] + for param_group in self.optimizer.param_groups: + param_group['lr'] = self.min_lr + self.base_lrs.append(self.min_lr) + + def get_lr(self): + if self.step_in_cycle == -1: + return self.base_lrs + elif self.step_in_cycle < self.warmup_steps: + return [(self.max_lr - base_lr) * self.step_in_cycle / self.warmup_steps + base_lr for base_lr in + self.base_lrs] + else: + return [base_lr + (self.max_lr - base_lr) \ + * (1 + math.cos(math.pi * (self.step_in_cycle - self.warmup_steps) / (self.cur_cycle_steps - self.warmup_steps))) / 2 + for base_lr in self.base_lrs] + + def step(self, epoch=None): + if epoch is None: + epoch = self.last_epoch + 1 + self.step_in_cycle = self.step_in_cycle + 1 + if self.step_in_cycle >= self.cur_cycle_steps: + self.cycle += 1 + self.step_in_cycle = self.step_in_cycle - self.cur_cycle_steps + self.cur_cycle_steps = int( + (self.cur_cycle_steps - self.warmup_steps) * self.cycle_mult) + self.warmup_steps + else: + if epoch >= self.first_cycle_steps: + if self.cycle_mult == 1.: + self.step_in_cycle = epoch % self.first_cycle_steps + self.cycle = epoch // self.first_cycle_steps + else: + n = int(math.log((epoch / self.first_cycle_steps * (self.cycle_mult - 1) + 1), self.cycle_mult)) + self.cycle = n + self.step_in_cycle = epoch - int( + self.first_cycle_steps * (self.cycle_mult ** n - 1) / (self.cycle_mult - 1)) + self.cur_cycle_steps = self.first_cycle_steps * self.cycle_mult ** (n) + else: + self.cur_cycle_steps = self.first_cycle_steps + self.step_in_cycle = epoch + + self.max_lr = self.base_max_lr * (self.gamma ** self.cycle) + self.last_epoch = math.floor(epoch) + for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()): + param_group['lr'] = lr + + def is_EOC(self, epoch=None): + saved_cycle = self.cycle + expect_cycle = saved_cycle + step_in_cycle_2 = self.step_in_cycle + cur_cycle_step_2 = self.cur_cycle_steps + if epoch is None: + step_in_cycle_2 = step_in_cycle_2 + 1 + if step_in_cycle_2 >= cur_cycle_step_2: + expect_cycle += 1 + else: + if epoch >= self.first_cycle_steps: + if self.cycle_mult == 1.: + expect_cycle = epoch // self.first_cycle_steps + else: + n = int(math.log((epoch / self.first_cycle_steps * (self.cycle_mult - 1) + 1), self.cycle_mult)) + expect_cycle = n + ''' returns if current cycle is end of cycle''' + return expect_cycle > saved_cycle diff --git a/extensions/Hypernetwork-MonkeyPatch-Extension/patches/shared.py b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/shared.py new file mode 100644 index 
0000000000000000000000000000000000000000..457bfe2ee37a54e8fbe10570e2d2bd5c8e5150a2 --- /dev/null +++ b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/shared.py @@ -0,0 +1,17 @@ + +from modules.shared import cmd_opts, opts +import modules.shared + +version_flag = hasattr(modules.shared, 'loaded_hypernetwork') + +def reload_hypernetworks(): + from .hypernetwork import list_hypernetworks, load_hypernetwork + modules.shared.hypernetworks = list_hypernetworks(cmd_opts.hypernetwork_dir) + if hasattr(modules.shared, 'loaded_hypernetwork'): + load_hypernetwork(opts.sd_hypernetwork) + + +try: + modules.shared.reload_hypernetworks = reload_hypernetworks +except Exception: + pass diff --git a/extensions/Hypernetwork-MonkeyPatch-Extension/patches/tbutils.py b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/tbutils.py new file mode 100644 index 0000000000000000000000000000000000000000..6454ab4ecf71d4f7a136c69064b75fcf3030498c --- /dev/null +++ b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/tbutils.py @@ -0,0 +1,69 @@ +import os + +import numpy as np +import torch +from torch.utils.tensorboard import SummaryWriter + +from modules import shared + + +def tensorboard_setup(log_directory): + os.makedirs(os.path.join(log_directory, "tensorboard"), exist_ok=True) + return SummaryWriter( + log_dir=os.path.join(log_directory, "tensorboard"), + flush_secs=shared.opts.training_tensorboard_flush_every) + +def tensorboard_log_hyperparameter(tensorboard_writer: SummaryWriter, **kwargs): + for keys in kwargs: + if type(kwargs[keys]) not in (bool, str, float, int, type(None)): # type(None), not None, belongs in a list of types + kwargs[keys] = str(kwargs[keys]) + tensorboard_writer.add_hparams({ + 'lr' : kwargs.get('lr', 0.01), + 'GA steps' : kwargs.get('GA_steps', 1), + 'bsize' : kwargs.get('batch_size', 1), + 'layer structure' : kwargs.get('layer_structure', '1,2,1'), + 'activation' : kwargs.get('activation', 'Linear'), + 'weight_init' : kwargs.get('weight_init', 'Normal'), + 'dropout_structure' : kwargs.get('dropout_structure', '0,0,0'), + 'steps' : kwargs.get('max_steps', 10000), + 'latent sampling': kwargs.get('latent_sampling_method', 'once'), + 'template file': kwargs.get('template', 'nothing'), + 'CosineAnnealing' : kwargs.get('CosineAnnealing', False), + 'beta_repeat epoch': kwargs.get('beta_repeat_epoch', 0), + 'epoch_mult':kwargs.get('epoch_mult', 1), + 'warmup_step' : kwargs.get('warmup', 5), + 'min_lr' : kwargs.get('min_lr', 6e-7), + 'decay' : kwargs.get('gamma_rate', 1), + 'adamW' : kwargs.get('adamW_opts', False), + 'adamW_decay' : kwargs.get('adamW_decay', 0.01), + 'adamW_beta1' : kwargs.get('adamW_beta_1', 0.9), + 'adamW_beta2': kwargs.get('adamW_beta_2', 0.99), + 'adamW_eps': kwargs.get('adamW_eps', 1e-8), + 'gradient_clip_opt':kwargs.get('gradient_clip', 'None'), + 'gradient_clip_value' : kwargs.get('gradient_clip_value', 1e-1), + 'gradient_clip_norm' : kwargs.get('gradient_clip_norm_type', 2) + }, + {'hparam/loss' : kwargs.get('loss', 0.0)} + ) +def tensorboard_add(tensorboard_writer: SummaryWriter, loss, global_step, step, learn_rate, epoch_num, base_name=""): + prefix = base_name + "/" if base_name else "" + tensorboard_add_scaler(tensorboard_writer, prefix+"Loss/train", loss, global_step) + tensorboard_add_scaler(tensorboard_writer, prefix+f"Loss/train/epoch-{epoch_num}", loss, step) + tensorboard_add_scaler(tensorboard_writer, prefix+"Learn rate/train", learn_rate, global_step) + tensorboard_add_scaler(tensorboard_writer, prefix+f"Learn rate/train/epoch-{epoch_num}", learn_rate, step) + + +def 
tensorboard_add_scaler(tensorboard_writer: SummaryWriter, tag, value, step): + tensorboard_writer.add_scalar(tag=tag, + scalar_value=value, global_step=step) + + +def tensorboard_add_image(tensorboard_writer: SummaryWriter, tag, pil_image, step, base_name=""): + # Convert a pil image to a torch tensor + prefix = base_name + "/" if base_name else "" + img_tensor = torch.as_tensor(np.array(pil_image, copy=True)) + img_tensor = img_tensor.view(pil_image.size[1], pil_image.size[0], + len(pil_image.getbands())) + img_tensor = img_tensor.permute((2, 0, 1)) + + tensorboard_writer.add_image(prefix+tag, img_tensor, global_step=step) diff --git a/extensions/Hypernetwork-MonkeyPatch-Extension/patches/textual_inversion.py b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/textual_inversion.py new file mode 100644 index 0000000000000000000000000000000000000000..ffe0e6af9cd081e6a37453d9064b02f31d0dc947 --- /dev/null +++ b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/textual_inversion.py @@ -0,0 +1,48 @@ +import csv +import os + +import modules.textual_inversion.textual_inversion +from modules import shared + +delayed_values = {} + + +def write_loss(log_directory, filename, step, epoch_len, values): + if shared.opts.training_write_csv_every == 0: + return + + if (step + 1) % shared.opts.training_write_csv_every != 0: + return + write_csv_header = not os.path.exists(os.path.join(log_directory, filename)) + try: + with open(os.path.join(log_directory, filename), "a+", newline='') as fout: + csv_writer = csv.DictWriter(fout, fieldnames=["step", "epoch", "epoch_step", *(values.keys())]) + + if write_csv_header: + csv_writer.writeheader() + if log_directory + filename in delayed_values: + delayed = delayed_values[log_directory + filename] + # Use distinct loop names so the caller's step/values are not clobbered for the row written below. + for d_step, d_epoch, d_epoch_step, d_values in delayed: + csv_writer.writerow({ + "step": d_step, + "epoch": d_epoch, + "epoch_step": d_epoch_step + 1, + **d_values, + }) + delayed.clear() + epoch = step // epoch_len + epoch_step = step % epoch_len + csv_writer.writerow({ + "step": step + 1, + "epoch": epoch, + "epoch_step": epoch_step + 1, + **values, + }) + except OSError: + epoch, epoch_step = divmod(step, epoch_len) + if log_directory + filename in delayed_values: + delayed_values[log_directory + filename].append((step + 1, epoch, epoch_step, values)) + else: + delayed_values[log_directory + filename] = [(step+1, epoch, epoch_step, values)] + +modules.textual_inversion.textual_inversion.write_loss = write_loss \ No newline at end of file diff --git a/extensions/Hypernetwork-MonkeyPatch-Extension/patches/ui.py b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/ui.py new file mode 100644 index 0000000000000000000000000000000000000000..a912caef96ca48ad51eb7244f8728877d2ddd3cb --- /dev/null +++ b/extensions/Hypernetwork-MonkeyPatch-Extension/patches/ui.py @@ -0,0 +1,78 @@ +import os + +from modules import shared +from .hypernetwork import Hypernetwork, load_hypernetwork + + +def create_hypernetwork_load(name, enable_sizes, overwrite_old, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False, dropout_structure=None, optional_info=None, + weight_init_seed=None, normal_std=0.01, skip_connection=False): + # Remove illegal characters from name. + name = "".join(x for x in name if (x.isalnum() or x in "._- ")) + assert name, "Name cannot be empty!"
+ fn = os.path.join(shared.cmd_opts.hypernetwork_dir, f"{name}.pt") + if not overwrite_old: + assert not os.path.exists(fn), f"file {fn} already exists" + + if type(layer_structure) == str: + layer_structure = [float(x.strip()) for x in layer_structure.split(",")] + + if dropout_structure and type(dropout_structure) == str: + dropout_structure = [float(x.strip()) for x in dropout_structure.split(",")] + normal_std = float(normal_std) + assert normal_std > 0, "Normal Standard Deviation should be bigger than 0!" + hypernet = Hypernetwork( + name=name, + enable_sizes=[int(x) for x in enable_sizes], + layer_structure=layer_structure, + activation_func=activation_func, + weight_init=weight_init, + add_layer_norm=add_layer_norm, + use_dropout=use_dropout, + dropout_structure=dropout_structure if use_dropout and dropout_structure else [0] * len(layer_structure), + optional_info=optional_info, + generation_seed=weight_init_seed if weight_init_seed != -1 else None, + normal_std=normal_std, + skip_connection=skip_connection + ) + hypernet.save(fn) + shared.reload_hypernetworks() + hypernet = load_hypernetwork(name) + assert hypernet is not None, f"Cannot load from {name}!" + return hypernet + + +def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False, dropout_structure=None, optional_info=None, + weight_init_seed=None, normal_std=0.01, skip_connection=False): + # Remove illegal characters from name. + name = "".join(x for x in name if (x.isalnum() or x in "._- ")) + assert name, "Name cannot be empty!" + fn = os.path.join(shared.cmd_opts.hypernetwork_dir, f"{name}.pt") + if not overwrite_old: + assert not os.path.exists(fn), f"file {fn} already exists" + + if type(layer_structure) == str: + layer_structure = [float(x.strip()) for x in layer_structure.split(",")] + + if dropout_structure and type(dropout_structure) == str: + dropout_structure = [float(x.strip()) for x in dropout_structure.split(",")] + normal_std = float(normal_std) + assert normal_std > 0, "Normal Standard Deviation should be bigger than 0!"
+ hypernet = Hypernetwork( + name=name, + enable_sizes=[int(x) for x in enable_sizes], + layer_structure=layer_structure, + activation_func=activation_func, + weight_init=weight_init, + add_layer_norm=add_layer_norm, + use_dropout=use_dropout, + dropout_structure=dropout_structure if use_dropout and dropout_structure else [0] * len(layer_structure), + optional_info=optional_info, + generation_seed=weight_init_seed if weight_init_seed != -1 else None, + normal_std=normal_std, + skip_connection=skip_connection + ) + hypernet.save(fn) + + shared.reload_hypernetworks() + + return name, f"Created: {fn}", "" diff --git a/extensions/Hypernetwork-MonkeyPatch-Extension/scripts/__pycache__/hypernetwork-extensions.cpython-310.pyc b/extensions/Hypernetwork-MonkeyPatch-Extension/scripts/__pycache__/hypernetwork-extensions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9f9cffda966c49faca2955770f0dd133c6b4d65c Binary files /dev/null and b/extensions/Hypernetwork-MonkeyPatch-Extension/scripts/__pycache__/hypernetwork-extensions.cpython-310.pyc differ diff --git a/extensions/Hypernetwork-MonkeyPatch-Extension/scripts/hypernetwork-extensions.py b/extensions/Hypernetwork-MonkeyPatch-Extension/scripts/hypernetwork-extensions.py new file mode 100644 index 0000000000000000000000000000000000000000..5d9f7581f5b8943d26d9dd3a0ffe73b3f3e2155f --- /dev/null +++ b/extensions/Hypernetwork-MonkeyPatch-Extension/scripts/hypernetwork-extensions.py @@ -0,0 +1,150 @@ +import os + +from modules.call_queue import wrap_gradio_call +from modules.hypernetworks.ui import keys +import modules.scripts as scripts +from modules import script_callbacks, shared, sd_hijack +import gradio as gr + +from modules.paths import script_path +from modules.ui import create_refresh_button, gr_show +import patches.clip_hijack as clip_hijack +import patches.textual_inversion as textual_inversion +import patches.ui as ui +import patches.shared as shared_patch +import patches.external_pr.ui as external_patch_ui +from webui import wrap_gradio_gpu_call + +setattr(shared.opts,'pin_memory', False) + +def create_extension_tab(params=None): + with gr.Tab(label="Create Beta hypernetwork") as create_beta: + new_hypernetwork_name = gr.Textbox(label="Name") + new_hypernetwork_sizes = gr.CheckboxGroup(label="Modules", value=["768", "320", "640", "1024", "1280"], + choices=["768", "320", "640", "1024", "1280"]) + new_hypernetwork_layer_structure = gr.Textbox("1, 2, 1", label="Enter hypernetwork layer structure", + placeholder="1st and last digit must be 1. ex:'1, 2, 1'") + new_hypernetwork_activation_func = gr.Dropdown(value="linear", + label="Select activation function of hypernetwork. Recommended : Swish / Linear(none)", + choices=keys) + new_hypernetwork_initialization_option = gr.Dropdown(value="Normal", + label="Select Layer weights initialization. 
Recommended: Kaiming for relu-like, Xavier for sigmoid-like, Normal otherwise", + choices=["Normal", "KaimingUniform", "KaimingNormal", + "XavierUniform", "XavierNormal"]) + show_additional_options = gr.Checkbox( + label='Show advanced options') + with gr.Row(visible=False) as weight_options: + generation_seed = gr.Number(label='Weight initialization seed, set -1 for default', value=-1, precision=0) + normal_std = gr.Textbox(label="Standard Deviation for Normal weight initialization", placeholder="must be positive float", value="0.01") + show_additional_options.change( + fn=lambda show: gr_show(show), + inputs=[show_additional_options], + outputs=[weight_options],) + new_hypernetwork_add_layer_norm = gr.Checkbox(label="Add layer normalization") + new_hypernetwork_use_dropout = gr.Checkbox( + label="Use dropout. Might improve training when dataset is small / limited.") + new_hypernetwork_dropout_structure = gr.Textbox("0, 0, 0", + label="Enter hypernetwork Dropout structure (or empty). Recommended : 0~0.35 incrementing sequence: 0, 0.05, 0.15", + placeholder="1st and last digit must be 0 and values should be between 0 and 1. ex:'0, 0.01, 0'") + skip_connection = gr.Checkbox(label="Use skip-connection. Won't work without extension!") + optional_info = gr.Textbox("", label="Optional information about Hypernetwork", placeholder="Training information, dataset, etc.") + overwrite_old_hypernetwork = gr.Checkbox(value=False, label="Overwrite Old Hypernetwork") + + with gr.Row(): + with gr.Column(scale=3): + gr.HTML(value="") + + with gr.Column(): + create_hypernetwork = gr.Button(value="Create hypernetwork", variant='primary') + setting_name = gr.Textbox(label="Setting file name", value="") + save_setting = gr.Button(value="Save hypernetwork setting to file") + ti_output = gr.Text(elem_id="ti_output2", value="", show_label=False) + ti_outcome = gr.HTML(elem_id="ti_error2", value="") + + + + save_setting.click( + fn=wrap_gradio_call(external_patch_ui.save_hypernetwork_setting), + inputs=[ + setting_name, + new_hypernetwork_sizes, + overwrite_old_hypernetwork, + new_hypernetwork_layer_structure, + new_hypernetwork_activation_func, + new_hypernetwork_initialization_option, + new_hypernetwork_add_layer_norm, + new_hypernetwork_use_dropout, + new_hypernetwork_dropout_structure, + optional_info, + generation_seed if generation_seed.visible else None, + normal_std if normal_std.visible else 0.01, + skip_connection], + outputs=[ + ti_output, + ti_outcome, + ] + ) + create_hypernetwork.click( + fn=ui.create_hypernetwork, + inputs=[ + new_hypernetwork_name, + new_hypernetwork_sizes, + overwrite_old_hypernetwork, + new_hypernetwork_layer_structure, + new_hypernetwork_activation_func, + new_hypernetwork_initialization_option, + new_hypernetwork_add_layer_norm, + new_hypernetwork_use_dropout, + new_hypernetwork_dropout_structure, + optional_info, + generation_seed if generation_seed.visible else None, + normal_std if normal_std.visible else 0.01, + skip_connection + ], + outputs=[ + new_hypernetwork_name, + ti_output, + ti_outcome, + ] + ) + return [(create_beta, "Create_beta", "create_beta")] + + +def create_extension_tab2(params=None): + with gr.Blocks(analytics_enabled=False) as CLIP_test_interface: + with gr.Tab(label="CLIP-test") as clip_test: + with gr.Row(): + clipTextModelPath = gr.Textbox("openai/clip-vit-large-patch14", label="CLIP Text models. Set to empty to not change.") + # see https://huggingface.co/openai/clip-vit-large-patch14 and related pages to find model.
+ change_model = gr.Checkbox(label="Enable CLIP model change. This will take effect from the next model change.") + change_model.change( + fn=clip_hijack.trigger_sd_hijack, + inputs=[ + change_model, + clipTextModelPath + ], + outputs=[] + ) + return [(CLIP_test_interface, "CLIP_test", "clip_test")] + +def on_ui_settings(): + shared.opts.add_option("disable_ema", + shared.OptionInfo(False, "Detach grad from conditioning models", + section=('training', "Training"))) + if not hasattr(shared.opts, 'training_enable_tensorboard'): + shared.opts.add_option("training_enable_tensorboard", + shared.OptionInfo(False, "Enable tensorboard logging", + section=('training', "Training"))) + +#script_callbacks.on_ui_train_tabs(create_training_tab) # Deprecated Beta Training +script_callbacks.on_ui_train_tabs(create_extension_tab) +script_callbacks.on_ui_train_tabs(external_patch_ui.on_train_gamma_tab) +script_callbacks.on_ui_train_tabs(external_patch_ui.on_train_tuning) +script_callbacks.on_ui_tabs(create_extension_tab2) +script_callbacks.on_ui_settings(on_ui_settings) + + +class Script(scripts.Script): + def title(self): + return "Hypernetwork Monkey Patch" + + def show(self, _): + return scripts.AlwaysVisible diff --git a/extensions/Stable-Diffusion-Webui-Civitai-Helper/.github/ISSUE_TEMPLATE/simple-issue-template.md b/extensions/Stable-Diffusion-Webui-Civitai-Helper/.github/ISSUE_TEMPLATE/simple-issue-template.md new file mode 100644 index 0000000000000000000000000000000000000000..82fb14ed9f2ef9ebc6f6bba8cb8217d07a7454d7 --- /dev/null +++ b/extensions/Stable-Diffusion-Webui-Civitai-Helper/.github/ISSUE_TEMPLATE/simple-issue-template.md @@ -0,0 +1,20 @@ +--- +name: Simple Issue template +about: Describe this issue template's purpose here. +title: '' +labels: '' +assignees: '' + +--- + +## Have you read the documentation? + +## Have you checked the console log window's messages?
+ +## Describe Issue + + +## Screenshot for UI issue + + +## Console log's msg or screenshot for function issue diff --git a/extensions/Stable-Diffusion-Webui-Civitai-Helper/.gitignore b/extensions/Stable-Diffusion-Webui-Civitai-Helper/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..1583181d3435909dc99d7486261e83d5edcf33e8 --- /dev/null +++ b/extensions/Stable-Diffusion-Webui-Civitai-Helper/.gitignore @@ -0,0 +1,3 @@ +scripts/__pycache__/ +scripts/ch_lib/__pycache__/ +setting.json diff --git a/extensions/Stable-Diffusion-Webui-Civitai-Helper/README.cn.md b/extensions/Stable-Diffusion-Webui-Civitai-Helper/README.cn.md new file mode 100644 index 0000000000000000000000000000000000000000..0ec7bed76fa8ac4855598fefa05aeb81030825aa --- /dev/null +++ b/extensions/Stable-Diffusion-Webui-Civitai-Helper/README.cn.md @@ -0,0 +1,241 @@ +## 关于Civitai Helper2: Model Info Helper +Civitai助手2将改名为**Model Info助手**。目前还在缓慢开发中。你可以查看它的UI演示视频,了解它会是什么样子: +[https://youtu.be/mPcKwQDDH8s](https://youtu.be/mPcKwQDDH8s) + + +# Civitai Helper +Stable Diffusion Webui 扩展Civitai助手,用于更轻松的管理和使用Civitai模型。 + +[Civitai Url](https://civitai.com/models/16768/civitai-helper-sd-webui-civitai-extension) + +# 注意 +**本插件现在非常稳定,很多人用得很好,如果碰到问题,先看[常见问题](#常见问题),并检查命令行窗口的详情。** +开issue前,请先看文档。找茬行为的用户将会被拉黑,参考:[找茬行为会被拉黑](https://github.com/butaixianran/Stable-Diffusion-Webui-Civitai-Helper/issues/96#issuecomment-1500310981) + + +# 功能 +[中文介绍视频(非官方)](https://youtu.be/x4tPWPmeAgM?t=373) + +* 扫描所有模型,从Civitai下载模型信息和预览图 +* 通过civitai模型页面url,连接本地模型和civitai模型信息 +* 通过Civitai模型页面url,下载模型(含信息和预览图)到SD目录或子目录。 +* 下载支持断点续传 +* 批量检查本地模型,在civitai上的新版本 +* 直接下载新版本模型到SD模型目录内(含信息和预览图) +* 修改了内置的"Extra Network"模型卡片,每个卡片增加了如下功能按钮: + - 🖼: 修改文字"replace preview"为这个图标 + - 🌐: 在新标签页打开这个模型的Civitai页面 + - 💡: 一键添加这个模型的触发词到关键词输入框 + - 🏷: 一键使用这个模型预览图所使用的关键词 +* 以上额外功能按钮支持thumbnail模式 +* 增加一直显示按钮的选项,以供触屏用户使用 + + +# 安装 +下载本项目为zip文件,解压到`你的SD webui目录/extensions`下即可。 + +不管是安装还是升级本插件,都要整个关闭SD Webui,重新启动它。只是Reload UI不起作用。 + +(如果用SD webui的插件界面安装,请先给git配置代理。它不是通过浏览器下载,是通过git下载。) + + +# 使用方法 + +## 更新你的SD webui +本扩展需要取到 Extra Network的卡片列表id。**这个是2023-02-06,才添加到SD webui里面的。** + +所以,如果你用的版本比这个早,你就需要先更新你的SD Webui! 
+ + +## 扫描模型 +前往扩展页面"Civitai Helper",有个按钮叫:"Scan Model" + +![](img/extension_tab.jpg) + +点击,就会扫描所有模型,生成SHA256码,用于从civitai获取模型信息和预览图。**扫描需要很久,耐心等待**。 + +每个模型,本扩展都会创建一个json文件,用来保存从civitai得到的模型信息。这个文件会保存在模型同目录下,名称为:"模型名字.civitai.info"。 + +![](img/model_info_file.jpg) + +如果模型信息文件已经存在,扫描时就会跳过这个模型。如果模型不是civitai的,就会创建个空信息文件,以避免以后重复扫描。 + +### 添加新模型 +当你下载了新模型之后,只要再次点击扫描按钮即可。已经扫描过的文件不会重复扫描,会自动得到新模型的信息和预览图。无须重启SD webui。 + +## 模型卡片 +**(先完成扫描,再使用卡片功能)** +打开SD webui's 内置的 "Extra Network" 页面,显示模型卡片 + +![](img/extra_network.jpg) + + +移动鼠标到模型卡片底部,就会显示4个按钮: + - 🖼: 修改文字"replace preview"为这个图标 + - 🌐: 在新标签页打开这个模型的Civitai页面 + - 💡: 一键添加这个模型的触发词到关键词输入框 + - 🏷: 一键使用这个模型预览图所使用的关键词 + +![](img/model_card.jpg) + +如果你没有看到这些额外的按钮,只要点击`Refresh Civitai Helper`,他们就会被重新添加到卡片上。 + +![](img/refresh_ch.jpg) + +每次当Extra Network刷新,他都会删除掉额外的修改,我们的按钮就会消失。这时你就需要点击`Refresh Civitai Helper`把这些功能添加回去。 + + +### 小图模式 +以上功能按钮支持小图模式,但受制于SD Webui的CSS问题,目前,只能要么一直显示,要么一直不显示,不能鼠标滑过才显示。 +![](img/thumb_mode.jpg) + +## 下载 +**(单任务,下载完一个再下另一个)** +通过Civitai模型页面Url下载模型,要3个步骤: +* 填入url,点击按钮获取模型信息 +* 扩展会自动填入模型名称和类型,你需要选择下载的子目录和模型版本。 +* 点击下载 +![](img/download_model.jpg) + +下载过程会显示在命令行界面带个进度条。 +支持断点续传,无畏大文件。 + + +## 批量检查模型新版本 +你可以按照模型类型,批量检查你的本地模型,在civitai上的新版本。你可以选择多个模型类型。 +![](img/check_model_new_version.jpg) + +检查新版本的时候,每检查完一个模型,都会有一个1秒的延迟,所以速度有点慢。 + +这是为了保护Civitai避免因为本插件而短暂陷入类似DDos的局面。有些云服务商,有类似“免费用户每秒API请求不能超过1次”的保护机制。Civitai还没有这种设置。但我们还是得自觉保护它。因为如果它挂了,对大家都没有好处。 + +**检查完毕之后**,就会如下图,在UI上显示所有找到的新版本的信息。 + +每个模型新版本,都有3个链接。 +* 第一个是这个模型的网页。 +* 第二个是这个新版本的下载地址。 +* 第三个是个按钮,在python端,直接下载新版本到模型目录内。 +这种方式下载,下载详情显示在"Download Model"的区域和命令行窗口中。一次一个任务,不支持多任务。 +![](img/check_model_new_version_output.jpg) + + + +## 根据URL获取模型信息 +如果无法在civitai上找到你的模型的SHA256,但你还是希望能把你的模型连接到一个civitai模型,你可以在本扩展页面,从列表中选择你的模型,并提供一个civitai模型页面的url。 + +点击按钮之后,扩展就会下载那个civitai模型的信息,作为你这个本地模型的信息使用。 + +![](img/get_one_model_info.jpg) + +## 代理 +**如果你是刚更新新版本,你需要重启SD webui再来使用** + +代理输入框在插件页面最下方。 + +**每次填入或清除代理后,都要保存,并用SDwebui设置页面的Reload UI按钮刷新UI** + +然后所有发到civitai的请求就会用代理。 + +有些sock5代理, 需要使用socks5h开头的形式"socks5h://xxxxx"才能生效。 + + + +## 其他设置 +**保存设置按钮, 会保存扫描模型区域,以及其他设置 这两个区域的选项** + +* "一直显示按钮" 是为了方便触屏。 +* "小图模式显示功能按钮" 会开关功能按钮在小图模式的显示 +![](img/other_setting.jpg) + +## 预览图 +Extra Network支持两种预览图命名:`model_name.png` 和 `model_name.preview.png`。其中,`model_name.png`优先级较高。 + +当优先级较高的预览图不存在,他就会自动使用`model_name.preview.png`。 + +这样,你自己创建的预览图 和 网络下载的预览图,能够同时存在,并优先使用你自己创建的。 + +## 关键词 +卡片上,添加关键词按钮,是添加从civitai预览图中得到的关键词,而不是你自己创建的图片的关键词。 + +civitai不是每个图片都有关键词,一个模型中,也不是所有预览图关键词都一样。所以这里是遍历所有civitai预览图信息,加载第一个有关键词的。 + + +## SHA256 +为了创建文件的SHA256,插件需要读取整个文件。对于大尺寸文件,就会很慢。 + +有两种情况,这个SHA256无法从civitai找到对应模型: +* 太老的模型,civitai没有存储SHA256. +* 模型作者,静静的换掉了模型文件,但没有修改描述和版本。所以,虽然网页上看不出来,但实际上civitai上的 和你本地的模型文件,已经不是同一个文件了。 + +这些情况下,你可以在插件上,通过提供模型页面的url,来获取模型信息文件。 + + + +## 新特性 +从v1.5开始,v1.x不再接受任何新特性。所有新特性进入2.x。 + +2.x专注于自定义模型信息,并可能改名为"Model Info Helper"。因为不再是专注Civitai了。 + +从v1.5开始。v1.x进入维护阶段。 + + +Enjoy! 
+ + +## 常见问题 +### 4个卡片按钮不显示 +#### 汉化原因 +下载新版,最新版已经处理汉化导致的问题。**双语汉化插件需要v1.6.1.1之后的版本才开始支持。** + +#### 使用了云端汉化功能 +如果是秋叶启动器,就关闭启动器“云端汉化”功能。如果是专门的云端汉化插件,就换用普通汉化插件。 + +#### 其他情况 +首先,确保你点过了"Refresh Civitai Helper"刷新按钮。 + +然后,如果还有这个问题,那么唯一原因,是你没有使用最新版SD webui。 + +如果你修改过SD webui的文件, 你的更新操作可能会失败。你需要检查git命令行的输出信息,来确定你更新成功了。 + +git在很多时候,会拒绝升级,并告诉你有些冲突需要你手动先解决。如果你不看命令行输出,你就会以为你已经更新成功了,但其实并没有。 + + +### Request model info from civitai +意思就是正在连接civitai,如果没有后面的信息,就是连不上,请挂代理。 + + +### 扫描或获取模型信息失败 +这个插件现在很稳定,所以,这个问题的原因,基本是是因为Civitai拒绝了你的连接请求。 + +Civitai不像那些大网站那么稳定。他网站会挂,会拒绝API连接,还会把API请求转到真人验证页面,来挡住。 + +Civitai还有连接池的设定。基本上,就是同时能允许的最大连接数。一旦达到这个数字,接下来的API连接请求,都会被拒绝。 + +所以,这种时候你只能等一下再试。 + +另外,对于国内用户,还有代理问题。现在国内都要用代理才能连上。 + + +### 扫描之后得到了错误的预览图和模型信息 +坏消息是,有些模型在civitai数据库中,保存的sha256完全是错的。查看下面的issue了解详情: +[https://github.com/civitai/civitai/issues/426](https://github.com/civitai/civitai/issues/426) + +对于这种模型,那这个插件自然就无法获得正确的模型信息和预览图。 + +这种情况下,请删除扫描得到的模型信息和预览图,在插件界面提供正确的模型url来获取。 + +另外,civitai官方有个页面,专门用于回报带有错误sha256的模型: +[https://discord.com/channels/1037799583784370196/1096271712959615100/1096271712959615100](https://discord.com/channels/1037799583784370196/1096271712959615100/1096271712959615100) + +请把这类模型反馈给civitai,好让他们进行修复。 + + + + +### 使用colab时扫描失败 +首先,在google中搜索你看到的错误信息。更有可能是,你碰到的是个colab的问题。 + +然后,如果colab连接了google drive,会有一次性访问文件数量的限制,而导致扫描失败。这是google drive的限制,请自行google搜索了解详情。 + + + diff --git a/extensions/Stable-Diffusion-Webui-Civitai-Helper/README.jp.md b/extensions/Stable-Diffusion-Webui-Civitai-Helper/README.jp.md new file mode 100644 index 0000000000000000000000000000000000000000..8f8d4bb6609ecd8e8ebd26167d4cc3f69b24abc3 --- /dev/null +++ b/extensions/Stable-Diffusion-Webui-Civitai-Helper/README.jp.md @@ -0,0 +1,224 @@ +### Language +[中文](README.cn.md) +[English](README.md) +[한국어(ChatGPT)](README.kr.md) + +## About Civitai Helper2: Model Info Helper +Civitai Helper 2は、**ModelInfo Helper**に改名されます。現在開発中です。デモをご覧ください: +[YouTube](https://youtu.be/mPcKwQDDH8s) + +# お知らせ +**この拡張機能は現在、非常に安定しています。もし問題があれば、コンソールログの詳細を確認し、[よくある質問](#よくある質問)を確認してください。** + +# Civitai Helper +この拡張機能は、Civitaiのモデルをより簡単に扱えるようにするためのものです。 + +Civitai: [Civitai Url](https://civitai.com/models/16768/civitai-helper-sd-webui-civitai-extension) + +# 機能 +* 全てのモデルをスキャンし、Civitaiからモデル情報とプレビューをダウンロード +* CivitaiモデルページのURLを使って、ローカルモデルとCivitaiモデル情報を取得 +* CivitaiモデルページのURLから、モデル(情報とプレビューを含む)をSDディレクトリまたはサブディレクトリにダウンロードする。 +* ダウンロードは途中から再開可能 +* ローカルのモデルとCivitai上の新しいバージョンを一括でチェック +* 新しいバージョンのモデルを直接モデルのディレクトリにダウンロード(情報とプレビュー画像を含む) +* 内蔵の**Extra Network**モデルカードを変更し、各カードに以下の機能ボタンを追加しました。 + - 🖼: `replace preview`のテキストをこのアイコンに変更 + - 🌐: このモデルのCivitaiページを新しいタブで開く + - 💡: このモデルのトリガーワードをキーワード入力欄に一括で追加する + - 🏷: このモデルのプレビュー画像で使用されているキーワードを一括で使用する +* 上記の追加機能ボタンは、サムネイルモードにも対応しています。 +* タッチスクリーンデバイス向けに、常に表示されるボタンのオプションを追加しました。 + + +# インストール +SD webui's extensionタブから、`Install from url`のタブに移動。 +このリポジトリのURLをコピーペーストし、インストールする。 + +または、このリポジトリをzipでダウンロードし、`./webui/extensions`へ展開してください。 + +この拡張機能をインストール、またはアップデートするたびに、SD Webui再起動する必要があります。 +この拡張機能は、**UIを再読み込みする**だけでは動作しません。 + +# 使い方 + +## WebUIをアップデート +この拡張機能は`network cards id`を取得する必要があります。この機能は**2023-02-06**に追加されました。 +**SD webuiがこれより前のバージョンである場合は、アップデートする必要があります!** + +## モデルのスキャン +拡張機能タブからCivitai Helperへ。 +Scan modelというボタンがあります。 + +![](img/extension_tab.jpg) + +これをクリックすると、拡張機能がすべてのモデルをスキャンしてSHA256ハッシュを生成し、それを使ってCivitaiからモデル情報とプレビュー画像を取得します。 +**スキャンには時間がかかります。 終了までお待ちください。** + +各モデルに対して、Civitaiからすべてのモデル情報を保存するためのjsonファイルを作成します。このモデル情報ファイルは、modelsディレクトリ内の`Your_model_name.civitai.info`となります。 + +![](img/model_info_file.jpg) + 
+モデル情報ファイルがすでに存在する場合は、スキップされます。Civitaiでモデルが見つからない場合、空のモデル情報ファイルを作成するので、モデルが2回スキャンされることはありません。 + +### 新しいモデルを追加 +新規のモデルがある場合、もう一度スキャンボタンをクリックするだけで、新しいモデルの情報とプレビューを取得できます。同じモデルを2回スキャンすることはありません。 + +## モデルカード +**(スキャン終了後に使用)** +SD webuiの`Extra Network`タブを開き、モデルカードを表示します。 + +![](img/extra_network.jpg) + + +マウスをモデルカードの下部に移動すると、4つのボタンが表示されます。 + - 🖼: プレビューを置き換えるためのテキストを`replace preview`からこのアイコンに変更します + - 🌐: このモデルのCivitaiページを新しいタブで開きます + - 💡: このモデルのトリガーワードをキーワード入力欄に一括追加します + - 🏷: このモデルのプレビュー画像に使用されているキーワードを一括で使用します + +![](img/model_card.jpg) + +これらのボタンが表示されない場合は、Refresh Civitai Helperをクリックすると、ボタンがカードに再追加されます。 + +![](img/refresh_ch.jpg) + +`Extra Network`が更新されるたびに、余分な変更が削除され、ボタンが消えてしまいます。その場合は、「Refresh Civitai Helper」をクリックして、これらの機能を再度追加する必要があります。 + + +### サムネイル +これらのボタンは、サムネイルをサポートしていますが、SD WebuiのCSSの問題により、現在は常に表示か非表示かのどちらかに制限されています。マウスをスライドして表示することはできません。 +![](img/thumb_mode.jpg) + + +## ダウンロード +**(タスクが一つ完了してから、次のタスクをダウンロードしてください)** +CivitaiモデルページのURLを使用してモデルをダウンロードするには、3つのステップが必要です。 +1. URLを入力し、モデル情報を取得するためにボタンをクリック +2. 拡張機能が自動的にモデル名とタイプを入力します。ダウンロードするサブディレクトリとモデルバージョンを選択 +3. ダウンロードをクリックします +![](img/download_model.jpg) + +ダウンロード状況は、CLIに進行状況バーを表示します。 +断片的に再開することができ、大きなファイルをダウンロードする際にも心配する必要はありません。 + + +## 新しいモデルのバージョンを確認する +モデルの種類に従って、ローカルのモデルを一括でCivitaiの新バージョンがないかをチェックすることができます。複数のモデルの種類を選択できます。 +![](img/check_model_new_version.jpg) + +これを押すと、各モデルをチェックするたびに1秒の遅延が発生するため、速度がやや遅くなります。 + +これは、本拡張機能のユーザーの過失によるDDoSを回避し、Civitaiを保護するために行われます。 +一部のクラウドサービスプロバイダーには、「無料ユーザーのAPIリクエストは1秒あたり1回を超えてはいけない」というような保護があります。Civitaiにはまだこのような設定がありませんが、我々はそれを自衛しなければなりません。 +なぜなら、もしCivitaiがダウンした場合、誰にとっても良いことではないからです。 + +チェックが完了すると、すべての新しいバージョンがUIに表示されます。 + +各モデルの新しいバージョンには、3つのリンクがあります。 +* 最初のものは、このモデルのWebページです。 +* 2つ目は、この新しいバージョンのダウンロードアドレスです。 +* 3つ目は、Python(拡張機能)側で新しいバージョンをモデルディレクトリに直接ダウンロードするボタンです。 +この方法でダウンロードすると、ダウンロードの詳細が「Download Model」の領域とコマンドラインに表示されます。一度に1つのタスクしかサポートされていません。 +![](img/check_model_new_version_output.jpg) + + + +## URLからモデル情報を取得する +Civitai上で自分のモデルのSHA256が見つからない場合でも、自分のモデルをCivitaiモデルに接続したい場合は、この拡張機能のページから、モデルをリストから選択し、CivitaiモデルページのURLを提供することができます。 + +ボタンをクリックすると、拡張機能はCivitaiモデルの情報をダウンロードし、それをローカルモデルの情報として使用します。 + +![](img/get_one_model_info.jpg) + + + +## その他の設定 +**設定保存ボタンを押すと、Scan Modelの設定とその他の設定の両方が保存されます。** + +* Always Display Buttonは、タッチデバイスでの操作を容易にするためです。 +* Show Buttons on Thumb Modeは、小さな画像モードでの機能ボタンの表示を切り替えます。 +![](img/other_setting.jpg) + +## プレビュー +Extra Networkは、2つのプレビュー画像の命名をサポートしています:`model_name.png`と`model_name.preview.png`。 +デフォルトでは自動で`model_name.png`が優先的に使われます。 + +優先度が高いプレビュー画像が存在しない場合は、自動的に`model_name.preview.png`が使用されます。 + +これにより、自分で作成したプレビュー画像とネットからダウンロードしたプレビュー画像を同時に使用し、自分で作成したプレビュー画像を優先的に使用できます。 + +## プロンプト +カード上のUse prompt from preview imageボタンは、Civitaiプレビュー画像から取得したキーワードであり、自分で作成した画像のキーワードではありません。 + +Civitaiにはすべての画像にキーワードがあるわけではなく、1つのモデルに含まれるすべてのプレビュー画像のキーワードが同じであるわけでもありません。したがって、ここではすべてのCivitaiプレビュー画像情報を走査し、最初にキーワードがあるものを読み込みます。 + + +## SHA256 +ファイルのSHA256を作成するために、はファイル全体を読み取る必要があります。大きなファイルの場合、処理が遅くなります。 + +Civitaiで対応するモデルのSHA256が見つからない場合は、次の2つの場合が考えられます: +* 古すぎるモデルには、SHA256が保存されていません。 +* モデルの作成者が静かにモデルファイルを変更しましたが、説明やバージョンを変更していないため、サイト上ではわかりませんが、実際にはCivitaiに保存されているモデルファイルとローカルのモデルファイルは異なるものとなっています。 + +これらの場合は、拡張機能にモデルページのURLを提供することで、モデルの情報ファイルを取得できます。 + +## Feature Request +v1.5以降のv1.xには新機能はありません。すべての新機能は2.xに移行されます。 +2.xでは、カスタムモデル情報にフォーカスし、Civitaiだけではなく、`Model Info Helper`という名称に変更する可能性があります。 +v1.5からv1.xはメンテナンスのフェーズに入ります。 + +お楽しみに! 
+## Feature Requests
+There are no new features for v1.x after v1.5. All new features go to 2.x.
+2.x will focus on custom model info, not only Civitai, and may be renamed `Model Info Helper`.
+From v1.5 on, v1.x is in maintenance mode.
+
+Stay tuned!
+
+
+## FAQ
+### The 4 card buttons do not show up
+#### Localization problem
+Download the new version.
+The latest version fixes the problems caused by localization.
+The [bilingual localization extension](https://github.com/journey-ad/sd-webui-bilingual-localization) is supported since v1.6.1.1.
+
+#### You used a cloud-based translation feature
+If you are using a cloud-based translation feature, switch to normal localization.
+
+#### Other cases
+First, did you click Refresh Civitai Helper?
+
+If the problem still occurs after that, it is probably because you are not using the latest version of SD webui.
+
+If you have modified SD webui's files, the update may have failed. Check the git command-line output to confirm that the update succeeded.
+
+git often refuses to upgrade and shows conflicts that must be resolved manually. If you do not look at the command-line output, you may think the update succeeded when it actually did not.
+
+
+### Request model info from civitai
+This means the extension is connecting to civitai. If no further message follows, it could not connect; please use a proxy.
+
+
+### Scanning or fetching model info failed
+This extension is very stable now, so the cause of this problem is almost always that Civitai rejected your connection request.
+
+Civitai is not as stable as the big websites. Its site can go down, refuse API connections, or block API requests by redirecting them to a Captcha page.
+
+Civitai also has a connection pool limit: essentially, the maximum number of simultaneous connections. Once that number is reached, every following API connection request is rejected.
+
+So in such cases, all you can do is wait a while and try again.
+
+### Getting wrong model info and preview images from civitai (Translated by ChatGPT)
+The bad news is that some models are saved with a wrong sha256 in civitai's database. See here for details:
+[https://github.com/civitai/civitai/issues/426](https://github.com/civitai/civitai/issues/426)
+
+Therefore this extension cannot fetch the correct model info or preview images for those models.
+
+In that case, you must delete the model info file and fetch the correct model info from a civitai URL on this extension's tab page.
+
+You can also report models with a wrong sha256 to civitai:
+[https://discord.com/channels/1037799583784370196/1096271712959615100/1096271712959615100](https://discord.com/channels/1037799583784370196/1096271712959615100/1096271712959615100)
+
+Please report those models to civitai so they can be fixed.
+
+
+### Scanning fails when using colab
+First, search Google for the error message you see; it is most likely a colab problem. Searching the exact message should identify the cause.
+
+When connected to Google Drive, scans often fail because of a limit on the number of file accesses. This is a Google Drive limitation; please [search](https://google.com) the internet for details.
\ No newline at end of file
diff --git a/extensions/Stable-Diffusion-Webui-Civitai-Helper/README.kr.md b/extensions/Stable-Diffusion-Webui-Civitai-Helper/README.kr.md
new file mode 100644
index 0000000000000000000000000000000000000000..1217da7f200aa89a8b201cda2c81ea00d0f2de07
--- /dev/null
+++ b/extensions/Stable-Diffusion-Webui-Civitai-Helper/README.kr.md
@@ -0,0 +1,206 @@
+# Civitai Helper
+A Civitai helper extension for Stable Diffusion Webui, to manage and use Civitai models more easily.
+
+[Civitai Url](https://civitai.com/models/16768/civitai-helper-sd-webui-civitai-extension)
+
+# Notice
+**This plugin is very stable now and works well for many users. If you run into a problem, first check the [FAQ](#faq) and the details in the command prompt window.**
+
+
+
+
+# Features
+* Scan all models and download model info and preview images from Civitai
+* Link a local model to its Civitai model info via the Civitai model page URL
+* Download a model (with info and preview image) from a Civitai model page URL into the SD directory or a subdirectory
+* Downloads support resuming from a break-point
+* Batch-check whether your local models have new versions on Civitai
+* Download new model versions directly into the SD model directory (with info and preview images)
+* Modified the built-in "Extra Network" model cards, adding the following function buttons:
+  - 🖼: changes the "replace preview" text into this icon
+  - 🌐: opens the model's Civitai page in a new tab
+  - 💡: adds this model's trigger words to the prompt box in one click (see the sketch after this list)
+  - 🏷: uses the keywords from this model's preview image
+* The additional buttons above support thumbnail mode
+* Added an always-visible-buttons option for touch-screen users
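+What the 💡 button does can be pictured in a few lines of Python. A rough sketch only: `add_trigger_words` is a hypothetical name, and it assumes the `trainedWords` list that Civitai's model-version API returns inside the saved `.civitai.info` file:
+
+```python
+import json
+
+def add_trigger_words(prompt: str, info_path: str) -> str:
+    # trainedWords is a list of trigger-word strings in the saved Civitai info
+    with open(info_path, "r", encoding="utf-8") as f:
+        info = json.load(f)
+    words = info.get("trainedWords", [])
+    if not words:
+        return prompt
+    return (prompt + ", " if prompt else "") + ", ".join(words)
+```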
+
+# Install
+Download this project as a zip file and extract it into the SD webui directory's /extensions folder.
+
+To install or upgrade this plugin, you must fully shut down SD Webui and start it again. Reloading the UI is not enough.
+
+# How to Use
+
+## Update SD Webui
+This extension needs the Extra Network card list id, which was added to SD Webui on 2023-02-06.
+
+So if you are running an older version, update SD Webui first!
+
+
+## Scanning Models
+Go to the "Civitai Helper" extension page and click the button labeled "Scan Model".
+
+![](img/extension_tab.jpg)
+
+Clicking it scans all models, generates their SHA256 codes, and uses them to fetch model info and preview images from Civitai. Scanning takes a while, so please be patient.
+
+For each model, the extension creates a JSON file storing the model info obtained from Civitai. The file is saved next to the model as "model_name.civitai.info".
+
+![](img/model_info_file.jpg)
+
+If a model info file already exists, that model is not scanned again. If the model is not on Civitai, an empty info file is created to avoid re-scanning it later.
+
+### Adding New Models
+After downloading new models, just click the scan button again. Files that were already scanned are not scanned twice, and the new models' info and preview images are fetched automatically. There is no need to restart SD Webui.
+
+## Model Cards
+**(Use the card features after scanning has finished)**
+Open SD Webui's built-in "Extra Network" page to show the model cards.
+
+![](img/extra_network.jpg)
+
+
+Move the mouse to the bottom of a model card and 4 buttons appear:
+ - 🖼: changes the "replace preview" text into this icon
+ - 🌐: opens this model's Civitai page in a new tab
+ - 💡: adds this model's trigger words to the prompt box
+ - 🏷: uses the keywords from this model's preview image
+
+![](img/model_card.jpg)
+
+If these additional buttons do not show up, click Refresh Civitai Helper to add them back.
+
+![](img/refresh_ch.jpg)
+
+Every time the Extra Network page refreshes, these extra modifications are removed. If the buttons disappear, click Refresh Civitai Helper to add the features back.
+
+
+### Thumbnail Mode
+These function buttons also support thumbnail mode, but because of a CSS issue in SD Webui they currently must be either always shown or always hidden.
+![](img/thumb_mode.jpg)
+
+## Download
+**(One at a time: finish one download before starting another)**
+Downloading a model from a Civitai model page URL takes 3 steps:
+* Enter the URL and click the button to fetch the model info.
+* The extension fills in the model name and type automatically. Choose the subdirectory and model version to download.
+* Click download.
+![](img/download_model.jpg)
+
+The download process is shown in the command-line interface with a progress bar.
+It supports pausing and resuming, so large files are handled without problems.
+
+
+## Batch-Checking for New Model Versions
+You can batch-check your local models for new versions on Civitai by model type. Multiple model types can be selected.
+![](img/check_model_new_version.jpg)
+
+There is a 1-second delay after each model's new-version check, so the process is somewhat slow.
+
+This is to keep Civitai from ending up in a DDoS-like situation because of this plugin. Some cloud service providers have a protection rule like "free users may not exceed 1 API request per second". Civitai has no such setting, but we should still protect it, because it is bad for everyone if it goes down.
+
+When the check completes, all found new versions are shown on the UI.
+
+Each new model version has 3 links:
+* The first is this model's web page.
+* The second is the download address of the new version.
+* The third is a button: the Python side downloads the new version directly into the model directory.
+When downloading this way, the download details are shown in the "Download Model" area and in the command-line window. Only one task is supported at a time.
+![](img/check_model_new_version_output.jpg)
+
+
+
+## Getting Model Info from a URL
+If your model's SHA256 cannot be found on Civitai but you still want to link the model to a Civitai model, you can select the model on this extension's page and provide the Civitai model page's URL.
+
+When you click the button, the extension downloads that Civitai model's info and uses it as the local model's info.
+
+![](img/get_one_model_info.jpg)
+
+
+
+## Other Settings
+**The save settings button saves the options of both the Scan Model area and the other settings area.**
+
+* "Always Display Button" is for convenient use on touch screens.
+* "Show Buttons on Thumb Mode" toggles the function buttons in small-image mode.
+![](img/other_setting.jpg)
+
+## Preview Images
+Extra Network supports two preview image names, model_name.png and model_name.preview.png, where model_name.png has the higher priority.
+
+If the higher-priority preview image does not exist, model_name.preview.png is used automatically.
+
+This lets you use your own preview images together with ones downloaded from the internet, with your own images taking priority.
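+That priority rule is simple enough to sketch in a few lines (a hypothetical helper; the extension's real logic also covers other image formats):
+
+```python
+import os
+
+def pick_preview(model_path: str) -> str | None:
+    base, _ = os.path.splitext(model_path)
+    # model_name.png (made by you) wins over model_name.preview.png (downloaded)
+    for candidate in (base + ".png", base + ".preview.png"):
+        if os.path.exists(candidate):
+            return candidate
+    return None
+```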
+## Keywords
+The add-keywords button on a card adds the keywords taken from the civitai preview image, not the keywords of an image you created yourself.
+
+Not every image has keywords, and depending on the model, the preview images' keywords may not all be the same. So the extension walks through all of the model's civitai preview image information and loads the first image that has keywords.
+
+
+## SHA256
+To generate a file's SHA256, the plugin has to read the whole file, which can be slow for large files.
+
+There are two cases where the SHA256 cannot find the model on civitai:
+* The model is too old, so no SHA256 is stored for it on civitai.
+* The model author quietly replaced the model file without updating the description and version. You cannot tell from the web page, but the civitai file and your local model file are already different files.
+
+In these cases, you can provide the model page URL to the plugin to obtain the model info file.
+
+
+## FAQ
+### The 4 card buttons do not show up
+#### You used a Korean localization plugin
+Download the new version; the latest version fixes the problems caused by the Korean translation. The bilingual localization plugin is supported since v1.6.1.1.
+
+#### You used a cloud-based Korean localization plugin
+If you used a cloud-based Korean localization plugin, switch to a normal Korean localization plugin.
+
+
+#### Other cases
+First, make sure you clicked the "Refresh Civitai Helper" button to refresh Civitai Helper.
+
+If the problem still persists, the only reason is that you are not using the latest version of SD webui.
+
+If you have modified SD webui's files, the update may have failed. To confirm that the update really happened, check the git command-line output.
+
+git often refuses to upgrade and shows a message saying some conflicts must be resolved manually. If you do not check the command-line output, you may wrongly assume the upgrade succeeded.
+
+
+### Request model info from civitai
+This indicates the extension is connecting to Civitai. If no information follows, it could not connect, and you need to use a proxy.
+
+
+### Scanning or fetching model info failed
+This plugin is very stable now, so this problem is almost always caused by Civitai rejecting your connection request.
+
+Civitai is not as stable as big websites. The site can go down or refuse API connections, and it can also block API requests by diverting them to a human-verification page.
+
+Civitai has a connection pool setting: the maximum number of simultaneous connections allowed. Once that number is reached, all following API connection requests are rejected. In that case you have to wait a while and try again.
+
+Also, domestic users have a proxy problem: usually you can only connect through a proxy.
+
+
+### Getting wrong model info and preview images from civitai
+Unfortunately, some models are stored in civitai's database with a wrong sha256. See here for details:
+[https://github.com/civitai/civitai/issues/426](https://github.com/civitai/civitai/issues/426)
+
+Therefore this extension cannot fetch the correct model info or preview images for those models.
+
+In this case, you must remove the model info file and fetch the correct model info from a civitai url on this extension's tab page.
+
+You can also report those models with a wrong sha256 to civitai:
+[https://discord.com/channels/1037799583784370196/1096271712959615100/1096271712959615100](https://discord.com/channels/1037799583784370196/1096271712959615100/1096271712959615100)
+
+Please report those models to civitai so they can be fixed.
+
+
+### Scanning fails when using Colab
+First, search Google for the error message you see. It is most likely a Colab problem.
+
+Also, if Colab is connected to Google Drive, scanning can fail because of the one-time file access limit. This is a Google Drive limitation; please search Google for details.
+
+
+
diff --git a/extensions/Stable-Diffusion-Webui-Civitai-Helper/README.md b/extensions/Stable-Diffusion-Webui-Civitai-Helper/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..9d393fc6adb0a005c78c6e5ac5cb2f1835d77096
--- /dev/null
+++ b/extensions/Stable-Diffusion-Webui-Civitai-Helper/README.md
@@ -0,0 +1,329 @@
+### Language
+[中文](README.cn.md)
+[日本語](README.jp.md)
+[한국어(ChatGPT)](README.kr.md)
+
+## About Civitai Helper2: Model Info Helper
+Civitai Helper 2 will be renamed to **ModelInfo Helper**. It is under development; you can watch its UI demo video to see how it is going to look:
+[YouTube](https://youtu.be/mPcKwQDDH8s)
+
+# Notice
+**This extension is now very stable and works well for many people. If you have an issue, check the console log for details and read the [common issue](#common-issue) section.**
+
+If you want to claim it doesn't work, check this first: [Claim Wall](claim_wall.md)
+
+
+# Civitai Helper
+Stable Diffusion Webui extension for Civitai, to handle your models much more easily.
+
+Civitai: [Civitai Url](https://civitai.com/models/16768/civitai-helper-sd-webui-civitai-extension)
+
+# Features
+* Scan all models to download model information and preview images from Civitai.
+* Link a local model to a civitai model by the civitai model's url
+* Download a model (with info and preview) by Civitai url into SD's model folder or a subfolder.
+* Downloading can resume from a break-point, which is good for large files (see the sketch after this list).
+* Check all your local models for new versions on Civitai
+* Download a new version directly into the SD model folder (with info and preview)
+* Modified built-in "Extra Network" cards, adding the following buttons to each card:
+  - 🖼️: the "replace preview" text changed into this icon
+  - 🌐: open this model's Civitai url in a new tab
+  - 💡: add this model's trigger words to the prompt
+  - 🏷️: use this model's preview image's prompt
+* The buttons above also work in Extra Network's thumbnail mode
+* Option to always show the additional buttons, to work with touchscreens.
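+Break-point resume works by asking the server only for the bytes you do not have yet, using an HTTP `Range` header. A simplified sketch of the idea (the extension's real downloader also shows progress and handles retries; the function name is made up here):
+
+```python
+import os
+
+import requests
+
+def download_resumable(url: str, dest: str) -> None:
+    done = os.path.getsize(dest) if os.path.exists(dest) else 0
+    headers = {"Range": f"bytes={done}-"} if done else {}
+    with requests.get(url, headers=headers, stream=True, timeout=30) as r:
+        r.raise_for_status()
+        # 206 Partial Content means the server honored the Range request
+        mode = "ab" if r.status_code == 206 else "wb"
+        with open(dest, mode) as f:
+            for chunk in r.iter_content(chunk_size=1 << 20):
+                f.write(chunk)
+```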
+
+
+# Install
+Go to SD webui's extension tab, then to the `Install from url` sub-tab.
+Copy this project's url into it and click install.
+
+Alternatively, download this project as a zip file and unzip it to `Your SD webui folder/extensions`.
+
+Every time you install or update this extension, you need to shut down SD Webui and relaunch it. Just "Reload UI" won't work for this extension.
+
+Done.
+
+# How to Use
+
+## Update Your SD Webui
+This extension needs to read the extra network cards' id, which was added on **2023-02-06**.
+**If your SD webui is an earlier version, you need to update it!**
+
+## Scanning Models
+Go to the extension tab "Civitai Helper". There is a button called "Scan model".
+
+![](img/extension_tab.jpg)
+
+Click it and the extension will scan all your models to generate SHA256 hashes, using them to retrieve model information and preview images from Civitai.
+
+**Scanning takes time; just wait for it to finish.**
+
+For each model, it will create a json file to save all the model info from Civitai. This model info file will be "Your_model_name.civitai.info" in your model folder.
+
+![](img/model_info_file.jpg)
+
+If a model info file already exists, it will be skipped. If a model cannot be found on Civitai, an empty model info file is created, so the model won't be scanned twice.
+
+### Adding New Models
+When you have some new models, just click the scan button again to get the new models' information and preview images. It won't scan the same model twice.
+
+## Model Card
+**(Use this only after scanning has finished)**
+Open SD webui's built-in "Extra Network" tab to show the model cards.
+
+![](img/extra_network.jpg)
+
+
+Move your mouse to the bottom of a model card. It will show 4 icon buttons:
+ - 🖼: Replace preview (a built-in button, modified from text to icon)
+ - 🌐: Open this model's Civitai url in a new tab
+ - 💡: Add this model's trigger words to the prompt
+ - 🏷: Use this model's preview image's prompt
+
+![](img/model_card.jpg)
+
+**If these additional buttons are not there**, click the `Refresh Civitai Helper` button to bring them back.
+
+![](img/refresh_ch.jpg)
+Every time the Extra Network tab is refreshed, all these additional buttons are removed. So you need to click the `Refresh Civitai Helper` button to bring them back.
+
+### Thumbnail Mode
+The additional buttons work on thumbnails too, but due to an SD webui CSS issue they must, for now, either always be displayed on thumbnails or not be displayed at all.
+![](img/thumb_mode.jpg)
+
+
+## Download
+To download a model by a Civitai model page's url, you need 3 steps:
+* Fill in the url and click the button to get the model info
+* The model name and type are shown automatically. Just choose the sub-folder and model version
+* Click download.
+![](img/download_model.jpg)
+
+Details will be displayed in the console log, with a progress bar.
+Downloading can resume from a break-point, so there is no fear of large files.
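+Under the hood, the model page url leads to Civitai's public model endpoint, which lists every version with its own download url. A minimal sketch, assuming the `modelVersions`/`downloadUrl` fields of the `/api/v1/models/` endpoint that this extension's `civitai.py` also uses (`list_versions` is a made-up name):
+
+```python
+import re
+
+import requests
+
+def list_versions(model_page_url: str) -> list[tuple[str, str]]:
+    # a model page looks like https://civitai.com/models/16768/some-model-name
+    m = re.search(r"civitai\.com/models/(\d+)", model_page_url)
+    if not m:
+        return []
+    info = requests.get("https://civitai.com/api/v1/models/" + m.group(1)).json()
+    # each version carries its own name and downloadUrl
+    return [(v["name"], v["downloadUrl"]) for v in info.get("modelVersions", [])]
+```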
+## Checking for Models' New Versions
+You can check your local models for new versions on civitai by model type. You can select multiple model types.
+![](img/check_model_new_version.jpg)
+
+The checking process has a "1 second delay" after each model's new-version request, so it is a little slow.
+
+This is to protect Civitai from DDoS-like load caused by this extension. Some cloud service providers have a rule like "no more than 1 API request per second for free users". Civitai doesn't have this rule yet, but we still need to protect it. It does us no good if it goes down.
+
+**After the checking process is done**, it will display all the new versions' information on the UI.
+
+There are 3 urls for each new version.
+* The first one is the model's civitai page.
+* The second one is the new version's download url.
+* The third one is a button to download it into your SD model folder with python.
+With this one, the output information is in the "Download Model" section's log and the console log. **One task at a time**.
+
+![](img/check_model_new_version_output.jpg)
+
+
+## Get Model Info by Url
+This is used to force a local model to link to a Civitai model. For example, you converted a model's format or pruned it, so it cannot be found on civitai when scanning.
+
+In that case, if you still want to link it to a civitai model, you can use this function.
+
+Choose the model from the list, then offer a civitai model page's url.
+
+After clicking the button, the extension will download that civitai model's info and preview image for the local file you picked.
+
+![](img/get_one_model_info.jpg)
+
+## Proxy
+**If you are updating to a new version, you need to re-launch SD webui before using it.**
+
+The proxy textbox is at the bottom of the extension tab.
+
+**Each time you fill in or clear a proxy value, you need to save the setting and reload the UI with the setting tab's reload button.**
+
+Then all requests to civitai will use the proxy.
+
+Some socks5 proxies need to be written as "socks5h://xxxxx".
+
+
+
+
+## Other Setting
+**The Save Setting button will save both the "Scan Model" settings and the other settings.**
+
+* "Always Display Button" is good for touch screens.
+* "Show Buttons on Thumb Mode" turns the additional buttons on thumbnails on or off.
+![](img/other_setting.jpg)
+
+
+
+
+## Preview Image
+Extra network uses both `model_file.png` and `model_file.preview.png` as the preview image, but `model_file.png` has higher priority, because it is created by yourself.
+
+When you don't have the higher-priority one, the other is used automatically.
+
+## Prompt
+When you click the button "Use prompt from preview image", it does not use the prompt from your own preview image. It uses the one from civitai's preview images.
+
+On civitai, a model's preview images may not all have a prompt. This extension checks all of the model's civitai preview image information and uses the first one that has a prompt.
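+A sketch of that selection, assuming the `images` array (each entry with a `meta` object that may hold a `prompt`) found in the saved Civitai info; `first_preview_prompt` is a hypothetical name:
+
+```python
+import json
+
+def first_preview_prompt(info_path: str) -> str:
+    with open(info_path, "r", encoding="utf-8") as f:
+        info = json.load(f)
+    for image in info.get("images", []):
+        meta = image.get("meta") or {}
+        if meta.get("prompt"):
+            # use the first civitai preview image that carries a prompt
+            return meta["prompt"]
+    return ""
+```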
+## SHA256
+To create a file's SHA256, the whole file needs to be read to generate the hash code, which is slow for large files.
+
+Also, the extension uses a memory-optimized SHA256, which won't stall your system and works with colab.
+
+There are 2 cases where this hash code cannot find the model on civitai:
+* Some old models do not have a SHA256 code on civitai.
+* The model's owner changed the file on civitai but did not change the version name and description. So the file on civitai is actually not the one on your machine.
+
+In these cases, you can always link a model to civitai by filling in its URL in this extension.
+
+
+
+## Feature Request
+There are no new features for v1.x after v1.5. All new features will go into 2.x.
+
+2.x will focus on custom model information and may change its name to "Model Info Helper", because it no longer focuses only on Civitai.
+
+From v1.5, v1.x goes into the maintenance phase.
+
+Enjoy!
+
+
+## Common Issue
+### The 4 buttons on cards don't show
+#### Localization
+There was a localization issue if you were not using the English version of SD webui. This is fixed in the latest version of this extension. **The bilingual localization extension is supported by PR since v1.6.1.1.**
+
+#### Using a cloud-based localization extension
+Turn off the cloud-based localization extension and use a normal localization extension.
+
+#### Other case
+First of all, make sure you clicked the "Refresh Civitai Helper" button.
+
+If the issue is still there, the only reason is that you are not using the latest SD webui. So make sure you have updated it.
+
+Your update could have failed if you have modified SD webui's files. You need to check the git command's console log to make sure it is updated.
+
+In many cases, git will just refuse to update and tell you there are some conflicts you need to handle manually. If you don't check the console log, you will think your SD webui is updated, but it is not.
+
+### Request, Scan or Get model info failed
+This extension is stable, so the most likely reason for this is your internet connection to the Civitai API service.
+
+Civitai is not as stable as those rich websites; it can be down or refuse your API connection.
+
+Civitai has a connection pool setting. Basically, it is the maximum number of connections civitai can serve at the same time. So if there are already too many connections on civitai, it will refuse your API connection.
+
+In those cases, the only thing you can do is wait a while and then try again.
+
+### Get wrong model info and preview images from civitai
+The bad news is that some models are saved with a wrong sha256 in civitai's database. Check here for more detail:
+[https://github.com/civitai/civitai/issues/426](https://github.com/civitai/civitai/issues/426)
+
+So, for those models, this extension cannot get the right model info or preview images.
+
+In this case, you have to remove the model info file and get the right model info by a civitai url on this extension's tab page.
+
+Also, you can report those models with a wrong sha256 to civitai at the following page:
+[https://discord.com/channels/1037799583784370196/1096271712959615100/1096271712959615100](https://discord.com/channels/1037799583784370196/1096271712959615100/1096271712959615100)
+
+Please report such models to civitai, so they can fix them.
+
+
+
+
+### Scanning fails when using colab
+First of all, search for your error message with google. Most likely, it will be a colab issue.
+
+If you are sure it is an out-of-memory issue when scanning models, and you are using this extension's latest version, then there is nothing we can do.
+
+Since v1.5.5, the SHA256 function is already as optimized as it can get. So your only 2 choices are:
+* try again
+* or use a colab pro account.
+
+
+
+
+
+# Change Log
+## v1.6.4
+* Add a "Download All files" checkbox to the model download section. Unchecked means only 1 file is downloaded.
+
+## v1.6.3
+* Support downloading multiple files; not available when checking for new versions.
+
+## v1.6.2.1
+* When parsing a civitai url, remove the query string (by PR)
+
+## v1.6.2
+* When downloading, rename the file if it already exists
+
+## v1.6.1.1
+* Support the bilingual localization extension (by PR)
+
+## v1.6.1
+* Fix the localization issue for the 4 additional buttons on cards. (Forgot that again...)
+
+## v1.6.0
+* Fix some UI issues to work with gradio 3.23.0
+* Support a proxy when connecting to civitai. Check the document for detail.
+* Check the realpath when opening files, to fix an error when using junctions
+* Fix the multiple-additional-buttons issue after switching tabs.
+
+## v1.5.7
+* Fix the localization issue for the 4 additional buttons on cards
+
+## v1.5.6
+* Update the error msg shown when the civitai API service cannot be reached
+* Update thumb mode for the new SD webui version's metadata button
+
+## v1.5.5
+* Update the SHA256 function; now it just uses the code from pip
+
+## v1.5.4
+* Set sys.stdout to utf-8
+* Add a default header to requests, to prevent them from being blocked by civitai.
+* Merge the other v1.5.x change log entries into v1.5.4
+* When downloading a model by url, check whether the target model version already exists in the user-selected sub-folder.
+* Support scanning only selected model types.
+* Force TI scanning to delay 1 second, to prevent civitai from treating this extension's requests as an attack.
+
+## v1.5.0
+* Download a model by a Civitai model page's url
+* Resume downloading from a break-point
+* Download new versions into SD Webui's model folder
+* Additional buttons now work in thumbnail mode
+* Option to always show the additional buttons, for touch screens.
+
+## v1.4.2
+* Ignore .vae files in the model folder when scanning
+
+## v1.4.1
+* When checking new versions, also search for and ignore ones that already exist.
+* Add the version number to the bottom of this extension's tab
+
+## v1.4
+* Support checking models for new versions, displaying the result in the UI and offering the download url
+* Remove additional sub-tabs on the extension tab, to make the UI simpler.
+
+## v1.3
+* Open urls on the client side
+* Link the selected model to civitai by url or model id
+* Save and load the extension settings to a file
+* Show button actions' output in the UI
+* Code refactoring
+
+## v1.2.1
+* Add more error checking to work with different versions of SD webui.
+
+## v1.2
+* Support custom model folders
+* Support a readable model info file
+* Support downloading preview images at max size
+* Remove card buttons when the extra network is in thumbnail mode
+
+## v1.1
+* Support subfolders
+* Check whether a refresh is needed when clicking "Refresh Civitai Helper"
+* Add a space when adding trigger words
+* Add memory-optimized sha256 as an option
diff --git a/extensions/Stable-Diffusion-Webui-Civitai-Helper/claim_wall.md b/extensions/Stable-Diffusion-Webui-Civitai-Helper/claim_wall.md
new file mode 100644
index 0000000000000000000000000000000000000000..e707ed1606d57ffef4a8bc94c9599791fa08712f
--- /dev/null
+++ b/extensions/Stable-Diffusion-Webui-Civitai-Helper/claim_wall.md
@@ -0,0 +1,91 @@
+# Claim Wall
+
+Since this extension got a little popular, some users have come to **blame many unrelated issues on this extension**.
+
+The following is a wall showing a few examples of how they claim this extension doesn't work, because they don't read the document or forget what they did before.
+
+If you are looking for the guideline, go to the section [What you should do](#what-you-should-do)
+
+# Wall
+
+### Didn't even update SD Webui, yet claimed to have "tried everything"
+
+![](img/user_claim_wall/have_not_update_sdwebui.jpg)
+
+
+### Hadn't even scanned models
+After 4 replies, found that out and modified his comment.
+
+![](img/user_claim_wall/have_not_scan_model.jpg)
+
+### Claimed he was "pretty sure" this extension broke his UI; took 2 days to find out it did not
+Then removed his comment from civitai, but his post on reddit is still there, so you can see what really went on.
+
Claim "pretty sure" this extension breaks his UI + +![](img/user_claim_wall/css_issue_part1.jpg) + +![](img/user_claim_wall/css_issue_part2.jpg) + +2. Find out it is not, after 2 days + +![](img/user_claim_wall/css_issue_part3.jpg) + +3. Still don't remember what he did with other extensions, until another user tells him, about 4 days later. + +![](img/user_claim_wall/css_issue_part4.jpg) + +### **Blame SD Webui's modification to this extension** +Latest SD webui removed a button from UI, they claim this extension did that, and want it back by this extension + +![](img/user_claim_wall/blame_sdweui_update_to_this_ext.jpg) + +### Claim other extension's error to this extension +Just because both extensions have "Civitai" in extension's name + +![](img/user_claim_wall/do_not_even_use_this_ext.jpg) + + +### **Didn't even use this extension and request a feature it already has** + +![](img/user_claim_wall/request_a_feature_it_already_has.jpg) + +### **Renamed model folder's name carelessly and forgot that** +Takes about 8 hours to find out why this extension doesn't work on his SDwebui and ready to re-install SD webui from beginning. + +1. claim this extension can not open civitai url on checkpoint models + +![](img/user_claim_wall/changed_model_folder_name_then_forget_part1.jpg) + +2. I reply that model he mentioned works well in my SDwebui + +![](img/user_claim_wall/changed_model_folder_name_then_forget_part2.jpg) + +3. After 6 hours' trying, find out his model folder's name is modified. + +![](img/user_claim_wall/changed_model_folder_name_then_forget_part3.jpg) + +![](img/user_claim_wall/changed_model_folder_name_then_forget_part4.jpg) + + +# What you should do +Above are just a very small piece of this kind of claims. Those claims won't help you. If you have an issue, following is the guidline: + +* If you want to make your extension work, read the document. + +* If your SD webui is broken, before you claim it is caused by this extension, you can disable it and try again. + +* If you followed document, but it still doesn't work well, you can check console log's msg to find out the reason. If you can not understand those msg, you can come and ask for help, with console log's msg or screenshot. + +* If you are using colab, and get an error from colab, then search that error msg in google. Because it's a colab's issue or limitation. + +* If you checked console log window's msg and understand what it means, you are welcome to submit your issue. 
+ + + + + + + + + diff --git a/extensions/Stable-Diffusion-Webui-Civitai-Helper/icon/.keep b/extensions/Stable-Diffusion-Webui-Civitai-Helper/icon/.keep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/all_in_one.png b/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/all_in_one.png new file mode 100644 index 0000000000000000000000000000000000000000..7db1d22b4c770224d6619813cdf975fce52eb21e --- /dev/null +++ b/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/all_in_one.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bedc58968db1a83c84e57d94327c4e27f5cd6fbd93c45a59627a5257b06c8a47 +size 1104345 diff --git a/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/check_model_new_version.jpg b/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/check_model_new_version.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1d05e4490b2ff7a3a82edee277a7130aee149d55 Binary files /dev/null and b/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/check_model_new_version.jpg differ diff --git a/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/check_model_new_version_output.jpg b/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/check_model_new_version_output.jpg new file mode 100644 index 0000000000000000000000000000000000000000..47761f788c50549355c1d2f58cffc3032cda3ebd Binary files /dev/null and b/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/check_model_new_version_output.jpg differ diff --git a/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/download_model.jpg b/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/download_model.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f6c052ccfca850008f7a87e7dfd8b45d2ec17209 Binary files /dev/null and b/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/download_model.jpg differ diff --git a/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/extension_tab.jpg b/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/extension_tab.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a9e0dbe1d1164117cd11064ebbf220377a495c2e Binary files /dev/null and b/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/extension_tab.jpg differ diff --git a/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/extra_network.jpg b/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/extra_network.jpg new file mode 100644 index 0000000000000000000000000000000000000000..25b2acbdb4afefb348f058a9d45fd8a090eda8ca Binary files /dev/null and b/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/extra_network.jpg differ diff --git a/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/get_one_model_info.jpg b/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/get_one_model_info.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9de82e031c5320d45fef054026ada12f3026e776 Binary files /dev/null and b/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/get_one_model_info.jpg differ diff --git a/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/model_card.jpg b/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/model_card.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6ceb33f68f91f3bf3eef625bc18ee427e1002456 Binary files /dev/null and b/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/model_card.jpg differ diff --git 
a/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/model_info_file.jpg b/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/model_info_file.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d9e36c8e69a3feb90034a2c205a4d4c27aa43bd7 Binary files /dev/null and b/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/model_info_file.jpg differ diff --git a/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/other_setting.jpg b/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/other_setting.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9b9a342410e65bcf616978530c79a636ab173f4e Binary files /dev/null and b/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/other_setting.jpg differ diff --git a/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/refresh_ch.jpg b/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/refresh_ch.jpg new file mode 100644 index 0000000000000000000000000000000000000000..93b4ab70f94ff1718cebc3880f34c4dce45ddfc8 Binary files /dev/null and b/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/refresh_ch.jpg differ diff --git a/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/thumb_mode.jpg b/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/thumb_mode.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1044e68cf89c9f0faa1e1f74a8a7cbb5d97809d6 Binary files /dev/null and b/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/thumb_mode.jpg differ diff --git a/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/user_claim_wall/blame_sdweui_update_to_this_ext.jpg b/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/user_claim_wall/blame_sdweui_update_to_this_ext.jpg new file mode 100644 index 0000000000000000000000000000000000000000..59ccb37188850686ef48248d2f565098403a0d99 Binary files /dev/null and b/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/user_claim_wall/blame_sdweui_update_to_this_ext.jpg differ diff --git a/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/user_claim_wall/changed_model_folder_name_then_forget_part1.jpg b/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/user_claim_wall/changed_model_folder_name_then_forget_part1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..76acd20220c8f3c22619e5d5f4541ce2aeefbeb4 Binary files /dev/null and b/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/user_claim_wall/changed_model_folder_name_then_forget_part1.jpg differ diff --git a/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/user_claim_wall/changed_model_folder_name_then_forget_part2.jpg b/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/user_claim_wall/changed_model_folder_name_then_forget_part2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0f231f6f8b9fb981fd9b52ff04aa44f26abf4ddb Binary files /dev/null and b/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/user_claim_wall/changed_model_folder_name_then_forget_part2.jpg differ diff --git a/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/user_claim_wall/changed_model_folder_name_then_forget_part3.jpg b/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/user_claim_wall/changed_model_folder_name_then_forget_part3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fc032c496932463b958444048200dd43068984ca Binary files /dev/null and b/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/user_claim_wall/changed_model_folder_name_then_forget_part3.jpg differ diff --git 
a/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/user_claim_wall/changed_model_folder_name_then_forget_part4.jpg b/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/user_claim_wall/changed_model_folder_name_then_forget_part4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6ffd6197fe4889e946dd2c24d2192b8940c1bddc Binary files /dev/null and b/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/user_claim_wall/changed_model_folder_name_then_forget_part4.jpg differ diff --git a/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/user_claim_wall/css_issue_part1.jpg b/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/user_claim_wall/css_issue_part1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..26567730c1a3253244d9a8230b90456add779b69 Binary files /dev/null and b/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/user_claim_wall/css_issue_part1.jpg differ diff --git a/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/user_claim_wall/css_issue_part2.jpg b/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/user_claim_wall/css_issue_part2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..54b9a2cd28a2f57e43a3f2f0b92c68dbd1c52123 Binary files /dev/null and b/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/user_claim_wall/css_issue_part2.jpg differ diff --git a/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/user_claim_wall/css_issue_part3.jpg b/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/user_claim_wall/css_issue_part3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8c0d4ce269af46779bdf4981a7bebf40a4768559 Binary files /dev/null and b/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/user_claim_wall/css_issue_part3.jpg differ diff --git a/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/user_claim_wall/css_issue_part4.jpg b/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/user_claim_wall/css_issue_part4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9ddad8e72b21e5148b14a1e4170a6a774a8d2c4c Binary files /dev/null and b/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/user_claim_wall/css_issue_part4.jpg differ diff --git a/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/user_claim_wall/did_not_relaunch_sdwebui.jpg b/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/user_claim_wall/did_not_relaunch_sdwebui.jpg new file mode 100644 index 0000000000000000000000000000000000000000..984228d4cffa6367e5191bb756350f7611b42091 Binary files /dev/null and b/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/user_claim_wall/did_not_relaunch_sdwebui.jpg differ diff --git a/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/user_claim_wall/do_not_even_use_this_ext.jpg b/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/user_claim_wall/do_not_even_use_this_ext.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e4be133af1c95e21fce1e6b9b4a26f312722ccb5 Binary files /dev/null and b/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/user_claim_wall/do_not_even_use_this_ext.jpg differ diff --git a/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/user_claim_wall/have_not_scan_model.jpg b/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/user_claim_wall/have_not_scan_model.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fc20a79fd6c096d1a2df8d612d06f89a323cdbbd Binary files /dev/null and 
b/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/user_claim_wall/have_not_scan_model.jpg differ diff --git a/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/user_claim_wall/have_not_update_sdwebui.jpg b/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/user_claim_wall/have_not_update_sdwebui.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3fb704c1f4ce3ef9ea423d2bab3eeff9eab7fe4f Binary files /dev/null and b/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/user_claim_wall/have_not_update_sdwebui.jpg differ diff --git a/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/user_claim_wall/request_a_feature_it_already_has.jpg b/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/user_claim_wall/request_a_feature_it_already_has.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2e1eab1df869661eab9475344d28eed4c59129e0 Binary files /dev/null and b/extensions/Stable-Diffusion-Webui-Civitai-Helper/img/user_claim_wall/request_a_feature_it_already_has.jpg differ diff --git a/extensions/Stable-Diffusion-Webui-Civitai-Helper/javascript/civitai_helper.js b/extensions/Stable-Diffusion-Webui-Civitai-Helper/javascript/civitai_helper.js new file mode 100644 index 0000000000000000000000000000000000000000..4454d0d4d4b73ec0436beb570e65f943aebfe97f --- /dev/null +++ b/extensions/Stable-Diffusion-Webui-Civitai-Helper/javascript/civitai_helper.js @@ -0,0 +1,728 @@ +"use strict"; + + +function ch_convert_file_path_to_url(path){ + let prefix = "file="; + let path_to_url = path.replaceAll('\\', '/'); + return prefix+path_to_url; +} + +function ch_img_node_str(path){ + // return an <img> tag string for a local file path (the original tag markup was lost; this is an assumed reconstruction) + return `<img src="${ch_convert_file_path_to_url(path)}"/>`; +} + + +function ch_gradio_version(){ + let foot = gradioApp().getElementById("footer"); + if (!foot){return null;} + + let versions = foot.querySelector(".versions"); + if (!versions){return null;} + + if (versions.innerHTML.indexOf("gradio: 3.16.2")>0) { + return "3.16.2"; + } else { + return "3.23.0"; + } + +} + + +// send msg to python side by filling a hidden text box +// then will click a button to trigger an action +// msg is an object, not a string, it will be stringified in this function +function send_ch_py_msg(msg){ + console.log("run send_ch_py_msg") + let js_msg_txtbox = gradioApp().querySelector("#ch_js_msg_txtbox textarea"); + if (js_msg_txtbox && msg) { + // fill to msg box + js_msg_txtbox.value = JSON.stringify(msg); + js_msg_txtbox.dispatchEvent(new Event("input")); + } + +} + +// get msg from python side from a hidden textbox +// normally this is an old msg, need to wait for a new msg +function get_ch_py_msg(){ + console.log("run get_ch_py_msg") + const py_msg_txtbox = gradioApp().querySelector("#ch_py_msg_txtbox textarea"); + if (py_msg_txtbox && py_msg_txtbox.value) { + console.log("find py_msg_txtbox"); + console.log("py_msg_txtbox value: "); + console.log(py_msg_txtbox.value) + return py_msg_txtbox.value + } else { + return "" + } +} + + +// get msg from python side from a hidden textbox +// it will try once every second, until it reaches the max try count +const get_new_ch_py_msg = (max_count=3) => new Promise((resolve, reject) => { + console.log("run get_new_ch_py_msg") + + let count = 0; + let new_msg = ""; + let find_msg = false; + const interval = setInterval(() => { + const py_msg_txtbox = gradioApp().querySelector("#ch_py_msg_txtbox textarea"); + count++; + + if (py_msg_txtbox && py_msg_txtbox.value) { + console.log("find py_msg_txtbox"); + console.log("py_msg_txtbox value: "); + console.log(py_msg_txtbox.value) + + new_msg = py_msg_txtbox.value +
if (new_msg != "") { + find_msg=true + } + } + + if (find_msg) { + //clear msg in both sides + py_msg_txtbox.value = ""; + py_msg_txtbox.dispatchEvent(new Event("input")); + + resolve(new_msg); + clearInterval(interval); + } else if (count > max_count) { + //clear msg in both sides + py_msg_txtbox.value = ""; + py_msg_txtbox.dispatchEvent(new Event("input")); + + reject(''); + clearInterval(interval); + } + + }, 1000); +}) + + +function getActiveTabType() { + const currentTab = get_uiCurrentTabContent(); + switch (currentTab.id) { + case "tab_txt2img": + return "txt2img"; + case "tab_img2img": + return "img2img"; + } + return null; +} + + + +function getActivePrompt() { + const currentTab = get_uiCurrentTabContent(); + switch (currentTab.id) { + case "tab_txt2img": + return currentTab.querySelector("#txt2img_prompt textarea"); + case "tab_img2img": + return currentTab.querySelector("#img2img_prompt textarea"); + } + return null; +} + +function getActiveNegativePrompt() { + const currentTab = get_uiCurrentTabContent(); + switch (currentTab.id) { + case "tab_txt2img": + return currentTab.querySelector("#txt2img_neg_prompt textarea"); + case "tab_img2img": + return currentTab.querySelector("#img2img_neg_prompt textarea"); + } + return null; +} + + +//button's click function +async function open_model_url(event, model_type, search_term){ + console.log("start open_model_url"); + + //get hidden components of extension + let js_open_url_btn = gradioApp().getElementById("ch_js_open_url_btn"); + if (!js_open_url_btn) { + return + } + + + //msg to python side + let msg = { + "action": "", + "model_type": "", + "search_term": "", + "prompt": "", + "neg_prompt": "", + } + + + msg["action"] = "open_url"; + msg["model_type"] = model_type; + msg["search_term"] = search_term; + msg["prompt"] = ""; + msg["neg_prompt"] = ""; + + // fill to msg box + send_ch_py_msg(msg) + + //click hidden button + js_open_url_btn.click(); + + // stop parent event + event.stopPropagation() + event.preventDefault() + + //check response msg from python + let new_py_msg = await get_new_ch_py_msg(); + console.log("new_py_msg:"); + console.log(new_py_msg); + + //check msg + if (new_py_msg) { + let py_msg_json = JSON.parse(new_py_msg); + //check for url + if (py_msg_json && py_msg_json.content) { + if (py_msg_json.content.url) { + window.open(py_msg_json.content.url, "_blank"); + } + + } + + + } + + + console.log("end open_model_url"); + + +} + +function add_trigger_words(event, model_type, search_term){ + console.log("start add_trigger_words"); + + //get hidden components of extension + let js_add_trigger_words_btn = gradioApp().getElementById("ch_js_add_trigger_words_btn"); + if (!js_add_trigger_words_btn) { + return + } + + + //msg to python side + let msg = { + "action": "", + "model_type": "", + "search_term": "", + "prompt": "", + "neg_prompt": "", + } + + msg["action"] = "add_trigger_words"; + msg["model_type"] = model_type; + msg["search_term"] = search_term; + msg["neg_prompt"] = ""; + + // get active prompt + let act_prompt = getActivePrompt(); + msg["prompt"] = act_prompt.value; + + // fill to msg box + send_ch_py_msg(msg) + + //click hidden button + js_add_trigger_words_btn.click(); + + console.log("end add_trigger_words"); + + event.stopPropagation() + event.preventDefault() + + +} + +function use_preview_prompt(event, model_type, search_term){ + console.log("start use_preview_prompt"); + + //get hidden components of extension + let js_use_preview_prompt_btn = gradioApp().getElementById("ch_js_use_preview_prompt_btn"); 
+ if (!js_use_preview_prompt_btn) { + return + } + + //msg to python side + let msg = { + "action": "", + "model_type": "", + "search_term": "", + "prompt": "", + "neg_prompt": "", + } + + msg["action"] = "use_preview_prompt"; + msg["model_type"] = model_type; + msg["search_term"] = search_term; + + // get active prompt + let act_prompt = getActivePrompt(); + msg["prompt"] = act_prompt.value; + + // get active neg prompt + let neg_prompt = getActiveNegativePrompt(); + msg["neg_prompt"] = neg_prompt.value; + + // fill to msg box + send_ch_py_msg(msg) + + //click hidden button + js_use_preview_prompt_btn.click(); + + console.log("end use_preview_prompt"); + + event.stopPropagation() + event.preventDefault() + +} + + + +// download model's new version into SD at python side +function ch_dl_model_new_version(event, model_path, version_id, download_url){ + console.log("start ch_dl_model_new_version"); + + // must confirm before downloading + let dl_confirm = "\nConfirm to download.\n\nCheck Download Model Section's log and console log for detail."; + if (!confirm(dl_confirm)) { + return + } + + //get hidden components of extension + let js_dl_model_new_version_btn = gradioApp().getElementById("ch_js_dl_model_new_version_btn"); + if (!js_dl_model_new_version_btn) { + return + } + + //msg to python side + let msg = { + "action": "", + "model_path": "", + "version_id": "", + "download_url": "", + } + + msg["action"] = "dl_model_new_version"; + msg["model_path"] = model_path; + msg["version_id"] = version_id; + msg["download_url"] = download_url; + + // fill to msg box + send_ch_py_msg(msg) + + //click hidden button + js_dl_model_new_version_btn.click(); + + console.log("end dl_model_new_version"); + + event.stopPropagation() + event.preventDefault() + + +} + + +onUiLoaded(() => { + + //get gradio version + let gradio_ver = ch_gradio_version(); + console.log("gradio_ver:" + gradio_ver); + + // get all extra network tabs + let tab_prefix_list = ["txt2img", "img2img"]; + let model_type_list = ["textual_inversion", "hypernetworks", "checkpoints", "lora"]; + let cardid_suffix = "cards"; + + //get init py msg + // let init_py_msg_str = get_ch_py_msg(); + // let extension_path = ""; + // if (!init_py_msg_str) { + // console.log("Can not get init_py_msg"); + // } else { + // init_py_msg = JSON.parse(init_py_msg_str); + // if (init_py_msg) { + // extension_path = init_py_msg.extension_path; + // console.log("get extension path: " + extension_path); + // } + // } + + // //icon image node as string + // function icon(icon_name){ + // let icon_path = extension_path+"/icon/"+icon_name; + // return ch_img_node_str(icon_path); + // } + + + // update extra network tab pages' cards + // * replace "replace preview" text button into an icon + // * add 3 button to each card: + // - open model url 🌐 + // - add trigger words 💡 + // - use preview image's prompt 🏷️ + // notice: javascript can not get response from python side + // so, these buttons just sent request to python + // then, python side gonna open url and update prompt text box, without telling js side. 
+ function update_card_for_civitai(){ + + //css + let btn_margin = "0px 5px"; + let btn_fontSize = "200%"; + let btn_thumb_fontSize = "100%"; + let btn_thumb_display = "inline"; + let btn_thumb_pos = "static"; + let btn_thumb_backgroundImage = "none"; + let btn_thumb_background = "rgba(0, 0, 0, 0.8)"; + + let ch_btn_txts = ['🌐', '💡', '🏷️']; + let replace_preview_text = getTranslation("replace preview"); + if (!replace_preview_text) { + replace_preview_text = "replace preview"; + } + + + + // get component + let ch_always_display_ckb = gradioApp().querySelector("#ch_always_display_ckb input"); + let ch_show_btn_on_thumb_ckb = gradioApp().querySelector("#ch_show_btn_on_thumb_ckb input"); + let ch_always_display = false; + let ch_show_btn_on_thumb = false; + if (ch_always_display_ckb) { + ch_always_display = ch_always_display_ckb.checked; + } + if (ch_show_btn_on_thumb_ckb) { + ch_show_btn_on_thumb = ch_show_btn_on_thumb_ckb.checked; + } + + + //change all "replace preview" into an icon + let extra_network_id = ""; + let extra_network_node = null; + let metadata_button = null; + let additional_node = null; + let replace_preview_btn = null; + let ul_node = null; + let search_term_node = null; + let search_term = ""; + let model_type = ""; + let cards = null; + let need_to_add_buttons = false; + let is_thumb_mode = false; + + //get current tab + let active_tab_type = getActiveTabType(); + if (!active_tab_type){active_tab_type = "txt2img";} + + for (const tab_prefix of tab_prefix_list) { + if (tab_prefix != active_tab_type) {continue;} + + + //find out current selected model type tab + let active_extra_tab_type = ""; + let extra_tabs = gradioApp().getElementById(tab_prefix+"_extra_tabs"); + if (!extra_tabs) {console.log("can not find extra_tabs: " + tab_prefix+"_extra_tabs");} + + //get active extratab + const active_extra_tab = Array.from(get_uiCurrentTabContent().querySelectorAll('.extra-network-cards,.extra-network-thumbs')) + .find(el => el.closest('.tabitem').style.display === 'block') + ?.id.match(/^(txt2img|img2img)_(.+)_cards$/)[2] + + + console.log("found active tab: " + active_extra_tab); + + switch (active_extra_tab) { + case "textual_inversion": + active_extra_tab_type = "ti"; + break; + case "hypernetworks": + active_extra_tab_type = "hyper"; + break; + case "checkpoints": + active_extra_tab_type = "ckp"; + break; + case "lora": + active_extra_tab_type = "lora"; + break; + } + + + for (const js_model_type of model_type_list) { + //get model_type for python side + switch (js_model_type) { + case "textual_inversion": + model_type = "ti"; + break; + case "hypernetworks": + model_type = "hyper"; + break; + case "checkpoints": + model_type = "ckp"; + break; + case "lora": + model_type = "lora"; + break; + } + + if (!model_type) { + console.log("can not get model_type from: " + js_model_type); + continue; + } + + + //only handle current sub-tab + if (model_type != active_extra_tab_type) { + continue; + } + + console.log("handle active extra tab"); + + + extra_network_id = tab_prefix+"_"+js_model_type+"_"+cardid_suffix; + // console.log("searching extra_network_node: " + extra_network_id); + extra_network_node = gradioApp().getElementById(extra_network_id); + // check if extr network is under thumbnail mode + is_thumb_mode = false + if (extra_network_node) { + if (extra_network_node.className == "extra-network-thumbs") { + console.log(extra_network_id + " is in thumbnail mode"); + is_thumb_mode = true; + // if (!ch_show_btn_on_thumb) {continue;} + } + } else { + console.log("can not find 
extra_network_node: " + extra_network_id); + continue; + } + // console.log("find extra_network_node: " + extra_network_id); + + // get all card nodes + cards = extra_network_node.querySelectorAll(".card"); + for (let card of cards) { + //metadata_buttoncard + metadata_button = card.querySelector(".metadata-button"); + //additional node + additional_node = card.querySelector(".actions .additional"); + //get ul node, which is the parent of all buttons + ul_node = card.querySelector(".actions .additional ul"); + // replace preview text button + replace_preview_btn = card.querySelector(".actions .additional a"); + + // check thumb mode + if (is_thumb_mode) { + additional_node.style.display = null; + + if (ch_show_btn_on_thumb) { + ul_node.style.background = btn_thumb_background; + } else { + //reset + ul_node.style.background = null; + // console.log("remove existed buttons"); + // remove existed buttons + if (ul_node) { + // find all .a child nodes + let atags = ul_node.querySelectorAll("a"); + + for (let atag of atags) { + //reset display + atag.style.display = null; + //remove extension's button + if (ch_btn_txts.indexOf(atag.innerHTML)>=0) { + //need to remove + ul_node.removeChild(atag); + } else { + //do not remove, just reset + atag.innerHTML = replace_preview_text; + atag.style.display = null; + atag.style.fontSize = null; + atag.style.position = null; + atag.style.backgroundImage = null; + } + } + + //also remove br tag in ul + let brtag = ul_node.querySelector("br"); + if (brtag) { + ul_node.removeChild(brtag); + } + + } + //just reset and remove nodes, do nothing else + continue; + + } + + } else { + // full preview mode + if (ch_always_display) { + additional_node.style.display = "block"; + } else { + additional_node.style.display = null; + } + + // remove br tag + let brtag = ul_node.querySelector("br"); + if (brtag) { + ul_node.removeChild(brtag); + } + + } + + // change replace preview text button into icon + if (replace_preview_btn) { + if (replace_preview_btn.innerHTML !== "🖼️") { + need_to_add_buttons = true; + replace_preview_btn.innerHTML = "🖼️"; + if (!is_thumb_mode) { + replace_preview_btn.style.fontSize = btn_fontSize; + replace_preview_btn.style.margin = btn_margin; + } else { + replace_preview_btn.style.display = btn_thumb_display; + replace_preview_btn.style.fontSize = btn_thumb_fontSize; + replace_preview_btn.style.position = btn_thumb_pos; + replace_preview_btn.style.backgroundImage = btn_thumb_backgroundImage; + } + + } + } + + if (!need_to_add_buttons) { + continue; + } + + + // search_term node + // search_term = subfolder path + model name + ext + search_term_node = card.querySelector(".actions .additional .search_term"); + if (!search_term_node){ + console.log("can not find search_term node for cards in " + extra_network_id); + continue; + } + + // get search_term + search_term = search_term_node.innerHTML; + if (!search_term) { + console.log("search_term is empty for cards in " + extra_network_id); + continue; + } + + + + // if (is_thumb_mode) { + // ul_node.style.background = btn_thumb_background; + // } + + // then we need to add 3 buttons to each ul node: + let open_url_node = document.createElement("a"); + open_url_node.href = "#"; + open_url_node.innerHTML = "🌐"; + if (!is_thumb_mode) { + open_url_node.style.fontSize = btn_fontSize; + open_url_node.style.margin = btn_margin; + } else { + open_url_node.style.display = btn_thumb_display; + open_url_node.style.fontSize = btn_thumb_fontSize; + open_url_node.style.position = btn_thumb_pos; + 
open_url_node.style.backgroundImage = btn_thumb_backgroundImage; + } + open_url_node.title = "Open this model's civitai url"; + open_url_node.setAttribute("onclick","open_model_url(event, '"+model_type+"', '"+search_term+"')"); + + let add_trigger_words_node = document.createElement("a"); + add_trigger_words_node.href = "#"; + add_trigger_words_node.innerHTML = "💡"; + if (!is_thumb_mode) { + add_trigger_words_node.style.fontSize = btn_fontSize; + add_trigger_words_node.style.margin = btn_margin; + } else { + add_trigger_words_node.style.display = btn_thumb_display; + add_trigger_words_node.style.fontSize = btn_thumb_fontSize; + add_trigger_words_node.style.position = btn_thumb_pos; + add_trigger_words_node.style.backgroundImage = btn_thumb_backgroundImage; + } + + add_trigger_words_node.title = "Add trigger words to prompt"; + add_trigger_words_node.setAttribute("onclick","add_trigger_words(event, '"+model_type+"', '"+search_term+"')"); + + let use_preview_prompt_node = document.createElement("a"); + use_preview_prompt_node.href = "#"; + use_preview_prompt_node.innerHTML = "🏷️"; + if (!is_thumb_mode) { + use_preview_prompt_node.style.fontSize = btn_fontSize; + use_preview_prompt_node.style.margin = btn_margin; + } else { + use_preview_prompt_node.style.display = btn_thumb_display; + use_preview_prompt_node.style.fontSize = btn_thumb_fontSize; + use_preview_prompt_node.style.position = btn_thumb_pos; + use_preview_prompt_node.style.backgroundImage = btn_thumb_backgroundImage; + } + use_preview_prompt_node.title = "Use prompt from preview image"; + use_preview_prompt_node.setAttribute("onclick","use_preview_prompt(event, '"+model_type+"', '"+search_term+"')"); + + //add to card + ul_node.appendChild(open_url_node); + //add br if metadata_button exists + if (is_thumb_mode && metadata_button) { + ul_node.appendChild(document.createElement("br")); + } + ul_node.appendChild(add_trigger_words_node); + ul_node.appendChild(use_preview_prompt_node); + + + + + } + + + } + } + + + } + + + let tab_id = "" + let extra_tab = null; + let extra_toolbar = null; + let extra_network_refresh_btn = null; + //add refresh button to extra network's toolbar + for (let prefix of tab_prefix_list) { + tab_id = prefix + "_extra_tabs"; + extra_tab = gradioApp().getElementById(tab_id); + + //get toolbar + //get Refresh button + extra_network_refresh_btn = gradioApp().getElementById(prefix+"_extra_refresh"); + + + if (!extra_network_refresh_btn){ + console.log("can not get extra network refresh button for " + tab_id); + continue; + } + + // add refresh button to toolbar + let ch_refresh = document.createElement("button"); + ch_refresh.innerHTML = "🔁"; + ch_refresh.title = "Refresh Civitai Helper's additional buttons"; + ch_refresh.className = "lg secondary gradio-button"; + ch_refresh.style.fontSize = "200%"; + ch_refresh.onclick = update_card_for_civitai; + + extra_network_refresh_btn.parentNode.appendChild(ch_refresh); + + } + + + //run it once + update_card_for_civitai(); + + +}); + + + diff --git a/extensions/Stable-Diffusion-Webui-Civitai-Helper/scripts/__pycache__/civitai_helper.cpython-310.pyc b/extensions/Stable-Diffusion-Webui-Civitai-Helper/scripts/__pycache__/civitai_helper.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d91c98c60b2c06fd3d97f6bbe52b30bda934c48a Binary files /dev/null and b/extensions/Stable-Diffusion-Webui-Civitai-Helper/scripts/__pycache__/civitai_helper.cpython-310.pyc differ diff --git 
a/extensions/Stable-Diffusion-Webui-Civitai-Helper/scripts/ch_lib/__init__.py b/extensions/Stable-Diffusion-Webui-Civitai-Helper/scripts/ch_lib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/extensions/Stable-Diffusion-Webui-Civitai-Helper/scripts/ch_lib/__pycache__/__init__.cpython-310.pyc b/extensions/Stable-Diffusion-Webui-Civitai-Helper/scripts/ch_lib/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4db2082394f01efaf8d09e42c30fb9e4a325edf3 Binary files /dev/null and b/extensions/Stable-Diffusion-Webui-Civitai-Helper/scripts/ch_lib/__pycache__/__init__.cpython-310.pyc differ diff --git a/extensions/Stable-Diffusion-Webui-Civitai-Helper/scripts/ch_lib/__pycache__/civitai.cpython-310.pyc b/extensions/Stable-Diffusion-Webui-Civitai-Helper/scripts/ch_lib/__pycache__/civitai.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..81c9e5892657cb993a8c05378c4242d0ae38c2d7 Binary files /dev/null and b/extensions/Stable-Diffusion-Webui-Civitai-Helper/scripts/ch_lib/__pycache__/civitai.cpython-310.pyc differ diff --git a/extensions/Stable-Diffusion-Webui-Civitai-Helper/scripts/ch_lib/__pycache__/downloader.cpython-310.pyc b/extensions/Stable-Diffusion-Webui-Civitai-Helper/scripts/ch_lib/__pycache__/downloader.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a51cf400d62c695d8093b0828994d64fd34d135f Binary files /dev/null and b/extensions/Stable-Diffusion-Webui-Civitai-Helper/scripts/ch_lib/__pycache__/downloader.cpython-310.pyc differ diff --git a/extensions/Stable-Diffusion-Webui-Civitai-Helper/scripts/ch_lib/__pycache__/js_action_civitai.cpython-310.pyc b/extensions/Stable-Diffusion-Webui-Civitai-Helper/scripts/ch_lib/__pycache__/js_action_civitai.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a7dbfaab595b1246d7025f3cbd0eed109ccc57c0 Binary files /dev/null and b/extensions/Stable-Diffusion-Webui-Civitai-Helper/scripts/ch_lib/__pycache__/js_action_civitai.cpython-310.pyc differ diff --git a/extensions/Stable-Diffusion-Webui-Civitai-Helper/scripts/ch_lib/__pycache__/model.cpython-310.pyc b/extensions/Stable-Diffusion-Webui-Civitai-Helper/scripts/ch_lib/__pycache__/model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cba323a88e7500a1968bae502d67c95b56f0a9fb Binary files /dev/null and b/extensions/Stable-Diffusion-Webui-Civitai-Helper/scripts/ch_lib/__pycache__/model.cpython-310.pyc differ diff --git a/extensions/Stable-Diffusion-Webui-Civitai-Helper/scripts/ch_lib/__pycache__/model_action_civitai.cpython-310.pyc b/extensions/Stable-Diffusion-Webui-Civitai-Helper/scripts/ch_lib/__pycache__/model_action_civitai.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..27a7b834c841ff2b48ba4e83bcc8f96812c93830 Binary files /dev/null and b/extensions/Stable-Diffusion-Webui-Civitai-Helper/scripts/ch_lib/__pycache__/model_action_civitai.cpython-310.pyc differ diff --git a/extensions/Stable-Diffusion-Webui-Civitai-Helper/scripts/ch_lib/__pycache__/msg_handler.cpython-310.pyc b/extensions/Stable-Diffusion-Webui-Civitai-Helper/scripts/ch_lib/__pycache__/msg_handler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5992f6683b7f08973375a0fe88e8508b4b6a00e5 Binary files /dev/null and 
b/extensions/Stable-Diffusion-Webui-Civitai-Helper/scripts/ch_lib/__pycache__/msg_handler.cpython-310.pyc differ diff --git a/extensions/Stable-Diffusion-Webui-Civitai-Helper/scripts/ch_lib/__pycache__/setting.cpython-310.pyc b/extensions/Stable-Diffusion-Webui-Civitai-Helper/scripts/ch_lib/__pycache__/setting.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fa72754ba1d6a0cb1ee39d0c63802c64ad240b35 Binary files /dev/null and b/extensions/Stable-Diffusion-Webui-Civitai-Helper/scripts/ch_lib/__pycache__/setting.cpython-310.pyc differ diff --git a/extensions/Stable-Diffusion-Webui-Civitai-Helper/scripts/ch_lib/__pycache__/util.cpython-310.pyc b/extensions/Stable-Diffusion-Webui-Civitai-Helper/scripts/ch_lib/__pycache__/util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b27428b40ac6e29c559fdf8674e1eb7da2bf297a Binary files /dev/null and b/extensions/Stable-Diffusion-Webui-Civitai-Helper/scripts/ch_lib/__pycache__/util.cpython-310.pyc differ diff --git a/extensions/Stable-Diffusion-Webui-Civitai-Helper/scripts/ch_lib/civitai.py b/extensions/Stable-Diffusion-Webui-Civitai-Helper/scripts/ch_lib/civitai.py new file mode 100644 index 0000000000000000000000000000000000000000..2a15ae745d6d98b75a177d8a3deb1c897fa2a2c3 --- /dev/null +++ b/extensions/Stable-Diffusion-Webui-Civitai-Helper/scripts/ch_lib/civitai.py @@ -0,0 +1,612 @@ +# -*- coding: UTF-8 -*- +# handle msg between js and python side +import os +import time +import json +import re +import requests +from . import util +from . import model +from . import setting + +suffix = ".civitai" + +url_dict = { + "modelPage":"https://civitai.com/models/", + "modelId": "https://civitai.com/api/v1/models/", + "modelVersionId": "https://civitai.com/api/v1/model-versions/", + "hash": "https://civitai.com/api/v1/model-versions/by-hash/" +} + +model_type_dict = { + "Checkpoint": "ckp", + "TextualInversion": "ti", + "Hypernetwork": "hyper", + "LORA": "lora", + "LoCon": "lora", +} + + + +# get image with full size +# width is in number, not string +# return: url str +def get_full_size_image_url(image_url, width): + return re.sub('/width=\d+/', '/width=' + str(width) + '/', image_url) + + +# use this sha256 to get model info from civitai +# return: model info dict +def get_model_info_by_hash(hash:str): + util.printD("Request model info from civitai") + + if not hash: + util.printD("hash is empty") + return + + r = requests.get(url_dict["hash"]+hash, headers=util.def_headers, proxies=util.proxies) + if not r.ok: + if r.status_code == 404: + # this is not a civitai model + util.printD("Civitai does not have this model") + return {} + else: + util.printD("Get error code: " + str(r.status_code)) + util.printD(r.text) + return + + # try to get content + content = None + try: + content = r.json() + except Exception as e: + util.printD("Parse response json failed") + util.printD(str(e)) + util.printD("response:") + util.printD(r.text) + return + + if not content: + util.printD("error, content from civitai is None") + return + + return content + + + +def get_model_info_by_id(id:str) -> dict: + util.printD("Request model info from civitai") + + if not id: + util.printD("id is empty") + return + + r = requests.get(url_dict["modelId"]+str(id), headers=util.def_headers, proxies=util.proxies) + if not r.ok: + if r.status_code == 404: + # this is not a civitai model + util.printD("Civitai does not have this model") + return {} + else: + util.printD("Get error code: " + str(r.status_code)) + 
util.printD(r.text) + return + + # try to get content + content = None + try: + content = r.json() + except Exception as e: + util.printD("Parse response json failed") + util.printD(str(e)) + util.printD("response:") + util.printD(r.text) + return + + if not content: + util.printD("error, content from civitai is None") + return + + return content + + +def get_version_info_by_version_id(id:str) -> dict: + util.printD("Request version info from civitai") + + if not id: + util.printD("id is empty") + return + + r = requests.get(url_dict["modelVersionId"]+str(id), headers=util.def_headers, proxies=util.proxies) + if not r.ok: + if r.status_code == 404: + # this is not a civitai model + util.printD("Civitai does not have this model version") + return {} + else: + util.printD("Get error code: " + str(r.status_code)) + util.printD(r.text) + return + + # try to get content + content = None + try: + content = r.json() + except Exception as e: + util.printD("Parse response json failed") + util.printD(str(e)) + util.printD("response:") + util.printD(r.text) + return + + if not content: + util.printD("error, content from civitai is None") + return + + return content + + +def get_version_info_by_model_id(id:str) -> dict: + + model_info = get_model_info_by_id(id) + if not model_info: + util.printD(f"Failed to get model info by id: {id}") + return + + # check content to get version id + if "modelVersions" not in model_info.keys(): + util.printD("There is no modelVersions in this model_info") + return + + if not model_info["modelVersions"]: + util.printD("modelVersions is None") + return + + if len(model_info["modelVersions"]) == 0: + util.printD("modelVersions is Empty") + return + + def_version = model_info["modelVersions"][0] + if not def_version: + util.printD("default version is None") + return + + if "id" not in def_version.keys(): + util.printD("default version has no id") + return + + version_id = def_version["id"] + + if not version_id: + util.printD("default version's id is None") + return + + # get version info + version_info = get_version_info_by_version_id(str(version_id)) + if not version_info: + util.printD(f"Failed to get version info by version_id: {version_id}") + return + + return version_info + + + + +# get model info file's content by model type and search_term +# parameter: model_type, search_term +# return: model_info +def load_model_info_by_search_term(model_type, search_term): + util.printD(f"Load model info of {search_term} in {model_type}") + if model_type not in model.folders.keys(): + util.printD("unknown model type: " + model_type) + return + + # search_term = subfolder path + model name + ext, + # and it always starts with a "/" even when there is no subfolder + base, ext = os.path.splitext(search_term) + model_info_base = base + if base[:1] == "/": + model_info_base = base[1:] + + model_folder = model.folders[model_type] + model_info_filename = model_info_base + suffix + model.info_ext + model_info_filepath = os.path.join(model_folder, model_info_filename) + + if not os.path.isfile(model_info_filepath): + util.printD("Cannot find model info file: " + model_info_filepath) + return + + return model.load_model_info(model_info_filepath) + + + + + +# get model file names by model type +# parameter: model_type - string +# parameter: filter - dict, which kind of model you need +# return: model name list +def get_model_names_by_type_and_filter(model_type:str, filter:dict) -> list: + + model_folder = model.folders[model_type] + + # set filter + # only get models that don't have a civitai info file + no_info_only = False + empty_info_only = False + + if filter: + if "no_info_only" in filter.keys(): + no_info_only = filter["no_info_only"] + if "empty_info_only" in filter.keys(): + empty_info_only = filter["empty_info_only"] + + + + # get information from filter + # only get those model names that don't have a civitai model info file + model_names = [] + for root, dirs, files in os.walk(model_folder, followlinks=True): + for filename in files: + item = os.path.join(root, filename) + # check extension + base, ext = os.path.splitext(item) + if ext in model.exts: + # found a model + + # check filter + if no_info_only: + # check model info file + info_file = base + suffix + model.info_ext + if os.path.isfile(info_file): + continue + + if empty_info_only: + # check model info file + info_file = base + suffix + model.info_ext + if os.path.isfile(info_file): + # load model info + model_info = model.load_model_info(info_file) + # check content + if model_info: + if "id" in model_info.keys(): + # found a non-empty model info file + continue + + model_names.append(filename) + + + return model_names + +def get_model_names_by_input(model_type, empty_info_only): + return get_model_names_by_type_and_filter(model_type, {"empty_info_only": empty_info_only}) + + +# get id from url +def get_model_id_from_url(url:str) -> str: + util.printD("Run get_model_id_from_url") + id = "" + + if not url: + util.printD("url or model id cannot be empty") + return "" + + if url.isnumeric(): + # it is already an id + id = str(url) + return id + + s = re.sub("\\?.+$", "", url).split("/") + if len(s) < 2: + util.printD("url is not valid") + return "" + + if s[-2].isnumeric(): + id = s[-2] + elif s[-1].isnumeric(): + id = s[-1] + else: + util.printD("There is no model id in this url") + return "" + + return id + + +# get preview image by model path +# image will be saved to file, so no return +def get_preview_image_by_model_path(model_path:str, max_size_preview, skip_nsfw_preview): + if not model_path: + util.printD("model_path is empty") + return + + if not os.path.isfile(model_path): + util.printD("model_path is not a file: " + model_path) + return + + base, ext = os.path.splitext(model_path) + first_preview = base + ".png" + sec_preview = base + ".preview.png" + info_file = base + suffix + model.info_ext + + # check preview image + if not os.path.isfile(sec_preview): + # need to download preview image + util.printD("Checking preview image for model: " + model_path) + # load model_info file + if os.path.isfile(info_file): + model_info = model.load_model_info(info_file) + if not model_info: + util.printD("Model Info is empty") + return + 
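# Editor's note, an illustrative sketch only (not part of this patch): the
# "images" entries read below typically look like the following (key names
# taken from the code, values hypothetical):
#     {"url": "https://image.civitai.com/xxx/width=450/yyy.jpeg",
#      "nsfw": false, "width": 1024}
# get_full_size_image_url() above swaps the "/width=450/" path segment for the
# image's full width before the preview is downloaded.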
if "images" in model_info.keys(): + if model_info["images"]: + for img_dict in model_info["images"]: + if "nsfw" in img_dict.keys(): + if img_dict["nsfw"]: + util.printD("This image is NSFW") + if skip_nsfw_preview: + util.printD("Skip NSFW image") + continue + + if "url" in img_dict.keys(): + img_url = img_dict["url"] + if max_size_preview: + # use max width + if "width" in img_dict.keys(): + if img_dict["width"]: + img_url = get_full_size_image_url(img_url, img_dict["width"]) + + util.download_file(img_url, sec_preview) + # we only need 1 preview image + break + + + +# search local model by version id in 1 folder, no subfolder +# return - model_info +def search_local_model_info_by_version_id(folder:str, version_id:int) -> dict: + util.printD("Searching local model by version id") + util.printD("folder: " + folder) + util.printD("version_id: " + str(version_id)) + + if not folder: + util.printD("folder is None") + return + + if not os.path.isdir(folder): + util.printD("folder is not a dir") + return + + if not version_id: + util.printD("version_id is None") + return + + # search civitai model info file + for filename in os.listdir(folder): + # check ext + base, ext = os.path.splitext(filename) + if ext == model.info_ext: + # found an info file + if len(base) < 9: + # not a civitai info file + continue + + if base[-8:] == suffix: + # found a civitai info file + path = os.path.join(folder, filename) + model_info = model.load_model_info(path) + if not model_info: + continue + + if "id" not in model_info.keys(): + continue + + id = model_info["id"] + if not id: + continue + + # util.printD(f"Compare version id, src: {id}, target:{version_id}") + if str(id) == str(version_id): + # found the one + return model_info + + + return + + + + + +# check new version for a model by model path +# return (model_path, model_id, model_name, new_version_id, new_version_name, description, download_url, img_url) +def check_model_new_version_by_path(model_path:str, delay:float=1) -> tuple: + if not model_path: + util.printD("model_path is empty") + return + + if not os.path.isfile(model_path): + util.printD("model_path is not a file: " + model_path) + return + + # get model info file name + base, ext = os.path.splitext(model_path) + info_file = base + suffix + model.info_ext + + if not os.path.isfile(info_file): + return + + # get model info + model_info_file = model.load_model_info(info_file) + if not model_info_file: + return + + if "id" not in model_info_file.keys(): + return + + local_version_id = model_info_file["id"] + if not local_version_id: + return + + if "modelId" not in model_info_file.keys(): + return + + model_id = model_info_file["modelId"] + if not model_id: + return + + # get model info by id from civitai + model_info = get_model_info_by_id(model_id) + # delay before the next request, to avoid being treated as a DDoS attack + util.printD(f"delay: {delay} second") + time.sleep(delay) + + if not model_info: + return + + if "modelVersions" not in model_info.keys(): + return + + modelVersions = model_info["modelVersions"] + if not modelVersions: + return + + if not len(modelVersions): + return + + current_version = modelVersions[0] + if not current_version: + return + + if "id" not in current_version.keys(): + return + + current_version_id = current_version["id"] + if not current_version_id: + return + + util.printD(f"Compare version id, local: {local_version_id}, remote: {current_version_id}") + if current_version_id == local_version_id: + return + + model_name = "" + if "name" in model_info.keys(): + model_name = model_info["name"] 
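# Editor's aside (illustration only, not part of this patch): each of the long
# key-check chains used in this file could be collapsed with dict.get(), which
# returns None for a missing key, e.g.:
#     model_name = model_info.get("name") or ""
#     new_version_name = current_version.get("name") or ""
# The explicit if-chains below are kept unchanged.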
+ + if not model_name: + model_name = "" + + + new_version_name = "" + if "name" in current_version.keys(): + new_version_name = current_version["name"] + + if not new_version_name: + new_version_name = "" + + description = "" + if "description" in current_version.keys(): + description = current_version["description"] + + if not description: + description = "" + + downloadUrl = "" + if "downloadUrl" in current_version.keys(): + downloadUrl = current_version["downloadUrl"] + + if not downloadUrl: + downloadUrl = "" + + # get 1 preview image + img_url = "" + if "images" in current_version.keys(): + if current_version["images"]: + if current_version["images"][0]: + if "url" in current_version["images"][0].keys(): + img_url = current_version["images"][0]["url"] + if not img_url: + img_url = "" + + + + return (model_path, model_id, model_name, current_version_id, new_version_name, description, downloadUrl, img_url) + + + + +# check models' new versions +# parameter: delay - float, how many seconds to delay between each request to civitai +# return: new_versions - a list of all new versions, each one is (model_path, model_id, model_name, new_version_id, new_version_name, description, download_url, img_url) +def check_models_new_version_by_model_types(model_types:list, delay:float=1) -> list: + util.printD("Checking models' new version") + + if not model_types: + return [] + + # check model types, which could be a single string for 1 type + mts = [] + if type(model_types) == str: + mts.append(model_types) + elif type(model_types) == list: + mts = model_types + else: + util.printD("Unknown model types:") + util.printD(model_types) + return [] + + # output is a markdown document string to show a list of new versions on UI + output = "" + # new version list + new_versions = [] + + # walk all models + for model_type, model_folder in model.folders.items(): + if model_type not in mts: + continue + + util.printD("Scanning path: " + model_folder) + for root, dirs, files in os.walk(model_folder, followlinks=True): + for filename in files: + # check ext + item = os.path.join(root, filename) + base, ext = os.path.splitext(item) + if ext in model.exts: + # found a model + r = check_model_new_version_by_path(item, delay) + + if not r: + continue + + model_path, model_id, model_name, current_version_id, new_version_name, description, downloadUrl, img_url = r + # check exist + if not current_version_id: + continue + + # check whether this version id is already in the list + is_already_in_list = False + for new_version in new_versions: + if current_version_id == new_version[3]: + # already in list + is_already_in_list = True + break + + if is_already_in_list: + util.printD("New version is already in list") + continue + + # search for this new version id to check if this model is already downloaded + target_model_info = search_local_model_info_by_version_id(root, current_version_id) + if target_model_info: + util.printD("New version is already downloaded") + continue + + # add to list + new_versions.append(r) + + + + + return new_versions + + + 
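# Editor's sketch (values hypothetical, field order taken from the return
# statement of check_model_new_version_by_path above): one entry of the
# returned new_versions list looks like
#     ("models/Lora/foo.safetensors", 42, "Foo", 314, "v2.0",
#      "<p>changelog</p>", "https://civitai.com/api/download/models/314",
#      "https://image.civitai.com/xxx/width=450/yyy.jpeg")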
diff --git a/extensions/Stable-Diffusion-Webui-Civitai-Helper/scripts/ch_lib/downloader.py b/extensions/Stable-Diffusion-Webui-Civitai-Helper/scripts/ch_lib/downloader.py new file mode 100644 index 0000000000000000000000000000000000000000..2b14ebc2cde6650337b4b3e1520337a66d6ea61a --- /dev/null +++ b/extensions/Stable-Diffusion-Webui-Civitai-Helper/scripts/ch_lib/downloader.py @@ -0,0 +1,115 @@ +# -*- coding: UTF-8 -*- +import sys +import requests +import os +from . import util + + +dl_ext = ".downloading" + +# disable ssl warning info +requests.packages.urllib3.disable_warnings() + +# output is the downloaded file path +def dl(url, folder, filename, filepath): + util.printD("Start downloading from: " + url) + # get file_path + file_path = "" + if filepath: + file_path = filepath + else: + # if file_path is not in the parameters, then folder must be in the parameters + if not folder: + util.printD("folder is None") + return + + if not os.path.isdir(folder): + util.printD("folder does not exist: " + folder) + return + + if filename: + file_path = os.path.join(folder, filename) + + # first request, just for the header + rh = requests.get(url, stream=True, verify=False, headers=util.def_headers, proxies=util.proxies) + # get file size + total_size = int(rh.headers['Content-Length']) + util.printD(f"File size: {total_size}") + + # if file_path is empty, need to get the file name from the download url's header + if not file_path: + filename = "" + if "Content-Disposition" in rh.headers.keys(): + cd = rh.headers["Content-Disposition"] + # Extract the filename from the header + # content of a CD: "attachment;filename=FileName.txt" + # in case the CD filename starts and ends with ", need to strip them out + filename = cd.split("=")[1].strip('"') + if not filename: + util.printD("Failed to get file name from Content-Disposition: " + cd) + return + + if not filename: + util.printD("Cannot get file name from download url's header") + return + + # with folder and filename, now we have the full file path + file_path = os.path.join(folder, filename) + + + util.printD("Target file path: " + file_path) + base, ext = os.path.splitext(file_path) + + # check if the file already exists + count = 2 + new_base = base + while os.path.isfile(file_path): + util.printD("Target file already exists.") + # re-name + new_base = base + "_" + str(count) + file_path = new_base + ext + count += 1 + + # use a temp file for downloading + dl_file_path = new_base + dl_ext + + + util.printD(f"Downloading to temp file: {dl_file_path}") + + # check if a partially downloaded temp file exists + downloaded_size = 0 + if os.path.exists(dl_file_path): + downloaded_size = os.path.getsize(dl_file_path) + + util.printD(f"Downloaded size: {downloaded_size}") + + # create Range header + headers = {'Range': 'bytes=%d-' % downloaded_size} + headers['User-Agent'] = util.def_headers['User-Agent'] + + # download with the Range header + r = requests.get(url, stream=True, verify=False, headers=headers, proxies=util.proxies) + + # write to file + with open(dl_file_path, "ab") as f: + for chunk in r.iter_content(chunk_size=1024): + if chunk: + downloaded_size += len(chunk) + f.write(chunk) + # force write to disk + f.flush() + + # progress + progress = int(50 * downloaded_size / total_size) + sys.stdout.reconfigure(encoding='utf-8') + sys.stdout.write("\r[%s%s] %d%%" % ('-' * progress, ' ' * (50 - progress), 100 * downloaded_size / total_size)) + sys.stdout.flush() + + print() + + # rename file + os.rename(dl_file_path, file_path) + util.printD(f"File Downloaded to: {file_path}") + return file_path + 
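# Editor's note, a minimal sketch of the resume mechanism above (illustration
# only; url and file names hypothetical):
#     done = os.path.getsize("x.downloading") if os.path.exists("x.downloading") else 0
#     r = requests.get(url, stream=True, headers={"Range": f"bytes={done}-"})
#     with open("x.downloading", "ab") as f:
#         for chunk in r.iter_content(chunk_size=1024):
#             f.write(chunk)
# i.e. append mode plus a byte-offset Range header continues a partial download.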
diff --git a/extensions/Stable-Diffusion-Webui-Civitai-Helper/scripts/ch_lib/js_action_civitai.py b/extensions/Stable-Diffusion-Webui-Civitai-Helper/scripts/ch_lib/js_action_civitai.py new file mode 100644 index 0000000000000000000000000000000000000000..0c818539e421329ed35a903ab08294a3553804c1 --- /dev/null +++ b/extensions/Stable-Diffusion-Webui-Civitai-Helper/scripts/ch_lib/js_action_civitai.py @@ -0,0 +1,256 @@ +# -*- coding: UTF-8 -*- +# handle msg between js and python side +import os +import json +import requests +import webbrowser +from . import util +from . import model +from . import civitai +from . import msg_handler +from . import downloader + + + +# get civitai's model url and open it in browser +# parameter: model_type, search_term +# output: python msg - will be sent to a hidden textbox then picked up by the js side +def open_model_url(msg, open_url_with_js): + util.printD("Start open_model_url") + + output = "" + result = msg_handler.parse_js_msg(msg) + if not result: + util.printD("Parsing js msg failed") + return + + model_type = result["model_type"] + search_term = result["search_term"] + + model_info = civitai.load_model_info_by_search_term(model_type, search_term) + if not model_info: + util.printD(f"Failed to get model info for {model_type} {search_term}") + return "" + + if "modelId" not in model_info.keys(): + util.printD(f"Failed to get model id from info file for {model_type} {search_term}") + return "" + + model_id = model_info["modelId"] + if not model_id: + util.printD(f"model id from info file of {model_type} {search_term} is None") + return "" + + url = civitai.url_dict["modelPage"] + str(model_id) + + + # msg content for js + content = { + "url": "" + } + + if not open_url_with_js: + util.printD("Open Url: " + url) + # open url + webbrowser.open_new_tab(url) + else: + util.printD("Send Url to js") + content["url"] = url + output = msg_handler.build_py_msg("open_url", content) + + util.printD("End open_model_url") + return output + + + +# add trigger words to prompt +# parameter: model_type, search_term, prompt +# return: [new_prompt, new_prompt] - new prompt with trigger words, returned twice for txt2img and img2img +def add_trigger_words(msg): + util.printD("Start add_trigger_words") + + result = msg_handler.parse_js_msg(msg) + if not result: + util.printD("Parsing js msg failed") + return + + model_type = result["model_type"] + search_term = result["search_term"] + prompt = result["prompt"] + + + model_info = civitai.load_model_info_by_search_term(model_type, search_term) + if not model_info: + util.printD(f"Failed to get model info for {model_type} {search_term}") + return [prompt, prompt] + + if "trainedWords" not in model_info.keys(): + util.printD(f"Failed to get trainedWords from info file for {model_type} {search_term}") + return [prompt, prompt] + + trainedWords = model_info["trainedWords"] + if not trainedWords: + util.printD(f"No trainedWords from info file for {model_type} {search_term}") + return [prompt, prompt] + + if len(trainedWords) == 0: + util.printD(f"trainedWords from info file for {model_type} {search_term} is empty") + return [prompt, prompt] + + # get full trigger words string + trigger_words = "" + for word in trainedWords: + trigger_words = trigger_words + word + ", " + + new_prompt = prompt + " " + trigger_words + util.printD("trigger_words: " + trigger_words) + util.printD("prompt: " + prompt) + util.printD("new_prompt: " + new_prompt) + + util.printD("End add_trigger_words") + + # add to prompt + return [new_prompt, new_prompt] + + + +# use preview image's prompt as prompt +# parameter: model_type, model_name, prompt, neg_prompt +# return: [new_prompt, new_neg_prompt, new_prompt, new_neg_prompt,] - returned twice for txt2img and img2img +def use_preview_image_prompt(msg): + util.printD("Start use_preview_image_prompt") + + result = msg_handler.parse_js_msg(msg) + if not result: + util.printD("Parsing js msg failed") + return + + model_type = result["model_type"] + search_term = result["search_term"] + 
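# Editor's illustration (payload values hypothetical, key names per the code):
# the hidden-textbox message parsed above is a JSON object such as
#     {"action": "use_preview_prompt", "model_type": "lora",
#      "search_term": "/subdir/model.safetensors",
#      "prompt": "1girl", "neg_prompt": "lowres"}
# msg_handler.parse_js_msg() json-decodes it (twice when the client side ran
# JSON.stringify twice) and returns the dict these lookups read from.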
prompt = result["prompt"] + neg_prompt = result["neg_prompt"] + + + model_info = civitai.load_model_info_by_search_term(model_type, search_term) + if not model_info: + util.printD(f"Failed to get model info for {model_type} {search_term}") + return [prompt, neg_prompt, prompt, neg_prompt] + + if "images" not in model_info.keys(): + util.printD(f"Failed to get images from info file for {model_type} {search_term}") + return [prompt, neg_prompt, prompt, neg_prompt] + + images = model_info["images"] + if not images: + util.printD(f"No images from info file for {model_type} {search_term}") + return [prompt, neg_prompt, prompt, neg_prompt] + + if len(images) == 0: + util.printD(f"images from info file for {model_type} {search_term} is empty") + return [prompt, neg_prompt, prompt, neg_prompt] + + # get prompt from preview images' meta data + preview_prompt = "" + preview_neg_prompt = "" + for img in images: + if "meta" in img.keys(): + if img["meta"]: + if "prompt" in img["meta"].keys(): + if img["meta"]["prompt"]: + preview_prompt = img["meta"]["prompt"] + + if "negativePrompt" in img["meta"].keys(): + if img["meta"]["negativePrompt"]: + preview_neg_prompt = img["meta"]["negativePrompt"] + + # we only need 1 prompt + if preview_prompt: + break + + if not preview_prompt: + util.printD(f"There is no prompt of {model_type} {search_term} in its preview image") + return [prompt, neg_prompt, prompt, neg_prompt] + + util.printD("End use_preview_image_prompt") + + return [preview_prompt, preview_neg_prompt, preview_prompt, preview_neg_prompt] + + +# download model's new verson by model path, version id and download url +# output is a md log +def dl_model_new_version(msg, max_size_preview, skip_nsfw_preview): + util.printD("Start dl_model_new_version") + + output = "" + + result = msg_handler.parse_js_msg(msg) + if not result: + output = "Parsing js ms failed" + util.printD(output) + return output + + model_path = result["model_path"] + version_id = result["version_id"] + download_url = result["download_url"] + + util.printD("model_path: " + model_path) + util.printD("version_id: " + str(version_id)) + util.printD("download_url: " + download_url) + + # check data + if not model_path: + output = "model_path is empty" + util.printD(output) + return output + + if not version_id: + output = "version_id is empty" + util.printD(output) + return output + + if not download_url: + output = "download_url is empty" + util.printD(output) + return output + + if not os.path.isfile(model_path): + output = "model_path is not a file: "+ model_path + util.printD(output) + return output + + # get model folder from model path + model_folder = os.path.dirname(model_path) + + # no need to check when downloading new version, since checking new version is already checked + # check if this model is already existed + # r = civitai.search_local_model_info_by_version_id(model_folder, version_id) + # if r: + # output = "This model version is already existed" + # util.printD(output) + # return output + + # download file + new_model_path = downloader.dl(download_url, model_folder, None, None) + if not new_model_path: + output = "Download failed, check console log for detail. Download url: " + download_url + util.printD(output) + return output + + # get version info + version_info = civitai.get_version_info_by_version_id(version_id) + if not version_info: + output = "Model downloaded, but failed to get version info, check console log for detail. 
Model saved to: " + new_model_path + util.printD(output) + return output + + # now write version info to file + base, ext = os.path.splitext(new_model_path) + info_file = base + civitai.suffix + model.info_ext + model.write_model_info(info_file, version_info) + + # then, get preview image + civitai.get_preview_image_by_model_path(new_model_path, max_size_preview, skip_nsfw_preview) + + output = "Done. Model downloaded to: " + new_model_path + util.printD(output) + return output diff --git a/extensions/Stable-Diffusion-Webui-Civitai-Helper/scripts/ch_lib/model.py b/extensions/Stable-Diffusion-Webui-Civitai-Helper/scripts/ch_lib/model.py new file mode 100644 index 0000000000000000000000000000000000000000..729012d742738cab559a8836727fc8289c21f18e --- /dev/null +++ b/extensions/Stable-Diffusion-Webui-Civitai-Helper/scripts/ch_lib/model.py @@ -0,0 +1,120 @@ +# -*- coding: UTF-8 -*- +# handle msg between js and python side +import os +import json +from . import util +from modules import shared + + +# this is the default root path +root_path = os.getcwd() + +# if command line arguement is used to change model folder, +# then model folder is in absolute path, not based on this root path anymore. +# so to make extension work with those absolute model folder paths, model folder also need to be in absolute path +folders = { + "ti": os.path.join(root_path, "embeddings"), + "hyper": os.path.join(root_path, "models", "hypernetworks"), + "ckp": os.path.join(root_path, "models", "Stable-diffusion"), + "lora": os.path.join(root_path, "models", "Lora"), +} + +exts = (".bin", ".pt", ".safetensors", ".ckpt") +info_ext = ".info" +vae_suffix = ".vae" + + +# get cusomter model path +def get_custom_model_folder(): + util.printD("Get Custom Model Folder") + + global folders + + if shared.cmd_opts.embeddings_dir and os.path.isdir(shared.cmd_opts.embeddings_dir): + folders["ti"] = shared.cmd_opts.embeddings_dir + + if shared.cmd_opts.hypernetwork_dir and os.path.isdir(shared.cmd_opts.hypernetwork_dir): + folders["hyper"] = shared.cmd_opts.hypernetwork_dir + + if shared.cmd_opts.ckpt_dir and os.path.isdir(shared.cmd_opts.ckpt_dir): + folders["ckp"] = shared.cmd_opts.ckpt_dir + + if shared.cmd_opts.lora_dir and os.path.isdir(shared.cmd_opts.lora_dir): + folders["lora"] = shared.cmd_opts.lora_dir + + + + + +# write model info to file +def write_model_info(path, model_info): + util.printD("Write model info to file: " + path) + with open(os.path.realpath(path), 'w') as f: + f.write(json.dumps(model_info, indent=4)) + + +def load_model_info(path): + # util.printD("Load model info from file: " + path) + model_info = None + with open(os.path.realpath(path), 'r') as f: + try: + model_info = json.load(f) + except Exception as e: + util.printD("Selected file is not json: " + path) + util.printD(e) + return + + return model_info + + +# get model file names by model type +# parameter: model_type - string +# return: model name list +def get_model_names_by_type(model_type:str) -> list: + + model_folder = folders[model_type] + + # get information from filter + # only get those model names don't have a civitai model info file + model_names = [] + for root, dirs, files in os.walk(model_folder, followlinks=True): + for filename in files: + item = os.path.join(root, filename) + # check extension + base, ext = os.path.splitext(item) + if ext in exts: + # find a model + model_names.append(filename) + + + return model_names + + +# return 2 values: (model_root, model_path) +def get_model_path_by_type_and_name(model_type:str, 
def get_model_path_by_type_and_name(model_type:str, model_name:str) -> tuple: + util.printD("Run get_model_path_by_type_and_name") + if model_type not in folders.keys(): + util.printD("unknown model_type: " + model_type) + return + + if not model_name: + util.printD("model name cannot be empty") + return + + folder = folders[model_type] + + # the model could be in a subfolder, need to walk + model_root = "" + model_path = "" + for root, dirs, files in os.walk(folder, followlinks=True): + for filename in files: + if filename == model_name: + # found the model + model_root = root + model_path = os.path.join(root, filename) + return (model_root, model_path) + + return + + + diff --git a/extensions/Stable-Diffusion-Webui-Civitai-Helper/scripts/ch_lib/model_action_civitai.py b/extensions/Stable-Diffusion-Webui-Civitai-Helper/scripts/ch_lib/model_action_civitai.py new file mode 100644 index 0000000000000000000000000000000000000000..f09f0752a406e772dcaa95b1ccec20ba9de39506 --- /dev/null +++ b/extensions/Stable-Diffusion-Webui-Civitai-Helper/scripts/ch_lib/model_action_civitai.py @@ -0,0 +1,511 @@ +# -*- coding: UTF-8 -*- +# model actions that talk to civitai +import os +import time +from . import util +from . import model +from . import civitai +from . import downloader + + +# scan models to generate SHA256, then use this SHA256 to get model info from civitai +# return output msg +def scan_model(scan_model_types, max_size_preview, skip_nsfw_preview): + util.printD("Start scan_model") + output = "" + + # check model types + if not scan_model_types: + output = "Model Types is None, cannot scan." + util.printD(output) + return output + + model_types = [] + # check whether it is a single string + if type(scan_model_types) == str: + model_types.append(scan_model_types) + else: + model_types = scan_model_types + + model_count = 0 + image_count = 0 + # scan_log = "" + for model_type, model_folder in model.folders.items(): + if model_type not in model_types: + continue + + util.printD("Scanning path: " + model_folder) + for root, dirs, files in os.walk(model_folder, followlinks=True): + for filename in files: + # check ext + item = os.path.join(root, filename) + base, ext = os.path.splitext(item) + if ext in model.exts: + # ignore vae file + if len(base) > 4: + if base[-4:] == model.vae_suffix: + # found a .vae + util.printD("This is a vae file: " + filename) + continue + + # found a model + # get info file + info_file = base + civitai.suffix + model.info_ext + # check info file + if not os.path.isfile(info_file): + util.printD("Creating model info for: " + filename) + # get model's sha256 + hash = util.gen_file_sha256(item) + + if not hash: + output = "failed generating SHA256 for model: " + filename + util.printD(output) + return output + + # use this sha256 to get model info from civitai + model_info = civitai.get_model_info_by_hash(hash) + # delay 1 second for ti + if model_type == "ti": + util.printD("Delay 1 second for TI") + time.sleep(1) + + if model_info is None: + output = "Connect to Civitai API service failed. Wait a while and try again" + util.printD(output) + return output + ", check console log for detail" + + # write model info to file + model.write_model_info(info_file, model_info) + + # set model_count + model_count = model_count + 1 + + # check preview image + civitai.get_preview_image_by_model_path(item, max_size_preview, skip_nsfw_preview) + image_count = image_count + 1 + + 
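# Editor's sketch of the lookup above (illustration only; path hypothetical):
#     hash = util.gen_file_sha256("models/Lora/foo.safetensors")
#     info = civitai.get_model_info_by_hash(hash)   # GET .../model-versions/by-hash/<sha256>
#     model.write_model_info("models/Lora/foo.civitai.info", info)
# i.e. one API round-trip per model file that has no info file yet.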
# scan_log = "Done" + + output = f"Done. Scanned {model_count} models, checked {image_count} images" + + util.printD(output) + + return output + + + +# Get model info by model type, name and url +# output is log info to display on the markdown component +def get_model_info_by_input(model_type, model_name, model_url_or_id, max_size_preview, skip_nsfw_preview): + output = "" + # parse model id + model_id = civitai.get_model_id_from_url(model_url_or_id) + if not model_id: + output = "failed to parse model id from url: " + model_url_or_id + util.printD(output) + return output + + # get model file path + # the model could be in a subfolder + result = model.get_model_path_by_type_and_name(model_type, model_name) + if not result: + output = "failed to get model file path" + util.printD(output) + return output + + model_root, model_path = result + if not model_path: + output = "model path is empty" + util.printD(output) + return output + + # get info file path + base, ext = os.path.splitext(model_path) + info_file = base + civitai.suffix + model.info_ext + + # get model info + # we call it model_info, but in civitai it is actually version info + model_info = civitai.get_version_info_by_model_id(model_id) + + if not model_info: + output = "failed to get model info from url: " + model_url_or_id + util.printD(output) + return output + + # write model info to file + model.write_model_info(info_file, model_info) + + util.printD("Saved model info to: " + info_file) + + # check preview image + civitai.get_preview_image_by_model_path(model_path, max_size_preview, skip_nsfw_preview) + + output = "Model Info saved to: " + info_file + return output + + + +# check models' new version and output to UI as markdown doc +def check_models_new_version_to_md(model_types:list) -> str: + new_versions = civitai.check_models_new_version_by_model_types(model_types, 1) + + count = 0 + output = "" + if not new_versions: + output = "No model has new version" + else: + output = "Found new version for following models: <br>"
" + for new_version in new_versions: + count = count+1 + model_path, model_id, model_name, new_verion_id, new_version_name, description, download_url, img_url = new_version + # in md, each part is something like this: + # [model_name](model_url) + # [version_name](download_url) + # version description + url = civitai.url_dict["modelPage"]+str(model_id) + + part = f'' + part = part + f'
File: {model_path}
' + if download_url: + # replace "\" to "/" in model_path for windows + model_path = model_path.replace('\\', '\\\\') + part = part + f'
New Version: {new_version_name}' + # add js function to download new version into SD webui by python + part = part + " " + # in embed HTML, onclick= will also follow a ", never a ', so have to write it as following + part = part + f"[Download into SD]" + + else: + part = part + f'
New Version: {new_version_name}' + part = part + '
' + + # description + if description: + part = part + '
'+ description + '

' + + # preview image + if img_url: + part = part + f"
" + + + output = output + part + + util.printD(f"Done. Find {count} models have new version. Check UI for detail.") + + return output + + +# get model info by url +def get_model_info_by_url(model_url_or_id:str) -> tuple: + util.printD("Getting model info by: " + model_url_or_id) + + # parse model id + model_id = civitai.get_model_id_from_url(model_url_or_id) + if not model_id: + util.printD("failed to parse model id from url or id") + return + + model_info = civitai.get_model_info_by_id(model_id) + if model_info is None: + util.printD("Connect to Civitai API service failed. Wait a while and try again") + return + + if not model_info: + util.printD("failed to get model info from url or id") + return + + # parse model type, model name, subfolder, version from this model info + # get model type + if "type" not in model_info.keys(): + util.printD("model type is not in model_info") + return + + civitai_model_type = model_info["type"] + if civitai_model_type not in civitai.model_type_dict.keys(): + util.printD("This model type is not supported:"+civitai_model_type) + return + + model_type = civitai.model_type_dict[civitai_model_type] + + # get model type + if "name" not in model_info.keys(): + util.printD("model name is not in model_info") + return + + model_name = model_info["name"] + if not model_name: + util.printD("model name is Empty") + model_name = "" + + # get version list + if "modelVersions" not in model_info.keys(): + util.printD("modelVersions is not in model_info") + return + + modelVersions = model_info["modelVersions"] + if not modelVersions: + util.printD("modelVersions is Empty") + return + + version_strs = [] + for version in modelVersions: + # version name can not be used as id + # version id is not readable + # so , we use name_id as version string + version_str = version["name"]+"_"+str(version["id"]) + version_strs.append(version_str) + + # get folder by model type + folder = model.folders[model_type] + # get subfolders + subfolders = util.get_subfolders(folder) + if not subfolders: + subfolders = [] + + # add default root folder + subfolders.append("/") + + util.printD("Get following info for downloading:") + util.printD(f"model_name:{model_name}") + util.printD(f"model_type:{model_type}") + util.printD(f"subfolders:{subfolders}") + util.printD(f"version_strs:{version_strs}") + + return (model_info, model_name, model_type, subfolders, version_strs) + +# get version info by version string +def get_ver_info_by_ver_str(version_str:str, model_info:dict) -> dict: + if not version_str: + util.printD("version_str is empty") + return + + if not model_info: + util.printD("model_info is None") + return + + # get version list + if "modelVersions" not in model_info.keys(): + util.printD("modelVersions is not in model_info") + return + + modelVersions = model_info["modelVersions"] + if not modelVersions: + util.printD("modelVersions is Empty") + return + + # find version by version_str + version = None + for ver in modelVersions: + # version name can not be used as id + # version id is not readable + # so , we use name_id as version string + ver_str = ver["name"]+"_"+str(ver["id"]) + if ver_str == version_str: + # find version + version = ver + + if not version: + util.printD("can not find version by version string: " + version_str) + return + + # get version id + if "id" not in version.keys(): + util.printD("this version has no id") + return + + return version + + +# get download url from model info by version string +# return - (version_id, download_url) +def 
def get_id_and_dl_url_by_version_str(version_str:str, model_info:dict) -> tuple: + if not version_str: + util.printD("version_str is empty") + return + + if not model_info: + util.printD("model_info is None") + return + + # get version list + if "modelVersions" not in model_info.keys(): + util.printD("modelVersions is not in model_info") + return + + modelVersions = model_info["modelVersions"] + if not modelVersions: + util.printD("modelVersions is Empty") + return + + # find version by version_str + version = None + for ver in modelVersions: + # a version name cannot be used as an id + # a version id is not readable + # so, we use name_id as the version string + ver_str = ver["name"] + "_" + str(ver["id"]) + if ver_str == version_str: + # found the version + version = ver + + if not version: + util.printD("cannot find version by version string: " + version_str) + return + + # get version id + if "id" not in version.keys(): + util.printD("this version has no id") + return + + version_id = version["id"] + if not version_id: + util.printD("version id is Empty") + return + + # get download url + if "downloadUrl" not in version.keys(): + util.printD("downloadUrl is not in this version") + return + + downloadUrl = version["downloadUrl"] + if not downloadUrl: + util.printD("downloadUrl is Empty") + return + + util.printD("Get Download Url: " + downloadUrl) + + return (version_id, downloadUrl) + + +# download model from civitai by input +# output to markdown log +def dl_model_by_input(model_info:dict, model_type:str, subfolder_str:str, version_str:str, dl_all_bool:bool, max_size_preview:bool, skip_nsfw_preview:bool) -> str: + + output = "" + + if not model_info: + output = "model_info is None" + util.printD(output) + return output + + if not model_type: + output = "model_type is None" + util.printD(output) + return output + + if not subfolder_str: + output = "subfolder string is None" + util.printD(output) + return output + + if not version_str: + output = "version_str is None" + util.printD(output) + return output + + # get model root folder + if model_type not in model.folders.keys(): + output = "Unknown model type: " + model_type + util.printD(output) + return output + + model_root_folder = model.folders[model_type] + + + # get subfolder + subfolder = "" + if subfolder_str == "/" or subfolder_str == "\\": + subfolder = "" + elif subfolder_str[:1] == "/" or subfolder_str[:1] == "\\": + subfolder = subfolder_str[1:] + else: + subfolder = subfolder_str + + # get model folder for downloading + model_folder = os.path.join(model_root_folder, subfolder) + if not os.path.isdir(model_folder): + output = "Model folder is not a dir: " + model_folder + util.printD(output) + return output + + # get version info + ver_info = get_ver_info_by_ver_str(version_str, model_info) + if not ver_info: + output = "Failed to get version info, check console log for detail" + util.printD(output) + return output + + version_id = ver_info["id"] + + + if dl_all_bool: + # get all download urls from the files info + # some model versions have multiple files + download_urls = [] + if "files" in ver_info.keys(): + for file_info in ver_info["files"]: + if "downloadUrl" in file_info.keys(): + download_urls.append(file_info["downloadUrl"]) + + if not len(download_urls): + if "downloadUrl" in ver_info.keys(): + download_urls.append(ver_info["downloadUrl"]) + + + # check if this model version already exists + r = civitai.search_local_model_info_by_version_id(model_folder, version_id) + if r: + output = "This model version already exists" + util.printD(output) + 
return output + + # download + filepath = "" + for url in download_urls: + model_filepath = downloader.dl(url, model_folder, None, None) + if not model_filepath: + output = "Downloading failed, check console log for detail" + util.printD(output) + return output + + if url == ver_info["downloadUrl"]: + filepath = model_filepath + else: + # only download one file + # get download url + url = ver_info["downloadUrl"] + if not url: + output = "Fail to get download url, check console log for detail" + util.printD(output) + return output + + # download + filepath = downloader.dl(url, model_folder, None, None) + if not filepath: + output = "Downloading failed, check console log for detail" + util.printD(output) + return output + + + if not filepath: + filepath = model_filepath + + # get version info + version_info = civitai.get_version_info_by_version_id(version_id) + if not version_info: + output = "Model downloaded, but failed to get version info, check console log for detail. Model saved to: " + filepath + util.printD(output) + return output + + # write version info to file + base, ext = os.path.splitext(filepath) + info_file = base + civitai.suffix + model.info_ext + model.write_model_info(info_file, version_info) + + # then, get preview image + civitai.get_preview_image_by_model_path(filepath, max_size_preview, skip_nsfw_preview) + + output = "Done. Model downloaded to: " + filepath + util.printD(output) + return output diff --git a/extensions/Stable-Diffusion-Webui-Civitai-Helper/scripts/ch_lib/msg_handler.py b/extensions/Stable-Diffusion-Webui-Civitai-Helper/scripts/ch_lib/msg_handler.py new file mode 100644 index 0000000000000000000000000000000000000000..1132d775f0f03547f0291138dbe53d970f2fe869 --- /dev/null +++ b/extensions/Stable-Diffusion-Webui-Civitai-Helper/scripts/ch_lib/msg_handler.py @@ -0,0 +1,64 @@ +# -*- coding: UTF-8 -*- +# handle msg between js and python side +import json +from . 
import util + +# action list +js_actions = ("open_url", "add_trigger_words", "use_preview_prompt", "dl_model_new_version") +# note the trailing comma: without it, this would be a plain string rather than a 1-element tuple +py_actions = ("open_url",) + + +# handle request from javascript +# parameter: msg - msg from js as a string in a hidden textbox +# return: dict for result +def parse_js_msg(msg): + util.printD("Start parse js msg") + msg_dict = json.loads(msg) + + # in case the client side ran JSON.stringify twice + if type(msg_dict) == str: + msg_dict = json.loads(msg_dict) + + if "action" not in msg_dict.keys(): + util.printD("Cannot find action in js request") + return + + action = msg_dict["action"] + if not action: + util.printD("Action from js request is None") + return + + if action not in js_actions: + util.printD("Unknown action: " + action) + return + + util.printD("End parse js msg") + + return msg_dict + + +# build python side msg for sending to js +# parameter: content dict +# return: msg as string, to fill into a hidden textbox +def build_py_msg(action:str, content:dict): + util.printD("Start build_msg") + if not content: + util.printD("Content is None") + return + + if not action: + util.printD("Action is None") + return + + if action not in py_actions: + util.printD("Unknown action: " + action) + return + + msg = { + "action": action, + "content": content + } + + + util.printD("End build_msg") + return json.dumps(msg) \ No newline at end of file 
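# Editor's illustration (payload hypothetical): the two helpers above are the
# two ends of the hidden-textbox protocol:
#     msg = build_py_msg("open_url", {"url": "https://civitai.com/models/1"})
#     # -> '{"action": "open_url", "content": {"url": "https://civitai.com/models/1"}}'
#     parse_js_msg(msg)["action"]   # -> "open_url"
# parse_js_msg() also tolerates a payload that was JSON.stringify-ed twice.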
diff --git a/extensions/Stable-Diffusion-Webui-Civitai-Helper/scripts/ch_lib/setting.py b/extensions/Stable-Diffusion-Webui-Civitai-Helper/scripts/ch_lib/setting.py new file mode 100644 index 0000000000000000000000000000000000000000..aabff852d5fb989840f2aefc05867bce8d673994 --- /dev/null +++ b/extensions/Stable-Diffusion-Webui-Civitai-Helper/scripts/ch_lib/setting.py @@ -0,0 +1,113 @@ +# -*- coding: UTF-8 -*- +# collect settings here +import json +import os +import modules.scripts as scripts +from . import util + + +name = "setting.json" +path = os.path.join(scripts.basedir(), name) + +data = { + "model": { + "max_size_preview": True, + "skip_nsfw_preview": False + }, + "general": { + "open_url_with_js": True, + "always_display": False, + "show_btn_on_thumb": True, + "proxy": "", + }, + "tool": { + } +} + + + +# save setting +# return output msg for log +def save(): + print("Saving setting to: " + path) + + json_data = json.dumps(data, indent=4) + + output = "" + + # write to file + try: + with open(path, 'w') as f: + f.write(json_data) + except Exception as e: + util.printD("Error when writing file: " + path) + output = str(e) + util.printD(str(e)) + return output + + output = "Setting saved to: " + path + util.printD(output) + + return output + + +# load setting into global data +def load(): + # load data into the global data dict + global data + + util.printD("Load setting from: " + path) + + if not os.path.isfile(path): + util.printD("No setting file, use default") + return + + json_data = None + with open(path, 'r') as f: + json_data = json.load(f) + + # check error + if not json_data: + util.printD("Failed to load setting file") + return + + data = json_data + + # check for new keys + if "always_display" not in data["general"].keys(): + data["general"]["always_display"] = False + + if "show_btn_on_thumb" not in data["general"].keys(): + data["general"]["show_btn_on_thumb"] = True + + if "proxy" not in data["general"].keys(): + data["general"]["proxy"] = "" + + + return + +# save setting from parameters +def save_from_input(max_size_preview, skip_nsfw_preview, open_url_with_js, always_display, show_btn_on_thumb, proxy): + global data + data = { + "model": { + "max_size_preview": max_size_preview, + "skip_nsfw_preview": skip_nsfw_preview + }, + "general": { + "open_url_with_js": open_url_with_js, + "always_display": always_display, + "show_btn_on_thumb": show_btn_on_thumb, + "proxy": proxy, + }, + "tool": { + } + } + + output = save() + + if not output: + output = "" + + return output + diff --git a/extensions/Stable-Diffusion-Webui-Civitai-Helper/scripts/ch_lib/util.py b/extensions/Stable-Diffusion-Webui-Civitai-Helper/scripts/ch_lib/util.py new file mode 100644 index 0000000000000000000000000000000000000000..3e43c98d01c4c62a64fbdcbeaafdce051ddd4d36 --- /dev/null +++ b/extensions/Stable-Diffusion-Webui-Civitai-Helper/scripts/ch_lib/util.py @@ -0,0 +1,105 @@ +# -*- coding: UTF-8 -*- +import os +import io +import hashlib +import requests +import shutil + + +version = "1.6.4" + +def_headers = {'User-Agent': 'Mozilla/5.0 (iPad; CPU OS 12_2 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148'} + + +proxies = None + + +# print for debugging +def printD(msg): + print(f"Civitai Helper: {msg}") + + +def read_chunks(file, size=io.DEFAULT_BUFFER_SIZE): + """Yield pieces of data from a file-like object until EOF.""" + while True: + chunk = file.read(size) + if not chunk: + break + yield chunk + 
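# Editor's aside (equivalent sketch, stdlib only): the chunked pattern used by
# gen_file_sha256() below boils down to
#     h = hashlib.sha256()
#     with open(path, "rb") as f:
#         for block in iter(lambda: f.read(1 << 20), b""):
#             h.update(block)
#     digest = h.hexdigest()
# which keeps memory flat no matter how large the checkpoint file is.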
# Now, hashing uses the same approach as pip's source code. +def gen_file_sha256(filename): + printD("Use Memory Optimized SHA256") + blocksize = 1 << 20 + h = hashlib.sha256() + length = 0 + with open(os.path.realpath(filename), 'rb') as f: + for block in read_chunks(f, size=blocksize): + length += len(block) + h.update(block) + + hash_value = h.hexdigest() + printD("sha256: " + hash_value) + printD("length: " + str(length)) + return hash_value + + + +# get preview image +def download_file(url, path): + printD("Downloading file from: " + url) + # get file + r = requests.get(url, stream=True, headers=def_headers, proxies=proxies) + if not r.ok: + printD("Get error code: " + str(r.status_code)) + printD(r.text) + return + + # write to file + with open(os.path.realpath(path), 'wb') as f: + r.raw.decode_content = True + shutil.copyfileobj(r.raw, f) + + printD("File downloaded to: " + path) + +# get subfolder list +def get_subfolders(folder:str) -> list: + printD("Get subfolder for: " + folder) + if not folder: + printD("folder cannot be None") + return + + if not os.path.isdir(folder): + printD("path is not a folder") + return + + prefix_len = len(folder) + subfolders = [] + for root, dirs, files in os.walk(folder, followlinks=True): + for dirname in dirs: + full_dir_path = os.path.join(root, dirname) + # get the subfolder path from it + subfolder = full_dir_path[prefix_len:] + subfolders.append(subfolder) + + return subfolders + + +# get relative path +def get_relative_path(item_path:str, parent_path:str) -> str: + # printD("item_path: " + item_path) + # printD("parent_path: " + parent_path) + # item path must start with parent_path + if not item_path: + return "" + if not parent_path: + return "" + if not item_path.startswith(parent_path): + return item_path + + relative = item_path[len(parent_path):] + if relative[:1] == "/" or relative[:1] == "\\": + relative = relative[1:] + + # printD("relative: " + relative) + return relative \ No newline at end of file diff --git a/extensions/Stable-Diffusion-Webui-Civitai-Helper/scripts/civitai_helper.py b/extensions/Stable-Diffusion-Webui-Civitai-Helper/scripts/civitai_helper.py new file mode 100644 index 0000000000000000000000000000000000000000..ad9e70a1cff66b38085691765753c5dd5dc0677d --- /dev/null +++ b/extensions/Stable-Diffusion-Webui-Civitai-Helper/scripts/civitai_helper.py @@ -0,0 +1,218 @@ +# -*- coding: UTF-8 -*- +# This extension can help you manage your models from civitai. 
It can download previews, add trigger words, open model pages, and use prompts from preview images.
+# repo: https://github.com/butaixianran/
+
+
+
+import modules.scripts as scripts
+import gradio as gr
+import os
+import webbrowser
+import requests
+import random
+import hashlib
+import json
+import shutil
+import re
+import modules
+from modules import script_callbacks
+from modules import shared
+from scripts.ch_lib import model
+from scripts.ch_lib import js_action_civitai
+from scripts.ch_lib import model_action_civitai
+from scripts.ch_lib import setting
+from scripts.ch_lib import civitai
+from scripts.ch_lib import util
+
+
+# init
+
+# root path
+root_path = os.getcwd()
+
+# extension path
+extension_path = scripts.basedir()
+
+model.get_custom_model_folder()
+setting.load()
+
+# set proxy
+if setting.data["general"]["proxy"]:
+    util.printD("Set Proxy: " + setting.data["general"]["proxy"])
+    util.proxies = {
+        "http": setting.data["general"]["proxy"],
+        "https": setting.data["general"]["proxy"],
+    }
+
+
+
+
+def on_ui_tabs():
+    # init
+    # init_py_msg = {
+    #     # relative extension path
+    #     "extension_path": util.get_relative_path(extension_path, root_path),
+    # }
+    # init_py_msg_str = json.dumps(init_py_msg)
+
+
+    # get prompt textareas
+    # check modules/ui.py, search for txt2img_paste_fields
+    # the negative prompt is the second element
+    txt2img_prompt = modules.ui.txt2img_paste_fields[0][0]
+    txt2img_neg_prompt = modules.ui.txt2img_paste_fields[1][0]
+    img2img_prompt = modules.ui.img2img_paste_fields[0][0]
+    img2img_neg_prompt = modules.ui.img2img_paste_fields[1][0]
+
+    # ====Event functions====
+    def get_model_names_by_input(model_type, empty_info_only):
+        names = civitai.get_model_names_by_input(model_type, empty_info_only)
+        return model_name_drop.update(choices=names)
+
+
+    def get_model_info_by_url(url):
+        r = model_action_civitai.get_model_info_by_url(url)
+
+        model_info = {}
+        model_name = ""
+        model_type = ""
+        subfolders = []
+        version_strs = []
+        if r:
+            model_info, model_name, model_type, subfolders, version_strs = r
+
+        return [model_info, model_name, model_type, dl_subfolder_drop.update(choices=subfolders), dl_version_drop.update(choices=version_strs)]
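The two event functions above return component updates rather than plain values: under gradio 3.x (assumed here, since the extension calls the update() helper), returning gr.Dropdown.update(choices=...) tells the frontend to swap a dropdown's option list in place. A minimal self-contained sketch of the same pattern, with hypothetical data:

import gradio as gr

# hypothetical model lists keyed by model type
CHOICES = {
    "ckp": ["modelA.ckpt", "modelB.safetensors"],
    "lora": ["loraC.safetensors"],
}

def repopulate(model_type):
    # returning an update object replaces the dropdown's choices in place
    return gr.Dropdown.update(choices=CHOICES.get(model_type, []), value=None)

with gr.Blocks() as demo:
    type_radio = gr.Radio(choices=list(CHOICES.keys()), value="ckp", label="Model Type")
    name_drop = gr.Dropdown(choices=CHOICES["ckp"], label="Model")
    type_radio.change(repopulate, inputs=type_radio, outputs=name_drop)

demo.launch()

This only works when the component is declared as the event's output, which is why get_model_info_by_url lists both dropdowns in its return value.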
+    # ====UI====
+    with gr.Blocks(analytics_enabled=False) as civitai_helper:
+    # with gr.Blocks(css=".block.padded {padding: 10px !important}") as civitai_helper:
+
+        # init
+        max_size_preview = setting.data["model"]["max_size_preview"]
+        skip_nsfw_preview = setting.data["model"]["skip_nsfw_preview"]
+        open_url_with_js = setting.data["general"]["open_url_with_js"]
+        always_display = setting.data["general"]["always_display"]
+        show_btn_on_thumb = setting.data["general"]["show_btn_on_thumb"]
+        proxy = setting.data["general"]["proxy"]
+
+        model_types = list(model.folders.keys())
+        no_info_model_names = civitai.get_model_names_by_input("ckp", False)
+
+        # session data
+        dl_model_info = gr.State({})
+
+
+
+        with gr.Box(elem_classes="ch_box"):
+            with gr.Column():
+                gr.Markdown("### Scan Models for Civitai")
+                with gr.Row():
+                    max_size_preview_ckb = gr.Checkbox(label="Download Max Size Preview", value=max_size_preview, elem_id="ch_max_size_preview_ckb")
+                    skip_nsfw_preview_ckb = gr.Checkbox(label="Skip NSFW Preview Images", value=skip_nsfw_preview, elem_id="ch_skip_nsfw_preview_ckb")
+                    scan_model_types_ckbg = gr.CheckboxGroup(choices=model_types, label="Model Types", value=model_types)
+
+                # with gr.Row():
+                scan_model_civitai_btn = gr.Button(value="Scan", variant="primary", elem_id="ch_scan_model_civitai_btn")
+                # with gr.Row():
+                scan_model_log_md = gr.Markdown(value="Scanning takes time, just wait. Check the console log for details.", elem_id="ch_scan_model_log_md")
+
+
+        with gr.Box(elem_classes="ch_box"):
+            with gr.Column():
+                gr.Markdown("### Get Model Info from Civitai by URL")
+                gr.Markdown("Use this when scanning cannot find a local model on Civitai")
+                with gr.Row():
+                    model_type_drop = gr.Dropdown(choices=model_types, label="Model Type", value="ckp", multiselect=False)
+                    empty_info_only_ckb = gr.Checkbox(label="Only Show Models That Have No Info", value=False, elem_id="ch_empty_info_only_ckb", elem_classes="ch_vpadding")
+                    model_name_drop = gr.Dropdown(choices=no_info_model_names, label="Model", multiselect=False)
+
+                model_url_or_id_txtbox = gr.Textbox(label="Civitai URL", lines=1, value="")
+                get_civitai_model_info_by_id_btn = gr.Button(value="Get Model Info from Civitai", variant="primary")
+                get_model_by_id_log_md = gr.Markdown("")
+
+        with gr.Box(elem_classes="ch_box"):
+            with gr.Column():
+                gr.Markdown("### Download Model")
+                with gr.Row():
+                    dl_model_url_or_id_txtbox = gr.Textbox(label="Civitai URL", lines=1, value="")
+                    dl_model_info_btn = gr.Button(value="1. Get Model Info by Civitai URL", variant="primary")
+
+                gr.Markdown(value="2. Pick Subfolder and Model Version")
+                with gr.Row():
+                    dl_model_name_txtbox = gr.Textbox(label="Model Name", interactive=False, lines=1, value="")
+                    dl_model_type_txtbox = gr.Textbox(label="Model Type", interactive=False, lines=1, value="")
+                    dl_subfolder_drop = gr.Dropdown(choices=[], label="Sub-folder", value="", interactive=True, multiselect=False)
+                    dl_version_drop = gr.Dropdown(choices=[], label="Model Version", value="", interactive=True, multiselect=False)
+                    dl_all_ckb = gr.Checkbox(label="Download All Files", value=False, elem_id="ch_dl_all_ckb", elem_classes="ch_vpadding")
+
+                dl_civitai_model_by_id_btn = gr.Button(value="3. Download Model", variant="primary")
+                dl_log_md = gr.Markdown(value="Check the console log for download status")
+
+        with gr.Box(elem_classes="ch_box"):
+            with gr.Column():
+                gr.Markdown("### Check Models' New Version")
+                with gr.Row():
+                    model_types_ckbg = gr.CheckboxGroup(choices=model_types, label="Model Types", value=["lora"])
+                    check_models_new_version_btn = gr.Button(value="Check New Version from Civitai", variant="primary")
+
+                check_models_new_version_log_md = gr.HTML("It takes time, just wait. Check the console log for details.")
+
+        with gr.Box(elem_classes="ch_box"):
+            with gr.Column():
+                gr.Markdown("### Other Settings")
+                with gr.Row():
+                    open_url_with_js_ckb = gr.Checkbox(label="Open URL at Client Side", value=open_url_with_js, elem_id="ch_open_url_with_js_ckb")
+                    always_display_ckb = gr.Checkbox(label="Always Display Buttons", value=always_display, elem_id="ch_always_display_ckb")
+                    show_btn_on_thumb_ckb = gr.Checkbox(label="Show Button On Thumb Mode", value=show_btn_on_thumb, elem_id="ch_show_btn_on_thumb_ckb")
+
+                proxy_txtbox = gr.Textbox(label="Proxy", interactive=True, lines=1, value=proxy, info="format: http://127.0.0.1:port")
+
+                save_setting_btn = gr.Button(value="Save Settings")
+                general_log_md = gr.Markdown(value="")
+
+
+        # ====Footer====
+        gr.Markdown(f"version:{util.version}")
") + + # ====hidden component for js, not in any tab==== + js_msg_txtbox = gr.Textbox(label="Request Msg From Js", visible=False, lines=1, value="", elem_id="ch_js_msg_txtbox") + py_msg_txtbox = gr.Textbox(label="Response Msg From Python", visible=False, lines=1, value="", elem_id="ch_py_msg_txtbox") + + js_open_url_btn = gr.Button(value="Open Model Url", visible=False, elem_id="ch_js_open_url_btn") + js_add_trigger_words_btn = gr.Button(value="Add Trigger Words", visible=False, elem_id="ch_js_add_trigger_words_btn") + js_use_preview_prompt_btn = gr.Button(value="Use Prompt from Preview Image", visible=False, elem_id="ch_js_use_preview_prompt_btn") + js_dl_model_new_version_btn = gr.Button(value="Download Model's new version", visible=False, elem_id="ch_js_dl_model_new_version_btn") + + # ====events==== + # Scan Models for Civitai + scan_model_civitai_btn.click(model_action_civitai.scan_model, inputs=[scan_model_types_ckbg, max_size_preview_ckb, skip_nsfw_preview_ckb], outputs=scan_model_log_md) + + # Get Civitai Model Info by Model Page URL + model_type_drop.change(get_model_names_by_input, inputs=[model_type_drop, empty_info_only_ckb], outputs=model_name_drop) + empty_info_only_ckb.change(get_model_names_by_input, inputs=[model_type_drop, empty_info_only_ckb], outputs=model_name_drop) + + get_civitai_model_info_by_id_btn.click(model_action_civitai.get_model_info_by_input, inputs=[model_type_drop, model_name_drop, model_url_or_id_txtbox, max_size_preview_ckb, skip_nsfw_preview_ckb], outputs=get_model_by_id_log_md) + + # Download Model + dl_model_info_btn.click(get_model_info_by_url, inputs=dl_model_url_or_id_txtbox, outputs=[dl_model_info, dl_model_name_txtbox, dl_model_type_txtbox, dl_subfolder_drop, dl_version_drop]) + dl_civitai_model_by_id_btn.click(model_action_civitai.dl_model_by_input, inputs=[dl_model_info, dl_model_type_txtbox, dl_subfolder_drop, dl_version_drop, dl_all_ckb, max_size_preview_ckb, skip_nsfw_preview_ckb], outputs=dl_log_md) + + # Check models' new version + check_models_new_version_btn.click(model_action_civitai.check_models_new_version_to_md, inputs=model_types_ckbg, outputs=check_models_new_version_log_md) + + # Other Setting + save_setting_btn.click(setting.save_from_input, inputs=[max_size_preview_ckb, skip_nsfw_preview_ckb, open_url_with_js_ckb, always_display_ckb, show_btn_on_thumb_ckb, proxy_txtbox], outputs=general_log_md) + + # js action + js_open_url_btn.click(js_action_civitai.open_model_url, inputs=[js_msg_txtbox, open_url_with_js_ckb], outputs=py_msg_txtbox) + js_add_trigger_words_btn.click(js_action_civitai.add_trigger_words, inputs=[js_msg_txtbox], outputs=[txt2img_prompt, img2img_prompt]) + js_use_preview_prompt_btn.click(js_action_civitai.use_preview_image_prompt, inputs=[js_msg_txtbox], outputs=[txt2img_prompt, txt2img_neg_prompt, img2img_prompt, img2img_neg_prompt]) + js_dl_model_new_version_btn.click(js_action_civitai.dl_model_new_version, inputs=[js_msg_txtbox, max_size_preview_ckb, skip_nsfw_preview_ckb], outputs=dl_log_md) + + # the third parameter is the element id on html, with a "tab_" as prefix + return (civitai_helper , "Civitai Helper", "civitai_helper"), + +script_callbacks.on_ui_tabs(on_ui_tabs) + + + diff --git a/extensions/Stable-Diffusion-Webui-Civitai-Helper/style.css b/extensions/Stable-Diffusion-Webui-Civitai-Helper/style.css new file mode 100644 index 0000000000000000000000000000000000000000..387156e4cd2c93e798fd373e1a4797b2470ac921 --- /dev/null +++ b/extensions/Stable-Diffusion-Webui-Civitai-Helper/style.css @@ -0,0 +1,18 @@ 
+blockquote ul { + list-style:disc; + margin:4px 40px; +} + +blockquote ol { + list-style:decimal; + margin:4px 40px; +} + +.block.padded.ch_box { + padding: 10px !important; +} + +.block.padded.ch_vpadding { + padding: 10px 0 !important; +} + diff --git a/extensions/put extensions here.txt b/extensions/put extensions here.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/html/card-no-preview.png b/html/card-no-preview.png new file mode 100644 index 0000000000000000000000000000000000000000..e2beb2692067db56ac5f7bd5bfc3d895d9063371 Binary files /dev/null and b/html/card-no-preview.png differ diff --git a/html/extra-networks-card.html b/html/extra-networks-card.html new file mode 100644 index 0000000000000000000000000000000000000000..39674666f1e336d9bf61d2a6986721cf8591eeee --- /dev/null +++ b/html/extra-networks-card.html @@ -0,0 +1,14 @@ +
+ {background_image} +
+ {metadata_button} + {edit_button} +
+
+
+ +
+ {name} + {description} +
+
diff --git a/html/extra-networks-no-cards.html b/html/extra-networks-no-cards.html new file mode 100644 index 0000000000000000000000000000000000000000..389358d6c4b383fdc3c5686e029e7b3b1ae9a493 --- /dev/null +++ b/html/extra-networks-no-cards.html @@ -0,0 +1,8 @@ +
+

Nothing here. Add some content to the following directories:

+ +
    +{dirs} +
+
+ diff --git a/html/footer.html b/html/footer.html new file mode 100644 index 0000000000000000000000000000000000000000..8739a0f4752fd00b941d888d9a676158a3ba31a2 --- /dev/null +++ b/html/footer.html @@ -0,0 +1,15 @@ +
+ API +  •  + Github +  •  + Gradio +  •  + Startup profile +  •  + Reload UI +
+
+
+{versions} +
diff --git a/html/licenses.html b/html/licenses.html new file mode 100644 index 0000000000000000000000000000000000000000..ca44deddd3663514962493c06a42a38d608c1229 --- /dev/null +++ b/html/licenses.html @@ -0,0 +1,690 @@ + + +

CodeFormer

+Parts of CodeFormer code had to be copied to be compatible with GFPGAN. +
+S-Lab License 1.0
+
+Copyright 2022 S-Lab
+
+Redistribution and use for non-commercial purpose in source and
+binary forms, with or without modification, are permitted provided
+that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions and the following disclaimer in
+   the documentation and/or other materials provided with the
+   distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+   contributors may be used to endorse or promote products derived
+   from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+In the event that redistribution and/or use for commercial purpose in
+source or binary forms, with or without modification is required,
+please contact the contributor(s) of the work.
+
+ + +

ESRGAN

+Code for the architecture and for reading models was copied.
+
+MIT License
+
+Copyright (c) 2021 victorca25
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+ +

Real-ESRGAN

+Some code is copied to support ESRGAN models. +
+BSD 3-Clause License
+
+Copyright (c) 2021, Xintao Wang
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+   contributors may be used to endorse or promote products derived from
+   this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ +

InvokeAI

+Some code for compatibility with OSX is taken from lstein's repository. +
+MIT License
+
+Copyright (c) 2022 InvokeAI Team
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+ +

LDSR

+Code added by contributors, most likely copied from this repository.
+
+MIT License
+
+Copyright (c) 2022 Machine Vision and Learning Group, LMU Munich
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+ +

CLIP Interrogator

+Some small amounts of code borrowed and reworked. +
+MIT License
+
+Copyright (c) 2022 pharmapsychotic
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+ +

SwinIR

+Code added by contributors, most likely copied from this repository. + +
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [2021] [SwinIR Authors]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
+ +

Memory Efficient Attention

+The sub-quadratic cross attention optimization uses modified code from the Memory Efficient Attention package that Alex Birch optimized for 3D tensors. This license is updated to reflect that. +
+MIT License
+
+Copyright (c) 2023 Alex Birch
+Copyright (c) 2023 Amin Rezaei
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+ +

Scaled Dot Product Attention

+Some small amounts of code borrowed and reworked. +
+   Copyright 2023 The HuggingFace Team. All rights reserved.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
+ +

Curated transformers

+The MPS workaround for nn.Linear on macOS 13.2.x is based on the nn.Linear workaround created by danieldk for Curated Transformers.
+
+The MIT License (MIT)
+
+Copyright (C) 2021 ExplosionAI GmbH
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+ +

TAESD

+Tiny AutoEncoder for Stable Diffusion, used as an option for live previews.
+
+MIT License
+
+Copyright (c) 2023 Ollin Boer Bohan
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
\ No newline at end of file diff --git a/javascript/aspectRatioOverlay.js b/javascript/aspectRatioOverlay.js new file mode 100644 index 0000000000000000000000000000000000000000..2cf2d571fc02a026b6cdedcf589a217ef0d65d27 --- /dev/null +++ b/javascript/aspectRatioOverlay.js @@ -0,0 +1,113 @@ + +let currentWidth = null; +let currentHeight = null; +let arFrameTimeout = setTimeout(function() {}, 0); + +function dimensionChange(e, is_width, is_height) { + + if (is_width) { + currentWidth = e.target.value * 1.0; + } + if (is_height) { + currentHeight = e.target.value * 1.0; + } + + var inImg2img = gradioApp().querySelector("#tab_img2img").style.display == "block"; + + if (!inImg2img) { + return; + } + + var targetElement = null; + + var tabIndex = get_tab_index('mode_img2img'); + if (tabIndex == 0) { // img2img + targetElement = gradioApp().querySelector('#img2img_image div[data-testid=image] img'); + } else if (tabIndex == 1) { //Sketch + targetElement = gradioApp().querySelector('#img2img_sketch div[data-testid=image] img'); + } else if (tabIndex == 2) { // Inpaint + targetElement = gradioApp().querySelector('#img2maskimg div[data-testid=image] img'); + } else if (tabIndex == 3) { // Inpaint sketch + targetElement = gradioApp().querySelector('#inpaint_sketch div[data-testid=image] img'); + } + + + if (targetElement) { + + var arPreviewRect = gradioApp().querySelector('#imageARPreview'); + if (!arPreviewRect) { + arPreviewRect = document.createElement('div'); + arPreviewRect.id = "imageARPreview"; + gradioApp().appendChild(arPreviewRect); + } + + + + var viewportOffset = targetElement.getBoundingClientRect(); + + var viewportscale = Math.min(targetElement.clientWidth / targetElement.naturalWidth, targetElement.clientHeight / targetElement.naturalHeight); + + var scaledx = targetElement.naturalWidth * viewportscale; + var scaledy = targetElement.naturalHeight * viewportscale; + + var cleintRectTop = (viewportOffset.top + window.scrollY); + var cleintRectLeft = (viewportOffset.left + window.scrollX); + var cleintRectCentreY = cleintRectTop + (targetElement.clientHeight / 2); + var cleintRectCentreX = cleintRectLeft + (targetElement.clientWidth / 2); + + var arscale = Math.min(scaledx / currentWidth, scaledy / currentHeight); + var arscaledx = currentWidth * arscale; + var arscaledy = currentHeight * arscale; + + var arRectTop = cleintRectCentreY - (arscaledy / 2); + var arRectLeft = cleintRectCentreX - (arscaledx / 2); + var arRectWidth = arscaledx; + var arRectHeight = arscaledy; + + arPreviewRect.style.top = arRectTop + 'px'; + arPreviewRect.style.left = arRectLeft + 'px'; + arPreviewRect.style.width = arRectWidth + 'px'; + arPreviewRect.style.height = arRectHeight + 'px'; + + clearTimeout(arFrameTimeout); + arFrameTimeout = setTimeout(function() { + arPreviewRect.style.display = 'none'; + }, 2000); + + arPreviewRect.style.display = 'block'; + + } + +} + + +onAfterUiUpdate(function() { + var arPreviewRect = gradioApp().querySelector('#imageARPreview'); + if (arPreviewRect) { + arPreviewRect.style.display = 'none'; + } + var tabImg2img = gradioApp().querySelector("#tab_img2img"); + if (tabImg2img) { + var inImg2img = tabImg2img.style.display == "block"; + if (inImg2img) { + let inputs = gradioApp().querySelectorAll('input'); + inputs.forEach(function(e) { + var is_width = e.parentElement.id == "img2img_width"; + var is_height = e.parentElement.id == "img2img_height"; + + if ((is_width || is_height) && !e.classList.contains('scrollwatch')) { + e.addEventListener('input', function(e) { + 
dimensionChange(e, is_width, is_height); + }); + e.classList.add('scrollwatch'); + } + if (is_width) { + currentWidth = e.value * 1.0; + } + if (is_height) { + currentHeight = e.value * 1.0; + } + }); + } + } +}); diff --git a/javascript/contextMenus.js b/javascript/contextMenus.js new file mode 100644 index 0000000000000000000000000000000000000000..ccae242f2b6a731e89d8752814aae6b78e143482 --- /dev/null +++ b/javascript/contextMenus.js @@ -0,0 +1,176 @@ + +var contextMenuInit = function() { + let eventListenerApplied = false; + let menuSpecs = new Map(); + + const uid = function() { + return Date.now().toString(36) + Math.random().toString(36).substring(2); + }; + + function showContextMenu(event, element, menuEntries) { + let posx = event.clientX + document.body.scrollLeft + document.documentElement.scrollLeft; + let posy = event.clientY + document.body.scrollTop + document.documentElement.scrollTop; + + let oldMenu = gradioApp().querySelector('#context-menu'); + if (oldMenu) { + oldMenu.remove(); + } + + let baseStyle = window.getComputedStyle(uiCurrentTab); + + const contextMenu = document.createElement('nav'); + contextMenu.id = "context-menu"; + contextMenu.style.background = baseStyle.background; + contextMenu.style.color = baseStyle.color; + contextMenu.style.fontFamily = baseStyle.fontFamily; + contextMenu.style.top = posy + 'px'; + contextMenu.style.left = posx + 'px'; + + + + const contextMenuList = document.createElement('ul'); + contextMenuList.className = 'context-menu-items'; + contextMenu.append(contextMenuList); + + menuEntries.forEach(function(entry) { + let contextMenuEntry = document.createElement('a'); + contextMenuEntry.innerHTML = entry['name']; + contextMenuEntry.addEventListener("click", function() { + entry['func'](); + }); + contextMenuList.append(contextMenuEntry); + + }); + + gradioApp().appendChild(contextMenu); + + let menuWidth = contextMenu.offsetWidth + 4; + let menuHeight = contextMenu.offsetHeight + 4; + + let windowWidth = window.innerWidth; + let windowHeight = window.innerHeight; + + if ((windowWidth - posx) < menuWidth) { + contextMenu.style.left = windowWidth - menuWidth + "px"; + } + + if ((windowHeight - posy) < menuHeight) { + contextMenu.style.top = windowHeight - menuHeight + "px"; + } + + } + + function appendContextMenuOption(targetElementSelector, entryName, entryFunction) { + + var currentItems = menuSpecs.get(targetElementSelector); + + if (!currentItems) { + currentItems = []; + menuSpecs.set(targetElementSelector, currentItems); + } + let newItem = { + id: targetElementSelector + '_' + uid(), + name: entryName, + func: entryFunction, + isNew: true + }; + + currentItems.push(newItem); + return newItem['id']; + } + + function removeContextMenuOption(uid) { + menuSpecs.forEach(function(v) { + let index = -1; + v.forEach(function(e, ei) { + if (e['id'] == uid) { + index = ei; + } + }); + if (index >= 0) { + v.splice(index, 1); + } + }); + } + + function addContextMenuEventListener() { + if (eventListenerApplied) { + return; + } + gradioApp().addEventListener("click", function(e) { + if (!e.isTrusted) { + return; + } + + let oldMenu = gradioApp().querySelector('#context-menu'); + if (oldMenu) { + oldMenu.remove(); + } + }); + gradioApp().addEventListener("contextmenu", function(e) { + let oldMenu = gradioApp().querySelector('#context-menu'); + if (oldMenu) { + oldMenu.remove(); + } + menuSpecs.forEach(function(v, k) { + if (e.composedPath()[0].matches(k)) { + showContextMenu(e, e.composedPath()[0], v); + e.preventDefault(); + } + }); + }); + 
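+        // set the flag so repeated onAfterUiUpdate calls don't attach duplicate listeners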
eventListenerApplied = true; + + } + + return [appendContextMenuOption, removeContextMenuOption, addContextMenuEventListener]; +}; + +var initResponse = contextMenuInit(); +var appendContextMenuOption = initResponse[0]; +var removeContextMenuOption = initResponse[1]; +var addContextMenuEventListener = initResponse[2]; + +(function() { + //Start example Context Menu Items + let generateOnRepeat = function(genbuttonid, interruptbuttonid) { + let genbutton = gradioApp().querySelector(genbuttonid); + let interruptbutton = gradioApp().querySelector(interruptbuttonid); + if (!interruptbutton.offsetParent) { + genbutton.click(); + } + clearInterval(window.generateOnRepeatInterval); + window.generateOnRepeatInterval = setInterval(function() { + if (!interruptbutton.offsetParent) { + genbutton.click(); + } + }, + 500); + }; + + let generateOnRepeat_txt2img = function() { + generateOnRepeat('#txt2img_generate', '#txt2img_interrupt'); + }; + + let generateOnRepeat_img2img = function() { + generateOnRepeat('#img2img_generate', '#img2img_interrupt'); + }; + + appendContextMenuOption('#txt2img_generate', 'Generate forever', generateOnRepeat_txt2img); + appendContextMenuOption('#txt2img_interrupt', 'Generate forever', generateOnRepeat_txt2img); + appendContextMenuOption('#img2img_generate', 'Generate forever', generateOnRepeat_img2img); + appendContextMenuOption('#img2img_interrupt', 'Generate forever', generateOnRepeat_img2img); + + let cancelGenerateForever = function() { + clearInterval(window.generateOnRepeatInterval); + }; + + appendContextMenuOption('#txt2img_interrupt', 'Cancel generate forever', cancelGenerateForever); + appendContextMenuOption('#txt2img_generate', 'Cancel generate forever', cancelGenerateForever); + appendContextMenuOption('#img2img_interrupt', 'Cancel generate forever', cancelGenerateForever); + appendContextMenuOption('#img2img_generate', 'Cancel generate forever', cancelGenerateForever); + +})(); +//End example Context Menu Items + +onAfterUiUpdate(addContextMenuEventListener); diff --git a/javascript/dragdrop.js b/javascript/dragdrop.js new file mode 100644 index 0000000000000000000000000000000000000000..5803daea5ef33341b5307e03a7ebbadc7c324ed7 --- /dev/null +++ b/javascript/dragdrop.js @@ -0,0 +1,130 @@ +// allows drag-dropping files into gradio image elements, and also pasting images from clipboard + +function isValidImageList(files) { + return files && files?.length === 1 && ['image/png', 'image/gif', 'image/jpeg'].includes(files[0].type); +} + +function dropReplaceImage(imgWrap, files) { + if (!isValidImageList(files)) { + return; + } + + const tmpFile = files[0]; + + imgWrap.querySelector('.modify-upload button + button, .touch-none + div button + button')?.click(); + const callback = () => { + const fileInput = imgWrap.querySelector('input[type="file"]'); + if (fileInput) { + if (files.length === 0) { + files = new DataTransfer(); + files.items.add(tmpFile); + fileInput.files = files.files; + } else { + fileInput.files = files; + } + fileInput.dispatchEvent(new Event('change')); + } + }; + + if (imgWrap.closest('#pnginfo_image')) { + // special treatment for PNG Info tab, wait for fetch request to finish + const oldFetch = window.fetch; + window.fetch = async(input, options) => { + const response = await oldFetch(input, options); + if ('api/predict/' === input) { + const content = await response.text(); + window.fetch = oldFetch; + window.requestAnimationFrame(() => callback()); + return new Response(content, { + status: response.status, + statusText: 
response.statusText, + headers: response.headers + }); + } + return response; + }; + } else { + window.requestAnimationFrame(() => callback()); + } +} + +function eventHasFiles(e) { + if (!e.dataTransfer || !e.dataTransfer.files) return false; + if (e.dataTransfer.files.length > 0) return true; + if (e.dataTransfer.items.length > 0 && e.dataTransfer.items[0].kind == "file") return true; + + return false; +} + +function dragDropTargetIsPrompt(target) { + if (target?.placeholder && target?.placeholder.indexOf("Prompt") >= 0) return true; + if (target?.parentNode?.parentNode?.className?.indexOf("prompt") > 0) return true; + return false; +} + +window.document.addEventListener('dragover', e => { + const target = e.composedPath()[0]; + if (!eventHasFiles(e)) return; + + var targetImage = target.closest('[data-testid="image"]'); + if (!dragDropTargetIsPrompt(target) && !targetImage) return; + + e.stopPropagation(); + e.preventDefault(); + e.dataTransfer.dropEffect = 'copy'; +}); + +window.document.addEventListener('drop', e => { + const target = e.composedPath()[0]; + if (!eventHasFiles(e)) return; + + if (dragDropTargetIsPrompt(target)) { + e.stopPropagation(); + e.preventDefault(); + + let prompt_target = get_tab_index('tabs') == 1 ? "img2img_prompt_image" : "txt2img_prompt_image"; + + const imgParent = gradioApp().getElementById(prompt_target); + const files = e.dataTransfer.files; + const fileInput = imgParent.querySelector('input[type="file"]'); + if (fileInput) { + fileInput.files = files; + fileInput.dispatchEvent(new Event('change')); + } + } + + var targetImage = target.closest('[data-testid="image"]'); + if (targetImage) { + e.stopPropagation(); + e.preventDefault(); + const files = e.dataTransfer.files; + dropReplaceImage(targetImage, files); + return; + } +}); + +window.addEventListener('paste', e => { + const files = e.clipboardData.files; + if (!isValidImageList(files)) { + return; + } + + const visibleImageFields = [...gradioApp().querySelectorAll('[data-testid="image"]')] + .filter(el => uiElementIsVisible(el)) + .sort((a, b) => uiElementInSight(b) - uiElementInSight(a)); + + + if (!visibleImageFields.length) { + return; + } + + const firstFreeImageField = visibleImageFields + .filter(el => el.querySelector('input[type=file]'))?.[0]; + + dropReplaceImage( + firstFreeImageField ? 
+ firstFreeImageField : + visibleImageFields[visibleImageFields.length - 1] + , files + ); +}); diff --git a/javascript/edit-attention.js b/javascript/edit-attention.js new file mode 100644 index 0000000000000000000000000000000000000000..8906c8922e17709ebde168f15d3f7c18706e75d4 --- /dev/null +++ b/javascript/edit-attention.js @@ -0,0 +1,121 @@ +function keyupEditAttention(event) { + let target = event.originalTarget || event.composedPath()[0]; + if (!target.matches("*:is([id*='_toprow'] [id*='_prompt'], .prompt) textarea")) return; + if (!(event.metaKey || event.ctrlKey)) return; + + let isPlus = event.key == "ArrowUp"; + let isMinus = event.key == "ArrowDown"; + if (!isPlus && !isMinus) return; + + let selectionStart = target.selectionStart; + let selectionEnd = target.selectionEnd; + let text = target.value; + + function selectCurrentParenthesisBlock(OPEN, CLOSE) { + if (selectionStart !== selectionEnd) return false; + + // Find opening parenthesis around current cursor + const before = text.substring(0, selectionStart); + let beforeParen = before.lastIndexOf(OPEN); + if (beforeParen == -1) return false; + let beforeParenClose = before.lastIndexOf(CLOSE); + while (beforeParenClose !== -1 && beforeParenClose > beforeParen) { + beforeParen = before.lastIndexOf(OPEN, beforeParen - 1); + beforeParenClose = before.lastIndexOf(CLOSE, beforeParenClose - 1); + } + + // Find closing parenthesis around current cursor + const after = text.substring(selectionStart); + let afterParen = after.indexOf(CLOSE); + if (afterParen == -1) return false; + let afterParenOpen = after.indexOf(OPEN); + while (afterParenOpen !== -1 && afterParen > afterParenOpen) { + afterParen = after.indexOf(CLOSE, afterParen + 1); + afterParenOpen = after.indexOf(OPEN, afterParenOpen + 1); + } + if (beforeParen === -1 || afterParen === -1) return false; + + // Set the selection to the text between the parenthesis + const parenContent = text.substring(beforeParen + 1, selectionStart + afterParen); + const lastColon = parenContent.lastIndexOf(":"); + selectionStart = beforeParen + 1; + selectionEnd = selectionStart + lastColon; + target.setSelectionRange(selectionStart, selectionEnd); + return true; + } + + function selectCurrentWord() { + if (selectionStart !== selectionEnd) return false; + const delimiters = opts.keyedit_delimiters + " \r\n\t"; + + // seek backward to find the beginning + while (!delimiters.includes(text[selectionStart - 1]) && selectionStart > 0) { + selectionStart--; + } + + // seek forward to find end + while (!delimiters.includes(text[selectionEnd]) && selectionEnd < text.length) { + selectionEnd++; + } + + target.setSelectionRange(selectionStart, selectionEnd); + return true; + } + + // If the user hasn't selected anything, let's select their current parenthesis block or word + if (!selectCurrentParenthesisBlock('<', '>') && !selectCurrentParenthesisBlock('(', ')')) { + selectCurrentWord(); + } + + event.preventDefault(); + + var closeCharacter = ')'; + var delta = opts.keyedit_precision_attention; + + if (selectionStart > 0 && text[selectionStart - 1] == '<') { + closeCharacter = '>'; + delta = opts.keyedit_precision_extra; + } else if (selectionStart == 0 || text[selectionStart - 1] != "(") { + + // do not include spaces at the end + while (selectionEnd > selectionStart && text[selectionEnd - 1] == ' ') { + selectionEnd -= 1; + } + if (selectionStart == selectionEnd) { + return; + } + + text = text.slice(0, selectionStart) + "(" + text.slice(selectionStart, selectionEnd) + ":1.0)" +
text.slice(selectionEnd); + + selectionStart += 1; + selectionEnd += 1; + } + + var end = text.slice(selectionEnd + 1).indexOf(closeCharacter) + 1; + var weight = parseFloat(text.slice(selectionEnd + 1, selectionEnd + 1 + end)); + if (isNaN(weight)) return; + + weight += isPlus ? delta : -delta; + weight = parseFloat(weight.toPrecision(12)); + if (String(weight).length == 1) weight += ".0"; + + if (closeCharacter == ')' && weight == 1) { + var endParenPos = text.substring(selectionEnd).indexOf(')'); + text = text.slice(0, selectionStart - 1) + text.slice(selectionStart, selectionEnd) + text.slice(selectionEnd + endParenPos + 1); + selectionStart--; + selectionEnd--; + } else { + text = text.slice(0, selectionEnd + 1) + weight + text.slice(selectionEnd + end); + } + + target.focus(); + target.value = text; + target.selectionStart = selectionStart; + target.selectionEnd = selectionEnd; + + updateInput(target); +} + +addEventListener('keydown', (event) => { + keyupEditAttention(event); +}); diff --git a/javascript/edit-order.js b/javascript/edit-order.js new file mode 100644 index 0000000000000000000000000000000000000000..ed4ef9ac399a6d0bd83435958dc4d46837760c6a --- /dev/null +++ b/javascript/edit-order.js @@ -0,0 +1,41 @@ +/* alt+left/right moves text in prompt */ + +function keyupEditOrder(event) { + if (!opts.keyedit_move) return; + + let target = event.originalTarget || event.composedPath()[0]; + if (!target.matches("*:is([id*='_toprow'] [id*='_prompt'], .prompt) textarea")) return; + if (!event.altKey) return; + + let isLeft = event.key == "ArrowLeft"; + let isRight = event.key == "ArrowRight"; + if (!isLeft && !isRight) return; + event.preventDefault(); + + let selectionStart = target.selectionStart; + let selectionEnd = target.selectionEnd; + let text = target.value; + let items = text.split(","); + let indexStart = (text.slice(0, selectionStart).match(/,/g) || []).length; + let indexEnd = (text.slice(0, selectionEnd).match(/,/g) || []).length; + let range = indexEnd - indexStart + 1; + + if (isLeft && indexStart > 0) { + items.splice(indexStart - 1, 0, ...items.splice(indexStart, range)); + target.value = items.join(); + target.selectionStart = items.slice(0, indexStart - 1).join().length + (indexStart == 1 ? 
0 : 1); + target.selectionEnd = items.slice(0, indexEnd).join().length; + } else if (isRight && indexEnd < items.length - 1) { + items.splice(indexStart + 1, 0, ...items.splice(indexStart, range)); + target.value = items.join(); + target.selectionStart = items.slice(0, indexStart + 1).join().length + 1; + target.selectionEnd = items.slice(0, indexEnd + 2).join().length; + } + + event.preventDefault(); + updateInput(target); +} + +addEventListener('keydown', (event) => { + keyupEditOrder(event); +}); diff --git a/javascript/extensions.js b/javascript/extensions.js new file mode 100644 index 0000000000000000000000000000000000000000..1f7254c5dfe90f1e8913898687fec99a63a7acdb --- /dev/null +++ b/javascript/extensions.js @@ -0,0 +1,92 @@ + +function extensions_apply(_disabled_list, _update_list, disable_all) { + var disable = []; + var update = []; + + gradioApp().querySelectorAll('#extensions input[type="checkbox"]').forEach(function(x) { + if (x.name.startsWith("enable_") && !x.checked) { + disable.push(x.name.substring(7)); + } + + if (x.name.startsWith("update_") && x.checked) { + update.push(x.name.substring(7)); + } + }); + + restart_reload(); + + return [JSON.stringify(disable), JSON.stringify(update), disable_all]; +} + +function extensions_check() { + var disable = []; + + gradioApp().querySelectorAll('#extensions input[type="checkbox"]').forEach(function(x) { + if (x.name.startsWith("enable_") && !x.checked) { + disable.push(x.name.substring(7)); + } + }); + + gradioApp().querySelectorAll('#extensions .extension_status').forEach(function(x) { + x.innerHTML = "Loading..."; + }); + + + var id = randomId(); + requestProgress(id, gradioApp().getElementById('extensions_installed_top'), null, function() { + + }); + + return [id, JSON.stringify(disable)]; +} + +function install_extension_from_index(button, url) { + button.disabled = "disabled"; + button.value = "Installing..."; + + var textarea = gradioApp().querySelector('#extension_to_install textarea'); + textarea.value = url; + updateInput(textarea); + + gradioApp().querySelector('#install_extension_button').click(); +} + +function config_state_confirm_restore(_, config_state_name, config_restore_type) { + if (config_state_name == "Current") { + return [false, config_state_name, config_restore_type]; + } + let restored = ""; + if (config_restore_type == "extensions") { + restored = "all saved extension versions"; + } else if (config_restore_type == "webui") { + restored = "the webui version"; + } else { + restored = "the webui version and all saved extension versions"; + } + let confirmed = confirm("Are you sure you want to restore from this state?\nThis will reset " + restored + "."); + if (confirmed) { + restart_reload(); + gradioApp().querySelectorAll('#extensions .extension_status').forEach(function(x) { + x.innerHTML = "Loading..."; + }); + } + return [confirmed, config_state_name, config_restore_type]; +} + +function toggle_all_extensions(event) { + gradioApp().querySelectorAll('#extensions .extension_toggle').forEach(function(checkbox_el) { + checkbox_el.checked = event.target.checked; + }); +} + +function toggle_extension() { + let all_extensions_toggled = true; + for (const checkbox_el of gradioApp().querySelectorAll('#extensions .extension_toggle')) { + if (!checkbox_el.checked) { + all_extensions_toggled = false; + break; + } + } + + gradioApp().querySelector('#extensions .all_extensions_toggle').checked = all_extensions_toggled; +} diff --git a/javascript/extraNetworks.js b/javascript/extraNetworks.js new file mode 100644 
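The extension-page helpers above follow webui's usual Gradio wiring: a browser function named via the Python side's `_js=` argument receives the bound components' values, and the array it returns replaces those values before the Python callback runs. A hedged sketch of a direct call under that convention (the nulls stand in for component values extensions_apply ignores; note that calling it also triggers restart_reload()):

```js
// Simulating what Gradio does when the Apply button is clicked with
// _js="extensions_apply": checkbox state is read straight from the DOM.
const [disableJson, updateJson, disableAll] = extensions_apply(null, null, false);

console.log(JSON.parse(disableJson)); // extensions whose "enable_*" box is unchecked
console.log(JSON.parse(updateJson));  // extensions whose "update_*" box is checked
console.log(disableAll);              // third component's value, passed through unchanged
```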
index 0000000000000000000000000000000000000000..5582a6e5d3b4ea553e661f00048fa98b12564d90 --- /dev/null +++ b/javascript/extraNetworks.js @@ -0,0 +1,313 @@ +function setupExtraNetworksForTab(tabname) { + gradioApp().querySelector('#' + tabname + '_extra_tabs').classList.add('extra-networks'); + + var tabs = gradioApp().querySelector('#' + tabname + '_extra_tabs > div'); + var search = gradioApp().querySelector('#' + tabname + '_extra_search textarea'); + var sort = gradioApp().getElementById(tabname + '_extra_sort'); + var sortOrder = gradioApp().getElementById(tabname + '_extra_sortorder'); + var refresh = gradioApp().getElementById(tabname + '_extra_refresh'); + + search.classList.add('search'); + sort.classList.add('sort'); + sortOrder.classList.add('sortorder'); + sort.dataset.sortkey = 'sortDefault'; + tabs.appendChild(search); + tabs.appendChild(sort); + tabs.appendChild(sortOrder); + tabs.appendChild(refresh); + + var applyFilter = function() { + var searchTerm = search.value.toLowerCase(); + + gradioApp().querySelectorAll('#' + tabname + '_extra_tabs div.card').forEach(function(elem) { + var searchOnly = elem.querySelector('.search_only'); + var text = elem.querySelector('.name').textContent.toLowerCase() + " " + elem.querySelector('.search_term').textContent.toLowerCase(); + + var visible = text.indexOf(searchTerm) != -1; + + if (searchOnly && searchTerm.length < 4) { + visible = false; + } + + elem.style.display = visible ? "" : "none"; + }); + }; + + var applySort = function() { + var reverse = sortOrder.classList.contains("sortReverse"); + var sortKey = sort.querySelector("input").value.toLowerCase().replace("sort", "").replaceAll(" ", "_").replace(/_+$/, "").trim(); + sortKey = sortKey ? "sort" + sortKey.charAt(0).toUpperCase() + sortKey.slice(1) : ""; + var sortKeyStore = sortKey ? sortKey + (reverse ? "Reverse" : "") : ""; + if (!sortKey || sortKeyStore == sort.dataset.sortkey) { + return; + } + + sort.dataset.sortkey = sortKeyStore; + + var cards = gradioApp().querySelectorAll('#' + tabname + '_extra_tabs div.card'); + cards.forEach(function(card) { + card.originalParentElement = card.parentElement; + }); + var sortedCards = Array.from(cards); + sortedCards.sort(function(cardA, cardB) { + var a = cardA.dataset[sortKey]; + var b = cardB.dataset[sortKey]; + if (!isNaN(a) && !isNaN(b)) { + return parseInt(a) - parseInt(b); + } + + return (a < b ? -1 : (a > b ? 
1 : 0)); + }); + if (reverse) { + sortedCards.reverse(); + } + cards.forEach(function(card) { + card.remove(); + }); + sortedCards.forEach(function(card) { + card.originalParentElement.appendChild(card); + }); + }; + + search.addEventListener("input", applyFilter); + applyFilter(); + ["change", "blur", "click"].forEach(function(evt) { + sort.querySelector("input").addEventListener(evt, applySort); + }); + sortOrder.addEventListener("click", function() { + sortOrder.classList.toggle("sortReverse"); + applySort(); + }); + + extraNetworksApplyFilter[tabname] = applyFilter; +} + +function applyExtraNetworkFilter(tabname) { + setTimeout(extraNetworksApplyFilter[tabname], 1); +} + +var extraNetworksApplyFilter = {}; +var activePromptTextarea = {}; + +function setupExtraNetworks() { + setupExtraNetworksForTab('txt2img'); + setupExtraNetworksForTab('img2img'); + + function registerPrompt(tabname, id) { + var textarea = gradioApp().querySelector("#" + id + " > label > textarea"); + + if (!activePromptTextarea[tabname]) { + activePromptTextarea[tabname] = textarea; + } + + textarea.addEventListener("focus", function() { + activePromptTextarea[tabname] = textarea; + }); + } + + registerPrompt('txt2img', 'txt2img_prompt'); + registerPrompt('txt2img', 'txt2img_neg_prompt'); + registerPrompt('img2img', 'img2img_prompt'); + registerPrompt('img2img', 'img2img_neg_prompt'); +} + +onUiLoaded(setupExtraNetworks); + +var re_extranet = /<([^:]+:[^:]+):[\d.]+>(.*)/; +var re_extranet_g = /\s+<([^:]+:[^:]+):[\d.]+>/g; + +function tryToRemoveExtraNetworkFromPrompt(textarea, text) { + var m = text.match(re_extranet); + var replaced = false; + var newTextareaText; + if (m) { + var extraTextAfterNet = m[2]; + var partToSearch = m[1]; + var foundAtPosition = -1; + newTextareaText = textarea.value.replaceAll(re_extranet_g, function(found, net, pos) { + m = found.match(re_extranet); + if (m[1] == partToSearch) { + replaced = true; + foundAtPosition = pos; + return ""; + } + return found; + }); + + if (foundAtPosition >= 0 && newTextareaText.substr(foundAtPosition, extraTextAfterNet.length) == extraTextAfterNet) { + newTextareaText = newTextareaText.substr(0, foundAtPosition) + newTextareaText.substr(foundAtPosition + extraTextAfterNet.length); + } + } else { + newTextareaText = textarea.value.replaceAll(new RegExp(text, "g"), function(found) { + if (found == text) { + replaced = true; + return ""; + } + return found; + }); + } + + if (replaced) { + textarea.value = newTextareaText; + return true; + } + + return false; +} + +function cardClicked(tabname, textToAdd, allowNegativePrompt) { + var textarea = allowNegativePrompt ? activePromptTextarea[tabname] : gradioApp().querySelector("#" + tabname + "_prompt > label > textarea"); + + if (!tryToRemoveExtraNetworkFromPrompt(textarea, textToAdd)) { + textarea.value = textarea.value + opts.extra_networks_add_text_separator + textToAdd; + } + + updateInput(textarea); +} + +function saveCardPreview(event, tabname, filename) { + var textarea = gradioApp().querySelector("#" + tabname + '_preview_filename > label > textarea'); + var button = gradioApp().getElementById(tabname + '_save_preview'); + + textarea.value = filename; + updateInput(textarea); + + button.click(); + + event.stopPropagation(); + event.preventDefault(); +} + +function extraNetworksSearchButton(tabs_id, event) { + var searchTextarea = gradioApp().querySelector("#" + tabs_id + ' > div > textarea'); + var button = event.target; + var text = button.classList.contains("search-all") ? 
"" : button.textContent.trim(); + + searchTextarea.value = text; + updateInput(searchTextarea); +} + +var globalPopup = null; +var globalPopupInner = null; +function closePopup() { + if (!globalPopup) return; + + globalPopup.style.display = "none"; +} +function popup(contents) { + if (!globalPopup) { + globalPopup = document.createElement('div'); + globalPopup.onclick = closePopup; + globalPopup.classList.add('global-popup'); + + var close = document.createElement('div'); + close.classList.add('global-popup-close'); + close.onclick = closePopup; + close.title = "Close"; + globalPopup.appendChild(close); + + globalPopupInner = document.createElement('div'); + globalPopupInner.onclick = function(event) { + event.stopPropagation(); return false; + }; + globalPopupInner.classList.add('global-popup-inner'); + globalPopup.appendChild(globalPopupInner); + + gradioApp().querySelector('.main').appendChild(globalPopup); + } + + globalPopupInner.innerHTML = ''; + globalPopupInner.appendChild(contents); + + globalPopup.style.display = "flex"; +} + +function extraNetworksShowMetadata(text) { + var elem = document.createElement('pre'); + elem.classList.add('popup-metadata'); + elem.textContent = text; + + popup(elem); +} + +function requestGet(url, data, handler, errorHandler) { + var xhr = new XMLHttpRequest(); + var args = Object.keys(data).map(function(k) { + return encodeURIComponent(k) + '=' + encodeURIComponent(data[k]); + }).join('&'); + xhr.open("GET", url + "?" + args, true); + + xhr.onreadystatechange = function() { + if (xhr.readyState === 4) { + if (xhr.status === 200) { + try { + var js = JSON.parse(xhr.responseText); + handler(js); + } catch (error) { + console.error(error); + errorHandler(); + } + } else { + errorHandler(); + } + } + }; + var js = JSON.stringify(data); + xhr.send(js); +} + +function extraNetworksRequestMetadata(event, extraPage, cardName) { + var showError = function() { + extraNetworksShowMetadata("there was an error getting metadata"); + }; + + requestGet("./sd_extra_networks/metadata", {page: extraPage, item: cardName}, function(data) { + if (data && data.metadata) { + extraNetworksShowMetadata(data.metadata); + } else { + showError(); + } + }, showError); + + event.stopPropagation(); +} + +var extraPageUserMetadataEditors = {}; + +function extraNetworksEditUserMetadata(event, tabname, extraPage, cardName) { + var id = tabname + '_' + extraPage + '_edit_user_metadata'; + + var editor = extraPageUserMetadataEditors[id]; + if (!editor) { + editor = {}; + editor.page = gradioApp().getElementById(id); + editor.nameTextarea = gradioApp().querySelector("#" + id + "_name" + ' textarea'); + editor.button = gradioApp().querySelector("#" + id + "_button"); + extraPageUserMetadataEditors[id] = editor; + } + + editor.nameTextarea.value = cardName; + updateInput(editor.nameTextarea); + + editor.button.click(); + + popup(editor.page); + + event.stopPropagation(); +} + +function extraNetworksRefreshSingleCard(page, tabname, name) { + requestGet("./sd_extra_networks/get-single-card", {page: page, tabname: tabname, name: name}, function(data) { + if (data && data.html) { + var card = gradioApp().querySelector('.card[data-name=' + JSON.stringify(name) + ']'); // likely using the wrong stringify function + + var newDiv = document.createElement('DIV'); + newDiv.innerHTML = data.html; + var newCard = newDiv.firstElementChild; + + newCard.style = ''; + card.parentElement.insertBefore(newCard, card); + card.parentElement.removeChild(card); + } + }); +} diff --git 
a/javascript/generationParams.js b/javascript/generationParams.js new file mode 100644 index 0000000000000000000000000000000000000000..7c0fd221d63313ab063f545570eb0da780b9da3a --- /dev/null +++ b/javascript/generationParams.js @@ -0,0 +1,35 @@ +// attaches listeners to the txt2img and img2img galleries to update displayed generation param text when the image changes + +let txt2img_gallery, img2img_gallery, modal = undefined; +onAfterUiUpdate(function() { + if (!txt2img_gallery) { + txt2img_gallery = attachGalleryListeners("txt2img"); + } + if (!img2img_gallery) { + img2img_gallery = attachGalleryListeners("img2img"); + } + if (!modal) { + modal = gradioApp().getElementById('lightboxModal'); + modalObserver.observe(modal, {attributes: true, attributeFilter: ['style']}); + } +}); + +let modalObserver = new MutationObserver(function(mutations) { + mutations.forEach(function(mutationRecord) { + let selectedTab = gradioApp().querySelector('#tabs div button.selected')?.innerText; + if (mutationRecord.target.style.display === 'none' && (selectedTab === 'txt2img' || selectedTab === 'img2img')) { + gradioApp().getElementById(selectedTab + "_generation_info_button")?.click(); + } + }); +}); + +function attachGalleryListeners(tab_name) { + var gallery = gradioApp().querySelector('#' + tab_name + '_gallery'); + gallery?.addEventListener('click', () => gradioApp().getElementById(tab_name + "_generation_info_button").click()); + gallery?.addEventListener('keydown', (e) => { + if (e.keyCode == 37 || e.keyCode == 39) { // left or right arrow + gradioApp().getElementById(tab_name + "_generation_info_button").click(); + } + }); + return gallery; +} diff --git a/javascript/hints.js b/javascript/hints.js new file mode 100644 index 0000000000000000000000000000000000000000..4167cb28b7c0ed934b37d346aacd3784ebec1016 --- /dev/null +++ b/javascript/hints.js @@ -0,0 +1,192 @@ +// mouseover tooltips for various UI elements + +var titles = { + "Sampling steps": "How many times to improve the generated image iteratively; higher values take longer; very low values can produce bad results", + "Sampling method": "Which algorithm to use to produce the image", + "GFPGAN": "Restore low quality faces using GFPGAN neural network", + "Euler a": "Euler Ancestral - very creative, each can get a completely different picture depending on step count, setting steps higher than 30-40 does not help", + "DDIM": "Denoising Diffusion Implicit Models - best at inpainting", + "UniPC": "Unified Predictor-Corrector Framework for Fast Sampling of Diffusion Models", + "DPM adaptive": "Ignores step count - uses a number of steps determined by the CFG and resolution", + + "\u{1F4D0}": "Auto detect size from img2img", + "Batch count": "How many batches of images to create (has no impact on generation performance or VRAM usage)", + "Batch size": "How many images to create in a single batch (increases generation performance at cost of higher VRAM usage)", + "CFG Scale": "Classifier Free Guidance Scale - how strongly the image should conform to prompt - lower values produce more creative results", + "Seed": "A value that determines the output of random number generator - if you create an image with same parameters and seed as another image, you'll get the same result", + "\u{1f3b2}\ufe0f": "Set seed to -1, which will cause a new random number to be used every time", + "\u267b\ufe0f": "Reuse seed from last generation, mostly useful if it was randomized", + "\u2199\ufe0f": "Read generation parameters from prompt or last generation if prompt is empty 
into user interface.", + "\u{1f4c2}": "Open images output directory", + "\u{1f4be}": "Save style", + "\u{1f5d1}\ufe0f": "Clear prompt", + "\u{1f4cb}": "Apply selected styles to current prompt", + "\u{1f4d2}": "Paste available values into the field", + "\u{1f3b4}": "Show/hide extra networks", + "\u{1f300}": "Restore progress", + + "Inpaint a part of image": "Draw a mask over an image, and the script will regenerate the masked area with content according to prompt", + "SD upscale": "Upscale image normally, split result into tiles, improve each tile using img2img, merge whole image back", + + "Just resize": "Resize image to target resolution. Unless height and width match, you will get incorrect aspect ratio.", + "Crop and resize": "Resize the image so that entirety of target resolution is filled with the image. Crop parts that stick out.", + "Resize and fill": "Resize the image so that entirety of image is inside target resolution. Fill empty space with image's colors.", + + "Mask blur": "How much to blur the mask before processing, in pixels.", + "Masked content": "What to put inside the masked area before processing it with Stable Diffusion.", + "fill": "fill it with colors of the image", + "original": "keep whatever was there originally", + "latent noise": "fill it with latent space noise", + "latent nothing": "fill it with latent space zeroes", + "Inpaint at full resolution": "Upscale masked region to target resolution, do inpainting, downscale back and paste into original image", + + "Denoising strength": "Determines how little respect the algorithm should have for image's content. At 0, nothing will change, and at 1 you'll get an unrelated image. With values below 1.0, processing will take less steps than the Sampling Steps slider specifies.", + + "Skip": "Stop processing current image and continue processing.", + "Interrupt": "Stop processing images and return any results accumulated so far.", + "Save": "Write image to a directory (default - log/images) and generation parameters into csv file.", + + "X values": "Separate values for X axis using commas.", + "Y values": "Separate values for Y axis using commas.", + + "None": "Do not do anything special", + "Prompt matrix": "Separate prompts into parts using vertical pipe character (|) and the script will create a picture for every combination of them (except for the first part, which will be present in all combinations)", + "X/Y/Z plot": "Create grid(s) where images will have different parameters. Use inputs below to specify which parameters will be shared by columns and rows", + "Custom code": "Run Python code. Advanced user only. Must run program with --allow-code for this to work", + + "Prompt S/R": "Separate a list of words with commas, and the first word will be used as a keyword: script will search for this word in the prompt, and replace it with others", + "Prompt order": "Separate a list of words with commas, and the script will make a variation of prompt with those words for their every possible order", + + "Tiling": "Produce an image that can be tiled.", + "Tile overlap": "For SD upscale, how much overlap in pixels should there be between tiles. Tiles overlap so that when they are merged back into one picture, there is no clearly visible seam.", + + "Variation seed": "Seed of a different picture to be mixed into the generation.", + "Variation strength": "How strong of a variation to produce. At 0, there will be no effect. 
At 1, you will get the complete picture with variation seed (except for ancestral samplers, where you will just get something).", + "Resize seed from height": "Make an attempt to produce a picture similar to what would have been produced with same seed at specified resolution", + "Resize seed from width": "Make an attempt to produce a picture similar to what would have been produced with same seed at specified resolution", + + "Interrogate": "Reconstruct prompt from existing image and put it into the prompt field.", + + "Images filename pattern": "Use tags like [seed] and [date] to define how filenames for images are chosen. Leave empty for default.", + "Directory name pattern": "Use tags like [seed] and [date] to define how subdirectories for images and grids are chosen. Leave empty for default.", + "Max prompt words": "Set the maximum number of words to be used in the [prompt_words] option; ATTENTION: If the words are too long, they may exceed the maximum length of the file path that the system can handle", + + "Loopback": "Performs img2img processing multiple times. Output images are used as input for the next loop.", + "Loops": "How many times to process an image. Each output is used as the input of the next loop. If set to 1, behavior will be as if this script were not used.", + "Final denoising strength": "The denoising strength for the final loop of each image in the batch.", + "Denoising strength curve": "The denoising curve controls the rate of denoising strength change each loop. Aggressive: Most of the change will happen towards the start of the loops. Linear: Change will be constant through all loops. Lazy: Most of the change will happen towards the end of the loops.", + + "Style 1": "Style to apply; styles have components for both positive and negative prompts and apply to both", + "Style 2": "Style to apply; styles have components for both positive and negative prompts and apply to both", + "Apply style": "Insert selected styles into prompt fields", + "Create style": "Save current prompts as a style. If you add the token {prompt} to the text, the style uses that as a placeholder for your prompt when you use the style in the future.", + + "Checkpoint name": "Loads weights from checkpoint before making images. You can either use hash or a part of filename (as seen in settings) for checkpoint name. Recommended to use with Y axis for less switching.", + "Inpainting conditioning mask strength": "Only applies to inpainting models. Determines how strongly to mask off the original image for inpainting and img2img. 1.0 means fully masked, which is the default behaviour. 0.0 means a fully unmasked conditioning. Lower values will help preserve the overall composition of the image, but will struggle with large changes.", + + "Eta noise seed delta": "If this value is non-zero, it will be added to seed and used to initialize RNG for noises when using samplers with Eta. You can use this to produce even more variation of images, or you can use this to match images of other software if you know what you are doing.", + + "Filename word regex": "This regular expression will be used to extract words from the filename, and they will be joined using the option below into label text used for training. 
Leave empty to keep filename text as it is.", + "Filename join string": "This string will be used to join split words into a single line if the option above is enabled.", + + "Quicksettings list": "List of setting names, separated by commas, for settings that should go to the quick access bar at the top, rather than the usual setting tab. See modules/shared.py for setting names. Requires restarting to apply.", + + "Weighted sum": "Result = A * (1 - M) + B * M", + "Add difference": "Result = A + (B - C) * M", + "No interpolation": "Result = A", + + "Initialization text": "If the number of tokens is more than the number of vectors, some may be skipped.\nLeave the textbox empty to start with zeroed out vectors", + "Learning rate": "How fast should training go. Low values will take longer to train, high values may fail to converge (not generate accurate results) and/or may break the embedding (This has happened if you see Loss: nan in the training info textbox. If this happens, you need to manually restore your embedding from an older not-broken backup).\n\nYou can set a single numeric value, or multiple learning rates using the syntax:\n\n rate_1:max_steps_1, rate_2:max_steps_2, ...\n\nEG: 0.005:100, 1e-3:1000, 1e-5\n\nWill train with rate of 0.005 for first 100 steps, then 1e-3 until 1000 steps, then 1e-5 for all remaining steps.", + + "Clip skip": "Early stopping parameter for CLIP model; 1 is stop at last layer as usual, 2 is stop at penultimate layer, etc.", + + "Approx NN": "Cheap neural network approximation. Very fast compared to VAE, but produces pictures with 4 times smaller horizontal/vertical resolution and lower quality.", + "Approx cheap": "Very cheap approximation. Very fast compared to VAE, but produces pictures with 8 times smaller horizontal/vertical resolution and extremely low quality.", + + "Hires. fix": "Use a two step process to partially create an image at smaller resolution, upscale, and then improve details in it without changing composition", + "Hires steps": "Number of sampling steps for upscaled picture. If 0, uses same as for original.", + "Upscale by": "Adjusts the size of the image by multiplying the original width and height by the selected value. Ignored if either Resize width to or Resize height to are non-zero.", + "Resize width to": "Resizes image to this width. If 0, width is inferred from either of two nearby sliders.", + "Resize height to": "Resizes image to this height. If 0, height is inferred from either of two nearby sliders.", + "Discard weights with matching name": "Regular expression; if a weight's name matches it, that weight is not written to the resulting checkpoint. Use ^model_ema to discard EMA weights.", + "Extra networks tab order": "Comma-separated list of tab names; tabs listed here will appear in the extra networks UI first and in order listed.", + "Negative Guidance minimum sigma": "Skip negative prompt for steps where image is already mostly denoised; the higher this value, the more skips there will be; provides increased performance in exchange for minor quality reduction." +}; + +function updateTooltip(element) { + if (element.title) return; // already has a title + + let text = element.textContent; + let tooltip = localization[titles[text]] || titles[text]; + + if (!tooltip) { + let value = element.value; + if (value) tooltip = localization[titles[value]] || titles[value]; + } + + if (!tooltip) { + // Gradio dropdown options have `data-value`. 
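The lookup chain in updateTooltip is: keep any existing title, else try the element's text, then its value, then (next) its data-value, and finally its CSS classes. A standalone sketch of the effect, assuming the titles table above and a localization dict (normally injected by the backend, as localization.js notes):

```js
// Illustrative only: a detached button whose label matches a `titles` key.
const el = document.createElement('button');
el.textContent = 'Interrupt';
updateTooltip(el);
// el.title is now the matching entry from `titles`:
// "Stop processing images and return any results accumulated so far."
```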
+ let dataValue = element.dataset.value; + if (dataValue) tooltip = localization[titles[dataValue]] || titles[dataValue]; + } + + if (!tooltip) { + for (const c of element.classList) { + if (c in titles) { + tooltip = localization[titles[c]] || titles[c]; + break; + } + } + } + + if (tooltip) { + element.title = tooltip; + } +} + +// Nodes to check for adding tooltips. +const tooltipCheckNodes = new Set(); +// Timer for debouncing tooltip check. +let tooltipCheckTimer = null; + +function processTooltipCheckNodes() { + for (const node of tooltipCheckNodes) { + updateTooltip(node); + } + tooltipCheckNodes.clear(); +} + +onUiUpdate(function(mutationRecords) { + for (const record of mutationRecords) { + if (record.type === "childList" && record.target.classList.contains("options")) { + // This smells like a Gradio dropdown menu having changed, + // so let's enqueue an update for the input element that shows the current value. + let wrap = record.target.parentNode; + let input = wrap?.querySelector("input"); + if (input) { + input.title = ""; // So we'll even have a chance to update it. + tooltipCheckNodes.add(input); + } + } + for (const node of record.addedNodes) { + if (node.nodeType === Node.ELEMENT_NODE && !node.classList.contains("hide")) { + if (!node.title) { + if ( + node.tagName === "SPAN" || + node.tagName === "BUTTON" || + node.tagName === "P" || + node.tagName === "INPUT" || + (node.tagName === "LI" && node.classList.contains("item")) // Gradio dropdown item + ) { + tooltipCheckNodes.add(node); + } + } + node.querySelectorAll('span, button, p').forEach(n => tooltipCheckNodes.add(n)); + } + } + } + if (tooltipCheckNodes.size) { + clearTimeout(tooltipCheckTimer); + tooltipCheckTimer = setTimeout(processTooltipCheckNodes, 1000); + } +}); diff --git a/javascript/hires_fix.js b/javascript/hires_fix.js new file mode 100644 index 0000000000000000000000000000000000000000..0d04ab3b424338634af3e71a2f9d8796a5f00224 --- /dev/null +++ b/javascript/hires_fix.js @@ -0,0 +1,18 @@ + +function onCalcResolutionHires(enable, width, height, hr_scale, hr_resize_x, hr_resize_y) { + function setInactive(elem, inactive) { + elem.classList.toggle('inactive', !!inactive); + } + + var hrUpscaleBy = gradioApp().getElementById('txt2img_hr_scale'); + var hrResizeX = gradioApp().getElementById('txt2img_hr_resize_x'); + var hrResizeY = gradioApp().getElementById('txt2img_hr_resize_y'); + + gradioApp().getElementById('txt2img_hires_fix_row2').style.display = opts.use_old_hires_fix_width_height ? 
"none" : ""; + + setInactive(hrUpscaleBy, opts.use_old_hires_fix_width_height || hr_resize_x > 0 || hr_resize_y > 0); + setInactive(hrResizeX, opts.use_old_hires_fix_width_height || hr_resize_x == 0); + setInactive(hrResizeY, opts.use_old_hires_fix_width_height || hr_resize_y == 0); + + return [enable, width, height, hr_scale, hr_resize_x, hr_resize_y]; +} diff --git a/javascript/imageMaskFix.js b/javascript/imageMaskFix.js new file mode 100644 index 0000000000000000000000000000000000000000..900c56f32fdf7128f0433621df25a0fbd14c4e42 --- /dev/null +++ b/javascript/imageMaskFix.js @@ -0,0 +1,43 @@ +/** + * temporary fix for https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/668 + * @see https://github.com/gradio-app/gradio/issues/1721 + */ +function imageMaskResize() { + const canvases = gradioApp().querySelectorAll('#img2maskimg .touch-none canvas'); + if (!canvases.length) { + window.removeEventListener('resize', imageMaskResize); + return; + } + + const wrapper = canvases[0].closest('.touch-none'); + const previewImage = wrapper.previousElementSibling; + + if (!previewImage.complete) { + previewImage.addEventListener('load', imageMaskResize); + return; + } + + const w = previewImage.width; + const h = previewImage.height; + const nw = previewImage.naturalWidth; + const nh = previewImage.naturalHeight; + const portrait = nh > nw; + + const wW = Math.min(w, portrait ? h / nh * nw : w / nw * nw); + const wH = Math.min(h, portrait ? h / nh * nh : w / nw * nh); + + wrapper.style.width = `${wW}px`; + wrapper.style.height = `${wH}px`; + wrapper.style.left = `0px`; + wrapper.style.top = `0px`; + + canvases.forEach(c => { + c.style.width = c.style.height = ''; + c.style.maxWidth = '100%'; + c.style.maxHeight = '100%'; + c.style.objectFit = 'contain'; + }); +} + +onAfterUiUpdate(imageMaskResize); +window.addEventListener('resize', imageMaskResize); diff --git a/javascript/imageviewer.js b/javascript/imageviewer.js new file mode 100644 index 0000000000000000000000000000000000000000..677e95c1bc7b700e61bd5e6263e980dd703165e2 --- /dev/null +++ b/javascript/imageviewer.js @@ -0,0 +1,254 @@ +// A full size 'lightbox' preview modal shown when left clicking on gallery previews +function closeModal() { + gradioApp().getElementById("lightboxModal").style.display = "none"; +} + +function showModal(event) { + const source = event.target || event.srcElement; + const modalImage = gradioApp().getElementById("modalImage"); + const lb = gradioApp().getElementById("lightboxModal"); + modalImage.src = source.src; + if (modalImage.style.display === 'none') { + lb.style.setProperty('background-image', 'url(' + source.src + ')'); + } + lb.style.display = "flex"; + lb.focus(); + + const tabTxt2Img = gradioApp().getElementById("tab_txt2img"); + const tabImg2Img = gradioApp().getElementById("tab_img2img"); + // show the save button in modal only on txt2img or img2img tabs + if (tabTxt2Img.style.display != "none" || tabImg2Img.style.display != "none") { + gradioApp().getElementById("modal_save").style.display = "inline"; + } else { + gradioApp().getElementById("modal_save").style.display = "none"; + } + event.stopPropagation(); +} + +function negmod(n, m) { + return ((n % m) + m) % m; +} + +function updateOnBackgroundChange() { + const modalImage = gradioApp().getElementById("modalImage"); + if (modalImage && modalImage.offsetParent) { + let currentButton = selected_gallery_button(); + + if (currentButton?.children?.length > 0 && modalImage.src != currentButton.children[0].src) { + modalImage.src = 
currentButton.children[0].src; + if (modalImage.style.display === 'none') { + const modal = gradioApp().getElementById("lightboxModal"); + modal.style.setProperty('background-image', `url(${modalImage.src})`); + } + } + } +} + +function modalImageSwitch(offset) { + var galleryButtons = all_gallery_buttons(); + + if (galleryButtons.length > 1) { + var currentButton = selected_gallery_button(); + + var result = -1; + galleryButtons.forEach(function(v, i) { + if (v == currentButton) { + result = i; + } + }); + + if (result != -1) { + var nextButton = galleryButtons[negmod((result + offset), galleryButtons.length)]; + nextButton.click(); + const modalImage = gradioApp().getElementById("modalImage"); + const modal = gradioApp().getElementById("lightboxModal"); + modalImage.src = nextButton.children[0].src; + if (modalImage.style.display === 'none') { + modal.style.setProperty('background-image', `url(${modalImage.src})`); + } + setTimeout(function() { + modal.focus(); + }, 10); + } + } +} + +function saveImage() { + const tabTxt2Img = gradioApp().getElementById("tab_txt2img"); + const tabImg2Img = gradioApp().getElementById("tab_img2img"); + const saveTxt2Img = "save_txt2img"; + const saveImg2Img = "save_img2img"; + if (tabTxt2Img.style.display != "none") { + gradioApp().getElementById(saveTxt2Img).click(); + } else if (tabImg2Img.style.display != "none") { + gradioApp().getElementById(saveImg2Img).click(); + } else { + console.error("missing implementation for saving modal of this type"); + } +} + +function modalSaveImage(event) { + saveImage(); + event.stopPropagation(); +} + +function modalNextImage(event) { + modalImageSwitch(1); + event.stopPropagation(); +} + +function modalPrevImage(event) { + modalImageSwitch(-1); + event.stopPropagation(); +} + +function modalKeyHandler(event) { + switch (event.key) { + case "s": + saveImage(); + break; + case "ArrowLeft": + modalPrevImage(event); + break; + case "ArrowRight": + modalNextImage(event); + break; + case "Escape": + closeModal(); + break; + } +} + +function setupImageForLightbox(e) { + if (e.dataset.modded) { + return; + } + + e.dataset.modded = true; + e.style.cursor = 'pointer'; + e.style.userSelect = 'none'; + + var isFirefox = navigator.userAgent.toLowerCase().indexOf('firefox') > -1; + + // For Firefox, listening on click first switches to the next image and then shows the lightbox. + // If you know how to fix this without switching to the mousedown event, please do. + // For other browsers the event is click, to make it possible to drag the picture. + var event = isFirefox ? 
'mousedown' : 'click'; + + e.addEventListener(event, function(evt) { + if (!opts.js_modal_lightbox || evt.button != 0) return; + + modalZoomSet(gradioApp().getElementById('modalImage'), opts.js_modal_lightbox_initially_zoomed); + evt.preventDefault(); + showModal(evt); + }, true); + +} + +function modalZoomSet(modalImage, enable) { + if (modalImage) modalImage.classList.toggle('modalImageFullscreen', !!enable); +} + +function modalZoomToggle(event) { + var modalImage = gradioApp().getElementById("modalImage"); + modalZoomSet(modalImage, !modalImage.classList.contains('modalImageFullscreen')); + event.stopPropagation(); +} + +function modalTileImageToggle(event) { + const modalImage = gradioApp().getElementById("modalImage"); + const modal = gradioApp().getElementById("lightboxModal"); + const isTiling = modalImage.style.display === 'none'; + if (isTiling) { + modalImage.style.display = 'block'; + modal.style.setProperty('background-image', 'none'); + } else { + modalImage.style.display = 'none'; + modal.style.setProperty('background-image', `url(${modalImage.src})`); + } + + event.stopPropagation(); +} + +onAfterUiUpdate(function() { + var fullImg_preview = gradioApp().querySelectorAll('.gradio-gallery > div > img'); + if (fullImg_preview != null) { + fullImg_preview.forEach(setupImageForLightbox); + } + updateOnBackgroundChange(); +}); + +document.addEventListener("DOMContentLoaded", function() { + //const modalFragment = document.createDocumentFragment(); + const modal = document.createElement('div'); + modal.onclick = closeModal; + modal.id = "lightboxModal"; + modal.tabIndex = 0; + modal.addEventListener('keydown', modalKeyHandler, true); + + const modalControls = document.createElement('div'); + modalControls.className = 'modalControls gradio-container'; + modal.append(modalControls); + + const modalZoom = document.createElement('span'); + modalZoom.className = 'modalZoom cursor'; + modalZoom.innerHTML = '⤡'; + modalZoom.addEventListener('click', modalZoomToggle, true); + modalZoom.title = "Toggle zoomed view"; + modalControls.appendChild(modalZoom); + + const modalTileImage = document.createElement('span'); + modalTileImage.className = 'modalTileImage cursor'; + modalTileImage.innerHTML = '⊞'; + modalTileImage.addEventListener('click', modalTileImageToggle, true); + modalTileImage.title = "Preview tiling"; + modalControls.appendChild(modalTileImage); + + const modalSave = document.createElement("span"); + modalSave.className = "modalSave cursor"; + modalSave.id = "modal_save"; + modalSave.innerHTML = "🖫"; + modalSave.addEventListener("click", modalSaveImage, true); + modalSave.title = "Save Image(s)"; + modalControls.appendChild(modalSave); + + const modalClose = document.createElement('span'); + modalClose.className = 'modalClose cursor'; + modalClose.innerHTML = '×'; + modalClose.onclick = closeModal; + modalClose.title = "Close image viewer"; + modalControls.appendChild(modalClose); + + const modalImage = document.createElement('img'); + modalImage.id = 'modalImage'; + modalImage.onclick = closeModal; + modalImage.tabIndex = 0; + modalImage.addEventListener('keydown', modalKeyHandler, true); + modal.appendChild(modalImage); + + const modalPrev = document.createElement('a'); + modalPrev.className = 'modalPrev'; + modalPrev.innerHTML = '❮'; + modalPrev.tabIndex = 0; + modalPrev.addEventListener('click', modalPrevImage, true); + modalPrev.addEventListener('keydown', modalKeyHandler, true); + modal.appendChild(modalPrev); + + const modalNext = document.createElement('a'); + 
modalNext.className = 'modalNext'; + modalNext.innerHTML = '❯'; + modalNext.tabIndex = 0; + modalNext.addEventListener('click', modalNextImage, true); + modalNext.addEventListener('keydown', modalKeyHandler, true); + + modal.appendChild(modalNext); + + try { + gradioApp().appendChild(modal); + } catch (e) { + gradioApp().body.appendChild(modal); + } + + document.body.appendChild(modal); + +}); diff --git a/javascript/imageviewerGamepad.js b/javascript/imageviewerGamepad.js new file mode 100644 index 0000000000000000000000000000000000000000..a22c7e6e6435f677c7a86dbbae5da86af8fdc9eb --- /dev/null +++ b/javascript/imageviewerGamepad.js @@ -0,0 +1,63 @@ +let gamepads = []; + +window.addEventListener('gamepadconnected', (e) => { + const index = e.gamepad.index; + let isWaiting = false; + gamepads[index] = setInterval(async() => { + if (!opts.js_modal_lightbox_gamepad || isWaiting) return; + const gamepad = navigator.getGamepads()[index]; + const xValue = gamepad.axes[0]; + if (xValue <= -0.3) { + modalPrevImage(e); + isWaiting = true; + } else if (xValue >= 0.3) { + modalNextImage(e); + isWaiting = true; + } + if (isWaiting) { + await sleepUntil(() => { + const xValue = navigator.getGamepads()[index].axes[0]; + if (xValue < 0.3 && xValue > -0.3) { + return true; + } + }, opts.js_modal_lightbox_gamepad_repeat); + isWaiting = false; + } + }, 10); +}); + +window.addEventListener('gamepaddisconnected', (e) => { + clearInterval(gamepads[e.gamepad.index]); +}); + +/* +Primarily for vr controller type pointer devices. +I use the wheel event because there's currently no way to do it properly with web xr. + */ +let isScrolling = false; +window.addEventListener('wheel', (e) => { + if (!opts.js_modal_lightbox_gamepad || isScrolling) return; + isScrolling = true; + + if (e.deltaX <= -0.6) { + modalPrevImage(e); + } else if (e.deltaX >= 0.6) { + modalNextImage(e); + } + + setTimeout(() => { + isScrolling = false; + }, opts.js_modal_lightbox_gamepad_repeat); +}); + +function sleepUntil(f, timeout) { + return new Promise((resolve) => { + const timeStart = new Date(); + const wait = setInterval(function() { + if (f() || new Date() - timeStart > timeout) { + clearInterval(wait); + resolve(); + } + }, 20); + }); +} diff --git a/javascript/localization.js b/javascript/localization.js new file mode 100644 index 0000000000000000000000000000000000000000..eb22b8a7e99c4c9a0c4d6a52c3b9acefd74464ae --- /dev/null +++ b/javascript/localization.js @@ -0,0 +1,176 @@ + +// localization = {} -- the dict with translations is created by the backend + +var ignore_ids_for_localization = { + setting_sd_hypernetwork: 'OPTION', + setting_sd_model_checkpoint: 'OPTION', + modelmerger_primary_model_name: 'OPTION', + modelmerger_secondary_model_name: 'OPTION', + modelmerger_tertiary_model_name: 'OPTION', + train_embedding: 'OPTION', + train_hypernetwork: 'OPTION', + txt2img_styles: 'OPTION', + img2img_styles: 'OPTION', + setting_random_artist_categories: 'SPAN', + setting_face_restoration_model: 'SPAN', + setting_realesrgan_enabled_models: 'SPAN', + extras_upscaler_1: 'SPAN', + extras_upscaler_2: 'SPAN', +}; + +var re_num = /^[.\d]+$/; +var re_emoji = /[\p{Extended_Pictographic}\u{1F3FB}-\u{1F3FF}\u{1F9B0}-\u{1F9B3}]/u; + +var original_lines = {}; +var translated_lines = {}; + +function hasLocalization() { + return window.localization && Object.keys(window.localization).length > 0; +} + +function textNodesUnder(el) { + var n, a = [], walk = document.createTreeWalker(el, NodeFilter.SHOW_TEXT, null, false); + while ((n = walk.nextNode())) 
a.push(n); + return a; +} + +function canBeTranslated(node, text) { + if (!text) return false; + if (!node.parentElement) return false; + + var parentType = node.parentElement.nodeName; + if (parentType == 'SCRIPT' || parentType == 'STYLE' || parentType == 'TEXTAREA') return false; + + if (parentType == 'OPTION' || parentType == 'SPAN') { + var pnode = node; + for (var level = 0; level < 4; level++) { + pnode = pnode.parentElement; + if (!pnode) break; + + if (ignore_ids_for_localization[pnode.id] == parentType) return false; + } + } + + if (re_num.test(text)) return false; + if (re_emoji.test(text)) return false; + return true; +} + +function getTranslation(text) { + if (!text) return undefined; + + if (translated_lines[text] === undefined) { + original_lines[text] = 1; + } + + var tl = localization[text]; + if (tl !== undefined) { + translated_lines[tl] = 1; + } + + return tl; +} + +function processTextNode(node) { + var text = node.textContent.trim(); + + if (!canBeTranslated(node, text)) return; + + var tl = getTranslation(text); + if (tl !== undefined) { + node.textContent = tl; + } +} + +function processNode(node) { + if (node.nodeType == 3) { + processTextNode(node); + return; + } + + if (node.title) { + let tl = getTranslation(node.title); + if (tl !== undefined) { + node.title = tl; + } + } + + if (node.placeholder) { + let tl = getTranslation(node.placeholder); + if (tl !== undefined) { + node.placeholder = tl; + } + } + + textNodesUnder(node).forEach(function(node) { + processTextNode(node); + }); +} + +function dumpTranslations() { + if (!hasLocalization()) { + // If we don't have any localization, + // we will not have traversed the app to find + // original_lines, so do that now. + processNode(gradioApp()); + } + var dumped = {}; + if (localization.rtl) { + dumped.rtl = true; + } + + for (const text in original_lines) { + if (dumped[text] !== undefined) continue; + dumped[text] = localization[text] || text; + } + + return dumped; +} + +function download_localization() { + var text = JSON.stringify(dumpTranslations(), null, 4); + + var element = document.createElement('a'); + element.setAttribute('href', 'data:text/plain;charset=utf-8,' + encodeURIComponent(text)); + element.setAttribute('download', "localization.json"); + element.style.display = 'none'; + document.body.appendChild(element); + + element.click(); + + document.body.removeChild(element); +} + +document.addEventListener("DOMContentLoaded", function() { + if (!hasLocalization()) { + return; + } + + onUiUpdate(function(m) { + m.forEach(function(mutation) { + mutation.addedNodes.forEach(function(node) { + processNode(node); + }); + }); + }); + + processNode(gradioApp()); + + if (localization.rtl) { // if the language is from right to left, + (new MutationObserver((mutations, observer) => { // wait for the style to load + mutations.forEach(mutation => { + mutation.addedNodes.forEach(node => { + if (node.tagName === 'STYLE') { + observer.disconnect(); + + for (const x of node.sheet.rules) { // find all rtl media rules + if (Array.from(x.media || []).includes('rtl')) { + x.media.appendMedium('all'); // enable them + } + } + } + }); + }); + })).observe(gradioApp(), {childList: true}); + } +}); diff --git a/javascript/notification.js b/javascript/notification.js new file mode 100644 index 0000000000000000000000000000000000000000..76c5715dab43923736949a7b9cec6d0799dd5672 --- /dev/null +++ b/javascript/notification.js @@ -0,0 +1,49 @@ +// Monitors the gallery and sends a browser notification when the leading image is new. 
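Before the implementation, a sketch of the Notification API contract this file leans on: permission must be requested from a user gesture, and a notification is only useful while the tab is unfocused. The helper notifyWhenDone and its parameters are invented for the example:

```js
// Hypothetical helper mirroring the flow used below.
async function notifyWhenDone(imageUrl, count) {
    // Permission prompts are only honored inside a user gesture,
    // which is why the real code hooks a button click.
    if (Notification.permission === 'default') {
        await Notification.requestPermission();
    }
    if (Notification.permission !== 'granted' || document.hasFocus()) return;

    const notification = new Notification('Stable Diffusion', {
        body: `Generated ${count} image${count > 1 ? 's' : ''}`,
        icon: imageUrl,
    });
    notification.onclick = function() {
        parent.focus(); // refocus the webui tab (it may be embedded in a frame)
        this.close();
    };
}
```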
+ +let lastHeadImg = null; + +let notificationButton = null; + +onAfterUiUpdate(function() { + if (notificationButton == null) { + notificationButton = gradioApp().getElementById('request_notifications'); + + if (notificationButton != null) { + notificationButton.addEventListener('click', () => { + void Notification.requestPermission(); + }, true); + } + } + + const galleryPreviews = gradioApp().querySelectorAll('div[id^="tab_"][style*="display: block"] div[id$="_results"] .thumbnail-item > img'); + + if (galleryPreviews == null) return; + + const headImg = galleryPreviews[0]?.src; + + if (headImg == null || headImg == lastHeadImg) return; + + lastHeadImg = headImg; + + // play notification sound if available + gradioApp().querySelector('#audio_notification audio')?.play(); + + if (document.hasFocus()) return; + + // Multiple copies of the images are in the DOM when one is selected. Dedup with a Set to get the real number generated. + const imgs = new Set(Array.from(galleryPreviews).map(img => img.src)); + + const notification = new Notification( + 'Stable Diffusion', + { + body: `Generated ${imgs.size > 1 ? imgs.size - opts.return_grid : 1} image${imgs.size > 1 ? 's' : ''}`, + icon: headImg, + image: headImg, + } + ); + + notification.onclick = function(_) { + parent.focus(); + this.close(); + }; +}); diff --git a/javascript/profilerVisualization.js b/javascript/profilerVisualization.js new file mode 100644 index 0000000000000000000000000000000000000000..9d8e5f42f327f93db42773ebf0b97ee1e9671806 --- /dev/null +++ b/javascript/profilerVisualization.js @@ -0,0 +1,153 @@ + +function createRow(table, cellName, items) { + var tr = document.createElement('tr'); + var res = []; + + items.forEach(function(x, i) { + if (x === undefined) { + res.push(null); + return; + } + + var td = document.createElement(cellName); + td.textContent = x; + tr.appendChild(td); + res.push(td); + + var colspan = 1; + for (var n = i + 1; n < items.length; n++) { + if (items[n] !== undefined) { + break; + } + + colspan += 1; + } + + if (colspan > 1) { + td.colSpan = colspan; + } + }); + + table.appendChild(tr); + + return res; +} + +function showProfile(path, cutoff = 0.05) { + requestGet(path, {}, function(data) { + var table = document.createElement('table'); + table.className = 'popup-table'; + + data.records['total'] = data.total; + var keys = Object.keys(data.records).sort(function(a, b) { + return data.records[b] - data.records[a]; + }); + var items = keys.map(function(x) { + return {key: x, parts: x.split('/'), time: data.records[x]}; + }); + var maxLength = items.reduce(function(a, b) { + return Math.max(a, b.parts.length); + }, 0); + + var cols = createRow(table, 'th', ['record', 'seconds']); + cols[0].colSpan = maxLength; + + function arraysEqual(a, b) { + return !(a < b || b < a); + } + + var addLevel = function(level, parent, hide) { + var matching = items.filter(function(x) { + return x.parts[level] && !x.parts[level + 1] && arraysEqual(x.parts.slice(0, level), parent); + }); + var sorted = matching.sort(function(a, b) { + return b.time - a.time; + }); + var othersTime = 0; + var othersList = []; + var othersRows = []; + var childrenRows = []; + sorted.forEach(function(x) { + var visible = x.time >= cutoff && !hide; + + var cells = []; + for (var i = 0; i < maxLength; i++) { + cells.push(x.parts[i]); + } + cells.push(x.time.toFixed(3)); + var cols = createRow(table, 'td', cells); + for (i = 0; i < level; i++) { + cols[i].className = 'muted'; + } + + var tr = cols[0].parentNode; + if (!visible) { + 
tr.classList.add("hidden"); + } + + if (x.time >= cutoff) { + childrenRows.push(tr); + } else { + othersTime += x.time; + othersList.push(x.parts[level]); + othersRows.push(tr); + } + + var children = addLevel(level + 1, parent.concat([x.parts[level]]), true); + if (children.length > 0) { + var cell = cols[level]; + var onclick = function() { + cell.classList.remove("link"); + cell.removeEventListener("click", onclick); + children.forEach(function(x) { + x.classList.remove("hidden"); + }); + }; + cell.classList.add("link"); + cell.addEventListener("click", onclick); + } + }); + + if (othersTime > 0) { + var cells = []; + for (var i = 0; i < maxLength; i++) { + cells.push(parent[i]); + } + cells.push(othersTime.toFixed(3)); + cells[level] = 'others'; + var cols = createRow(table, 'td', cells); + for (i = 0; i < level; i++) { + cols[i].className = 'muted'; + } + + var cell = cols[level]; + var tr = cell.parentNode; + var onclick = function() { + tr.classList.add("hidden"); + cell.classList.remove("link"); + cell.removeEventListener("click", onclick); + othersRows.forEach(function(x) { + x.classList.remove("hidden"); + }); + }; + + cell.title = othersList.join(", "); + cell.classList.add("link"); + cell.addEventListener("click", onclick); + + if (hide) { + tr.classList.add("hidden"); + } + + childrenRows.push(tr); + } + + return childrenRows; + }; + + addLevel(0, []); + + popup(table); + }); +} + diff --git a/javascript/progressbar.js b/javascript/progressbar.js new file mode 100644 index 0000000000000000000000000000000000000000..29299787e30eef0c6d411dd018561ad7976ca512 --- /dev/null +++ b/javascript/progressbar.js @@ -0,0 +1,177 @@ +// code related to showing and updating progressbar shown as the image is being made + +function rememberGallerySelection() { + +} + +function getGallerySelectedIndex() { + +} + +function request(url, data, handler, errorHandler) { + var xhr = new XMLHttpRequest(); + xhr.open("POST", url, true); + xhr.setRequestHeader("Content-Type", "application/json"); + xhr.onreadystatechange = function() { + if (xhr.readyState === 4) { + if (xhr.status === 200) { + try { + var js = JSON.parse(xhr.responseText); + handler(js); + } catch (error) { + console.error(error); + errorHandler(); + } + } else { + errorHandler(); + } + } + }; + var js = JSON.stringify(data); + xhr.send(js); +} + +function pad2(x) { + return x < 10 ? '0' + x : x; +} + +function formatTime(secs) { + if (secs > 3600) { + return pad2(Math.floor(secs / 60 / 60)) + ":" + pad2(Math.floor(secs / 60) % 60) + ":" + pad2(Math.floor(secs) % 60); + } else if (secs > 60) { + return pad2(Math.floor(secs / 60)) + ":" + pad2(Math.floor(secs) % 60); + } else { + return Math.floor(secs) + "s"; + } +} + +function setTitle(progress) { + var title = 'Stable Diffusion'; + + if (opts.show_progress_in_title && progress) { + title = '[' + progress.trim() + '] ' + title; + } + + if (document.title != title) { + document.title = title; + } +} + + +function randomId() { + return "task(" + Math.random().toString(36).slice(2, 7) + Math.random().toString(36).slice(2, 7) + Math.random().toString(36).slice(2, 7) + ")"; +} + +// starts sending progress requests to "/internal/progress" uri, creating progressbar above progressbarContainer element and +// preview inside gallery element. Cleans up all created stuff when the task is over and calls atEnd. 
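+// The JSON reply is expected to carry fields such as active, queued, completed, progress (0..1), eta (seconds),
+// textinfo, live_preview (a data URI) and id_live_preview — a shape inferred from the handler below, not an authoritative schema.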
+// calls onProgress every time there is a progress update +function requestProgress(id_task, progressbarContainer, gallery, atEnd, onProgress, inactivityTimeout = 40) { + var dateStart = new Date(); + var wasEverActive = false; + var parentProgressbar = progressbarContainer.parentNode; + var parentGallery = gallery ? gallery.parentNode : null; + + var divProgress = document.createElement('div'); + divProgress.className = 'progressDiv'; + divProgress.style.display = opts.show_progressbar ? "block" : "none"; + var divInner = document.createElement('div'); + divInner.className = 'progress'; + + divProgress.appendChild(divInner); + parentProgressbar.insertBefore(divProgress, progressbarContainer); + + if (parentGallery) { + var livePreview = document.createElement('div'); + livePreview.className = 'livePreview'; + parentGallery.insertBefore(livePreview, gallery); + } + + var removeProgressBar = function() { + setTitle(""); + parentProgressbar.removeChild(divProgress); + if (parentGallery) parentGallery.removeChild(livePreview); + atEnd(); + }; + + var fun = function(id_task, id_live_preview) { + request("./internal/progress", {id_task: id_task, id_live_preview: id_live_preview}, function(res) { + if (res.completed) { + removeProgressBar(); + return; + } + + var rect = progressbarContainer.getBoundingClientRect(); + + if (rect.width) { + divProgress.style.width = rect.width + "px"; + } + + let progressText = ""; + + divInner.style.width = ((res.progress || 0) * 100.0) + '%'; + divInner.style.background = res.progress ? "" : "transparent"; + + if (res.progress > 0) { + progressText = ((res.progress || 0) * 100.0).toFixed(0) + '%'; + } + + if (res.eta) { + progressText += " ETA: " + formatTime(res.eta); + } + + + setTitle(progressText); + + if (res.textinfo && res.textinfo.indexOf("\n") == -1) { + progressText = res.textinfo + " " + progressText; + } + + divInner.textContent = progressText; + + var elapsedFromStart = (new Date() - dateStart) / 1000; + + if (res.active) wasEverActive = true; + + if (!res.active && wasEverActive) { + removeProgressBar(); + return; + } + + if (elapsedFromStart > inactivityTimeout && !res.queued && !res.active) { + removeProgressBar(); + return; + } + + + if (res.live_preview && gallery) { + rect = gallery.getBoundingClientRect(); + if (rect.width) { + livePreview.style.width = rect.width + "px"; + livePreview.style.height = rect.height + "px"; + } + + var img = new Image(); + img.onload = function() { + livePreview.appendChild(img); + if (livePreview.childElementCount > 2) { + livePreview.removeChild(livePreview.firstElementChild); + } + }; + img.src = res.live_preview; + } + + + if (onProgress) { + onProgress(res); + } + + setTimeout(() => { + fun(id_task, res.id_live_preview); + }, opts.live_preview_refresh_period || 500); + }, function() { + removeProgressBar(); + }); + }; + + fun(id_task, 0); +} diff --git a/javascript/textualInversion.js b/javascript/textualInversion.js new file mode 100644 index 0000000000000000000000000000000000000000..20443fcca01bbba6712e40136c57dbcdb78ca945 --- /dev/null +++ b/javascript/textualInversion.js @@ -0,0 +1,17 @@ + + + +function start_training_textual_inversion() { + gradioApp().querySelector('#ti_error').innerHTML = ''; + + var id = randomId(); + requestProgress(id, gradioApp().getElementById('ti_output'), gradioApp().getElementById('ti_gallery'), function() {}, function(progress) { + gradioApp().getElementById('ti_progress').innerHTML = progress.textinfo; + }); + + var res = Array.from(arguments); + + res[0] = id; + + return 
res; +} diff --git a/javascript/token-counters.js b/javascript/token-counters.js new file mode 100644 index 0000000000000000000000000000000000000000..9d81a723b01f8b6e3c0894b7a5191dc6b1614c2d --- /dev/null +++ b/javascript/token-counters.js @@ -0,0 +1,83 @@ +let promptTokenCountDebounceTime = 800; +let promptTokenCountTimeouts = {}; +var promptTokenCountUpdateFunctions = {}; + +function update_txt2img_tokens(...args) { + // Called from Gradio + update_token_counter("txt2img_token_button"); + if (args.length == 2) { + return args[0]; + } + return args; +} + +function update_img2img_tokens(...args) { + // Called from Gradio + update_token_counter("img2img_token_button"); + if (args.length == 2) { + return args[0]; + } + return args; +} + +function update_token_counter(button_id) { + if (opts.disable_token_counters) { + return; + } + if (promptTokenCountTimeouts[button_id]) { + clearTimeout(promptTokenCountTimeouts[button_id]); + } + promptTokenCountTimeouts[button_id] = setTimeout( + () => gradioApp().getElementById(button_id)?.click(), + promptTokenCountDebounceTime, + ); +} + + +function recalculatePromptTokens(name) { + promptTokenCountUpdateFunctions[name]?.(); +} + +function recalculate_prompts_txt2img() { + // Called from Gradio + recalculatePromptTokens('txt2img_prompt'); + recalculatePromptTokens('txt2img_neg_prompt'); + return Array.from(arguments); +} + +function recalculate_prompts_img2img() { + // Called from Gradio + recalculatePromptTokens('img2img_prompt'); + recalculatePromptTokens('img2img_neg_prompt'); + return Array.from(arguments); +} + +function setupTokenCounting(id, id_counter, id_button) { + var prompt = gradioApp().getElementById(id); + var counter = gradioApp().getElementById(id_counter); + var textarea = gradioApp().querySelector(`#${id} > label > textarea`); + + if (opts.disable_token_counters) { + counter.style.display = "none"; + return; + } + + if (counter.parentElement == prompt.parentElement) { + return; + } + + prompt.parentElement.insertBefore(counter, prompt); + prompt.parentElement.style.position = "relative"; + + promptTokenCountUpdateFunctions[id] = function() { + update_token_counter(id_button); + }; + textarea.addEventListener("input", promptTokenCountUpdateFunctions[id]); +} + +function setupTokenCounters() { + setupTokenCounting('txt2img_prompt', 'txt2img_token_counter', 'txt2img_token_button'); + setupTokenCounting('txt2img_neg_prompt', 'txt2img_negative_token_counter', 'txt2img_negative_token_button'); + setupTokenCounting('img2img_prompt', 'img2img_token_counter', 'img2img_token_button'); + setupTokenCounting('img2img_neg_prompt', 'img2img_negative_token_counter', 'img2img_negative_token_button'); +} diff --git a/javascript/ui.js b/javascript/ui.js new file mode 100644 index 0000000000000000000000000000000000000000..d70a681bff7b45fe5711431ee8ec55c444443a5b --- /dev/null +++ b/javascript/ui.js @@ -0,0 +1,387 @@ +// various functions for interaction with ui.py not large enough to warrant putting them in separate files + +function set_theme(theme) { + var gradioURL = window.location.href; + if (!gradioURL.includes('?__theme=')) { + window.location.replace(gradioURL + '?__theme=' + theme); + } +} + +function all_gallery_buttons() { + var allGalleryButtons = gradioApp().querySelectorAll('[style="display: block;"].tabitem div[id$=_gallery].gradio-gallery .thumbnails > .thumbnail-item.thumbnail-small'); + var visibleGalleryButtons = []; + allGalleryButtons.forEach(function(elem) { + if (elem.parentElement.offsetParent) { + 
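// offsetParent is null for elements hidden via display:none (on themselves or an ancestor), so this keeps only the thumbnails of the tab that is currently shown. +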
visibleGalleryButtons.push(elem); + } + }); + return visibleGalleryButtons; +} + +function selected_gallery_button() { + var allCurrentButtons = gradioApp().querySelectorAll('[style="display: block;"].tabitem div[id$=_gallery].gradio-gallery .thumbnail-item.thumbnail-small.selected'); + var visibleCurrentButton = null; + allCurrentButtons.forEach(function(elem) { + if (elem.parentElement.offsetParent) { + visibleCurrentButton = elem; + } + }); + return visibleCurrentButton; +} + +function selected_gallery_index() { + var buttons = all_gallery_buttons(); + var button = selected_gallery_button(); + + var result = -1; + buttons.forEach(function(v, i) { + if (v == button) { + result = i; + } + }); + + return result; +} + +function extract_image_from_gallery(gallery) { + if (gallery.length == 0) { + return [null]; + } + if (gallery.length == 1) { + return [gallery[0]]; + } + + var index = selected_gallery_index(); + + if (index < 0 || index >= gallery.length) { + // Use the first image in the gallery as the default + index = 0; + } + + return [gallery[index]]; +} + +window.args_to_array = Array.from; // Compatibility with e.g. extensions that may expect this to be around + +function switch_to_txt2img() { + gradioApp().querySelector('#tabs').querySelectorAll('button')[0].click(); + + return Array.from(arguments); +} + +function switch_to_img2img_tab(no) { + gradioApp().querySelector('#tabs').querySelectorAll('button')[1].click(); + gradioApp().getElementById('mode_img2img').querySelectorAll('button')[no].click(); +} +function switch_to_img2img() { + switch_to_img2img_tab(0); + return Array.from(arguments); +} + +function switch_to_sketch() { + switch_to_img2img_tab(1); + return Array.from(arguments); +} + +function switch_to_inpaint() { + switch_to_img2img_tab(2); + return Array.from(arguments); +} + +function switch_to_inpaint_sketch() { + switch_to_img2img_tab(3); + return Array.from(arguments); +} + +function switch_to_extras() { + gradioApp().querySelector('#tabs').querySelectorAll('button')[2].click(); + + return Array.from(arguments); +} + +function get_tab_index(tabId) { + let buttons = gradioApp().getElementById(tabId).querySelector('div').querySelectorAll('button'); + for (let i = 0; i < buttons.length; i++) { + if (buttons[i].classList.contains('selected')) { + return i; + } + } + return 0; +} + +function create_tab_index_args(tabId, args) { + var res = Array.from(args); + res[0] = get_tab_index(tabId); + return res; +} + +function get_img2img_tab_index() { + let res = Array.from(arguments); + res.splice(-2); + res[0] = get_tab_index('mode_img2img'); + return res; +} + +function create_submit_args(args) { + var res = Array.from(args); + + // As it is currently, txt2img and img2img send back the previous output args (txt2img_gallery, generation_info, html_info) whenever you generate a new image. + // This can lead to uploading a huge gallery of previously generated images, which leads to an unnecessary delay between submitting and beginning to generate. + // I don't know why gradio is sending outputs along with inputs, but we can prevent sending the image gallery here, which seems to be an issue for some. + // If gradio at some point stops sending outputs, this may break something + if (Array.isArray(res[res.length - 3])) { + res[res.length - 3] = null; + } + + return res; +} + +function showSubmitButtons(tabname, show) { + gradioApp().getElementById(tabname + '_interrupt').style.display = show ? "none" : "block"; + gradioApp().getElementById(tabname + '_skip').style.display = show ? 
"none" : "block"; +} + +function showRestoreProgressButton(tabname, show) { + var button = gradioApp().getElementById(tabname + "_restore_progress"); + if (!button) return; + + button.style.display = show ? "flex" : "none"; +} + +function submit() { + showSubmitButtons('txt2img', false); + + var id = randomId(); + localStorage.setItem("txt2img_task_id", id); + + requestProgress(id, gradioApp().getElementById('txt2img_gallery_container'), gradioApp().getElementById('txt2img_gallery'), function() { + showSubmitButtons('txt2img', true); + localStorage.removeItem("txt2img_task_id"); + showRestoreProgressButton('txt2img', false); + }); + + var res = create_submit_args(arguments); + + res[0] = id; + + return res; +} + +function submit_img2img() { + showSubmitButtons('img2img', false); + + var id = randomId(); + localStorage.setItem("img2img_task_id", id); + + requestProgress(id, gradioApp().getElementById('img2img_gallery_container'), gradioApp().getElementById('img2img_gallery'), function() { + showSubmitButtons('img2img', true); + localStorage.removeItem("img2img_task_id"); + showRestoreProgressButton('img2img', false); + }); + + var res = create_submit_args(arguments); + + res[0] = id; + res[1] = get_tab_index('mode_img2img'); + + return res; +} + +function restoreProgressTxt2img() { + showRestoreProgressButton("txt2img", false); + var id = localStorage.getItem("txt2img_task_id"); + + id = localStorage.getItem("txt2img_task_id"); + + if (id) { + requestProgress(id, gradioApp().getElementById('txt2img_gallery_container'), gradioApp().getElementById('txt2img_gallery'), function() { + showSubmitButtons('txt2img', true); + }, null, 0); + } + + return id; +} + +function restoreProgressImg2img() { + showRestoreProgressButton("img2img", false); + + var id = localStorage.getItem("img2img_task_id"); + + if (id) { + requestProgress(id, gradioApp().getElementById('img2img_gallery_container'), gradioApp().getElementById('img2img_gallery'), function() { + showSubmitButtons('img2img', true); + }, null, 0); + } + + return id; +} + + +onUiLoaded(function() { + showRestoreProgressButton('txt2img', localStorage.getItem("txt2img_task_id")); + showRestoreProgressButton('img2img', localStorage.getItem("img2img_task_id")); +}); + + +function modelmerger() { + var id = randomId(); + requestProgress(id, gradioApp().getElementById('modelmerger_results_panel'), null, function() {}); + + var res = create_submit_args(arguments); + res[0] = id; + return res; +} + + +function ask_for_style_name(_, prompt_text, negative_prompt_text) { + var name_ = prompt('Style name:'); + return [name_, prompt_text, negative_prompt_text]; +} + +function confirm_clear_prompt(prompt, negative_prompt) { + if (confirm("Delete prompt?")) { + prompt = ""; + negative_prompt = ""; + } + + return [prompt, negative_prompt]; +} + + +var opts = {}; +onAfterUiUpdate(function() { + if (Object.keys(opts).length != 0) return; + + var json_elem = gradioApp().getElementById('settings_json'); + if (json_elem == null) return; + + var textarea = json_elem.querySelector('textarea'); + var jsdata = textarea.value; + opts = JSON.parse(jsdata); + + executeCallbacks(optionsChangedCallbacks); /*global optionsChangedCallbacks*/ + + Object.defineProperty(textarea, 'value', { + set: function(newValue) { + var valueProp = Object.getOwnPropertyDescriptor(HTMLTextAreaElement.prototype, 'value'); + var oldValue = valueProp.get.call(textarea); + valueProp.set.call(textarea, newValue); + + if (oldValue != newValue) { + opts = JSON.parse(textarea.value); + } + + 
executeCallbacks(optionsChangedCallbacks); + }, + get: function() { + var valueProp = Object.getOwnPropertyDescriptor(HTMLTextAreaElement.prototype, 'value'); + return valueProp.get.call(textarea); + } + }); + + json_elem.parentElement.style.display = "none"; + + setupTokenCounters(); + + var show_all_pages = gradioApp().getElementById('settings_show_all_pages'); + var settings_tabs = gradioApp().querySelector('#settings div'); + if (show_all_pages && settings_tabs) { + settings_tabs.appendChild(show_all_pages); + show_all_pages.onclick = function() { + gradioApp().querySelectorAll('#settings > div').forEach(function(elem) { + if (elem.id == "settings_tab_licenses") { + return; + } + + elem.style.display = "block"; + }); + }; + } +}); + +onOptionsChanged(function() { + var elem = gradioApp().getElementById('sd_checkpoint_hash'); + var sd_checkpoint_hash = opts.sd_checkpoint_hash || ""; + var shorthash = sd_checkpoint_hash.substring(0, 10); + + if (elem && elem.textContent != shorthash) { + elem.textContent = shorthash; + elem.title = sd_checkpoint_hash; + elem.href = "https://google.com/search?q=" + sd_checkpoint_hash; + } +}); + +let txt2img_textarea, img2img_textarea = undefined; + +function restart_reload() { + document.body.innerHTML = '
<h1 style="font-family:monospace; margin-top:20%; color:lightgray; text-align:center;">Reloading...</h1>
'; + + var requestPing = function() { + requestGet("./internal/ping", {}, function(data) { + location.reload(); + }, function() { + setTimeout(requestPing, 500); + }); + }; + + setTimeout(requestPing, 2000); + + return []; +} + +// Simulate an `input` DOM event for Gradio Textbox component. Needed after you edit its contents in JavaScript, otherwise your edits +// will only be visible on the web page and will not be sent to Python. +function updateInput(target) { + let e = new Event("input", {bubbles: true}); + Object.defineProperty(e, "target", {value: target}); + target.dispatchEvent(e); +} + + +var desiredCheckpointName = null; +function selectCheckpoint(name) { + desiredCheckpointName = name; + gradioApp().getElementById('change_checkpoint').click(); +} + +function currentImg2imgSourceResolution(w, h, scaleBy) { + var img = gradioApp().querySelector('#mode_img2img > div[style="display: block;"] img'); + return img ? [img.naturalWidth, img.naturalHeight, scaleBy] : [0, 0, scaleBy]; +} + +function updateImg2imgResizeToTextAfterChangingImage() { + // At the time this is called from gradio, the image has not yet been replaced. + // There may be a better solution, but this is simple and straightforward so I'm going with it. + + setTimeout(function() { + gradioApp().getElementById('img2img_update_resize_to').click(); + }, 500); + + return []; +} + + +function setRandomSeed(elem_id) { + var input = gradioApp().querySelector("#" + elem_id + " input"); + if (!input) return []; + + input.value = "-1"; + updateInput(input); + return []; +} + +function switchWidthHeight(tabname) { + var width = gradioApp().querySelector("#" + tabname + "_width input[type=number]"); + var height = gradioApp().querySelector("#" + tabname + "_height input[type=number]"); + if (!width || !height) return []; + + var tmp = width.value; + width.value = height.value; + height.value = tmp; + + updateInput(width); + updateInput(height); + return []; +} diff --git a/javascript/ui_settings_hints.js b/javascript/ui_settings_hints.js new file mode 100644 index 0000000000000000000000000000000000000000..d088f9494f826d9534dc105ac2f99bda702d22c0 --- /dev/null +++ b/javascript/ui_settings_hints.js @@ -0,0 +1,62 @@ +// various hints and extra info for the settings tab + +var settingsHintsSetup = false; + +onOptionsChanged(function() { + if (settingsHintsSetup) return; + settingsHintsSetup = true; + + gradioApp().querySelectorAll('#settings [id^=setting_]').forEach(function(div) { + var name = div.id.substr(8); + var commentBefore = opts._comments_before[name]; + var commentAfter = opts._comments_after[name]; + + if (!commentBefore && !commentAfter) return; + + var span = null; + if (div.classList.contains('gradio-checkbox')) span = div.querySelector('label span'); + else if (div.classList.contains('gradio-checkboxgroup')) span = div.querySelector('span').firstChild; + else if (div.classList.contains('gradio-radio')) span = div.querySelector('span').firstChild; + else span = div.querySelector('label span').firstChild; + + if (!span) return; + + if (commentBefore) { + var comment = document.createElement('DIV'); + comment.className = 'settings-comment'; + comment.innerHTML = commentBefore; + span.parentElement.insertBefore(document.createTextNode('\xa0'), span); + span.parentElement.insertBefore(comment, span); + span.parentElement.insertBefore(document.createTextNode('\xa0'), span); + } + if (commentAfter) { + comment = document.createElement('DIV'); + comment.className = 'settings-comment'; + comment.innerHTML = commentAfter; +
span.parentElement.insertBefore(comment, span.nextSibling); + span.parentElement.insertBefore(document.createTextNode('\xa0'), span.nextSibling); + } + }); +}); + +function settingsHintsShowQuicksettings() { + requestGet("./internal/quicksettings-hint", {}, function(data) { + var table = document.createElement('table'); + table.className = 'popup-table'; + + data.forEach(function(obj) { + var tr = document.createElement('tr'); + var td = document.createElement('td'); + td.textContent = obj.name; + tr.appendChild(td); + + td = document.createElement('td'); + td.textContent = obj.label; + tr.appendChild(td); + + table.appendChild(tr); + }); + + popup(table); + }); +} diff --git a/launch.py b/launch.py new file mode 100644 index 0000000000000000000000000000000000000000..b8515568bf400c6f33aab099c81f135b7b9d5fc9 --- /dev/null +++ b/launch.py @@ -0,0 +1,39 @@ +from modules import launch_utils + + +args = launch_utils.args +python = launch_utils.python +git = launch_utils.git +index_url = launch_utils.index_url +dir_repos = launch_utils.dir_repos + +commit_hash = launch_utils.commit_hash +git_tag = launch_utils.git_tag + +run = launch_utils.run +is_installed = launch_utils.is_installed +repo_dir = launch_utils.repo_dir + +run_pip = launch_utils.run_pip +check_run_python = launch_utils.check_run_python +git_clone = launch_utils.git_clone +git_pull_recursive = launch_utils.git_pull_recursive +list_extensions = launch_utils.list_extensions +run_extension_installer = launch_utils.run_extension_installer +prepare_environment = launch_utils.prepare_environment +configure_for_tests = launch_utils.configure_for_tests +start = launch_utils.start + + +def main(): + if not args.skip_prepare_environment: + prepare_environment() + + if args.test_server: + configure_for_tests() + + start() + + +if __name__ == "__main__": + main() diff --git a/localizations/Put localization files here.txt b/localizations/Put localization files here.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/Lora/141887?type=Model b/models/Lora/141887?type=Model new file mode 100644 index 0000000000000000000000000000000000000000..605f0a1ada672cdd2f1fc7effda111b2ef82c932 --- /dev/null +++ b/models/Lora/141887?type=Model @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3f465ddefe946e9cafc52d284e06ce6d4c2381b10560e846a95fc38b89eb9325 +size 912552468 diff --git a/models/Lora/sd_xl_offset_example-lora_1.0.safetensors b/models/Lora/sd_xl_offset_example-lora_1.0.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..eae3ad1bb0e28c0b70c7d73900cbdd28913d5f89 --- /dev/null +++ b/models/Lora/sd_xl_offset_example-lora_1.0.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4852686128f953d0277d0793e2f0335352f96a919c9c16a09787d77f55cbdf6f +size 49553604 diff --git a/models/Lora/szdnpp_lora_v1.safetensors b/models/Lora/szdnpp_lora_v1.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..5a6cdd522a9747e2c3546efddb5224e4bb67dd88 --- /dev/null +++ b/models/Lora/szdnpp_lora_v1.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2c0a9096d4a7c13a1f0361a36151eb98ff94df7f242f89819b5cc02f1726ef3d +size 151111384 diff --git a/models/Lora/wget-log b/models/Lora/wget-log new file mode 100644 index 0000000000000000000000000000000000000000..f0ae00a0798ae0a38cf7ec6773e89c9af26d6303 --- /dev/null +++ b/models/Lora/wget-log @@ -0,0 +1,16 @@ 
+--2023-08-17 23:55:03-- https://civitai.com/api/download/models/141887?type=Model +Resolving civitai.com (civitai.com)... 104.18.22.206, 104.18.23.206, 2606:4700::6812:16ce, ... +Connecting to civitai.com (civitai.com)|104.18.22.206|:443... connected. +HTTP request sent, awaiting response... 307 Temporary Redirect +Location: https://civitai-delivery-worker-prod-2023-08-01.5ac0637cfd0766c97916cefa3764fbdf.r2.cloudflarestorage.com/1332236/model/emrataV3.i3eC.safetensors?X-Amz-Expires=86400&response-content-disposition=attachment%3B%20filename%3D%22emrata_v3.safetensors%22&X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=2fea663d76bd24a496545da373d610fc/20230817/us-east-1/s3/aws4_request&X-Amz-Date=20230817T235503Z&X-Amz-SignedHeaders=host&X-Amz-Signature=b42f414224ec610a89f997e4289e1af49d2ec90f18a611b8fa58dfa2fe49c939 [following] +--2023-08-17 23:55:03-- https://civitai-delivery-worker-prod-2023-08-01.5ac0637cfd0766c97916cefa3764fbdf.r2.cloudflarestorage.com/1332236/model/emrataV3.i3eC.safetensors?X-Amz-Expires=86400&response-content-disposition=attachment%3B%20filename%3D%22emrata_v3.safetensors%22&X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=2fea663d76bd24a496545da373d610fc/20230817/us-east-1/s3/aws4_request&X-Amz-Date=20230817T235503Z&X-Amz-SignedHeaders=host&X-Amz-Signature=b42f414224ec610a89f997e4289e1af49d2ec90f18a611b8fa58dfa2fe49c939 +Resolving civitai-delivery-worker-prod-2023-08-01.5ac0637cfd0766c97916cefa3764fbdf.r2.cloudflarestorage.com (civitai-delivery-worker-prod-2023-08-01.5ac0637cfd0766c97916cefa3764fbdf.r2.cloudflarestorage.com)... 104.18.8.90, 104.18.9.90, 2606:4700::6812:85a, ... +Connecting to civitai-delivery-worker-prod-2023-08-01.5ac0637cfd0766c97916cefa3764fbdf.r2.cloudflarestorage.com (civitai-delivery-worker-prod-2023-08-01.5ac0637cfd0766c97916cefa3764fbdf.r2.cloudflarestorage.com)|104.18.8.90|:443... connected. +HTTP request sent, awaiting response... 
200 OK +Length: 912552468 (870M) +Saving to: ‘141887?type=Model’ + + 141887?type=Model 100%[==========================================================>] 870.28M 62.5MB/s in 14s + +2023-08-17 23:55:17 (63.3 MB/s) - ‘141887?type=Model’ saved [912552468/912552468] + diff --git a/models/Stable-diffusion/Put Stable Diffusion checkpoints here.txt b/models/Stable-diffusion/Put Stable Diffusion checkpoints here.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/Stable-diffusion/sd_xl_base_1.0.safetensors b/models/Stable-diffusion/sd_xl_base_1.0.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..a4e26e69370f72c43e5b5f53879919c86bcd6822 --- /dev/null +++ b/models/Stable-diffusion/sd_xl_base_1.0.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:31e35c80fc4829d14f90153f4c74cd59c90b779f6afe05a74cd6120b893f7e5b +size 6938078334 diff --git a/models/Stable-diffusion/sd_xl_refiner_1.0.safetensors b/models/Stable-diffusion/sd_xl_refiner_1.0.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..73f461bed1c6ccf74548ebd9da0e62122f7a25bd --- /dev/null +++ b/models/Stable-diffusion/sd_xl_refiner_1.0.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7440042bbdc8a24813002c09b6b69b64dc90fded4472613437b7f55f9b7d9c5f +size 6075981930 diff --git a/models/Stable-diffusion/v1-5-pruned-emaonly.safetensors b/models/Stable-diffusion/v1-5-pruned-emaonly.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..b72f1e8260a0b1d4ad3bc9b4b4ffd0ea175e33fb --- /dev/null +++ b/models/Stable-diffusion/v1-5-pruned-emaonly.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6ce0161689b3853acaa03779ec93eafe75a02f4ced659bee03f50797806fa2fa +size 4265146304 diff --git a/models/VAE-approx/model.pt b/models/VAE-approx/model.pt new file mode 100644 index 0000000000000000000000000000000000000000..09c6b8f7fda5e15495c6203ca323d6573745d0af --- /dev/null +++
b/models/VAE-approx/model.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4f88c9078bb2238cdd0d8864671dd33e3f42e091e41f08903f3c15e4a54a9b39 +size 213777 diff --git a/models/VAE/Put VAE here.txt b/models/VAE/Put VAE here.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/VAE/sd_xl_base_1.0.safetensors b/models/VAE/sd_xl_base_1.0.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..5c5bfd44fb0132b3cbd9d122244f502a7cd2bb24 --- /dev/null +++ b/models/VAE/sd_xl_base_1.0.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:63aeecb90ff7bc1c115395962d3e803571385b61938377bc7089b36e81e92e2e +size 334641164 diff --git a/models/VAE/sdxl_vae.safetensors b/models/VAE/sdxl_vae.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..5c5bfd44fb0132b3cbd9d122244f502a7cd2bb24 --- /dev/null +++ b/models/VAE/sdxl_vae.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:63aeecb90ff7bc1c115395962d3e803571385b61938377bc7089b36e81e92e2e +size 334641164 diff --git a/models/deepbooru/Put your deepbooru release project folder here.txt b/models/deepbooru/Put your deepbooru release project folder here.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/karlo/ViT-L-14_stats.th b/models/karlo/ViT-L-14_stats.th new file mode 100644 index 0000000000000000000000000000000000000000..a6a06e94ecaa4f2977972ff991f75db6c90403ea Binary files /dev/null and b/models/karlo/ViT-L-14_stats.th differ diff --git a/modules/Roboto-Regular.ttf b/modules/Roboto-Regular.ttf new file mode 100644 index 0000000000000000000000000000000000000000..500b1045b0c94d83d2e6798aaf1faa55a2dab6fc Binary files /dev/null and b/modules/Roboto-Regular.ttf differ diff --git a/modules/__pycache__/cache.cpython-310.pyc b/modules/__pycache__/cache.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..909ad9ec17766bf6e59107e59355e02fd55d932c Binary files /dev/null and b/modules/__pycache__/cache.cpython-310.pyc differ diff --git a/modules/__pycache__/call_queue.cpython-310.pyc b/modules/__pycache__/call_queue.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3a82da956dc2807c2e231b4a734b6e77e1eee699 Binary files /dev/null and b/modules/__pycache__/call_queue.cpython-310.pyc differ diff --git a/modules/__pycache__/cmd_args.cpython-310.pyc b/modules/__pycache__/cmd_args.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6f38612294238beb5f2fcc87f596a5fdfe999a27 Binary files /dev/null and b/modules/__pycache__/cmd_args.cpython-310.pyc differ diff --git a/modules/__pycache__/codeformer_model.cpython-310.pyc b/modules/__pycache__/codeformer_model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ab0dfa2794ab33b11194804e536a96bc112a0dce Binary files /dev/null and b/modules/__pycache__/codeformer_model.cpython-310.pyc differ diff --git a/modules/__pycache__/config_states.cpython-310.pyc b/modules/__pycache__/config_states.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ff34c849da3e98047ebb7678535a880189582314 Binary files /dev/null and b/modules/__pycache__/config_states.cpython-310.pyc differ diff --git a/modules/__pycache__/deepbooru.cpython-310.pyc b/modules/__pycache__/deepbooru.cpython-310.pyc new file 
mode 100644 index 0000000000000000000000000000000000000000..ecf30799e0fc7c3ef63f521b99560e4ade75d682 Binary files /dev/null and b/modules/__pycache__/deepbooru.cpython-310.pyc differ diff --git a/modules/__pycache__/deepbooru_model.cpython-310.pyc b/modules/__pycache__/deepbooru_model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3a99e016c0f871809baf7a716a3beac33a773786 Binary files /dev/null and b/modules/__pycache__/deepbooru_model.cpython-310.pyc differ diff --git a/modules/__pycache__/devices.cpython-310.pyc b/modules/__pycache__/devices.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe7ddd86c1edd76294aa2c9e97be197c7ff520b7 Binary files /dev/null and b/modules/__pycache__/devices.cpython-310.pyc differ diff --git a/modules/__pycache__/errors.cpython-310.pyc b/modules/__pycache__/errors.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5a60eb26b96dadb9348bfa725a007e1fbd7c6b06 Binary files /dev/null and b/modules/__pycache__/errors.cpython-310.pyc differ diff --git a/modules/__pycache__/esrgan_model.cpython-310.pyc b/modules/__pycache__/esrgan_model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b9e97d7bb39d6d06d5550f4079e0a20122d498ba Binary files /dev/null and b/modules/__pycache__/esrgan_model.cpython-310.pyc differ diff --git a/modules/__pycache__/esrgan_model_arch.cpython-310.pyc b/modules/__pycache__/esrgan_model_arch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..14fec8a9446bd909c72ef32a568cbac3c999aad8 Binary files /dev/null and b/modules/__pycache__/esrgan_model_arch.cpython-310.pyc differ diff --git a/modules/__pycache__/extensions.cpython-310.pyc b/modules/__pycache__/extensions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d01ba0957588286b9943cdce990dd18d1ff402b7 Binary files /dev/null and b/modules/__pycache__/extensions.cpython-310.pyc differ diff --git a/modules/__pycache__/extra_networks.cpython-310.pyc b/modules/__pycache__/extra_networks.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..efdadcaff04a82c11a0662365202c6f8a7b40b69 Binary files /dev/null and b/modules/__pycache__/extra_networks.cpython-310.pyc differ diff --git a/modules/__pycache__/extra_networks_hypernet.cpython-310.pyc b/modules/__pycache__/extra_networks_hypernet.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d1dc9305d10d360ad5e30c89e32ddd25ef6b55a9 Binary files /dev/null and b/modules/__pycache__/extra_networks_hypernet.cpython-310.pyc differ diff --git a/modules/__pycache__/extras.cpython-310.pyc b/modules/__pycache__/extras.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d165b6254b11fa8798e1923119ccd1c2a3ab54c6 Binary files /dev/null and b/modules/__pycache__/extras.cpython-310.pyc differ diff --git a/modules/__pycache__/face_restoration.cpython-310.pyc b/modules/__pycache__/face_restoration.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..447dd93466d74fb75d7479e9bad09295ebe63c12 Binary files /dev/null and b/modules/__pycache__/face_restoration.cpython-310.pyc differ diff --git a/modules/__pycache__/generation_parameters_copypaste.cpython-310.pyc b/modules/__pycache__/generation_parameters_copypaste.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..df505ab152b5bf361072937ee31b35dd765209bb Binary files 
/dev/null and b/modules/__pycache__/generation_parameters_copypaste.cpython-310.pyc differ diff --git a/modules/__pycache__/gfpgan_model.cpython-310.pyc b/modules/__pycache__/gfpgan_model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..449402fe9423a1af7b5fcf5aa2bfc55d10940cc2 Binary files /dev/null and b/modules/__pycache__/gfpgan_model.cpython-310.pyc differ diff --git a/modules/__pycache__/gitpython_hack.cpython-310.pyc b/modules/__pycache__/gitpython_hack.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..758a2a860f8c52b210ca758379a48f43e7550424 Binary files /dev/null and b/modules/__pycache__/gitpython_hack.cpython-310.pyc differ diff --git a/modules/__pycache__/hashes.cpython-310.pyc b/modules/__pycache__/hashes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..84179708c3de36c983c922d4b8e74006df2df286 Binary files /dev/null and b/modules/__pycache__/hashes.cpython-310.pyc differ diff --git a/modules/__pycache__/images.cpython-310.pyc b/modules/__pycache__/images.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ef24c81ceb8269cc7491ebd43d157bc35f736a59 Binary files /dev/null and b/modules/__pycache__/images.cpython-310.pyc differ diff --git a/modules/__pycache__/img2img.cpython-310.pyc b/modules/__pycache__/img2img.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..181560c75ac4535ef47a01c514b3f9a8e1608906 Binary files /dev/null and b/modules/__pycache__/img2img.cpython-310.pyc differ diff --git a/modules/__pycache__/import_hook.cpython-310.pyc b/modules/__pycache__/import_hook.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..de095167381c5583614bbce0fd6e6848c708171c Binary files /dev/null and b/modules/__pycache__/import_hook.cpython-310.pyc differ diff --git a/modules/__pycache__/interrogate.cpython-310.pyc b/modules/__pycache__/interrogate.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fbd69bd037bbd71c9d2bd321555c68d027b77851 Binary files /dev/null and b/modules/__pycache__/interrogate.cpython-310.pyc differ diff --git a/modules/__pycache__/launch_utils.cpython-310.pyc b/modules/__pycache__/launch_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f006f9a36666f05eaa19904558dac163af561bfa Binary files /dev/null and b/modules/__pycache__/launch_utils.cpython-310.pyc differ diff --git a/modules/__pycache__/localization.cpython-310.pyc b/modules/__pycache__/localization.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d584834bba9a7b3c323fba636dcbb46f527853c2 Binary files /dev/null and b/modules/__pycache__/localization.cpython-310.pyc differ diff --git a/modules/__pycache__/lowvram.cpython-310.pyc b/modules/__pycache__/lowvram.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9b01f732ef21cc1a391c3fd60bf8694f71d0c5b4 Binary files /dev/null and b/modules/__pycache__/lowvram.cpython-310.pyc differ diff --git a/modules/__pycache__/masking.cpython-310.pyc b/modules/__pycache__/masking.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1223e667a3171f9bbbd9611e227ea2bbd9b7498b Binary files /dev/null and b/modules/__pycache__/masking.cpython-310.pyc differ diff --git a/modules/__pycache__/memmon.cpython-310.pyc b/modules/__pycache__/memmon.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..a800727a18b3dd2a5dd6d4fc011cf40f13b65268 Binary files /dev/null and b/modules/__pycache__/memmon.cpython-310.pyc differ diff --git a/modules/__pycache__/modelloader.cpython-310.pyc b/modules/__pycache__/modelloader.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..64c2a1c26be0f112a90669034dd049627f99b36a Binary files /dev/null and b/modules/__pycache__/modelloader.cpython-310.pyc differ diff --git a/modules/__pycache__/paths.cpython-310.pyc b/modules/__pycache__/paths.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..998d8e8705f05b5e4a21fecc3fff5ffaaa827d2e Binary files /dev/null and b/modules/__pycache__/paths.cpython-310.pyc differ diff --git a/modules/__pycache__/paths_internal.cpython-310.pyc b/modules/__pycache__/paths_internal.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..316f886972a3dcdb06c00624e0dda3184e13d124 Binary files /dev/null and b/modules/__pycache__/paths_internal.cpython-310.pyc differ diff --git a/modules/__pycache__/postprocessing.cpython-310.pyc b/modules/__pycache__/postprocessing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b358df780e9a9aa3b6285417c69d6cbc114b7499 Binary files /dev/null and b/modules/__pycache__/postprocessing.cpython-310.pyc differ diff --git a/modules/__pycache__/processing.cpython-310.pyc b/modules/__pycache__/processing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2acffce605104424cfc58fc28c23496b565ec8b1 Binary files /dev/null and b/modules/__pycache__/processing.cpython-310.pyc differ diff --git a/modules/__pycache__/progress.cpython-310.pyc b/modules/__pycache__/progress.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..402c87307352533f596ea5cbf033ab90db96426d Binary files /dev/null and b/modules/__pycache__/progress.cpython-310.pyc differ diff --git a/modules/__pycache__/prompt_parser.cpython-310.pyc b/modules/__pycache__/prompt_parser.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8a9145be5856bffe320aa4882041d0b5a773ae74 Binary files /dev/null and b/modules/__pycache__/prompt_parser.cpython-310.pyc differ diff --git a/modules/__pycache__/realesrgan_model.cpython-310.pyc b/modules/__pycache__/realesrgan_model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aca750db6c60d28c1a24227a93380266d105b6b4 Binary files /dev/null and b/modules/__pycache__/realesrgan_model.cpython-310.pyc differ diff --git a/modules/__pycache__/restart.cpython-310.pyc b/modules/__pycache__/restart.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..22c62b82b5333d95976c8de3231e3dc2d137e88b Binary files /dev/null and b/modules/__pycache__/restart.cpython-310.pyc differ diff --git a/modules/__pycache__/safe.cpython-310.pyc b/modules/__pycache__/safe.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..918bd26d58381c5accb28794196324297b4e1ae0 Binary files /dev/null and b/modules/__pycache__/safe.cpython-310.pyc differ diff --git a/modules/__pycache__/script_callbacks.cpython-310.pyc b/modules/__pycache__/script_callbacks.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b80c3fc4e67d575d02f6f7f654c9f56f87a6cc04 Binary files /dev/null and b/modules/__pycache__/script_callbacks.cpython-310.pyc differ diff --git 
a/modules/__pycache__/script_loading.cpython-310.pyc b/modules/__pycache__/script_loading.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fa49abce669c627b078aecf19ff0f22185cf2afc Binary files /dev/null and b/modules/__pycache__/script_loading.cpython-310.pyc differ diff --git a/modules/__pycache__/scripts.cpython-310.pyc b/modules/__pycache__/scripts.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cbc7efd0ad0226428aa9df0039047426d6f25c4c Binary files /dev/null and b/modules/__pycache__/scripts.cpython-310.pyc differ diff --git a/modules/__pycache__/scripts_auto_postprocessing.cpython-310.pyc b/modules/__pycache__/scripts_auto_postprocessing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b94e9dc736c3b6b248fd2022590456bc8e51d279 Binary files /dev/null and b/modules/__pycache__/scripts_auto_postprocessing.cpython-310.pyc differ diff --git a/modules/__pycache__/scripts_postprocessing.cpython-310.pyc b/modules/__pycache__/scripts_postprocessing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c560ceb335b46f79a3c10eb25e784f3134538cae Binary files /dev/null and b/modules/__pycache__/scripts_postprocessing.cpython-310.pyc differ diff --git a/modules/__pycache__/sd_disable_initialization.cpython-310.pyc b/modules/__pycache__/sd_disable_initialization.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..40ac8fbfe170d29734048ef2f4ace3554a63c6ee Binary files /dev/null and b/modules/__pycache__/sd_disable_initialization.cpython-310.pyc differ diff --git a/modules/__pycache__/sd_hijack.cpython-310.pyc b/modules/__pycache__/sd_hijack.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4f230a74f391bfe2cb4b88a280a10839257af6aa Binary files /dev/null and b/modules/__pycache__/sd_hijack.cpython-310.pyc differ diff --git a/modules/__pycache__/sd_hijack_checkpoint.cpython-310.pyc b/modules/__pycache__/sd_hijack_checkpoint.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dd56389fe3554a00ff21077ddeaec1ec87e36891 Binary files /dev/null and b/modules/__pycache__/sd_hijack_checkpoint.cpython-310.pyc differ diff --git a/modules/__pycache__/sd_hijack_clip.cpython-310.pyc b/modules/__pycache__/sd_hijack_clip.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f3da67f2267f296e65d853986fb23ce7778950c2 Binary files /dev/null and b/modules/__pycache__/sd_hijack_clip.cpython-310.pyc differ diff --git a/modules/__pycache__/sd_hijack_inpainting.cpython-310.pyc b/modules/__pycache__/sd_hijack_inpainting.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..18a0f764ffec6d94b656154d63bae42ab152a54b Binary files /dev/null and b/modules/__pycache__/sd_hijack_inpainting.cpython-310.pyc differ diff --git a/modules/__pycache__/sd_hijack_open_clip.cpython-310.pyc b/modules/__pycache__/sd_hijack_open_clip.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e622803d9c0c96d72445c2274bfd218ce5728d23 Binary files /dev/null and b/modules/__pycache__/sd_hijack_open_clip.cpython-310.pyc differ diff --git a/modules/__pycache__/sd_hijack_optimizations.cpython-310.pyc b/modules/__pycache__/sd_hijack_optimizations.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8312d771a3d591d5a2c34eeb4dc91b75fdd83889 Binary files /dev/null and 
b/modules/__pycache__/sd_hijack_optimizations.cpython-310.pyc differ
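[Editor's note: the file that follows, modules/api/api.py, implements the HTTP API. For orientation, here is a minimal client-side sketch of the txt2img route it registers; the local address and default port 7860, the third-party requests package, and Pillow are assumptions, and the payload shows only a small subset of the accepted fields.]

import base64
import io

import requests
from PIL import Image

payload = {"prompt": "a red apple on a table", "steps": 20}  # subset of txt2img fields
resp = requests.post("http://127.0.0.1:7860/sdapi/v1/txt2img", json=payload)
resp.raise_for_status()

# TextToImageResponse.images (see modules/api/models.py) is a list of base64-encoded images
for i, b64 in enumerate(resp.json()["images"]):
    Image.open(io.BytesIO(base64.b64decode(b64))).save(f"txt2img_{i}.png")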
diff --git a/modules/api/api.py b/modules/api/api.py new file mode 100644 index 0000000000000000000000000000000000000000..606db179d4c35ecfc1875e48a49eb9e4c4383cf1 --- /dev/null +++ b/modules/api/api.py @@ -0,0 +1,742 @@ +import base64 +import io +import os +import time +import datetime +import uvicorn +import gradio as gr +from threading import Lock +from io import BytesIO +from fastapi import APIRouter, Depends, FastAPI, Request, Response +from fastapi.security import HTTPBasic, HTTPBasicCredentials +from fastapi.exceptions import HTTPException +from fastapi.responses import JSONResponse +from fastapi.encoders import jsonable_encoder +from secrets import compare_digest + +import modules.shared as shared +from modules import sd_samplers, deepbooru, sd_hijack, images, scripts, ui, postprocessing, errors, restart +from modules.api import models +from modules.shared import opts +from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images +from modules.textual_inversion.textual_inversion import create_embedding, train_embedding +from modules.textual_inversion.preprocess import preprocess +from modules.hypernetworks.hypernetwork import create_hypernetwork, train_hypernetwork +from PIL import PngImagePlugin,Image +from modules.sd_models import checkpoints_list, unload_model_weights, reload_model_weights, checkpoint_aliases +from modules.sd_vae import vae_dict +from modules.sd_models_config import find_checkpoint_config_near_filename +from modules.realesrgan_model import get_realesrgan_models +from modules import devices +from typing import Dict, List, Any +import piexif +import piexif.helper +from contextlib import closing + + +def script_name_to_index(name, scripts): + try: + return [script.title().lower() for script in scripts].index(name.lower()) + except Exception as e: + raise HTTPException(status_code=422, detail=f"Script '{name}' not found") from e + + +def validate_sampler_name(name): + config = sd_samplers.all_samplers_map.get(name, None) + if config is None: + raise HTTPException(status_code=404, detail="Sampler not found") + + return name + + +def setUpscalers(req: dict): + reqDict = vars(req) +
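# note: vars() exposes the pydantic request model's field dict, so the two pops below + # rename the request's generic upscaler_1/upscaler_2 fields to the extras_upscaler_* + # keyword names that postprocessing.run_extras expects. +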
reqDict['extras_upscaler_1'] = reqDict.pop('upscaler_1', None) + reqDict['extras_upscaler_2'] = reqDict.pop('upscaler_2', None) + return reqDict + + +def decode_base64_to_image(encoding): + if encoding.startswith("data:image/"): + encoding = encoding.split(";")[1].split(",")[1] + try: + image = Image.open(BytesIO(base64.b64decode(encoding))) + return image + except Exception as e: + raise HTTPException(status_code=500, detail="Invalid encoded image") from e + + +def encode_pil_to_base64(image): + with io.BytesIO() as output_bytes: + + if opts.samples_format.lower() == 'png': + use_metadata = False + metadata = PngImagePlugin.PngInfo() + for key, value in image.info.items(): + if isinstance(key, str) and isinstance(value, str): + metadata.add_text(key, value) + use_metadata = True + image.save(output_bytes, format="PNG", pnginfo=(metadata if use_metadata else None), quality=opts.jpeg_quality) + + elif opts.samples_format.lower() in ("jpg", "jpeg", "webp"): + if image.mode == "RGBA": + image = image.convert("RGB") + parameters = image.info.get('parameters', None) + exif_bytes = piexif.dump({ + "Exif": { piexif.ExifIFD.UserComment: piexif.helper.UserComment.dump(parameters or "", encoding="unicode") } + }) + if opts.samples_format.lower() in ("jpg", "jpeg"): + image.save(output_bytes, format="JPEG", exif = exif_bytes, quality=opts.jpeg_quality) + else: + image.save(output_bytes, format="WEBP", exif = exif_bytes, quality=opts.jpeg_quality) + + else: + raise HTTPException(status_code=500, detail="Invalid image format") + + bytes_data = output_bytes.getvalue() + + return base64.b64encode(bytes_data) + + +def api_middleware(app: FastAPI): + rich_available = False + try: + if os.environ.get('WEBUI_RICH_EXCEPTIONS', None) is not None: + import anyio # importing just so it can be placed on silent list + import starlette # importing just so it can be placed on silent list + from rich.console import Console + console = Console() + rich_available = True + except Exception: + pass + + @app.middleware("http") + async def log_and_time(req: Request, call_next): + ts = time.time() + res: Response = await call_next(req) + duration = str(round(time.time() - ts, 4)) + res.headers["X-Process-Time"] = duration + endpoint = req.scope.get('path', 'err') + if shared.cmd_opts.api_log and endpoint.startswith('/sdapi'): + print('API {t} {code} {prot}/{ver} {method} {endpoint} {cli} {duration}'.format( + t=datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f"), + code=res.status_code, + ver=req.scope.get('http_version', '0.0'), + cli=req.scope.get('client', ('0:0.0.0', 0))[0], + prot=req.scope.get('scheme', 'err'), + method=req.scope.get('method', 'err'), + endpoint=endpoint, + duration=duration, + )) + return res + + def handle_exception(request: Request, e: Exception): + err = { + "error": type(e).__name__, + "detail": vars(e).get('detail', ''), + "body": vars(e).get('body', ''), + "errors": str(e), + } + if not isinstance(e, HTTPException): # do not print backtrace on known httpexceptions + message = f"API error: {request.method}: {request.url} {err}" + if rich_available: + print(message) + console.print_exception(show_locals=True, max_frames=2, extra_lines=1, suppress=[anyio, starlette], word_wrap=False, width=min([console.width, 200])) + else: + errors.report(message, exc_info=True) + return JSONResponse(status_code=vars(e).get('status_code', 500), content=jsonable_encoder(err)) + + @app.middleware("http") + async def exception_handling(request: Request, call_next): + try: + return await call_next(request) + 
except Exception as e: + return handle_exception(request, e) + + @app.exception_handler(Exception) + async def fastapi_exception_handler(request: Request, e: Exception): + return handle_exception(request, e) + + @app.exception_handler(HTTPException) + async def http_exception_handler(request: Request, e: HTTPException): + return handle_exception(request, e) + + +class Api: + def __init__(self, app: FastAPI, queue_lock: Lock): + if shared.cmd_opts.api_auth: + self.credentials = {} + for auth in shared.cmd_opts.api_auth.split(","): + user, password = auth.split(":") + self.credentials[user] = password + + self.router = APIRouter() + self.app = app + self.queue_lock = queue_lock + api_middleware(self.app) + self.add_api_route("/sdapi/v1/txt2img", self.text2imgapi, methods=["POST"], response_model=models.TextToImageResponse) + self.add_api_route("/sdapi/v1/img2img", self.img2imgapi, methods=["POST"], response_model=models.ImageToImageResponse) + self.add_api_route("/sdapi/v1/extra-single-image", self.extras_single_image_api, methods=["POST"], response_model=models.ExtrasSingleImageResponse) + self.add_api_route("/sdapi/v1/extra-batch-images", self.extras_batch_images_api, methods=["POST"], response_model=models.ExtrasBatchImagesResponse) + self.add_api_route("/sdapi/v1/png-info", self.pnginfoapi, methods=["POST"], response_model=models.PNGInfoResponse) + self.add_api_route("/sdapi/v1/progress", self.progressapi, methods=["GET"], response_model=models.ProgressResponse) + self.add_api_route("/sdapi/v1/interrogate", self.interrogateapi, methods=["POST"]) + self.add_api_route("/sdapi/v1/interrupt", self.interruptapi, methods=["POST"]) + self.add_api_route("/sdapi/v1/skip", self.skip, methods=["POST"]) + self.add_api_route("/sdapi/v1/options", self.get_config, methods=["GET"], response_model=models.OptionsModel) + self.add_api_route("/sdapi/v1/options", self.set_config, methods=["POST"]) + self.add_api_route("/sdapi/v1/cmd-flags", self.get_cmd_flags, methods=["GET"], response_model=models.FlagsModel) + self.add_api_route("/sdapi/v1/samplers", self.get_samplers, methods=["GET"], response_model=List[models.SamplerItem]) + self.add_api_route("/sdapi/v1/upscalers", self.get_upscalers, methods=["GET"], response_model=List[models.UpscalerItem]) + self.add_api_route("/sdapi/v1/latent-upscale-modes", self.get_latent_upscale_modes, methods=["GET"], response_model=List[models.LatentUpscalerModeItem]) + self.add_api_route("/sdapi/v1/sd-models", self.get_sd_models, methods=["GET"], response_model=List[models.SDModelItem]) + self.add_api_route("/sdapi/v1/sd-vae", self.get_sd_vaes, methods=["GET"], response_model=List[models.SDVaeItem]) + self.add_api_route("/sdapi/v1/hypernetworks", self.get_hypernetworks, methods=["GET"], response_model=List[models.HypernetworkItem]) + self.add_api_route("/sdapi/v1/face-restorers", self.get_face_restorers, methods=["GET"], response_model=List[models.FaceRestorerItem]) + self.add_api_route("/sdapi/v1/realesrgan-models", self.get_realesrgan_models, methods=["GET"], response_model=List[models.RealesrganItem]) + self.add_api_route("/sdapi/v1/prompt-styles", self.get_prompt_styles, methods=["GET"], response_model=List[models.PromptStyleItem]) + self.add_api_route("/sdapi/v1/embeddings", self.get_embeddings, methods=["GET"], response_model=models.EmbeddingsResponse) + self.add_api_route("/sdapi/v1/refresh-checkpoints", self.refresh_checkpoints, methods=["POST"]) + self.add_api_route("/sdapi/v1/create/embedding", self.create_embedding, methods=["POST"], 
response_model=models.CreateResponse) + self.add_api_route("/sdapi/v1/create/hypernetwork", self.create_hypernetwork, methods=["POST"], response_model=models.CreateResponse) + self.add_api_route("/sdapi/v1/preprocess", self.preprocess, methods=["POST"], response_model=models.PreprocessResponse) + self.add_api_route("/sdapi/v1/train/embedding", self.train_embedding, methods=["POST"], response_model=models.TrainResponse) + self.add_api_route("/sdapi/v1/train/hypernetwork", self.train_hypernetwork, methods=["POST"], response_model=models.TrainResponse) + self.add_api_route("/sdapi/v1/memory", self.get_memory, methods=["GET"], response_model=models.MemoryResponse) + self.add_api_route("/sdapi/v1/unload-checkpoint", self.unloadapi, methods=["POST"]) + self.add_api_route("/sdapi/v1/reload-checkpoint", self.reloadapi, methods=["POST"]) + self.add_api_route("/sdapi/v1/scripts", self.get_scripts_list, methods=["GET"], response_model=models.ScriptsList) + self.add_api_route("/sdapi/v1/script-info", self.get_script_info, methods=["GET"], response_model=List[models.ScriptInfo]) + + if shared.cmd_opts.api_server_stop: + self.add_api_route("/sdapi/v1/server-kill", self.kill_webui, methods=["POST"]) + self.add_api_route("/sdapi/v1/server-restart", self.restart_webui, methods=["POST"]) + self.add_api_route("/sdapi/v1/server-stop", self.stop_webui, methods=["POST"]) + + self.default_script_arg_txt2img = [] + self.default_script_arg_img2img = [] + + def add_api_route(self, path: str, endpoint, **kwargs): + if shared.cmd_opts.api_auth: + return self.app.add_api_route(path, endpoint, dependencies=[Depends(self.auth)], **kwargs) + return self.app.add_api_route(path, endpoint, **kwargs) + + def auth(self, credentials: HTTPBasicCredentials = Depends(HTTPBasic())): + if credentials.username in self.credentials: + if compare_digest(credentials.password, self.credentials[credentials.username]): + return True + + raise HTTPException(status_code=401, detail="Incorrect username or password", headers={"WWW-Authenticate": "Basic"}) + + def get_selectable_script(self, script_name, script_runner): + if script_name is None or script_name == "": + return None, None + + script_idx = script_name_to_index(script_name, script_runner.selectable_scripts) + script = script_runner.selectable_scripts[script_idx] + return script, script_idx + + def get_scripts_list(self): + t2ilist = [script.name for script in scripts.scripts_txt2img.scripts if script.name is not None] + i2ilist = [script.name for script in scripts.scripts_img2img.scripts if script.name is not None] + + return models.ScriptsList(txt2img=t2ilist, img2img=i2ilist) + + def get_script_info(self): + res = [] + + for script_list in [scripts.scripts_txt2img.scripts, scripts.scripts_img2img.scripts]: + res += [script.api_info for script in script_list if script.api_info is not None] + + return res + + def get_script(self, script_name, script_runner): + if script_name is None or script_name == "": + return None, None + + script_idx = script_name_to_index(script_name, script_runner.scripts) + return script_runner.scripts[script_idx] + + def init_default_script_args(self, script_runner): + #find max idx from the scripts in runner and generate a none array to init script_args + last_arg_index = 1 + for script in script_runner.scripts: + if last_arg_index < script.args_to: + last_arg_index = script.args_to + # None everywhere except position 0 to initialize script args + script_args = [None]*last_arg_index + script_args[0] = 0 + + # get default values + with gr.Blocks(): # will 
throw errors calling ui function without this + for script in script_runner.scripts: + if script.ui(script.is_img2img): + ui_default_values = [] + for elem in script.ui(script.is_img2img): + ui_default_values.append(elem.value) + script_args[script.args_from:script.args_to] = ui_default_values + return script_args + + def init_script_args(self, request, default_script_args, selectable_scripts, selectable_idx, script_runner): + script_args = default_script_args.copy() + # position 0 in script_arg is the idx+1 of the selectable script that is going to be run when using scripts.scripts_*2img.run() + if selectable_scripts: + script_args[selectable_scripts.args_from:selectable_scripts.args_to] = request.script_args + script_args[0] = selectable_idx + 1 + + # Now check for always on scripts + if request.alwayson_scripts: + for alwayson_script_name in request.alwayson_scripts.keys(): + alwayson_script = self.get_script(alwayson_script_name, script_runner) + if alwayson_script is None: + raise HTTPException(status_code=422, detail=f"always on script {alwayson_script_name} not found") + # Selectable script in always on script param check + if alwayson_script.alwayson is False: + raise HTTPException(status_code=422, detail="Cannot have a selectable script in the always on scripts params") + # always on script with no arg should always run so you don't really need to add them to the requests + if "args" in request.alwayson_scripts[alwayson_script_name]: + # min between arg length in scriptrunner and arg length in the request + for idx in range(0, min((alwayson_script.args_to - alwayson_script.args_from), len(request.alwayson_scripts[alwayson_script_name]["args"]))): + script_args[alwayson_script.args_from + idx] = request.alwayson_scripts[alwayson_script_name]["args"][idx] + return script_args + + def text2imgapi(self, txt2imgreq: models.StableDiffusionTxt2ImgProcessingAPI): + script_runner = scripts.scripts_txt2img + if not script_runner.scripts: + script_runner.initialize_scripts(False) + ui.create_ui() + if not self.default_script_arg_txt2img: + self.default_script_arg_txt2img = self.init_default_script_args(script_runner) + selectable_scripts, selectable_script_idx = self.get_selectable_script(txt2imgreq.script_name, script_runner) + + populate = txt2imgreq.copy(update={ # Override __init__ params + "sampler_name": validate_sampler_name(txt2imgreq.sampler_name or txt2imgreq.sampler_index), + "do_not_save_samples": not txt2imgreq.save_images, + "do_not_save_grid": not txt2imgreq.save_images, + }) + if populate.sampler_name: + populate.sampler_index = None # prevent a warning later on + + args = vars(populate) + args.pop('script_name', None) + args.pop('script_args', None) # will refeed them to the pipeline directly after initializing them + args.pop('alwayson_scripts', None) + + script_args = self.init_script_args(txt2imgreq, self.default_script_arg_txt2img, selectable_scripts, selectable_script_idx, script_runner) + + send_images = args.pop('send_images', True) + args.pop('save_images', None) + + with self.queue_lock: + with closing(StableDiffusionProcessingTxt2Img(sd_model=shared.sd_model, **args)) as p: + p.scripts = script_runner + p.outpath_grids = opts.outdir_txt2img_grids + p.outpath_samples = opts.outdir_txt2img_samples + + try: + shared.state.begin(job="scripts_txt2img") + if selectable_scripts is not None: + p.script_args = script_args + processed = scripts.scripts_txt2img.run(p, *p.script_args) # Need to pass args as list here + else: + p.script_args = tuple(script_args) # Need to pass 
args as tuple here + processed = process_images(p) + finally: + shared.state.end() + + b64images = list(map(encode_pil_to_base64, processed.images)) if send_images else [] + + return models.TextToImageResponse(images=b64images, parameters=vars(txt2imgreq), info=processed.js()) + + def img2imgapi(self, img2imgreq: models.StableDiffusionImg2ImgProcessingAPI): + init_images = img2imgreq.init_images + if init_images is None: + raise HTTPException(status_code=404, detail="Init image not found") + + mask = img2imgreq.mask + if mask: + mask = decode_base64_to_image(mask) + + script_runner = scripts.scripts_img2img + if not script_runner.scripts: + script_runner.initialize_scripts(True) + ui.create_ui() + if not self.default_script_arg_img2img: + self.default_script_arg_img2img = self.init_default_script_args(script_runner) + selectable_scripts, selectable_script_idx = self.get_selectable_script(img2imgreq.script_name, script_runner) + + populate = img2imgreq.copy(update={ # Override __init__ params + "sampler_name": validate_sampler_name(img2imgreq.sampler_name or img2imgreq.sampler_index), + "do_not_save_samples": not img2imgreq.save_images, + "do_not_save_grid": not img2imgreq.save_images, + "mask": mask, + }) + if populate.sampler_name: + populate.sampler_index = None # prevent a warning later on + + args = vars(populate) + args.pop('include_init_images', None) # this is meant to be done by "exclude": True in model, but it's for a reason that I cannot determine. + args.pop('script_name', None) + args.pop('script_args', None) # will refeed them to the pipeline directly after initializing them + args.pop('alwayson_scripts', None) + + script_args = self.init_script_args(img2imgreq, self.default_script_arg_img2img, selectable_scripts, selectable_script_idx, script_runner) + + send_images = args.pop('send_images', True) + args.pop('save_images', None) + + with self.queue_lock: + with closing(StableDiffusionProcessingImg2Img(sd_model=shared.sd_model, **args)) as p: + p.init_images = [decode_base64_to_image(x) for x in init_images] + p.scripts = script_runner + p.outpath_grids = opts.outdir_img2img_grids + p.outpath_samples = opts.outdir_img2img_samples + + try: + shared.state.begin(job="scripts_img2img") + if selectable_scripts is not None: + p.script_args = script_args + processed = scripts.scripts_img2img.run(p, *p.script_args) # Need to pass args as list here + else: + p.script_args = tuple(script_args) # Need to pass args as tuple here + processed = process_images(p) + finally: + shared.state.end() + + b64images = list(map(encode_pil_to_base64, processed.images)) if send_images else [] + + if not img2imgreq.include_init_images: + img2imgreq.init_images = None + img2imgreq.mask = None + + return models.ImageToImageResponse(images=b64images, parameters=vars(img2imgreq), info=processed.js()) + + def extras_single_image_api(self, req: models.ExtrasSingleImageRequest): + reqDict = setUpscalers(req) + + reqDict['image'] = decode_base64_to_image(reqDict['image']) + + with self.queue_lock: + result = postprocessing.run_extras(extras_mode=0, image_folder="", input_dir="", output_dir="", save_output=False, **reqDict) + + return models.ExtrasSingleImageResponse(image=encode_pil_to_base64(result[0][0]), html_info=result[1]) + + def extras_batch_images_api(self, req: models.ExtrasBatchImagesRequest): + reqDict = setUpscalers(req) + + image_list = reqDict.pop('imageList', []) + image_folder = [decode_base64_to_image(x.data) for x in image_list] + + with self.queue_lock: + result = 
postprocessing.run_extras(extras_mode=1, image_folder=image_folder, image="", input_dir="", output_dir="", save_output=False, **reqDict) + + return models.ExtrasBatchImagesResponse(images=list(map(encode_pil_to_base64, result[0])), html_info=result[1]) + + def pnginfoapi(self, req: models.PNGInfoRequest): + if(not req.image.strip()): + return models.PNGInfoResponse(info="") + + image = decode_base64_to_image(req.image.strip()) + if image is None: + return models.PNGInfoResponse(info="") + + geninfo, items = images.read_info_from_image(image) + if geninfo is None: + geninfo = "" + + items = {**{'parameters': geninfo}, **items} + + return models.PNGInfoResponse(info=geninfo, items=items) + + def progressapi(self, req: models.ProgressRequest = Depends()): + # copy from check_progress_call of ui.py + + if shared.state.job_count == 0: + return models.ProgressResponse(progress=0, eta_relative=0, state=shared.state.dict(), textinfo=shared.state.textinfo) + + # avoid dividing zero + progress = 0.01 + + if shared.state.job_count > 0: + progress += shared.state.job_no / shared.state.job_count + if shared.state.sampling_steps > 0: + progress += 1 / shared.state.job_count * shared.state.sampling_step / shared.state.sampling_steps + + time_since_start = time.time() - shared.state.time_start + eta = (time_since_start/progress) + eta_relative = eta-time_since_start + + progress = min(progress, 1) + + shared.state.set_current_image() + + current_image = None + if shared.state.current_image and not req.skip_current_image: + current_image = encode_pil_to_base64(shared.state.current_image) + + return models.ProgressResponse(progress=progress, eta_relative=eta_relative, state=shared.state.dict(), current_image=current_image, textinfo=shared.state.textinfo) + + def interrogateapi(self, interrogatereq: models.InterrogateRequest): + image_b64 = interrogatereq.image + if image_b64 is None: + raise HTTPException(status_code=404, detail="Image not found") + + img = decode_base64_to_image(image_b64) + img = img.convert('RGB') + + # Override object param + with self.queue_lock: + if interrogatereq.model == "clip": + processed = shared.interrogator.interrogate(img) + elif interrogatereq.model == "deepdanbooru": + processed = deepbooru.model.tag(img) + else: + raise HTTPException(status_code=404, detail="Model not found") + + return models.InterrogateResponse(caption=processed) + + def interruptapi(self): + shared.state.interrupt() + + return {} + + def unloadapi(self): + unload_model_weights() + + return {} + + def reloadapi(self): + reload_model_weights() + + return {} + + def skip(self): + shared.state.skip() + + def get_config(self): + options = {} + for key in shared.opts.data.keys(): + metadata = shared.opts.data_labels.get(key) + if(metadata is not None): + options.update({key: shared.opts.data.get(key, shared.opts.data_labels.get(key).default)}) + else: + options.update({key: shared.opts.data.get(key, None)}) + + return options + + def set_config(self, req: Dict[str, Any]): + checkpoint_name = req.get("sd_model_checkpoint", None) + if checkpoint_name is not None and checkpoint_name not in checkpoint_aliases: + raise RuntimeError(f"model {checkpoint_name!r} not found") + + for k, v in req.items(): + shared.opts.set(k, v) + + shared.opts.save(shared.config_filename) + return + + def get_cmd_flags(self): + return vars(shared.cmd_opts) + + def get_samplers(self): + return [{"name": sampler[0], "aliases":sampler[2], "options":sampler[3]} for sampler in sd_samplers.all_samplers] + + def get_upscalers(self): + return 
[ + { + "name": upscaler.name, + "model_name": upscaler.scaler.model_name, + "model_path": upscaler.data_path, + "model_url": None, + "scale": upscaler.scale, + } + for upscaler in shared.sd_upscalers + ] + + def get_latent_upscale_modes(self): + return [ + { + "name": upscale_mode, + } + for upscale_mode in [*(shared.latent_upscale_modes or {})] + ] + + def get_sd_models(self): + return [{"title": x.title, "model_name": x.model_name, "hash": x.shorthash, "sha256": x.sha256, "filename": x.filename, "config": find_checkpoint_config_near_filename(x)} for x in checkpoints_list.values()] + + def get_sd_vaes(self): + return [{"model_name": x, "filename": vae_dict[x]} for x in vae_dict.keys()] + + def get_hypernetworks(self): + return [{"name": name, "path": shared.hypernetworks[name]} for name in shared.hypernetworks] + + def get_face_restorers(self): + return [{"name": x.name(), "cmd_dir": getattr(x, "cmd_dir", None)} for x in shared.face_restorers] + + def get_realesrgan_models(self): + return [{"name": x.name, "path": x.data_path, "scale": x.scale} for x in get_realesrgan_models(None)] + + def get_prompt_styles(self): + styleList = [] + for k in shared.prompt_styles.styles: + style = shared.prompt_styles.styles[k] + styleList.append({"name": style[0], "prompt": style[1], "negative_prompt": style[2]}) + + return styleList + + def get_embeddings(self): + db = sd_hijack.model_hijack.embedding_db + + def convert_embedding(embedding): + return { + "step": embedding.step, + "sd_checkpoint": embedding.sd_checkpoint, + "sd_checkpoint_name": embedding.sd_checkpoint_name, + "shape": embedding.shape, + "vectors": embedding.vectors, + } + + def convert_embeddings(embeddings): + return {embedding.name: convert_embedding(embedding) for embedding in embeddings.values()} + + return { + "loaded": convert_embeddings(db.word_embeddings), + "skipped": convert_embeddings(db.skipped_embeddings), + } + + def refresh_checkpoints(self): + with self.queue_lock: + shared.refresh_checkpoints() + + def create_embedding(self, args: dict): + try: + shared.state.begin(job="create_embedding") + filename = create_embedding(**args) # create empty embedding + sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings() # reload embeddings so new one can be immediately used + return models.CreateResponse(info=f"create embedding filename: {filename}") + except AssertionError as e: + return models.CreateResponse(info=f"create embedding error: {e}") + finally: + shared.state.end() + + + def create_hypernetwork(self, args: dict): + try: + shared.state.begin(job="create_hypernetwork") + filename = create_hypernetwork(**args) # create empty hypernetwork + return models.CreateResponse(info=f"create hypernetwork filename: {filename}") + except AssertionError as e: + return models.CreateResponse(info=f"create hypernetwork error: {e}") + finally: + shared.state.end() + + def preprocess(self, args: dict): + try: + shared.state.begin(job="preprocess") + preprocess(**args) # quick operation unless blip/booru interrogation is enabled + shared.state.end() + return models.PreprocessResponse(info='preprocess complete') + except KeyError as e: + return models.PreprocessResponse(info=f"preprocess error: invalid token: {e}") + except Exception as e: + return models.PreprocessResponse(info=f"preprocess error: {e}") + finally: + shared.state.end() + + def train_embedding(self, args: dict): + try: + shared.state.begin(job="train_embedding") + apply_optimizations = shared.opts.training_xattention_optimizations + error = None + filename = '' + if not
apply_optimizations: + sd_hijack.undo_optimizations() + try: + embedding, filename = train_embedding(**args) # can take a long time to complete + except Exception as e: + error = e + finally: + if not apply_optimizations: + sd_hijack.apply_optimizations() + return models.TrainResponse(info=f"train embedding complete: filename: {filename} error: {error}") + except Exception as msg: + return models.TrainResponse(info=f"train embedding error: {msg}") + finally: + shared.state.end() + + def train_hypernetwork(self, args: dict): + try: + shared.state.begin(job="train_hypernetwork") + shared.loaded_hypernetworks = [] + apply_optimizations = shared.opts.training_xattention_optimizations + error = None + filename = '' + if not apply_optimizations: + sd_hijack.undo_optimizations() + try: + hypernetwork, filename = train_hypernetwork(**args) + except Exception as e: + error = e + finally: + shared.sd_model.cond_stage_model.to(devices.device) + shared.sd_model.first_stage_model.to(devices.device) + if not apply_optimizations: + sd_hijack.apply_optimizations() + shared.state.end() + return models.TrainResponse(info=f"train hypernetwork complete: filename: {filename} error: {error}") + except Exception as exc: + return models.TrainResponse(info=f"train hypernetwork error: {exc}") + finally: + shared.state.end() + + def get_memory(self): + try: + import os + import psutil + process = psutil.Process(os.getpid()) + res = process.memory_info() # only rss is cross-platform guaranteed so we don't rely on other values + ram_total = 100 * res.rss / process.memory_percent() # total memory is estimated from rss, since the actual total is not available cross-platform + ram = { 'free': ram_total - res.rss, 'used': res.rss, 'total': ram_total } + except Exception as err: + ram = { 'error': f'{err}' } + try: + import torch + if torch.cuda.is_available(): + s = torch.cuda.mem_get_info() + system = { 'free': s[0], 'used': s[1] - s[0], 'total': s[1] } + s = dict(torch.cuda.memory_stats(shared.device)) + allocated = { 'current': s['allocated_bytes.all.current'], 'peak': s['allocated_bytes.all.peak'] } + reserved = { 'current': s['reserved_bytes.all.current'], 'peak': s['reserved_bytes.all.peak'] } + active = { 'current': s['active_bytes.all.current'], 'peak': s['active_bytes.all.peak'] } + inactive = { 'current': s['inactive_split_bytes.all.current'], 'peak': s['inactive_split_bytes.all.peak'] } + warnings = { 'retries': s['num_alloc_retries'], 'oom': s['num_ooms'] } + cuda = { + 'system': system, + 'active': active, + 'allocated': allocated, + 'reserved': reserved, + 'inactive': inactive, + 'events': warnings, + } + else: + cuda = {'error': 'unavailable'} + except Exception as err: + cuda = {'error': f'{err}'} + return models.MemoryResponse(ram=ram, cuda=cuda) + + def launch(self, server_name, port, root_path): + self.app.include_router(self.router) + uvicorn.run(self.app, host=server_name, port=port, timeout_keep_alive=shared.cmd_opts.timeout_keep_alive, root_path=root_path) + + def kill_webui(self): + restart.stop_program() + + def restart_webui(self): + if restart.is_restartable(): + restart.restart_program() + return Response(status_code=501) + + def stop_webui(self): + shared.state.server_command = "stop" + return Response("Stopping.") + diff --git a/modules/api/models.py b/modules/api/models.py new file mode 100644 index 0000000000000000000000000000000000000000..800c9b93f14794f429e32b053e9c24be0426d296 --- /dev/null +++ b/modules/api/models.py @@ -0,0 +1,312 @@ +import inspect + +from pydantic import BaseModel, Field, create_model
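+# [Editor's note: PydanticModelGenerator below derives these request models from the
+# processing classes' __init__ signatures, then appends the additional_fields passed
+# with each call. Illustratively (assuming "prompt" and "steps" are among the
+# introspected parameters), the generated txt2img model behaves like a hand-written
+# pydantic model:
+#     req = StableDiffusionTxt2ImgProcessingAPI(prompt="a cat", steps=10)
+#     req.sampler_index   # injected additional field, default "Euler"
+# Field aliases work as well, since allow_population_by_field_name is enabled on the
+# generated model.]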
+from typing import Any, Optional +from typing_extensions import Literal +from inflection import underscore +from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img +from modules.shared import sd_upscalers, opts, parser +from typing import Dict, List + +API_NOT_ALLOWED = [ + "self", + "kwargs", + "sd_model", + "outpath_samples", + "outpath_grids", + "sampler_index", + # "do_not_save_samples", + # "do_not_save_grid", + "extra_generation_params", + "overlay_images", + "do_not_reload_embeddings", + "seed_enable_extras", + "prompt_for_display", + "sampler_noise_scheduler_override", + "ddim_discretize" +] + +class ModelDef(BaseModel): + """Assistance Class for Pydantic Dynamic Model Generation""" + + field: str + field_alias: str + field_type: Any + field_value: Any + field_exclude: bool = False + + +class PydanticModelGenerator: + """ + Takes in created classes and stubs them out in a way FastAPI/Pydantic is happy about: + source_data is a snapshot of the default values produced by the class + params are the names of the actual keys required by __init__ + """ + + def __init__( + self, + model_name: str = None, + class_instance = None, + additional_fields = None, + ): + def field_type_generator(k, v): + # field_type = str if not overrides.get(k) else overrides[k]["type"] + # print(k, v.annotation, v.default) + field_type = v.annotation + + return Optional[field_type] + + def merge_class_params(class_): + all_classes = list(filter(lambda x: x is not object, inspect.getmro(class_))) + parameters = {} + for classes in all_classes: + parameters = {**parameters, **inspect.signature(classes.__init__).parameters} + return parameters + + + self._model_name = model_name + self._class_data = merge_class_params(class_instance) + + self._model_def = [ + ModelDef( + field=underscore(k), + field_alias=k, + field_type=field_type_generator(k, v), + field_value=v.default + ) + for (k,v) in self._class_data.items() if k not in API_NOT_ALLOWED + ] + + for fields in additional_fields: + self._model_def.append(ModelDef( + field=underscore(fields["key"]), + field_alias=fields["key"], + field_type=fields["type"], + field_value=fields["default"], + field_exclude=fields["exclude"] if "exclude" in fields else False)) + + def generate_model(self): + """ + Creates a pydantic BaseModel + from the json and overrides provided at initialization + """ + fields = { + d.field: (d.field_type, Field(default=d.field_value, alias=d.field_alias, exclude=d.field_exclude)) for d in self._model_def + } + DynamicModel = create_model(self._model_name, **fields) + DynamicModel.__config__.allow_population_by_field_name = True + DynamicModel.__config__.allow_mutation = True + return DynamicModel + +StableDiffusionTxt2ImgProcessingAPI = PydanticModelGenerator( + "StableDiffusionProcessingTxt2Img", + StableDiffusionProcessingTxt2Img, + [ + {"key": "sampler_index", "type": str, "default": "Euler"}, + {"key": "script_name", "type": str, "default": None}, + {"key": "script_args", "type": list, "default": []}, + {"key": "send_images", "type": bool, "default": True}, + {"key": "save_images", "type": bool, "default": False}, + {"key": "alwayson_scripts", "type": dict, "default": {}}, + ] +).generate_model() + +StableDiffusionImg2ImgProcessingAPI = PydanticModelGenerator( + "StableDiffusionProcessingImg2Img", + StableDiffusionProcessingImg2Img, + [ + {"key": "sampler_index", "type": str, "default": "Euler"}, + {"key": "init_images", "type": list, "default": None}, + {"key": "denoising_strength", "type": 
float, "default": 0.75}, + {"key": "mask", "type": str, "default": None}, + {"key": "include_init_images", "type": bool, "default": False, "exclude": True}, + {"key": "script_name", "type": str, "default": None}, + {"key": "script_args", "type": list, "default": []}, + {"key": "send_images", "type": bool, "default": True}, + {"key": "save_images", "type": bool, "default": False}, + {"key": "alwayson_scripts", "type": dict, "default": {}}, + ] +).generate_model() + +class TextToImageResponse(BaseModel): + images: List[str] = Field(default=None, title="Images", description="The generated images in base64 format.") + parameters: dict + info: str + +class ImageToImageResponse(BaseModel): + images: List[str] = Field(default=None, title="Images", description="The generated images in base64 format.") + parameters: dict + info: str + +class ExtrasBaseRequest(BaseModel): + resize_mode: Literal[0, 1] = Field(default=0, title="Resize Mode", description="Sets the resize mode: 0 to upscale by upscaling_resize amount, 1 to upscale up to upscaling_resize_h x upscaling_resize_w.") + show_extras_results: bool = Field(default=True, title="Show results", description="Should the backend return the generated image?") + gfpgan_visibility: float = Field(default=0, title="GFPGAN Visibility", ge=0, le=1, allow_inf_nan=False, description="Sets the visibility of GFPGAN; values should be between 0 and 1.") + codeformer_visibility: float = Field(default=0, title="CodeFormer Visibility", ge=0, le=1, allow_inf_nan=False, description="Sets the visibility of CodeFormer; values should be between 0 and 1.") + codeformer_weight: float = Field(default=0, title="CodeFormer Weight", ge=0, le=1, allow_inf_nan=False, description="Sets the weight of CodeFormer; values should be between 0 and 1.") + upscaling_resize: float = Field(default=2, title="Upscaling Factor", ge=1, le=8, description="By how much to upscale the image; only used when resize_mode=0.") + upscaling_resize_w: int = Field(default=512, title="Target Width", ge=1, description="Target width for the upscaler to hit. Only used when resize_mode=1.") + upscaling_resize_h: int = Field(default=512, title="Target Height", ge=1, description="Target height for the upscaler to hit. Only used when resize_mode=1.") + upscaling_crop: bool = Field(default=True, title="Crop to fit", description="Should the upscaler crop the image to fit in the chosen size?") + upscaler_1: str = Field(default="None", title="Main upscaler", description=f"The name of the main upscaler to use; it has to be one of these: {' , '.join([x.name for x in sd_upscalers])}") + upscaler_2: str = Field(default="None", title="Secondary upscaler", description=f"The name of the secondary upscaler to use; it has to be one of these: {' , '.join([x.name for x in sd_upscalers])}") + extras_upscaler_2_visibility: float = Field(default=0, title="Secondary upscaler visibility", ge=0, le=1, allow_inf_nan=False, description="Sets the visibility of the secondary upscaler; values should be between 0 and 1.") + upscale_first: bool = Field(default=False, title="Upscale first", description="Should the upscaler run before restoring faces?") + +class ExtraBaseResponse(BaseModel): + html_info: str = Field(title="HTML info", description="A series of HTML tags containing the process info.") + +class ExtrasSingleImageRequest(ExtrasBaseRequest): + image: str = Field(default="", title="Image", description="Image to work on, must be a Base64 string containing the image's data.") + +class ExtrasSingleImageResponse(ExtraBaseResponse): + image: str = Field(default=None, title="Image", description="The generated image in base64 format.") + +class FileData(BaseModel): + data: str = Field(title="File data", description="Base64 representation of the file") + name: str = Field(title="File name") + +class ExtrasBatchImagesRequest(ExtrasBaseRequest): + imageList: List[FileData] = Field(title="Images", description="List of images to work on. Must be Base64 strings") + +class ExtrasBatchImagesResponse(ExtraBaseResponse): + images: List[str] = Field(title="Images", description="The generated images in base64 format.") + +class PNGInfoRequest(BaseModel): + image: str = Field(title="Image", description="The base64 encoded PNG image") + +class PNGInfoResponse(BaseModel): + info: str = Field(title="Image info", description="A string with the parameters used to generate the image") + items: dict = Field(title="Items", description="An object containing all the info the image had") + +class ProgressRequest(BaseModel): + skip_current_image: bool = Field(default=False, title="Skip current image", description="Skip current image serialization") + +class ProgressResponse(BaseModel): + progress: float = Field(title="Progress", description="The progress with a range of 0 to 1") + eta_relative: float = Field(title="ETA in secs") + state: dict = Field(title="State", description="The current state snapshot") + current_image: str = Field(default=None, title="Current image", description="The current image in base64 format.
opts.show_progress_every_n_steps is required for this to work.") + textinfo: str = Field(default=None, title="Info text", description="Info text used by WebUI.") + +class InterrogateRequest(BaseModel): + image: str = Field(default="", title="Image", description="Image to work on, must be a Base64 string containing the image's data.") + model: str = Field(default="clip", title="Model", description="The interrogate model used.") + +class InterrogateResponse(BaseModel): + caption: str = Field(default=None, title="Caption", description="The generated caption for the image.") + +class TrainResponse(BaseModel): + info: str = Field(title="Train info", description="Response string from train embedding or hypernetwork task.") + +class CreateResponse(BaseModel): + info: str = Field(title="Create info", description="Response string from create embedding or hypernetwork task.") + +class PreprocessResponse(BaseModel): + info: str = Field(title="Preprocess info", description="Response string from preprocessing task.") + +fields = {} +for key, metadata in opts.data_labels.items(): + value = opts.data.get(key) + optType = opts.typemap.get(type(metadata.default), type(metadata.default)) if metadata.default else Any + + if metadata is not None: + fields.update({key: (Optional[optType], Field(default=metadata.default, description=metadata.label))}) + else: + fields.update({key: (Optional[optType], Field())}) + +OptionsModel = create_model("Options", **fields) + +flags = {} +_options = vars(parser)['_option_string_actions'] +for key in _options: + if(_options[key].dest != 'help'): + flag = _options[key] + _type = str + if _options[key].default is not None: + _type = type(_options[key].default) + flags.update({flag.dest: (_type, Field(default=flag.default, description=flag.help))}) + +FlagsModel = create_model("Flags", **flags) + +class SamplerItem(BaseModel): + name: str = Field(title="Name") + aliases: List[str] = Field(title="Aliases") + options: Dict[str, str] = Field(title="Options") + +class UpscalerItem(BaseModel): + name: str = Field(title="Name") + model_name: Optional[str] = Field(title="Model Name") + model_path: Optional[str] = Field(title="Path") + model_url: Optional[str] = Field(title="URL") + scale: Optional[float] = Field(title="Scale") + +class LatentUpscalerModeItem(BaseModel): + name: str = Field(title="Name") + +class SDModelItem(BaseModel): + title: str = Field(title="Title") + model_name: str = Field(title="Model Name") + hash: Optional[str] = Field(title="Short hash") + sha256: Optional[str] = Field(title="sha256 hash") + filename: str = Field(title="Filename") + config: Optional[str] = Field(title="Config file") + +class SDVaeItem(BaseModel): + model_name: str = Field(title="Model Name") + filename: str = Field(title="Filename") + +class HypernetworkItem(BaseModel): + name: str = Field(title="Name") + path: Optional[str] = Field(title="Path") + +class FaceRestorerItem(BaseModel): + name: str = Field(title="Name") + cmd_dir: Optional[str] = Field(title="Path") + +class RealesrganItem(BaseModel): + name: str = Field(title="Name") + path: Optional[str] = Field(title="Path") + scale: Optional[int] = Field(title="Scale") + +class PromptStyleItem(BaseModel): + name: str = Field(title="Name") + prompt: Optional[str] = Field(title="Prompt") + negative_prompt: Optional[str] = Field(title="Negative Prompt") + + +class EmbeddingItem(BaseModel): + step: Optional[int] = Field(title="Step", description="The number of steps that were used to train this embedding, if available") + sd_checkpoint: 
Optional[str] = Field(title="SD Checkpoint", description="The hash of the checkpoint this embedding was trained on, if available") + sd_checkpoint_name: Optional[str] = Field(title="SD Checkpoint Name", description="The name of the checkpoint this embedding was trained on, if available. Note that this is the name that was used by the trainer; for a stable identifier, use `sd_checkpoint` instead") + shape: int = Field(title="Shape", description="The length of each individual vector in the embedding") + vectors: int = Field(title="Vectors", description="The number of vectors in the embedding") + +class EmbeddingsResponse(BaseModel): + loaded: Dict[str, EmbeddingItem] = Field(title="Loaded", description="Embeddings loaded for the current model") + skipped: Dict[str, EmbeddingItem] = Field(title="Skipped", description="Embeddings skipped for the current model (likely due to architecture incompatibility)") + +class MemoryResponse(BaseModel): + ram: dict = Field(title="RAM", description="System memory stats") + cuda: dict = Field(title="CUDA", description="NVIDIA CUDA memory stats") + + +class ScriptsList(BaseModel): + txt2img: list = Field(default=None, title="Txt2img", description="Titles of scripts (txt2img)") + img2img: list = Field(default=None, title="Img2img", description="Titles of scripts (img2img)") + + +class ScriptArg(BaseModel): + label: str = Field(default=None, title="Label", description="Name of the argument in UI") + value: Optional[Any] = Field(default=None, title="Value", description="Default value of the argument") + minimum: Optional[Any] = Field(default=None, title="Minimum", description="Minimum allowed value for the argument in UI") + maximum: Optional[Any] = Field(default=None, title="Maximum", description="Maximum allowed value for the argument in UI") + step: Optional[Any] = Field(default=None, title="Step", description="Step for changing value of the argument in UI") + choices: Optional[List[str]] = Field(default=None, title="Choices", description="Possible values for the argument") + + +class ScriptInfo(BaseModel): + name: str = Field(default=None, title="Name", description="Script name") + is_alwayson: bool = Field(default=None, title="IsAlwayson", description="Flag specifying whether this script is an alwayson script") + is_img2img: bool = Field(default=None, title="IsImg2img", description="Flag specifying whether this script is an img2img script") + args: List[ScriptArg] = Field(title="Arguments", description="List of script's arguments") diff --git a/modules/cache.py b/modules/cache.py new file mode 100644 index 0000000000000000000000000000000000000000..f624cebc7b9e851a20de0b64e67dd26c2d81ef7f --- /dev/null +++ b/modules/cache.py @@ -0,0 +1,120 @@ +import json +import os.path +import threading +import time + +from modules.paths import data_path, script_path + +cache_filename = os.path.join(data_path, "cache.json") +cache_data = None +cache_lock = threading.Lock() + +dump_cache_after = None +dump_cache_thread = None + + +def dump_cache(): + """ + Marks cache for writing to disk. 5 seconds after no one else flags the cache for writing, it is written.
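+
+ Repeated calls only push the deadline forward: the writer thread sleeps until
+ 5 seconds have passed since the most recent call, then serializes cache_data
+ to cache.json once, under cache_lock.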
+ """ + + global dump_cache_after + global dump_cache_thread + + def thread_func(): + global dump_cache_after + global dump_cache_thread + + while dump_cache_after is not None and time.time() < dump_cache_after: + time.sleep(1) + + with cache_lock: + with open(cache_filename, "w", encoding="utf8") as file: + json.dump(cache_data, file, indent=4) + + dump_cache_after = None + dump_cache_thread = None + + with cache_lock: + dump_cache_after = time.time() + 5 + if dump_cache_thread is None: + dump_cache_thread = threading.Thread(name='cache-writer', target=thread_func) + dump_cache_thread.start() + + +def cache(subsection): + """ + Retrieves or initializes a cache for a specific subsection. + + Parameters: + subsection (str): The subsection identifier for the cache. + + Returns: + dict: The cache data for the specified subsection. + """ + + global cache_data + + if cache_data is None: + with cache_lock: + if cache_data is None: + if not os.path.isfile(cache_filename): + cache_data = {} + else: + try: + with open(cache_filename, "r", encoding="utf8") as file: + cache_data = json.load(file) + except Exception: + os.replace(cache_filename, os.path.join(script_path, "tmp", "cache.json")) + print('[ERROR] issue occurred while trying to read cache.json, move current cache to tmp/cache.json and create new cache') + cache_data = {} + + s = cache_data.get(subsection, {}) + cache_data[subsection] = s + + return s + + +def cached_data_for_file(subsection, title, filename, func): + """ + Retrieves or generates data for a specific file, using a caching mechanism. + + Parameters: + subsection (str): The subsection of the cache to use. + title (str): The title of the data entry in the subsection of the cache. + filename (str): The path to the file to be checked for modifications. + func (callable): A function that generates the data if it is not available in the cache. + + Returns: + dict or None: The cached or generated data, or None if data generation fails. + + The `cached_data_for_file` function implements a caching mechanism for data stored in files. + It checks if the data associated with the given `title` is present in the cache and compares the + modification time of the file with the cached modification time. If the file has been modified, + the cache is considered invalid and the data is regenerated using the provided `func`. + Otherwise, the cached data is returned. + + If the data generation fails, None is returned to indicate the failure. Otherwise, the generated + or cached data is returned as a dictionary. 
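+
+ Example (illustrative; compute_sha256 is a hypothetical helper):
+
+ sha256 = cached_data_for_file('hashes', 'checkpoint/model.safetensors', filename, lambda: compute_sha256(filename))
+
+ The callable only runs when the entry is missing or when the file's mtime is
+ newer than the cached mtime; the newly generated value is then written back
+ via dump_cache().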
+ """ + + existing_cache = cache(subsection) + ondisk_mtime = os.path.getmtime(filename) + + entry = existing_cache.get(title) + if entry: + cached_mtime = entry.get("mtime", 0) + if ondisk_mtime > cached_mtime: + entry = None + + if not entry or 'value' not in entry: + value = func() + if value is None: + return None + + entry = {'mtime': ondisk_mtime, 'value': value} + existing_cache[title] = entry + + dump_cache() + + return entry['value'] diff --git a/modules/call_queue.py b/modules/call_queue.py new file mode 100644 index 0000000000000000000000000000000000000000..4a36192cae2406e73893d9a6bd352ee7905670c6 --- /dev/null +++ b/modules/call_queue.py @@ -0,0 +1,117 @@ +from functools import wraps +import html +import threading +import time + +from modules import shared, progress, errors + +queue_lock = threading.Lock() + + +def wrap_queued_call(func): + def f(*args, **kwargs): + with queue_lock: + res = func(*args, **kwargs) + + return res + + return f + + +def wrap_gradio_gpu_call(func, extra_outputs=None): + @wraps(func) + def f(*args, **kwargs): + + # if the first argument is a string that says "task(...)", it is treated as a job id + if args and type(args[0]) == str and args[0].startswith("task(") and args[0].endswith(")"): + id_task = args[0] + progress.add_task_to_queue(id_task) + else: + id_task = None + + with queue_lock: + shared.state.begin(job=id_task) + progress.start_task(id_task) + + try: + res = func(*args, **kwargs) + progress.record_results(id_task, res) + finally: + progress.finish_task(id_task) + + shared.state.end() + + return res + + return wrap_gradio_call(f, extra_outputs=extra_outputs, add_stats=True) + + +def wrap_gradio_call(func, extra_outputs=None, add_stats=False): + @wraps(func) + def f(*args, extra_outputs_array=extra_outputs, **kwargs): + run_memmon = shared.opts.memmon_poll_rate > 0 and not shared.mem_mon.disabled and add_stats + if run_memmon: + shared.mem_mon.monitor() + t = time.perf_counter() + + try: + res = list(func(*args, **kwargs)) + except Exception as e: + # When printing out our debug argument list, + # do not print out more than a 100 KB of text + max_debug_str_len = 131072 + message = "Error completing request" + arg_str = f"Arguments: {args} {kwargs}"[:max_debug_str_len] + if len(arg_str) > max_debug_str_len: + arg_str += f" (Argument list truncated at {max_debug_str_len}/{len(arg_str)} characters)" + errors.report(f"{message}\n{arg_str}", exc_info=True) + + shared.state.job = "" + shared.state.job_count = 0 + + if extra_outputs_array is None: + extra_outputs_array = [None, ''] + + error_message = f'{type(e).__name__}: {e}' + res = extra_outputs_array + [f"
<div class='error'>{html.escape(error_message)}</div>"] + + shared.state.skipped = False + shared.state.interrupted = False + shared.state.job_count = 0 + + if not add_stats: + return tuple(res) + + elapsed = time.perf_counter() - t + elapsed_m = int(elapsed // 60) + elapsed_s = elapsed % 60 + elapsed_text = f"{elapsed_s:.1f} sec." + if elapsed_m > 0: + elapsed_text = f"{elapsed_m} min. "+elapsed_text + + if run_memmon: + mem_stats = {k: -(v//-(1024*1024)) for k, v in shared.mem_mon.stop().items()} + active_peak = mem_stats['active_peak'] + reserved_peak = mem_stats['reserved_peak'] + sys_peak = mem_stats['system_peak'] + sys_total = mem_stats['total'] + sys_pct = sys_peak/max(sys_total, 1) * 100 + + tooltip_a = "Active: peak amount of video memory used during generation (excluding cached data)" + tooltip_r = "Reserved: total amount of video memory allocated by the Torch library" + tooltip_sys = "System: peak amount of video memory allocated by all running programs, out of total capacity" + + text_a = f"<abbr title='{tooltip_a}'>A</abbr>: {active_peak/1024:.2f} GB" + text_r = f"<abbr title='{tooltip_r}'>R</abbr>: {reserved_peak/1024:.2f} GB" + text_sys = f"<abbr title='{tooltip_sys}'>Sys</abbr>: {sys_peak/1024:.1f}/{sys_total/1024:g} GB ({sys_pct:.1f}%)" + + vram_html = f"<p class='vram'>{text_a}, {text_r}, {text_sys}</p>" + else: + vram_html = '' + + # last item is always HTML + res[-1] += f"<div class='performance'><p class='time'>Time taken: <wbr>{elapsed_text}</p>{vram_html}</div>
" + + return tuple(res) + + return f diff --git a/modules/cmd_args.py b/modules/cmd_args.py new file mode 100644 index 0000000000000000000000000000000000000000..e6e383f3f144ae6cb0045d89137eadef09a79d80 --- /dev/null +++ b/modules/cmd_args.py @@ -0,0 +1,112 @@ +import argparse +import json +import os +from modules.paths_internal import models_path, script_path, data_path, extensions_dir, extensions_builtin_dir, sd_default_config, sd_model_file # noqa: F401 + +parser = argparse.ArgumentParser() + +parser.add_argument("-f", action='store_true', help=argparse.SUPPRESS) # allows running as root; implemented outside of webui +parser.add_argument("--update-all-extensions", action='store_true', help="launch.py argument: download updates for all extensions when starting the program") +parser.add_argument("--skip-python-version-check", action='store_true', help="launch.py argument: do not check python version") +parser.add_argument("--skip-torch-cuda-test", action='store_true', help="launch.py argument: do not check if CUDA is able to work properly") +parser.add_argument("--reinstall-xformers", action='store_true', help="launch.py argument: install the appropriate version of xformers even if you have some version already installed") +parser.add_argument("--reinstall-torch", action='store_true', help="launch.py argument: install the appropriate version of torch even if you have some version already installed") +parser.add_argument("--update-check", action='store_true', help="launch.py argument: check for updates at startup") +parser.add_argument("--test-server", action='store_true', help="launch.py argument: configure server for testing") +parser.add_argument("--skip-prepare-environment", action='store_true', help="launch.py argument: skip all environment preparation") +parser.add_argument("--skip-install", action='store_true', help="launch.py argument: skip installation of packages") +parser.add_argument("--do-not-download-clip", action='store_true', help="do not download CLIP model even if it's not included in the checkpoint") +parser.add_argument("--data-dir", type=str, default=os.path.dirname(os.path.dirname(os.path.realpath(__file__))), help="base path where all user data is stored") +parser.add_argument("--config", type=str, default=sd_default_config, help="path to config which constructs model",) +parser.add_argument("--ckpt", type=str, default=sd_model_file, help="path to checkpoint of stable diffusion model; if specified, this checkpoint will be added to the list of checkpoints and loaded",) +parser.add_argument("--ckpt-dir", type=str, default=None, help="Path to directory with stable diffusion checkpoints") +parser.add_argument("--vae-dir", type=str, default=None, help="Path to directory with VAE files") +parser.add_argument("--gfpgan-dir", type=str, help="GFPGAN directory", default=('./src/gfpgan' if os.path.exists('./src/gfpgan') else './GFPGAN')) +parser.add_argument("--gfpgan-model", type=str, help="GFPGAN model file name", default=None) +parser.add_argument("--no-half", action='store_true', help="do not switch the model to 16-bit floats") +parser.add_argument("--no-half-vae", action='store_true', help="do not switch the VAE model to 16-bit floats") +parser.add_argument("--no-progressbar-hiding", action='store_true', help="do not hide progressbar in gradio UI (we hide it because it slows down ML if you have hardware acceleration in browser)") +parser.add_argument("--max-batch-count", type=int, default=16, help="maximum batch count value for the UI") 
+parser.add_argument("--embeddings-dir", type=str, default=os.path.join(data_path, 'embeddings'), help="embeddings directory for textual inversion (default: embeddings)") +parser.add_argument("--textual-inversion-templates-dir", type=str, default=os.path.join(script_path, 'textual_inversion_templates'), help="directory with textual inversion templates") +parser.add_argument("--hypernetwork-dir", type=str, default=os.path.join(models_path, 'hypernetworks'), help="hypernetwork directory") +parser.add_argument("--localizations-dir", type=str, default=os.path.join(script_path, 'localizations'), help="localizations directory") +parser.add_argument("--allow-code", action='store_true', help="allow custom script execution from webui") +parser.add_argument("--medvram", action='store_true', help="enable stable diffusion model optimizations for sacrificing a little speed for low VRM usage") +parser.add_argument("--lowvram", action='store_true', help="enable stable diffusion model optimizations for sacrificing a lot of speed for very low VRM usage") +parser.add_argument("--lowram", action='store_true', help="load stable diffusion checkpoint weights to VRAM instead of RAM") +parser.add_argument("--always-batch-cond-uncond", action='store_true', help="disables cond/uncond batching that is enabled to save memory with --medvram or --lowvram") +parser.add_argument("--unload-gfpgan", action='store_true', help="does not do anything.") +parser.add_argument("--precision", type=str, help="evaluate at this precision", choices=["full", "autocast"], default="autocast") +parser.add_argument("--upcast-sampling", action='store_true', help="upcast sampling. No effect with --no-half. Usually produces similar results to --no-half with better performance while using less memory.") +parser.add_argument("--share", action='store_true', help="use share=True for gradio and make the UI accessible through their site") +parser.add_argument("--ngrok", type=str, help="ngrok authtoken, alternative to gradio --share", default=None) +parser.add_argument("--ngrok-region", type=str, help="does not do anything.", default="") +parser.add_argument("--ngrok-options", type=json.loads, help='The options to pass to ngrok in JSON format, e.g.: \'{"authtoken_from_env":true, "basic_auth":"user:password", "oauth_provider":"google", "oauth_allow_emails":"user@asdf.com"}\'', default=dict()) +parser.add_argument("--enable-insecure-extension-access", action='store_true', help="enable extensions tab regardless of other options") +parser.add_argument("--codeformer-models-path", type=str, help="Path to directory with codeformer model file(s).", default=os.path.join(models_path, 'Codeformer')) +parser.add_argument("--gfpgan-models-path", type=str, help="Path to directory with GFPGAN model file(s).", default=os.path.join(models_path, 'GFPGAN')) +parser.add_argument("--esrgan-models-path", type=str, help="Path to directory with ESRGAN model file(s).", default=os.path.join(models_path, 'ESRGAN')) +parser.add_argument("--bsrgan-models-path", type=str, help="Path to directory with BSRGAN model file(s).", default=os.path.join(models_path, 'BSRGAN')) +parser.add_argument("--realesrgan-models-path", type=str, help="Path to directory with RealESRGAN model file(s).", default=os.path.join(models_path, 'RealESRGAN')) +parser.add_argument("--clip-models-path", type=str, help="Path to directory with CLIP model file(s).", default=None) +parser.add_argument("--xformers", action='store_true', help="enable xformers for cross attention layers") 
+parser.add_argument("--force-enable-xformers", action='store_true', help="enable xformers for cross attention layers regardless of whether the checking code thinks you can run it; do not make bug reports if this fails to work") +parser.add_argument("--xformers-flash-attention", action='store_true', help="enable xformers with Flash Attention to improve reproducibility (supported for SD2.x or variant only)") +parser.add_argument("--deepdanbooru", action='store_true', help="does not do anything") +parser.add_argument("--opt-split-attention", action='store_true', help="prefer Doggettx's cross-attention layer optimization for automatic choice of optimization") +parser.add_argument("--opt-sub-quad-attention", action='store_true', help="prefer memory efficient sub-quadratic cross-attention layer optimization for automatic choice of optimization") +parser.add_argument("--sub-quad-q-chunk-size", type=int, help="query chunk size for the sub-quadratic cross-attention layer optimization to use", default=1024) +parser.add_argument("--sub-quad-kv-chunk-size", type=int, help="kv chunk size for the sub-quadratic cross-attention layer optimization to use", default=None) +parser.add_argument("--sub-quad-chunk-threshold", type=int, help="the percentage of VRAM threshold for the sub-quadratic cross-attention layer optimization to use chunking", default=None) +parser.add_argument("--opt-split-attention-invokeai", action='store_true', help="prefer InvokeAI's cross-attention layer optimization for automatic choice of optimization") +parser.add_argument("--opt-split-attention-v1", action='store_true', help="prefer older version of split attention optimization for automatic choice of optimization") +parser.add_argument("--opt-sdp-attention", action='store_true', help="prefer scaled dot product cross-attention layer optimization for automatic choice of optimization; requires PyTorch 2.*") +parser.add_argument("--opt-sdp-no-mem-attention", action='store_true', help="prefer scaled dot product cross-attention layer optimization without memory efficient attention for automatic choice of optimization, makes image generation deterministic; requires PyTorch 2.*") +parser.add_argument("--disable-opt-split-attention", action='store_true', help="prefer no cross-attention layer optimization for automatic choice of optimization") +parser.add_argument("--disable-nan-check", action='store_true', help="do not check if produced images/latent spaces have nans; useful for running without a checkpoint in CI") +parser.add_argument("--use-cpu", nargs='+', help="use CPU as torch device for specified modules", default=[], type=str.lower) +parser.add_argument("--listen", action='store_true', help="launch gradio with 0.0.0.0 as server name, allowing to respond to network requests") +parser.add_argument("--port", type=int, help="launch gradio with given server port, you need root/admin rights for ports < 1024, defaults to 7860 if available", default=None) +parser.add_argument("--show-negative-prompt", action='store_true', help="does not do anything", default=False) +parser.add_argument("--ui-config-file", type=str, help="filename to use for ui configuration", default=os.path.join(data_path, 'ui-config.json')) +parser.add_argument("--hide-ui-dir-config", action='store_true', help="hide directory configuration from webui", default=False) +parser.add_argument("--freeze-settings", action='store_true', help="disable editing settings", default=False) +parser.add_argument("--ui-settings-file", type=str, help="filename to use for ui settings", 
default=os.path.join(data_path, 'config.json')) +parser.add_argument("--gradio-debug", action='store_true', help="launch gradio with --debug option") +parser.add_argument("--gradio-auth", type=str, help='set gradio authentication like "username:password"; or comma-delimit multiple like "u1:p1,u2:p2,u3:p3"', default=None) +parser.add_argument("--gradio-auth-path", type=str, help='set gradio authentication file path ex. "/path/to/auth/file" same auth format as --gradio-auth', default=None) +parser.add_argument("--gradio-img2img-tool", type=str, help='does not do anything') +parser.add_argument("--gradio-inpaint-tool", type=str, help="does not do anything") +parser.add_argument("--gradio-allowed-path", action='append', help="add path to gradio's allowed_paths, make it possible to serve files from it") +parser.add_argument("--opt-channelslast", action='store_true', help="change memory type for stable diffusion to channels last") +parser.add_argument("--styles-file", type=str, help="filename to use for styles", default=os.path.join(data_path, 'styles.csv')) +parser.add_argument("--autolaunch", action='store_true', help="open the webui URL in the system's default browser upon launch", default=False) +parser.add_argument("--theme", type=str, help="launches the UI with light or dark theme", default=None) +parser.add_argument("--use-textbox-seed", action='store_true', help="use textbox for seeds in UI (no up/down, but possible to input long seeds)", default=False) +parser.add_argument("--disable-console-progressbars", action='store_true', help="do not output progressbars to console", default=False) +parser.add_argument("--enable-console-prompts", action='store_true', help="print prompts to console when generating with txt2img and img2img", default=False) +parser.add_argument('--vae-path', type=str, help='Checkpoint to use as VAE; setting this argument disables all settings related to VAE', default=None) +parser.add_argument("--disable-safe-unpickle", action='store_true', help="disable checking pytorch models for malicious code", default=False) +parser.add_argument("--api", action='store_true', help="use api=True to launch the API together with the webui (use --nowebui instead for only the API)") +parser.add_argument("--api-auth", type=str, help='Set authentication for API like "username:password"; or comma-delimit multiple like "u1:p1,u2:p2,u3:p3"', default=None) +parser.add_argument("--api-log", action='store_true', help="use api-log=True to enable logging of all API requests") +parser.add_argument("--nowebui", action='store_true', help="use api=True to launch the API instead of the webui") +parser.add_argument("--ui-debug-mode", action='store_true', help="Don't load model to quickly launch UI") +parser.add_argument("--device-id", type=str, help="Select the default CUDA device to use (export CUDA_VISIBLE_DEVICES=0,1,etc might be needed before)", default=None) +parser.add_argument("--administrator", action='store_true', help="Administrator rights", default=False) +parser.add_argument("--cors-allow-origins", type=str, help="Allowed CORS origin(s) in the form of a comma-separated list (no spaces)", default=None) +parser.add_argument("--cors-allow-origins-regex", type=str, help="Allowed CORS origin(s) in the form of a single regular expression", default=None) +parser.add_argument("--tls-keyfile", type=str, help="Partially enables TLS, requires --tls-certfile to fully function", default=None) +parser.add_argument("--tls-certfile", type=str, help="Partially enables TLS, requires --tls-keyfile to fully 
function", default=None) +parser.add_argument("--disable-tls-verify", action="store_false", help="When passed, enables the use of self-signed certificates.", default=None) +parser.add_argument("--server-name", type=str, help="Sets hostname of server", default=None) +parser.add_argument("--gradio-queue", action='store_true', help="does not do anything", default=True) +parser.add_argument("--no-gradio-queue", action='store_true', help="Disables gradio queue; causes the webpage to use http requests instead of websockets; was the defaul in earlier versions") +parser.add_argument("--skip-version-check", action='store_true', help="Do not check versions of torch and xformers") +parser.add_argument("--no-hashing", action='store_true', help="disable sha256 hashing of checkpoints to help loading performance", default=False) +parser.add_argument("--no-download-sd-model", action='store_true', help="don't download SD1.5 model even if no model is found in --ckpt-dir", default=False) +parser.add_argument('--subpath', type=str, help='customize the subpath for gradio, use with reverse proxy') +parser.add_argument('--add-stop-route', action='store_true', help='add /_stop route to stop server') +parser.add_argument('--api-server-stop', action='store_true', help='enable server stop/restart/kill via api') +parser.add_argument('--timeout-keep-alive', type=int, default=30, help='set timeout_keep_alive for uvicorn') diff --git a/modules/codeformer/__pycache__/codeformer_arch.cpython-310.pyc b/modules/codeformer/__pycache__/codeformer_arch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c5166c003a07991261ae41b30b2df22177dd55f7 Binary files /dev/null and b/modules/codeformer/__pycache__/codeformer_arch.cpython-310.pyc differ diff --git a/modules/codeformer/__pycache__/vqgan_arch.cpython-310.pyc b/modules/codeformer/__pycache__/vqgan_arch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..679e12c47faae7ea74a50cc25eaa3dca36dac75d Binary files /dev/null and b/modules/codeformer/__pycache__/vqgan_arch.cpython-310.pyc differ diff --git a/modules/codeformer/codeformer_arch.py b/modules/codeformer/codeformer_arch.py new file mode 100644 index 0000000000000000000000000000000000000000..12db6814268fdba5a3025f44d1bb24e93d280a69 --- /dev/null +++ b/modules/codeformer/codeformer_arch.py @@ -0,0 +1,276 @@ +# this file is copied from CodeFormer repository. Please see comment in modules/codeformer_model.py + +import math +import torch +from torch import nn, Tensor +import torch.nn.functional as F +from typing import Optional + +from modules.codeformer.vqgan_arch import VQAutoEncoder, ResBlock +from basicsr.utils.registry import ARCH_REGISTRY + +def calc_mean_std(feat, eps=1e-5): + """Calculate mean and std for adaptive_instance_normalization. + + Args: + feat (Tensor): 4D tensor. + eps (float): A small value added to the variance to avoid + divide-by-zero. Default: 1e-5. + """ + size = feat.size() + assert len(size) == 4, 'The input feature should be 4D tensor.' + b, c = size[:2] + feat_var = feat.view(b, c, -1).var(dim=2) + eps + feat_std = feat_var.sqrt().view(b, c, 1, 1) + feat_mean = feat.view(b, c, -1).mean(dim=2).view(b, c, 1, 1) + return feat_mean, feat_std + + +def adaptive_instance_normalization(content_feat, style_feat): + """Adaptive instance normalization. + + Adjust the reference features to have the similar color and illuminations + as those in the degradate features. + + Args: + content_feat (Tensor): The reference feature. 
+ style_feat (Tensor): The degradate features. + """ + size = content_feat.size() + style_mean, style_std = calc_mean_std(style_feat) + content_mean, content_std = calc_mean_std(content_feat) + normalized_feat = (content_feat - content_mean.expand(size)) / content_std.expand(size) + return normalized_feat * style_std.expand(size) + style_mean.expand(size) + + +class PositionEmbeddingSine(nn.Module): + """ + This is a more standard version of the position embedding, very similar to the one + used by the Attention is all you need paper, generalized to work on images. + """ + + def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None): + super().__init__() + self.num_pos_feats = num_pos_feats + self.temperature = temperature + self.normalize = normalize + if scale is not None and normalize is False: + raise ValueError("normalize should be True if scale is passed") + if scale is None: + scale = 2 * math.pi + self.scale = scale + + def forward(self, x, mask=None): + if mask is None: + mask = torch.zeros((x.size(0), x.size(2), x.size(3)), device=x.device, dtype=torch.bool) + not_mask = ~mask + y_embed = not_mask.cumsum(1, dtype=torch.float32) + x_embed = not_mask.cumsum(2, dtype=torch.float32) + if self.normalize: + eps = 1e-6 + y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale + x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale + + dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device) + dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats) + + pos_x = x_embed[:, :, :, None] / dim_t + pos_y = y_embed[:, :, :, None] / dim_t + pos_x = torch.stack( + (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4 + ).flatten(3) + pos_y = torch.stack( + (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4 + ).flatten(3) + pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) + return pos + +def _get_activation_fn(activation): + """Return an activation function given a string""" + if activation == "relu": + return F.relu + if activation == "gelu": + return F.gelu + if activation == "glu": + return F.glu + raise RuntimeError(F"activation should be relu/gelu, not {activation}.") + + +class TransformerSALayer(nn.Module): + def __init__(self, embed_dim, nhead=8, dim_mlp=2048, dropout=0.0, activation="gelu"): + super().__init__() + self.self_attn = nn.MultiheadAttention(embed_dim, nhead, dropout=dropout) + # Implementation of Feedforward model - MLP + self.linear1 = nn.Linear(embed_dim, dim_mlp) + self.dropout = nn.Dropout(dropout) + self.linear2 = nn.Linear(dim_mlp, embed_dim) + + self.norm1 = nn.LayerNorm(embed_dim) + self.norm2 = nn.LayerNorm(embed_dim) + self.dropout1 = nn.Dropout(dropout) + self.dropout2 = nn.Dropout(dropout) + + self.activation = _get_activation_fn(activation) + + def with_pos_embed(self, tensor, pos: Optional[Tensor]): + return tensor if pos is None else tensor + pos + + def forward(self, tgt, + tgt_mask: Optional[Tensor] = None, + tgt_key_padding_mask: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None): + + # self attention + tgt2 = self.norm1(tgt) + q = k = self.with_pos_embed(tgt2, query_pos) + tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask, + key_padding_mask=tgt_key_padding_mask)[0] + tgt = tgt + self.dropout1(tgt2) + + # ffn + tgt2 = self.norm2(tgt) + tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2)))) + tgt = tgt + self.dropout2(tgt2) + return tgt + +class Fuse_sft_block(nn.Module): + def __init__(self, in_ch, out_ch): + 
super().__init__() + self.encode_enc = ResBlock(2*in_ch, out_ch) + + self.scale = nn.Sequential( + nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1), + nn.LeakyReLU(0.2, True), + nn.Conv2d(out_ch, out_ch, kernel_size=3, padding=1)) + + self.shift = nn.Sequential( + nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1), + nn.LeakyReLU(0.2, True), + nn.Conv2d(out_ch, out_ch, kernel_size=3, padding=1)) + + def forward(self, enc_feat, dec_feat, w=1): + enc_feat = self.encode_enc(torch.cat([enc_feat, dec_feat], dim=1)) + scale = self.scale(enc_feat) + shift = self.shift(enc_feat) + residual = w * (dec_feat * scale + shift) + out = dec_feat + residual + return out + + +@ARCH_REGISTRY.register() +class CodeFormer(VQAutoEncoder): + def __init__(self, dim_embd=512, n_head=8, n_layers=9, + codebook_size=1024, latent_size=256, + connect_list=('32', '64', '128', '256'), + fix_modules=('quantize', 'generator')): + super(CodeFormer, self).__init__(512, 64, [1, 2, 2, 4, 4, 8], 'nearest',2, [16], codebook_size) + + if fix_modules is not None: + for module in fix_modules: + for param in getattr(self, module).parameters(): + param.requires_grad = False + + self.connect_list = connect_list + self.n_layers = n_layers + self.dim_embd = dim_embd + self.dim_mlp = dim_embd*2 + + self.position_emb = nn.Parameter(torch.zeros(latent_size, self.dim_embd)) + self.feat_emb = nn.Linear(256, self.dim_embd) + + # transformer + self.ft_layers = nn.Sequential(*[TransformerSALayer(embed_dim=dim_embd, nhead=n_head, dim_mlp=self.dim_mlp, dropout=0.0) + for _ in range(self.n_layers)]) + + # logits_predict head + self.idx_pred_layer = nn.Sequential( + nn.LayerNorm(dim_embd), + nn.Linear(dim_embd, codebook_size, bias=False)) + + self.channels = { + '16': 512, + '32': 256, + '64': 256, + '128': 128, + '256': 128, + '512': 64, + } + + # after second residual block for > 16, before attn layer for ==16 + self.fuse_encoder_block = {'512':2, '256':5, '128':8, '64':11, '32':14, '16':18} + # after first residual block for > 16, before attn layer for ==16 + self.fuse_generator_block = {'16':6, '32': 9, '64':12, '128':15, '256':18, '512':21} + + # fuse_convs_dict + self.fuse_convs_dict = nn.ModuleDict() + for f_size in self.connect_list: + in_ch = self.channels[f_size] + self.fuse_convs_dict[f_size] = Fuse_sft_block(in_ch, in_ch) + + def _init_weights(self, module): + if isinstance(module, (nn.Linear, nn.Embedding)): + module.weight.data.normal_(mean=0.0, std=0.02) + if isinstance(module, nn.Linear) and module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + + def forward(self, x, w=0, detach_16=True, code_only=False, adain=False): + # ################### Encoder ##################### + enc_feat_dict = {} + out_list = [self.fuse_encoder_block[f_size] for f_size in self.connect_list] + for i, block in enumerate(self.encoder.blocks): + x = block(x) + if i in out_list: + enc_feat_dict[str(x.shape[-1])] = x.clone() + + lq_feat = x + # ################# Transformer ################### + # quant_feat, codebook_loss, quant_stats = self.quantize(lq_feat) + pos_emb = self.position_emb.unsqueeze(1).repeat(1,x.shape[0],1) + # BCHW -> BC(HW) -> (HW)BC + feat_emb = self.feat_emb(lq_feat.flatten(2).permute(2,0,1)) + query_emb = feat_emb + # Transformer encoder + for layer in self.ft_layers: + query_emb = layer(query_emb, query_pos=pos_emb) + + # output logits + logits = self.idx_pred_layer(query_emb) # (hw)bn + logits = logits.permute(1,0,2) # (hw)bn -> 
b(hw)n + + if code_only: # for training stage II + # logits doesn't need softmax before cross_entropy loss + return logits, lq_feat + + # ################# Quantization ################### + # if self.training: + # quant_feat = torch.einsum('btn,nc->btc', [soft_one_hot, self.quantize.embedding.weight]) + # # b(hw)c -> bc(hw) -> bchw + # quant_feat = quant_feat.permute(0,2,1).view(lq_feat.shape) + # ------------ + soft_one_hot = F.softmax(logits, dim=2) + _, top_idx = torch.topk(soft_one_hot, 1, dim=2) + quant_feat = self.quantize.get_codebook_feat(top_idx, shape=[x.shape[0],16,16,256]) + # preserve gradients + # quant_feat = lq_feat + (quant_feat - lq_feat).detach() + + if detach_16: + quant_feat = quant_feat.detach() # for training stage III + if adain: + quant_feat = adaptive_instance_normalization(quant_feat, lq_feat) + + # ################## Generator #################### + x = quant_feat + fuse_list = [self.fuse_generator_block[f_size] for f_size in self.connect_list] + + for i, block in enumerate(self.generator.blocks): + x = block(x) + if i in fuse_list: # fuse after i-th block + f_size = str(x.shape[-1]) + if w>0: + x = self.fuse_convs_dict[f_size](enc_feat_dict[f_size].detach(), x, w) + out = x + # logits doesn't need softmax before cross_entropy loss + return out, logits, lq_feat diff --git a/modules/codeformer/vqgan_arch.py b/modules/codeformer/vqgan_arch.py new file mode 100644 index 0000000000000000000000000000000000000000..09ee6660dc537e41fb9d9c7be7196c94c04aa8c6 --- /dev/null +++ b/modules/codeformer/vqgan_arch.py @@ -0,0 +1,435 @@ +# this file is copied from CodeFormer repository. Please see comment in modules/codeformer_model.py + +''' +VQGAN code, adapted from the original created by the Unleashing Transformers authors: +https://github.com/samb-t/unleashing-transformers/blob/master/models/vqgan.py + +''' +import torch +import torch.nn as nn +import torch.nn.functional as F +from basicsr.utils import get_root_logger +from basicsr.utils.registry import ARCH_REGISTRY + +def normalize(in_channels): + return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True) + + +@torch.jit.script +def swish(x): + return x*torch.sigmoid(x) + + +# Define VQVAE classes +class VectorQuantizer(nn.Module): + def __init__(self, codebook_size, emb_dim, beta): + super(VectorQuantizer, self).__init__() + self.codebook_size = codebook_size # number of embeddings + self.emb_dim = emb_dim # dimension of embedding + self.beta = beta # commitment cost used in loss term, beta * ||z_e(x)-sg[e]||^2 + self.embedding = nn.Embedding(self.codebook_size, self.emb_dim) + self.embedding.weight.data.uniform_(-1.0 / self.codebook_size, 1.0 / self.codebook_size) + + def forward(self, z): + # reshape z -> (batch, height, width, channel) and flatten + z = z.permute(0, 2, 3, 1).contiguous() + z_flattened = z.view(-1, self.emb_dim) + + # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z + d = (z_flattened ** 2).sum(dim=1, keepdim=True) + (self.embedding.weight**2).sum(1) - \ + 2 * torch.matmul(z_flattened, self.embedding.weight.t()) + + mean_distance = torch.mean(d) + # find closest encodings + # min_encoding_indices = torch.argmin(d, dim=1).unsqueeze(1) + min_encoding_scores, min_encoding_indices = torch.topk(d, 1, dim=1, largest=False) + # [0-1], higher score, higher confidence + min_encoding_scores = torch.exp(-min_encoding_scores/10) + + min_encodings = torch.zeros(min_encoding_indices.shape[0], self.codebook_size).to(z) + min_encodings.scatter_(1, min_encoding_indices, 
1) + + # get quantized latent vectors + z_q = torch.matmul(min_encodings, self.embedding.weight).view(z.shape) + # compute loss for embedding + loss = torch.mean((z_q.detach()-z)**2) + self.beta * torch.mean((z_q - z.detach()) ** 2) + # preserve gradients + z_q = z + (z_q - z).detach() + + # perplexity + e_mean = torch.mean(min_encodings, dim=0) + perplexity = torch.exp(-torch.sum(e_mean * torch.log(e_mean + 1e-10))) + # reshape back to match original input shape + z_q = z_q.permute(0, 3, 1, 2).contiguous() + + return z_q, loss, { + "perplexity": perplexity, + "min_encodings": min_encodings, + "min_encoding_indices": min_encoding_indices, + "min_encoding_scores": min_encoding_scores, + "mean_distance": mean_distance + } + + def get_codebook_feat(self, indices, shape): + # input indices: batch*token_num -> (batch*token_num)*1 + # shape: batch, height, width, channel + indices = indices.view(-1,1) + min_encodings = torch.zeros(indices.shape[0], self.codebook_size).to(indices) + min_encodings.scatter_(1, indices, 1) + # get quantized latent vectors + z_q = torch.matmul(min_encodings.float(), self.embedding.weight) + + if shape is not None: # reshape back to match original input shape + z_q = z_q.view(shape).permute(0, 3, 1, 2).contiguous() + + return z_q + + +class GumbelQuantizer(nn.Module): + def __init__(self, codebook_size, emb_dim, num_hiddens, straight_through=False, kl_weight=5e-4, temp_init=1.0): + super().__init__() + self.codebook_size = codebook_size # number of embeddings + self.emb_dim = emb_dim # dimension of embedding + self.straight_through = straight_through + self.temperature = temp_init + self.kl_weight = kl_weight + self.proj = nn.Conv2d(num_hiddens, codebook_size, 1) # projects last encoder layer to quantized logits + self.embed = nn.Embedding(codebook_size, emb_dim) + + def forward(self, z): + hard = self.straight_through if self.training else True + + logits = self.proj(z) + + soft_one_hot = F.gumbel_softmax(logits, tau=self.temperature, dim=1, hard=hard) + + z_q = torch.einsum("b n h w, n d -> b d h w", soft_one_hot, self.embed.weight) + + # + kl divergence to the prior loss + qy = F.softmax(logits, dim=1) + diff = self.kl_weight * torch.sum(qy * torch.log(qy * self.codebook_size + 1e-10), dim=1).mean() + min_encoding_indices = soft_one_hot.argmax(dim=1) + + return z_q, diff, { + "min_encoding_indices": min_encoding_indices + } + + +class Downsample(nn.Module): + def __init__(self, in_channels): + super().__init__() + self.conv = torch.nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=2, padding=0) + + def forward(self, x): + pad = (0, 1, 0, 1) + x = torch.nn.functional.pad(x, pad, mode="constant", value=0) + x = self.conv(x) + return x + + +class Upsample(nn.Module): + def __init__(self, in_channels): + super().__init__() + self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1) + + def forward(self, x): + x = F.interpolate(x, scale_factor=2.0, mode="nearest") + x = self.conv(x) + + return x + + +class ResBlock(nn.Module): + def __init__(self, in_channels, out_channels=None): + super(ResBlock, self).__init__() + self.in_channels = in_channels + self.out_channels = in_channels if out_channels is None else out_channels + self.norm1 = normalize(in_channels) + self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1) + self.norm2 = normalize(out_channels) + self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1) + if self.in_channels != self.out_channels: + self.conv_out = 
nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0) + + def forward(self, x_in): + x = x_in + x = self.norm1(x) + x = swish(x) + x = self.conv1(x) + x = self.norm2(x) + x = swish(x) + x = self.conv2(x) + if self.in_channels != self.out_channels: + x_in = self.conv_out(x_in) + + return x + x_in + + +class AttnBlock(nn.Module): + def __init__(self, in_channels): + super().__init__() + self.in_channels = in_channels + + self.norm = normalize(in_channels) + self.q = torch.nn.Conv2d( + in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0 + ) + self.k = torch.nn.Conv2d( + in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0 + ) + self.v = torch.nn.Conv2d( + in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0 + ) + self.proj_out = torch.nn.Conv2d( + in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0 + ) + + def forward(self, x): + h_ = x + h_ = self.norm(h_) + q = self.q(h_) + k = self.k(h_) + v = self.v(h_) + + # compute attention + b, c, h, w = q.shape + q = q.reshape(b, c, h*w) + q = q.permute(0, 2, 1) + k = k.reshape(b, c, h*w) + w_ = torch.bmm(q, k) + w_ = w_ * (int(c)**(-0.5)) + w_ = F.softmax(w_, dim=2) + + # attend to values + v = v.reshape(b, c, h*w) + w_ = w_.permute(0, 2, 1) + h_ = torch.bmm(v, w_) + h_ = h_.reshape(b, c, h, w) + + h_ = self.proj_out(h_) + + return x+h_ + + +class Encoder(nn.Module): + def __init__(self, in_channels, nf, emb_dim, ch_mult, num_res_blocks, resolution, attn_resolutions): + super().__init__() + self.nf = nf + self.num_resolutions = len(ch_mult) + self.num_res_blocks = num_res_blocks + self.resolution = resolution + self.attn_resolutions = attn_resolutions + + curr_res = self.resolution + in_ch_mult = (1,)+tuple(ch_mult) + + blocks = [] + # initial convolution + blocks.append(nn.Conv2d(in_channels, nf, kernel_size=3, stride=1, padding=1)) + + # residual and downsampling blocks, with attention on smaller res (16x16) + for i in range(self.num_resolutions): + block_in_ch = nf * in_ch_mult[i] + block_out_ch = nf * ch_mult[i] + for _ in range(self.num_res_blocks): + blocks.append(ResBlock(block_in_ch, block_out_ch)) + block_in_ch = block_out_ch + if curr_res in attn_resolutions: + blocks.append(AttnBlock(block_in_ch)) + + if i != self.num_resolutions - 1: + blocks.append(Downsample(block_in_ch)) + curr_res = curr_res // 2 + + # non-local attention block + blocks.append(ResBlock(block_in_ch, block_in_ch)) + blocks.append(AttnBlock(block_in_ch)) + blocks.append(ResBlock(block_in_ch, block_in_ch)) + + # normalise and convert to latent size + blocks.append(normalize(block_in_ch)) + blocks.append(nn.Conv2d(block_in_ch, emb_dim, kernel_size=3, stride=1, padding=1)) + self.blocks = nn.ModuleList(blocks) + + def forward(self, x): + for block in self.blocks: + x = block(x) + + return x + + +class Generator(nn.Module): + def __init__(self, nf, emb_dim, ch_mult, res_blocks, img_size, attn_resolutions): + super().__init__() + self.nf = nf + self.ch_mult = ch_mult + self.num_resolutions = len(self.ch_mult) + self.num_res_blocks = res_blocks + self.resolution = img_size + self.attn_resolutions = attn_resolutions + self.in_channels = emb_dim + self.out_channels = 3 + block_in_ch = self.nf * self.ch_mult[-1] + curr_res = self.resolution // 2 ** (self.num_resolutions-1) + + blocks = [] + # initial conv + blocks.append(nn.Conv2d(self.in_channels, block_in_ch, kernel_size=3, stride=1, padding=1)) + + # non-local attention block + blocks.append(ResBlock(block_in_ch, block_in_ch)) +
blocks.append(AttnBlock(block_in_ch)) + blocks.append(ResBlock(block_in_ch, block_in_ch)) + + for i in reversed(range(self.num_resolutions)): + block_out_ch = self.nf * self.ch_mult[i] + + for _ in range(self.num_res_blocks): + blocks.append(ResBlock(block_in_ch, block_out_ch)) + block_in_ch = block_out_ch + + if curr_res in self.attn_resolutions: + blocks.append(AttnBlock(block_in_ch)) + + if i != 0: + blocks.append(Upsample(block_in_ch)) + curr_res = curr_res * 2 + + blocks.append(normalize(block_in_ch)) + blocks.append(nn.Conv2d(block_in_ch, self.out_channels, kernel_size=3, stride=1, padding=1)) + + self.blocks = nn.ModuleList(blocks) + + + def forward(self, x): + for block in self.blocks: + x = block(x) + + return x + + +@ARCH_REGISTRY.register() +class VQAutoEncoder(nn.Module): + def __init__(self, img_size, nf, ch_mult, quantizer="nearest", res_blocks=2, attn_resolutions=None, codebook_size=1024, emb_dim=256, + beta=0.25, gumbel_straight_through=False, gumbel_kl_weight=1e-8, model_path=None): + super().__init__() + logger = get_root_logger() + self.in_channels = 3 + self.nf = nf + self.n_blocks = res_blocks + self.codebook_size = codebook_size + self.embed_dim = emb_dim + self.ch_mult = ch_mult + self.resolution = img_size + self.attn_resolutions = attn_resolutions or [16] + self.quantizer_type = quantizer + self.encoder = Encoder( + self.in_channels, + self.nf, + self.embed_dim, + self.ch_mult, + self.n_blocks, + self.resolution, + self.attn_resolutions + ) + if self.quantizer_type == "nearest": + self.beta = beta #0.25 + self.quantize = VectorQuantizer(self.codebook_size, self.embed_dim, self.beta) + elif self.quantizer_type == "gumbel": + self.gumbel_num_hiddens = emb_dim + self.straight_through = gumbel_straight_through + self.kl_weight = gumbel_kl_weight + self.quantize = GumbelQuantizer( + self.codebook_size, + self.embed_dim, + self.gumbel_num_hiddens, + self.straight_through, + self.kl_weight + ) + self.generator = Generator( + self.nf, + self.embed_dim, + self.ch_mult, + self.n_blocks, + self.resolution, + self.attn_resolutions + ) + + if model_path is not None: + chkpt = torch.load(model_path, map_location='cpu') + if 'params_ema' in chkpt: + self.load_state_dict(torch.load(model_path, map_location='cpu')['params_ema']) + logger.info(f'vqgan is loaded from: {model_path} [params_ema]') + elif 'params' in chkpt: + self.load_state_dict(torch.load(model_path, map_location='cpu')['params']) + logger.info(f'vqgan is loaded from: {model_path} [params]') + else: + raise ValueError('Wrong params!') + + + def forward(self, x): + x = self.encoder(x) + quant, codebook_loss, quant_stats = self.quantize(x) + x = self.generator(quant) + return x, codebook_loss, quant_stats + + + +# patch based discriminator +@ARCH_REGISTRY.register() +class VQGANDiscriminator(nn.Module): + def __init__(self, nc=3, ndf=64, n_layers=4, model_path=None): + super().__init__() + + layers = [nn.Conv2d(nc, ndf, kernel_size=4, stride=2, padding=1), nn.LeakyReLU(0.2, True)] + ndf_mult = 1 + ndf_mult_prev = 1 + for n in range(1, n_layers): # gradually increase the number of filters + ndf_mult_prev = ndf_mult + ndf_mult = min(2 ** n, 8) + layers += [ + nn.Conv2d(ndf * ndf_mult_prev, ndf * ndf_mult, kernel_size=4, stride=2, padding=1, bias=False), + nn.BatchNorm2d(ndf * ndf_mult), + nn.LeakyReLU(0.2, True) + ] + + ndf_mult_prev = ndf_mult + ndf_mult = min(2 ** n_layers, 8) + + layers += [ + nn.Conv2d(ndf * ndf_mult_prev, ndf * ndf_mult, kernel_size=4, stride=1, padding=1, bias=False), + nn.BatchNorm2d(ndf * 
ndf_mult), + nn.LeakyReLU(0.2, True) + ] + + layers += [ + nn.Conv2d(ndf * ndf_mult, 1, kernel_size=4, stride=1, padding=1)] # output 1 channel prediction map + self.main = nn.Sequential(*layers) + + if model_path is not None: + chkpt = torch.load(model_path, map_location='cpu') + if 'params_d' in chkpt: + self.load_state_dict(torch.load(model_path, map_location='cpu')['params_d']) + elif 'params' in chkpt: + self.load_state_dict(torch.load(model_path, map_location='cpu')['params']) + else: + raise ValueError('Wrong params!') + + def forward(self, x): + return self.main(x) diff --git a/modules/codeformer_model.py b/modules/codeformer_model.py new file mode 100644 index 0000000000000000000000000000000000000000..3ad8a9db806d3406610d81534a6d7c85301cceb0 --- /dev/null +++ b/modules/codeformer_model.py @@ -0,0 +1,132 @@ +import os + +import cv2 +import torch + +import modules.face_restoration +import modules.shared +from modules import shared, devices, modelloader, errors +from modules.paths import models_path + +# codeformer people made a choice to include modified basicsr library to their project which makes +# it utterly impossible to use it alongside with other libraries that also use basicsr, like GFPGAN. +# I am making a choice to include some files from codeformer to work around this issue. +model_dir = "Codeformer" +model_path = os.path.join(models_path, model_dir) +model_url = 'https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth' + +codeformer = None + + +def setup_model(dirname): + os.makedirs(model_path, exist_ok=True) + + path = modules.paths.paths.get("CodeFormer", None) + if path is None: + return + + try: + from torchvision.transforms.functional import normalize + from modules.codeformer.codeformer_arch import CodeFormer + from basicsr.utils import img2tensor, tensor2img + from facelib.utils.face_restoration_helper import FaceRestoreHelper + from facelib.detection.retinaface import retinaface + + net_class = CodeFormer + + class FaceRestorerCodeFormer(modules.face_restoration.FaceRestoration): + def name(self): + return "CodeFormer" + + def __init__(self, dirname): + self.net = None + self.face_helper = None + self.cmd_dir = dirname + + def create_models(self): + + if self.net is not None and self.face_helper is not None: + self.net.to(devices.device_codeformer) + return self.net, self.face_helper + model_paths = modelloader.load_models(model_path, model_url, self.cmd_dir, download_name='codeformer-v0.1.0.pth', ext_filter=['.pth']) + if len(model_paths) != 0: + ckpt_path = model_paths[0] + else: + print("Unable to load codeformer model.") + return None, None + net = net_class(dim_embd=512, codebook_size=1024, n_head=8, n_layers=9, connect_list=['32', '64', '128', '256']).to(devices.device_codeformer) + checkpoint = torch.load(ckpt_path)['params_ema'] + net.load_state_dict(checkpoint) + net.eval() + + if hasattr(retinaface, 'device'): + retinaface.device = devices.device_codeformer + face_helper = FaceRestoreHelper(1, face_size=512, crop_ratio=(1, 1), det_model='retinaface_resnet50', save_ext='png', use_parse=True, device=devices.device_codeformer) + + self.net = net + self.face_helper = face_helper + + return net, face_helper + + def send_model_to(self, device): + self.net.to(device) + self.face_helper.face_det.to(device) + self.face_helper.face_parse.to(device) + + def restore(self, np_image, w=None): + np_image = np_image[:, :, ::-1] + + original_resolution = np_image.shape[0:2] + + self.create_models() + if self.net is None or self.face_helper is None: 
+ return np_image + + self.send_model_to(devices.device_codeformer) + + self.face_helper.clean_all() + self.face_helper.read_image(np_image) + self.face_helper.get_face_landmarks_5(only_center_face=False, resize=640, eye_dist_threshold=5) + self.face_helper.align_warp_face() + + for cropped_face in self.face_helper.cropped_faces: + cropped_face_t = img2tensor(cropped_face / 255., bgr2rgb=True, float32=True) + normalize(cropped_face_t, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True) + cropped_face_t = cropped_face_t.unsqueeze(0).to(devices.device_codeformer) + + try: + with torch.no_grad(): + output = self.net(cropped_face_t, w=w if w is not None else shared.opts.code_former_weight, adain=True)[0] + restored_face = tensor2img(output, rgb2bgr=True, min_max=(-1, 1)) + del output + devices.torch_gc() + except Exception: + errors.report('Failed inference for CodeFormer', exc_info=True) + restored_face = tensor2img(cropped_face_t, rgb2bgr=True, min_max=(-1, 1)) + + restored_face = restored_face.astype('uint8') + self.face_helper.add_restored_face(restored_face) + + self.face_helper.get_inverse_affine(None) + + restored_img = self.face_helper.paste_faces_to_input_image() + restored_img = restored_img[:, :, ::-1] + + if original_resolution != restored_img.shape[0:2]: + restored_img = cv2.resize(restored_img, (0, 0), fx=original_resolution[1]/restored_img.shape[1], fy=original_resolution[0]/restored_img.shape[0], interpolation=cv2.INTER_LINEAR) + + self.face_helper.clean_all() + + if shared.opts.face_restoration_unload: + self.send_model_to(devices.cpu) + + return restored_img + + global codeformer + codeformer = FaceRestorerCodeFormer(dirname) + shared.face_restorers.append(codeformer) + + except Exception: + errors.report("Error setting up CodeFormer", exc_info=True) + + # sys.path = stored_sys_path diff --git a/modules/config_states.py b/modules/config_states.py new file mode 100644 index 0000000000000000000000000000000000000000..6f1ab53fc5909888413d42ede0f09c28f90b90cc --- /dev/null +++ b/modules/config_states.py @@ -0,0 +1,197 @@ +""" +Supports saving and restoring webui and extensions from a known working set of commits +""" + +import os +import json +import time +import tqdm + +from datetime import datetime +from collections import OrderedDict +import git + +from modules import shared, extensions, errors +from modules.paths_internal import script_path, config_states_dir + + +all_config_states = OrderedDict() + + +def list_config_states(): + global all_config_states + + all_config_states.clear() + os.makedirs(config_states_dir, exist_ok=True) + + config_states = [] + for filename in os.listdir(config_states_dir): + if filename.endswith(".json"): + path = os.path.join(config_states_dir, filename) + with open(path, "r", encoding="utf-8") as f: + j = json.load(f) + j["filepath"] = path + config_states.append(j) + + config_states = sorted(config_states, key=lambda cs: cs["created_at"], reverse=True) + + for cs in config_states: + timestamp = time.asctime(time.gmtime(cs["created_at"])) + name = cs.get("name", "Config") + full_name = f"{name}: {timestamp}" + all_config_states[full_name] = cs + + return all_config_states + + +def get_webui_config(): + webui_repo = None + + try: + if os.path.exists(os.path.join(script_path, ".git")): + webui_repo = git.Repo(script_path) + except Exception: + errors.report(f"Error reading webui git info from {script_path}", exc_info=True) + + webui_remote = None + webui_commit_hash = None + webui_commit_date = None + webui_branch = None + if webui_repo and not 
webui_repo.bare: + try: + webui_remote = next(webui_repo.remote().urls, None) + head = webui_repo.head.commit + webui_commit_date = webui_repo.head.commit.committed_date + webui_commit_hash = head.hexsha + webui_branch = webui_repo.active_branch.name + + except Exception: + webui_remote = None + + return { + "remote": webui_remote, + "commit_hash": webui_commit_hash, + "commit_date": webui_commit_date, + "branch": webui_branch, + } + + +def get_extension_config(): + ext_config = {} + + for ext in extensions.extensions: + ext.read_info_from_repo() + + entry = { + "name": ext.name, + "path": ext.path, + "enabled": ext.enabled, + "is_builtin": ext.is_builtin, + "remote": ext.remote, + "commit_hash": ext.commit_hash, + "commit_date": ext.commit_date, + "branch": ext.branch, + "have_info_from_repo": ext.have_info_from_repo + } + + ext_config[ext.name] = entry + + return ext_config + + +def get_config(): + creation_time = datetime.now().timestamp() + webui_config = get_webui_config() + ext_config = get_extension_config() + + return { + "created_at": creation_time, + "webui": webui_config, + "extensions": ext_config + } + + +def restore_webui_config(config): + print("* Restoring webui state...") + + if "webui" not in config: + print("Error: No webui data saved to config") + return + + webui_config = config["webui"] + + if "commit_hash" not in webui_config: + print("Error: No commit saved to webui config") + return + + webui_commit_hash = webui_config.get("commit_hash", None) + webui_repo = None + + try: + if os.path.exists(os.path.join(script_path, ".git")): + webui_repo = git.Repo(script_path) + except Exception: + errors.report(f"Error reading webui git info from {script_path}", exc_info=True) + return + + try: + webui_repo.git.fetch(all=True) + webui_repo.git.reset(webui_commit_hash, hard=True) + print(f"* Restored webui to commit {webui_commit_hash}.") + except Exception: + errors.report(f"Error restoring webui to commit {webui_commit_hash}") + + +def restore_extension_config(config): + print("* Restoring extension state...") + + if "extensions" not in config: + print("Error: No extension data saved to config") + return + + ext_config = config["extensions"] + + results = [] + disabled = [] + + for ext in tqdm.tqdm(extensions.extensions): + if ext.is_builtin: + continue + + ext.read_info_from_repo() + current_commit = ext.commit_hash + + if ext.name not in ext_config: + ext.disabled = True + disabled.append(ext.name) + results.append((ext, current_commit[:8], False, "Saved extension state not found in config, marking as disabled")) + continue + + entry = ext_config[ext.name] + + if "commit_hash" in entry and entry["commit_hash"]: + try: + ext.fetch_and_reset_hard(entry["commit_hash"]) + ext.read_info_from_repo() + if current_commit != entry["commit_hash"]: + results.append((ext, current_commit[:8], True, entry["commit_hash"][:8])) + except Exception as ex: + results.append((ext, current_commit[:8], False, ex)) + else: + results.append((ext, current_commit[:8], False, "No commit hash found in config")) + + if not entry.get("enabled", False): + ext.disabled = True + disabled.append(ext.name) + else: + ext.disabled = False + + shared.opts.disabled_extensions = disabled + shared.opts.save(shared.config_filename) + + print("* Finished restoring extensions. Results:") + for ext, prev_commit, success, result in results: + if success: + print(f" + {ext.name}: {prev_commit} -> {result}") + else: + print(f" !
{ext.name}: FAILURE ({result})") diff --git a/modules/deepbooru.py b/modules/deepbooru.py new file mode 100644 index 0000000000000000000000000000000000000000..547e1b4c67aeb75a06c9991f957f51b0ef6fdd0f --- /dev/null +++ b/modules/deepbooru.py @@ -0,0 +1,98 @@ +import os +import re + +import torch +import numpy as np + +from modules import modelloader, paths, deepbooru_model, devices, images, shared + +re_special = re.compile(r'([\\()])') + + +class DeepDanbooru: + def __init__(self): + self.model = None + + def load(self): + if self.model is not None: + return + + files = modelloader.load_models( + model_path=os.path.join(paths.models_path, "torch_deepdanbooru"), + model_url='https://github.com/AUTOMATIC1111/TorchDeepDanbooru/releases/download/v1/model-resnet_custom_v3.pt', + ext_filter=[".pt"], + download_name='model-resnet_custom_v3.pt', + ) + + self.model = deepbooru_model.DeepDanbooruModel() + self.model.load_state_dict(torch.load(files[0], map_location="cpu")) + + self.model.eval() + self.model.to(devices.cpu, devices.dtype) + + def start(self): + self.load() + self.model.to(devices.device) + + def stop(self): + if not shared.opts.interrogate_keep_models_in_memory: + self.model.to(devices.cpu) + devices.torch_gc() + + def tag(self, pil_image): + self.start() + res = self.tag_multi(pil_image) + self.stop() + + return res + + def tag_multi(self, pil_image, force_disable_ranks=False): + threshold = shared.opts.interrogate_deepbooru_score_threshold + use_spaces = shared.opts.deepbooru_use_spaces + use_escape = shared.opts.deepbooru_escape + alpha_sort = shared.opts.deepbooru_sort_alpha + include_ranks = shared.opts.interrogate_return_ranks and not force_disable_ranks + + pic = images.resize_image(2, pil_image.convert("RGB"), 512, 512) + a = np.expand_dims(np.array(pic, dtype=np.float32), 0) / 255 + + with torch.no_grad(), devices.autocast(): + x = torch.from_numpy(a).to(devices.device) + y = self.model(x)[0].detach().cpu().numpy() + + probability_dict = {} + + for tag, probability in zip(self.model.tags, y): + if probability < threshold: + continue + + if tag.startswith("rating:"): + continue + + probability_dict[tag] = probability + + if alpha_sort: + tags = sorted(probability_dict) + else: + tags = [tag for tag, _ in sorted(probability_dict.items(), key=lambda x: -x[1])] + + res = [] + + filtertags = {x.strip().replace(' ', '_') for x in shared.opts.deepbooru_filter_tags.split(",")} + + for tag in [x for x in tags if x not in filtertags]: + probability = probability_dict[tag] + tag_outformat = tag + if use_spaces: + tag_outformat = tag_outformat.replace('_', ' ') + if use_escape: + tag_outformat = re.sub(re_special, r'\\\1', tag_outformat) + if include_ranks: + tag_outformat = f"({tag_outformat}:{probability:.3f})" + + res.append(tag_outformat) + + return ", ".join(res) + + +model = DeepDanbooru() diff --git a/modules/deepbooru_model.py b/modules/deepbooru_model.py new file mode 100644 index 0000000000000000000000000000000000000000..7a53884624e96284c35214ce02b8a2891d92c3e8 --- /dev/null +++ b/modules/deepbooru_model.py @@ -0,0 +1,678 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +from modules import devices + +# see https://github.com/AUTOMATIC1111/TorchDeepDanbooru for more + + +class DeepDanbooruModel(nn.Module): + def __init__(self): + super(DeepDanbooruModel, self).__init__() + + self.tags = [] + + self.n_Conv_0 = nn.Conv2d(kernel_size=(7, 7), in_channels=3, out_channels=64, stride=(2, 2)) + self.n_MaxPool_0 = nn.MaxPool2d(kernel_size=(3, 3), stride=(2, 
2)) + self.n_Conv_1 = nn.Conv2d(kernel_size=(1, 1), in_channels=64, out_channels=256) + self.n_Conv_2 = nn.Conv2d(kernel_size=(1, 1), in_channels=64, out_channels=64) + self.n_Conv_3 = nn.Conv2d(kernel_size=(3, 3), in_channels=64, out_channels=64) + self.n_Conv_4 = nn.Conv2d(kernel_size=(1, 1), in_channels=64, out_channels=256) + self.n_Conv_5 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=64) + self.n_Conv_6 = nn.Conv2d(kernel_size=(3, 3), in_channels=64, out_channels=64) + self.n_Conv_7 = nn.Conv2d(kernel_size=(1, 1), in_channels=64, out_channels=256) + self.n_Conv_8 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=64) + self.n_Conv_9 = nn.Conv2d(kernel_size=(3, 3), in_channels=64, out_channels=64) + self.n_Conv_10 = nn.Conv2d(kernel_size=(1, 1), in_channels=64, out_channels=256) + self.n_Conv_11 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=512, stride=(2, 2)) + self.n_Conv_12 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=128) + self.n_Conv_13 = nn.Conv2d(kernel_size=(3, 3), in_channels=128, out_channels=128, stride=(2, 2)) + self.n_Conv_14 = nn.Conv2d(kernel_size=(1, 1), in_channels=128, out_channels=512) + self.n_Conv_15 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=128) + self.n_Conv_16 = nn.Conv2d(kernel_size=(3, 3), in_channels=128, out_channels=128) + self.n_Conv_17 = nn.Conv2d(kernel_size=(1, 1), in_channels=128, out_channels=512) + self.n_Conv_18 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=128) + self.n_Conv_19 = nn.Conv2d(kernel_size=(3, 3), in_channels=128, out_channels=128) + self.n_Conv_20 = nn.Conv2d(kernel_size=(1, 1), in_channels=128, out_channels=512) + self.n_Conv_21 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=128) + self.n_Conv_22 = nn.Conv2d(kernel_size=(3, 3), in_channels=128, out_channels=128) + self.n_Conv_23 = nn.Conv2d(kernel_size=(1, 1), in_channels=128, out_channels=512) + self.n_Conv_24 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=128) + self.n_Conv_25 = nn.Conv2d(kernel_size=(3, 3), in_channels=128, out_channels=128) + self.n_Conv_26 = nn.Conv2d(kernel_size=(1, 1), in_channels=128, out_channels=512) + self.n_Conv_27 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=128) + self.n_Conv_28 = nn.Conv2d(kernel_size=(3, 3), in_channels=128, out_channels=128) + self.n_Conv_29 = nn.Conv2d(kernel_size=(1, 1), in_channels=128, out_channels=512) + self.n_Conv_30 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=128) + self.n_Conv_31 = nn.Conv2d(kernel_size=(3, 3), in_channels=128, out_channels=128) + self.n_Conv_32 = nn.Conv2d(kernel_size=(1, 1), in_channels=128, out_channels=512) + self.n_Conv_33 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=128) + self.n_Conv_34 = nn.Conv2d(kernel_size=(3, 3), in_channels=128, out_channels=128) + self.n_Conv_35 = nn.Conv2d(kernel_size=(1, 1), in_channels=128, out_channels=512) + self.n_Conv_36 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=1024, stride=(2, 2)) + self.n_Conv_37 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=256) + self.n_Conv_38 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256, stride=(2, 2)) + self.n_Conv_39 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_40 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_41 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_42 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, 
out_channels=1024) + self.n_Conv_43 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_44 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_45 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_46 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_47 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_48 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_49 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_50 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_51 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_52 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_53 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_54 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_55 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_56 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_57 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_58 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_59 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_60 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_61 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_62 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_63 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_64 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_65 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_66 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_67 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_68 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_69 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_70 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_71 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_72 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_73 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_74 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_75 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_76 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_77 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_78 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_79 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_80 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_81 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_82 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_83 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_84 = nn.Conv2d(kernel_size=(1, 1), 
in_channels=256, out_channels=1024) + self.n_Conv_85 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_86 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_87 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_88 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_89 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_90 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_91 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_92 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_93 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_94 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_95 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_96 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_97 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_98 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256, stride=(2, 2)) + self.n_Conv_99 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_100 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=1024, stride=(2, 2)) + self.n_Conv_101 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_102 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_103 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_104 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_105 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_106 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_107 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_108 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_109 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_110 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_111 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_112 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_113 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_114 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_115 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_116 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_117 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_118 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_119 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_120 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_121 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_122 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_123 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_124 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_125 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, 
out_channels=256) + self.n_Conv_126 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_127 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_128 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_129 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_130 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_131 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_132 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_133 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_134 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_135 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_136 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_137 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_138 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_139 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_140 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_141 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_142 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_143 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_144 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_145 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_146 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_147 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_148 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_149 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_150 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_151 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_152 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_153 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_154 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_155 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256) + self.n_Conv_156 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256) + self.n_Conv_157 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024) + self.n_Conv_158 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=2048, stride=(2, 2)) + self.n_Conv_159 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=512) + self.n_Conv_160 = nn.Conv2d(kernel_size=(3, 3), in_channels=512, out_channels=512, stride=(2, 2)) + self.n_Conv_161 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=2048) + self.n_Conv_162 = nn.Conv2d(kernel_size=(1, 1), in_channels=2048, out_channels=512) + self.n_Conv_163 = nn.Conv2d(kernel_size=(3, 3), in_channels=512, out_channels=512) + self.n_Conv_164 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=2048) + self.n_Conv_165 = nn.Conv2d(kernel_size=(1, 1), in_channels=2048, out_channels=512) + self.n_Conv_166 = nn.Conv2d(kernel_size=(3, 3), in_channels=512, 
out_channels=512) + self.n_Conv_167 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=2048) + self.n_Conv_168 = nn.Conv2d(kernel_size=(1, 1), in_channels=2048, out_channels=4096, stride=(2, 2)) + self.n_Conv_169 = nn.Conv2d(kernel_size=(1, 1), in_channels=2048, out_channels=1024) + self.n_Conv_170 = nn.Conv2d(kernel_size=(3, 3), in_channels=1024, out_channels=1024, stride=(2, 2)) + self.n_Conv_171 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=4096) + self.n_Conv_172 = nn.Conv2d(kernel_size=(1, 1), in_channels=4096, out_channels=1024) + self.n_Conv_173 = nn.Conv2d(kernel_size=(3, 3), in_channels=1024, out_channels=1024) + self.n_Conv_174 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=4096) + self.n_Conv_175 = nn.Conv2d(kernel_size=(1, 1), in_channels=4096, out_channels=1024) + self.n_Conv_176 = nn.Conv2d(kernel_size=(3, 3), in_channels=1024, out_channels=1024) + self.n_Conv_177 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=4096) + self.n_Conv_178 = nn.Conv2d(kernel_size=(1, 1), in_channels=4096, out_channels=9176, bias=False) + + def forward(self, *inputs): + t_358, = inputs + t_359 = t_358.permute(*[0, 3, 1, 2]) + t_359_padded = F.pad(t_359, [2, 3, 2, 3], value=0) + t_360 = self.n_Conv_0(t_359_padded.to(self.n_Conv_0.bias.dtype) if devices.unet_needs_upcast else t_359_padded) + t_361 = F.relu(t_360) + t_361 = F.pad(t_361, [0, 1, 0, 1], value=float('-inf')) + t_362 = self.n_MaxPool_0(t_361) + t_363 = self.n_Conv_1(t_362) + t_364 = self.n_Conv_2(t_362) + t_365 = F.relu(t_364) + t_365_padded = F.pad(t_365, [1, 1, 1, 1], value=0) + t_366 = self.n_Conv_3(t_365_padded) + t_367 = F.relu(t_366) + t_368 = self.n_Conv_4(t_367) + t_369 = torch.add(t_368, t_363) + t_370 = F.relu(t_369) + t_371 = self.n_Conv_5(t_370) + t_372 = F.relu(t_371) + t_372_padded = F.pad(t_372, [1, 1, 1, 1], value=0) + t_373 = self.n_Conv_6(t_372_padded) + t_374 = F.relu(t_373) + t_375 = self.n_Conv_7(t_374) + t_376 = torch.add(t_375, t_370) + t_377 = F.relu(t_376) + t_378 = self.n_Conv_8(t_377) + t_379 = F.relu(t_378) + t_379_padded = F.pad(t_379, [1, 1, 1, 1], value=0) + t_380 = self.n_Conv_9(t_379_padded) + t_381 = F.relu(t_380) + t_382 = self.n_Conv_10(t_381) + t_383 = torch.add(t_382, t_377) + t_384 = F.relu(t_383) + t_385 = self.n_Conv_11(t_384) + t_386 = self.n_Conv_12(t_384) + t_387 = F.relu(t_386) + t_387_padded = F.pad(t_387, [0, 1, 0, 1], value=0) + t_388 = self.n_Conv_13(t_387_padded) + t_389 = F.relu(t_388) + t_390 = self.n_Conv_14(t_389) + t_391 = torch.add(t_390, t_385) + t_392 = F.relu(t_391) + t_393 = self.n_Conv_15(t_392) + t_394 = F.relu(t_393) + t_394_padded = F.pad(t_394, [1, 1, 1, 1], value=0) + t_395 = self.n_Conv_16(t_394_padded) + t_396 = F.relu(t_395) + t_397 = self.n_Conv_17(t_396) + t_398 = torch.add(t_397, t_392) + t_399 = F.relu(t_398) + t_400 = self.n_Conv_18(t_399) + t_401 = F.relu(t_400) + t_401_padded = F.pad(t_401, [1, 1, 1, 1], value=0) + t_402 = self.n_Conv_19(t_401_padded) + t_403 = F.relu(t_402) + t_404 = self.n_Conv_20(t_403) + t_405 = torch.add(t_404, t_399) + t_406 = F.relu(t_405) + t_407 = self.n_Conv_21(t_406) + t_408 = F.relu(t_407) + t_408_padded = F.pad(t_408, [1, 1, 1, 1], value=0) + t_409 = self.n_Conv_22(t_408_padded) + t_410 = F.relu(t_409) + t_411 = self.n_Conv_23(t_410) + t_412 = torch.add(t_411, t_406) + t_413 = F.relu(t_412) + t_414 = self.n_Conv_24(t_413) + t_415 = F.relu(t_414) + t_415_padded = F.pad(t_415, [1, 1, 1, 1], value=0) + t_416 = self.n_Conv_25(t_415_padded) + t_417 = F.relu(t_416) + t_418 = 
self.n_Conv_26(t_417) + t_419 = torch.add(t_418, t_413) + t_420 = F.relu(t_419) + t_421 = self.n_Conv_27(t_420) + t_422 = F.relu(t_421) + t_422_padded = F.pad(t_422, [1, 1, 1, 1], value=0) + t_423 = self.n_Conv_28(t_422_padded) + t_424 = F.relu(t_423) + t_425 = self.n_Conv_29(t_424) + t_426 = torch.add(t_425, t_420) + t_427 = F.relu(t_426) + t_428 = self.n_Conv_30(t_427) + t_429 = F.relu(t_428) + t_429_padded = F.pad(t_429, [1, 1, 1, 1], value=0) + t_430 = self.n_Conv_31(t_429_padded) + t_431 = F.relu(t_430) + t_432 = self.n_Conv_32(t_431) + t_433 = torch.add(t_432, t_427) + t_434 = F.relu(t_433) + t_435 = self.n_Conv_33(t_434) + t_436 = F.relu(t_435) + t_436_padded = F.pad(t_436, [1, 1, 1, 1], value=0) + t_437 = self.n_Conv_34(t_436_padded) + t_438 = F.relu(t_437) + t_439 = self.n_Conv_35(t_438) + t_440 = torch.add(t_439, t_434) + t_441 = F.relu(t_440) + t_442 = self.n_Conv_36(t_441) + t_443 = self.n_Conv_37(t_441) + t_444 = F.relu(t_443) + t_444_padded = F.pad(t_444, [0, 1, 0, 1], value=0) + t_445 = self.n_Conv_38(t_444_padded) + t_446 = F.relu(t_445) + t_447 = self.n_Conv_39(t_446) + t_448 = torch.add(t_447, t_442) + t_449 = F.relu(t_448) + t_450 = self.n_Conv_40(t_449) + t_451 = F.relu(t_450) + t_451_padded = F.pad(t_451, [1, 1, 1, 1], value=0) + t_452 = self.n_Conv_41(t_451_padded) + t_453 = F.relu(t_452) + t_454 = self.n_Conv_42(t_453) + t_455 = torch.add(t_454, t_449) + t_456 = F.relu(t_455) + t_457 = self.n_Conv_43(t_456) + t_458 = F.relu(t_457) + t_458_padded = F.pad(t_458, [1, 1, 1, 1], value=0) + t_459 = self.n_Conv_44(t_458_padded) + t_460 = F.relu(t_459) + t_461 = self.n_Conv_45(t_460) + t_462 = torch.add(t_461, t_456) + t_463 = F.relu(t_462) + t_464 = self.n_Conv_46(t_463) + t_465 = F.relu(t_464) + t_465_padded = F.pad(t_465, [1, 1, 1, 1], value=0) + t_466 = self.n_Conv_47(t_465_padded) + t_467 = F.relu(t_466) + t_468 = self.n_Conv_48(t_467) + t_469 = torch.add(t_468, t_463) + t_470 = F.relu(t_469) + t_471 = self.n_Conv_49(t_470) + t_472 = F.relu(t_471) + t_472_padded = F.pad(t_472, [1, 1, 1, 1], value=0) + t_473 = self.n_Conv_50(t_472_padded) + t_474 = F.relu(t_473) + t_475 = self.n_Conv_51(t_474) + t_476 = torch.add(t_475, t_470) + t_477 = F.relu(t_476) + t_478 = self.n_Conv_52(t_477) + t_479 = F.relu(t_478) + t_479_padded = F.pad(t_479, [1, 1, 1, 1], value=0) + t_480 = self.n_Conv_53(t_479_padded) + t_481 = F.relu(t_480) + t_482 = self.n_Conv_54(t_481) + t_483 = torch.add(t_482, t_477) + t_484 = F.relu(t_483) + t_485 = self.n_Conv_55(t_484) + t_486 = F.relu(t_485) + t_486_padded = F.pad(t_486, [1, 1, 1, 1], value=0) + t_487 = self.n_Conv_56(t_486_padded) + t_488 = F.relu(t_487) + t_489 = self.n_Conv_57(t_488) + t_490 = torch.add(t_489, t_484) + t_491 = F.relu(t_490) + t_492 = self.n_Conv_58(t_491) + t_493 = F.relu(t_492) + t_493_padded = F.pad(t_493, [1, 1, 1, 1], value=0) + t_494 = self.n_Conv_59(t_493_padded) + t_495 = F.relu(t_494) + t_496 = self.n_Conv_60(t_495) + t_497 = torch.add(t_496, t_491) + t_498 = F.relu(t_497) + t_499 = self.n_Conv_61(t_498) + t_500 = F.relu(t_499) + t_500_padded = F.pad(t_500, [1, 1, 1, 1], value=0) + t_501 = self.n_Conv_62(t_500_padded) + t_502 = F.relu(t_501) + t_503 = self.n_Conv_63(t_502) + t_504 = torch.add(t_503, t_498) + t_505 = F.relu(t_504) + t_506 = self.n_Conv_64(t_505) + t_507 = F.relu(t_506) + t_507_padded = F.pad(t_507, [1, 1, 1, 1], value=0) + t_508 = self.n_Conv_65(t_507_padded) + t_509 = F.relu(t_508) + t_510 = self.n_Conv_66(t_509) + t_511 = torch.add(t_510, t_505) + t_512 = F.relu(t_511) + t_513 = self.n_Conv_67(t_512) + 
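The numbered `n_Conv_*` definitions and the long chain of `t_*` assignments in `forward` are machine-generated from an exported graph, so the same pattern repeats hundreds of times: a 1x1 reduction, an explicitly padded 3x3 convolution, a 1x1 expansion, and a residual add, i.e. a ResNet-style bottleneck. A minimal hand-written sketch of that repeating unit, with illustrative names only (the generated forward pass continues below):

```python
import torch.nn as nn
import torch.nn.functional as F

class Bottleneck(nn.Module):
    """Illustrative equivalent of one repeated unit in the unrolled graph:
    1x1 reduce -> padded 3x3 -> 1x1 expand, then a shortcut add and ReLU."""
    def __init__(self, channels, mid):
        super().__init__()
        self.reduce = nn.Conv2d(channels, mid, kernel_size=(1, 1))
        self.conv3x3 = nn.Conv2d(mid, mid, kernel_size=(3, 3))
        self.expand = nn.Conv2d(mid, channels, kernel_size=(1, 1))

    def forward(self, x):
        t = F.relu(self.reduce(x))
        # the exported graph pads explicitly instead of using padding=1
        t = F.relu(self.conv3x3(F.pad(t, [1, 1, 1, 1], value=0)))
        return F.relu(self.expand(t) + x)
```

The stride-2 pairs (such as `n_Conv_11` with `n_Conv_13`) are the downsampling variant of the same unit, where the shortcut is a strided 1x1 projection rather than the identity.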
t_514 = F.relu(t_513) + t_514_padded = F.pad(t_514, [1, 1, 1, 1], value=0) + t_515 = self.n_Conv_68(t_514_padded) + t_516 = F.relu(t_515) + t_517 = self.n_Conv_69(t_516) + t_518 = torch.add(t_517, t_512) + t_519 = F.relu(t_518) + t_520 = self.n_Conv_70(t_519) + t_521 = F.relu(t_520) + t_521_padded = F.pad(t_521, [1, 1, 1, 1], value=0) + t_522 = self.n_Conv_71(t_521_padded) + t_523 = F.relu(t_522) + t_524 = self.n_Conv_72(t_523) + t_525 = torch.add(t_524, t_519) + t_526 = F.relu(t_525) + t_527 = self.n_Conv_73(t_526) + t_528 = F.relu(t_527) + t_528_padded = F.pad(t_528, [1, 1, 1, 1], value=0) + t_529 = self.n_Conv_74(t_528_padded) + t_530 = F.relu(t_529) + t_531 = self.n_Conv_75(t_530) + t_532 = torch.add(t_531, t_526) + t_533 = F.relu(t_532) + t_534 = self.n_Conv_76(t_533) + t_535 = F.relu(t_534) + t_535_padded = F.pad(t_535, [1, 1, 1, 1], value=0) + t_536 = self.n_Conv_77(t_535_padded) + t_537 = F.relu(t_536) + t_538 = self.n_Conv_78(t_537) + t_539 = torch.add(t_538, t_533) + t_540 = F.relu(t_539) + t_541 = self.n_Conv_79(t_540) + t_542 = F.relu(t_541) + t_542_padded = F.pad(t_542, [1, 1, 1, 1], value=0) + t_543 = self.n_Conv_80(t_542_padded) + t_544 = F.relu(t_543) + t_545 = self.n_Conv_81(t_544) + t_546 = torch.add(t_545, t_540) + t_547 = F.relu(t_546) + t_548 = self.n_Conv_82(t_547) + t_549 = F.relu(t_548) + t_549_padded = F.pad(t_549, [1, 1, 1, 1], value=0) + t_550 = self.n_Conv_83(t_549_padded) + t_551 = F.relu(t_550) + t_552 = self.n_Conv_84(t_551) + t_553 = torch.add(t_552, t_547) + t_554 = F.relu(t_553) + t_555 = self.n_Conv_85(t_554) + t_556 = F.relu(t_555) + t_556_padded = F.pad(t_556, [1, 1, 1, 1], value=0) + t_557 = self.n_Conv_86(t_556_padded) + t_558 = F.relu(t_557) + t_559 = self.n_Conv_87(t_558) + t_560 = torch.add(t_559, t_554) + t_561 = F.relu(t_560) + t_562 = self.n_Conv_88(t_561) + t_563 = F.relu(t_562) + t_563_padded = F.pad(t_563, [1, 1, 1, 1], value=0) + t_564 = self.n_Conv_89(t_563_padded) + t_565 = F.relu(t_564) + t_566 = self.n_Conv_90(t_565) + t_567 = torch.add(t_566, t_561) + t_568 = F.relu(t_567) + t_569 = self.n_Conv_91(t_568) + t_570 = F.relu(t_569) + t_570_padded = F.pad(t_570, [1, 1, 1, 1], value=0) + t_571 = self.n_Conv_92(t_570_padded) + t_572 = F.relu(t_571) + t_573 = self.n_Conv_93(t_572) + t_574 = torch.add(t_573, t_568) + t_575 = F.relu(t_574) + t_576 = self.n_Conv_94(t_575) + t_577 = F.relu(t_576) + t_577_padded = F.pad(t_577, [1, 1, 1, 1], value=0) + t_578 = self.n_Conv_95(t_577_padded) + t_579 = F.relu(t_578) + t_580 = self.n_Conv_96(t_579) + t_581 = torch.add(t_580, t_575) + t_582 = F.relu(t_581) + t_583 = self.n_Conv_97(t_582) + t_584 = F.relu(t_583) + t_584_padded = F.pad(t_584, [0, 1, 0, 1], value=0) + t_585 = self.n_Conv_98(t_584_padded) + t_586 = F.relu(t_585) + t_587 = self.n_Conv_99(t_586) + t_588 = self.n_Conv_100(t_582) + t_589 = torch.add(t_587, t_588) + t_590 = F.relu(t_589) + t_591 = self.n_Conv_101(t_590) + t_592 = F.relu(t_591) + t_592_padded = F.pad(t_592, [1, 1, 1, 1], value=0) + t_593 = self.n_Conv_102(t_592_padded) + t_594 = F.relu(t_593) + t_595 = self.n_Conv_103(t_594) + t_596 = torch.add(t_595, t_590) + t_597 = F.relu(t_596) + t_598 = self.n_Conv_104(t_597) + t_599 = F.relu(t_598) + t_599_padded = F.pad(t_599, [1, 1, 1, 1], value=0) + t_600 = self.n_Conv_105(t_599_padded) + t_601 = F.relu(t_600) + t_602 = self.n_Conv_106(t_601) + t_603 = torch.add(t_602, t_597) + t_604 = F.relu(t_603) + t_605 = self.n_Conv_107(t_604) + t_606 = F.relu(t_605) + t_606_padded = F.pad(t_606, [1, 1, 1, 1], value=0) + t_607 = 
self.n_Conv_108(t_606_padded) + t_608 = F.relu(t_607) + t_609 = self.n_Conv_109(t_608) + t_610 = torch.add(t_609, t_604) + t_611 = F.relu(t_610) + t_612 = self.n_Conv_110(t_611) + t_613 = F.relu(t_612) + t_613_padded = F.pad(t_613, [1, 1, 1, 1], value=0) + t_614 = self.n_Conv_111(t_613_padded) + t_615 = F.relu(t_614) + t_616 = self.n_Conv_112(t_615) + t_617 = torch.add(t_616, t_611) + t_618 = F.relu(t_617) + t_619 = self.n_Conv_113(t_618) + t_620 = F.relu(t_619) + t_620_padded = F.pad(t_620, [1, 1, 1, 1], value=0) + t_621 = self.n_Conv_114(t_620_padded) + t_622 = F.relu(t_621) + t_623 = self.n_Conv_115(t_622) + t_624 = torch.add(t_623, t_618) + t_625 = F.relu(t_624) + t_626 = self.n_Conv_116(t_625) + t_627 = F.relu(t_626) + t_627_padded = F.pad(t_627, [1, 1, 1, 1], value=0) + t_628 = self.n_Conv_117(t_627_padded) + t_629 = F.relu(t_628) + t_630 = self.n_Conv_118(t_629) + t_631 = torch.add(t_630, t_625) + t_632 = F.relu(t_631) + t_633 = self.n_Conv_119(t_632) + t_634 = F.relu(t_633) + t_634_padded = F.pad(t_634, [1, 1, 1, 1], value=0) + t_635 = self.n_Conv_120(t_634_padded) + t_636 = F.relu(t_635) + t_637 = self.n_Conv_121(t_636) + t_638 = torch.add(t_637, t_632) + t_639 = F.relu(t_638) + t_640 = self.n_Conv_122(t_639) + t_641 = F.relu(t_640) + t_641_padded = F.pad(t_641, [1, 1, 1, 1], value=0) + t_642 = self.n_Conv_123(t_641_padded) + t_643 = F.relu(t_642) + t_644 = self.n_Conv_124(t_643) + t_645 = torch.add(t_644, t_639) + t_646 = F.relu(t_645) + t_647 = self.n_Conv_125(t_646) + t_648 = F.relu(t_647) + t_648_padded = F.pad(t_648, [1, 1, 1, 1], value=0) + t_649 = self.n_Conv_126(t_648_padded) + t_650 = F.relu(t_649) + t_651 = self.n_Conv_127(t_650) + t_652 = torch.add(t_651, t_646) + t_653 = F.relu(t_652) + t_654 = self.n_Conv_128(t_653) + t_655 = F.relu(t_654) + t_655_padded = F.pad(t_655, [1, 1, 1, 1], value=0) + t_656 = self.n_Conv_129(t_655_padded) + t_657 = F.relu(t_656) + t_658 = self.n_Conv_130(t_657) + t_659 = torch.add(t_658, t_653) + t_660 = F.relu(t_659) + t_661 = self.n_Conv_131(t_660) + t_662 = F.relu(t_661) + t_662_padded = F.pad(t_662, [1, 1, 1, 1], value=0) + t_663 = self.n_Conv_132(t_662_padded) + t_664 = F.relu(t_663) + t_665 = self.n_Conv_133(t_664) + t_666 = torch.add(t_665, t_660) + t_667 = F.relu(t_666) + t_668 = self.n_Conv_134(t_667) + t_669 = F.relu(t_668) + t_669_padded = F.pad(t_669, [1, 1, 1, 1], value=0) + t_670 = self.n_Conv_135(t_669_padded) + t_671 = F.relu(t_670) + t_672 = self.n_Conv_136(t_671) + t_673 = torch.add(t_672, t_667) + t_674 = F.relu(t_673) + t_675 = self.n_Conv_137(t_674) + t_676 = F.relu(t_675) + t_676_padded = F.pad(t_676, [1, 1, 1, 1], value=0) + t_677 = self.n_Conv_138(t_676_padded) + t_678 = F.relu(t_677) + t_679 = self.n_Conv_139(t_678) + t_680 = torch.add(t_679, t_674) + t_681 = F.relu(t_680) + t_682 = self.n_Conv_140(t_681) + t_683 = F.relu(t_682) + t_683_padded = F.pad(t_683, [1, 1, 1, 1], value=0) + t_684 = self.n_Conv_141(t_683_padded) + t_685 = F.relu(t_684) + t_686 = self.n_Conv_142(t_685) + t_687 = torch.add(t_686, t_681) + t_688 = F.relu(t_687) + t_689 = self.n_Conv_143(t_688) + t_690 = F.relu(t_689) + t_690_padded = F.pad(t_690, [1, 1, 1, 1], value=0) + t_691 = self.n_Conv_144(t_690_padded) + t_692 = F.relu(t_691) + t_693 = self.n_Conv_145(t_692) + t_694 = torch.add(t_693, t_688) + t_695 = F.relu(t_694) + t_696 = self.n_Conv_146(t_695) + t_697 = F.relu(t_696) + t_697_padded = F.pad(t_697, [1, 1, 1, 1], value=0) + t_698 = self.n_Conv_147(t_697_padded) + t_699 = F.relu(t_698) + t_700 = self.n_Conv_148(t_699) + t_701 = 
torch.add(t_700, t_695) + t_702 = F.relu(t_701) + t_703 = self.n_Conv_149(t_702) + t_704 = F.relu(t_703) + t_704_padded = F.pad(t_704, [1, 1, 1, 1], value=0) + t_705 = self.n_Conv_150(t_704_padded) + t_706 = F.relu(t_705) + t_707 = self.n_Conv_151(t_706) + t_708 = torch.add(t_707, t_702) + t_709 = F.relu(t_708) + t_710 = self.n_Conv_152(t_709) + t_711 = F.relu(t_710) + t_711_padded = F.pad(t_711, [1, 1, 1, 1], value=0) + t_712 = self.n_Conv_153(t_711_padded) + t_713 = F.relu(t_712) + t_714 = self.n_Conv_154(t_713) + t_715 = torch.add(t_714, t_709) + t_716 = F.relu(t_715) + t_717 = self.n_Conv_155(t_716) + t_718 = F.relu(t_717) + t_718_padded = F.pad(t_718, [1, 1, 1, 1], value=0) + t_719 = self.n_Conv_156(t_718_padded) + t_720 = F.relu(t_719) + t_721 = self.n_Conv_157(t_720) + t_722 = torch.add(t_721, t_716) + t_723 = F.relu(t_722) + t_724 = self.n_Conv_158(t_723) + t_725 = self.n_Conv_159(t_723) + t_726 = F.relu(t_725) + t_726_padded = F.pad(t_726, [0, 1, 0, 1], value=0) + t_727 = self.n_Conv_160(t_726_padded) + t_728 = F.relu(t_727) + t_729 = self.n_Conv_161(t_728) + t_730 = torch.add(t_729, t_724) + t_731 = F.relu(t_730) + t_732 = self.n_Conv_162(t_731) + t_733 = F.relu(t_732) + t_733_padded = F.pad(t_733, [1, 1, 1, 1], value=0) + t_734 = self.n_Conv_163(t_733_padded) + t_735 = F.relu(t_734) + t_736 = self.n_Conv_164(t_735) + t_737 = torch.add(t_736, t_731) + t_738 = F.relu(t_737) + t_739 = self.n_Conv_165(t_738) + t_740 = F.relu(t_739) + t_740_padded = F.pad(t_740, [1, 1, 1, 1], value=0) + t_741 = self.n_Conv_166(t_740_padded) + t_742 = F.relu(t_741) + t_743 = self.n_Conv_167(t_742) + t_744 = torch.add(t_743, t_738) + t_745 = F.relu(t_744) + t_746 = self.n_Conv_168(t_745) + t_747 = self.n_Conv_169(t_745) + t_748 = F.relu(t_747) + t_748_padded = F.pad(t_748, [0, 1, 0, 1], value=0) + t_749 = self.n_Conv_170(t_748_padded) + t_750 = F.relu(t_749) + t_751 = self.n_Conv_171(t_750) + t_752 = torch.add(t_751, t_746) + t_753 = F.relu(t_752) + t_754 = self.n_Conv_172(t_753) + t_755 = F.relu(t_754) + t_755_padded = F.pad(t_755, [1, 1, 1, 1], value=0) + t_756 = self.n_Conv_173(t_755_padded) + t_757 = F.relu(t_756) + t_758 = self.n_Conv_174(t_757) + t_759 = torch.add(t_758, t_753) + t_760 = F.relu(t_759) + t_761 = self.n_Conv_175(t_760) + t_762 = F.relu(t_761) + t_762_padded = F.pad(t_762, [1, 1, 1, 1], value=0) + t_763 = self.n_Conv_176(t_762_padded) + t_764 = F.relu(t_763) + t_765 = self.n_Conv_177(t_764) + t_766 = torch.add(t_765, t_760) + t_767 = F.relu(t_766) + t_768 = self.n_Conv_178(t_767) + t_769 = F.avg_pool2d(t_768, kernel_size=t_768.shape[-2:]) + t_770 = torch.squeeze(t_769, 3) + t_770 = torch.squeeze(t_770, 2) + t_771 = torch.sigmoid(t_770) + return t_771 + + def load_state_dict(self, state_dict, **kwargs): + self.tags = state_dict.get('tags', []) + + super(DeepDanbooruModel, self).load_state_dict({k: v for k, v in state_dict.items() if k != 'tags'}) + diff --git a/modules/devices.py b/modules/devices.py new file mode 100644 index 0000000000000000000000000000000000000000..57e51da30e26f0586c14321b5c0453f8a3ba5c64 --- /dev/null +++ b/modules/devices.py @@ -0,0 +1,171 @@ +import sys +import contextlib +from functools import lru_cache + +import torch +from modules import errors + +if sys.platform == "darwin": + from modules import mac_specific + + +def has_mps() -> bool: + if sys.platform != "darwin": + return False + else: + return mac_specific.has_mps + + +def get_cuda_device_string(): + from modules import shared + + if shared.cmd_opts.device_id is not None: + return 
f"cuda:{shared.cmd_opts.device_id}" + + return "cuda" + + +def get_optimal_device_name(): + if torch.cuda.is_available(): + return get_cuda_device_string() + + if has_mps(): + return "mps" + + return "cpu" + + +def get_optimal_device(): + return torch.device(get_optimal_device_name()) + + +def get_device_for(task): + from modules import shared + + if task in shared.cmd_opts.use_cpu: + return cpu + + return get_optimal_device() + + +def torch_gc(): + + if torch.cuda.is_available(): + with torch.cuda.device(get_cuda_device_string()): + torch.cuda.empty_cache() + torch.cuda.ipc_collect() + + if has_mps(): + mac_specific.torch_mps_gc() + + +def enable_tf32(): + if torch.cuda.is_available(): + + # enabling benchmark option seems to enable a range of cards to do fp16 when they otherwise can't + # see https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/4407 + if any(torch.cuda.get_device_capability(devid) == (7, 5) for devid in range(0, torch.cuda.device_count())): + torch.backends.cudnn.benchmark = True + + torch.backends.cuda.matmul.allow_tf32 = True + torch.backends.cudnn.allow_tf32 = True + + + +errors.run(enable_tf32, "Enabling TF32") + +cpu = torch.device("cpu") +device = device_interrogate = device_gfpgan = device_esrgan = device_codeformer = None +dtype = torch.float16 +dtype_vae = torch.float16 +dtype_unet = torch.float16 +unet_needs_upcast = False + + +def cond_cast_unet(input): + return input.to(dtype_unet) if unet_needs_upcast else input + + +def cond_cast_float(input): + return input.float() if unet_needs_upcast else input + + +def randn(seed, shape): + from modules.shared import opts + + torch.manual_seed(seed) + if opts.randn_source == "CPU" or device.type == 'mps': + return torch.randn(shape, device=cpu).to(device) + return torch.randn(shape, device=device) + + +def randn_without_seed(shape): + from modules.shared import opts + + if opts.randn_source == "CPU" or device.type == 'mps': + return torch.randn(shape, device=cpu).to(device) + return torch.randn(shape, device=device) + + +def autocast(disable=False): + from modules import shared + + if disable: + return contextlib.nullcontext() + + if dtype == torch.float32 or shared.cmd_opts.precision == "full": + return contextlib.nullcontext() + + return torch.autocast("cuda") + + +def without_autocast(disable=False): + return torch.autocast("cuda", enabled=False) if torch.is_autocast_enabled() and not disable else contextlib.nullcontext() + + +class NansException(Exception): + pass + + +def test_for_nans(x, where): + from modules import shared + + if shared.cmd_opts.disable_nan_check: + return + + if not torch.all(torch.isnan(x)).item(): + return + + if where == "unet": + message = "A tensor with all NaNs was produced in Unet." + + if not shared.cmd_opts.no_half: + message += " This could be either because there's not enough precision to represent the picture, or because your video card does not support half type. Try setting the \"Upcast cross attention layer to float32\" option in Settings > Stable Diffusion or using the --no-half commandline argument to fix this." + + elif where == "vae": + message = "A tensor with all NaNs was produced in VAE." + + if not shared.cmd_opts.no_half and not shared.cmd_opts.no_half_vae: + message += " This could be because there's not enough precision to represent the picture. Try adding --no-half-vae commandline argument to fix this." + else: + message = "A tensor with all NaNs was produced." + + message += " Use --disable-nan-check commandline argument to disable this check." 
+ + raise NansException(message) + + +@lru_cache +def first_time_calculation(): + """ + just do any calculation with pytorch layers - the first time this is done it allocates about 700MB of memory and + spends about 2.7 seconds doing that, at least with NVidia. + """ + + x = torch.zeros((1, 1)).to(device, dtype) + linear = torch.nn.Linear(1, 1).to(device, dtype) + linear(x) + + x = torch.zeros((1, 1, 3, 3)).to(device, dtype) + conv2d = torch.nn.Conv2d(1, 1, (3, 3)).to(device, dtype) + conv2d(x) diff --git a/modules/errors.py b/modules/errors.py new file mode 100644 index 0000000000000000000000000000000000000000..23bc885d714817c17d3bd48b49ff64b830b29159 --- /dev/null +++ b/modules/errors.py @@ -0,0 +1,85 @@ +import sys +import textwrap +import traceback + + +exception_records = [] + + +def record_exception(): + _, e, tb = sys.exc_info() + if e is None: + return + + if exception_records and exception_records[-1][0] == e: + return + + exception_records.append((e, tb)) + + if len(exception_records) > 5: + exception_records.pop(0) + + +def report(message: str, *, exc_info: bool = False) -> None: + """ + Print an error message to stderr, with optional traceback. + """ + + record_exception() + + for line in message.splitlines(): + print("***", line, file=sys.stderr) + if exc_info: + print(textwrap.indent(traceback.format_exc(), " "), file=sys.stderr) + print("---", file=sys.stderr) + + +def print_error_explanation(message): + record_exception() + + lines = message.strip().split("\n") + max_len = max([len(x) for x in lines]) + + print('=' * max_len, file=sys.stderr) + for line in lines: + print(line, file=sys.stderr) + print('=' * max_len, file=sys.stderr) + + +def display(e: Exception, task, *, full_traceback=False): + record_exception() + + print(f"{task or 'error'}: {type(e).__name__}", file=sys.stderr) + te = traceback.TracebackException.from_exception(e) + if full_traceback: + # include frames leading up to the try-catch block + te.stack = traceback.StackSummary(traceback.extract_stack()[:-2] + te.stack) + print(*te.format(), sep="", file=sys.stderr) + + message = str(e) + if "copying a param with shape torch.Size([640, 1024]) from checkpoint, the shape in current model is torch.Size([640, 768])" in message: + print_error_explanation(""" +The most likely cause of this is you are trying to load Stable Diffusion 2.0 model without specifying its config file. +See https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#stable-diffusion-20 for how to solve this.
+ """) + + +already_displayed = {} + + +def display_once(e: Exception, task): + record_exception() + + if task in already_displayed: + return + + display(e, task) + + already_displayed[task] = 1 + + +def run(code, task): + try: + code() + except Exception as e: + display(task, e) diff --git a/modules/esrgan_model.py b/modules/esrgan_model.py new file mode 100644 index 0000000000000000000000000000000000000000..1e4260e2c62dbb14387e90e369dc109f435867b0 --- /dev/null +++ b/modules/esrgan_model.py @@ -0,0 +1,229 @@ +import sys + +import numpy as np +import torch +from PIL import Image + +import modules.esrgan_model_arch as arch +from modules import modelloader, images, devices +from modules.shared import opts +from modules.upscaler import Upscaler, UpscalerData + + +def mod2normal(state_dict): + # this code is copied from https://github.com/victorca25/iNNfer + if 'conv_first.weight' in state_dict: + crt_net = {} + items = list(state_dict) + + crt_net['model.0.weight'] = state_dict['conv_first.weight'] + crt_net['model.0.bias'] = state_dict['conv_first.bias'] + + for k in items.copy(): + if 'RDB' in k: + ori_k = k.replace('RRDB_trunk.', 'model.1.sub.') + if '.weight' in k: + ori_k = ori_k.replace('.weight', '.0.weight') + elif '.bias' in k: + ori_k = ori_k.replace('.bias', '.0.bias') + crt_net[ori_k] = state_dict[k] + items.remove(k) + + crt_net['model.1.sub.23.weight'] = state_dict['trunk_conv.weight'] + crt_net['model.1.sub.23.bias'] = state_dict['trunk_conv.bias'] + crt_net['model.3.weight'] = state_dict['upconv1.weight'] + crt_net['model.3.bias'] = state_dict['upconv1.bias'] + crt_net['model.6.weight'] = state_dict['upconv2.weight'] + crt_net['model.6.bias'] = state_dict['upconv2.bias'] + crt_net['model.8.weight'] = state_dict['HRconv.weight'] + crt_net['model.8.bias'] = state_dict['HRconv.bias'] + crt_net['model.10.weight'] = state_dict['conv_last.weight'] + crt_net['model.10.bias'] = state_dict['conv_last.bias'] + state_dict = crt_net + return state_dict + + +def resrgan2normal(state_dict, nb=23): + # this code is copied from https://github.com/victorca25/iNNfer + if "conv_first.weight" in state_dict and "body.0.rdb1.conv1.weight" in state_dict: + re8x = 0 + crt_net = {} + items = list(state_dict) + + crt_net['model.0.weight'] = state_dict['conv_first.weight'] + crt_net['model.0.bias'] = state_dict['conv_first.bias'] + + for k in items.copy(): + if "rdb" in k: + ori_k = k.replace('body.', 'model.1.sub.') + ori_k = ori_k.replace('.rdb', '.RDB') + if '.weight' in k: + ori_k = ori_k.replace('.weight', '.0.weight') + elif '.bias' in k: + ori_k = ori_k.replace('.bias', '.0.bias') + crt_net[ori_k] = state_dict[k] + items.remove(k) + + crt_net[f'model.1.sub.{nb}.weight'] = state_dict['conv_body.weight'] + crt_net[f'model.1.sub.{nb}.bias'] = state_dict['conv_body.bias'] + crt_net['model.3.weight'] = state_dict['conv_up1.weight'] + crt_net['model.3.bias'] = state_dict['conv_up1.bias'] + crt_net['model.6.weight'] = state_dict['conv_up2.weight'] + crt_net['model.6.bias'] = state_dict['conv_up2.bias'] + + if 'conv_up3.weight' in state_dict: + # modification supporting: https://github.com/ai-forever/Real-ESRGAN/blob/main/RealESRGAN/rrdbnet_arch.py + re8x = 3 + crt_net['model.9.weight'] = state_dict['conv_up3.weight'] + crt_net['model.9.bias'] = state_dict['conv_up3.bias'] + + crt_net[f'model.{8+re8x}.weight'] = state_dict['conv_hr.weight'] + crt_net[f'model.{8+re8x}.bias'] = state_dict['conv_hr.bias'] + crt_net[f'model.{10+re8x}.weight'] = state_dict['conv_last.weight'] + 
crt_net[f'model.{10+re8x}.bias'] = state_dict['conv_last.bias'] + + state_dict = crt_net + return state_dict + + +def infer_params(state_dict): + # this code is copied from https://github.com/victorca25/iNNfer + scale2x = 0 + scalemin = 6 + n_uplayer = 0 + plus = False + + for block in list(state_dict): + parts = block.split(".") + n_parts = len(parts) + if n_parts == 5 and parts[2] == "sub": + nb = int(parts[3]) + elif n_parts == 3: + part_num = int(parts[1]) + if (part_num > scalemin + and parts[0] == "model" + and parts[2] == "weight"): + scale2x += 1 + if part_num > n_uplayer: + n_uplayer = part_num + out_nc = state_dict[block].shape[0] + if not plus and "conv1x1" in block: + plus = True + + nf = state_dict["model.0.weight"].shape[0] + in_nc = state_dict["model.0.weight"].shape[1] + out_nc = out_nc + scale = 2 ** scale2x + + return in_nc, out_nc, nf, nb, plus, scale + + +class UpscalerESRGAN(Upscaler): + def __init__(self, dirname): + self.name = "ESRGAN" + self.model_url = "https://github.com/cszn/KAIR/releases/download/v1.0/ESRGAN.pth" + self.model_name = "ESRGAN_4x" + self.scalers = [] + self.user_path = dirname + super().__init__() + model_paths = self.find_models(ext_filter=[".pt", ".pth"]) + if len(model_paths) == 0: + scaler_data = UpscalerData(self.model_name, self.model_url, self, 4) + self.scalers.append(scaler_data) + for file in model_paths: + if file.startswith("http"): + name = self.model_name + else: + name = modelloader.friendly_name(file) + + scaler_data = UpscalerData(name, file, self, 4) + self.scalers.append(scaler_data) + + def do_upscale(self, img, selected_model): + try: + model = self.load_model(selected_model) + except Exception as e: + print(f"Unable to load ESRGAN model {selected_model}: {e}", file=sys.stderr) + return img + model.to(devices.device_esrgan) + img = esrgan_upscale(model, img) + return img + + def load_model(self, path: str): + if path.startswith("http"): + # TODO: this doesn't use `path` at all?
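`infer_params` above recovers the architecture hyperparameters from state-dict keys alone; notably, every upsampling stage pushes one more `model.<n>.weight` entry past index 6, so the scale factor is `2 ** count`. A toy illustration with a hypothetical key list for a 4x model (`load_model` resumes below):

```python
# For the old ESRGAN layout, "model.8" (HRconv) and "model.10" (conv_last) sit
# above index 6 in a 4x model; an 8x model shifts one more entry past 6.
keys = ["model.0.weight", "model.3.weight", "model.6.weight", "model.8.weight", "model.10.weight"]

scale2x = 0
for key in keys:
    parts = key.split(".")
    if len(parts) == 3 and parts[0] == "model" and parts[2] == "weight" and int(parts[1]) > 6:
        scale2x += 1

print(2 ** scale2x)  # -> 4
```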
+ filename = modelloader.load_file_from_url( + url=self.model_url, + model_dir=self.model_download_path, + file_name=f"{self.model_name}.pth", + ) + else: + filename = path + + state_dict = torch.load(filename, map_location='cpu' if devices.device_esrgan.type == 'mps' else None) + + if "params_ema" in state_dict: + state_dict = state_dict["params_ema"] + elif "params" in state_dict: + state_dict = state_dict["params"] + num_conv = 16 if "realesr-animevideov3" in filename else 32 + model = arch.SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=num_conv, upscale=4, act_type='prelu') + model.load_state_dict(state_dict) + model.eval() + return model + + if "body.0.rdb1.conv1.weight" in state_dict and "conv_first.weight" in state_dict: + nb = 6 if "RealESRGAN_x4plus_anime_6B" in filename else 23 + state_dict = resrgan2normal(state_dict, nb) + elif "conv_first.weight" in state_dict: + state_dict = mod2normal(state_dict) + elif "model.0.weight" not in state_dict: + raise Exception("The file is not a recognized ESRGAN model.") + + in_nc, out_nc, nf, nb, plus, mscale = infer_params(state_dict) + + model = arch.RRDBNet(in_nc=in_nc, out_nc=out_nc, nf=nf, nb=nb, upscale=mscale, plus=plus) + model.load_state_dict(state_dict) + model.eval() + + return model + + +def upscale_without_tiling(model, img): + img = np.array(img) + img = img[:, :, ::-1] + img = np.ascontiguousarray(np.transpose(img, (2, 0, 1))) / 255 + img = torch.from_numpy(img).float() + img = img.unsqueeze(0).to(devices.device_esrgan) + with torch.no_grad(): + output = model(img) + output = output.squeeze().float().cpu().clamp_(0, 1).numpy() + output = 255. * np.moveaxis(output, 0, 2) + output = output.astype(np.uint8) + output = output[:, :, ::-1] + return Image.fromarray(output, 'RGB') + + +def esrgan_upscale(model, img): + if opts.ESRGAN_tile == 0: + return upscale_without_tiling(model, img) + + grid = images.split_grid(img, opts.ESRGAN_tile, opts.ESRGAN_tile, opts.ESRGAN_tile_overlap) + newtiles = [] + scale_factor = 1 + + for y, h, row in grid.tiles: + newrow = [] + for tiledata in row: + x, w, tile = tiledata + + output = upscale_without_tiling(model, tile) + scale_factor = output.width // tile.width + + newrow.append([x * scale_factor, w * scale_factor, output]) + newtiles.append([y * scale_factor, h * scale_factor, newrow]) + + newgrid = images.Grid(newtiles, grid.tile_w * scale_factor, grid.tile_h * scale_factor, grid.image_w * scale_factor, grid.image_h * scale_factor, grid.overlap * scale_factor) + output = images.combine_grid(newgrid) + return output diff --git a/modules/esrgan_model_arch.py b/modules/esrgan_model_arch.py new file mode 100644 index 0000000000000000000000000000000000000000..353c70dd867cb894a0ac208f39394280175e4e14 --- /dev/null +++ b/modules/esrgan_model_arch.py @@ -0,0 +1,465 @@ +# this file is adapted from https://github.com/victorca25/iNNfer + +from collections import OrderedDict +import math +import torch +import torch.nn as nn +import torch.nn.functional as F + + +#################### +# RRDBNet Generator +#################### + +class RRDBNet(nn.Module): + def __init__(self, in_nc, out_nc, nf, nb, nr=3, gc=32, upscale=4, norm_type=None, + act_type='leakyrelu', mode='CNA', upsample_mode='upconv', convtype='Conv2D', + finalact=None, gaussian_noise=False, plus=False): + super(RRDBNet, self).__init__() + n_upscale = int(math.log(upscale, 2)) + if upscale == 3: + n_upscale = 1 + + self.resrgan_scale = 0 + if in_nc % 16 == 0: + self.resrgan_scale = 1 + elif in_nc != 4 and in_nc % 4 == 0: + 
self.resrgan_scale = 2 + + fea_conv = conv_block(in_nc, nf, kernel_size=3, norm_type=None, act_type=None, convtype=convtype) + rb_blocks = [RRDB(nf, nr, kernel_size=3, gc=32, stride=1, bias=1, pad_type='zero', + norm_type=norm_type, act_type=act_type, mode='CNA', convtype=convtype, + gaussian_noise=gaussian_noise, plus=plus) for _ in range(nb)] + LR_conv = conv_block(nf, nf, kernel_size=3, norm_type=norm_type, act_type=None, mode=mode, convtype=convtype) + + if upsample_mode == 'upconv': + upsample_block = upconv_block + elif upsample_mode == 'pixelshuffle': + upsample_block = pixelshuffle_block + else: + raise NotImplementedError(f'upsample mode [{upsample_mode}] is not found') + if upscale == 3: + upsampler = upsample_block(nf, nf, 3, act_type=act_type, convtype=convtype) + else: + upsampler = [upsample_block(nf, nf, act_type=act_type, convtype=convtype) for _ in range(n_upscale)] + HR_conv0 = conv_block(nf, nf, kernel_size=3, norm_type=None, act_type=act_type, convtype=convtype) + HR_conv1 = conv_block(nf, out_nc, kernel_size=3, norm_type=None, act_type=None, convtype=convtype) + + outact = act(finalact) if finalact else None + + self.model = sequential(fea_conv, ShortcutBlock(sequential(*rb_blocks, LR_conv)), + *upsampler, HR_conv0, HR_conv1, outact) + + def forward(self, x, outm=None): + if self.resrgan_scale == 1: + feat = pixel_unshuffle(x, scale=4) + elif self.resrgan_scale == 2: + feat = pixel_unshuffle(x, scale=2) + else: + feat = x + + return self.model(feat) + + +class RRDB(nn.Module): + """ + Residual in Residual Dense Block + (ESRGAN: Enhanced Super-Resolution Generative Adversarial Networks) + """ + + def __init__(self, nf, nr=3, kernel_size=3, gc=32, stride=1, bias=1, pad_type='zero', + norm_type=None, act_type='leakyrelu', mode='CNA', convtype='Conv2D', + spectral_norm=False, gaussian_noise=False, plus=False): + super(RRDB, self).__init__() + # This is for backwards compatibility with existing models + if nr == 3: + self.RDB1 = ResidualDenseBlock_5C(nf, kernel_size, gc, stride, bias, pad_type, + norm_type, act_type, mode, convtype, spectral_norm=spectral_norm, + gaussian_noise=gaussian_noise, plus=plus) + self.RDB2 = ResidualDenseBlock_5C(nf, kernel_size, gc, stride, bias, pad_type, + norm_type, act_type, mode, convtype, spectral_norm=spectral_norm, + gaussian_noise=gaussian_noise, plus=plus) + self.RDB3 = ResidualDenseBlock_5C(nf, kernel_size, gc, stride, bias, pad_type, + norm_type, act_type, mode, convtype, spectral_norm=spectral_norm, + gaussian_noise=gaussian_noise, plus=plus) + else: + RDB_list = [ResidualDenseBlock_5C(nf, kernel_size, gc, stride, bias, pad_type, + norm_type, act_type, mode, convtype, spectral_norm=spectral_norm, + gaussian_noise=gaussian_noise, plus=plus) for _ in range(nr)] + self.RDBs = nn.Sequential(*RDB_list) + + def forward(self, x): + if hasattr(self, 'RDB1'): + out = self.RDB1(x) + out = self.RDB2(out) + out = self.RDB3(out) + else: + out = self.RDBs(x) + return out * 0.2 + x + + +class ResidualDenseBlock_5C(nn.Module): + """ + Residual Dense Block + The core module of paper: (Residual Dense Network for Image Super-Resolution, CVPR 18) + Modified options that can be used: + - "Partial Convolution based Padding" arXiv:1811.11718 + - "Spectral normalization" arXiv:1802.05957 + - "ICASSP 2020 - ESRGAN+ : Further Improving ESRGAN" N. C. + {Rakotonirina} and A. 
{Rasoanaivo} + """ + + def __init__(self, nf=64, kernel_size=3, gc=32, stride=1, bias=1, pad_type='zero', + norm_type=None, act_type='leakyrelu', mode='CNA', convtype='Conv2D', + spectral_norm=False, gaussian_noise=False, plus=False): + super(ResidualDenseBlock_5C, self).__init__() + + self.noise = GaussianNoise() if gaussian_noise else None + self.conv1x1 = conv1x1(nf, gc) if plus else None + + self.conv1 = conv_block(nf, gc, kernel_size, stride, bias=bias, pad_type=pad_type, + norm_type=norm_type, act_type=act_type, mode=mode, convtype=convtype, + spectral_norm=spectral_norm) + self.conv2 = conv_block(nf+gc, gc, kernel_size, stride, bias=bias, pad_type=pad_type, + norm_type=norm_type, act_type=act_type, mode=mode, convtype=convtype, + spectral_norm=spectral_norm) + self.conv3 = conv_block(nf+2*gc, gc, kernel_size, stride, bias=bias, pad_type=pad_type, + norm_type=norm_type, act_type=act_type, mode=mode, convtype=convtype, + spectral_norm=spectral_norm) + self.conv4 = conv_block(nf+3*gc, gc, kernel_size, stride, bias=bias, pad_type=pad_type, + norm_type=norm_type, act_type=act_type, mode=mode, convtype=convtype, + spectral_norm=spectral_norm) + if mode == 'CNA': + last_act = None + else: + last_act = act_type + self.conv5 = conv_block(nf+4*gc, nf, 3, stride, bias=bias, pad_type=pad_type, + norm_type=norm_type, act_type=last_act, mode=mode, convtype=convtype, + spectral_norm=spectral_norm) + + def forward(self, x): + x1 = self.conv1(x) + x2 = self.conv2(torch.cat((x, x1), 1)) + if self.conv1x1: + x2 = x2 + self.conv1x1(x) + x3 = self.conv3(torch.cat((x, x1, x2), 1)) + x4 = self.conv4(torch.cat((x, x1, x2, x3), 1)) + if self.conv1x1: + x4 = x4 + x2 + x5 = self.conv5(torch.cat((x, x1, x2, x3, x4), 1)) + if self.noise: + return self.noise(x5.mul(0.2) + x) + else: + return x5 * 0.2 + x + + +#################### +# ESRGANplus +#################### + +class GaussianNoise(nn.Module): + def __init__(self, sigma=0.1, is_relative_detach=False): + super().__init__() + self.sigma = sigma + self.is_relative_detach = is_relative_detach + self.noise = torch.tensor(0, dtype=torch.float) + + def forward(self, x): + if self.training and self.sigma != 0: + self.noise = self.noise.to(x.device) + scale = self.sigma * x.detach() if self.is_relative_detach else self.sigma * x + sampled_noise = self.noise.repeat(*x.size()).normal_() * scale + x = x + sampled_noise + return x + +def conv1x1(in_planes, out_planes, stride=1): + return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) + + +#################### +# SRVGGNetCompact +#################### + +class SRVGGNetCompact(nn.Module): + """A compact VGG-style network structure for super-resolution. 
+ This class is copied from https://github.com/xinntao/Real-ESRGAN + """ + + def __init__(self, num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=4, act_type='prelu'): + super(SRVGGNetCompact, self).__init__() + self.num_in_ch = num_in_ch + self.num_out_ch = num_out_ch + self.num_feat = num_feat + self.num_conv = num_conv + self.upscale = upscale + self.act_type = act_type + + self.body = nn.ModuleList() + # the first conv + self.body.append(nn.Conv2d(num_in_ch, num_feat, 3, 1, 1)) + # the first activation + if act_type == 'relu': + activation = nn.ReLU(inplace=True) + elif act_type == 'prelu': + activation = nn.PReLU(num_parameters=num_feat) + elif act_type == 'leakyrelu': + activation = nn.LeakyReLU(negative_slope=0.1, inplace=True) + self.body.append(activation) + + # the body structure + for _ in range(num_conv): + self.body.append(nn.Conv2d(num_feat, num_feat, 3, 1, 1)) + # activation + if act_type == 'relu': + activation = nn.ReLU(inplace=True) + elif act_type == 'prelu': + activation = nn.PReLU(num_parameters=num_feat) + elif act_type == 'leakyrelu': + activation = nn.LeakyReLU(negative_slope=0.1, inplace=True) + self.body.append(activation) + + # the last conv + self.body.append(nn.Conv2d(num_feat, num_out_ch * upscale * upscale, 3, 1, 1)) + # upsample + self.upsampler = nn.PixelShuffle(upscale) + + def forward(self, x): + out = x + for i in range(0, len(self.body)): + out = self.body[i](out) + + out = self.upsampler(out) + # add the nearest upsampled image, so that the network learns the residual + base = F.interpolate(x, scale_factor=self.upscale, mode='nearest') + out += base + return out + + +#################### +# Upsampler +#################### + +class Upsample(nn.Module): + r"""Upsamples a given multi-channel 1D (temporal), 2D (spatial) or 3D (volumetric) data. + The input data is assumed to be of the form + `minibatch x channels x [optional depth] x [optional height] x width`. + """ + + def __init__(self, size=None, scale_factor=None, mode="nearest", align_corners=None): + super(Upsample, self).__init__() + if isinstance(scale_factor, tuple): + self.scale_factor = tuple(float(factor) for factor in scale_factor) + else: + self.scale_factor = float(scale_factor) if scale_factor else None + self.mode = mode + self.size = size + self.align_corners = align_corners + + def forward(self, x): + return nn.functional.interpolate(x, size=self.size, scale_factor=self.scale_factor, mode=self.mode, align_corners=self.align_corners) + + def extra_repr(self): + if self.scale_factor is not None: + info = f'scale_factor={self.scale_factor}' + else: + info = f'size={self.size}' + info += f', mode={self.mode}' + return info + + +def pixel_unshuffle(x, scale): + """ Pixel unshuffle. + Args: + x (Tensor): Input feature with shape (b, c, hh, hw). + scale (int): Downsample ratio. + Returns: + Tensor: the pixel unshuffled feature. 
+ """ + b, c, hh, hw = x.size() + out_channel = c * (scale**2) + assert hh % scale == 0 and hw % scale == 0 + h = hh // scale + w = hw // scale + x_view = x.view(b, c, h, scale, w, scale) + return x_view.permute(0, 1, 3, 5, 2, 4).reshape(b, out_channel, h, w) + + +def pixelshuffle_block(in_nc, out_nc, upscale_factor=2, kernel_size=3, stride=1, bias=True, + pad_type='zero', norm_type=None, act_type='relu', convtype='Conv2D'): + """ + Pixel shuffle layer + (Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional + Neural Network, CVPR17) + """ + conv = conv_block(in_nc, out_nc * (upscale_factor ** 2), kernel_size, stride, bias=bias, + pad_type=pad_type, norm_type=None, act_type=None, convtype=convtype) + pixel_shuffle = nn.PixelShuffle(upscale_factor) + + n = norm(norm_type, out_nc) if norm_type else None + a = act(act_type) if act_type else None + return sequential(conv, pixel_shuffle, n, a) + + +def upconv_block(in_nc, out_nc, upscale_factor=2, kernel_size=3, stride=1, bias=True, + pad_type='zero', norm_type=None, act_type='relu', mode='nearest', convtype='Conv2D'): + """ Upconv layer """ + upscale_factor = (1, upscale_factor, upscale_factor) if convtype == 'Conv3D' else upscale_factor + upsample = Upsample(scale_factor=upscale_factor, mode=mode) + conv = conv_block(in_nc, out_nc, kernel_size, stride, bias=bias, + pad_type=pad_type, norm_type=norm_type, act_type=act_type, convtype=convtype) + return sequential(upsample, conv) + + + + + + + + +#################### +# Basic blocks +#################### + + +def make_layer(basic_block, num_basic_block, **kwarg): + """Make layers by stacking the same blocks. + Args: + basic_block (nn.module): nn.module class for basic block. (block) + num_basic_block (int): number of blocks. (n_layers) + Returns: + nn.Sequential: Stacked blocks in nn.Sequential. 
+ """ + layers = [] + for _ in range(num_basic_block): + layers.append(basic_block(**kwarg)) + return nn.Sequential(*layers) + + +def act(act_type, inplace=True, neg_slope=0.2, n_prelu=1, beta=1.0): + """ activation helper """ + act_type = act_type.lower() + if act_type == 'relu': + layer = nn.ReLU(inplace) + elif act_type in ('leakyrelu', 'lrelu'): + layer = nn.LeakyReLU(neg_slope, inplace) + elif act_type == 'prelu': + layer = nn.PReLU(num_parameters=n_prelu, init=neg_slope) + elif act_type == 'tanh': # [-1, 1] range output + layer = nn.Tanh() + elif act_type == 'sigmoid': # [0, 1] range output + layer = nn.Sigmoid() + else: + raise NotImplementedError(f'activation layer [{act_type}] is not found') + return layer + + +class Identity(nn.Module): + def __init__(self, *kwargs): + super(Identity, self).__init__() + + def forward(self, x, *kwargs): + return x + + +def norm(norm_type, nc): + """ Return a normalization layer """ + norm_type = norm_type.lower() + if norm_type == 'batch': + layer = nn.BatchNorm2d(nc, affine=True) + elif norm_type == 'instance': + layer = nn.InstanceNorm2d(nc, affine=False) + elif norm_type == 'none': + def norm_layer(x): return Identity() + else: + raise NotImplementedError(f'normalization layer [{norm_type}] is not found') + return layer + + +def pad(pad_type, padding): + """ padding layer helper """ + pad_type = pad_type.lower() + if padding == 0: + return None + if pad_type == 'reflect': + layer = nn.ReflectionPad2d(padding) + elif pad_type == 'replicate': + layer = nn.ReplicationPad2d(padding) + elif pad_type == 'zero': + layer = nn.ZeroPad2d(padding) + else: + raise NotImplementedError(f'padding layer [{pad_type}] is not implemented') + return layer + + +def get_valid_padding(kernel_size, dilation): + kernel_size = kernel_size + (kernel_size - 1) * (dilation - 1) + padding = (kernel_size - 1) // 2 + return padding + + +class ShortcutBlock(nn.Module): + """ Elementwise sum the output of a submodule to its input """ + def __init__(self, submodule): + super(ShortcutBlock, self).__init__() + self.sub = submodule + + def forward(self, x): + output = x + self.sub(x) + return output + + def __repr__(self): + return 'Identity + \n|' + self.sub.__repr__().replace('\n', '\n|') + + +def sequential(*args): + """ Flatten Sequential. It unwraps nn.Sequential. """ + if len(args) == 1: + if isinstance(args[0], OrderedDict): + raise NotImplementedError('sequential does not support OrderedDict input.') + return args[0] # No sequential is needed. 
+ modules = [] + for module in args: + if isinstance(module, nn.Sequential): + for submodule in module.children(): + modules.append(submodule) + elif isinstance(module, nn.Module): + modules.append(module) + return nn.Sequential(*modules) + + +def conv_block(in_nc, out_nc, kernel_size, stride=1, dilation=1, groups=1, bias=True, + pad_type='zero', norm_type=None, act_type='relu', mode='CNA', convtype='Conv2D', + spectral_norm=False): + """ Conv layer with padding, normalization, activation """ + assert mode in ['CNA', 'NAC', 'CNAC'], f'Wrong conv mode [{mode}]' + padding = get_valid_padding(kernel_size, dilation) + p = pad(pad_type, padding) if pad_type and pad_type != 'zero' else None + padding = padding if pad_type == 'zero' else 0 + + if convtype=='PartialConv2D': + from torchvision.ops import PartialConv2d # this is definitely not going to work, but PartialConv2d doesn't work anyway and this shuts up static analyzer + c = PartialConv2d(in_nc, out_nc, kernel_size=kernel_size, stride=stride, padding=padding, + dilation=dilation, bias=bias, groups=groups) + elif convtype=='DeformConv2D': + from torchvision.ops import DeformConv2d # not tested + c = DeformConv2d(in_nc, out_nc, kernel_size=kernel_size, stride=stride, padding=padding, + dilation=dilation, bias=bias, groups=groups) + elif convtype=='Conv3D': + c = nn.Conv3d(in_nc, out_nc, kernel_size=kernel_size, stride=stride, padding=padding, + dilation=dilation, bias=bias, groups=groups) + else: + c = nn.Conv2d(in_nc, out_nc, kernel_size=kernel_size, stride=stride, padding=padding, + dilation=dilation, bias=bias, groups=groups) + + if spectral_norm: + c = nn.utils.spectral_norm(c) + + a = act(act_type) if act_type else None + if 'CNA' in mode: + n = norm(norm_type, out_nc) if norm_type else None + return sequential(p, c, n, a) + elif mode == 'NAC': + if norm_type is None and act_type is not None: + a = act(act_type, inplace=False) + n = norm(norm_type, in_nc) if norm_type else None + return sequential(n, a, p, c) diff --git a/modules/extensions.py b/modules/extensions.py new file mode 100644 index 0000000000000000000000000000000000000000..127c5abacfbb40932c3ea05daf4874e8e39b2cf7 --- /dev/null +++ b/modules/extensions.py @@ -0,0 +1,163 @@ +import os +import threading + +from modules import shared, errors, cache +from modules.gitpython_hack import Repo +from modules.paths_internal import extensions_dir, extensions_builtin_dir, script_path # noqa: F401 + +extensions = [] + +os.makedirs(extensions_dir, exist_ok=True) + + +def active(): + if shared.opts.disable_all_extensions == "all": + return [] + elif shared.opts.disable_all_extensions == "extra": + return [x for x in extensions if x.enabled and x.is_builtin] + else: + return [x for x in extensions if x.enabled] + + +class Extension: + lock = threading.Lock() + cached_fields = ['remote', 'commit_date', 'branch', 'commit_hash', 'version'] + + def __init__(self, name, path, enabled=True, is_builtin=False): + self.name = name + self.path = path + self.enabled = enabled + self.status = '' + self.can_update = False + self.is_builtin = is_builtin + self.commit_hash = '' + self.commit_date = None + self.version = '' + self.branch = None + self.remote = None + self.have_info_from_repo = False + + def to_dict(self): + return {x: getattr(self, x) for x in self.cached_fields} + + def from_dict(self, d): + for field in self.cached_fields: + setattr(self, field, d[field]) + + def read_info_from_repo(self): + if self.is_builtin or self.have_info_from_repo: + return + + def read_from_repo(): + with 
self.lock: + if self.have_info_from_repo: + return + + self.do_read_info_from_repo() + + return self.to_dict() + try: + d = cache.cached_data_for_file('extensions-git', self.name, os.path.join(self.path, ".git"), read_from_repo) + self.from_dict(d) + except FileNotFoundError: + pass + self.status = 'unknown' if self.status == '' else self.status + + def do_read_info_from_repo(self): + repo = None + try: + if os.path.exists(os.path.join(self.path, ".git")): + repo = Repo(self.path) + except Exception: + errors.report(f"Error reading GitHub repository info from {self.path}", exc_info=True) + + if repo is None or repo.bare: + self.remote = None + else: + try: + self.remote = next(repo.remote().urls, None) + commit = repo.head.commit + self.commit_date = commit.committed_date + if repo.active_branch: + self.branch = repo.active_branch.name + self.commit_hash = commit.hexsha + self.version = self.commit_hash[:8] + + except Exception: + errors.report(f"Failed reading extension data from Git repository ({self.name})", exc_info=True) + self.remote = None + + self.have_info_from_repo = True + + def list_files(self, subdir, extension): + from modules import scripts + + dirpath = os.path.join(self.path, subdir) + if not os.path.isdir(dirpath): + return [] + + res = [] + for filename in sorted(os.listdir(dirpath)): + res.append(scripts.ScriptFile(self.path, filename, os.path.join(dirpath, filename))) + + res = [x for x in res if os.path.splitext(x.path)[1].lower() == extension and os.path.isfile(x.path)] + + return res + + def check_updates(self): + repo = Repo(self.path) + for fetch in repo.remote().fetch(dry_run=True): + if fetch.flags != fetch.HEAD_UPTODATE: + self.can_update = True + self.status = "new commits" + return + + try: + origin = repo.rev_parse('origin') + if repo.head.commit != origin: + self.can_update = True + self.status = "behind HEAD" + return + except Exception: + self.can_update = False + self.status = "unknown (remote error)" + return + + self.can_update = False + self.status = "latest" + + def fetch_and_reset_hard(self, commit='origin'): + repo = Repo(self.path) + # Fix: `error: Your local changes to the following files would be overwritten by merge`, + # because WSL2 Docker sets 755 file permissions instead of 644, which results in this error.
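`check_updates` above uses a dry-run fetch to detect new commits, and `fetch_and_reset_hard` (concluding below) then applies them destructively. A hedged GitPython sketch of the same two steps (the path is hypothetical, and the real code goes through `modules.gitpython_hack.Repo` rather than plain GitPython):

```python
from git import Repo  # GitPython

repo = Repo("/path/to/extension")  # hypothetical checkout
# Dry-run fetch reports what *would* change without touching the working tree.
has_updates = any(f.flags != f.HEAD_UPTODATE for f in repo.remote().fetch(dry_run=True))
if has_updates:
    repo.git.fetch(all=True)             # git fetch --all
    repo.git.reset("origin", hard=True)  # git reset --hard origin, discarding local edits
```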
+        repo.git.fetch(all=True)
+        repo.git.reset(commit, hard=True)
+        self.have_info_from_repo = False
+
+
+def list_extensions():
+    extensions.clear()
+
+    if not os.path.isdir(extensions_dir):
+        return
+
+    if shared.opts.disable_all_extensions == "all":
+        print("*** \"Disable all extensions\" option was set, will not load any extensions ***")
+    elif shared.opts.disable_all_extensions == "extra":
+        print("*** \"Disable all extensions\" option was set, will only load built-in extensions ***")
+
+    extension_paths = []
+    for dirname in [extensions_dir, extensions_builtin_dir]:
+        if not os.path.isdir(dirname):
+            return
+
+        for extension_dirname in sorted(os.listdir(dirname)):
+            path = os.path.join(dirname, extension_dirname)
+            if not os.path.isdir(path):
+                continue
+
+            extension_paths.append((extension_dirname, path, dirname == extensions_builtin_dir))
+
+    for dirname, path, is_builtin in extension_paths:
+        extension = Extension(name=dirname, path=path, enabled=dirname not in shared.opts.disabled_extensions, is_builtin=is_builtin)
+        extensions.append(extension)
diff --git a/modules/extra_networks.py b/modules/extra_networks.py
new file mode 100644
index 0000000000000000000000000000000000000000..0bd616ac7136a9a004eb70e79265cf54590a1371
--- /dev/null
+++ b/modules/extra_networks.py
@@ -0,0 +1,179 @@
+import re
+from collections import defaultdict
+
+from modules import errors
+
+extra_network_registry = {}
+extra_network_aliases = {}
+
+
+def initialize():
+    extra_network_registry.clear()
+    extra_network_aliases.clear()
+
+
+def register_extra_network(extra_network):
+    extra_network_registry[extra_network.name] = extra_network
+
+
+def register_extra_network_alias(extra_network, alias):
+    extra_network_aliases[alias] = extra_network
+
+
+def register_default_extra_networks():
+    from modules.extra_networks_hypernet import ExtraNetworkHypernet
+    register_extra_network(ExtraNetworkHypernet())
+
+
+class ExtraNetworkParams:
+    def __init__(self, items=None):
+        self.items = items or []
+        self.positional = []
+        self.named = {}
+
+        for item in self.items:
+            parts = item.split('=', 2) if isinstance(item, str) else [item]
+            if len(parts) == 2:
+                self.named[parts[0]] = parts[1]
+            else:
+                self.positional.append(item)
+
+    def __eq__(self, other):
+        return self.items == other.items
+
+
+class ExtraNetwork:
+    def __init__(self, name):
+        self.name = name
+
+    def activate(self, p, params_list):
+        """
+        Called by processing on every run. Whatever the extra network is meant to do should be activated here.
+        Passes arguments related to this extra network in params_list.
+        User passes arguments by specifying this in his prompt:
+
+        <name:arg1:arg2:arg3>
+
+        Where name matches the name of this ExtraNetwork object, and arg1:arg2:arg3 are any natural number of text arguments
+        separated by colon.
+
+        Even if the user does not mention this ExtraNetwork in his prompt, the call will still be made, with empty params_list -
+        in this case, all effects of this extra network should be disabled.
+
+        Can be called multiple times before deactivate() - each new call should override the previous call completely.
+
+        For example, if this ExtraNetwork's name is 'hypernet' and user's prompt is:
+
+        > "1girl, <hypernet:agm:1.1> <hypernet:ray>"
+
+        params_list will be:
+
+        [
+            ExtraNetworkParams(items=["agm", "1.1"]),
+            ExtraNetworkParams(items=["ray"])
+        ]
+
+        """
+        raise NotImplementedError
+
+    def deactivate(self, p):
+        """
+        Called at the end of processing for housekeeping. No need to do anything here.
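+        ExtraNetworkHypernet.deactivate later in this patch leaves it as a no-op,
+        since every activate() call fully overrides the previous one anyway.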
+ """ + + raise NotImplementedError + + +def activate(p, extra_network_data): + """call activate for extra networks in extra_network_data in specified order, then call + activate for all remaining registered networks with an empty argument list""" + + activated = [] + + for extra_network_name, extra_network_args in extra_network_data.items(): + extra_network = extra_network_registry.get(extra_network_name, None) + + if extra_network is None: + extra_network = extra_network_aliases.get(extra_network_name, None) + + if extra_network is None: + print(f"Skipping unknown extra network: {extra_network_name}") + continue + + try: + extra_network.activate(p, extra_network_args) + activated.append(extra_network) + except Exception as e: + errors.display(e, f"activating extra network {extra_network_name} with arguments {extra_network_args}") + + for extra_network_name, extra_network in extra_network_registry.items(): + if extra_network in activated: + continue + + try: + extra_network.activate(p, []) + except Exception as e: + errors.display(e, f"activating extra network {extra_network_name}") + + if p.scripts is not None: + p.scripts.after_extra_networks_activate(p, batch_number=p.iteration, prompts=p.prompts, seeds=p.seeds, subseeds=p.subseeds, extra_network_data=extra_network_data) + + +def deactivate(p, extra_network_data): + """call deactivate for extra networks in extra_network_data in specified order, then call + deactivate for all remaining registered networks""" + + for extra_network_name in extra_network_data: + extra_network = extra_network_registry.get(extra_network_name, None) + if extra_network is None: + continue + + try: + extra_network.deactivate(p) + except Exception as e: + errors.display(e, f"deactivating extra network {extra_network_name}") + + for extra_network_name, extra_network in extra_network_registry.items(): + args = extra_network_data.get(extra_network_name, None) + if args is not None: + continue + + try: + extra_network.deactivate(p) + except Exception as e: + errors.display(e, f"deactivating unmentioned extra network {extra_network_name}") + + +re_extra_net = re.compile(r"<(\w+):([^>]+)>") + + +def parse_prompt(prompt): + res = defaultdict(list) + + def found(m): + name = m.group(1) + args = m.group(2) + + res[name].append(ExtraNetworkParams(items=args.split(":"))) + + return "" + + prompt = re.sub(re_extra_net, found, prompt) + + return prompt, res + + +def parse_prompts(prompts): + res = [] + extra_data = None + + for prompt in prompts: + updated_prompt, parsed_extra_data = parse_prompt(prompt) + + if extra_data is None: + extra_data = parsed_extra_data + + res.append(updated_prompt) + + return res, extra_data + diff --git a/modules/extra_networks_hypernet.py b/modules/extra_networks_hypernet.py new file mode 100644 index 0000000000000000000000000000000000000000..192f11b9cbd88447a0f80dbd2f0ace26d74f18b2 --- /dev/null +++ b/modules/extra_networks_hypernet.py @@ -0,0 +1,28 @@ +from modules import extra_networks, shared +from modules.hypernetworks import hypernetwork + + +class ExtraNetworkHypernet(extra_networks.ExtraNetwork): + def __init__(self): + super().__init__('hypernet') + + def activate(self, p, params_list): + additional = shared.opts.sd_hypernetwork + + if additional != "None" and additional in shared.hypernetworks and not any(x for x in params_list if x.items[0] == additional): + hypernet_prompt_text = f"" + p.all_prompts = [f"{prompt}{hypernet_prompt_text}" for prompt in p.all_prompts] + 
params_list.append(extra_networks.ExtraNetworkParams(items=[additional, shared.opts.extra_networks_default_multiplier])) + + names = [] + multipliers = [] + for params in params_list: + assert params.items + + names.append(params.items[0]) + multipliers.append(float(params.items[1]) if len(params.items) > 1 else 1.0) + + hypernetwork.load_hypernetworks(names, multipliers) + + def deactivate(self, p): + pass diff --git a/modules/extras.py b/modules/extras.py new file mode 100644 index 0000000000000000000000000000000000000000..675fc1e5e0821196a82d904f43d719988f7424d4 --- /dev/null +++ b/modules/extras.py @@ -0,0 +1,303 @@ +import os +import re +import shutil +import json + + +import torch +import tqdm + +from modules import shared, images, sd_models, sd_vae, sd_models_config +from modules.ui_common import plaintext_to_html +import gradio as gr +import safetensors.torch + + +def run_pnginfo(image): + if image is None: + return '', '', '' + + geninfo, items = images.read_info_from_image(image) + items = {**{'parameters': geninfo}, **items} + + info = '' + for key, text in items.items(): + info += f""" +
<div>
+<p><b>{plaintext_to_html(str(key))}</b></p>
+<p>{plaintext_to_html(str(text))}</p>
+</div>
+""".strip()+"\n"
+
+    if len(info) == 0:
+        message = "Nothing found in the image."
+        info = f"<div><p>{message}</p></div>"
+
+    return '', geninfo, info
+
+
+def create_config(ckpt_result, config_source, a, b, c):
+    def config(x):
+        res = sd_models_config.find_checkpoint_config_near_filename(x) if x else None
+        return res if res != shared.sd_default_config else None
+
+    if config_source == 0:
+        cfg = config(a) or config(b) or config(c)
+    elif config_source == 1:
+        cfg = config(b)
+    elif config_source == 2:
+        cfg = config(c)
+    else:
+        cfg = None
+
+    if cfg is None:
+        return
+
+    filename, _ = os.path.splitext(ckpt_result)
+    checkpoint_filename = filename + ".yaml"
+
+    print("Copying config:")
+    print("   from:", cfg)
+    print("     to:", checkpoint_filename)
+    shutil.copyfile(cfg, checkpoint_filename)
+
+
+checkpoint_dict_skip_on_merge = ["cond_stage_model.transformer.text_model.embeddings.position_ids"]
+
+
+def to_half(tensor, enable):
+    if enable and tensor.dtype == torch.float:
+        return tensor.half()
+
+    return tensor
+
+
+def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_model_name, interp_method, multiplier, save_as_half, custom_name, checkpoint_format, config_source, bake_in_vae, discard_weights, save_metadata):
+    shared.state.begin(job="model-merge")
+
+    def fail(message):
+        shared.state.textinfo = message
+        shared.state.end()
+        return [*[gr.update() for _ in range(4)], message]
+
+    def weighted_sum(theta0, theta1, alpha):
+        return ((1 - alpha) * theta0) + (alpha * theta1)
+
+    def get_difference(theta1, theta2):
+        return theta1 - theta2
+
+    def add_difference(theta0, theta1_2_diff, alpha):
+        return theta0 + (alpha * theta1_2_diff)
+
+    def filename_weighted_sum():
+        a = primary_model_info.model_name
+        b = secondary_model_info.model_name
+        Ma = round(1 - multiplier, 2)
+        Mb = round(multiplier, 2)
+
+        return f"{Ma}({a}) + {Mb}({b})"
+
+    def filename_add_difference():
+        a = primary_model_info.model_name
+        b = secondary_model_info.model_name
+        c = tertiary_model_info.model_name
+        M = round(multiplier, 2)
+
+        return f"{a} + {M}({b} - {c})"
+
+    def filename_nothing():
+        return primary_model_info.model_name
+
+    theta_funcs = {
+        "Weighted sum": (filename_weighted_sum, None, weighted_sum),
+        "Add difference": (filename_add_difference, get_difference, add_difference),
+        "No interpolation": (filename_nothing, None, None),
+    }
+    filename_generator, theta_func1, theta_func2 = theta_funcs[interp_method]
+    shared.state.job_count = (1 if theta_func1 else 0) + (1 if theta_func2 else 0)
+
+    if not primary_model_name:
+        return fail("Failed: Merging requires a primary model.")
+
+    primary_model_info = sd_models.checkpoints_list[primary_model_name]
+
+    if theta_func2 and not secondary_model_name:
+        return fail("Failed: Merging requires a secondary model.")
+
+    secondary_model_info = sd_models.checkpoints_list[secondary_model_name] if theta_func2 else None
+
+    if theta_func1 and not tertiary_model_name:
+        return fail(f"Failed: Interpolation method ({interp_method}) requires a tertiary model.")
+
+    tertiary_model_info = sd_models.checkpoints_list[tertiary_model_name] if theta_func1 else None
+
+    result_is_inpainting_model = False
+    result_is_instruct_pix2pix_model = False
+
+    if theta_func2:
+        shared.state.textinfo = "Loading B"
+        print(f"Loading {secondary_model_info.filename}...")
+        theta_1 = sd_models.read_state_dict(secondary_model_info.filename, map_location='cpu')
+    else:
+        theta_1 = None
+
+    if theta_func1:
+        shared.state.textinfo = "Loading C"
+        print(f"Loading {tertiary_model_info.filename}...")
+        theta_2 = sd_models.read_state_dict(tertiary_model_info.filename, map_location='cpu')
+
+        shared.state.textinfo = 'Merging B and C'
+        shared.state.sampling_steps = len(theta_1.keys())
+        for key in tqdm.tqdm(theta_1.keys()):
+            if key in checkpoint_dict_skip_on_merge:
+                continue
+
+            if 'model' in key:
+                if key in theta_2:
+                    t2 = theta_2.get(key, torch.zeros_like(theta_1[key]))
+                    theta_1[key] = theta_func1(theta_1[key], t2)
+                else:
+                    theta_1[key] = torch.zeros_like(theta_1[key])
+
+            shared.state.sampling_step += 1
+        del theta_2
+
+        shared.state.nextjob()
+
+    shared.state.textinfo = f"Loading {primary_model_info.filename}..."
+    print(f"Loading {primary_model_info.filename}...")
+    theta_0 = sd_models.read_state_dict(primary_model_info.filename, map_location='cpu')
+
+    print("Merging...")
+    shared.state.textinfo = 'Merging A and B'
+    shared.state.sampling_steps = len(theta_0.keys())
+    for key in tqdm.tqdm(theta_0.keys()):
+        if theta_1 and 'model' in key and key in theta_1:
+
+            if key in checkpoint_dict_skip_on_merge:
+                continue
+
+            a = theta_0[key]
+            b = theta_1[key]
+
+            # this enables merging an inpainting model (A) with another one (B);
+            # where a normal model would have 4 channels for latent space, an inpainting model has
+            # another 4 channels for the unmasked picture's latent space, plus one channel for mask, for a total of 9
+            if a.shape != b.shape and a.shape[0:1] + a.shape[2:] == b.shape[0:1] + b.shape[2:]:
+                if a.shape[1] == 4 and b.shape[1] == 9:
+                    raise RuntimeError("When merging inpainting model with a normal one, A must be the inpainting model.")
+                if a.shape[1] == 4 and b.shape[1] == 8:
+                    raise RuntimeError("When merging instruct-pix2pix model with a normal one, A must be the instruct-pix2pix model.")
+
+                if a.shape[1] == 8 and b.shape[1] == 4:  # if we have an Instruct-Pix2Pix model...
+                    theta_0[key][:, 0:4, :, :] = theta_func2(a[:, 0:4, :, :], b, multiplier)  # merge only the vectors the models have in common, otherwise we get an error due to dimension mismatch
+                    result_is_instruct_pix2pix_model = True
+                else:
+                    assert a.shape[1] == 9 and b.shape[1] == 4, f"Bad dimensions for merged layer {key}: A={a.shape}, B={b.shape}"
+                    theta_0[key][:, 0:4, :, :] = theta_func2(a[:, 0:4, :, :], b, multiplier)
+                    result_is_inpainting_model = True
+            else:
+                theta_0[key] = theta_func2(a, b, multiplier)
+
+            theta_0[key] = to_half(theta_0[key], save_as_half)
+
+        shared.state.sampling_step += 1
+
+    del theta_1
+
+    bake_in_vae_filename = sd_vae.vae_dict.get(bake_in_vae, None)
+    if bake_in_vae_filename is not None:
+        print(f"Baking in VAE from {bake_in_vae_filename}")
+        shared.state.textinfo = 'Baking in VAE'
+        vae_dict = sd_vae.load_vae_dict(bake_in_vae_filename, map_location='cpu')
+
+        for key in vae_dict.keys():
+            theta_0_key = 'first_stage_model.' + key
+            if theta_0_key in theta_0:
+                theta_0[theta_0_key] = to_half(vae_dict[key], save_as_half)
+
+        del vae_dict
+
+    if save_as_half and not theta_func2:
+        for key in theta_0.keys():
+            theta_0[key] = to_half(theta_0[key], save_as_half)
+
+    if discard_weights:
+        regex = re.compile(discard_weights)
+        for key in list(theta_0):
+            if re.search(regex, key):
+                theta_0.pop(key, None)
+
+    ckpt_dir = shared.cmd_opts.ckpt_dir or sd_models.model_path
+
+    filename = filename_generator() if custom_name == '' else custom_name
+    filename += ".inpainting" if result_is_inpainting_model else ""
+    filename += ".instruct-pix2pix" if result_is_instruct_pix2pix_model else ""
+    filename += "."
+ checkpoint_format + + output_modelname = os.path.join(ckpt_dir, filename) + + shared.state.nextjob() + shared.state.textinfo = "Saving" + print(f"Saving to {output_modelname}...") + + metadata = None + + if save_metadata: + metadata = {"format": "pt"} + + merge_recipe = { + "type": "webui", # indicate this model was merged with webui's built-in merger + "primary_model_hash": primary_model_info.sha256, + "secondary_model_hash": secondary_model_info.sha256 if secondary_model_info else None, + "tertiary_model_hash": tertiary_model_info.sha256 if tertiary_model_info else None, + "interp_method": interp_method, + "multiplier": multiplier, + "save_as_half": save_as_half, + "custom_name": custom_name, + "config_source": config_source, + "bake_in_vae": bake_in_vae, + "discard_weights": discard_weights, + "is_inpainting": result_is_inpainting_model, + "is_instruct_pix2pix": result_is_instruct_pix2pix_model + } + metadata["sd_merge_recipe"] = json.dumps(merge_recipe) + + sd_merge_models = {} + + def add_model_metadata(checkpoint_info): + checkpoint_info.calculate_shorthash() + sd_merge_models[checkpoint_info.sha256] = { + "name": checkpoint_info.name, + "legacy_hash": checkpoint_info.hash, + "sd_merge_recipe": checkpoint_info.metadata.get("sd_merge_recipe", None) + } + + sd_merge_models.update(checkpoint_info.metadata.get("sd_merge_models", {})) + + add_model_metadata(primary_model_info) + if secondary_model_info: + add_model_metadata(secondary_model_info) + if tertiary_model_info: + add_model_metadata(tertiary_model_info) + + metadata["sd_merge_models"] = json.dumps(sd_merge_models) + + _, extension = os.path.splitext(output_modelname) + if extension.lower() == ".safetensors": + safetensors.torch.save_file(theta_0, output_modelname, metadata=metadata) + else: + torch.save(theta_0, output_modelname) + + sd_models.list_models() + created_model = next((ckpt for ckpt in sd_models.checkpoints_list.values() if ckpt.name == filename), None) + if created_model: + created_model.calculate_shorthash() + + create_config(output_modelname, config_source, primary_model_info, secondary_model_info, tertiary_model_info) + + print(f"Checkpoint saved to {output_modelname}.") + shared.state.textinfo = "Checkpoint saved" + shared.state.end() + + return [*[gr.Dropdown.update(choices=sd_models.checkpoint_tiles()) for _ in range(4)], "Checkpoint saved to " + output_modelname] diff --git a/modules/face_restoration.py b/modules/face_restoration.py new file mode 100644 index 0000000000000000000000000000000000000000..2c86c6ccce338a1411f4367a0bc6e4046ad67cae --- /dev/null +++ b/modules/face_restoration.py @@ -0,0 +1,19 @@ +from modules import shared + + +class FaceRestoration: + def name(self): + return "None" + + def restore(self, np_image): + return np_image + + +def restore_faces(np_image): + face_restorers = [x for x in shared.face_restorers if x.name() == shared.opts.face_restoration_model or shared.opts.face_restoration_model is None] + if len(face_restorers) == 0: + return np_image + + face_restorer = face_restorers[0] + + return face_restorer.restore(np_image) diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py new file mode 100644 index 0000000000000000000000000000000000000000..ab4ac50cd756da4fe37da5100bec6258605e6531 --- /dev/null +++ b/modules/generation_parameters_copypaste.py @@ -0,0 +1,439 @@ +import base64 +import io +import json +import os +import re + +import gradio as gr +from modules.paths import data_path +from modules import shared, ui_tempdir, 
script_callbacks +from PIL import Image + +re_param_code = r'\s*([\w ]+):\s*("(?:\\"[^,]|\\"|\\|[^\"])+"|[^,]*)(?:,|$)' +re_param = re.compile(re_param_code) +re_imagesize = re.compile(r"^(\d+)x(\d+)$") +re_hypernet_hash = re.compile("\(([0-9a-f]+)\)$") +type_of_gr_update = type(gr.update()) + +paste_fields = {} +registered_param_bindings = [] + + +class ParamBinding: + def __init__(self, paste_button, tabname, source_text_component=None, source_image_component=None, source_tabname=None, override_settings_component=None, paste_field_names=None): + self.paste_button = paste_button + self.tabname = tabname + self.source_text_component = source_text_component + self.source_image_component = source_image_component + self.source_tabname = source_tabname + self.override_settings_component = override_settings_component + self.paste_field_names = paste_field_names or [] + + +def reset(): + paste_fields.clear() + + +def quote(text): + if ',' not in str(text) and '\n' not in str(text) and ':' not in str(text): + return text + + return json.dumps(text, ensure_ascii=False) + + +def unquote(text): + if len(text) == 0 or text[0] != '"' or text[-1] != '"': + return text + + try: + return json.loads(text) + except Exception: + return text + + +def image_from_url_text(filedata): + if filedata is None: + return None + + if type(filedata) == list and filedata and type(filedata[0]) == dict and filedata[0].get("is_file", False): + filedata = filedata[0] + + if type(filedata) == dict and filedata.get("is_file", False): + filename = filedata["name"] + is_in_right_dir = ui_tempdir.check_tmp_file(shared.demo, filename) + assert is_in_right_dir, 'trying to open image file outside of allowed directories' + + filename = filename.rsplit('?', 1)[0] + return Image.open(filename) + + if type(filedata) == list: + if len(filedata) == 0: + return None + + filedata = filedata[0] + + if filedata.startswith("data:image/png;base64,"): + filedata = filedata[len("data:image/png;base64,"):] + + filedata = base64.decodebytes(filedata.encode('utf-8')) + image = Image.open(io.BytesIO(filedata)) + return image + + +def add_paste_fields(tabname, init_img, fields, override_settings_component=None): + paste_fields[tabname] = {"init_img": init_img, "fields": fields, "override_settings_component": override_settings_component} + + # backwards compatibility for existing extensions + import modules.ui + if tabname == 'txt2img': + modules.ui.txt2img_paste_fields = fields + elif tabname == 'img2img': + modules.ui.img2img_paste_fields = fields + + +def create_buttons(tabs_list): + buttons = {} + for tab in tabs_list: + buttons[tab] = gr.Button(f"Send to {tab}", elem_id=f"{tab}_tab") + return buttons + + +def bind_buttons(buttons, send_image, send_generate_info): + """old function for backwards compatibility; do not use this, use register_paste_params_button""" + for tabname, button in buttons.items(): + source_text_component = send_generate_info if isinstance(send_generate_info, gr.components.Component) else None + source_tabname = send_generate_info if isinstance(send_generate_info, str) else None + + register_paste_params_button(ParamBinding(paste_button=button, tabname=tabname, source_text_component=source_text_component, source_image_component=send_image, source_tabname=source_tabname)) + + +def register_paste_params_button(binding: ParamBinding): + registered_param_bindings.append(binding) + + +def connect_paste_params_buttons(): + binding: ParamBinding + for binding in registered_param_bindings: + destination_image_component = 
paste_fields[binding.tabname]["init_img"] + fields = paste_fields[binding.tabname]["fields"] + override_settings_component = binding.override_settings_component or paste_fields[binding.tabname]["override_settings_component"] + + destination_width_component = next(iter([field for field, name in fields if name == "Size-1"] if fields else []), None) + destination_height_component = next(iter([field for field, name in fields if name == "Size-2"] if fields else []), None) + + if binding.source_image_component and destination_image_component: + if isinstance(binding.source_image_component, gr.Gallery): + func = send_image_and_dimensions if destination_width_component else image_from_url_text + jsfunc = "extract_image_from_gallery" + else: + func = send_image_and_dimensions if destination_width_component else lambda x: x + jsfunc = None + + binding.paste_button.click( + fn=func, + _js=jsfunc, + inputs=[binding.source_image_component], + outputs=[destination_image_component, destination_width_component, destination_height_component] if destination_width_component else [destination_image_component], + show_progress=False, + ) + + if binding.source_text_component is not None and fields is not None: + connect_paste(binding.paste_button, fields, binding.source_text_component, override_settings_component, binding.tabname) + + if binding.source_tabname is not None and fields is not None: + paste_field_names = ['Prompt', 'Negative prompt', 'Steps', 'Face restoration'] + (["Seed"] if shared.opts.send_seed else []) + binding.paste_field_names + binding.paste_button.click( + fn=lambda *x: x, + inputs=[field for field, name in paste_fields[binding.source_tabname]["fields"] if name in paste_field_names], + outputs=[field for field, name in fields if name in paste_field_names], + show_progress=False, + ) + + binding.paste_button.click( + fn=None, + _js=f"switch_to_{binding.tabname}", + inputs=None, + outputs=None, + show_progress=False, + ) + + +def send_image_and_dimensions(x): + if isinstance(x, Image.Image): + img = x + else: + img = image_from_url_text(x) + + if shared.opts.send_size and isinstance(img, Image.Image): + w = img.width + h = img.height + else: + w = gr.update() + h = gr.update() + + return img, w, h + + +def restore_old_hires_fix_params(res): + """for infotexts that specify old First pass size parameter, convert it into + width, height, and hr scale""" + + firstpass_width = res.get('First pass size-1', None) + firstpass_height = res.get('First pass size-2', None) + + if shared.opts.use_old_hires_fix_width_height: + hires_width = int(res.get("Hires resize-1", 0)) + hires_height = int(res.get("Hires resize-2", 0)) + + if hires_width and hires_height: + res['Size-1'] = hires_width + res['Size-2'] = hires_height + return + + if firstpass_width is None or firstpass_height is None: + return + + firstpass_width, firstpass_height = int(firstpass_width), int(firstpass_height) + width = int(res.get("Size-1", 512)) + height = int(res.get("Size-2", 512)) + + if firstpass_width == 0 or firstpass_height == 0: + from modules import processing + firstpass_width, firstpass_height = processing.old_hires_fix_first_pass_dimensions(width, height) + + res['Size-1'] = firstpass_width + res['Size-2'] = firstpass_height + res['Hires resize-1'] = width + res['Hires resize-2'] = height + + +def parse_generation_parameters(x: str): + """parses generation parameters string, the one you see in text field under the picture in UI: +``` +girl with an artist's beret, determined, blue eyes, desert scene, computer monitors, 
heavy makeup, by Alphonse Mucha and Charlie Bowater, ((eyeshadow)), (coquettish), detailed, intricate
+Negative prompt: ugly, fat, obese, chubby, (((deformed))), [blurry], bad anatomy, disfigured, poorly drawn face, mutation, mutated, (extra_limb), (ugly), (poorly drawn hands), messy drawing
+Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model hash: 45dee52b
+```
+
+    returns a dict with field values
+    """
+
+    res = {}
+
+    prompt = ""
+    negative_prompt = ""
+
+    done_with_prompt = False
+
+    *lines, lastline = x.strip().split("\n")
+    if len(re_param.findall(lastline)) < 3:
+        lines.append(lastline)
+        lastline = ''
+
+    for line in lines:
+        line = line.strip()
+        if line.startswith("Negative prompt:"):
+            done_with_prompt = True
+            line = line[16:].strip()
+        if done_with_prompt:
+            negative_prompt += ("" if negative_prompt == "" else "\n") + line
+        else:
+            prompt += ("" if prompt == "" else "\n") + line
+
+    if shared.opts.infotext_styles != "Ignore":
+        found_styles, prompt, negative_prompt = shared.prompt_styles.extract_styles_from_prompt(prompt, negative_prompt)
+
+        if shared.opts.infotext_styles == "Apply":
+            res["Styles array"] = found_styles
+        elif shared.opts.infotext_styles == "Apply if any" and found_styles:
+            res["Styles array"] = found_styles
+
+    res["Prompt"] = prompt
+    res["Negative prompt"] = negative_prompt
+
+    for k, v in re_param.findall(lastline):
+        try:
+            if v[0] == '"' and v[-1] == '"':
+                v = unquote(v)
+
+            m = re_imagesize.match(v)
+            if m is not None:
+                res[f"{k}-1"] = m.group(1)
+                res[f"{k}-2"] = m.group(2)
+            else:
+                res[k] = v
+        except Exception:
+            print(f"Error parsing \"{k}: {v}\"")
+
+    # Missing CLIP skip means it was set to 1 (the default)
+    if "Clip skip" not in res:
+        res["Clip skip"] = "1"
+
+    hypernet = res.get("Hypernet", None)
+    if hypernet is not None:
+        res["Prompt"] += f"""<hypernet:{hypernet}:{res.get("Hypernet strength", "1.0")}>"""
+
+    if "Hires resize-1" not in res:
+        res["Hires resize-1"] = 0
+        res["Hires resize-2"] = 0
+
+    if "Hires sampler" not in res:
+        res["Hires sampler"] = "Use same sampler"
+
+    if "Hires prompt" not in res:
+        res["Hires prompt"] = ""
+
+    if "Hires negative prompt" not in res:
+        res["Hires negative prompt"] = ""
+
+    restore_old_hires_fix_params(res)
+
+    # Missing RNG means the default was set, which is GPU RNG
+    if "RNG" not in res:
+        res["RNG"] = "GPU"
+
+    if "Schedule type" not in res:
+        res["Schedule type"] = "Automatic"
+
+    if "Schedule max sigma" not in res:
+        res["Schedule max sigma"] = 0
+
+    if "Schedule min sigma" not in res:
+        res["Schedule min sigma"] = 0
+
+    if "Schedule rho" not in res:
+        res["Schedule rho"] = 0
+
+    return res
+
+
+infotext_to_setting_name_mapping = [
+    ('Clip skip', 'CLIP_stop_at_last_layers', ),
+    ('Conditional mask weight', 'inpainting_mask_weight'),
+    ('Model hash', 'sd_model_checkpoint'),
+    ('ENSD', 'eta_noise_seed_delta'),
+    ('Schedule type', 'k_sched_type'),
+    ('Schedule max sigma', 'sigma_max'),
+    ('Schedule min sigma', 'sigma_min'),
+    ('Schedule rho', 'rho'),
+    ('Noise multiplier', 'initial_noise_multiplier'),
+    ('Eta', 'eta_ancestral'),
+    ('Eta DDIM', 'eta_ddim'),
+    ('Discard penultimate sigma', 'always_discard_next_to_last_sigma'),
+    ('UniPC variant', 'uni_pc_variant'),
+    ('UniPC skip type', 'uni_pc_skip_type'),
+    ('UniPC order', 'uni_pc_order'),
+    ('UniPC lower order final', 'uni_pc_lower_order_final'),
+    ('Token merging ratio', 'token_merging_ratio'),
+    ('Token merging ratio hr', 'token_merging_ratio_hr'),
+    ('RNG', 'randn_source'),
+    ('NGMS', 's_min_uncond'),
+    ('Pad conds', 'pad_cond_uncond'),
+]
+
+
+def
create_override_settings_dict(text_pairs): + """creates processing's override_settings parameters from gradio's multiselect + + Example input: + ['Clip skip: 2', 'Model hash: e6e99610c4', 'ENSD: 31337'] + + Example output: + {'CLIP_stop_at_last_layers': 2, 'sd_model_checkpoint': 'e6e99610c4', 'eta_noise_seed_delta': 31337} + """ + + res = {} + + params = {} + for pair in text_pairs: + k, v = pair.split(":", maxsplit=1) + + params[k] = v.strip() + + for param_name, setting_name in infotext_to_setting_name_mapping: + value = params.get(param_name, None) + + if value is None: + continue + + res[setting_name] = shared.opts.cast_value(setting_name, value) + + return res + + +def connect_paste(button, paste_fields, input_comp, override_settings_component, tabname): + def paste_func(prompt): + if not prompt and not shared.cmd_opts.hide_ui_dir_config: + filename = os.path.join(data_path, "params.txt") + if os.path.exists(filename): + with open(filename, "r", encoding="utf8") as file: + prompt = file.read() + + params = parse_generation_parameters(prompt) + script_callbacks.infotext_pasted_callback(prompt, params) + res = [] + + for output, key in paste_fields: + if callable(key): + v = key(params) + else: + v = params.get(key, None) + + if v is None: + res.append(gr.update()) + elif isinstance(v, type_of_gr_update): + res.append(v) + else: + try: + valtype = type(output.value) + + if valtype == bool and v == "False": + val = False + else: + val = valtype(v) + + res.append(gr.update(value=val)) + except Exception: + res.append(gr.update()) + + return res + + if override_settings_component is not None: + def paste_settings(params): + vals = {} + + for param_name, setting_name in infotext_to_setting_name_mapping: + v = params.get(param_name, None) + if v is None: + continue + + if setting_name == "sd_model_checkpoint" and shared.opts.disable_weights_auto_swap: + continue + + v = shared.opts.cast_value(setting_name, v) + current_value = getattr(shared.opts, setting_name, None) + + if v == current_value: + continue + + vals[param_name] = v + + vals_pairs = [f"{k}: {v}" for k, v in vals.items()] + + return gr.Dropdown.update(value=vals_pairs, choices=vals_pairs, visible=bool(vals_pairs)) + + paste_fields = paste_fields + [(override_settings_component, paste_settings)] + + button.click( + fn=paste_func, + inputs=[input_comp], + outputs=[x[0] for x in paste_fields], + show_progress=False, + ) + button.click( + fn=None, + _js=f"recalculate_prompts_{tabname}", + inputs=[], + outputs=[], + show_progress=False, + ) diff --git a/modules/gfpgan_model.py b/modules/gfpgan_model.py new file mode 100644 index 0000000000000000000000000000000000000000..e2b58f0b4a864977b602d513423120ad9b29d65d --- /dev/null +++ b/modules/gfpgan_model.py @@ -0,0 +1,110 @@ +import os + +import facexlib +import gfpgan + +import modules.face_restoration +from modules import paths, shared, devices, modelloader, errors + +model_dir = "GFPGAN" +user_path = None +model_path = os.path.join(paths.models_path, model_dir) +model_url = "https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth" +have_gfpgan = False +loaded_gfpgan_model = None + + +def gfpgann(): + global loaded_gfpgan_model + global model_path + if loaded_gfpgan_model is not None: + loaded_gfpgan_model.gfpgan.to(devices.device_gfpgan) + return loaded_gfpgan_model + + if gfpgan_constructor is None: + return None + + models = modelloader.load_models(model_path, model_url, user_path, ext_filter="GFPGAN") + if len(models) == 1 and models[0].startswith("http"): + 
model_file = models[0] + elif len(models) != 0: + latest_file = max(models, key=os.path.getctime) + model_file = latest_file + else: + print("Unable to load gfpgan model!") + return None + if hasattr(facexlib.detection.retinaface, 'device'): + facexlib.detection.retinaface.device = devices.device_gfpgan + model = gfpgan_constructor(model_path=model_file, upscale=1, arch='clean', channel_multiplier=2, bg_upsampler=None, device=devices.device_gfpgan) + loaded_gfpgan_model = model + + return model + + +def send_model_to(model, device): + model.gfpgan.to(device) + model.face_helper.face_det.to(device) + model.face_helper.face_parse.to(device) + + +def gfpgan_fix_faces(np_image): + model = gfpgann() + if model is None: + return np_image + + send_model_to(model, devices.device_gfpgan) + + np_image_bgr = np_image[:, :, ::-1] + cropped_faces, restored_faces, gfpgan_output_bgr = model.enhance(np_image_bgr, has_aligned=False, only_center_face=False, paste_back=True) + np_image = gfpgan_output_bgr[:, :, ::-1] + + model.face_helper.clean_all() + + if shared.opts.face_restoration_unload: + send_model_to(model, devices.cpu) + + return np_image + + +gfpgan_constructor = None + + +def setup_model(dirname): + try: + os.makedirs(model_path, exist_ok=True) + from gfpgan import GFPGANer + from facexlib import detection, parsing # noqa: F401 + global user_path + global have_gfpgan + global gfpgan_constructor + + load_file_from_url_orig = gfpgan.utils.load_file_from_url + facex_load_file_from_url_orig = facexlib.detection.load_file_from_url + facex_load_file_from_url_orig2 = facexlib.parsing.load_file_from_url + + def my_load_file_from_url(**kwargs): + return load_file_from_url_orig(**dict(kwargs, model_dir=model_path)) + + def facex_load_file_from_url(**kwargs): + return facex_load_file_from_url_orig(**dict(kwargs, save_dir=model_path, model_dir=None)) + + def facex_load_file_from_url2(**kwargs): + return facex_load_file_from_url_orig2(**dict(kwargs, save_dir=model_path, model_dir=None)) + + gfpgan.utils.load_file_from_url = my_load_file_from_url + facexlib.detection.load_file_from_url = facex_load_file_from_url + facexlib.parsing.load_file_from_url = facex_load_file_from_url2 + user_path = dirname + have_gfpgan = True + gfpgan_constructor = GFPGANer + + class FaceRestorerGFPGAN(modules.face_restoration.FaceRestoration): + def name(self): + return "GFPGAN" + + def restore(self, np_image): + return gfpgan_fix_faces(np_image) + + shared.face_restorers.append(FaceRestorerGFPGAN()) + except Exception: + errors.report("Error setting up GFPGAN", exc_info=True) diff --git a/modules/gitpython_hack.py b/modules/gitpython_hack.py new file mode 100644 index 0000000000000000000000000000000000000000..e537c1df93e15679d90e9eea3337035a8d50da89 --- /dev/null +++ b/modules/gitpython_hack.py @@ -0,0 +1,42 @@ +from __future__ import annotations + +import io +import subprocess + +import git + + +class Git(git.Git): + """ + Git subclassed to never use persistent processes. 
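+
+    gitpython normally keeps long-lived `git cat-file --batch` helper processes
+    around for object access; the overrides below replace them with one-shot
+    subprocess calls, so no git child process outlives a single request.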
+ """ + + def _get_persistent_cmd(self, attr_name, cmd_name, *args, **kwargs): + raise NotImplementedError(f"Refusing to use persistent process: {attr_name} ({cmd_name} {args} {kwargs})") + + def get_object_header(self, ref: str | bytes) -> tuple[str, str, int]: + ret = subprocess.check_output( + [self.GIT_PYTHON_GIT_EXECUTABLE, "cat-file", "--batch-check"], + input=self._prepare_ref(ref), + cwd=self._working_dir, + timeout=2, + ) + return self._parse_object_header(ret) + + def stream_object_data(self, ref: str) -> tuple[str, str, int, "Git.CatFileContentStream"]: + # Not really streaming, per se; this buffers the entire object in memory. + # Shouldn't be a problem for our use case, since we're only using this for + # object headers (commit objects). + ret = subprocess.check_output( + [self.GIT_PYTHON_GIT_EXECUTABLE, "cat-file", "--batch"], + input=self._prepare_ref(ref), + cwd=self._working_dir, + timeout=30, + ) + bio = io.BytesIO(ret) + hexsha, typename, size = self._parse_object_header(bio.readline()) + return (hexsha, typename, size, self.CatFileContentStream(size, bio)) + + +class Repo(git.Repo): + GitCommandWrapperType = Git diff --git a/modules/hashes.py b/modules/hashes.py new file mode 100644 index 0000000000000000000000000000000000000000..59a81eaabc91567a1a3a3caa12f1f9944f487806 --- /dev/null +++ b/modules/hashes.py @@ -0,0 +1,81 @@ +import hashlib +import os.path + +from modules import shared +import modules.cache + +dump_cache = modules.cache.dump_cache +cache = modules.cache.cache + + +def calculate_sha256(filename): + hash_sha256 = hashlib.sha256() + blksize = 1024 * 1024 + + with open(filename, "rb") as f: + for chunk in iter(lambda: f.read(blksize), b""): + hash_sha256.update(chunk) + + return hash_sha256.hexdigest() + + +def sha256_from_cache(filename, title, use_addnet_hash=False): + hashes = cache("hashes-addnet") if use_addnet_hash else cache("hashes") + ondisk_mtime = os.path.getmtime(filename) + + if title not in hashes: + return None + + cached_sha256 = hashes[title].get("sha256", None) + cached_mtime = hashes[title].get("mtime", 0) + + if ondisk_mtime > cached_mtime or cached_sha256 is None: + return None + + return cached_sha256 + + +def sha256(filename, title, use_addnet_hash=False): + hashes = cache("hashes-addnet") if use_addnet_hash else cache("hashes") + + sha256_value = sha256_from_cache(filename, title, use_addnet_hash) + if sha256_value is not None: + return sha256_value + + if shared.cmd_opts.no_hashing: + return None + + print(f"Calculating sha256 for {filename}: ", end='') + if use_addnet_hash: + with open(filename, "rb") as file: + sha256_value = addnet_hash_safetensors(file) + else: + sha256_value = calculate_sha256(filename) + print(f"{sha256_value}") + + hashes[title] = { + "mtime": os.path.getmtime(filename), + "sha256": sha256_value, + } + + dump_cache() + + return sha256_value + + +def addnet_hash_safetensors(b): + """kohya-ss hash for safetensors from https://github.com/kohya-ss/sd-scripts/blob/main/library/train_util.py""" + hash_sha256 = hashlib.sha256() + blksize = 1024 * 1024 + + b.seek(0) + header = b.read(8) + n = int.from_bytes(header, "little") + + offset = n + 8 + b.seek(offset) + for chunk in iter(lambda: b.read(blksize), b""): + hash_sha256.update(chunk) + + return hash_sha256.hexdigest() + diff --git a/modules/hypernetworks/__pycache__/hypernetwork.cpython-310.pyc b/modules/hypernetworks/__pycache__/hypernetwork.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..c23e03a05ca89710e6061e022d46a4816f9ed42e Binary files /dev/null and b/modules/hypernetworks/__pycache__/hypernetwork.cpython-310.pyc differ diff --git a/modules/hypernetworks/__pycache__/ui.cpython-310.pyc b/modules/hypernetworks/__pycache__/ui.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a82e079ec1e09f5514698aa5b0bbc4ca8c0d8178 Binary files /dev/null and b/modules/hypernetworks/__pycache__/ui.cpython-310.pyc differ diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py new file mode 100644 index 0000000000000000000000000000000000000000..dc46b61a74a26f3c0c198901281a08457a8b0c44 --- /dev/null +++ b/modules/hypernetworks/hypernetwork.py @@ -0,0 +1,783 @@ +import datetime +import glob +import html +import os +import inspect +from contextlib import closing + +import modules.textual_inversion.dataset +import torch +import tqdm +from einops import rearrange, repeat +from ldm.util import default +from modules import devices, processing, sd_models, shared, sd_samplers, hashes, sd_hijack_checkpoint, errors +from modules.textual_inversion import textual_inversion, logging +from modules.textual_inversion.learn_schedule import LearnRateScheduler +from torch import einsum +from torch.nn.init import normal_, xavier_normal_, xavier_uniform_, kaiming_normal_, kaiming_uniform_, zeros_ + +from collections import deque +from statistics import stdev, mean + + +optimizer_dict = {optim_name : cls_obj for optim_name, cls_obj in inspect.getmembers(torch.optim, inspect.isclass) if optim_name != "Optimizer"} + +class HypernetworkModule(torch.nn.Module): + activation_dict = { + "linear": torch.nn.Identity, + "relu": torch.nn.ReLU, + "leakyrelu": torch.nn.LeakyReLU, + "elu": torch.nn.ELU, + "swish": torch.nn.Hardswish, + "tanh": torch.nn.Tanh, + "sigmoid": torch.nn.Sigmoid, + } + activation_dict.update({cls_name.lower(): cls_obj for cls_name, cls_obj in inspect.getmembers(torch.nn.modules.activation) if inspect.isclass(cls_obj) and cls_obj.__module__ == 'torch.nn.modules.activation'}) + + def __init__(self, dim, state_dict=None, layer_structure=None, activation_func=None, weight_init='Normal', + add_layer_norm=False, activate_output=False, dropout_structure=None): + super().__init__() + + self.multiplier = 1.0 + + assert layer_structure is not None, "layer_structure must not be None" + assert layer_structure[0] == 1, "Multiplier Sequence should start with size 1!" + assert layer_structure[-1] == 1, "Multiplier Sequence should end with size 1!" + + linears = [] + for i in range(len(layer_structure) - 1): + + # Add a fully-connected layer + linears.append(torch.nn.Linear(int(dim * layer_structure[i]), int(dim * layer_structure[i+1]))) + + # Add an activation func except last layer + if activation_func == "linear" or activation_func is None or (i >= len(layer_structure) - 2 and not activate_output): + pass + elif activation_func in self.activation_dict: + linears.append(self.activation_dict[activation_func]()) + else: + raise RuntimeError(f'hypernetwork uses an unsupported activation function: {activation_func}') + + # Add layer normalization + if add_layer_norm: + linears.append(torch.nn.LayerNorm(int(dim * layer_structure[i+1]))) + + # Everything should be now parsed into dropout structure, and applied here. + # Since we only have dropouts after layers, dropout structure should start with 0 and end with 0. 
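+            # Note: dropout_structure[i+1] is the probability attached to the layer that
+            # was just appended, so entry 0 (the input) can never receive dropout.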
+ if dropout_structure is not None and dropout_structure[i+1] > 0: + assert 0 < dropout_structure[i+1] < 1, "Dropout probability should be 0 or float between 0 and 1!" + linears.append(torch.nn.Dropout(p=dropout_structure[i+1])) + # Code explanation : [1, 2, 1] -> dropout is missing when last_layer_dropout is false. [1, 2, 2, 1] -> [0, 0.3, 0, 0], when its True, [0, 0.3, 0.3, 0]. + + self.linear = torch.nn.Sequential(*linears) + + if state_dict is not None: + self.fix_old_state_dict(state_dict) + self.load_state_dict(state_dict) + else: + for layer in self.linear: + if type(layer) == torch.nn.Linear or type(layer) == torch.nn.LayerNorm: + w, b = layer.weight.data, layer.bias.data + if weight_init == "Normal" or type(layer) == torch.nn.LayerNorm: + normal_(w, mean=0.0, std=0.01) + normal_(b, mean=0.0, std=0) + elif weight_init == 'XavierUniform': + xavier_uniform_(w) + zeros_(b) + elif weight_init == 'XavierNormal': + xavier_normal_(w) + zeros_(b) + elif weight_init == 'KaimingUniform': + kaiming_uniform_(w, nonlinearity='leaky_relu' if 'leakyrelu' == activation_func else 'relu') + zeros_(b) + elif weight_init == 'KaimingNormal': + kaiming_normal_(w, nonlinearity='leaky_relu' if 'leakyrelu' == activation_func else 'relu') + zeros_(b) + else: + raise KeyError(f"Key {weight_init} is not defined as initialization!") + self.to(devices.device) + + def fix_old_state_dict(self, state_dict): + changes = { + 'linear1.bias': 'linear.0.bias', + 'linear1.weight': 'linear.0.weight', + 'linear2.bias': 'linear.1.bias', + 'linear2.weight': 'linear.1.weight', + } + + for fr, to in changes.items(): + x = state_dict.get(fr, None) + if x is None: + continue + + del state_dict[fr] + state_dict[to] = x + + def forward(self, x): + return x + self.linear(x) * (self.multiplier if not self.training else 1) + + def trainables(self): + layer_structure = [] + for layer in self.linear: + if type(layer) == torch.nn.Linear or type(layer) == torch.nn.LayerNorm: + layer_structure += [layer.weight, layer.bias] + return layer_structure + + +#param layer_structure : sequence used for length, use_dropout : controlling boolean, last_layer_dropout : for compatibility check. 
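+# For example, the function below produces:
+#   parse_dropout_structure([1, 2, 2, 1], True, True)   -> [0, 0.3, 0.3, 0]
+#   parse_dropout_structure([1, 2, 2, 1], True, False)  -> [0, 0.3, 0, 0]
+#   parse_dropout_structure([1, 2, 2, 1], False, False) -> [0, 0, 0, 0]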
+def parse_dropout_structure(layer_structure, use_dropout, last_layer_dropout): + if layer_structure is None: + layer_structure = [1, 2, 1] + if not use_dropout: + return [0] * len(layer_structure) + dropout_values = [0] + dropout_values.extend([0.3] * (len(layer_structure) - 3)) + if last_layer_dropout: + dropout_values.append(0.3) + else: + dropout_values.append(0) + dropout_values.append(0) + return dropout_values + + +class Hypernetwork: + filename = None + name = None + + def __init__(self, name=None, enable_sizes=None, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False, activate_output=False, **kwargs): + self.filename = None + self.name = name + self.layers = {} + self.step = 0 + self.sd_checkpoint = None + self.sd_checkpoint_name = None + self.layer_structure = layer_structure + self.activation_func = activation_func + self.weight_init = weight_init + self.add_layer_norm = add_layer_norm + self.use_dropout = use_dropout + self.activate_output = activate_output + self.last_layer_dropout = kwargs.get('last_layer_dropout', True) + self.dropout_structure = kwargs.get('dropout_structure', None) + if self.dropout_structure is None: + self.dropout_structure = parse_dropout_structure(self.layer_structure, self.use_dropout, self.last_layer_dropout) + self.optimizer_name = None + self.optimizer_state_dict = None + self.optional_info = None + + for size in enable_sizes or []: + self.layers[size] = ( + HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.weight_init, + self.add_layer_norm, self.activate_output, dropout_structure=self.dropout_structure), + HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.weight_init, + self.add_layer_norm, self.activate_output, dropout_structure=self.dropout_structure), + ) + self.eval() + + def weights(self): + res = [] + for layers in self.layers.values(): + for layer in layers: + res += layer.parameters() + return res + + def train(self, mode=True): + for layers in self.layers.values(): + for layer in layers: + layer.train(mode=mode) + for param in layer.parameters(): + param.requires_grad = mode + + def to(self, device): + for layers in self.layers.values(): + for layer in layers: + layer.to(device) + + return self + + def set_multiplier(self, multiplier): + for layers in self.layers.values(): + for layer in layers: + layer.multiplier = multiplier + + return self + + def eval(self): + for layers in self.layers.values(): + for layer in layers: + layer.eval() + for param in layer.parameters(): + param.requires_grad = False + + def save(self, filename): + state_dict = {} + optimizer_saved_dict = {} + + for k, v in self.layers.items(): + state_dict[k] = (v[0].state_dict(), v[1].state_dict()) + + state_dict['step'] = self.step + state_dict['name'] = self.name + state_dict['layer_structure'] = self.layer_structure + state_dict['activation_func'] = self.activation_func + state_dict['is_layer_norm'] = self.add_layer_norm + state_dict['weight_initialization'] = self.weight_init + state_dict['sd_checkpoint'] = self.sd_checkpoint + state_dict['sd_checkpoint_name'] = self.sd_checkpoint_name + state_dict['activate_output'] = self.activate_output + state_dict['use_dropout'] = self.use_dropout + state_dict['dropout_structure'] = self.dropout_structure + state_dict['last_layer_dropout'] = (self.dropout_structure[-2] != 0) if self.dropout_structure is not None else self.last_layer_dropout + state_dict['optional_info'] = self.optional_info if self.optional_info else 
None + + if self.optimizer_name is not None: + optimizer_saved_dict['optimizer_name'] = self.optimizer_name + + torch.save(state_dict, filename) + if shared.opts.save_optimizer_state and self.optimizer_state_dict: + optimizer_saved_dict['hash'] = self.shorthash() + optimizer_saved_dict['optimizer_state_dict'] = self.optimizer_state_dict + torch.save(optimizer_saved_dict, filename + '.optim') + + def load(self, filename): + self.filename = filename + if self.name is None: + self.name = os.path.splitext(os.path.basename(filename))[0] + + state_dict = torch.load(filename, map_location='cpu') + + self.layer_structure = state_dict.get('layer_structure', [1, 2, 1]) + self.optional_info = state_dict.get('optional_info', None) + self.activation_func = state_dict.get('activation_func', None) + self.weight_init = state_dict.get('weight_initialization', 'Normal') + self.add_layer_norm = state_dict.get('is_layer_norm', False) + self.dropout_structure = state_dict.get('dropout_structure', None) + self.use_dropout = True if self.dropout_structure is not None and any(self.dropout_structure) else state_dict.get('use_dropout', False) + self.activate_output = state_dict.get('activate_output', True) + self.last_layer_dropout = state_dict.get('last_layer_dropout', False) + # Dropout structure should have same length as layer structure, Every digits should be in [0,1), and last digit must be 0. + if self.dropout_structure is None: + self.dropout_structure = parse_dropout_structure(self.layer_structure, self.use_dropout, self.last_layer_dropout) + + if shared.opts.print_hypernet_extra: + if self.optional_info is not None: + print(f" INFO:\n {self.optional_info}\n") + + print(f" Layer structure: {self.layer_structure}") + print(f" Activation function: {self.activation_func}") + print(f" Weight initialization: {self.weight_init}") + print(f" Layer norm: {self.add_layer_norm}") + print(f" Dropout usage: {self.use_dropout}" ) + print(f" Activate last layer: {self.activate_output}") + print(f" Dropout structure: {self.dropout_structure}") + + optimizer_saved_dict = torch.load(self.filename + '.optim', map_location='cpu') if os.path.exists(self.filename + '.optim') else {} + + if self.shorthash() == optimizer_saved_dict.get('hash', None): + self.optimizer_state_dict = optimizer_saved_dict.get('optimizer_state_dict', None) + else: + self.optimizer_state_dict = None + if self.optimizer_state_dict: + self.optimizer_name = optimizer_saved_dict.get('optimizer_name', 'AdamW') + if shared.opts.print_hypernet_extra: + print("Loaded existing optimizer from checkpoint") + print(f"Optimizer name is {self.optimizer_name}") + else: + self.optimizer_name = "AdamW" + if shared.opts.print_hypernet_extra: + print("No saved optimizer exists in checkpoint") + + for size, sd in state_dict.items(): + if type(size) == int: + self.layers[size] = ( + HypernetworkModule(size, sd[0], self.layer_structure, self.activation_func, self.weight_init, + self.add_layer_norm, self.activate_output, self.dropout_structure), + HypernetworkModule(size, sd[1], self.layer_structure, self.activation_func, self.weight_init, + self.add_layer_norm, self.activate_output, self.dropout_structure), + ) + + self.name = state_dict.get('name', self.name) + self.step = state_dict.get('step', 0) + self.sd_checkpoint = state_dict.get('sd_checkpoint', None) + self.sd_checkpoint_name = state_dict.get('sd_checkpoint_name', None) + self.eval() + + def shorthash(self): + sha256 = hashes.sha256(self.filename, f'hypernet/{self.name}') + + return sha256[0:10] if sha256 else None 
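+
+# A minimal usage sketch for the class above (hypothetical "my_hypernet.pt";
+# load_hypernetworks() below is the real entry point that does this):
+#
+#     hn = Hypernetwork()
+#     hn.load(os.path.join(shared.cmd_opts.hypernetwork_dir, "my_hypernet.pt"))
+#     hn.set_multiplier(0.8)  # scale the residual each HypernetworkModule adds
+#     shared.loaded_hypernetworks = [hn]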
+ + +def list_hypernetworks(path): + res = {} + for filename in sorted(glob.iglob(os.path.join(path, '**/*.pt'), recursive=True), key=str.lower): + name = os.path.splitext(os.path.basename(filename))[0] + # Prevent a hypothetical "None.pt" from being listed. + if name != "None": + res[name] = filename + return res + + +def load_hypernetwork(name): + path = shared.hypernetworks.get(name, None) + + if path is None: + return None + + try: + hypernetwork = Hypernetwork() + hypernetwork.load(path) + return hypernetwork + except Exception: + errors.report(f"Error loading hypernetwork {path}", exc_info=True) + return None + + +def load_hypernetworks(names, multipliers=None): + already_loaded = {} + + for hypernetwork in shared.loaded_hypernetworks: + if hypernetwork.name in names: + already_loaded[hypernetwork.name] = hypernetwork + + shared.loaded_hypernetworks.clear() + + for i, name in enumerate(names): + hypernetwork = already_loaded.get(name, None) + if hypernetwork is None: + hypernetwork = load_hypernetwork(name) + + if hypernetwork is None: + continue + + hypernetwork.set_multiplier(multipliers[i] if multipliers else 1.0) + shared.loaded_hypernetworks.append(hypernetwork) + + +def apply_single_hypernetwork(hypernetwork, context_k, context_v, layer=None): + hypernetwork_layers = (hypernetwork.layers if hypernetwork is not None else {}).get(context_k.shape[2], None) + + if hypernetwork_layers is None: + return context_k, context_v + + if layer is not None: + layer.hyper_k = hypernetwork_layers[0] + layer.hyper_v = hypernetwork_layers[1] + + context_k = devices.cond_cast_unet(hypernetwork_layers[0](devices.cond_cast_float(context_k))) + context_v = devices.cond_cast_unet(hypernetwork_layers[1](devices.cond_cast_float(context_v))) + return context_k, context_v + + +def apply_hypernetworks(hypernetworks, context, layer=None): + context_k = context + context_v = context + for hypernetwork in hypernetworks: + context_k, context_v = apply_single_hypernetwork(hypernetwork, context_k, context_v, layer) + + return context_k, context_v + + +def attention_CrossAttention_forward(self, x, context=None, mask=None, **kwargs): + h = self.heads + + q = self.to_q(x) + context = default(context, x) + + context_k, context_v = apply_hypernetworks(shared.loaded_hypernetworks, context, self) + k = self.to_k(context_k) + v = self.to_v(context_v) + + q, k, v = (rearrange(t, 'b n (h d) -> (b h) n d', h=h) for t in (q, k, v)) + + sim = einsum('b i d, b j d -> b i j', q, k) * self.scale + + if mask is not None: + mask = rearrange(mask, 'b ... 
-> b (...)') + max_neg_value = -torch.finfo(sim.dtype).max + mask = repeat(mask, 'b j -> (b h) () j', h=h) + sim.masked_fill_(~mask, max_neg_value) + + # attention, what we cannot get enough of + attn = sim.softmax(dim=-1) + + out = einsum('b i j, b j d -> b i d', attn, v) + out = rearrange(out, '(b h) n d -> b n (h d)', h=h) + return self.to_out(out) + + +def stack_conds(conds): + if len(conds) == 1: + return torch.stack(conds) + + # same as in reconstruct_multicond_batch + token_count = max([x.shape[0] for x in conds]) + for i in range(len(conds)): + if conds[i].shape[0] != token_count: + last_vector = conds[i][-1:] + last_vector_repeated = last_vector.repeat([token_count - conds[i].shape[0], 1]) + conds[i] = torch.vstack([conds[i], last_vector_repeated]) + + return torch.stack(conds) + + +def statistics(data): + if len(data) < 2: + std = 0 + else: + std = stdev(data) + total_information = f"loss:{mean(data):.3f}" + u"\u00B1" + f"({std/ (len(data) ** 0.5):.3f})" + recent_data = data[-32:] + if len(recent_data) < 2: + std = 0 + else: + std = stdev(recent_data) + recent_information = f"recent 32 loss:{mean(recent_data):.3f}" + u"\u00B1" + f"({std / (len(recent_data) ** 0.5):.3f})" + return total_information, recent_information + + +def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False, dropout_structure=None): + # Remove illegal characters from name. + name = "".join( x for x in name if (x.isalnum() or x in "._- ")) + assert name, "Name cannot be empty!" + + fn = os.path.join(shared.cmd_opts.hypernetwork_dir, f"{name}.pt") + if not overwrite_old: + assert not os.path.exists(fn), f"file {fn} already exists" + + if type(layer_structure) == str: + layer_structure = [float(x.strip()) for x in layer_structure.split(",")] + + if use_dropout and dropout_structure and type(dropout_structure) == str: + dropout_structure = [float(x.strip()) for x in dropout_structure.split(",")] + else: + dropout_structure = [0] * len(layer_structure) + + hypernet = modules.hypernetworks.hypernetwork.Hypernetwork( + name=name, + enable_sizes=[int(x) for x in enable_sizes], + layer_structure=layer_structure, + activation_func=activation_func, + weight_init=weight_init, + add_layer_norm=add_layer_norm, + use_dropout=use_dropout, + dropout_structure=dropout_structure + ) + hypernet.save(fn) + + shared.reload_hypernetworks() + + +def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradient_step, data_root, log_directory, training_width, training_height, varsize, steps, clip_grad_mode, clip_grad_value, shuffle_tags, tag_drop_out, latent_sampling_method, use_weight, create_image_every, save_hypernetwork_every, template_filename, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height): + # images allows training previews to have infotext. Importing it at the top causes a circular import problem. 
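+    # (a deferred import like this runs on first call instead of at module load
+    # time, which is the usual way to break an import cycle in Python)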
+ from modules import images + + save_hypernetwork_every = save_hypernetwork_every or 0 + create_image_every = create_image_every or 0 + template_file = textual_inversion.textual_inversion_templates.get(template_filename, None) + textual_inversion.validate_train_inputs(hypernetwork_name, learn_rate, batch_size, gradient_step, data_root, template_file, template_filename, steps, save_hypernetwork_every, create_image_every, log_directory, name="hypernetwork") + template_file = template_file.path + + path = shared.hypernetworks.get(hypernetwork_name, None) + hypernetwork = Hypernetwork() + hypernetwork.load(path) + shared.loaded_hypernetworks = [hypernetwork] + + shared.state.job = "train-hypernetwork" + shared.state.textinfo = "Initializing hypernetwork training..." + shared.state.job_count = steps + + hypernetwork_name = hypernetwork_name.rsplit('(', 1)[0] + filename = os.path.join(shared.cmd_opts.hypernetwork_dir, f'{hypernetwork_name}.pt') + + log_directory = os.path.join(log_directory, datetime.datetime.now().strftime("%Y-%m-%d"), hypernetwork_name) + unload = shared.opts.unload_models_when_training + + if save_hypernetwork_every > 0: + hypernetwork_dir = os.path.join(log_directory, "hypernetworks") + os.makedirs(hypernetwork_dir, exist_ok=True) + else: + hypernetwork_dir = None + + if create_image_every > 0: + images_dir = os.path.join(log_directory, "images") + os.makedirs(images_dir, exist_ok=True) + else: + images_dir = None + + checkpoint = sd_models.select_checkpoint() + + initial_step = hypernetwork.step or 0 + if initial_step >= steps: + shared.state.textinfo = "Model has already been trained beyond specified max steps" + return hypernetwork, filename + + scheduler = LearnRateScheduler(learn_rate, steps, initial_step) + + clip_grad = torch.nn.utils.clip_grad_value_ if clip_grad_mode == "value" else torch.nn.utils.clip_grad_norm_ if clip_grad_mode == "norm" else None + if clip_grad: + clip_grad_sched = LearnRateScheduler(clip_grad_value, steps, initial_step, verbose=False) + + if shared.opts.training_enable_tensorboard: + tensorboard_writer = textual_inversion.tensorboard_setup(log_directory) + + # dataset loading may take a while, so input validations and early returns should be done before this + shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..." 
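+
+    # pin_memory stores fetched batches in page-locked host memory, so the
+    # non_blocking=pin_memory device copies below can overlap with compute.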
+    pin_memory = shared.opts.pin_memory
+
+    ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=hypernetwork_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, include_cond=True, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method, varsize=varsize, use_weight=use_weight)
+
+    if shared.opts.save_training_settings_to_txt:
+        saved_params = dict(
+            model_name=checkpoint.model_name, model_hash=checkpoint.shorthash, num_of_dataset_images=len(ds),
+            **{field: getattr(hypernetwork, field) for field in ['layer_structure', 'activation_func', 'weight_init', 'add_layer_norm', 'use_dropout']}
+        )
+        logging.save_settings_to_file(log_directory, {**saved_params, **locals()})
+
+    latent_sampling_method = ds.latent_sampling_method
+
+    dl = modules.textual_inversion.dataset.PersonalizedDataLoader(ds, latent_sampling_method=latent_sampling_method, batch_size=ds.batch_size, pin_memory=pin_memory)
+
+    old_parallel_processing_allowed = shared.parallel_processing_allowed
+
+    if unload:
+        shared.parallel_processing_allowed = False
+        shared.sd_model.cond_stage_model.to(devices.cpu)
+        shared.sd_model.first_stage_model.to(devices.cpu)
+
+    weights = hypernetwork.weights()
+    hypernetwork.train()
+
+    # Use the optimizer type saved with the hypernetwork; fall back to AdamW if it is not recognized. (A UI option could expose this choice.)
+    if hypernetwork.optimizer_name in optimizer_dict:
+        optimizer = optimizer_dict[hypernetwork.optimizer_name](params=weights, lr=scheduler.learn_rate)
+        optimizer_name = hypernetwork.optimizer_name
+    else:
+        print(f"Optimizer type {hypernetwork.optimizer_name} is not defined; falling back to AdamW.")
+        optimizer = torch.optim.AdamW(params=weights, lr=scheduler.learn_rate)
+        optimizer_name = 'AdamW'
+
+    if hypernetwork.optimizer_state_dict:  # note: this assumes the saved state matches the optimizer type chosen above
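+        # If the saved state is incompatible with the optimizer constructed above,
+        # loading is expected to fail; the handler below resumes with a fresh optimizer state.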
+ try: + optimizer.load_state_dict(hypernetwork.optimizer_state_dict) + except RuntimeError as e: + print("Cannot resume from saved optimizer!") + print(e) + + scaler = torch.cuda.amp.GradScaler() + + batch_size = ds.batch_size + gradient_step = ds.gradient_step + # n steps = batch_size * gradient_step * n image processed + steps_per_epoch = len(ds) // batch_size // gradient_step + max_steps_per_epoch = len(ds) // batch_size - (len(ds) // batch_size) % gradient_step + loss_step = 0 + _loss_step = 0 #internal + # size = len(ds.indexes) + # loss_dict = defaultdict(lambda : deque(maxlen = 1024)) + loss_logging = deque(maxlen=len(ds) * 3) # this should be configurable parameter, this is 3 * epoch(dataset size) + # losses = torch.zeros((size,)) + # previous_mean_losses = [0] + # previous_mean_loss = 0 + # print("Mean loss of {} elements".format(size)) + + steps_without_grad = 0 + + last_saved_file = "" + last_saved_image = "" + forced_filename = "" + + pbar = tqdm.tqdm(total=steps - initial_step) + try: + sd_hijack_checkpoint.add() + + for _ in range((steps-initial_step) * gradient_step): + if scheduler.finished: + break + if shared.state.interrupted: + break + for j, batch in enumerate(dl): + # works as a drop_last=True for gradient accumulation + if j == max_steps_per_epoch: + break + scheduler.apply(optimizer, hypernetwork.step) + if scheduler.finished: + break + if shared.state.interrupted: + break + + if clip_grad: + clip_grad_sched.step(hypernetwork.step) + + with devices.autocast(): + x = batch.latent_sample.to(devices.device, non_blocking=pin_memory) + if use_weight: + w = batch.weight.to(devices.device, non_blocking=pin_memory) + if tag_drop_out != 0 or shuffle_tags: + shared.sd_model.cond_stage_model.to(devices.device) + c = shared.sd_model.cond_stage_model(batch.cond_text).to(devices.device, non_blocking=pin_memory) + shared.sd_model.cond_stage_model.to(devices.cpu) + else: + c = stack_conds(batch.cond).to(devices.device, non_blocking=pin_memory) + if use_weight: + loss = shared.sd_model.weighted_forward(x, c, w)[0] / gradient_step + del w + else: + loss = shared.sd_model.forward(x, c)[0] / gradient_step + del x + del c + + _loss_step += loss.item() + scaler.scale(loss).backward() + + # go back until we reach gradient accumulation steps + if (j + 1) % gradient_step != 0: + continue + loss_logging.append(_loss_step) + if clip_grad: + clip_grad(weights, clip_grad_sched.learn_rate) + + scaler.step(optimizer) + scaler.update() + hypernetwork.step += 1 + pbar.update() + optimizer.zero_grad(set_to_none=True) + loss_step = _loss_step + _loss_step = 0 + + steps_done = hypernetwork.step + 1 + + epoch_num = hypernetwork.step // steps_per_epoch + epoch_step = hypernetwork.step % steps_per_epoch + + description = f"Training hypernetwork [Epoch {epoch_num}: {epoch_step+1}/{steps_per_epoch}]loss: {loss_step:.7f}" + pbar.set_description(description) + if hypernetwork_dir is not None and steps_done % save_hypernetwork_every == 0: + # Before saving, change name to match current checkpoint. + hypernetwork_name_every = f'{hypernetwork_name}-{steps_done}' + last_saved_file = os.path.join(hypernetwork_dir, f'{hypernetwork_name_every}.pt') + hypernetwork.optimizer_name = optimizer_name + if shared.opts.save_optimizer_state: + hypernetwork.optimizer_state_dict = optimizer.state_dict() + save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, last_saved_file) + hypernetwork.optimizer_state_dict = None # dereference it after saving, to save memory. 
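+                # The step accounting above, illustrated: with batch_size=2 and gradient_step=4,
+                # one optimizer step consumes 8 images, so steps_per_epoch = len(ds) // 2 // 4.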
+ + + + if shared.opts.training_enable_tensorboard: + epoch_num = hypernetwork.step // len(ds) + epoch_step = hypernetwork.step - (epoch_num * len(ds)) + 1 + mean_loss = sum(loss_logging) / len(loss_logging) + textual_inversion.tensorboard_add(tensorboard_writer, loss=mean_loss, global_step=hypernetwork.step, step=epoch_step, learn_rate=scheduler.learn_rate, epoch_num=epoch_num) + + textual_inversion.write_loss(log_directory, "hypernetwork_loss.csv", hypernetwork.step, steps_per_epoch, { + "loss": f"{loss_step:.7f}", + "learn_rate": scheduler.learn_rate + }) + + if images_dir is not None and steps_done % create_image_every == 0: + forced_filename = f'{hypernetwork_name}-{steps_done}' + last_saved_image = os.path.join(images_dir, forced_filename) + hypernetwork.eval() + rng_state = torch.get_rng_state() + cuda_rng_state = None + if torch.cuda.is_available(): + cuda_rng_state = torch.cuda.get_rng_state_all() + shared.sd_model.cond_stage_model.to(devices.device) + shared.sd_model.first_stage_model.to(devices.device) + + p = processing.StableDiffusionProcessingTxt2Img( + sd_model=shared.sd_model, + do_not_save_grid=True, + do_not_save_samples=True, + ) + + p.disable_extra_networks = True + + if preview_from_txt2img: + p.prompt = preview_prompt + p.negative_prompt = preview_negative_prompt + p.steps = preview_steps + p.sampler_name = sd_samplers.samplers[preview_sampler_index].name + p.cfg_scale = preview_cfg_scale + p.seed = preview_seed + p.width = preview_width + p.height = preview_height + else: + p.prompt = batch.cond_text[0] + p.steps = 20 + p.width = training_width + p.height = training_height + + preview_text = p.prompt + + with closing(p): + processed = processing.process_images(p) + image = processed.images[0] if len(processed.images) > 0 else None + + if unload: + shared.sd_model.cond_stage_model.to(devices.cpu) + shared.sd_model.first_stage_model.to(devices.cpu) + torch.set_rng_state(rng_state) + if torch.cuda.is_available(): + torch.cuda.set_rng_state_all(cuda_rng_state) + hypernetwork.train() + if image is not None: + shared.state.assign_current_image(image) + if shared.opts.training_enable_tensorboard and shared.opts.training_tensorboard_save_images: + textual_inversion.tensorboard_add_image(tensorboard_writer, + f"Validation at epoch {epoch_num}", image, + hypernetwork.step) + last_saved_image, last_text_info = images.save_image(image, images_dir, "", p.seed, p.prompt, shared.opts.samples_format, processed.infotexts[0], p=p, forced_filename=forced_filename, save_to_dirs=False) + last_saved_image += f", prompt: {preview_text}" + + shared.state.job_no = hypernetwork.step + + shared.state.textinfo = f""" +
<p>
+Loss: {loss_step:.7f}<br/>
+Step: {steps_done}<br/>
+Last prompt: {html.escape(batch.cond_text[0])}<br/>
+Last saved hypernetwork: {html.escape(last_saved_file)}<br/>
+Last saved image: {html.escape(last_saved_image)}<br/>
+</p>
+""" + except Exception: + errors.report("Exception in training hypernetwork", exc_info=True) + finally: + pbar.leave = False + pbar.close() + hypernetwork.eval() + sd_hijack_checkpoint.remove() + + + + filename = os.path.join(shared.cmd_opts.hypernetwork_dir, f'{hypernetwork_name}.pt') + hypernetwork.optimizer_name = optimizer_name + if shared.opts.save_optimizer_state: + hypernetwork.optimizer_state_dict = optimizer.state_dict() + save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, filename) + + del optimizer + hypernetwork.optimizer_state_dict = None # dereference it after saving, to save memory. + shared.sd_model.cond_stage_model.to(devices.device) + shared.sd_model.first_stage_model.to(devices.device) + shared.parallel_processing_allowed = old_parallel_processing_allowed + + return hypernetwork, filename + +def save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, filename): + old_hypernetwork_name = hypernetwork.name + old_sd_checkpoint = hypernetwork.sd_checkpoint if hasattr(hypernetwork, "sd_checkpoint") else None + old_sd_checkpoint_name = hypernetwork.sd_checkpoint_name if hasattr(hypernetwork, "sd_checkpoint_name") else None + try: + hypernetwork.sd_checkpoint = checkpoint.shorthash + hypernetwork.sd_checkpoint_name = checkpoint.model_name + hypernetwork.name = hypernetwork_name + hypernetwork.save(filename) + except: + hypernetwork.sd_checkpoint = old_sd_checkpoint + hypernetwork.sd_checkpoint_name = old_sd_checkpoint_name + hypernetwork.name = old_hypernetwork_name + raise diff --git a/modules/hypernetworks/ui.py b/modules/hypernetworks/ui.py new file mode 100644 index 0000000000000000000000000000000000000000..351910461dadbf3bfe027e542e0fddf896352d17 --- /dev/null +++ b/modules/hypernetworks/ui.py @@ -0,0 +1,38 @@ +import html + +import gradio as gr +import modules.hypernetworks.hypernetwork +from modules import devices, sd_hijack, shared + +not_available = ["hardswish", "multiheadattention"] +keys = [x for x in modules.hypernetworks.hypernetwork.HypernetworkModule.activation_dict if x not in not_available] + + +def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False, dropout_structure=None): + filename = modules.hypernetworks.hypernetwork.create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure, activation_func, weight_init, add_layer_norm, use_dropout, dropout_structure) + + return gr.Dropdown.update(choices=sorted(shared.hypernetworks)), f"Created: {filename}", "" + + +def train_hypernetwork(*args): + shared.loaded_hypernetworks = [] + + assert not shared.cmd_opts.lowvram, 'Training models with lowvram is not possible' + + try: + sd_hijack.undo_optimizations() + + hypernetwork, filename = modules.hypernetworks.hypernetwork.train_hypernetwork(*args) + + res = f""" +Training {'interrupted' if shared.state.interrupted else 'finished'} at {hypernetwork.step} steps. 
+Hypernetwork saved to {html.escape(filename)} +""" + return res, "" + except Exception: + raise + finally: + shared.sd_model.cond_stage_model.to(devices.device) + shared.sd_model.first_stage_model.to(devices.device) + sd_hijack.apply_optimizations() + diff --git a/modules/images.py b/modules/images.py new file mode 100644 index 0000000000000000000000000000000000000000..98dd36b4d565569a1c6ac622da6221bed4f39a66 --- /dev/null +++ b/modules/images.py @@ -0,0 +1,758 @@ +from __future__ import annotations + +import datetime + +import pytz +import io +import math +import os +from collections import namedtuple +import re + +import numpy as np +import piexif +import piexif.helper +from PIL import Image, ImageFont, ImageDraw, ImageColor, PngImagePlugin +import string +import json +import hashlib + +from modules import sd_samplers, shared, script_callbacks, errors +from modules.paths_internal import roboto_ttf_file +from modules.shared import opts + +import modules.sd_vae as sd_vae + +LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS) + + +def get_font(fontsize: int): + try: + return ImageFont.truetype(opts.font or roboto_ttf_file, fontsize) + except Exception: + return ImageFont.truetype(roboto_ttf_file, fontsize) + + +def image_grid(imgs, batch_size=1, rows=None): + if rows is None: + if opts.n_rows > 0: + rows = opts.n_rows + elif opts.n_rows == 0: + rows = batch_size + elif opts.grid_prevent_empty_spots: + rows = math.floor(math.sqrt(len(imgs))) + while len(imgs) % rows != 0: + rows -= 1 + else: + rows = math.sqrt(len(imgs)) + rows = round(rows) + if rows > len(imgs): + rows = len(imgs) + + cols = math.ceil(len(imgs) / rows) + + params = script_callbacks.ImageGridLoopParams(imgs, cols, rows) + script_callbacks.image_grid_callback(params) + + w, h = imgs[0].size + grid = Image.new('RGB', size=(params.cols * w, params.rows * h), color='black') + + for i, img in enumerate(params.imgs): + grid.paste(img, box=(i % params.cols * w, i // params.cols * h)) + + return grid + + +Grid = namedtuple("Grid", ["tiles", "tile_w", "tile_h", "image_w", "image_h", "overlap"]) + + +def split_grid(image, tile_w=512, tile_h=512, overlap=64): + w = image.width + h = image.height + + non_overlap_width = tile_w - overlap + non_overlap_height = tile_h - overlap + + cols = math.ceil((w - overlap) / non_overlap_width) + rows = math.ceil((h - overlap) / non_overlap_height) + + dx = (w - tile_w) / (cols - 1) if cols > 1 else 0 + dy = (h - tile_h) / (rows - 1) if rows > 1 else 0 + + grid = Grid([], tile_w, tile_h, w, h, overlap) + for row in range(rows): + row_images = [] + + y = int(row * dy) + + if y + tile_h >= h: + y = h - tile_h + + for col in range(cols): + x = int(col * dx) + + if x + tile_w >= w: + x = w - tile_w + + tile = image.crop((x, y, x + tile_w, y + tile_h)) + + row_images.append([x, tile_w, tile]) + + grid.tiles.append([y, tile_h, row_images]) + + return grid + + +def combine_grid(grid): + def make_mask_image(r): + r = r * 255 / grid.overlap + r = r.astype(np.uint8) + return Image.fromarray(r, 'L') + + mask_w = make_mask_image(np.arange(grid.overlap, dtype=np.float32).reshape((1, grid.overlap)).repeat(grid.tile_h, axis=0)) + mask_h = make_mask_image(np.arange(grid.overlap, dtype=np.float32).reshape((grid.overlap, 1)).repeat(grid.image_w, axis=1)) + + combined_image = Image.new("RGB", (grid.image_w, grid.image_h)) + for y, h, row in grid.tiles: + combined_row = Image.new("RGB", (grid.image_w, h)) + for x, w, tile in row: + if x == 0: + combined_row.paste(tile, (0, 0)) + 
continue + + combined_row.paste(tile.crop((0, 0, grid.overlap, h)), (x, 0), mask=mask_w) + combined_row.paste(tile.crop((grid.overlap, 0, w, h)), (x + grid.overlap, 0)) + + if y == 0: + combined_image.paste(combined_row, (0, 0)) + continue + + combined_image.paste(combined_row.crop((0, 0, combined_row.width, grid.overlap)), (0, y), mask=mask_h) + combined_image.paste(combined_row.crop((0, grid.overlap, combined_row.width, h)), (0, y + grid.overlap)) + + return combined_image + + +class GridAnnotation: + def __init__(self, text='', is_active=True): + self.text = text + self.is_active = is_active + self.size = None + + +def draw_grid_annotations(im, width, height, hor_texts, ver_texts, margin=0): + + color_active = ImageColor.getcolor(opts.grid_text_active_color, 'RGB') + color_inactive = ImageColor.getcolor(opts.grid_text_inactive_color, 'RGB') + color_background = ImageColor.getcolor(opts.grid_background_color, 'RGB') + + def wrap(drawing, text, font, line_length): + lines = [''] + for word in text.split(): + line = f'{lines[-1]} {word}'.strip() + if drawing.textlength(line, font=font) <= line_length: + lines[-1] = line + else: + lines.append(word) + return lines + + def draw_texts(drawing, draw_x, draw_y, lines, initial_fnt, initial_fontsize): + for line in lines: + fnt = initial_fnt + fontsize = initial_fontsize + while drawing.multiline_textsize(line.text, font=fnt)[0] > line.allowed_width and fontsize > 0: + fontsize -= 1 + fnt = get_font(fontsize) + drawing.multiline_text((draw_x, draw_y + line.size[1] / 2), line.text, font=fnt, fill=color_active if line.is_active else color_inactive, anchor="mm", align="center") + + if not line.is_active: + drawing.line((draw_x - line.size[0] // 2, draw_y + line.size[1] // 2, draw_x + line.size[0] // 2, draw_y + line.size[1] // 2), fill=color_inactive, width=4) + + draw_y += line.size[1] + line_spacing + + fontsize = (width + height) // 25 + line_spacing = fontsize // 2 + + fnt = get_font(fontsize) + + pad_left = 0 if sum([sum([len(line.text) for line in lines]) for lines in ver_texts]) == 0 else width * 3 // 4 + + cols = im.width // width + rows = im.height // height + + assert cols == len(hor_texts), f'bad number of horizontal texts: {len(hor_texts)}; must be {cols}' + assert rows == len(ver_texts), f'bad number of vertical texts: {len(ver_texts)}; must be {rows}' + + calc_img = Image.new("RGB", (1, 1), color_background) + calc_d = ImageDraw.Draw(calc_img) + + for texts, allowed_width in zip(hor_texts + ver_texts, [width] * len(hor_texts) + [pad_left] * len(ver_texts)): + items = [] + texts + texts.clear() + + for line in items: + wrapped = wrap(calc_d, line.text, fnt, allowed_width) + texts += [GridAnnotation(x, line.is_active) for x in wrapped] + + for line in texts: + bbox = calc_d.multiline_textbbox((0, 0), line.text, font=fnt) + line.size = (bbox[2] - bbox[0], bbox[3] - bbox[1]) + line.allowed_width = allowed_width + + hor_text_heights = [sum([line.size[1] + line_spacing for line in lines]) - line_spacing for lines in hor_texts] + ver_text_heights = [sum([line.size[1] + line_spacing for line in lines]) - line_spacing * len(lines) for lines in ver_texts] + + pad_top = 0 if sum(hor_text_heights) == 0 else max(hor_text_heights) + line_spacing * 2 + + result = Image.new("RGB", (im.width + pad_left + margin * (cols-1), im.height + pad_top + margin * (rows-1)), color_background) + + for row in range(rows): + for col in range(cols): + cell = im.crop((width * col, height * row, width * (col+1), height * (row+1))) + result.paste(cell, (pad_left + 
(width + margin) * col, pad_top + (height + margin) * row))
+
+    d = ImageDraw.Draw(result)
+
+    for col in range(cols):
+        x = pad_left + (width + margin) * col + width / 2
+        y = pad_top / 2 - hor_text_heights[col] / 2
+
+        draw_texts(d, x, y, hor_texts[col], fnt, fontsize)
+
+    for row in range(rows):
+        x = pad_left / 2
+        y = pad_top + (height + margin) * row + height / 2 - ver_text_heights[row] / 2
+
+        draw_texts(d, x, y, ver_texts[row], fnt, fontsize)
+
+    return result
+
+
+def draw_prompt_matrix(im, width, height, all_prompts, margin=0):
+    prompts = all_prompts[1:]
+    boundary = math.ceil(len(prompts) / 2)
+
+    prompts_horiz = prompts[:boundary]
+    prompts_vert = prompts[boundary:]
+
+    hor_texts = [[GridAnnotation(x, is_active=pos & (1 << i) != 0) for i, x in enumerate(prompts_horiz)] for pos in range(1 << len(prompts_horiz))]
+    ver_texts = [[GridAnnotation(x, is_active=pos & (1 << i) != 0) for i, x in enumerate(prompts_vert)] for pos in range(1 << len(prompts_vert))]
+
+    return draw_grid_annotations(im, width, height, hor_texts, ver_texts, margin)
+
+
+def resize_image(resize_mode, im, width, height, upscaler_name=None):
+    """
+    Resizes an image with the specified resize_mode, width, and height.
+
+    Args:
+        resize_mode: The mode to use when resizing the image.
+            0: Resize the image to exactly the specified width and height.
+            1: Resize the image to fill the specified width and height, maintaining the aspect ratio, then center it within the dimensions, cropping the excess.
+            2: Resize the image to fit within the specified width and height, maintaining the aspect ratio, then center it within the dimensions, filling the empty space by repeating the image's edge pixels.
+        im: The image to resize.
+        width: The width to resize the image to.
+        height: The height to resize the image to.
+        upscaler_name: The name of the upscaler to use. If not provided, defaults to opts.upscaler_for_img2img.
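+
+        Example (illustrative): with resize_mode=1, a 1024x768 source and a 512x512
+        target, the image is first scaled to 682x512 so its height fits, then
+        center-cropped horizontally to 512x512.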
+ """ + + upscaler_name = upscaler_name or opts.upscaler_for_img2img + + def resize(im, w, h): + if upscaler_name is None or upscaler_name == "None" or im.mode == 'L': + return im.resize((w, h), resample=LANCZOS) + + scale = max(w / im.width, h / im.height) + + if scale > 1.0: + upscalers = [x for x in shared.sd_upscalers if x.name == upscaler_name] + if len(upscalers) == 0: + upscaler = shared.sd_upscalers[0] + print(f"could not find upscaler named {upscaler_name or ''}, using {upscaler.name} as a fallback") + else: + upscaler = upscalers[0] + + im = upscaler.scaler.upscale(im, scale, upscaler.data_path) + + if im.width != w or im.height != h: + im = im.resize((w, h), resample=LANCZOS) + + return im + + if resize_mode == 0: + res = resize(im, width, height) + + elif resize_mode == 1: + ratio = width / height + src_ratio = im.width / im.height + + src_w = width if ratio > src_ratio else im.width * height // im.height + src_h = height if ratio <= src_ratio else im.height * width // im.width + + resized = resize(im, src_w, src_h) + res = Image.new("RGB", (width, height)) + res.paste(resized, box=(width // 2 - src_w // 2, height // 2 - src_h // 2)) + + else: + ratio = width / height + src_ratio = im.width / im.height + + src_w = width if ratio < src_ratio else im.width * height // im.height + src_h = height if ratio >= src_ratio else im.height * width // im.width + + resized = resize(im, src_w, src_h) + res = Image.new("RGB", (width, height)) + res.paste(resized, box=(width // 2 - src_w // 2, height // 2 - src_h // 2)) + + if ratio < src_ratio: + fill_height = height // 2 - src_h // 2 + if fill_height > 0: + res.paste(resized.resize((width, fill_height), box=(0, 0, width, 0)), box=(0, 0)) + res.paste(resized.resize((width, fill_height), box=(0, resized.height, width, resized.height)), box=(0, fill_height + src_h)) + elif ratio > src_ratio: + fill_width = width // 2 - src_w // 2 + if fill_width > 0: + res.paste(resized.resize((fill_width, height), box=(0, 0, 0, height)), box=(0, 0)) + res.paste(resized.resize((fill_width, height), box=(resized.width, 0, resized.width, height)), box=(fill_width + src_w, 0)) + + return res + + +invalid_filename_chars = '<>:"/\\|?*\n' +invalid_filename_prefix = ' ' +invalid_filename_postfix = ' .' +re_nonletters = re.compile(r'[\s' + string.punctuation + ']+') +re_pattern = re.compile(r"(.*?)(?:\[([^\[\]]+)\]|$)") +re_pattern_arg = re.compile(r"(.*)<([^>]*)>$") +max_filename_part_length = 128 +NOTHING_AND_SKIP_PREVIOUS_TEXT = object() + + +def sanitize_filename_part(text, replace_spaces=True): + if text is None: + return None + + if replace_spaces: + text = text.replace(' ', '_') + + text = text.translate({ord(x): '_' for x in invalid_filename_chars}) + text = text.lstrip(invalid_filename_prefix)[:max_filename_part_length] + text = text.rstrip(invalid_filename_postfix) + return text + + +class FilenameGenerator: + def get_vae_filename(self): #get the name of the VAE file. + if sd_vae.loaded_vae_file is None: + return "NoneType" + file_name = os.path.basename(sd_vae.loaded_vae_file) + split_file_name = file_name.split('.') + if len(split_file_name) > 1 and split_file_name[0] == '': + return split_file_name[1] # if the first character of the filename is "." then [1] is obtained. 
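+            # e.g. a VAE file named ".vae.pt" splits into ['', 'vae', 'pt'], so the usable name is 'vae'.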
+        else:
+            return split_file_name[0]
+
+    replacements = {
+        'seed': lambda self: self.seed if self.seed is not None else '',
+        'seed_first': lambda self: self.seed if self.p.batch_size == 1 else self.p.all_seeds[0],
+        'seed_last': lambda self: NOTHING_AND_SKIP_PREVIOUS_TEXT if self.p.batch_size == 1 else self.p.all_seeds[-1],
+        'steps': lambda self: self.p and self.p.steps,
+        'cfg': lambda self: self.p and self.p.cfg_scale,
+        'width': lambda self: self.image.width,
+        'height': lambda self: self.image.height,
+        'styles': lambda self: self.p and sanitize_filename_part(", ".join([style for style in self.p.styles if style != "None"]) or "None", replace_spaces=False),
+        'sampler': lambda self: self.p and sanitize_filename_part(self.p.sampler_name, replace_spaces=False),
+        'model_hash': lambda self: getattr(self.p, "sd_model_hash", shared.sd_model.sd_model_hash),
+        'model_name': lambda self: sanitize_filename_part(shared.sd_model.sd_checkpoint_info.name_for_extra, replace_spaces=False),
+        'date': lambda self: datetime.datetime.now().strftime('%Y-%m-%d'),
+        'datetime': lambda self, *args: self.datetime(*args),  # accepts formats: [datetime], [datetime<Format>], [datetime<Format><Time Zone>