diff --git a/.gitattributes b/.gitattributes index a6344aac8c09253b3b630fb776ae94478aa0275b..0567a46e2b44dcd7a217abe13ce366674a6f48d5 100644 --- a/.gitattributes +++ b/.gitattributes @@ -33,3 +33,14 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text *.zip filter=lfs diff=lfs merge=lfs -text *.zst filter=lfs diff=lfs merge=lfs -text *tfevents* filter=lfs diff=lfs merge=lfs -text +Real-ESRGAN/assets/teaser-text.png filter=lfs diff=lfs merge=lfs -text +Real-ESRGAN/assets/teaser.jpg filter=lfs diff=lfs merge=lfs -text +Real-ESRGAN/inputs/00003.png filter=lfs diff=lfs merge=lfs -text +Real-ESRGAN/inputs/children-alpha.png filter=lfs diff=lfs merge=lfs -text +Real-ESRGAN/inputs/OST_009.png filter=lfs diff=lfs merge=lfs -text +Real-ESRGAN/inputs/tree_alpha_16bit.png filter=lfs diff=lfs merge=lfs -text +Real-ESRGAN/inputs/video/onepiece_demo.mp4 filter=lfs diff=lfs merge=lfs -text +Real-ESRGAN/tests/data/gt.lmdb/data.mdb filter=lfs diff=lfs merge=lfs -text +Real-ESRGAN/tests/data/gt/baboon.png filter=lfs diff=lfs merge=lfs -text +Real-ESRGAN/tests/data/gt/comic.png filter=lfs diff=lfs merge=lfs -text +test.mp4 filter=lfs diff=lfs merge=lfs -text diff --git a/Frame_remover_v2.py b/Frame_remover_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..571a336bc642eb074ae405bfb35be9dbb65e851f --- /dev/null +++ b/Frame_remover_v2.py @@ -0,0 +1,245 @@ +import os +import cv2 +import logging +import traceback +import shutil +import tempfile +from datetime import timedelta +from tqdm import tqdm +import threading + +from insightface.app import FaceAnalysis +from moviepy.editor import VideoFileClip, concatenate_audioclips + +# Set the temporary directory for temporary files +tempfile.tempdir = 'D:\\Switcher\\Temp' # Update this path if needed + +# Configure logging +logging.basicConfig( + filename='D:\\Switcher\\video_processing.log', # Update this path if needed + level=logging.INFO, + format='%(asctime)s [%(levelname)s] %(message)s', + datefmt='%Y-%m-%d %H:%M:%S' +) + +# Thread lock for face analyzer initialization +THREAD_LOCK = threading.Lock() +FACE_ANALYSER = None + +def get_face_analyser(): + global FACE_ANALYSER + with THREAD_LOCK: + if FACE_ANALYSER is None: + # Initialize FaceAnalysis with specified model and providers + FACE_ANALYSER = FaceAnalysis(name='buffalo_l', providers=['CUDAExecutionProvider', 'CPUExecutionProvider']) + # Prepare the analyzer + FACE_ANALYSER.prepare(ctx_id=0) + logging.info(f'FaceAnalysis initialized. Models loaded: {list(FACE_ANALYSER.models.keys())}') + return FACE_ANALYSER + +def get_many_faces(frame): + try: + faces = get_face_analyser().get(frame) + return faces + except Exception as e: + logging.error(f'Error in get_many_faces: {e}') + logging.error(traceback.format_exc()) + return [] + +def process_video(filename, video_dir, output_dir): + try: + video_path = os.path.join(video_dir, filename) + output_video_path = os.path.join(output_dir, filename) + temp_video_path = os.path.join(tempfile.gettempdir(), f"temp_{filename}") + logging.info(f'Starting processing video: {video_path}') + + # Open video file + cap = cv2.VideoCapture(video_path) + if not cap.isOpened(): + logging.error(f'Failed to open video file: {video_path}') + return None + + # Get video properties + fps = cap.get(cv2.CAP_PROP_FPS) + if fps == 0 or fps is None or fps != fps: + fps = 30 # Default FPS if unable to get FPS from video + logging.warning(f'FPS not detected in {filename}. 
Using default FPS: {fps}') + width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) + duration_seconds = total_frames / fps + duration = str(timedelta(seconds=int(duration_seconds))) + + # Define the codec and create VideoWriter object + fourcc = cv2.VideoWriter_fourcc(*'mp4v') # You can use 'XVID' or other codecs + out = cv2.VideoWriter(temp_video_path, fourcc, fps, (width, height)) + + frames_with_faces = 0 + frame_index = 0 + + # Initialize variables for audio synchronization + face_intervals = [] + in_face_sequence = False + + # Create a progress bar using tqdm + with tqdm(total=total_frames, desc=f"Processing {filename}", unit="frame", ncols=120) as pbar: + while True: + ret, frame = cap.read() + if not ret: + break + + # Analyze frame for faces + faces = get_many_faces(frame) + if len(faces) > 0: + frames_with_faces += 1 + # Write the frame to the output video + out.write(frame) + + # Start a new face interval if not already in one + if not in_face_sequence: + in_face_sequence = True + start_time = frame_index / fps + else: + # End the current face interval if in one + if in_face_sequence: + in_face_sequence = False + end_time = frame_index / fps + face_intervals.append((start_time, end_time)) + + frame_index += 1 + pbar.update(1) # Update the progress bar by one frame + + # Calculate percentage of frames with faces + percentage = (frames_with_faces / frame_index) * 100 if frame_index > 0 else 0 + + # Update the postfix with the number of frames with faces and percentage + pbar.set_postfix({ + 'Faces Detected': frames_with_faces, + 'Face Percentage': f"{percentage:.2f}%" + }) + + # Check if we were still in a face interval at the end + if in_face_sequence: + end_time = frame_index / fps + face_intervals.append((start_time, end_time)) + + cap.release() + out.release() + logging.info(f'Finished processing video: {video_path}') + logging.info(f'Total frames: {total_frames}, Frames with faces: {frames_with_faces}, Face Percentage: {frames_with_faces / total_frames * 100:.2f}%') + + # If no faces detected, delete the empty output video + if frames_with_faces == 0: + os.remove(temp_video_path) + logging.info(f'No faces detected in video: {filename}') + return { + 'filename': filename, + 'duration': duration, + 'total_frames': total_frames, + 'frames_with_faces': frames_with_faces, + 'output_video': None + } + + # Process the audio + # Load the original video using moviepy + original_video = VideoFileClip(video_path) + + # Extract the audio + original_audio = original_video.audio + + # Extract the audio segments corresponding to face intervals + audio_segments = [] + for interval in face_intervals: + audio_segment = original_audio.subclip(interval[0], interval[1]) + audio_segments.append(audio_segment) + + # Concatenate the audio segments + if audio_segments: + final_audio = concatenate_audioclips(audio_segments) + else: + final_audio = None + + # Now, load the processed video (without audio) using moviepy + processed_video = VideoFileClip(temp_video_path) + + # Set the audio to the processed video + if final_audio: + processed_video = processed_video.set_audio(final_audio) + # Save the video with audio + processed_video.write_videofile(output_video_path, codec='libx264', audio_codec='aac', fps=fps, remove_temp=True) + processed_video.close() + else: + logging.warning(f"No audio segments extracted for video: {filename}") + # Save the video without audio + shutil.move(temp_video_path, 
output_video_path) + + # Close original video and audio + original_video.close() + + # Remove temporary video file if it still exists + if os.path.exists(temp_video_path): + os.remove(temp_video_path) + + # Return result + return { + 'filename': filename, + 'duration': duration, + 'total_frames': total_frames, + 'frames_with_faces': frames_with_faces, + 'output_video': output_video_path + } + except Exception as e: + logging.error(f'Error processing video {filename}: {e}') + logging.error(traceback.format_exc()) + return None + +def main(): + # Directories and file paths + video_dir = 'D:\\Switcher\\Convert' # Update this path if needed + output_dir = 'D:\\Switcher\\Processed_Videos' # Update this path if needed + output_file = 'D:\\Switcher\\video_analysis_results.txt' # Update this path if needed + + # Create output directory if it doesn't exist + os.makedirs(output_dir, exist_ok=True) + + # Supported video file extensions + video_extensions = ['.mp4', '.avi', '.mov', '.mkv'] # Add more extensions if needed + + # Collect video files + video_files = [ + f for f in os.listdir(video_dir) + if os.path.isfile(os.path.join(video_dir, f)) and f.lower().endswith(tuple(video_extensions)) + ] + + results = [] + + # Process videos one at a time + for filename in video_files: + logging.info(f'Starting analysis for video: {filename}') + result = process_video(filename, video_dir, output_dir) + if result: + results.append(result) + else: + logging.error(f'Failed to process video: {filename}') + logging.info(f'Completed analysis for video: {filename}') + + # Write results to output file + with open(output_file, 'w') as f: + for result in results: + f.write(f"Video: {result['filename']}\n") + f.write(f"Duration: {result['duration']}\n") + f.write(f"Total frames: {result['total_frames']}\n") + f.write(f"Frames with faces: {result['frames_with_faces']}\n") + face_percentage = (result['frames_with_faces'] / result['total_frames'] * 100) if result['total_frames'] > 0 else 0 + f.write(f"Face Percentage: {face_percentage:.2f}%\n") + if result['output_video']: + f.write(f"Processed video saved to: {result['output_video']}\n") + else: + f.write("No faces detected; no processed video generated.\n") + f.write('-' * 40 + '\n') + + print('Analysis complete. Results saved to:', output_file) + logging.info('Analysis complete. 
Results saved to: ' + output_file) + +if __name__ == '__main__': + main() diff --git a/Install.bat b/Install.bat new file mode 100644 index 0000000000000000000000000000000000000000..416a268daf585b8ef96da24389699765db6b5ac8 --- /dev/null +++ b/Install.bat @@ -0,0 +1,4 @@ +winget install Python.Python.3.11 +pip install torch==2.5.0+cu121 torchvision==0.20.0+cu121 torchaudio==2.5.0+cu121 --index-url https://download.pytorch.org/whl/cu121 +python pip install -r requirements.txt +python -m pip install --upgrade pip \ No newline at end of file diff --git a/Real-ESRGAN/.github/workflows/publish-pip.yml b/Real-ESRGAN/.github/workflows/publish-pip.yml new file mode 100644 index 0000000000000000000000000000000000000000..db319cdb65bacdca054caaefb3c1667124c48cd9 --- /dev/null +++ b/Real-ESRGAN/.github/workflows/publish-pip.yml @@ -0,0 +1,33 @@ +name: PyPI Publish + +on: push + +jobs: + build-n-publish: + runs-on: ubuntu-latest + if: startsWith(github.event.ref, 'refs/tags') + + steps: + - uses: actions/checkout@v2 + - name: Set up Python 3.8 + uses: actions/setup-python@v1 + with: + python-version: 3.8 + - name: Upgrade pip + run: pip install pip --upgrade + - name: Install PyTorch (cpu) + run: pip install torch==1.7.0+cpu torchvision==0.8.1+cpu -f https://download.pytorch.org/whl/torch_stable.html + - name: Install dependencies + run: | + pip install basicsr + pip install facexlib + pip install gfpgan + pip install -r requirements.txt + - name: Build and install + run: rm -rf .eggs && pip install -e . + - name: Build for distribution + run: python setup.py sdist bdist_wheel + - name: Publish distribution to PyPI + uses: pypa/gh-action-pypi-publish@master + with: + password: ${{ secrets.PYPI_API_TOKEN }} diff --git a/Real-ESRGAN/.github/workflows/pylint.yml b/Real-ESRGAN/.github/workflows/pylint.yml new file mode 100644 index 0000000000000000000000000000000000000000..784a41058979ab46d1cf5ab86c50bf2121886df0 --- /dev/null +++ b/Real-ESRGAN/.github/workflows/pylint.yml @@ -0,0 +1,31 @@ +name: PyLint + +on: [push, pull_request] + +jobs: + build: + + runs-on: ubuntu-latest + strategy: + matrix: + python-version: [3.8] + + steps: + - uses: actions/checkout@v2 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install codespell flake8 isort yapf + + # modify the folders accordingly + - name: Lint + run: | + codespell + flake8 . + isort --check-only --diff realesrgan/ scripts/ inference_realesrgan.py setup.py + yapf -r -d realesrgan/ scripts/ inference_realesrgan.py setup.py diff --git a/Real-ESRGAN/.github/workflows/release.yml b/Real-ESRGAN/.github/workflows/release.yml new file mode 100644 index 0000000000000000000000000000000000000000..8d4ecd3c3599c1ba3b63eacdce93cc568ac6c192 --- /dev/null +++ b/Real-ESRGAN/.github/workflows/release.yml @@ -0,0 +1,41 @@ +name: release +on: + push: + tags: + - '*' + +jobs: + build: + permissions: write-all + name: Create Release + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v2 + - name: Create Release + id: create_release + uses: actions/create-release@v1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + tag_name: ${{ github.ref }} + release_name: Real-ESRGAN ${{ github.ref }} Release Note + body: | + 🚀 See you again 😸 + 🚀Have a nice day 😸 and happy everyday 😃 + 🚀 Long time no see ☄️ + + ✨ **Highlights** + ✅ [Features] Support ... 
+ + 🐛 **Bug Fixes** + + 🌴 **Improvements** + + 📢📢📢 + +

+ +

+ draft: true + prerelease: false diff --git a/Real-ESRGAN/.gitignore b/Real-ESRGAN/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..c01691a269b9c326ecc3fc7f7b09def4039a763c --- /dev/null +++ b/Real-ESRGAN/.gitignore @@ -0,0 +1,140 @@ +# ignored folders +datasets/* +experiments/* +results/* +tb_logger/* +wandb/* +tmp/* +weights/* + +version.py + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ diff --git a/Real-ESRGAN/.pre-commit-config.yaml b/Real-ESRGAN/.pre-commit-config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..dc3c7ea9abf86792902feaeb76fb0fd58e5109cb --- /dev/null +++ b/Real-ESRGAN/.pre-commit-config.yaml @@ -0,0 +1,46 @@ +repos: + # flake8 + - repo: https://github.com/PyCQA/flake8 + rev: 3.8.3 + hooks: + - id: flake8 + args: ["--config=setup.cfg", "--ignore=W504, W503"] + + # modify known_third_party + - repo: https://github.com/asottile/seed-isort-config + rev: v2.2.0 + hooks: + - id: seed-isort-config + + # isort + - repo: https://github.com/timothycrosley/isort + rev: 5.2.2 + hooks: + - id: isort + + # yapf + - repo: https://github.com/pre-commit/mirrors-yapf + rev: v0.30.0 + hooks: + - id: yapf + + # codespell + - repo: https://github.com/codespell-project/codespell + rev: v2.1.0 + hooks: + - id: codespell + + # pre-commit-hooks + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v3.2.0 + hooks: + - id: trailing-whitespace # Trim trailing whitespace + - id: check-yaml # Attempt to load all yaml files to verify syntax + - id: check-merge-conflict # Check for files that contain merge conflict strings + - id: double-quote-string-fixer # Replace double quoted strings with single quoted strings + - id: end-of-file-fixer # Make sure files end in a newline and only a newline + - id: requirements-txt-fixer # Sort entries in requirements.txt and remove incorrect entry for pkg-resources==0.0.0 + - id: fix-encoding-pragma # Remove the coding pragma: # -*- coding: utf-8 -*- + args: ["--remove"] + - id: mixed-line-ending # Replace or check mixed line ending + args: ["--fix=lf"] diff --git a/Real-ESRGAN/.vscode/settings.json b/Real-ESRGAN/.vscode/settings.json new file mode 100644 index 0000000000000000000000000000000000000000..d7dfc72871187145352ad8e517f4f56bf0256d23 --- /dev/null +++ b/Real-ESRGAN/.vscode/settings.json @@ -0,0 +1,19 @@ +{ + "files.trimTrailingWhitespace": true, + "editor.wordWrap": "on", + "editor.rulers": [ + 80, + 120 + ], + "editor.renderWhitespace": "all", + "editor.renderControlCharacters": true, + "python.formatting.provider": "yapf", + "python.formatting.yapfArgs": [ + "--style", + "{BASED_ON_STYLE = pep8, BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF = true, SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN = true, COLUMN_LIMIT = 120}" + ], + "python.linting.flake8Enabled": true, + "python.linting.flake8Args": [ + "max-line-length=120" + ], +} diff --git a/Real-ESRGAN/CODE_OF_CONDUCT.md b/Real-ESRGAN/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000000000000000000000000000000000..8711be8c5d962d0cbba1bfdd7080bd20bf4812a0 --- /dev/null +++ b/Real-ESRGAN/CODE_OF_CONDUCT.md @@ -0,0 +1,128 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +We as members, contributors, and leaders pledge to make participation in our +community a harassment-free experience for everyone, regardless of age, body +size, visible or invisible disability, ethnicity, sex characteristics, gender +identity and expression, level of experience, education, socio-economic status, +nationality, personal appearance, race, 
religion, or sexual identity +and orientation. + +We pledge to act and interact in ways that contribute to an open, welcoming, +diverse, inclusive, and healthy community. + +## Our Standards + +Examples of behavior that contributes to a positive environment for our +community include: + +* Demonstrating empathy and kindness toward other people +* Being respectful of differing opinions, viewpoints, and experiences +* Giving and gracefully accepting constructive feedback +* Accepting responsibility and apologizing to those affected by our mistakes, + and learning from the experience +* Focusing on what is best not just for us as individuals, but for the + overall community + +Examples of unacceptable behavior include: + +* The use of sexualized language or imagery, and sexual attention or + advances of any kind +* Trolling, insulting or derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or email + address, without their explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Enforcement Responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of +acceptable behavior and will take appropriate and fair corrective action in +response to any behavior that they deem inappropriate, threatening, offensive, +or harmful. + +Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, and will communicate reasons for moderation +decisions when appropriate. + +## Scope + +This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. +Examples of representing our community include using an official e-mail address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders responsible for enforcement at +xintao.wang@outlook.com or xintaowang@tencent.com. +All complaints will be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the +reporter of any incident. + +## Enforcement Guidelines + +Community leaders will follow these Community Impact Guidelines in determining +the consequences for any action they deem in violation of this Code of Conduct: + +### 1. Correction + +**Community Impact**: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community. + +**Consequence**: A private, written warning from community leaders, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community Impact**: A violation through a single incident or series +of actions. + +**Consequence**: A warning with consequences for continued behavior. No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. This +includes avoiding interactions in community spaces as well as external channels +like social media. Violating these terms may lead to a temporary or +permanent ban. + +### 3. 
Temporary Ban + +**Community Impact**: A serious violation of community standards, including +sustained inappropriate behavior. + +**Consequence**: A temporary ban from any sort of interaction or public +communication with the community for a specified period of time. No public or +private interaction with the people involved, including unsolicited interaction +with those enforcing the Code of Conduct, is allowed during this period. +Violating these terms may lead to a permanent ban. + +### 4. Permanent Ban + +**Community Impact**: Demonstrating a pattern of violation of community +standards, including sustained inappropriate behavior, harassment of an +individual, or aggression toward or disparagement of classes of individuals. + +**Consequence**: A permanent ban from any sort of public interaction within +the community. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 2.0, available at +https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. + +Community Impact Guidelines were inspired by [Mozilla's code of conduct +enforcement ladder](https://github.com/mozilla/diversity). + +[homepage]: https://www.contributor-covenant.org + +For answers to common questions about this code of conduct, see the FAQ at +https://www.contributor-covenant.org/faq. Translations are available at +https://www.contributor-covenant.org/translations. diff --git a/Real-ESRGAN/LICENSE b/Real-ESRGAN/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..3988334045644e9597b4486baac6256812ac37b0 --- /dev/null +++ b/Real-ESRGAN/LICENSE @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) 2021, Xintao Wang +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/Real-ESRGAN/MANIFEST.in b/Real-ESRGAN/MANIFEST.in new file mode 100644 index 0000000000000000000000000000000000000000..9ec8f91c0a630f9f44bbeeaa9521b31cb5865f00 --- /dev/null +++ b/Real-ESRGAN/MANIFEST.in @@ -0,0 +1,8 @@ +include assets/* +include inputs/* +include scripts/*.py +include inference_realesrgan.py +include VERSION +include LICENSE +include requirements.txt +include weights/README.md diff --git a/Real-ESRGAN/README.md b/Real-ESRGAN/README.md new file mode 100644 index 0000000000000000000000000000000000000000..503d9281a76ccf9da1c03c113867f6b88bb36585 --- /dev/null +++ b/Real-ESRGAN/README.md @@ -0,0 +1,272 @@ +

+ +

+ +## English | 简体中文
+ +
+ +👀[**Demos**](#-demos-videos) **|** 🚩[**Updates**](#-updates) **|** ⚡[**Usage**](#-quick-inference) **|** 🏰[**Model Zoo**](docs/model_zoo.md) **|** 🔧[Install](#-dependencies-and-installation) **|** 💻[Train](docs/Training.md) **|** ❓[FAQ](docs/FAQ.md) **|** 🎨[Contribution](docs/CONTRIBUTING.md) + +[![download](https://img.shields.io/github/downloads/xinntao/Real-ESRGAN/total.svg)](https://github.com/xinntao/Real-ESRGAN/releases) +[![PyPI](https://img.shields.io/pypi/v/realesrgan)](https://pypi.org/project/realesrgan/) +[![Open issue](https://img.shields.io/github/issues/xinntao/Real-ESRGAN)](https://github.com/xinntao/Real-ESRGAN/issues) +[![Closed issue](https://img.shields.io/github/issues-closed/xinntao/Real-ESRGAN)](https://github.com/xinntao/Real-ESRGAN/issues) +[![LICENSE](https://img.shields.io/github/license/xinntao/Real-ESRGAN.svg)](https://github.com/xinntao/Real-ESRGAN/blob/master/LICENSE) +[![python lint](https://github.com/xinntao/Real-ESRGAN/actions/workflows/pylint.yml/badge.svg)](https://github.com/xinntao/Real-ESRGAN/blob/master/.github/workflows/pylint.yml) +[![Publish-pip](https://github.com/xinntao/Real-ESRGAN/actions/workflows/publish-pip.yml/badge.svg)](https://github.com/xinntao/Real-ESRGAN/blob/master/.github/workflows/publish-pip.yml) + +
+ +🔥 **AnimeVideo-v3 model (动漫视频小模型)**. Please see [[*anime video models*](docs/anime_video_model.md)] and [[*comparisons*](docs/anime_comparisons.md)]
+🔥 **RealESRGAN_x4plus_anime_6B** for anime images **(动漫插图模型)**. Please see [[*anime_model*](docs/anime_model.md)] + + +1. :boom: **Update** online Replicate demo: [![Replicate](https://img.shields.io/static/v1?label=Demo&message=Replicate&color=blue)](https://replicate.com/xinntao/realesrgan) +1. Online Colab demo for Real-ESRGAN: [![Colab](https://img.shields.io/static/v1?label=Demo&message=Colab&color=orange)](https://colab.research.google.com/drive/1k2Zod6kSHEvraybHl50Lys0LerhyTMCo?usp=sharing) **|** Online Colab demo for for Real-ESRGAN (**anime videos**): [![Colab](https://img.shields.io/static/v1?label=Demo&message=Colab&color=orange)](https://colab.research.google.com/drive/1yNl9ORUxxlL4N0keJa2SEPB61imPQd1B?usp=sharing) +1. Portable [Windows](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesrgan-ncnn-vulkan-20220424-windows.zip) / [Linux](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesrgan-ncnn-vulkan-20220424-ubuntu.zip) / [MacOS](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesrgan-ncnn-vulkan-20220424-macos.zip) **executable files for Intel/AMD/Nvidia GPU**. You can find more information [here](#portable-executable-files-ncnn). The ncnn implementation is in [Real-ESRGAN-ncnn-vulkan](https://github.com/xinntao/Real-ESRGAN-ncnn-vulkan) + + +Real-ESRGAN aims at developing **Practical Algorithms for General Image/Video Restoration**.
+We extend the powerful ESRGAN to a practical restoration application (namely, Real-ESRGAN), which is trained with pure synthetic data. + +🌌 Thanks for your valuable feedback and suggestions. All the feedback is collected in [feedback.md](docs/feedback.md). + +--- + +If Real-ESRGAN is helpful, please help to ⭐ this repo or recommend it to your friends 😊
+Other recommended projects:
+▶️ [GFPGAN](https://github.com/TencentARC/GFPGAN): A practical algorithm for real-world face restoration
+▶️ [BasicSR](https://github.com/xinntao/BasicSR): An open-source image and video restoration toolbox
+▶️ [facexlib](https://github.com/xinntao/facexlib): A collection that provides useful face-related functions.
+▶️ [HandyView](https://github.com/xinntao/HandyView): A PyQt5-based image viewer that is handy for viewing and comparison
+▶️ [HandyFigure](https://github.com/xinntao/HandyFigure): Open-sourced source files of paper figures
+ +--- + +### 📖 Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data + +> [[Paper](https://arxiv.org/abs/2107.10833)]   [[YouTube Video](https://www.youtube.com/watch?v=fxHWoDSSvSc)]   [[B站讲解](https://www.bilibili.com/video/BV1H34y1m7sS/)]   [[Poster](https://xinntao.github.io/projects/RealESRGAN_src/RealESRGAN_poster.pdf)]   [[PPT slides](https://docs.google.com/presentation/d/1QtW6Iy8rm8rGLsJ0Ldti6kP-7Qyzy6XL/edit?usp=sharing&ouid=109799856763657548160&rtpof=true&sd=true)]
+> [Xintao Wang](https://xinntao.github.io/), Liangbin Xie, [Chao Dong](https://scholar.google.com.hk/citations?user=OSDCB0UAAAAJ), [Ying Shan](https://scholar.google.com/citations?user=4oXBp9UAAAAJ&hl=en)
+> [Tencent ARC Lab](https://arc.tencent.com/en/ai-demos/imgRestore); Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences + +

+ +

+ +--- + + +## 🚩 Updates + +- ✅ Add the **realesr-general-x4v3** model - a tiny small model for general scenes. It also supports the **-dn** option to balance the noise (avoiding over-smooth results). **-dn** is short for denoising strength. +- ✅ Update the **RealESRGAN AnimeVideo-v3** model. Please see [anime video models](docs/anime_video_model.md) and [comparisons](docs/anime_comparisons.md) for more details. +- ✅ Add small models for anime videos. More details are in [anime video models](docs/anime_video_model.md). +- ✅ Add the ncnn implementation [Real-ESRGAN-ncnn-vulkan](https://github.com/xinntao/Real-ESRGAN-ncnn-vulkan). +- ✅ Add [*RealESRGAN_x4plus_anime_6B.pth*](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth), which is optimized for **anime** images with much smaller model size. More details and comparisons with [waifu2x](https://github.com/nihui/waifu2x-ncnn-vulkan) are in [**anime_model.md**](docs/anime_model.md) +- ✅ Support finetuning on your own data or paired data (*i.e.*, finetuning ESRGAN). See [here](docs/Training.md#Finetune-Real-ESRGAN-on-your-own-dataset) +- ✅ Integrate [GFPGAN](https://github.com/TencentARC/GFPGAN) to support **face enhancement**. +- ✅ Integrated to [Huggingface Spaces](https://huggingface.co/spaces) with [Gradio](https://github.com/gradio-app/gradio). See [Gradio Web Demo](https://huggingface.co/spaces/akhaliq/Real-ESRGAN). Thanks [@AK391](https://github.com/AK391) +- ✅ Support arbitrary scale with `--outscale` (It actually further resizes outputs with `LANCZOS4`). Add *RealESRGAN_x2plus.pth* model. +- ✅ [The inference code](inference_realesrgan.py) supports: 1) **tile** options; 2) images with **alpha channel**; 3) **gray** images; 4) **16-bit** images. +- ✅ The training codes have been released. A detailed guide can be found in [Training.md](docs/Training.md). + +--- + + +## 👀 Demos Videos + +#### Bilibili + +- [大闹天宫片段](https://www.bilibili.com/video/BV1ja41117zb) +- [Anime dance cut 动漫魔性舞蹈](https://www.bilibili.com/video/BV1wY4y1L7hT/) +- [海贼王片段](https://www.bilibili.com/video/BV1i3411L7Gy/) + +#### YouTube + +## 🔧 Dependencies and Installation + +- Python >= 3.7 (Recommend to use [Anaconda](https://www.anaconda.com/download/#linux) or [Miniconda](https://docs.conda.io/en/latest/miniconda.html)) +- [PyTorch >= 1.7](https://pytorch.org/) + +### Installation + +1. Clone repo + + ```bash + git clone https://github.com/xinntao/Real-ESRGAN.git + cd Real-ESRGAN + ``` + +1. Install dependent packages + + ```bash + # Install basicsr - https://github.com/xinntao/BasicSR + # We use BasicSR for both training and inference + pip install basicsr + # facexlib and gfpgan are for face enhancement + pip install facexlib + pip install gfpgan + pip install -r requirements.txt + python setup.py develop + ``` + +--- + +## ⚡ Quick Inference + +There are usually three ways to inference Real-ESRGAN. + +1. [Online inference](#online-inference) +1. [Portable executable files (NCNN)](#portable-executable-files-ncnn) +1. [Python script](#python-script) + +### Online inference + +1. You can try in our website: [ARC Demo](https://arc.tencent.com/en/ai-demos/imgRestore) (now only support RealESRGAN_x4plus_anime_6B) +1. [Colab Demo](https://colab.research.google.com/drive/1k2Zod6kSHEvraybHl50Lys0LerhyTMCo?usp=sharing) for Real-ESRGAN **|** [Colab Demo](https://colab.research.google.com/drive/1yNl9ORUxxlL4N0keJa2SEPB61imPQd1B?usp=sharing) for Real-ESRGAN (**anime videos**). 
+ +### Portable executable files (NCNN) + +You can download [Windows](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesrgan-ncnn-vulkan-20220424-windows.zip) / [Linux](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesrgan-ncnn-vulkan-20220424-ubuntu.zip) / [MacOS](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesrgan-ncnn-vulkan-20220424-macos.zip) **executable files for Intel/AMD/Nvidia GPU**. + +These executable files are **portable** and include all the required binaries and models. No CUDA or PyTorch environment is needed.
+ +You can simply run the following command (the Windows example, more information is in the README.md of each executable files): + +```bash +./realesrgan-ncnn-vulkan.exe -i input.jpg -o output.png -n model_name +``` + +We have provided five models: + +1. realesrgan-x4plus (default) +2. realesrnet-x4plus +3. realesrgan-x4plus-anime (optimized for anime images, small model size) +4. realesr-animevideov3 (animation video) + +You can use the `-n` argument for other models, for example, `./realesrgan-ncnn-vulkan.exe -i input.jpg -o output.png -n realesrnet-x4plus` + +#### Usage of portable executable files + +1. Please refer to [Real-ESRGAN-ncnn-vulkan](https://github.com/xinntao/Real-ESRGAN-ncnn-vulkan#computer-usages) for more details. +1. Note that it does not support all the functions (such as `outscale`) as the python script `inference_realesrgan.py`. + +```console +Usage: realesrgan-ncnn-vulkan.exe -i infile -o outfile [options]... + + -h show this help + -i input-path input image path (jpg/png/webp) or directory + -o output-path output image path (jpg/png/webp) or directory + -s scale upscale ratio (can be 2, 3, 4. default=4) + -t tile-size tile size (>=32/0=auto, default=0) can be 0,0,0 for multi-gpu + -m model-path folder path to the pre-trained models. default=models + -n model-name model name (default=realesr-animevideov3, can be realesr-animevideov3 | realesrgan-x4plus | realesrgan-x4plus-anime | realesrnet-x4plus) + -g gpu-id gpu device to use (default=auto) can be 0,1,2 for multi-gpu + -j load:proc:save thread count for load/proc/save (default=1:2:2) can be 1:2,2,2:2 for multi-gpu + -x enable tta mode" + -f format output image format (jpg/png/webp, default=ext/png) + -v verbose output +``` + +Note that it may introduce block inconsistency (and also generate slightly different results from the PyTorch implementation), because this executable file first crops the input image into several tiles, and then processes them separately, finally stitches together. + +### Python script + +#### Usage of python script + +1. You can use X4 model for **arbitrary output size** with the argument `outscale`. The program will further perform cheap resize operation after the Real-ESRGAN output. + +```console +Usage: python inference_realesrgan.py -n RealESRGAN_x4plus -i infile -o outfile [options]... + +A common command: python inference_realesrgan.py -n RealESRGAN_x4plus -i infile --outscale 3.5 --face_enhance + + -h show this help + -i --input Input image or folder. Default: inputs + -o --output Output folder. Default: results + -n --model_name Model name. Default: RealESRGAN_x4plus + -s, --outscale The final upsampling scale of the image. Default: 4 + --suffix Suffix of the restored image. Default: out + -t, --tile Tile size, 0 for no tile during testing. Default: 0 + --face_enhance Whether to use GFPGAN to enhance face. Default: False + --fp32 Use fp32 precision during inference. Default: fp16 (half precision). + --ext Image extension. Options: auto | jpg | png, auto means using the same extension as inputs. Default: auto +``` + +#### Inference general images + +Download pre-trained models: [RealESRGAN_x4plus.pth](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth) + +```bash +wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth -P weights +``` + +Inference! + +```bash +python inference_realesrgan.py -n RealESRGAN_x4plus -i inputs --face_enhance +``` + +Results are in the `results` folder + +#### Inference anime images + +

+ +

+ +Pre-trained models: [RealESRGAN_x4plus_anime_6B](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth)
+ More details and comparisons with [waifu2x](https://github.com/nihui/waifu2x-ncnn-vulkan) are in [**anime_model.md**](docs/anime_model.md) + +```bash +# download model +wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth -P weights +# inference +python inference_realesrgan.py -n RealESRGAN_x4plus_anime_6B -i inputs +``` + +Results are in the `results` folder + +--- + +## BibTeX + + @InProceedings{wang2021realesrgan, + author = {Xintao Wang and Liangbin Xie and Chao Dong and Ying Shan}, + title = {Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data}, + booktitle = {International Conference on Computer Vision Workshops (ICCVW)}, + date = {2021} + } + +## 📧 Contact + +If you have any question, please email `xintao.wang@outlook.com` or `xintaowang@tencent.com`. + + +## 🧩 Projects that use Real-ESRGAN + +If you develop/use Real-ESRGAN in your projects, welcome to let me know. + +- NCNN-Android: [RealSR-NCNN-Android](https://github.com/tumuyan/RealSR-NCNN-Android) by [tumuyan](https://github.com/tumuyan) +- VapourSynth: [vs-realesrgan](https://github.com/HolyWu/vs-realesrgan) by [HolyWu](https://github.com/HolyWu) +- NCNN: [Real-ESRGAN-ncnn-vulkan](https://github.com/xinntao/Real-ESRGAN-ncnn-vulkan) + +    **GUI** + +- [Waifu2x-Extension-GUI](https://github.com/AaronFeng753/Waifu2x-Extension-GUI) by [AaronFeng753](https://github.com/AaronFeng753) +- [Squirrel-RIFE](https://github.com/Justin62628/Squirrel-RIFE) by [Justin62628](https://github.com/Justin62628) +- [Real-GUI](https://github.com/scifx/Real-GUI) by [scifx](https://github.com/scifx) +- [Real-ESRGAN_GUI](https://github.com/net2cn/Real-ESRGAN_GUI) by [net2cn](https://github.com/net2cn) +- [Real-ESRGAN-EGUI](https://github.com/WGzeyu/Real-ESRGAN-EGUI) by [WGzeyu](https://github.com/WGzeyu) +- [anime_upscaler](https://github.com/shangar21/anime_upscaler) by [shangar21](https://github.com/shangar21) +- [Upscayl](https://github.com/upscayl/upscayl) by [Nayam Amarshe](https://github.com/NayamAmarshe) and [TGS963](https://github.com/TGS963) + +## 🤗 Acknowledgement + +Thanks for all the contributors. + +- [AK391](https://github.com/AK391): Integrate RealESRGAN to [Huggingface Spaces](https://huggingface.co/spaces) with [Gradio](https://github.com/gradio-app/gradio). See [Gradio Web Demo](https://huggingface.co/spaces/akhaliq/Real-ESRGAN). +- [Asiimoviet](https://github.com/Asiimoviet): Translate the README.md to Chinese (中文). +- [2ji3150](https://github.com/2ji3150): Thanks for the [detailed and valuable feedbacks/suggestions](https://github.com/xinntao/Real-ESRGAN/issues/131). +- [Jared-02](https://github.com/Jared-02): Translate the Training.md to Chinese (中文). diff --git a/Real-ESRGAN/README_CN.md b/Real-ESRGAN/README_CN.md new file mode 100644 index 0000000000000000000000000000000000000000..ff9e50d7f6e3b9f075c48116b0b77061c7206d94 --- /dev/null +++ b/Real-ESRGAN/README_CN.md @@ -0,0 +1,276 @@ +

+ +

+ +## English | 简体中文
+ +[![download](https://img.shields.io/github/downloads/xinntao/Real-ESRGAN/total.svg)](https://github.com/xinntao/Real-ESRGAN/releases) +[![PyPI](https://img.shields.io/pypi/v/realesrgan)](https://pypi.org/project/realesrgan/) +[![Open issue](https://img.shields.io/github/issues/xinntao/Real-ESRGAN)](https://github.com/xinntao/Real-ESRGAN/issues) +[![Closed issue](https://img.shields.io/github/issues-closed/xinntao/Real-ESRGAN)](https://github.com/xinntao/Real-ESRGAN/issues) +[![LICENSE](https://img.shields.io/github/license/xinntao/Real-ESRGAN.svg)](https://github.com/xinntao/Real-ESRGAN/blob/master/LICENSE) +[![python lint](https://github.com/xinntao/Real-ESRGAN/actions/workflows/pylint.yml/badge.svg)](https://github.com/xinntao/Real-ESRGAN/blob/master/.github/workflows/pylint.yml) +[![Publish-pip](https://github.com/xinntao/Real-ESRGAN/actions/workflows/publish-pip.yml/badge.svg)](https://github.com/xinntao/Real-ESRGAN/blob/master/.github/workflows/publish-pip.yml) + +:fire: 更新动漫视频的小模型 **RealESRGAN AnimeVideo-v3**. 更多信息在 [[动漫视频模型介绍](docs/anime_video_model.md)] 和 [[比较](docs/anime_comparisons_CN.md)] 中. + +1. Real-ESRGAN的[Colab Demo](https://colab.research.google.com/drive/1k2Zod6kSHEvraybHl50Lys0LerhyTMCo?usp=sharing) | Real-ESRGAN**动漫视频** 的[Colab Demo](https://colab.research.google.com/drive/1yNl9ORUxxlL4N0keJa2SEPB61imPQd1B?usp=sharing) +2. **支持Intel/AMD/Nvidia显卡**的绿色版exe文件: [Windows版](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesrgan-ncnn-vulkan-20220424-windows.zip) / [Linux版](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesrgan-ncnn-vulkan-20220424-ubuntu.zip) / [macOS版](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesrgan-ncnn-vulkan-20220424-macos.zip),详情请移步[这里](#便携版(绿色版)可执行文件)。NCNN的实现在 [Real-ESRGAN-ncnn-vulkan](https://github.com/xinntao/Real-ESRGAN-ncnn-vulkan)。 + +Real-ESRGAN 的目标是开发出**实用的图像/视频修复算法**。
+我们在 ESRGAN 的基础上使用纯合成的数据来进行训练,以使其能被应用于实际的图片修复的场景(顾名思义:Real-ESRGAN)。 + +:art: Real-ESRGAN 需要,也很欢迎你的贡献,如新功能、模型、bug修复、建议、维护等等。详情可以查看[CONTRIBUTING.md](docs/CONTRIBUTING.md),所有的贡献者都会被列在[此处](README_CN.md#hugs-感谢)。 + +:milky_way: 感谢大家提供了很好的反馈。这些反馈会逐步更新在 [这个文档](docs/feedback.md)。 + +:question: 常见的问题可以在[FAQ.md](docs/FAQ.md)中找到答案。(好吧,现在还是空白的=-=||) + +--- + +如果 Real-ESRGAN 对你有帮助,可以给本项目一个 Star :star: ,或者推荐给你的朋友们,谢谢!:blush:
+其他推荐的项目:
+:arrow_forward: [GFPGAN](https://github.com/TencentARC/GFPGAN): 实用的人脸复原算法
+:arrow_forward: [BasicSR](https://github.com/xinntao/BasicSR): 开源的图像和视频工具箱
+:arrow_forward: [facexlib](https://github.com/xinntao/facexlib): 提供与人脸相关的工具箱
+:arrow_forward: [HandyView](https://github.com/xinntao/HandyView): 基于PyQt5的图片查看器,方便查看以及比较
+ +--- + + +
+🚩更新 + +- ✅ 更新动漫视频的小模型 **RealESRGAN AnimeVideo-v3**. 更多信息在 [anime video models](docs/anime_video_model.md) 和 [comparisons](docs/anime_comparisons.md)中. +- ✅ 添加了针对动漫视频的小模型, 更多信息在 [anime video models](docs/anime_video_model.md) 中. +- ✅ 添加了ncnn 实现:[Real-ESRGAN-ncnn-vulkan](https://github.com/xinntao/Real-ESRGAN-ncnn-vulkan). +- ✅ 添加了 [*RealESRGAN_x4plus_anime_6B.pth*](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth),对二次元图片进行了优化,并减少了model的大小。详情 以及 与[waifu2x](https://github.com/nihui/waifu2x-ncnn-vulkan)的对比请查看[**anime_model.md**](docs/anime_model.md) +- ✅支持用户在自己的数据上进行微调 (finetune):[详情](docs/Training.md#Finetune-Real-ESRGAN-on-your-own-dataset) +- ✅ 支持使用[GFPGAN](https://github.com/TencentARC/GFPGAN)**增强人脸** +- ✅ 通过[Gradio](https://github.com/gradio-app/gradio)添加到了[Huggingface Spaces](https://huggingface.co/spaces)(一个机器学习应用的在线平台):[Gradio在线版](https://huggingface.co/spaces/akhaliq/Real-ESRGAN)。感谢[@AK391](https://github.com/AK391) +- ✅ 支持任意比例的缩放:`--outscale`(实际上使用`LANCZOS4`来更进一步调整输出图像的尺寸)。添加了*RealESRGAN_x2plus.pth*模型 +- ✅ [推断脚本](inference_realesrgan.py)支持: 1) 分块处理**tile**; 2) 带**alpha通道**的图像; 3) **灰色**图像; 4) **16-bit**图像. +- ✅ 训练代码已经发布,具体做法可查看:[Training.md](docs/Training.md)。 + +
+ + +
+🧩使用Real-ESRGAN的项目 + +    👋 如果你开发/使用/集成了Real-ESRGAN, 欢迎联系我添加 + +- NCNN-Android: [RealSR-NCNN-Android](https://github.com/tumuyan/RealSR-NCNN-Android) by [tumuyan](https://github.com/tumuyan) +- VapourSynth: [vs-realesrgan](https://github.com/HolyWu/vs-realesrgan) by [HolyWu](https://github.com/HolyWu) +- NCNN: [Real-ESRGAN-ncnn-vulkan](https://github.com/xinntao/Real-ESRGAN-ncnn-vulkan) + +    **易用的图形界面** + +- [Waifu2x-Extension-GUI](https://github.com/AaronFeng753/Waifu2x-Extension-GUI) by [AaronFeng753](https://github.com/AaronFeng753) +- [Squirrel-RIFE](https://github.com/Justin62628/Squirrel-RIFE) by [Justin62628](https://github.com/Justin62628) +- [Real-GUI](https://github.com/scifx/Real-GUI) by [scifx](https://github.com/scifx) +- [Real-ESRGAN_GUI](https://github.com/net2cn/Real-ESRGAN_GUI) by [net2cn](https://github.com/net2cn) +- [Real-ESRGAN-EGUI](https://github.com/WGzeyu/Real-ESRGAN-EGUI) by [WGzeyu](https://github.com/WGzeyu) +- [anime_upscaler](https://github.com/shangar21/anime_upscaler) by [shangar21](https://github.com/shangar21) +- [RealESRGAN-GUI](https://github.com/Baiyuetribe/paper2gui/blob/main/Video%20Super%20Resolution/RealESRGAN-GUI.md) by [Baiyuetribe](https://github.com/Baiyuetribe) + +
+ +
+👀Demo视频(B站) + +- [大闹天宫片段](https://www.bilibili.com/video/BV1ja41117zb) + +
+ +### :book: Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data + +> [[论文](https://arxiv.org/abs/2107.10833)]   [项目主页]   [[YouTube 视频](https://www.youtube.com/watch?v=fxHWoDSSvSc)]   [[B站视频](https://www.bilibili.com/video/BV1H34y1m7sS/)]   [[Poster](https://xinntao.github.io/projects/RealESRGAN_src/RealESRGAN_poster.pdf)]   [[PPT](https://docs.google.com/presentation/d/1QtW6Iy8rm8rGLsJ0Ldti6kP-7Qyzy6XL/edit?usp=sharing&ouid=109799856763657548160&rtpof=true&sd=true)]
+> [Xintao Wang](https://xinntao.github.io/), Liangbin Xie, [Chao Dong](https://scholar.google.com.hk/citations?user=OSDCB0UAAAAJ), [Ying Shan](https://scholar.google.com/citations?user=4oXBp9UAAAAJ&hl=en)
+> Tencent ARC Lab; Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences + +

+ +

+ +--- + +我们提供了一套训练好的模型(*RealESRGAN_x4plus.pth*),可以进行4倍的超分辨率。
+**现在的 Real-ESRGAN 还是有几率失败的,因为现实生活的降质过程比较复杂。**
+而且,本项目对**人脸以及文字之类**的效果还不是太好,但是我们会持续进行优化的。
+ +Real-ESRGAN 将会被长期支持,我会在空闲的时间中持续维护更新。 + +这些是未来计划的几个新功能: + +- [ ] 优化人脸 +- [ ] 优化文字 +- [x] 优化动画图像 +- [ ] 支持更多的超分辨率比例 +- [ ] 可调节的复原 + +如果你有好主意或需求,欢迎在 issue 或 discussion 中提出。
+如果你有一些 Real-ESRGAN 中有问题的照片,你也可以在 issue 或者 discussion 中发出来。我会留意(但是不一定能解决:stuck_out_tongue:)。如果有必要的话,我还会专门开一页来记录那些有待解决的图像。 + +--- + +### 便携版(绿色版)可执行文件 + +你可以下载**支持Intel/AMD/Nvidia显卡**的绿色版exe文件: [Windows版](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesrgan-ncnn-vulkan-20220424-windows.zip) / [Linux版](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesrgan-ncnn-vulkan-20220424-ubuntu.zip) / [macOS版](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesrgan-ncnn-vulkan-20220424-macos.zip)。 + +绿色版指的是这些exe你可以直接运行(放U盘里拷走都没问题),因为里面已经有所需的文件和模型了。它不需要 CUDA 或者 PyTorch运行环境。
+ +你可以通过下面这个命令来运行(Windows版本的例子,更多信息请查看对应版本的README.md): + +```bash +./realesrgan-ncnn-vulkan.exe -i 输入图像.jpg -o 输出图像.png -n 模型名字 +``` + +我们提供了五种模型: + +1. realesrgan-x4plus(默认) +2. reaesrnet-x4plus +3. realesrgan-x4plus-anime(针对动漫插画图像优化,有更小的体积) +4. realesr-animevideov3 (针对动漫视频) + +你可以通过`-n`参数来使用其他模型,例如`./realesrgan-ncnn-vulkan.exe -i 二次元图片.jpg -o 二刺螈图片.png -n realesrgan-x4plus-anime` + +### 可执行文件的用法 + +1. 更多细节可以参考 [Real-ESRGAN-ncnn-vulkan](https://github.com/xinntao/Real-ESRGAN-ncnn-vulkan#computer-usages). +2. 注意:可执行文件并没有支持 python 脚本 `inference_realesrgan.py` 中所有的功能,比如 `outscale` 选项) . + +```console +Usage: realesrgan-ncnn-vulkan.exe -i infile -o outfile [options]... + + -h show this help + -i input-path input image path (jpg/png/webp) or directory + -o output-path output image path (jpg/png/webp) or directory + -s scale upscale ratio (can be 2, 3, 4. default=4) + -t tile-size tile size (>=32/0=auto, default=0) can be 0,0,0 for multi-gpu + -m model-path folder path to the pre-trained models. default=models + -n model-name model name (default=realesr-animevideov3, can be realesr-animevideov3 | realesrgan-x4plus | realesrgan-x4plus-anime | realesrnet-x4plus) + -g gpu-id gpu device to use (default=auto) can be 0,1,2 for multi-gpu + -j load:proc:save thread count for load/proc/save (default=1:2:2) can be 1:2,2,2:2 for multi-gpu + -x enable tta mode" + -f format output image format (jpg/png/webp, default=ext/png) + -v verbose output +``` + +由于这些exe文件会把图像分成几个板块,然后来分别进行处理,再合成导出,输出的图像可能会有一点割裂感(而且可能跟PyTorch的输出不太一样) + +--- + +## :wrench: 依赖以及安装 + +- Python >= 3.7 (推荐使用[Anaconda](https://www.anaconda.com/download/#linux)或[Miniconda](https://docs.conda.io/en/latest/miniconda.html)) +- [PyTorch >= 1.7](https://pytorch.org/) + +#### 安装 + +1. 把项目克隆到本地 + + ```bash + git clone https://github.com/xinntao/Real-ESRGAN.git + cd Real-ESRGAN + ``` + +2. 安装各种依赖 + + ```bash + # 安装 basicsr - https://github.com/xinntao/BasicSR + # 我们使用BasicSR来训练以及推断 + pip install basicsr + # facexlib和gfpgan是用来增强人脸的 + pip install facexlib + pip install gfpgan + pip install -r requirements.txt + python setup.py develop + ``` + +## :zap: 快速上手 + +### 普通图片 + +下载我们训练好的模型: [RealESRGAN_x4plus.pth](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth) + +```bash +wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth -P weights +``` + +推断! + +```bash +python inference_realesrgan.py -n RealESRGAN_x4plus -i inputs --face_enhance +``` + +结果在`results`文件夹 + +### 动画图片 + +

+ +

+ +训练好的模型: [RealESRGAN_x4plus_anime_6B](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth)
+有关[waifu2x](https://github.com/nihui/waifu2x-ncnn-vulkan)的更多信息和对比在[**anime_model.md**](docs/anime_model.md)中。 + +```bash +# 下载模型 +wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth -P weights +# 推断 +python inference_realesrgan.py -n RealESRGAN_x4plus_anime_6B -i inputs +``` + +结果在`results`文件夹 + +### Python 脚本的用法 + +1. 虽然你使用了 X4 模型,但是你可以 **输出任意尺寸比例的图片**,只要实用了 `outscale` 参数. 程序会进一步对模型的输出图像进行缩放。 + +```console +Usage: python inference_realesrgan.py -n RealESRGAN_x4plus -i infile -o outfile [options]... + +A common command: python inference_realesrgan.py -n RealESRGAN_x4plus -i infile --outscale 3.5 --face_enhance + + -h show this help + -i --input Input image or folder. Default: inputs + -o --output Output folder. Default: results + -n --model_name Model name. Default: RealESRGAN_x4plus + -s, --outscale The final upsampling scale of the image. Default: 4 + --suffix Suffix of the restored image. Default: out + -t, --tile Tile size, 0 for no tile during testing. Default: 0 + --face_enhance Whether to use GFPGAN to enhance face. Default: False + --fp32 Whether to use half precision during inference. Default: False + --ext Image extension. Options: auto | jpg | png, auto means using the same extension as inputs. Default: auto +``` + +## :european_castle: 模型库 + +请参见 [docs/model_zoo.md](docs/model_zoo.md) + +## :computer: 训练,在你的数据上微调(Fine-tune) + +这里有一份详细的指南:[Training.md](docs/Training.md). + +## BibTeX 引用 + + @Article{wang2021realesrgan, + title={Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data}, + author={Xintao Wang and Liangbin Xie and Chao Dong and Ying Shan}, + journal={arXiv:2107.10833}, + year={2021} + } + +## :e-mail: 联系我们 + +如果你有任何问题,请通过 `xintao.wang@outlook.com` 或 `xintaowang@tencent.com` 联系我们。 + +## :hugs: 感谢 + +感谢所有的贡献者大大们~ + +- [AK391](https://github.com/AK391): 通过[Gradio](https://github.com/gradio-app/gradio)添加到了[Huggingface Spaces](https://huggingface.co/spaces)(一个机器学习应用的在线平台):[Gradio在线版](https://huggingface.co/spaces/akhaliq/Real-ESRGAN)。 +- [Asiimoviet](https://github.com/Asiimoviet): 把 README.md 文档 翻译成了中文。 +- [2ji3150](https://github.com/2ji3150): 感谢详尽并且富有价值的[反馈、建议](https://github.com/xinntao/Real-ESRGAN/issues/131). 
+- [Jared-02](https://github.com/Jared-02): 把 Training.md 文档 翻译成了中文。 diff --git a/Real-ESRGAN/VERSION b/Real-ESRGAN/VERSION new file mode 100644 index 0000000000000000000000000000000000000000..2b2843e9091e03ddadc674131b33efdfbc0c1ccf --- /dev/null +++ b/Real-ESRGAN/VERSION @@ -0,0 +1 @@ +0.3.0 diff --git a/Real-ESRGAN/assets/realesrgan_logo.png b/Real-ESRGAN/assets/realesrgan_logo.png new file mode 100644 index 0000000000000000000000000000000000000000..88cd1ad6170794c2becb95006edffa0655d9372a Binary files /dev/null and b/Real-ESRGAN/assets/realesrgan_logo.png differ diff --git a/Real-ESRGAN/assets/realesrgan_logo_ai.png b/Real-ESRGAN/assets/realesrgan_logo_ai.png new file mode 100644 index 0000000000000000000000000000000000000000..b0f595cf2535de7e69393384d8d056300f1cdddc Binary files /dev/null and b/Real-ESRGAN/assets/realesrgan_logo_ai.png differ diff --git a/Real-ESRGAN/assets/realesrgan_logo_av.png b/Real-ESRGAN/assets/realesrgan_logo_av.png new file mode 100644 index 0000000000000000000000000000000000000000..501ac8e81292d9369122a69ec2dd56a3ae8beca6 Binary files /dev/null and b/Real-ESRGAN/assets/realesrgan_logo_av.png differ diff --git a/Real-ESRGAN/assets/realesrgan_logo_gi.png b/Real-ESRGAN/assets/realesrgan_logo_gi.png new file mode 100644 index 0000000000000000000000000000000000000000..cdb0a1a74e0b54a1c684141324c6635acf2f60f8 Binary files /dev/null and b/Real-ESRGAN/assets/realesrgan_logo_gi.png differ diff --git a/Real-ESRGAN/assets/realesrgan_logo_gv.png b/Real-ESRGAN/assets/realesrgan_logo_gv.png new file mode 100644 index 0000000000000000000000000000000000000000..21dfba05f3855f1d9740e6d2cbe2a8ac736f4508 Binary files /dev/null and b/Real-ESRGAN/assets/realesrgan_logo_gv.png differ diff --git a/Real-ESRGAN/assets/teaser-text.png b/Real-ESRGAN/assets/teaser-text.png new file mode 100644 index 0000000000000000000000000000000000000000..7b1bb07c7d30bf2aa8e7db5c1c9b5cfe925768e9 --- /dev/null +++ b/Real-ESRGAN/assets/teaser-text.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:182010d07822bf304fee73e0b9887e31d65dcaa09d0084ea6d8ce794360d8855 +size 558847 diff --git a/Real-ESRGAN/assets/teaser.jpg b/Real-ESRGAN/assets/teaser.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2df714b5adf6ca0e397d7b5adaf52019693cd377 --- /dev/null +++ b/Real-ESRGAN/assets/teaser.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d6089685a1246831b8f5afe68cdfcd573ad9c1142209573b3db699742ac92ee0 +size 405152 diff --git a/Real-ESRGAN/cog.yaml b/Real-ESRGAN/cog.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5bfcd17dc7c582f02ef07a694a4e33e63475831e --- /dev/null +++ b/Real-ESRGAN/cog.yaml @@ -0,0 +1,22 @@ +# This file is used for constructing replicate env +image: "r8.im/tencentarc/realesrgan" + +build: + gpu: true + python_version: "3.8" + system_packages: + - "libgl1-mesa-glx" + - "libglib2.0-0" + python_packages: + - "torch==1.7.1" + - "torchvision==0.8.2" + - "numpy==1.21.1" + - "lmdb==1.2.1" + - "opencv-python==4.5.3.56" + - "PyYAML==5.4.1" + - "tqdm==4.62.2" + - "yapf==0.31.0" + - "basicsr==1.4.2" + - "facexlib==0.2.5" + +predict: "cog_predict.py:Predictor" diff --git a/Real-ESRGAN/cog_predict.py b/Real-ESRGAN/cog_predict.py new file mode 100644 index 0000000000000000000000000000000000000000..478c0b3f3788617cf57c7a101326f302aec53f1b --- /dev/null +++ b/Real-ESRGAN/cog_predict.py @@ -0,0 +1,148 @@ +# flake8: noqa +# This file is used for deploying replicate models +# running: cog predict -i 
img=@inputs/00017_gray.png -i version='General - v3' -i scale=2 -i face_enhance=True -i tile=0 +# push: cog push r8.im/xinntao/realesrgan + +import os + +os.system('pip install gfpgan') +os.system('python setup.py develop') + +import cv2 +import shutil +import tempfile +import torch +from basicsr.archs.rrdbnet_arch import RRDBNet +from basicsr.archs.srvgg_arch import SRVGGNetCompact + +from realesrgan.utils import RealESRGANer + +try: + from cog import BasePredictor, Input, Path + from gfpgan import GFPGANer +except Exception: + print('please install cog and realesrgan package') + + +class Predictor(BasePredictor): + + def setup(self): + os.makedirs('output', exist_ok=True) + # download weights + if not os.path.exists('weights/realesr-general-x4v3.pth'): + os.system( + 'wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth -P ./weights' + ) + if not os.path.exists('weights/GFPGANv1.4.pth'): + os.system('wget https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth -P ./weights') + if not os.path.exists('weights/RealESRGAN_x4plus.pth'): + os.system( + 'wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth -P ./weights' + ) + if not os.path.exists('weights/RealESRGAN_x4plus_anime_6B.pth'): + os.system( + 'wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth -P ./weights' + ) + if not os.path.exists('weights/realesr-animevideov3.pth'): + os.system( + 'wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-animevideov3.pth -P ./weights' + ) + + def choose_model(self, scale, version, tile=0): + half = True if torch.cuda.is_available() else False + if version == 'General - RealESRGANplus': + model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4) + model_path = 'weights/RealESRGAN_x4plus.pth' + self.upsampler = RealESRGANer( + scale=4, model_path=model_path, model=model, tile=tile, tile_pad=10, pre_pad=0, half=half) + elif version == 'General - v3': + model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=32, upscale=4, act_type='prelu') + model_path = 'weights/realesr-general-x4v3.pth' + self.upsampler = RealESRGANer( + scale=4, model_path=model_path, model=model, tile=tile, tile_pad=10, pre_pad=0, half=half) + elif version == 'Anime - anime6B': + model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4) + model_path = 'weights/RealESRGAN_x4plus_anime_6B.pth' + self.upsampler = RealESRGANer( + scale=4, model_path=model_path, model=model, tile=tile, tile_pad=10, pre_pad=0, half=half) + elif version == 'AnimeVideo - v3': + model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=4, act_type='prelu') + model_path = 'weights/realesr-animevideov3.pth' + self.upsampler = RealESRGANer( + scale=4, model_path=model_path, model=model, tile=tile, tile_pad=10, pre_pad=0, half=half) + + self.face_enhancer = GFPGANer( + model_path='weights/GFPGANv1.4.pth', + upscale=scale, + arch='clean', + channel_multiplier=2, + bg_upsampler=self.upsampler) + + def predict( + self, + img: Path = Input(description='Input'), + version: str = Input( + description='RealESRGAN version. 
Please see [Readme] below for more descriptions', + choices=['General - RealESRGANplus', 'General - v3', 'Anime - anime6B', 'AnimeVideo - v3'], + default='General - v3'), + scale: float = Input(description='Rescaling factor', default=2), + face_enhance: bool = Input( + description='Enhance faces with GFPGAN. Note that it does not work for anime images/vidoes', default=False), + tile: int = Input( + description= + 'Tile size. Default is 0, that is no tile. When encountering the out-of-GPU-memory issue, please specify it, e.g., 400 or 200', + default=0) + ) -> Path: + if tile <= 100 or tile is None: + tile = 0 + print(f'img: {img}. version: {version}. scale: {scale}. face_enhance: {face_enhance}. tile: {tile}.') + try: + extension = os.path.splitext(os.path.basename(str(img)))[1] + img = cv2.imread(str(img), cv2.IMREAD_UNCHANGED) + if len(img.shape) == 3 and img.shape[2] == 4: + img_mode = 'RGBA' + elif len(img.shape) == 2: + img_mode = None + img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) + else: + img_mode = None + + h, w = img.shape[0:2] + if h < 300: + img = cv2.resize(img, (w * 2, h * 2), interpolation=cv2.INTER_LANCZOS4) + + self.choose_model(scale, version, tile) + + try: + if face_enhance: + _, _, output = self.face_enhancer.enhance( + img, has_aligned=False, only_center_face=False, paste_back=True) + else: + output, _ = self.upsampler.enhance(img, outscale=scale) + except RuntimeError as error: + print('Error', error) + print('If you encounter CUDA out of memory, try to set "tile" to a smaller size, e.g., 400.') + + if img_mode == 'RGBA': # RGBA images should be saved in png format + extension = 'png' + # save_path = f'output/out.{extension}' + # cv2.imwrite(save_path, output) + out_path = Path(tempfile.mkdtemp()) / f'out.{extension}' + cv2.imwrite(str(out_path), output) + except Exception as error: + print('global exception: ', error) + finally: + clean_folder('output') + return out_path + + +def clean_folder(folder): + for filename in os.listdir(folder): + file_path = os.path.join(folder, filename) + try: + if os.path.isfile(file_path) or os.path.islink(file_path): + os.unlink(file_path) + elif os.path.isdir(file_path): + shutil.rmtree(file_path) + except Exception as e: + print(f'Failed to delete {file_path}. Reason: {e}') diff --git a/Real-ESRGAN/docs/CONTRIBUTING.md b/Real-ESRGAN/docs/CONTRIBUTING.md new file mode 100644 index 0000000000000000000000000000000000000000..de7b4501451d54c95af2765e224a3234e083d316 --- /dev/null +++ b/Real-ESRGAN/docs/CONTRIBUTING.md @@ -0,0 +1,44 @@ +# Contributing to Real-ESRGAN + +:art: Real-ESRGAN needs your contributions. Any contributions are welcome, such as new features/models/typo fixes/suggestions/maintenance, *etc*. See [CONTRIBUTING.md](docs/CONTRIBUTING.md). All contributors are list [here](README.md#hugs-acknowledgement). + +We like open-source and want to develop practical algorithms for general image restoration. However, individual strength is limited. So, any kinds of contributions are welcome, such as: + +- New features +- New models (your fine-tuned models) +- Bug fixes +- Typo fixes +- Suggestions +- Maintenance +- Documents +- *etc* + +## Workflow + +1. Fork and pull the latest Real-ESRGAN repository +1. Checkout a new branch (do not use master branch for PRs) +1. Commit your changes +1. Create a PR + +**Note**: + +1. Please check the code style and linting + 1. The style configuration is specified in [setup.cfg](setup.cfg) + 1. If you use VSCode, the settings are configured in [.vscode/settings.json](.vscode/settings.json) +1. 
Strongly recommend using `pre-commit hook`. It will check your code style and linting before your commit. + 1. In the root path of project folder, run `pre-commit install` + 1. The pre-commit configuration is listed in [.pre-commit-config.yaml](.pre-commit-config.yaml) +1. Better to [open a discussion](https://github.com/xinntao/Real-ESRGAN/discussions) before large changes. + 1. Welcome to discuss :sunglasses:. I will try my best to join the discussion. + +## TODO List + +:zero: The most straightforward way of improving model performance is to fine-tune on some specific datasets. + +Here are some TODOs: + +- [ ] optimize for human faces +- [ ] optimize for texts +- [ ] support controllable restoration strength + +:one: There are also [several issues](https://github.com/xinntao/Real-ESRGAN/issues) that require helpers to improve. If you can help, please let me know :smile: diff --git a/Real-ESRGAN/docs/FAQ.md b/Real-ESRGAN/docs/FAQ.md new file mode 100644 index 0000000000000000000000000000000000000000..aecfe79bd7cce2cafe9d7447f03589458d2c629d --- /dev/null +++ b/Real-ESRGAN/docs/FAQ.md @@ -0,0 +1,10 @@ +# FAQ + +1. **Q: How to select models?**
+A: Please refer to [docs/model_zoo.md](docs/model_zoo.md). + +1. **Q: Can `face_enhance` be used for anime images/animation videos?**
+A: No, it only works for real faces. It is recommended to disable this option for anime images/animation videos, which also saves GPU memory. + +1. **Q: Error "slow_conv2d_cpu" not implemented for 'Half'**
+A: In order to save GPU memory consumption and speed up inference, Real-ESRGAN uses half precision (fp16) during inference by default. However, some operators for half inference are not implemented in CPU mode. You need to add **`--fp32` option** for the commands. For example, `python inference_realesrgan.py -n RealESRGAN_x4plus.pth -i inputs --fp32`. diff --git a/Real-ESRGAN/docs/Training.md b/Real-ESRGAN/docs/Training.md new file mode 100644 index 0000000000000000000000000000000000000000..afcc4587edeffcd45815b0a71276b1eab3f1d9ae --- /dev/null +++ b/Real-ESRGAN/docs/Training.md @@ -0,0 +1,271 @@ +# :computer: How to Train/Finetune Real-ESRGAN + +- [Train Real-ESRGAN](#train-real-esrgan) + - [Overview](#overview) + - [Dataset Preparation](#dataset-preparation) + - [Train Real-ESRNet](#Train-Real-ESRNet) + - [Train Real-ESRGAN](#Train-Real-ESRGAN) +- [Finetune Real-ESRGAN on your own dataset](#Finetune-Real-ESRGAN-on-your-own-dataset) + - [Generate degraded images on the fly](#Generate-degraded-images-on-the-fly) + - [Use paired training data](#use-your-own-paired-data) + +[English](Training.md) **|** [简体中文](Training_CN.md) + +## Train Real-ESRGAN + +### Overview + +The training has been divided into two stages. These two stages have the same data synthesis process and training pipeline, except for the loss functions. Specifically, + +1. We first train Real-ESRNet with L1 loss from the pre-trained model ESRGAN. +1. We then use the trained Real-ESRNet model as an initialization of the generator, and train the Real-ESRGAN with a combination of L1 loss, perceptual loss and GAN loss. + +### Dataset Preparation + +We use DF2K (DIV2K and Flickr2K) + OST datasets for our training. Only HR images are required.
+You can download them from: + +1. DIV2K: http://data.vision.ee.ethz.ch/cvl/DIV2K/DIV2K_train_HR.zip +2. Flickr2K: https://cv.snu.ac.kr/research/EDSR/Flickr2K.tar +3. OST: https://openmmlab.oss-cn-hangzhou.aliyuncs.com/datasets/OST_dataset.zip + +Here are the steps for data preparation. + +#### Step 1: [Optional] Generate multi-scale images + +For the DF2K dataset, we use a multi-scale strategy, *i.e.*, we downsample HR images to obtain several Ground-Truth images with different scales.
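Conceptually, this step just downsamples every HR image to a few additional scales. A minimal Python sketch of the idea follows; the scale factors, interpolation and output naming here are illustrative assumptions, and the provided script mentioned next is the actual implementation:

```python
import os
import cv2

# Illustrative only: save each HR image at a few extra scales so the training
# set contains ground-truth images of different sizes.
scales = [0.75, 0.5, 1 / 3]  # example scale factors
in_dir, out_dir = 'datasets/DF2K/DF2K_HR', 'datasets/DF2K/DF2K_multiscale'
os.makedirs(out_dir, exist_ok=True)

for name in os.listdir(in_dir):
    img = cv2.imread(os.path.join(in_dir, name), cv2.IMREAD_UNCHANGED)
    if img is None:  # skip non-image files
        continue
    base, _ = os.path.splitext(name)
    h, w = img.shape[:2]
    for idx, s in enumerate(scales):
        resized = cv2.resize(img, (int(w * s), int(h * s)), interpolation=cv2.INTER_LANCZOS4)
        cv2.imwrite(os.path.join(out_dir, f'{base}T{idx}.png'), resized)
```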
+You can use the [scripts/generate_multiscale_DF2K.py](scripts/generate_multiscale_DF2K.py) script to generate multi-scale images.
+Note that this step can be skipped if you just want a quick try. + +```bash +python scripts/generate_multiscale_DF2K.py --input datasets/DF2K/DF2K_HR --output datasets/DF2K/DF2K_multiscale +``` + +#### Step 2: [Optional] Crop to sub-images + +We then crop DF2K images into sub-images for faster IO and processing.
+This step is optional: you can skip it if your IO is fast enough, or if your disk space is limited. + +You can use the [scripts/extract_subimages.py](scripts/extract_subimages.py) script. Here is an example: + +```bash + python scripts/extract_subimages.py --input datasets/DF2K/DF2K_multiscale --output datasets/DF2K/DF2K_multiscale_sub --crop_size 400 --step 200 +``` + +#### Step 3: Prepare a txt for meta information + +You need to prepare a txt file containing the image paths. The following are some examples in `meta_info_DF2Kmultiscale+OST_sub.txt` (as different users may partition sub-images differently, this file will not match your data and you need to prepare your own txt file): + +```txt +DF2K_HR_sub/000001_s001.png +DF2K_HR_sub/000001_s002.png +DF2K_HR_sub/000001_s003.png +... +``` + +You can use the [scripts/generate_meta_info.py](scripts/generate_meta_info.py) script to generate the txt file.
+You can merge several folders into one meta_info txt. Here is the example: + +```bash + python scripts/generate_meta_info.py --input datasets/DF2K/DF2K_HR datasets/DF2K/DF2K_multiscale --root datasets/DF2K datasets/DF2K --meta_info datasets/DF2K/meta_info/meta_info_DF2Kmultiscale.txt +``` + +### Train Real-ESRNet + +1. Download pre-trained model [ESRGAN](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/ESRGAN_SRx4_DF2KOST_official-ff704c30.pth) into `experiments/pretrained_models`. + ```bash + wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/ESRGAN_SRx4_DF2KOST_official-ff704c30.pth -P experiments/pretrained_models + ``` +1. Modify the content in the option file `options/train_realesrnet_x4plus.yml` accordingly: + ```yml + train: + name: DF2K+OST + type: RealESRGANDataset + dataroot_gt: datasets/DF2K # modify to the root path of your folder + meta_info: realesrgan/meta_info/meta_info_DF2Kmultiscale+OST_sub.txt # modify to your own generate meta info txt + io_backend: + type: disk + ``` +1. If you want to perform validation during training, uncomment those lines and modify accordingly: + ```yml + # Uncomment these for validation + # val: + # name: validation + # type: PairedImageDataset + # dataroot_gt: path_to_gt + # dataroot_lq: path_to_lq + # io_backend: + # type: disk + + ... + + # Uncomment these for validation + # validation settings + # val: + # val_freq: !!float 5e3 + # save_img: True + + # metrics: + # psnr: # metric name, can be arbitrary + # type: calculate_psnr + # crop_border: 4 + # test_y_channel: false + ``` +1. Before the formal training, you may run in the `--debug` mode to see whether everything is OK. We use four GPUs for training: + ```bash + CUDA_VISIBLE_DEVICES=0,1,2,3 \ + python -m torch.distributed.launch --nproc_per_node=4 --master_port=4321 realesrgan/train.py -opt options/train_realesrnet_x4plus.yml --launcher pytorch --debug + ``` + + Train with **a single GPU** in the *debug* mode: + ```bash + python realesrgan/train.py -opt options/train_realesrnet_x4plus.yml --debug + ``` +1. The formal training. We use four GPUs for training. We use the `--auto_resume` argument to automatically resume the training if necessary. + ```bash + CUDA_VISIBLE_DEVICES=0,1,2,3 \ + python -m torch.distributed.launch --nproc_per_node=4 --master_port=4321 realesrgan/train.py -opt options/train_realesrnet_x4plus.yml --launcher pytorch --auto_resume + ``` + + Train with **a single GPU**: + ```bash + python realesrgan/train.py -opt options/train_realesrnet_x4plus.yml --auto_resume + ``` + +### Train Real-ESRGAN + +1. After the training of Real-ESRNet, you now have the file `experiments/train_RealESRNetx4plus_1000k_B12G4_fromESRGAN/model/net_g_1000000.pth`. If you need to specify the pre-trained path to other files, modify the `pretrain_network_g` value in the option file `train_realesrgan_x4plus.yml`. +1. Modify the option file `train_realesrgan_x4plus.yml` accordingly. Most modifications are similar to those listed above. +1. Before the formal training, you may run in the `--debug` mode to see whether everything is OK. We use four GPUs for training: + ```bash + CUDA_VISIBLE_DEVICES=0,1,2,3 \ + python -m torch.distributed.launch --nproc_per_node=4 --master_port=4321 realesrgan/train.py -opt options/train_realesrgan_x4plus.yml --launcher pytorch --debug + ``` + + Train with **a single GPU** in the *debug* mode: + ```bash + python realesrgan/train.py -opt options/train_realesrgan_x4plus.yml --debug + ``` +1. The formal training. 
We use four GPUs for training. We use the `--auto_resume` argument to automatically resume the training if necessary. + ```bash + CUDA_VISIBLE_DEVICES=0,1,2,3 \ + python -m torch.distributed.launch --nproc_per_node=4 --master_port=4321 realesrgan/train.py -opt options/train_realesrgan_x4plus.yml --launcher pytorch --auto_resume + ``` + + Train with **a single GPU**: + ```bash + python realesrgan/train.py -opt options/train_realesrgan_x4plus.yml --auto_resume + ``` + +## Finetune Real-ESRGAN on your own dataset + +You can finetune Real-ESRGAN on your own dataset. Typically, the fine-tuning process can be divided into two cases: + +1. [Generate degraded images on the fly](#Generate-degraded-images-on-the-fly) +1. [Use your own **paired** data](#Use-paired-training-data) + +### Generate degraded images on the fly + +Only high-resolution images are required. The low-quality images are generated with the degradation process described in Real-ESRGAN during training. + +**1. Prepare dataset** + +See [this section](#dataset-preparation) for more details. + +**2. Download pre-trained models** + +Download pre-trained models into `experiments/pretrained_models`. + +- *RealESRGAN_x4plus.pth*: + ```bash + wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth -P experiments/pretrained_models + ``` + +- *RealESRGAN_x4plus_netD.pth*: + ```bash + wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.3/RealESRGAN_x4plus_netD.pth -P experiments/pretrained_models + ``` + +**3. Finetune** + +Modify [options/finetune_realesrgan_x4plus.yml](options/finetune_realesrgan_x4plus.yml) accordingly, especially the `datasets` part: + +```yml +train: + name: DF2K+OST + type: RealESRGANDataset + dataroot_gt: datasets/DF2K # modify to the root path of your folder + meta_info: realesrgan/meta_info/meta_info_DF2Kmultiscale+OST_sub.txt # modify to your own generate meta info txt + io_backend: + type: disk +``` + +We use four GPUs for training. We use the `--auto_resume` argument to automatically resume the training if necessary. + +```bash +CUDA_VISIBLE_DEVICES=0,1,2,3 \ +python -m torch.distributed.launch --nproc_per_node=4 --master_port=4321 realesrgan/train.py -opt options/finetune_realesrgan_x4plus.yml --launcher pytorch --auto_resume +``` + +Finetune with **a single GPU**: +```bash +python realesrgan/train.py -opt options/finetune_realesrgan_x4plus.yml --auto_resume +``` + +### Use your own paired data + +You can also finetune RealESRGAN with your own paired data. It is more similar to fine-tuning ESRGAN. + +**1. Prepare dataset** + +Assume that you already have two folders: + +- **gt folder** (Ground-truth, high-resolution images): *datasets/DF2K/DIV2K_train_HR_sub* +- **lq folder** (Low quality, low-resolution images): *datasets/DF2K/DIV2K_train_LR_bicubic_X4_sub* + +Then, you can prepare the meta_info txt file using the script [scripts/generate_meta_info_pairdata.py](scripts/generate_meta_info_pairdata.py): + +```bash +python scripts/generate_meta_info_pairdata.py --input datasets/DF2K/DIV2K_train_HR_sub datasets/DF2K/DIV2K_train_LR_bicubic_X4_sub --meta_info datasets/DF2K/meta_info/meta_info_DIV2K_sub_pair.txt +``` + +**2. Download pre-trained models** + +Download pre-trained models into `experiments/pretrained_models`. 
+ +- *RealESRGAN_x4plus.pth* + ```bash + wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth -P experiments/pretrained_models + ``` + +- *RealESRGAN_x4plus_netD.pth* + ```bash + wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.3/RealESRGAN_x4plus_netD.pth -P experiments/pretrained_models + ``` + +**3. Finetune** + +Modify [options/finetune_realesrgan_x4plus_pairdata.yml](options/finetune_realesrgan_x4plus_pairdata.yml) accordingly, especially the `datasets` part: + +```yml +train: + name: DIV2K + type: RealESRGANPairedDataset + dataroot_gt: datasets/DF2K # modify to the root path of your folder + dataroot_lq: datasets/DF2K # modify to the root path of your folder + meta_info: datasets/DF2K/meta_info/meta_info_DIV2K_sub_pair.txt # modify to your own generate meta info txt + io_backend: + type: disk +``` + +We use four GPUs for training. We use the `--auto_resume` argument to automatically resume the training if necessary. + +```bash +CUDA_VISIBLE_DEVICES=0,1,2,3 \ +python -m torch.distributed.launch --nproc_per_node=4 --master_port=4321 realesrgan/train.py -opt options/finetune_realesrgan_x4plus_pairdata.yml --launcher pytorch --auto_resume +``` + +Finetune with **a single GPU**: +```bash +python realesrgan/train.py -opt options/finetune_realesrgan_x4plus_pairdata.yml --auto_resume +``` diff --git a/Real-ESRGAN/docs/Training_CN.md b/Real-ESRGAN/docs/Training_CN.md new file mode 100644 index 0000000000000000000000000000000000000000..cd32678bca9f3d0fddca693de8a17dd343787339 --- /dev/null +++ b/Real-ESRGAN/docs/Training_CN.md @@ -0,0 +1,271 @@ +# :computer: 如何训练/微调 Real-ESRGAN + +- [训练 Real-ESRGAN](#训练-real-esrgan) + - [概述](#概述) + - [准备数据集](#准备数据集) + - [训练 Real-ESRNet 模型](#训练-real-esrnet-模型) + - [训练 Real-ESRGAN 模型](#训练-real-esrgan-模型) +- [用自己的数据集微调 Real-ESRGAN](#用自己的数据集微调-real-esrgan) + - [动态生成降级图像](#动态生成降级图像) + - [使用已配对的数据](#使用已配对的数据) + +[English](Training.md) **|** [简体中文](Training_CN.md) + +## 训练 Real-ESRGAN + +### 概述 + +训练分为两个步骤。除了 loss 函数外,这两个步骤拥有相同数据合成以及训练的一条龙流程。具体点说: + +1. 首先使用 L1 loss 训练 Real-ESRNet 模型,其中 L1 loss 来自预先训练的 ESRGAN 模型。 + +2. 然后我们将 Real-ESRNet 模型作为生成器初始化,结合L1 loss、感知 loss、GAN loss 三者的参数对 Real-ESRGAN 进行训练。 + +### 准备数据集 + +我们使用 DF2K ( DIV2K 和 Flickr2K ) + OST 数据集进行训练。只需要HR图像!
+下面是网站链接: +1. DIV2K: http://data.vision.ee.ethz.ch/cvl/DIV2K/DIV2K_train_HR.zip +2. Flickr2K: https://cv.snu.ac.kr/research/EDSR/Flickr2K.tar +3. OST: https://openmmlab.oss-cn-hangzhou.aliyuncs.com/datasets/OST_dataset.zip + +以下是数据的准备步骤。 + +#### 第1步:【可选】生成多尺寸图片 + +针对 DF2K 数据集,我们使用多尺寸缩放策略,*换言之*,我们对 HR 图像进行下采样,就能获得多尺寸的标准参考(Ground-Truth)图像。
+您可以使用这个 [scripts/generate_multiscale_DF2K.py](scripts/generate_multiscale_DF2K.py) 脚本快速生成多尺寸的图像。
+注意:如果您只想简单试试,那么可以跳过此步骤。 + +```bash +python scripts/generate_multiscale_DF2K.py --input datasets/DF2K/DF2K_HR --output datasets/DF2K/DF2K_multiscale +``` + +#### 第2步:【可选】裁切为子图像 + +我们可以将 DF2K 图像裁切为子图像,以加快 IO 和处理速度。
+如果你的 IO 够好或储存空间有限,那么此步骤是可选的。
+ +您可以使用脚本 [scripts/extract_subimages.py](scripts/extract_subimages.py)。这是使用示例: + +```bash + python scripts/extract_subimages.py --input datasets/DF2K/DF2K_multiscale --output datasets/DF2K/DF2K_multiscale_sub --crop_size 400 --step 200 +``` + +#### 第3步:准备元信息 txt + +您需要准备一个包含图像路径的 txt 文件。下面是 `meta_info_DF2Kmultiscale+OST_sub.txt` 中的部分展示(由于各个用户可能有截然不同的子图像划分,这个文件不适合你的需求,你得准备自己的 txt 文件): + +```txt +DF2K_HR_sub/000001_s001.png +DF2K_HR_sub/000001_s002.png +DF2K_HR_sub/000001_s003.png +... +``` + +你可以使用该脚本 [scripts/generate_meta_info.py](scripts/generate_meta_info.py) 生成包含图像路径的 txt 文件。
+你还可以合并多个文件夹的图像路径到一个元信息(meta_info)txt。这是使用示例: + +```bash + python scripts/generate_meta_info.py --input datasets/DF2K/DF2K_HR, datasets/DF2K/DF2K_multiscale --root datasets/DF2K, datasets/DF2K --meta_info datasets/DF2K/meta_info/meta_info_DF2Kmultiscale.txt +``` + +### 训练 Real-ESRNet 模型 + +1. 下载预先训练的模型 [ESRGAN](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/ESRGAN_SRx4_DF2KOST_official-ff704c30.pth),放到 `experiments/pretrained_models`目录下。 + ```bash + wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/ESRGAN_SRx4_DF2KOST_official-ff704c30.pth -P experiments/pretrained_models + ``` +2. 相应地修改选项文件 `options/train_realesrnet_x4plus.yml` 中的内容: + ```yml + train: + name: DF2K+OST + type: RealESRGANDataset + dataroot_gt: datasets/DF2K # 修改为你的数据集文件夹根目录 + meta_info: realesrgan/meta_info/meta_info_DF2Kmultiscale+OST_sub.txt # 修改为你自己生成的元信息txt + io_backend: + type: disk + ``` +3. 如果你想在训练过程中执行验证,就取消注释这些内容并进行相应的修改: + ```yml + # 取消注释这些以进行验证 + # val: + # name: validation + # type: PairedImageDataset + # dataroot_gt: path_to_gt + # dataroot_lq: path_to_lq + # io_backend: + # type: disk + + ... + + # 取消注释这些以进行验证 + # 验证设置 + # val: + # val_freq: !!float 5e3 + # save_img: True + + # metrics: + # psnr: # 指标名称,可以是任意的 + # type: calculate_psnr + # crop_border: 4 + # test_y_channel: false + ``` +4. 正式训练之前,你可以用 `--debug` 模式检查是否正常运行。我们用了4个GPU进行训练: + ```bash + CUDA_VISIBLE_DEVICES=0,1,2,3 \ + python -m torch.distributed.launch --nproc_per_node=4 --master_port=4321 realesrgan/train.py -opt options/train_realesrnet_x4plus.yml --launcher pytorch --debug + ``` + + 用 **1个GPU** 训练的 debug 模式示例: + ```bash + python realesrgan/train.py -opt options/train_realesrnet_x4plus.yml --debug + ``` +5. 正式训练开始。我们用了4个GPU进行训练。还可以使用参数 `--auto_resume` 在必要时自动恢复训练。 + ```bash + CUDA_VISIBLE_DEVICES=0,1,2,3 \ + python -m torch.distributed.launch --nproc_per_node=4 --master_port=4321 realesrgan/train.py -opt options/train_realesrnet_x4plus.yml --launcher pytorch --auto_resume + ``` + + 用 **1个GPU** 训练: + ```bash + python realesrgan/train.py -opt options/train_realesrnet_x4plus.yml --auto_resume + ``` + +### 训练 Real-ESRGAN 模型 + +1. 训练 Real-ESRNet 模型后,您得到了这个 `experiments/train_RealESRNetx4plus_1000k_B12G4_fromESRGAN/model/net_g_1000000.pth` 文件。如果需要指定预训练路径到其他文件,请修改选项文件 `train_realesrgan_x4plus.yml` 中 `pretrain_network_g` 的值。 +1. 修改选项文件 `train_realesrgan_x4plus.yml` 的内容。大多数修改与上节提到的类似。 +1. 正式训练之前,你可以以 `--debug` 模式检查是否正常运行。我们使用了4个GPU进行训练: + ```bash + CUDA_VISIBLE_DEVICES=0,1,2,3 \ + python -m torch.distributed.launch --nproc_per_node=4 --master_port=4321 realesrgan/train.py -opt options/train_realesrgan_x4plus.yml --launcher pytorch --debug + ``` + + 用 **1个GPU** 训练的 debug 模式示例: + ```bash + python realesrgan/train.py -opt options/train_realesrgan_x4plus.yml --debug + ``` +1. 正式训练开始。我们使用4个GPU进行训练。还可以使用参数 `--auto_resume` 在必要时自动恢复训练。 + ```bash + CUDA_VISIBLE_DEVICES=0,1,2,3 \ + python -m torch.distributed.launch --nproc_per_node=4 --master_port=4321 realesrgan/train.py -opt options/train_realesrgan_x4plus.yml --launcher pytorch --auto_resume + ``` + + 用 **1个GPU** 训练: + ```bash + python realesrgan/train.py -opt options/train_realesrgan_x4plus.yml --auto_resume + ``` + +## 用自己的数据集微调 Real-ESRGAN + +你可以用自己的数据集微调 Real-ESRGAN。一般地,微调(Fine-Tune)程序可以分为两种类型: + +1. [动态生成降级图像](#动态生成降级图像) +2. [使用**已配对**的数据](#使用已配对的数据) + +### 动态生成降级图像 + +只需要高分辨率图像。在训练过程中,使用 Real-ESRGAN 描述的降级模型生成低质量图像。 + +**1. 准备数据集** + +完整信息请参见[本节](#准备数据集)。 + +**2. 
下载预训练模型** + +下载预先训练的模型到 `experiments/pretrained_models` 目录下。 + +- *RealESRGAN_x4plus.pth*: + ```bash + wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth -P experiments/pretrained_models + ``` + +- *RealESRGAN_x4plus_netD.pth*: + ```bash + wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.3/RealESRGAN_x4plus_netD.pth -P experiments/pretrained_models + ``` + +**3. 微调** + +修改选项文件 [options/finetune_realesrgan_x4plus.yml](options/finetune_realesrgan_x4plus.yml) ,特别是 `datasets` 部分: + +```yml +train: + name: DF2K+OST + type: RealESRGANDataset + dataroot_gt: datasets/DF2K # 修改为你的数据集文件夹根目录 + meta_info: realesrgan/meta_info/meta_info_DF2Kmultiscale+OST_sub.txt # 修改为你自己生成的元信息txt + io_backend: + type: disk +``` + +我们使用4个GPU进行训练。还可以使用参数 `--auto_resume` 在必要时自动恢复训练。 + +```bash +CUDA_VISIBLE_DEVICES=0,1,2,3 \ +python -m torch.distributed.launch --nproc_per_node=4 --master_port=4321 realesrgan/train.py -opt options/finetune_realesrgan_x4plus.yml --launcher pytorch --auto_resume +``` + +用 **1个GPU** 训练: +```bash +python realesrgan/train.py -opt options/finetune_realesrgan_x4plus.yml --auto_resume +``` + +### 使用已配对的数据 + +你还可以用自己已经配对的数据微调 RealESRGAN。这个过程更类似于微调 ESRGAN。 + +**1. 准备数据集** + +假设你已经有两个文件夹(folder): + +- **gt folder**(标准参考,高分辨率图像):*datasets/DF2K/DIV2K_train_HR_sub* +- **lq folder**(低质量,低分辨率图像):*datasets/DF2K/DIV2K_train_LR_bicubic_X4_sub* + +然后,您可以使用脚本 [scripts/generate_meta_info_pairdata.py](scripts/generate_meta_info_pairdata.py) 生成元信息(meta_info)txt 文件。 + +```bash +python scripts/generate_meta_info_pairdata.py --input datasets/DF2K/DIV2K_train_HR_sub datasets/DF2K/DIV2K_train_LR_bicubic_X4_sub --meta_info datasets/DF2K/meta_info/meta_info_DIV2K_sub_pair.txt +``` + +**2. 下载预训练模型** + +下载预先训练的模型到 `experiments/pretrained_models` 目录下。 + +- *RealESRGAN_x4plus.pth*: + ```bash + wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth -P experiments/pretrained_models + ``` + +- *RealESRGAN_x4plus_netD.pth*: + ```bash + wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.3/RealESRGAN_x4plus_netD.pth -P experiments/pretrained_models + ``` + +**3. 微调** + +修改选项文件 [options/finetune_realesrgan_x4plus_pairdata.yml](options/finetune_realesrgan_x4plus_pairdata.yml) ,特别是 `datasets` 部分: + +```yml +train: + name: DIV2K + type: RealESRGANPairedDataset + dataroot_gt: datasets/DF2K # 修改为你的 gt folder 文件夹根目录 + dataroot_lq: datasets/DF2K # 修改为你的 lq folder 文件夹根目录 + meta_info: datasets/DF2K/meta_info/meta_info_DIV2K_sub_pair.txt # 修改为你自己生成的元信息txt + io_backend: + type: disk +``` + +我们使用4个GPU进行训练。还可以使用参数 `--auto_resume` 在必要时自动恢复训练。 + +```bash +CUDA_VISIBLE_DEVICES=0,1,2,3 \ +python -m torch.distributed.launch --nproc_per_node=4 --master_port=4321 realesrgan/train.py -opt options/finetune_realesrgan_x4plus_pairdata.yml --launcher pytorch --auto_resume +``` + +用 **1个GPU** 训练: +```bash +python realesrgan/train.py -opt options/finetune_realesrgan_x4plus_pairdata.yml --auto_resume +``` diff --git a/Real-ESRGAN/docs/anime_comparisons.md b/Real-ESRGAN/docs/anime_comparisons.md new file mode 100644 index 0000000000000000000000000000000000000000..5f2cd58846c08f9f5c226c5460b4ccd4e06cfc47 --- /dev/null +++ b/Real-ESRGAN/docs/anime_comparisons.md @@ -0,0 +1,66 @@ +# Comparisons among different anime models + +[English](anime_comparisons.md) **|** [简体中文](anime_comparisons_CN.md) + +## Update News + +- 2022/04/24: Release **AnimeVideo-v3**. 
We have made the following improvements: + - **better naturalness** + - **Fewer artifacts** + - **more faithful to the original colors** + - **better texture restoration** + - **better background restoration** + +## Comparisons + +We have compared our RealESRGAN-AnimeVideo-v3 with the following methods. +Our RealESRGAN-AnimeVideo-v3 can achieve better results with faster inference speed. + +- [waifu2x](https://github.com/nihui/waifu2x-ncnn-vulkan) with the hyperparameters: `tile=0`, `noiselevel=2` +- [Real-CUGAN](https://github.com/bilibili/ailab/tree/main/Real-CUGAN): we use the [20220227](https://github.com/bilibili/ailab/releases/tag/Real-CUGAN-add-faster-low-memory-mode) version, the hyperparameters are: `cache_mode=0`, `tile=0`, `alpha=1`. +- our RealESRGAN-AnimeVideo-v3 + +## Results + +You may need to **zoom in** for comparing details, or **click the image** to see in the full size. Please note that the images +in the table below are the resized and cropped patches from the original images, you can download the original inputs and outputs from [Google Drive](https://drive.google.com/drive/folders/1bc_Hje1Nqop9NDkUvci2VACSjL7HZMRp?usp=sharing) . + +**More natural results, better background restoration** +| Input | waifu2x | Real-CUGAN | RealESRGAN
AnimeVideo-v3 | +| :---: | :---: | :---: | :---: | +|![157083983-bec52c67-9a5e-4eed-afef-01fe6cd2af85_patch](https://user-images.githubusercontent.com/11482921/164452769-5d8cb4f8-1708-42d2-b941-f44a6f136feb.png) | ![](https://user-images.githubusercontent.com/11482921/164452767-c825cdec-f721-4ff1-aef1-fec41f146c4c.png) | ![](https://user-images.githubusercontent.com/11482921/164452755-3be50895-e3d4-432d-a7b9-9085c2a8e771.png) | ![](https://user-images.githubusercontent.com/11482921/164452771-be300656-379a-4323-a755-df8025a8c451.png) | +|![a0010_patch](https://user-images.githubusercontent.com/11482921/164454047-22eeb493-3fa9-4142-9fc2-6f2a1c074cd5.png) | ![](https://user-images.githubusercontent.com/11482921/164454046-d5e79f8f-00a0-4b55-bc39-295d0d69747a.png) | ![](https://user-images.githubusercontent.com/11482921/164454040-87886b11-9d08-48bd-862f-0d4aed72eb19.png) | ![](https://user-images.githubusercontent.com/11482921/164454055-73dc9f02-286e-4d5c-8f70-c13742e08f42.png) | +|![00000044_patch](https://user-images.githubusercontent.com/11482921/164451232-bacf64fc-e55a-44db-afbb-6b31ab0f8973.png) | ![](https://user-images.githubusercontent.com/11482921/164451318-f309b61a-75b8-4b74-b5f3-595725f1cf0b.png) | ![](https://user-images.githubusercontent.com/11482921/164451348-994f8a35-adbe-4a4b-9c61-feaa294af06a.png) | ![](https://user-images.githubusercontent.com/11482921/164451361-9b7d376e-6f75-4648-b752-542b44845d1c.png) | + +**Fewer artifacts, better detailed textures** +| Input | waifu2x | Real-CUGAN | RealESRGAN
AnimeVideo-v3 | +| :---: | :---: | :---: | :---: | +|![00000053_patch](https://user-images.githubusercontent.com/11482921/164448411-148a7e5c-cfcd-4504-8bc7-e318eb883bb6.png) | ![](https://user-images.githubusercontent.com/11482921/164448633-dfc15224-b6d2-4403-a3c9-4bb819979364.png) | ![](https://user-images.githubusercontent.com/11482921/164448771-0d359509-5293-4d4c-8e3c-86a2a314ea88.png) | ![](https://user-images.githubusercontent.com/11482921/164448848-1a4ff99e-075b-4458-9db7-2c89e8160aa0.png) | +|![Disney_v4_22_018514_s2_patch](https://user-images.githubusercontent.com/11482921/164451898-83311cdf-bd3e-450f-b9f6-34d7fea3ab79.png) | ![](https://user-images.githubusercontent.com/11482921/164451894-6c56521c-6561-40d6-a3a5-8dde2c167b8a.png) | ![](https://user-images.githubusercontent.com/11482921/164451888-af9b47e3-39dc-4f3e-b0d7-d372d8191e2a.png) | ![](https://user-images.githubusercontent.com/11482921/164451901-31ca4dd4-9847-4baa-8cde-ad50f4053dcf.png) | +|![Japan_v2_0_007261_s2_patch](https://user-images.githubusercontent.com/11482921/164454578-73c77392-77de-49c5-b03c-c36631723192.png) | ![](https://user-images.githubusercontent.com/11482921/164454574-b1ede5f0-4520-4eaa-8f59-086751a34e62.png) | ![](https://user-images.githubusercontent.com/11482921/164454567-4cb3fdd8-6a2d-4016-85b2-a305a8ff80e4.png) | ![](https://user-images.githubusercontent.com/11482921/164454583-7f243f20-eca3-4500-ac43-eb058a4a101a.png) | +|![huluxiongdi_2_patch](https://user-images.githubusercontent.com/11482921/164453482-0726c842-337e-40ec-bf6c-f902ee956a8b.png) | ![](https://user-images.githubusercontent.com/11482921/164453480-71d5e091-5bfa-4c77-9c57-4e37f66ca0a3.png) | ![](https://user-images.githubusercontent.com/11482921/164453468-c295d3c9-3661-45f0-9ecd-406a1877f76e.png) | ![](https://user-images.githubusercontent.com/11482921/164453486-3091887c-587c-450e-b6fe-905cb518d57e.png) | + +**Other better results** +| Input | waifu2x | Real-CUGAN | RealESRGAN
AnimeVideo-v3 | +| :---: | :---: | :---: | :---: | +|![Japan_v2_1_128525_s1_patch](https://user-images.githubusercontent.com/11482921/164454933-67697f7c-b6ef-47dc-bfca-822a78af8acf.png) | ![](https://user-images.githubusercontent.com/11482921/164454931-9450de7c-f0b3-4638-9c1e-0668e0c41ef0.png) | ![](https://user-images.githubusercontent.com/11482921/164454926-ed746976-786d-41c5-8a83-7693cd774c3a.png) | ![](https://user-images.githubusercontent.com/11482921/164454936-8abdf0f0-fb30-40eb-8281-3b46c0bcb9ae.png) | +|![tianshuqitan_2_patch](https://user-images.githubusercontent.com/11482921/164456948-807c1476-90b6-4507-81da-cb986d01600c.png) | ![](https://user-images.githubusercontent.com/11482921/164456943-25e89de9-d7e5-4f61-a2e1-96786af6ae9e.png) | ![](https://user-images.githubusercontent.com/11482921/164456954-b468c447-59f5-4594-9693-3683e44ba3e6.png) | ![](https://user-images.githubusercontent.com/11482921/164456957-640f910c-3b04-407c-ac20-044d72e19735.png) | +|![00000051_patch](https://user-images.githubusercontent.com/11482921/164456044-e9a6b3fa-b24e-4eb7-acf9-1f7746551b1e.png) ![00000051_patch](https://user-images.githubusercontent.com/11482921/164456421-b67245b0-767d-4250-9105-80bbe507ecfc.png) | ![](https://user-images.githubusercontent.com/11482921/164456040-85763cf2-cb28-4ba3-abb6-1dbb48c55713.png) ![](https://user-images.githubusercontent.com/11482921/164456419-59cf342e-bc1e-4044-868c-e1090abad313.png) | ![](https://user-images.githubusercontent.com/11482921/164456031-4244bb7b-8649-4e01-86f4-40c2099c5afd.png) ![](https://user-images.githubusercontent.com/11482921/164456411-b6afcbe9-c054-448d-a6df-96d3ba3047f8.png) | ![](https://user-images.githubusercontent.com/11482921/164456035-12e270be-fd52-46d4-b18a-3d3b680731fe.png) ![](https://user-images.githubusercontent.com/11482921/164456417-dcaa8b62-f497-427d-b2d2-f390f1200fb9.png) | +|![00000099_patch](https://user-images.githubusercontent.com/11482921/164455312-6411b6e1-5823-4131-a4b0-a6be8a9ae89f.png) | ![](https://user-images.githubusercontent.com/11482921/164455310-f2b99646-3a22-47a4-805b-dc451ac86ddb.png) | ![](https://user-images.githubusercontent.com/11482921/164455294-35471b42-2826-4451-b7ec-6de01344954c.png) | ![](https://user-images.githubusercontent.com/11482921/164455305-fa4c9758-564a-4081-8b4e-f11057a0404d.png) | +|![00000016_patch](https://user-images.githubusercontent.com/11482921/164455672-447353c9-2da2-4fcb-ba4a-7dd6b94c19c1.png) | ![](https://user-images.githubusercontent.com/11482921/164455669-df384631-baaa-42f8-9150-40f658471558.png) | ![](https://user-images.githubusercontent.com/11482921/164455657-68006bf0-138d-4981-aaca-8aa927d2f78a.png) | ![](https://user-images.githubusercontent.com/11482921/164455664-0342b93e-a62a-4b36-a90e-7118f3f1e45d.png) | + +## Inference Speed + +### PyTorch + +Note that we only report the **model** time, and ignore the IO time. 
+ +| GPU | Input Resolution | waifu2x | Real-CUGAN | RealESRGAN-AnimeVideo-v3 +| :---: | :---: | :---: | :---: | :---: | +| V100 | 1921 x 1080 | - | 3.4 fps | **10.0** fps | +| V100 | 1280 x 720 | - | 7.2 fps | **22.6** fps | +| V100 | 640 x 480 | - | 24.4 fps | **65.9** fps | + +### ncnn + +- [ ] TODO diff --git a/Real-ESRGAN/docs/anime_comparisons_CN.md b/Real-ESRGAN/docs/anime_comparisons_CN.md new file mode 100644 index 0000000000000000000000000000000000000000..c6dfea6bd42c99e318c80d3d774c8e21b1bc5e11 --- /dev/null +++ b/Real-ESRGAN/docs/anime_comparisons_CN.md @@ -0,0 +1,68 @@ +# 动漫视频模型比较 + +[English](anime_comparisons.md) **|** [简体中文](anime_comparisons_CN.md) + +## 更新 + +- 2022/04/24: 发布 **AnimeVideo-v3**. 主要做了以下更新: + - **更自然** + - **更少瑕疵** + - **颜色保持得更好** + - **更好的纹理恢复** + - **虚化背景处理** + +## 比较 + +我们将 RealESRGAN-AnimeVideo-v3 与以下方法进行了比较。我们的 RealESRGAN-AnimeVideo-v3 可以以更快的推理速度获得更好的结果。 + +- [waifu2x](https://github.com/nihui/waifu2x-ncnn-vulkan). 超参数: `tile=0`, `noiselevel=2` +- [Real-CUGAN](https://github.com/bilibili/ailab/tree/main/Real-CUGAN): 我们使用了[20220227](https://github.com/bilibili/ailab/releases/tag/Real-CUGAN-add-faster-low-memory-mode)版本, 超参: `cache_mode=0`, `tile=0`, `alpha=1`. +- 我们的 RealESRGAN-AnimeVideo-v3 + +## 结果 + +您可能需要**放大**以比较详细信息, 或者**单击图像**以查看完整尺寸。 请注意下面表格的图片是从原图里裁剪patch并且resize后的结果,您可以从 +[Google Drive](https://drive.google.com/drive/folders/1bc_Hje1Nqop9NDkUvci2VACSjL7HZMRp?usp=sharing) 里下载原始的输入和输出。 + +**更自然的结果,更好的虚化背景恢复** + +| 输入 | waifu2x | Real-CUGAN | RealESRGAN
AnimeVideo-v3 | +| :---: | :---: | :---: | :---: | +|![157083983-bec52c67-9a5e-4eed-afef-01fe6cd2af85_patch](https://user-images.githubusercontent.com/11482921/164452769-5d8cb4f8-1708-42d2-b941-f44a6f136feb.png) | ![](https://user-images.githubusercontent.com/11482921/164452767-c825cdec-f721-4ff1-aef1-fec41f146c4c.png) | ![](https://user-images.githubusercontent.com/11482921/164452755-3be50895-e3d4-432d-a7b9-9085c2a8e771.png) | ![](https://user-images.githubusercontent.com/11482921/164452771-be300656-379a-4323-a755-df8025a8c451.png) | +|![a0010_patch](https://user-images.githubusercontent.com/11482921/164454047-22eeb493-3fa9-4142-9fc2-6f2a1c074cd5.png) | ![](https://user-images.githubusercontent.com/11482921/164454046-d5e79f8f-00a0-4b55-bc39-295d0d69747a.png) | ![](https://user-images.githubusercontent.com/11482921/164454040-87886b11-9d08-48bd-862f-0d4aed72eb19.png) | ![](https://user-images.githubusercontent.com/11482921/164454055-73dc9f02-286e-4d5c-8f70-c13742e08f42.png) | +|![00000044_patch](https://user-images.githubusercontent.com/11482921/164451232-bacf64fc-e55a-44db-afbb-6b31ab0f8973.png) | ![](https://user-images.githubusercontent.com/11482921/164451318-f309b61a-75b8-4b74-b5f3-595725f1cf0b.png) | ![](https://user-images.githubusercontent.com/11482921/164451348-994f8a35-adbe-4a4b-9c61-feaa294af06a.png) | ![](https://user-images.githubusercontent.com/11482921/164451361-9b7d376e-6f75-4648-b752-542b44845d1c.png) | + +**更少瑕疵,更好的细节纹理** + +| 输入 | waifu2x | Real-CUGAN | RealESRGAN
AnimeVideo-v3 | +| :---: | :---: | :---: | :---: | +|![00000053_patch](https://user-images.githubusercontent.com/11482921/164448411-148a7e5c-cfcd-4504-8bc7-e318eb883bb6.png) | ![](https://user-images.githubusercontent.com/11482921/164448633-dfc15224-b6d2-4403-a3c9-4bb819979364.png) | ![](https://user-images.githubusercontent.com/11482921/164448771-0d359509-5293-4d4c-8e3c-86a2a314ea88.png) | ![](https://user-images.githubusercontent.com/11482921/164448848-1a4ff99e-075b-4458-9db7-2c89e8160aa0.png) | +|![Disney_v4_22_018514_s2_patch](https://user-images.githubusercontent.com/11482921/164451898-83311cdf-bd3e-450f-b9f6-34d7fea3ab79.png) | ![](https://user-images.githubusercontent.com/11482921/164451894-6c56521c-6561-40d6-a3a5-8dde2c167b8a.png) | ![](https://user-images.githubusercontent.com/11482921/164451888-af9b47e3-39dc-4f3e-b0d7-d372d8191e2a.png) | ![](https://user-images.githubusercontent.com/11482921/164451901-31ca4dd4-9847-4baa-8cde-ad50f4053dcf.png) | +|![Japan_v2_0_007261_s2_patch](https://user-images.githubusercontent.com/11482921/164454578-73c77392-77de-49c5-b03c-c36631723192.png) | ![](https://user-images.githubusercontent.com/11482921/164454574-b1ede5f0-4520-4eaa-8f59-086751a34e62.png) | ![](https://user-images.githubusercontent.com/11482921/164454567-4cb3fdd8-6a2d-4016-85b2-a305a8ff80e4.png) | ![](https://user-images.githubusercontent.com/11482921/164454583-7f243f20-eca3-4500-ac43-eb058a4a101a.png) | +|![huluxiongdi_2_patch](https://user-images.githubusercontent.com/11482921/164453482-0726c842-337e-40ec-bf6c-f902ee956a8b.png) | ![](https://user-images.githubusercontent.com/11482921/164453480-71d5e091-5bfa-4c77-9c57-4e37f66ca0a3.png) | ![](https://user-images.githubusercontent.com/11482921/164453468-c295d3c9-3661-45f0-9ecd-406a1877f76e.png) | ![](https://user-images.githubusercontent.com/11482921/164453486-3091887c-587c-450e-b6fe-905cb518d57e.png) | + +**其他更好的结果** + +| 输入 | waifu2x | Real-CUGAN | RealESRGAN
AnimeVideo-v3 | +| :---: | :---: | :---: | :---: | +|![Japan_v2_1_128525_s1_patch](https://user-images.githubusercontent.com/11482921/164454933-67697f7c-b6ef-47dc-bfca-822a78af8acf.png) | ![](https://user-images.githubusercontent.com/11482921/164454931-9450de7c-f0b3-4638-9c1e-0668e0c41ef0.png) | ![](https://user-images.githubusercontent.com/11482921/164454926-ed746976-786d-41c5-8a83-7693cd774c3a.png) | ![](https://user-images.githubusercontent.com/11482921/164454936-8abdf0f0-fb30-40eb-8281-3b46c0bcb9ae.png) | +|![tianshuqitan_2_patch](https://user-images.githubusercontent.com/11482921/164456948-807c1476-90b6-4507-81da-cb986d01600c.png) | ![](https://user-images.githubusercontent.com/11482921/164456943-25e89de9-d7e5-4f61-a2e1-96786af6ae9e.png) | ![](https://user-images.githubusercontent.com/11482921/164456954-b468c447-59f5-4594-9693-3683e44ba3e6.png) | ![](https://user-images.githubusercontent.com/11482921/164456957-640f910c-3b04-407c-ac20-044d72e19735.png) | +|![00000051_patch](https://user-images.githubusercontent.com/11482921/164456044-e9a6b3fa-b24e-4eb7-acf9-1f7746551b1e.png) ![00000051_patch](https://user-images.githubusercontent.com/11482921/164456421-b67245b0-767d-4250-9105-80bbe507ecfc.png) | ![](https://user-images.githubusercontent.com/11482921/164456040-85763cf2-cb28-4ba3-abb6-1dbb48c55713.png) ![](https://user-images.githubusercontent.com/11482921/164456419-59cf342e-bc1e-4044-868c-e1090abad313.png) | ![](https://user-images.githubusercontent.com/11482921/164456031-4244bb7b-8649-4e01-86f4-40c2099c5afd.png) ![](https://user-images.githubusercontent.com/11482921/164456411-b6afcbe9-c054-448d-a6df-96d3ba3047f8.png) | ![](https://user-images.githubusercontent.com/11482921/164456035-12e270be-fd52-46d4-b18a-3d3b680731fe.png) ![](https://user-images.githubusercontent.com/11482921/164456417-dcaa8b62-f497-427d-b2d2-f390f1200fb9.png) | +|![00000099_patch](https://user-images.githubusercontent.com/11482921/164455312-6411b6e1-5823-4131-a4b0-a6be8a9ae89f.png) | ![](https://user-images.githubusercontent.com/11482921/164455310-f2b99646-3a22-47a4-805b-dc451ac86ddb.png) | ![](https://user-images.githubusercontent.com/11482921/164455294-35471b42-2826-4451-b7ec-6de01344954c.png) | ![](https://user-images.githubusercontent.com/11482921/164455305-fa4c9758-564a-4081-8b4e-f11057a0404d.png) | +|![00000016_patch](https://user-images.githubusercontent.com/11482921/164455672-447353c9-2da2-4fcb-ba4a-7dd6b94c19c1.png) | ![](https://user-images.githubusercontent.com/11482921/164455669-df384631-baaa-42f8-9150-40f658471558.png) | ![](https://user-images.githubusercontent.com/11482921/164455657-68006bf0-138d-4981-aaca-8aa927d2f78a.png) | ![](https://user-images.githubusercontent.com/11482921/164455664-0342b93e-a62a-4b36-a90e-7118f3f1e45d.png) | + +## 推理速度比较 + +### PyTorch + +请注意,我们只报告了**模型推理**的时间, 而忽略了读写硬盘的时间. 
+ +| GPU | 输入尺寸 | waifu2x | Real-CUGAN | RealESRGAN-AnimeVideo-v3 +| :---: | :---: | :---: | :---: | :---: | +| V100 | 1921 x 1080 | - | 3.4 fps | **10.0** fps | +| V100 | 1280 x 720 | - | 7.2 fps | **22.6** fps | +| V100 | 640 x 480 | - | 24.4 fps | **65.9** fps | + +### ncnn + +- [ ] TODO diff --git a/Real-ESRGAN/docs/anime_model.md b/Real-ESRGAN/docs/anime_model.md new file mode 100644 index 0000000000000000000000000000000000000000..987ab680ba45ae806c7bf6aa49df8b1805f1001d --- /dev/null +++ b/Real-ESRGAN/docs/anime_model.md @@ -0,0 +1,68 @@ +# Anime Model + +:white_check_mark: We add [*RealESRGAN_x4plus_anime_6B.pth*](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth), which is optimized for **anime** images with much smaller model size. + +- [How to Use](#how-to-use) + - [PyTorch Inference](#pytorch-inference) + - [ncnn Executable File](#ncnn-executable-file) +- [Comparisons with waifu2x](#comparisons-with-waifu2x) +- [Comparisons with Sliding Bars](#comparisons-with-sliding-bars) + +

+ +The following is a video comparison with sliding bar. You may need to use the full-screen mode for better visual quality, as the original image is large; otherwise, you may encounter aliasing issue. + + + +## How to Use + +### PyTorch Inference + +Pre-trained models: [RealESRGAN_x4plus_anime_6B](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth) + +```bash +# download model +wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth -P weights +# inference +python inference_realesrgan.py -n RealESRGAN_x4plus_anime_6B -i inputs +``` + +### ncnn Executable File + +Download the latest portable [Windows](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesrgan-ncnn-vulkan-20220424-windows.zip) / [Linux](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesrgan-ncnn-vulkan-20220424-ubuntu.zip) / [MacOS](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesrgan-ncnn-vulkan-20220424-macos.zip) **executable files for Intel/AMD/Nvidia GPU**. + +Taking the Windows as example, run: + +```bash +./realesrgan-ncnn-vulkan.exe -i input.jpg -o output.png -n realesrgan-x4plus-anime +``` + +## Comparisons with waifu2x + +We compare Real-ESRGAN-anime with [waifu2x](https://github.com/nihui/waifu2x-ncnn-vulkan). We use the `-n 2 -s 4` for waifu2x. + +
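For reference, the PyTorch inference shown above can also be scripted directly. The following is a minimal sketch based on the `RRDBNet` + `RealESRGANer` usage in `inference_realesrgan.py` and `cog_predict.py`; the input and output paths are placeholder assumptions:

```python
import os
import cv2
from basicsr.archs.rrdbnet_arch import RRDBNet
from realesrgan import RealESRGANer

# RealESRGAN_x4plus_anime_6B is an RRDBNet with 6 blocks (see inference_realesrgan.py)
model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4)
upsampler = RealESRGANer(
    scale=4,
    model_path='weights/RealESRGAN_x4plus_anime_6B.pth',  # downloaded by the wget command above
    model=model,
    tile=0,        # set e.g. 400 if you hit CUDA out-of-memory
    tile_pad=10,
    pre_pad=0,
    half=True)     # fp16; use half=False on CPU (see the 'Half' error in the FAQ)

img = cv2.imread('inputs/0014.jpg', cv2.IMREAD_UNCHANGED)  # placeholder input path
output, _ = upsampler.enhance(img, outscale=4)
os.makedirs('results', exist_ok=True)
cv2.imwrite('results/0014_out.png', output)
```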

*(side-by-side comparison images: input / waifu2x / Real-ESRGAN-anime)*
+ +## Comparisons with Sliding Bars + +The following are video comparisons with sliding bar. You may need to use the full-screen mode for better visual quality, as the original image is large; otherwise, you may encounter aliasing issue. + + + + diff --git a/Real-ESRGAN/docs/anime_video_model.md b/Real-ESRGAN/docs/anime_video_model.md new file mode 100644 index 0000000000000000000000000000000000000000..6230bd0a019c21e768b7750c39d4a1ef7410ab80 --- /dev/null +++ b/Real-ESRGAN/docs/anime_video_model.md @@ -0,0 +1,136 @@ +# Anime Video Models + +:white_check_mark: We add small models that are optimized for anime videos :-)
+More comparisons can be found in [anime_comparisons.md](anime_comparisons.md) + +- [How to Use](#how-to-use) +- [PyTorch Inference](#pytorch-inference) +- [ncnn Executable File](#ncnn-executable-file) + - [Step 1: Use ffmpeg to extract frames from video](#step-1-use-ffmpeg-to-extract-frames-from-video) + - [Step 2: Inference with Real-ESRGAN executable file](#step-2-inference-with-real-esrgan-executable-file) + - [Step 3: Merge the enhanced frames back into a video](#step-3-merge-the-enhanced-frames-back-into-a-video) +- [More Demos](#more-demos) + +| Models | Scale | Description | +| ---------------------------------------------------------------------------------------------------------------------------------- | :---- | :----------------------------- | +| [realesr-animevideov3](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-animevideov3.pth) | X4 1 | Anime video model with XS size | + +Note:
+1 This model can also be used for X1, X2, X3. + +--- + +The following are some demos (best view in the full screen mode). + + + + + + + +## How to Use + +### PyTorch Inference + +```bash +# download model +wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-animevideov3.pth -P weights +# single gpu and single process inference +CUDA_VISIBLE_DEVICES=0 python inference_realesrgan_video.py -i inputs/video/onepiece_demo.mp4 -n realesr-animevideov3 -s 2 --suffix outx2 +# single gpu and multi process inference (you can use multi-processing to improve GPU utilization) +CUDA_VISIBLE_DEVICES=0 python inference_realesrgan_video.py -i inputs/video/onepiece_demo.mp4 -n realesr-animevideov3 -s 2 --suffix outx2 --num_process_per_gpu 2 +# multi gpu and multi process inference +CUDA_VISIBLE_DEVICES=0,1,2,3 python inference_realesrgan_video.py -i inputs/video/onepiece_demo.mp4 -n realesr-animevideov3 -s 2 --suffix outx2 --num_process_per_gpu 2 +``` + +```console +Usage: +--num_process_per_gpu The total number of process is num_gpu * num_process_per_gpu. The bottleneck of + the program lies on the IO, so the GPUs are usually not fully utilized. To alleviate + this issue, you can use multi-processing by setting this parameter. As long as it + does not exceed the CUDA memory +--extract_frame_first If you encounter ffmpeg error when using multi-processing, you can turn this option on. +``` + +### NCNN Executable File + +#### Step 1: Use ffmpeg to extract frames from video + +```bash +ffmpeg -i onepiece_demo.mp4 -qscale:v 1 -qmin 1 -qmax 1 -vsync 0 tmp_frames/frame%08d.png +``` + +- Remember to create the folder `tmp_frames` ahead + +#### Step 2: Inference with Real-ESRGAN executable file + +1. Download the latest portable [Windows](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesrgan-ncnn-vulkan-20220424-windows.zip) / [Linux](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesrgan-ncnn-vulkan-20220424-ubuntu.zip) / [MacOS](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesrgan-ncnn-vulkan-20220424-macos.zip) **executable files for Intel/AMD/Nvidia GPU** + +1. Taking the Windows as example, run: + + ```bash + ./realesrgan-ncnn-vulkan.exe -i tmp_frames -o out_frames -n realesr-animevideov3 -s 2 -f jpg + ``` + + - Remember to create the folder `out_frames` ahead + +#### Step 3: Merge the enhanced frames back into a video + +1. First obtain fps from input videos by + + ```bash + ffmpeg -i onepiece_demo.mp4 + ``` + + ```console + Usage: + -i input video path + ``` + + You will get the output similar to the following screenshot. + +

*(ffmpeg prints the stream information for the input video, including the fps)*
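If you prefer to read the fps programmatically instead of from the console output, here is a small sketch using the `ffmpeg-python` package, mirroring `get_video_meta_info()` in `inference_realesrgan_video.py` (the file name is just the demo input used above):

```python
import ffmpeg  # the ffmpeg-python package

def get_fps(video_path):
    """Return the average frame rate of the first video stream."""
    probe = ffmpeg.probe(video_path)
    video_streams = [s for s in probe['streams'] if s['codec_type'] == 'video']
    num, den = video_streams[0]['avg_frame_rate'].split('/')
    return float(num) / float(den)

print(get_fps('onepiece_demo.mp4'))  # e.g. 23.98, the value passed to `ffmpeg -r` below
```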
+ +2. Merge frames + + ```bash + ffmpeg -r 23.98 -i out_frames/frame%08d.jpg -c:v libx264 -r 23.98 -pix_fmt yuv420p output.mp4 + ``` + + ```console + Usage: + -i input video path + -c:v video encoder (usually we use libx264) + -r fps, remember to modify it to meet your needs + -pix_fmt pixel format in video + ``` + + If you also want to copy audio from the input videos, run: + + ```bash + ffmpeg -r 23.98 -i out_frames/frame%08d.jpg -i onepiece_demo.mp4 -map 0:v:0 -map 1:a:0 -c:a copy -c:v libx264 -r 23.98 -pix_fmt yuv420p output_w_audio.mp4 + ``` + + ```console + Usage: + -i input video path, here we use two input streams + -c:v video encoder (usually we use libx264) + -r fps, remember to modify it to meet your needs + -pix_fmt pixel format in video + ``` + +## More Demos + +- Input video for One Piece: + + + +- Out video for One Piece + + + +**More comparisons** + + diff --git a/Real-ESRGAN/docs/feedback.md b/Real-ESRGAN/docs/feedback.md new file mode 100644 index 0000000000000000000000000000000000000000..aa97ad6cbac41ebc5939c846ee81ce085f77db30 --- /dev/null +++ b/Real-ESRGAN/docs/feedback.md @@ -0,0 +1,11 @@ +# Feedback 反馈 + +## 动漫插画模型 + +1. 视频处理不了: 目前的模型,不是针对视频的,所以视频效果很很不好。我们在探究针对视频的模型了 +1. 景深虚化有问题: 现在的模型把一些景深 和 特意的虚化 都复原了,感觉不好。这个后面我们会考虑把这个信息结合进入。一个简单的做法是识别景深和虚化,然后作为条件告诉神经网络,哪些地方复原强一些,哪些地方复原要弱一些 +1. 不可以调节: 像 Waifu2X 可以调节。可以根据自己的喜好,做调整,但是 Real-ESRGAN-anime 并不可以。导致有些恢复效果过了 +1. 把原来的风格改变了: 不同的动漫插画都有自己的风格,现在的 Real-ESRGAN-anime 倾向于恢复成一种风格(这是受到训练数据集影响的)。风格是动漫很重要的一个要素,所以要尽可能保持 +1. 模型太大: 目前的模型处理太慢,能够更快。这个我们有相关的工作在探究,希望能够尽快有结果,并应用到 Real-ESRGAN 这一系列的模型上 + +Thanks for the [detailed and valuable feedbacks/suggestions](https://github.com/xinntao/Real-ESRGAN/issues/131) by [2ji3150](https://github.com/2ji3150). diff --git a/Real-ESRGAN/docs/model_zoo.md b/Real-ESRGAN/docs/model_zoo.md new file mode 100644 index 0000000000000000000000000000000000000000..203c5f9f25e80cff5298131d2338ecda446f8745 --- /dev/null +++ b/Real-ESRGAN/docs/model_zoo.md @@ -0,0 +1,49 @@ +# :european_castle: Model Zoo + +- [For General Images](#for-general-images) +- [For Anime Images](#for-anime-images) +- [For Anime Videos](#for-anime-videos) + +--- + +## For General Images + +| Models | Scale | Description | +| ------------------------------------------------------------------------------------------------------------------------------- | :---- | :------------------------------------------- | +| [RealESRGAN_x4plus](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth) | X4 | X4 model for general images | +| [RealESRGAN_x2plus](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth) | X2 | X2 model for general images | +| [RealESRNet_x4plus](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/RealESRNet_x4plus.pth) | X4 | X4 model with MSE loss (over-smooth effects) | +| [official ESRGAN_x4](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/ESRGAN_SRx4_DF2KOST_official-ff704c30.pth) | X4 | official ESRGAN model | +| [realesr-general-x4v3](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth) | X4 (can also be used for X1, X2, X3) | A tiny small model (consume much fewer GPU memory and time); not too strong deblur and denoise capacity | + +The following models are **discriminators**, which are usually used for fine-tuning. 
+ +| Models | Corresponding model | +| ---------------------------------------------------------------------------------------------------------------------- | :------------------ | +| [RealESRGAN_x4plus_netD](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.3/RealESRGAN_x4plus_netD.pth) | RealESRGAN_x4plus | +| [RealESRGAN_x2plus_netD](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.3/RealESRGAN_x2plus_netD.pth) | RealESRGAN_x2plus | + +## For Anime Images / Illustrations + +| Models | Scale | Description | +| ------------------------------------------------------------------------------------------------------------------------------ | :---- | :---------------------------------------------------------- | +| [RealESRGAN_x4plus_anime_6B](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth) | X4 | Optimized for anime images; 6 RRDB blocks (smaller network) | + +The following models are **discriminators**, which are usually used for fine-tuning. + +| Models | Corresponding model | +| ---------------------------------------------------------------------------------------------------------------------------------------- | :------------------------- | +| [RealESRGAN_x4plus_anime_6B_netD](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B_netD.pth) | RealESRGAN_x4plus_anime_6B | + +## For Animation Videos + +| Models | Scale | Description | +| ---------------------------------------------------------------------------------------------------------------------------------- | :---- | :----------------------------- | +| [realesr-animevideov3](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-animevideov3.pth) | X41 | Anime video model with XS size | + +Note:
+1 This model can also be used for X1, X2, X3. + +The following models are **discriminators**, which are usually used for fine-tuning. + +TODO diff --git a/Real-ESRGAN/docs/ncnn_conversion.md b/Real-ESRGAN/docs/ncnn_conversion.md new file mode 100644 index 0000000000000000000000000000000000000000..076d23d1dcb5198c1d456b517875d74b1a3ece2c --- /dev/null +++ b/Real-ESRGAN/docs/ncnn_conversion.md @@ -0,0 +1,11 @@ +# Instructions on converting to NCNN models + +1. Convert to onnx model with `scripts/pytorch2onnx.py`. Remember to modify codes accordingly +1. Convert onnx model to ncnn model + 1. `cd ncnn-master\ncnn\build\tools\onnx` + 1. `onnx2ncnn.exe realesrgan-x4.onnx realesrgan-x4-raw.param realesrgan-x4-raw.bin` +1. Optimize ncnn model + 1. fp16 mode + 1. `cd ncnn-master\ncnn\build\tools` + 1. `ncnnoptimize.exe realesrgan-x4-raw.param realesrgan-x4-raw.bin realesrgan-x4.param realesrgan-x4.bin 1` +1. Modify the blob name in `realesrgan-x4.param`: `data` and `output` diff --git a/Real-ESRGAN/experiments/pretrained_models/README.md b/Real-ESRGAN/experiments/pretrained_models/README.md new file mode 100644 index 0000000000000000000000000000000000000000..bc70072ba3cf8735a6399dbb4a7129600be80ebf --- /dev/null +++ b/Real-ESRGAN/experiments/pretrained_models/README.md @@ -0,0 +1 @@ +# Put downloaded pre-trained models here diff --git a/Real-ESRGAN/inference_realesrgan.py b/Real-ESRGAN/inference_realesrgan.py new file mode 100644 index 0000000000000000000000000000000000000000..883a89796b3871ddb2df56b6915d1f223f2fdb78 --- /dev/null +++ b/Real-ESRGAN/inference_realesrgan.py @@ -0,0 +1,166 @@ +import argparse +import cv2 +import glob +import os +from basicsr.archs.rrdbnet_arch import RRDBNet +from basicsr.utils.download_util import load_file_from_url + +from realesrgan import RealESRGANer +from realesrgan.archs.srvgg_arch import SRVGGNetCompact + + +def main(): + """Inference demo for Real-ESRGAN. + """ + parser = argparse.ArgumentParser() + parser.add_argument('-i', '--input', type=str, default='inputs', help='Input image or folder') + parser.add_argument( + '-n', + '--model_name', + type=str, + default='RealESRGAN_x4plus', + help=('Model names: RealESRGAN_x4plus | RealESRNet_x4plus | RealESRGAN_x4plus_anime_6B | RealESRGAN_x2plus | ' + 'realesr-animevideov3 | realesr-general-x4v3')) + parser.add_argument('-o', '--output', type=str, default='results', help='Output folder') + parser.add_argument( + '-dn', + '--denoise_strength', + type=float, + default=0.5, + help=('Denoise strength. 0 for weak denoise (keep noise), 1 for strong denoise ability. ' + 'Only used for the realesr-general-x4v3 model')) + parser.add_argument('-s', '--outscale', type=float, default=4, help='The final upsampling scale of the image') + parser.add_argument( + '--model_path', type=str, default=None, help='[Option] Model path. Usually, you do not need to specify it') + parser.add_argument('--suffix', type=str, default='out', help='Suffix of the restored image') + parser.add_argument('-t', '--tile', type=int, default=0, help='Tile size, 0 for no tile during testing') + parser.add_argument('--tile_pad', type=int, default=10, help='Tile padding') + parser.add_argument('--pre_pad', type=int, default=0, help='Pre padding size at each border') + parser.add_argument('--face_enhance', action='store_true', help='Use GFPGAN to enhance face') + parser.add_argument( + '--fp32', action='store_true', help='Use fp32 precision during inference. 
Default: fp16 (half precision).') + parser.add_argument( + '--alpha_upsampler', + type=str, + default='realesrgan', + help='The upsampler for the alpha channels. Options: realesrgan | bicubic') + parser.add_argument( + '--ext', + type=str, + default='auto', + help='Image extension. Options: auto | jpg | png, auto means using the same extension as inputs') + parser.add_argument( + '-g', '--gpu-id', type=int, default=0,1, help='gpu device to use (default=None) can be 0,1,2 for multi-gpu') + + args = parser.parse_args() + + # determine models according to model names + args.model_name = args.model_name.split('.')[0] + if args.model_name == 'RealESRGAN_x4plus': # x4 RRDBNet model + model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4) + netscale = 4 + file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth'] + elif args.model_name == 'RealESRNet_x4plus': # x4 RRDBNet model + model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4) + netscale = 4 + file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/RealESRNet_x4plus.pth'] + elif args.model_name == 'RealESRGAN_x4plus_anime_6B': # x4 RRDBNet model with 6 blocks + model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4) + netscale = 4 + file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth'] + elif args.model_name == 'RealESRGAN_x2plus': # x2 RRDBNet model + model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=2) + netscale = 2 + file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth'] + elif args.model_name == 'realesr-animevideov3': # x4 VGG-style model (XS size) + model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=4, act_type='prelu') + netscale = 4 + file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-animevideov3.pth'] + elif args.model_name == 'realesr-general-x4v3': # x4 VGG-style model (S size) + model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=32, upscale=4, act_type='prelu') + netscale = 4 + file_url = [ + 'https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-wdn-x4v3.pth', + 'https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth' + ] + + # determine model paths + if args.model_path is not None: + model_path = args.model_path + else: + model_path = os.path.join('weights', args.model_name + '.pth') + if not os.path.isfile(model_path): + ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) + for url in file_url: + # model_path will be updated + model_path = load_file_from_url( + url=url, model_dir=os.path.join(ROOT_DIR, 'weights'), progress=True, file_name=None) + + # use dni to control the denoise strength + dni_weight = None + if args.model_name == 'realesr-general-x4v3' and args.denoise_strength != 1: + wdn_model_path = model_path.replace('realesr-general-x4v3', 'realesr-general-wdn-x4v3') + model_path = [model_path, wdn_model_path] + dni_weight = [args.denoise_strength, 1 - args.denoise_strength] + + # restorer + upsampler = RealESRGANer( + scale=netscale, + model_path=model_path, + dni_weight=dni_weight, + model=model, + tile=args.tile, + tile_pad=args.tile_pad, + pre_pad=args.pre_pad, + half=not args.fp32, + gpu_id=args.gpu_id) + + if args.face_enhance: # Use 
GFPGAN for face enhancement + from gfpgan import GFPGANer + face_enhancer = GFPGANer( + model_path='https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth', + upscale=args.outscale, + arch='clean', + channel_multiplier=2, + bg_upsampler=upsampler) + os.makedirs(args.output, exist_ok=True) + + if os.path.isfile(args.input): + paths = [args.input] + else: + paths = sorted(glob.glob(os.path.join(args.input, '*'))) + + for idx, path in enumerate(paths): + imgname, extension = os.path.splitext(os.path.basename(path)) + print('Testing', idx, imgname) + + img = cv2.imread(path, cv2.IMREAD_UNCHANGED) + if len(img.shape) == 3 and img.shape[2] == 4: + img_mode = 'RGBA' + else: + img_mode = None + + try: + if args.face_enhance: + _, _, output = face_enhancer.enhance(img, has_aligned=False, only_center_face=False, paste_back=True) + else: + output, _ = upsampler.enhance(img, outscale=args.outscale) + except RuntimeError as error: + print('Error', error) + print('If you encounter CUDA out of memory, try to set --tile with a smaller number.') + else: + if args.ext == 'auto': + extension = extension[1:] + else: + extension = args.ext + if img_mode == 'RGBA': # RGBA images should be saved in png format + extension = 'png' + if args.suffix == '': + save_path = os.path.join(args.output, f'{imgname}.{extension}') + else: + save_path = os.path.join(args.output, f'{imgname}_{args.suffix}.{extension}') + cv2.imwrite(save_path, output) + + +if __name__ == '__main__': + main() diff --git a/Real-ESRGAN/inference_realesrgan_video.py b/Real-ESRGAN/inference_realesrgan_video.py new file mode 100644 index 0000000000000000000000000000000000000000..f4cdc228a574bce4d1450aaf985801fd3e9a4737 --- /dev/null +++ b/Real-ESRGAN/inference_realesrgan_video.py @@ -0,0 +1,398 @@ +import argparse +import cv2 +import glob +import mimetypes +import numpy as np +import os +import shutil +import subprocess +import torch +from basicsr.archs.rrdbnet_arch import RRDBNet +from basicsr.utils.download_util import load_file_from_url +from os import path as osp +from tqdm import tqdm + +from realesrgan import RealESRGANer +from realesrgan.archs.srvgg_arch import SRVGGNetCompact + +try: + import ffmpeg +except ImportError: + import pip + pip.main(['install', '--user', 'ffmpeg-python']) + import ffmpeg + + +def get_video_meta_info(video_path): + ret = {} + probe = ffmpeg.probe(video_path) + video_streams = [stream for stream in probe['streams'] if stream['codec_type'] == 'video'] + has_audio = any(stream['codec_type'] == 'audio' for stream in probe['streams']) + ret['width'] = video_streams[0]['width'] + ret['height'] = video_streams[0]['height'] + ret['fps'] = eval(video_streams[0]['avg_frame_rate']) + ret['audio'] = ffmpeg.input(video_path).audio if has_audio else None + ret['nb_frames'] = int(video_streams[0]['nb_frames']) + return ret + + +def get_sub_video(args, num_process, process_idx): + if num_process == 1: + return args.input + meta = get_video_meta_info(args.input) + duration = int(meta['nb_frames'] / meta['fps']) + part_time = duration // num_process + print(f'duration: {duration}, part_time: {part_time}') + os.makedirs(osp.join(args.output, f'{args.video_name}_inp_tmp_videos'), exist_ok=True) + out_path = osp.join(args.output, f'{args.video_name}_inp_tmp_videos', f'{process_idx:03d}.mp4') + cmd = [ + args.ffmpeg_bin, f'-i {args.input}', '-ss', f'{part_time * process_idx}', + f'-to {part_time * (process_idx + 1)}' if process_idx != num_process - 1 else '', '-async 1', out_path, '-y' + ] + print(' '.join(cmd)) + 
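# each worker re-encodes only its own [part_time * idx, part_time * (idx + 1)] slice of the input; the last worker omits -to and runs to the end of the video +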
subprocess.call(' '.join(cmd), shell=True) + return out_path + + +class Reader: + + def __init__(self, args, total_workers=1, worker_idx=0): + self.args = args + input_type = mimetypes.guess_type(args.input)[0] + self.input_type = 'folder' if input_type is None else input_type + self.paths = [] # for image&folder type + self.audio = None + self.input_fps = None + if self.input_type.startswith('video'): + video_path = get_sub_video(args, total_workers, worker_idx) + self.stream_reader = ( + ffmpeg.input(video_path).output('pipe:', format='rawvideo', pix_fmt='bgr24', + loglevel='error').run_async( + pipe_stdin=True, pipe_stdout=True, cmd=args.ffmpeg_bin)) + meta = get_video_meta_info(video_path) + self.width = meta['width'] + self.height = meta['height'] + self.input_fps = meta['fps'] + self.audio = meta['audio'] + self.nb_frames = meta['nb_frames'] + + else: + if self.input_type.startswith('image'): + self.paths = [args.input] + else: + paths = sorted(glob.glob(os.path.join(args.input, '*'))) + tot_frames = len(paths) + num_frame_per_worker = tot_frames // total_workers + (1 if tot_frames % total_workers else 0) + self.paths = paths[num_frame_per_worker * worker_idx:num_frame_per_worker * (worker_idx + 1)] + + self.nb_frames = len(self.paths) + assert self.nb_frames > 0, 'empty folder' + from PIL import Image + tmp_img = Image.open(self.paths[0]) + self.width, self.height = tmp_img.size + self.idx = 0 + + def get_resolution(self): + return self.height, self.width + + def get_fps(self): + if self.args.fps is not None: + return self.args.fps + elif self.input_fps is not None: + return self.input_fps + return 24 + + def get_audio(self): + return self.audio + + def __len__(self): + return self.nb_frames + + def get_frame_from_stream(self): + img_bytes = self.stream_reader.stdout.read(self.width * self.height * 3) # 3 bytes for one pixel + if not img_bytes: + return None + img = np.frombuffer(img_bytes, np.uint8).reshape([self.height, self.width, 3]) + return img + + def get_frame_from_list(self): + if self.idx >= self.nb_frames: + return None + img = cv2.imread(self.paths[self.idx]) + self.idx += 1 + return img + + def get_frame(self): + if self.input_type.startswith('video'): + return self.get_frame_from_stream() + else: + return self.get_frame_from_list() + + def close(self): + if self.input_type.startswith('video'): + self.stream_reader.stdin.close() + self.stream_reader.wait() + + +class Writer: + + def __init__(self, args, audio, height, width, video_save_path, fps): + out_width, out_height = int(width * args.outscale), int(height * args.outscale) + if out_height > 2160: + print('You are generating video that is larger than 4K, which will be very slow due to IO speed.', + 'We highly recommend to decrease the outscale(aka, -s).') + + if audio is not None: + self.stream_writer = ( + ffmpeg.input('pipe:', format='rawvideo', pix_fmt='bgr24', s=f'{out_width}x{out_height}', + framerate=fps).output( + audio, + video_save_path, + pix_fmt='yuv420p', + vcodec='libx264', + loglevel='error', + acodec='copy').overwrite_output().run_async( + pipe_stdin=True, pipe_stdout=True, cmd=args.ffmpeg_bin)) + else: + self.stream_writer = ( + ffmpeg.input('pipe:', format='rawvideo', pix_fmt='bgr24', s=f'{out_width}x{out_height}', + framerate=fps).output( + video_save_path, pix_fmt='yuv420p', vcodec='libx264', + loglevel='error').overwrite_output().run_async( + pipe_stdin=True, pipe_stdout=True, cmd=args.ffmpeg_bin)) + + def write_frame(self, frame): + frame = frame.astype(np.uint8).tobytes() + 
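# the frame is serialized as raw bgr24 bytes and piped into the ffmpeg encoder's stdin +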
self.stream_writer.stdin.write(frame) + + def close(self): + self.stream_writer.stdin.close() + self.stream_writer.wait() + + +def inference_video(args, video_save_path, device=None, total_workers=1, worker_idx=0): + # ---------------------- determine models according to model names ---------------------- # + args.model_name = args.model_name.split('.pth')[0] + if args.model_name == 'RealESRGAN_x4plus': # x4 RRDBNet model + model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4) + netscale = 4 + file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth'] + elif args.model_name == 'RealESRNet_x4plus': # x4 RRDBNet model + model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4) + netscale = 4 + file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/RealESRNet_x4plus.pth'] + elif args.model_name == 'RealESRGAN_x4plus_anime_6B': # x4 RRDBNet model with 6 blocks + model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4) + netscale = 4 + file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth'] + elif args.model_name == 'RealESRGAN_x2plus': # x2 RRDBNet model + model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=2) + netscale = 2 + file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth'] + elif args.model_name == 'realesr-animevideov3': # x4 VGG-style model (XS size) + model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=4, act_type='prelu') + netscale = 4 + file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-animevideov3.pth'] + elif args.model_name == 'realesr-general-x4v3': # x4 VGG-style model (S size) + model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=32, upscale=4, act_type='prelu') + netscale = 4 + file_url = [ + 'https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-wdn-x4v3.pth', + 'https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth' + ] + + # ---------------------- determine model paths ---------------------- # + model_path = os.path.join('weights', args.model_name + '.pth') + if not os.path.isfile(model_path): + ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) + for url in file_url: + # model_path will be updated + model_path = load_file_from_url( + url=url, model_dir=os.path.join(ROOT_DIR, 'weights'), progress=True, file_name=None) + + # use dni to control the denoise strength + dni_weight = None + if args.model_name == 'realesr-general-x4v3' and args.denoise_strength != 1: + wdn_model_path = model_path.replace('realesr-general-x4v3', 'realesr-general-wdn-x4v3') + model_path = [model_path, wdn_model_path] + dni_weight = [args.denoise_strength, 1 - args.denoise_strength] + + # restorer + upsampler = RealESRGANer( + scale=netscale, + model_path=model_path, + dni_weight=dni_weight, + model=model, + tile=args.tile, + tile_pad=args.tile_pad, + pre_pad=args.pre_pad, + half=not args.fp32, + device=device, + ) + + if 'anime' in args.model_name and args.face_enhance: + print('face_enhance is not supported in anime models, we turned this option off for you. 
' + 'if you insist on turning it on, please manually comment the relevant lines of code.') + args.face_enhance = False + + if args.face_enhance: # Use GFPGAN for face enhancement + from gfpgan import GFPGANer + face_enhancer = GFPGANer( + model_path='https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth', + upscale=args.outscale, + arch='clean', + channel_multiplier=2, + bg_upsampler=upsampler) # TODO support custom device + else: + face_enhancer = None + + reader = Reader(args, total_workers, worker_idx) + audio = reader.get_audio() + height, width = reader.get_resolution() + fps = reader.get_fps() + writer = Writer(args, audio, height, width, video_save_path, fps) + + pbar = tqdm(total=len(reader), unit='frame', desc='inference') + while True: + img = reader.get_frame() + if img is None: + break + + try: + if args.face_enhance: + _, _, output = face_enhancer.enhance(img, has_aligned=False, only_center_face=False, paste_back=True) + else: + output, _ = upsampler.enhance(img, outscale=args.outscale) + except RuntimeError as error: + print('Error', error) + print('If you encounter CUDA out of memory, try to set --tile with a smaller number.') + else: + writer.write_frame(output) + + torch.cuda.synchronize(device) + pbar.update(1) + + reader.close() + writer.close() + + +def run(args): + args.video_name = osp.splitext(os.path.basename(args.input))[0] + video_save_path = osp.join(args.output, f'{args.video_name}_{args.suffix}.mp4') + + if args.extract_frame_first: + tmp_frames_folder = osp.join(args.output, f'{args.video_name}_inp_tmp_frames') + os.makedirs(tmp_frames_folder, exist_ok=True) + os.system(f'ffmpeg -i {args.input} -qscale:v 1 -qmin 1 -qmax 1 -vsync 0 {tmp_frames_folder}/frame%08d.png') + args.input = tmp_frames_folder + + num_gpus = torch.cuda.device_count() + num_process = num_gpus * args.num_process_per_gpu + if num_process == 1: + inference_video(args, video_save_path) + return + + ctx = torch.multiprocessing.get_context('spawn') + pool = ctx.Pool(num_process) + os.makedirs(osp.join(args.output, f'{args.video_name}_out_tmp_videos'), exist_ok=True) + pbar = tqdm(total=num_process, unit='sub_video', desc='inference') + for i in range(num_process): + sub_video_save_path = osp.join(args.output, f'{args.video_name}_out_tmp_videos', f'{i:03d}.mp4') + pool.apply_async( + inference_video, + args=(args, sub_video_save_path, torch.device(i % num_gpus), num_process, i), + callback=lambda arg: pbar.update(1)) + pool.close() + pool.join() + + # combine sub videos + # prepare vidlist.txt + with open(f'{args.output}/{args.video_name}_vidlist.txt', 'w') as f: + for i in range(num_process): + f.write(f'file \'{args.video_name}_out_tmp_videos/{i:03d}.mp4\'\n') + + cmd = [ + args.ffmpeg_bin, '-f', 'concat', '-safe', '0', '-i', f'{args.output}/{args.video_name}_vidlist.txt', '-c', + 'copy', f'{video_save_path}' + ] + print(' '.join(cmd)) + subprocess.call(cmd) + shutil.rmtree(osp.join(args.output, f'{args.video_name}_out_tmp_videos')) + if osp.exists(osp.join(args.output, f'{args.video_name}_inp_tmp_videos')): + shutil.rmtree(osp.join(args.output, f'{args.video_name}_inp_tmp_videos')) + os.remove(f'{args.output}/{args.video_name}_vidlist.txt') + + +def main(): + """Inference demo for Real-ESRGAN. + It mainly for restoring anime videos. 
+ + """ + parser = argparse.ArgumentParser() + parser.add_argument('-i', '--input', type=str, default='inputs', help='Input video, image or folder') + parser.add_argument( + '-n', + '--model_name', + type=str, + default='realesr-animevideov3', + help=('Model names: realesr-animevideov3 | RealESRGAN_x4plus_anime_6B | RealESRGAN_x4plus | RealESRNet_x4plus |' + ' RealESRGAN_x2plus | realesr-general-x4v3' + 'Default:realesr-animevideov3')) + parser.add_argument('-o', '--output', type=str, default='results', help='Output folder') + parser.add_argument( + '-dn', + '--denoise_strength', + type=float, + default=0.5, + help=('Denoise strength. 0 for weak denoise (keep noise), 1 for strong denoise ability. ' + 'Only used for the realesr-general-x4v3 model')) + parser.add_argument('-s', '--outscale', type=float, default=4, help='The final upsampling scale of the image') + parser.add_argument('--suffix', type=str, default='out', help='Suffix of the restored video') + parser.add_argument('-t', '--tile', type=int, default=0, help='Tile size, 0 for no tile during testing') + parser.add_argument('--tile_pad', type=int, default=10, help='Tile padding') + parser.add_argument('--pre_pad', type=int, default=0, help='Pre padding size at each border') + parser.add_argument('--face_enhance', action='store_true', help='Use GFPGAN to enhance face') + parser.add_argument( + '--fp32', action='store_true', help='Use fp32 precision during inference. Default: fp16 (half precision).') + parser.add_argument('--fps', type=float, default=None, help='FPS of the output video') + parser.add_argument('--ffmpeg_bin', type=str, default='ffmpeg', help='The path to ffmpeg') + parser.add_argument('--extract_frame_first', action='store_true') + parser.add_argument('--num_process_per_gpu', type=int, default=1) + + parser.add_argument( + '--alpha_upsampler', + type=str, + default='realesrgan', + help='The upsampler for the alpha channels. Options: realesrgan | bicubic') + parser.add_argument( + '--ext', + type=str, + default='auto', + help='Image extension. 
Options: auto | jpg | png, auto means using the same extension as inputs') + args = parser.parse_args() + + args.input = args.input.rstrip('/').rstrip('\\') + os.makedirs(args.output, exist_ok=True) + + if mimetypes.guess_type(args.input)[0] is not None and mimetypes.guess_type(args.input)[0].startswith('video'): + is_video = True + else: + is_video = False + + if is_video and args.input.endswith('.flv'): + mp4_path = args.input.replace('.flv', '.mp4') + os.system(f'ffmpeg -i {args.input} -codec copy {mp4_path}') + args.input = mp4_path + + if args.extract_frame_first and not is_video: + args.extract_frame_first = False + + run(args) + + if args.extract_frame_first: + tmp_frames_folder = osp.join(args.output, f'{args.video_name}_inp_tmp_frames') + shutil.rmtree(tmp_frames_folder) + + +if __name__ == '__main__': + main() diff --git a/Real-ESRGAN/inputs/00003.png b/Real-ESRGAN/inputs/00003.png new file mode 100644 index 0000000000000000000000000000000000000000..a6c8db3dae627eb7b79f5d32e7e194dad6432f49 --- /dev/null +++ b/Real-ESRGAN/inputs/00003.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d37932ae7d3137a0e38f8a90f7e3e16e13353399db6e29dca5a03a350f5fed1b +size 164429 diff --git a/Real-ESRGAN/inputs/00017_gray.png b/Real-ESRGAN/inputs/00017_gray.png new file mode 100644 index 0000000000000000000000000000000000000000..79af68e8aa0f036211734b7271633d88b2fc8f0d Binary files /dev/null and b/Real-ESRGAN/inputs/00017_gray.png differ diff --git a/Real-ESRGAN/inputs/0014.jpg b/Real-ESRGAN/inputs/0014.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f59554fe3143b3ffa27d6fcb04143124b4d0412b Binary files /dev/null and b/Real-ESRGAN/inputs/0014.jpg differ diff --git a/Real-ESRGAN/inputs/0030.jpg b/Real-ESRGAN/inputs/0030.jpg new file mode 100644 index 0000000000000000000000000000000000000000..61868926af738046e984bcf652134e3ea9b958d9 Binary files /dev/null and b/Real-ESRGAN/inputs/0030.jpg differ diff --git a/Real-ESRGAN/inputs/ADE_val_00000114.jpg b/Real-ESRGAN/inputs/ADE_val_00000114.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b4d9c9067adbcdd153527cef2c0cab4cf40bbfa5 Binary files /dev/null and b/Real-ESRGAN/inputs/ADE_val_00000114.jpg differ diff --git a/Real-ESRGAN/inputs/OST_009.png b/Real-ESRGAN/inputs/OST_009.png new file mode 100644 index 0000000000000000000000000000000000000000..26adc813b2fb44e67846c3e1395292831527e895 --- /dev/null +++ b/Real-ESRGAN/inputs/OST_009.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:62c8ec34919070f9c6fd3398d7a863b4d214adb4822331e9d507317b683ef46d +size 718095 diff --git a/Real-ESRGAN/inputs/children-alpha.png b/Real-ESRGAN/inputs/children-alpha.png new file mode 100644 index 0000000000000000000000000000000000000000..d0f5ad034f314b8a07574d10661518ca0a3f57de --- /dev/null +++ b/Real-ESRGAN/inputs/children-alpha.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:17323c91483660079e2e95fce438485b8f144bbaee50b2e7b10a9c343c628589 +size 274811 diff --git a/Real-ESRGAN/inputs/tree_alpha_16bit.png b/Real-ESRGAN/inputs/tree_alpha_16bit.png new file mode 100644 index 0000000000000000000000000000000000000000..4617d1d347addc6876dd93e6f958813df66cbe89 --- /dev/null +++ b/Real-ESRGAN/inputs/tree_alpha_16bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e6af49641c52884f1d5af6f8afcc75fa2ee0c31fb8e60a37e907b62aeb30d660 +size 382141 diff --git a/Real-ESRGAN/inputs/video/onepiece_demo.mp4 
b/Real-ESRGAN/inputs/video/onepiece_demo.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..f607a4983f57e4c309085af24433595b8149315e --- /dev/null +++ b/Real-ESRGAN/inputs/video/onepiece_demo.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2f2084db2ca6b363b0857758ac99084775c7f40cd074bf7e23c35e2ce0477778 +size 592566 diff --git a/Real-ESRGAN/inputs/wolf_gray.jpg b/Real-ESRGAN/inputs/wolf_gray.jpg new file mode 100644 index 0000000000000000000000000000000000000000..614766bdbcaa3730a8191afcb9616305381245ea Binary files /dev/null and b/Real-ESRGAN/inputs/wolf_gray.jpg differ diff --git a/Real-ESRGAN/options/finetune_realesrgan_x4plus.yml b/Real-ESRGAN/options/finetune_realesrgan_x4plus.yml new file mode 100644 index 0000000000000000000000000000000000000000..951dd5b973599114cd237106fe847f58d93b74ec --- /dev/null +++ b/Real-ESRGAN/options/finetune_realesrgan_x4plus.yml @@ -0,0 +1,188 @@ +# general settings +name: finetune_RealESRGANx4plus_400k +model_type: RealESRGANModel +scale: 4 +num_gpu: auto +manual_seed: 0 + +# ----------------- options for synthesizing training data in RealESRGANModel ----------------- # +# USM the ground-truth +l1_gt_usm: True +percep_gt_usm: True +gan_gt_usm: False + +# the first degradation process +resize_prob: [0.2, 0.7, 0.1] # up, down, keep +resize_range: [0.15, 1.5] +gaussian_noise_prob: 0.5 +noise_range: [1, 30] +poisson_scale_range: [0.05, 3] +gray_noise_prob: 0.4 +jpeg_range: [30, 95] + +# the second degradation process +second_blur_prob: 0.8 +resize_prob2: [0.3, 0.4, 0.3] # up, down, keep +resize_range2: [0.3, 1.2] +gaussian_noise_prob2: 0.5 +noise_range2: [1, 25] +poisson_scale_range2: [0.05, 2.5] +gray_noise_prob2: 0.4 +jpeg_range2: [30, 95] + +gt_size: 256 +queue_size: 180 + +# dataset and data loader settings +datasets: + train: + name: DF2K+OST + type: RealESRGANDataset + dataroot_gt: datasets/DF2K + meta_info: datasets/DF2K/meta_info/meta_info_DF2Kmultiscale+OST_sub.txt + io_backend: + type: disk + + blur_kernel_size: 21 + kernel_list: ['iso', 'aniso', 'generalized_iso', 'generalized_aniso', 'plateau_iso', 'plateau_aniso'] + kernel_prob: [0.45, 0.25, 0.12, 0.03, 0.12, 0.03] + sinc_prob: 0.1 + blur_sigma: [0.2, 3] + betag_range: [0.5, 4] + betap_range: [1, 2] + + blur_kernel_size2: 21 + kernel_list2: ['iso', 'aniso', 'generalized_iso', 'generalized_aniso', 'plateau_iso', 'plateau_aniso'] + kernel_prob2: [0.45, 0.25, 0.12, 0.03, 0.12, 0.03] + sinc_prob2: 0.1 + blur_sigma2: [0.2, 1.5] + betag_range2: [0.5, 4] + betap_range2: [1, 2] + + final_sinc_prob: 0.8 + + gt_size: 256 + use_hflip: True + use_rot: False + + # data loader + use_shuffle: true + num_worker_per_gpu: 5 + batch_size_per_gpu: 12 + dataset_enlarge_ratio: 1 + prefetch_mode: ~ + + # Uncomment these for validation + # val: + # name: validation + # type: PairedImageDataset + # dataroot_gt: path_to_gt + # dataroot_lq: path_to_lq + # io_backend: + # type: disk + +# network structures +network_g: + type: RRDBNet + num_in_ch: 3 + num_out_ch: 3 + num_feat: 64 + num_block: 23 + num_grow_ch: 32 + +network_d: + type: UNetDiscriminatorSN + num_in_ch: 3 + num_feat: 64 + skip_connection: True + +# path +path: + # use the pre-trained Real-ESRNet model + pretrain_network_g: experiments/pretrained_models/RealESRNet_x4plus.pth + param_key_g: params_ema + strict_load_g: true + pretrain_network_d: experiments/pretrained_models/RealESRGAN_x4plus_netD.pth + param_key_d: params + strict_load_d: true + resume_state: ~ + +# training settings +train: + ema_decay: 0.999 + 
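# ema_decay keeps an exponential moving average of the generator weights; Adam optimizers for the generator (optim_g) and discriminator (optim_d) follow +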
optim_g: + type: Adam + lr: !!float 1e-4 + weight_decay: 0 + betas: [0.9, 0.99] + optim_d: + type: Adam + lr: !!float 1e-4 + weight_decay: 0 + betas: [0.9, 0.99] + + scheduler: + type: MultiStepLR + milestones: [400000] + gamma: 0.5 + + total_iter: 400000 + warmup_iter: -1 # no warm up + + # losses + pixel_opt: + type: L1Loss + loss_weight: 1.0 + reduction: mean + # perceptual loss (content and style losses) + perceptual_opt: + type: PerceptualLoss + layer_weights: + # before relu + 'conv1_2': 0.1 + 'conv2_2': 0.1 + 'conv3_4': 1 + 'conv4_4': 1 + 'conv5_4': 1 + vgg_type: vgg19 + use_input_norm: true + perceptual_weight: !!float 1.0 + style_weight: 0 + range_norm: false + criterion: l1 + # gan loss + gan_opt: + type: GANLoss + gan_type: vanilla + real_label_val: 1.0 + fake_label_val: 0.0 + loss_weight: !!float 1e-1 + + net_d_iters: 1 + net_d_init_iters: 0 + +# Uncomment these for validation +# validation settings +# val: +# val_freq: !!float 5e3 +# save_img: True + +# metrics: +# psnr: # metric name +# type: calculate_psnr +# crop_border: 4 +# test_y_channel: false + +# logging settings +logger: + print_freq: 100 + save_checkpoint_freq: !!float 5e3 + use_tb_logger: true + wandb: + project: ~ + resume_id: ~ + +# dist training settings +dist_params: + backend: nccl + port: 29500 diff --git a/Real-ESRGAN/options/finetune_realesrgan_x4plus_pairdata.yml b/Real-ESRGAN/options/finetune_realesrgan_x4plus_pairdata.yml new file mode 100644 index 0000000000000000000000000000000000000000..392cd6ae4bf92825ebc9ad2bc613af886d764af2 --- /dev/null +++ b/Real-ESRGAN/options/finetune_realesrgan_x4plus_pairdata.yml @@ -0,0 +1,150 @@ +# general settings +name: finetune_RealESRGANx4plus_400k_pairdata +model_type: RealESRGANModel +scale: 4 +num_gpu: auto +manual_seed: 0 + +# USM the ground-truth +l1_gt_usm: True +percep_gt_usm: True +gan_gt_usm: False + +high_order_degradation: False # do not use the high-order degradation generation process + +# dataset and data loader settings +datasets: + train: + name: DIV2K + type: RealESRGANPairedDataset + dataroot_gt: datasets/DF2K + dataroot_lq: datasets/DF2K + meta_info: datasets/DF2K/meta_info/meta_info_DIV2K_sub_pair.txt + io_backend: + type: disk + + gt_size: 256 + use_hflip: True + use_rot: False + + # data loader + use_shuffle: true + num_worker_per_gpu: 5 + batch_size_per_gpu: 12 + dataset_enlarge_ratio: 1 + prefetch_mode: ~ + + # Uncomment these for validation + # val: + # name: validation + # type: PairedImageDataset + # dataroot_gt: path_to_gt + # dataroot_lq: path_to_lq + # io_backend: + # type: disk + +# network structures +network_g: + type: RRDBNet + num_in_ch: 3 + num_out_ch: 3 + num_feat: 64 + num_block: 23 + num_grow_ch: 32 + +network_d: + type: UNetDiscriminatorSN + num_in_ch: 3 + num_feat: 64 + skip_connection: True + +# path +path: + # use the pre-trained Real-ESRNet model + pretrain_network_g: experiments/pretrained_models/RealESRNet_x4plus.pth + param_key_g: params_ema + strict_load_g: true + pretrain_network_d: experiments/pretrained_models/RealESRGAN_x4plus_netD.pth + param_key_d: params + strict_load_d: true + resume_state: ~ + +# training settings +train: + ema_decay: 0.999 + optim_g: + type: Adam + lr: !!float 1e-4 + weight_decay: 0 + betas: [0.9, 0.99] + optim_d: + type: Adam + lr: !!float 1e-4 + weight_decay: 0 + betas: [0.9, 0.99] + + scheduler: + type: MultiStepLR + milestones: [400000] + gamma: 0.5 + + total_iter: 400000 + warmup_iter: -1 # no warm up + + # losses + pixel_opt: + type: L1Loss + loss_weight: 1.0 + reduction: mean + # 
perceptual loss (content and style losses) + perceptual_opt: + type: PerceptualLoss + layer_weights: + # before relu + 'conv1_2': 0.1 + 'conv2_2': 0.1 + 'conv3_4': 1 + 'conv4_4': 1 + 'conv5_4': 1 + vgg_type: vgg19 + use_input_norm: true + perceptual_weight: !!float 1.0 + style_weight: 0 + range_norm: false + criterion: l1 + # gan loss + gan_opt: + type: GANLoss + gan_type: vanilla + real_label_val: 1.0 + fake_label_val: 0.0 + loss_weight: !!float 1e-1 + + net_d_iters: 1 + net_d_init_iters: 0 + +# Uncomment these for validation +# validation settings +# val: +# val_freq: !!float 5e3 +# save_img: True + +# metrics: +# psnr: # metric name +# type: calculate_psnr +# crop_border: 4 +# test_y_channel: false + +# logging settings +logger: + print_freq: 100 + save_checkpoint_freq: !!float 5e3 + use_tb_logger: true + wandb: + project: ~ + resume_id: ~ + +# dist training settings +dist_params: + backend: nccl + port: 29500 diff --git a/Real-ESRGAN/options/train_realesrgan_x2plus.yml b/Real-ESRGAN/options/train_realesrgan_x2plus.yml new file mode 100644 index 0000000000000000000000000000000000000000..4c0fa06633fa54bb49b21337c0d11978dbbd75b6 --- /dev/null +++ b/Real-ESRGAN/options/train_realesrgan_x2plus.yml @@ -0,0 +1,186 @@ +# general settings +name: train_RealESRGANx2plus_400k_B12G4 +model_type: RealESRGANModel +scale: 2 +num_gpu: auto # auto: can infer from your visible devices automatically. official: 4 GPUs +manual_seed: 0 + +# ----------------- options for synthesizing training data in RealESRGANModel ----------------- # +# USM the ground-truth +l1_gt_usm: True +percep_gt_usm: True +gan_gt_usm: False + +# the first degradation process +resize_prob: [0.2, 0.7, 0.1] # up, down, keep +resize_range: [0.15, 1.5] +gaussian_noise_prob: 0.5 +noise_range: [1, 30] +poisson_scale_range: [0.05, 3] +gray_noise_prob: 0.4 +jpeg_range: [30, 95] + +# the second degradation process +second_blur_prob: 0.8 +resize_prob2: [0.3, 0.4, 0.3] # up, down, keep +resize_range2: [0.3, 1.2] +gaussian_noise_prob2: 0.5 +noise_range2: [1, 25] +poisson_scale_range2: [0.05, 2.5] +gray_noise_prob2: 0.4 +jpeg_range2: [30, 95] + +gt_size: 256 +queue_size: 180 + +# dataset and data loader settings +datasets: + train: + name: DF2K+OST + type: RealESRGANDataset + dataroot_gt: datasets/DF2K + meta_info: datasets/DF2K/meta_info/meta_info_DF2Kmultiscale+OST_sub.txt + io_backend: + type: disk + + blur_kernel_size: 21 + kernel_list: ['iso', 'aniso', 'generalized_iso', 'generalized_aniso', 'plateau_iso', 'plateau_aniso'] + kernel_prob: [0.45, 0.25, 0.12, 0.03, 0.12, 0.03] + sinc_prob: 0.1 + blur_sigma: [0.2, 3] + betag_range: [0.5, 4] + betap_range: [1, 2] + + blur_kernel_size2: 21 + kernel_list2: ['iso', 'aniso', 'generalized_iso', 'generalized_aniso', 'plateau_iso', 'plateau_aniso'] + kernel_prob2: [0.45, 0.25, 0.12, 0.03, 0.12, 0.03] + sinc_prob2: 0.1 + blur_sigma2: [0.2, 1.5] + betag_range2: [0.5, 4] + betap_range2: [1, 2] + + final_sinc_prob: 0.8 + + gt_size: 256 + use_hflip: True + use_rot: False + + # data loader + use_shuffle: true + num_worker_per_gpu: 5 + batch_size_per_gpu: 12 + dataset_enlarge_ratio: 1 + prefetch_mode: ~ + + # Uncomment these for validation + # val: + # name: validation + # type: PairedImageDataset + # dataroot_gt: path_to_gt + # dataroot_lq: path_to_lq + # io_backend: + # type: disk + +# network structures +network_g: + type: RRDBNet + num_in_ch: 3 + num_out_ch: 3 + num_feat: 64 + num_block: 23 + num_grow_ch: 32 + scale: 2 + +network_d: + type: UNetDiscriminatorSN + num_in_ch: 3 + num_feat: 64 + skip_connection: 
True + +# path +path: + # use the pre-trained Real-ESRNet model + pretrain_network_g: experiments/pretrained_models/RealESRNet_x2plus.pth + param_key_g: params_ema + strict_load_g: true + resume_state: ~ + +# training settings +train: + ema_decay: 0.999 + optim_g: + type: Adam + lr: !!float 1e-4 + weight_decay: 0 + betas: [0.9, 0.99] + optim_d: + type: Adam + lr: !!float 1e-4 + weight_decay: 0 + betas: [0.9, 0.99] + + scheduler: + type: MultiStepLR + milestones: [400000] + gamma: 0.5 + + total_iter: 400000 + warmup_iter: -1 # no warm up + + # losses + pixel_opt: + type: L1Loss + loss_weight: 1.0 + reduction: mean + # perceptual loss (content and style losses) + perceptual_opt: + type: PerceptualLoss + layer_weights: + # before relu + 'conv1_2': 0.1 + 'conv2_2': 0.1 + 'conv3_4': 1 + 'conv4_4': 1 + 'conv5_4': 1 + vgg_type: vgg19 + use_input_norm: true + perceptual_weight: !!float 1.0 + style_weight: 0 + range_norm: false + criterion: l1 + # gan loss + gan_opt: + type: GANLoss + gan_type: vanilla + real_label_val: 1.0 + fake_label_val: 0.0 + loss_weight: !!float 1e-1 + + net_d_iters: 1 + net_d_init_iters: 0 + +# Uncomment these for validation +# validation settings +# val: +# val_freq: !!float 5e3 +# save_img: True + +# metrics: +# psnr: # metric name +# type: calculate_psnr +# crop_border: 4 +# test_y_channel: false + +# logging settings +logger: + print_freq: 100 + save_checkpoint_freq: !!float 5e3 + use_tb_logger: true + wandb: + project: ~ + resume_id: ~ + +# dist training settings +dist_params: + backend: nccl + port: 29500 diff --git a/Real-ESRGAN/options/train_realesrgan_x4plus.yml b/Real-ESRGAN/options/train_realesrgan_x4plus.yml new file mode 100644 index 0000000000000000000000000000000000000000..bd54d7bcfc540bef7a0dc8fe82121b98b9588be1 --- /dev/null +++ b/Real-ESRGAN/options/train_realesrgan_x4plus.yml @@ -0,0 +1,185 @@ +# general settings +name: train_RealESRGANx4plus_400k_B12G4 +model_type: RealESRGANModel +scale: 4 +num_gpu: auto # auto: can infer from your visible devices automatically. 
official: 4 GPUs +manual_seed: 0 + +# ----------------- options for synthesizing training data in RealESRGANModel ----------------- # +# USM the ground-truth +l1_gt_usm: True +percep_gt_usm: True +gan_gt_usm: False + +# the first degradation process +resize_prob: [0.2, 0.7, 0.1] # up, down, keep +resize_range: [0.15, 1.5] +gaussian_noise_prob: 0.5 +noise_range: [1, 30] +poisson_scale_range: [0.05, 3] +gray_noise_prob: 0.4 +jpeg_range: [30, 95] + +# the second degradation process +second_blur_prob: 0.8 +resize_prob2: [0.3, 0.4, 0.3] # up, down, keep +resize_range2: [0.3, 1.2] +gaussian_noise_prob2: 0.5 +noise_range2: [1, 25] +poisson_scale_range2: [0.05, 2.5] +gray_noise_prob2: 0.4 +jpeg_range2: [30, 95] + +gt_size: 256 +queue_size: 180 + +# dataset and data loader settings +datasets: + train: + name: DF2K+OST + type: RealESRGANDataset + dataroot_gt: datasets/DF2K + meta_info: datasets/DF2K/meta_info/meta_info_DF2Kmultiscale+OST_sub.txt + io_backend: + type: disk + + blur_kernel_size: 21 + kernel_list: ['iso', 'aniso', 'generalized_iso', 'generalized_aniso', 'plateau_iso', 'plateau_aniso'] + kernel_prob: [0.45, 0.25, 0.12, 0.03, 0.12, 0.03] + sinc_prob: 0.1 + blur_sigma: [0.2, 3] + betag_range: [0.5, 4] + betap_range: [1, 2] + + blur_kernel_size2: 21 + kernel_list2: ['iso', 'aniso', 'generalized_iso', 'generalized_aniso', 'plateau_iso', 'plateau_aniso'] + kernel_prob2: [0.45, 0.25, 0.12, 0.03, 0.12, 0.03] + sinc_prob2: 0.1 + blur_sigma2: [0.2, 1.5] + betag_range2: [0.5, 4] + betap_range2: [1, 2] + + final_sinc_prob: 0.8 + + gt_size: 256 + use_hflip: True + use_rot: False + + # data loader + use_shuffle: true + num_worker_per_gpu: 5 + batch_size_per_gpu: 12 + dataset_enlarge_ratio: 1 + prefetch_mode: ~ + + # Uncomment these for validation + # val: + # name: validation + # type: PairedImageDataset + # dataroot_gt: path_to_gt + # dataroot_lq: path_to_lq + # io_backend: + # type: disk + +# network structures +network_g: + type: RRDBNet + num_in_ch: 3 + num_out_ch: 3 + num_feat: 64 + num_block: 23 + num_grow_ch: 32 + +network_d: + type: UNetDiscriminatorSN + num_in_ch: 3 + num_feat: 64 + skip_connection: True + +# path +path: + # use the pre-trained Real-ESRNet model + pretrain_network_g: experiments/pretrained_models/RealESRNet_x4plus.pth + param_key_g: params_ema + strict_load_g: true + resume_state: ~ + +# training settings +train: + ema_decay: 0.999 + optim_g: + type: Adam + lr: !!float 1e-4 + weight_decay: 0 + betas: [0.9, 0.99] + optim_d: + type: Adam + lr: !!float 1e-4 + weight_decay: 0 + betas: [0.9, 0.99] + + scheduler: + type: MultiStepLR + milestones: [400000] + gamma: 0.5 + + total_iter: 400000 + warmup_iter: -1 # no warm up + + # losses + pixel_opt: + type: L1Loss + loss_weight: 1.0 + reduction: mean + # perceptual loss (content and style losses) + perceptual_opt: + type: PerceptualLoss + layer_weights: + # before relu + 'conv1_2': 0.1 + 'conv2_2': 0.1 + 'conv3_4': 1 + 'conv4_4': 1 + 'conv5_4': 1 + vgg_type: vgg19 + use_input_norm: true + perceptual_weight: !!float 1.0 + style_weight: 0 + range_norm: false + criterion: l1 + # gan loss + gan_opt: + type: GANLoss + gan_type: vanilla + real_label_val: 1.0 + fake_label_val: 0.0 + loss_weight: !!float 1e-1 + + net_d_iters: 1 + net_d_init_iters: 0 + +# Uncomment these for validation +# validation settings +# val: +# val_freq: !!float 5e3 +# save_img: True + +# metrics: +# psnr: # metric name +# type: calculate_psnr +# crop_border: 4 +# test_y_channel: false + +# logging settings +logger: + print_freq: 100 + save_checkpoint_freq: !!float 
5e3 + use_tb_logger: true + wandb: + project: ~ + resume_id: ~ + +# dist training settings +dist_params: + backend: nccl + port: 29500 diff --git a/Real-ESRGAN/options/train_realesrnet_x2plus.yml b/Real-ESRGAN/options/train_realesrnet_x2plus.yml new file mode 100644 index 0000000000000000000000000000000000000000..b30b954e72727ac5e71aba86b2b6c5a29fda1f06 --- /dev/null +++ b/Real-ESRGAN/options/train_realesrnet_x2plus.yml @@ -0,0 +1,145 @@ +# general settings +name: train_RealESRNetx2plus_1000k_B12G4 +model_type: RealESRNetModel +scale: 2 +num_gpu: auto # auto: can infer from your visible devices automatically. official: 4 GPUs +manual_seed: 0 + +# ----------------- options for synthesizing training data in RealESRNetModel ----------------- # +gt_usm: True # USM the ground-truth + +# the first degradation process +resize_prob: [0.2, 0.7, 0.1] # up, down, keep +resize_range: [0.15, 1.5] +gaussian_noise_prob: 0.5 +noise_range: [1, 30] +poisson_scale_range: [0.05, 3] +gray_noise_prob: 0.4 +jpeg_range: [30, 95] + +# the second degradation process +second_blur_prob: 0.8 +resize_prob2: [0.3, 0.4, 0.3] # up, down, keep +resize_range2: [0.3, 1.2] +gaussian_noise_prob2: 0.5 +noise_range2: [1, 25] +poisson_scale_range2: [0.05, 2.5] +gray_noise_prob2: 0.4 +jpeg_range2: [30, 95] + +gt_size: 256 +queue_size: 180 + +# dataset and data loader settings +datasets: + train: + name: DF2K+OST + type: RealESRGANDataset + dataroot_gt: datasets/DF2K + meta_info: datasets/DF2K/meta_info/meta_info_DF2Kmultiscale+OST_sub.txt + io_backend: + type: disk + + blur_kernel_size: 21 + kernel_list: ['iso', 'aniso', 'generalized_iso', 'generalized_aniso', 'plateau_iso', 'plateau_aniso'] + kernel_prob: [0.45, 0.25, 0.12, 0.03, 0.12, 0.03] + sinc_prob: 0.1 + blur_sigma: [0.2, 3] + betag_range: [0.5, 4] + betap_range: [1, 2] + + blur_kernel_size2: 21 + kernel_list2: ['iso', 'aniso', 'generalized_iso', 'generalized_aniso', 'plateau_iso', 'plateau_aniso'] + kernel_prob2: [0.45, 0.25, 0.12, 0.03, 0.12, 0.03] + sinc_prob2: 0.1 + blur_sigma2: [0.2, 1.5] + betag_range2: [0.5, 4] + betap_range2: [1, 2] + + final_sinc_prob: 0.8 + + gt_size: 256 + use_hflip: True + use_rot: False + + # data loader + use_shuffle: true + num_worker_per_gpu: 5 + batch_size_per_gpu: 12 + dataset_enlarge_ratio: 1 + prefetch_mode: ~ + + # Uncomment these for validation + # val: + # name: validation + # type: PairedImageDataset + # dataroot_gt: path_to_gt + # dataroot_lq: path_to_lq + # io_backend: + # type: disk + +# network structures +network_g: + type: RRDBNet + num_in_ch: 3 + num_out_ch: 3 + num_feat: 64 + num_block: 23 + num_grow_ch: 32 + scale: 2 + +# path +path: + pretrain_network_g: experiments/pretrained_models/RealESRGAN_x4plus.pth + param_key_g: params_ema + strict_load_g: False + resume_state: ~ + +# training settings +train: + ema_decay: 0.999 + optim_g: + type: Adam + lr: !!float 2e-4 + weight_decay: 0 + betas: [0.9, 0.99] + + scheduler: + type: MultiStepLR + milestones: [1000000] + gamma: 0.5 + + total_iter: 1000000 + warmup_iter: -1 # no warm up + + # losses + pixel_opt: + type: L1Loss + loss_weight: 1.0 + reduction: mean + +# Uncomment these for validation +# validation settings +# val: +# val_freq: !!float 5e3 +# save_img: True + +# metrics: +# psnr: # metric name +# type: calculate_psnr +# crop_border: 4 +# test_y_channel: false + +# logging settings +logger: + print_freq: 100 + save_checkpoint_freq: !!float 5e3 + use_tb_logger: true + wandb: + project: ~ + resume_id: ~ + +# dist training settings +dist_params: + backend: nccl + port: 29500 
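Usage sketch for the option files above: a minimal example, assuming the BasicSR-style launcher realesrgan/train.py bundled with Real-ESRGAN and that commands are run from the Real-ESRGAN directory; the exact flags and GPU counts are illustrative.
    # single GPU
    python realesrgan/train.py -opt options/train_realesrnet_x2plus.yml --auto_resume
    # distributed training, e.g. 4 GPUs
    python -m torch.distributed.launch --nproc_per_node=4 --master_port=4321 realesrgan/train.py -opt options/train_realesrgan_x4plus.yml --launcher pytorch --auto_resume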
diff --git a/Real-ESRGAN/options/train_realesrnet_x4plus.yml b/Real-ESRGAN/options/train_realesrnet_x4plus.yml new file mode 100644 index 0000000000000000000000000000000000000000..a000e9cbf0c4943c023950c052d7568dc457d498 --- /dev/null +++ b/Real-ESRGAN/options/train_realesrnet_x4plus.yml @@ -0,0 +1,144 @@ +# general settings +name: train_RealESRNetx4plus_1000k_B12G4 +model_type: RealESRNetModel +scale: 4 +num_gpu: auto # auto: can infer from your visible devices automatically. official: 4 GPUs +manual_seed: 0 + +# ----------------- options for synthesizing training data in RealESRNetModel ----------------- # +gt_usm: True # USM the ground-truth + +# the first degradation process +resize_prob: [0.2, 0.7, 0.1] # up, down, keep +resize_range: [0.15, 1.5] +gaussian_noise_prob: 0.5 +noise_range: [1, 30] +poisson_scale_range: [0.05, 3] +gray_noise_prob: 0.4 +jpeg_range: [30, 95] + +# the second degradation process +second_blur_prob: 0.8 +resize_prob2: [0.3, 0.4, 0.3] # up, down, keep +resize_range2: [0.3, 1.2] +gaussian_noise_prob2: 0.5 +noise_range2: [1, 25] +poisson_scale_range2: [0.05, 2.5] +gray_noise_prob2: 0.4 +jpeg_range2: [30, 95] + +gt_size: 256 +queue_size: 180 + +# dataset and data loader settings +datasets: + train: + name: DF2K+OST + type: RealESRGANDataset + dataroot_gt: datasets/DF2K + meta_info: datasets/DF2K/meta_info/meta_info_DF2Kmultiscale+OST_sub.txt + io_backend: + type: disk + + blur_kernel_size: 21 + kernel_list: ['iso', 'aniso', 'generalized_iso', 'generalized_aniso', 'plateau_iso', 'plateau_aniso'] + kernel_prob: [0.45, 0.25, 0.12, 0.03, 0.12, 0.03] + sinc_prob: 0.1 + blur_sigma: [0.2, 3] + betag_range: [0.5, 4] + betap_range: [1, 2] + + blur_kernel_size2: 21 + kernel_list2: ['iso', 'aniso', 'generalized_iso', 'generalized_aniso', 'plateau_iso', 'plateau_aniso'] + kernel_prob2: [0.45, 0.25, 0.12, 0.03, 0.12, 0.03] + sinc_prob2: 0.1 + blur_sigma2: [0.2, 1.5] + betag_range2: [0.5, 4] + betap_range2: [1, 2] + + final_sinc_prob: 0.8 + + gt_size: 256 + use_hflip: True + use_rot: False + + # data loader + use_shuffle: true + num_worker_per_gpu: 5 + batch_size_per_gpu: 12 + dataset_enlarge_ratio: 1 + prefetch_mode: ~ + + # Uncomment these for validation + # val: + # name: validation + # type: PairedImageDataset + # dataroot_gt: path_to_gt + # dataroot_lq: path_to_lq + # io_backend: + # type: disk + +# network structures +network_g: + type: RRDBNet + num_in_ch: 3 + num_out_ch: 3 + num_feat: 64 + num_block: 23 + num_grow_ch: 32 + +# path +path: + pretrain_network_g: experiments/pretrained_models/ESRGAN_SRx4_DF2KOST_official-ff704c30.pth + param_key_g: params_ema + strict_load_g: true + resume_state: ~ + +# training settings +train: + ema_decay: 0.999 + optim_g: + type: Adam + lr: !!float 2e-4 + weight_decay: 0 + betas: [0.9, 0.99] + + scheduler: + type: MultiStepLR + milestones: [1000000] + gamma: 0.5 + + total_iter: 1000000 + warmup_iter: -1 # no warm up + + # losses + pixel_opt: + type: L1Loss + loss_weight: 1.0 + reduction: mean + +# Uncomment these for validation +# validation settings +# val: +# val_freq: !!float 5e3 +# save_img: True + +# metrics: +# psnr: # metric name +# type: calculate_psnr +# crop_border: 4 +# test_y_channel: false + +# logging settings +logger: + print_freq: 100 + save_checkpoint_freq: !!float 5e3 + use_tb_logger: true + wandb: + project: ~ + resume_id: ~ + +# dist training settings +dist_params: + backend: nccl + port: 29500 diff --git a/Real-ESRGAN/realesrgan/__init__.py b/Real-ESRGAN/realesrgan/__init__.py new file mode 100644 index 
0000000000000000000000000000000000000000..f41e7bcf6a7fcc588f63036ae0a9b903c15187e3 --- /dev/null +++ b/Real-ESRGAN/realesrgan/__init__.py @@ -0,0 +1,6 @@ +# flake8: noqa +from .archs import * +from .data import * +from .models import * +from .utils import * +from .version import * diff --git a/Real-ESRGAN/realesrgan/archs/__init__.py b/Real-ESRGAN/realesrgan/archs/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4bc94aeedcef180b06d100cbe86a57173762c78b --- /dev/null +++ b/Real-ESRGAN/realesrgan/archs/__init__.py @@ -0,0 +1,10 @@ +import importlib +from basicsr.utils import scandir +from os import path as osp + +# automatically scan and import arch modules for registry +# scan all the files that end with '_arch.py' under the archs folder +arch_folder = osp.dirname(osp.abspath(__file__)) +arch_filenames = [osp.splitext(osp.basename(v))[0] for v in scandir(arch_folder) if v.endswith('_arch.py')] +# import all the arch modules +_arch_modules = [importlib.import_module(f'realesrgan.archs.{file_name}') for file_name in arch_filenames] diff --git a/Real-ESRGAN/realesrgan/archs/discriminator_arch.py b/Real-ESRGAN/realesrgan/archs/discriminator_arch.py new file mode 100644 index 0000000000000000000000000000000000000000..03a9f8b2a539f079e472565ca8cf1baf2f0b9c5e --- /dev/null +++ b/Real-ESRGAN/realesrgan/archs/discriminator_arch.py @@ -0,0 +1,67 @@ +from basicsr.utils.registry import ARCH_REGISTRY +from torch import nn as nn +from torch.nn import functional as F +from torch.nn.utils import spectral_norm + + +@ARCH_REGISTRY.register() +class UNetDiscriminatorSN(nn.Module): + """Defines a U-Net discriminator with spectral normalization (SN) + + It is used in Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data. + + Arg: + num_in_ch (int): Channel number of inputs. Default: 3. + num_feat (int): Channel number of base intermediate features. Default: 64. + skip_connection (bool): Whether to use skip connections between U-Net. Default: True. 
+ """ + + def __init__(self, num_in_ch, num_feat=64, skip_connection=True): + super(UNetDiscriminatorSN, self).__init__() + self.skip_connection = skip_connection + norm = spectral_norm + # the first convolution + self.conv0 = nn.Conv2d(num_in_ch, num_feat, kernel_size=3, stride=1, padding=1) + # downsample + self.conv1 = norm(nn.Conv2d(num_feat, num_feat * 2, 4, 2, 1, bias=False)) + self.conv2 = norm(nn.Conv2d(num_feat * 2, num_feat * 4, 4, 2, 1, bias=False)) + self.conv3 = norm(nn.Conv2d(num_feat * 4, num_feat * 8, 4, 2, 1, bias=False)) + # upsample + self.conv4 = norm(nn.Conv2d(num_feat * 8, num_feat * 4, 3, 1, 1, bias=False)) + self.conv5 = norm(nn.Conv2d(num_feat * 4, num_feat * 2, 3, 1, 1, bias=False)) + self.conv6 = norm(nn.Conv2d(num_feat * 2, num_feat, 3, 1, 1, bias=False)) + # extra convolutions + self.conv7 = norm(nn.Conv2d(num_feat, num_feat, 3, 1, 1, bias=False)) + self.conv8 = norm(nn.Conv2d(num_feat, num_feat, 3, 1, 1, bias=False)) + self.conv9 = nn.Conv2d(num_feat, 1, 3, 1, 1) + + def forward(self, x): + # downsample + x0 = F.leaky_relu(self.conv0(x), negative_slope=0.2, inplace=True) + x1 = F.leaky_relu(self.conv1(x0), negative_slope=0.2, inplace=True) + x2 = F.leaky_relu(self.conv2(x1), negative_slope=0.2, inplace=True) + x3 = F.leaky_relu(self.conv3(x2), negative_slope=0.2, inplace=True) + + # upsample + x3 = F.interpolate(x3, scale_factor=2, mode='bilinear', align_corners=False) + x4 = F.leaky_relu(self.conv4(x3), negative_slope=0.2, inplace=True) + + if self.skip_connection: + x4 = x4 + x2 + x4 = F.interpolate(x4, scale_factor=2, mode='bilinear', align_corners=False) + x5 = F.leaky_relu(self.conv5(x4), negative_slope=0.2, inplace=True) + + if self.skip_connection: + x5 = x5 + x1 + x5 = F.interpolate(x5, scale_factor=2, mode='bilinear', align_corners=False) + x6 = F.leaky_relu(self.conv6(x5), negative_slope=0.2, inplace=True) + + if self.skip_connection: + x6 = x6 + x0 + + # extra convolutions + out = F.leaky_relu(self.conv7(x6), negative_slope=0.2, inplace=True) + out = F.leaky_relu(self.conv8(out), negative_slope=0.2, inplace=True) + out = self.conv9(out) + + return out diff --git a/Real-ESRGAN/realesrgan/archs/srvgg_arch.py b/Real-ESRGAN/realesrgan/archs/srvgg_arch.py new file mode 100644 index 0000000000000000000000000000000000000000..302e70e2cc9a2db503f854c03c4c93d972f39305 --- /dev/null +++ b/Real-ESRGAN/realesrgan/archs/srvgg_arch.py @@ -0,0 +1,69 @@ +from basicsr.utils.registry import ARCH_REGISTRY +from torch import nn as nn +from torch.nn import functional as F + + +@ARCH_REGISTRY.register() +class SRVGGNetCompact(nn.Module): + """A compact VGG-style network structure for super-resolution. + + It is a compact network structure, which performs upsampling in the last layer and no convolution is + conducted on the HR feature space. + + Args: + num_in_ch (int): Channel number of inputs. Default: 3. + num_out_ch (int): Channel number of outputs. Default: 3. + num_feat (int): Channel number of intermediate features. Default: 64. + num_conv (int): Number of convolution layers in the body network. Default: 16. + upscale (int): Upsampling factor. Default: 4. + act_type (str): Activation type, options: 'relu', 'prelu', 'leakyrelu'. Default: prelu. 
+ """ + + def __init__(self, num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=4, act_type='prelu'): + super(SRVGGNetCompact, self).__init__() + self.num_in_ch = num_in_ch + self.num_out_ch = num_out_ch + self.num_feat = num_feat + self.num_conv = num_conv + self.upscale = upscale + self.act_type = act_type + + self.body = nn.ModuleList() + # the first conv + self.body.append(nn.Conv2d(num_in_ch, num_feat, 3, 1, 1)) + # the first activation + if act_type == 'relu': + activation = nn.ReLU(inplace=True) + elif act_type == 'prelu': + activation = nn.PReLU(num_parameters=num_feat) + elif act_type == 'leakyrelu': + activation = nn.LeakyReLU(negative_slope=0.1, inplace=True) + self.body.append(activation) + + # the body structure + for _ in range(num_conv): + self.body.append(nn.Conv2d(num_feat, num_feat, 3, 1, 1)) + # activation + if act_type == 'relu': + activation = nn.ReLU(inplace=True) + elif act_type == 'prelu': + activation = nn.PReLU(num_parameters=num_feat) + elif act_type == 'leakyrelu': + activation = nn.LeakyReLU(negative_slope=0.1, inplace=True) + self.body.append(activation) + + # the last conv + self.body.append(nn.Conv2d(num_feat, num_out_ch * upscale * upscale, 3, 1, 1)) + # upsample + self.upsampler = nn.PixelShuffle(upscale) + + def forward(self, x): + out = x + for i in range(0, len(self.body)): + out = self.body[i](out) + + out = self.upsampler(out) + # add the nearest upsampled image, so that the network learns the residual + base = F.interpolate(x, scale_factor=self.upscale, mode='nearest') + out += base + return out diff --git a/Real-ESRGAN/realesrgan/data/__init__.py b/Real-ESRGAN/realesrgan/data/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3b064996a1ed9b7457c3494f6ef7134a055d6a60 --- /dev/null +++ b/Real-ESRGAN/realesrgan/data/__init__.py @@ -0,0 +1,10 @@ +import importlib +from basicsr.utils import scandir +from os import path as osp + +# automatically scan and import dataset modules for registry +# scan all the files that end with '_dataset.py' under the data folder +data_folder = osp.dirname(osp.abspath(__file__)) +dataset_filenames = [osp.splitext(osp.basename(v))[0] for v in scandir(data_folder) if v.endswith('_dataset.py')] +# import all the dataset modules +_dataset_modules = [importlib.import_module(f'realesrgan.data.{file_name}') for file_name in dataset_filenames] diff --git a/Real-ESRGAN/realesrgan/data/realesrgan_dataset.py b/Real-ESRGAN/realesrgan/data/realesrgan_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..e63029da3daf6cac313f540c4d501cd92dda69cf --- /dev/null +++ b/Real-ESRGAN/realesrgan/data/realesrgan_dataset.py @@ -0,0 +1,192 @@ +import cv2 +import math +import numpy as np +import os +import os.path as osp +import random +import time +import torch +from basicsr.data.degradations import circular_lowpass_kernel, random_mixed_kernels +from basicsr.data.transforms import augment +from basicsr.utils import FileClient, get_root_logger, imfrombytes, img2tensor +from basicsr.utils.registry import DATASET_REGISTRY +from torch.utils import data as data + + +@DATASET_REGISTRY.register() +class RealESRGANDataset(data.Dataset): + """Dataset used for Real-ESRGAN model: + Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data. + + It loads gt (Ground-Truth) images, and augments them. + It also generates blur kernels and sinc kernels for generating low-quality images. + Note that the low-quality images are processed in tensors on GPUS for faster processing. 
+ + Args: + opt (dict): Config for train datasets. It contains the following keys: + dataroot_gt (str): Data root path for gt. + meta_info (str): Path for meta information file. + io_backend (dict): IO backend type and other kwarg. + use_hflip (bool): Use horizontal flips. + use_rot (bool): Use rotation (use vertical flip and transposing h and w for implementation). + Please see more options in the codes. + """ + + def __init__(self, opt): + super(RealESRGANDataset, self).__init__() + self.opt = opt + self.file_client = None + self.io_backend_opt = opt['io_backend'] + self.gt_folder = opt['dataroot_gt'] + + # file client (lmdb io backend) + if self.io_backend_opt['type'] == 'lmdb': + self.io_backend_opt['db_paths'] = [self.gt_folder] + self.io_backend_opt['client_keys'] = ['gt'] + if not self.gt_folder.endswith('.lmdb'): + raise ValueError(f"'dataroot_gt' should end with '.lmdb', but received {self.gt_folder}") + with open(osp.join(self.gt_folder, 'meta_info.txt')) as fin: + self.paths = [line.split('.')[0] for line in fin] + else: + # disk backend with meta_info + # Each line in the meta_info describes the relative path to an image + with open(self.opt['meta_info']) as fin: + paths = [line.strip().split(' ')[0] for line in fin] + self.paths = [os.path.join(self.gt_folder, v) for v in paths] + + # blur settings for the first degradation + self.blur_kernel_size = opt['blur_kernel_size'] + self.kernel_list = opt['kernel_list'] + self.kernel_prob = opt['kernel_prob'] # a list for each kernel probability + self.blur_sigma = opt['blur_sigma'] + self.betag_range = opt['betag_range'] # betag used in generalized Gaussian blur kernels + self.betap_range = opt['betap_range'] # betap used in plateau blur kernels + self.sinc_prob = opt['sinc_prob'] # the probability for sinc filters + + # blur settings for the second degradation + self.blur_kernel_size2 = opt['blur_kernel_size2'] + self.kernel_list2 = opt['kernel_list2'] + self.kernel_prob2 = opt['kernel_prob2'] + self.blur_sigma2 = opt['blur_sigma2'] + self.betag_range2 = opt['betag_range2'] + self.betap_range2 = opt['betap_range2'] + self.sinc_prob2 = opt['sinc_prob2'] + + # a final sinc filter + self.final_sinc_prob = opt['final_sinc_prob'] + + self.kernel_range = [2 * v + 1 for v in range(3, 11)] # kernel size ranges from 7 to 21 + # TODO: kernel range is now hard-coded, should be in the configure file + self.pulse_tensor = torch.zeros(21, 21).float() # convolving with pulse tensor brings no blurry effect + self.pulse_tensor[10, 10] = 1 + + def __getitem__(self, index): + if self.file_client is None: + self.file_client = FileClient(self.io_backend_opt.pop('type'), **self.io_backend_opt) + + # -------------------------------- Load gt images -------------------------------- # + # Shape: (h, w, c); channel order: BGR; image range: [0, 1], float32. 
+ gt_path = self.paths[index] + # avoid errors caused by high latency in reading files + retry = 3 + while retry > 0: + try: + img_bytes = self.file_client.get(gt_path, 'gt') + except (IOError, OSError) as e: + logger = get_root_logger() + logger.warn(f'File client error: {e}, remaining retry times: {retry - 1}') + # change another file to read + index = random.randint(0, self.__len__()) + gt_path = self.paths[index] + time.sleep(1) # sleep 1s for occasional server congestion + else: + break + finally: + retry -= 1 + img_gt = imfrombytes(img_bytes, float32=True) + + # -------------------- Do augmentation for training: flip, rotation -------------------- # + img_gt = augment(img_gt, self.opt['use_hflip'], self.opt['use_rot']) + + # crop or pad to 400 + # TODO: 400 is hard-coded. You may change it accordingly + h, w = img_gt.shape[0:2] + crop_pad_size = 400 + # pad + if h < crop_pad_size or w < crop_pad_size: + pad_h = max(0, crop_pad_size - h) + pad_w = max(0, crop_pad_size - w) + img_gt = cv2.copyMakeBorder(img_gt, 0, pad_h, 0, pad_w, cv2.BORDER_REFLECT_101) + # crop + if img_gt.shape[0] > crop_pad_size or img_gt.shape[1] > crop_pad_size: + h, w = img_gt.shape[0:2] + # randomly choose top and left coordinates + top = random.randint(0, h - crop_pad_size) + left = random.randint(0, w - crop_pad_size) + img_gt = img_gt[top:top + crop_pad_size, left:left + crop_pad_size, ...] + + # ------------------------ Generate kernels (used in the first degradation) ------------------------ # + kernel_size = random.choice(self.kernel_range) + if np.random.uniform() < self.opt['sinc_prob']: + # this sinc filter setting is for kernels ranging from [7, 21] + if kernel_size < 13: + omega_c = np.random.uniform(np.pi / 3, np.pi) + else: + omega_c = np.random.uniform(np.pi / 5, np.pi) + kernel = circular_lowpass_kernel(omega_c, kernel_size, pad_to=False) + else: + kernel = random_mixed_kernels( + self.kernel_list, + self.kernel_prob, + kernel_size, + self.blur_sigma, + self.blur_sigma, [-math.pi, math.pi], + self.betag_range, + self.betap_range, + noise_range=None) + # pad kernel + pad_size = (21 - kernel_size) // 2 + kernel = np.pad(kernel, ((pad_size, pad_size), (pad_size, pad_size))) + + # ------------------------ Generate kernels (used in the second degradation) ------------------------ # + kernel_size = random.choice(self.kernel_range) + if np.random.uniform() < self.opt['sinc_prob2']: + if kernel_size < 13: + omega_c = np.random.uniform(np.pi / 3, np.pi) + else: + omega_c = np.random.uniform(np.pi / 5, np.pi) + kernel2 = circular_lowpass_kernel(omega_c, kernel_size, pad_to=False) + else: + kernel2 = random_mixed_kernels( + self.kernel_list2, + self.kernel_prob2, + kernel_size, + self.blur_sigma2, + self.blur_sigma2, [-math.pi, math.pi], + self.betag_range2, + self.betap_range2, + noise_range=None) + + # pad kernel + pad_size = (21 - kernel_size) // 2 + kernel2 = np.pad(kernel2, ((pad_size, pad_size), (pad_size, pad_size))) + + # ------------------------------------- the final sinc kernel ------------------------------------- # + if np.random.uniform() < self.opt['final_sinc_prob']: + kernel_size = random.choice(self.kernel_range) + omega_c = np.random.uniform(np.pi / 3, np.pi) + sinc_kernel = circular_lowpass_kernel(omega_c, kernel_size, pad_to=21) + sinc_kernel = torch.FloatTensor(sinc_kernel) + else: + sinc_kernel = self.pulse_tensor + + # BGR to RGB, HWC to CHW, numpy to tensor + img_gt = img2tensor([img_gt], bgr2rgb=True, float32=True)[0] + kernel = torch.FloatTensor(kernel) + kernel2 = 
torch.FloatTensor(kernel2) + + return_d = {'gt': img_gt, 'kernel1': kernel, 'kernel2': kernel2, 'sinc_kernel': sinc_kernel, 'gt_path': gt_path} + return return_d + + def __len__(self): + return len(self.paths) diff --git a/Real-ESRGAN/realesrgan/data/realesrgan_paired_dataset.py b/Real-ESRGAN/realesrgan/data/realesrgan_paired_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..53407536d6154d7746a22a08eeaa0a5c97a1296d --- /dev/null +++ b/Real-ESRGAN/realesrgan/data/realesrgan_paired_dataset.py @@ -0,0 +1,108 @@ +import os +from basicsr.data.data_util import paired_paths_from_folder, paired_paths_from_lmdb +from basicsr.data.transforms import augment, paired_random_crop +from basicsr.utils import FileClient, imfrombytes, img2tensor +from basicsr.utils.registry import DATASET_REGISTRY +from torch.utils import data as data +from torchvision.transforms.functional import normalize + + +@DATASET_REGISTRY.register() +class RealESRGANPairedDataset(data.Dataset): + """Paired image dataset for image restoration. + + Read LQ (Low Quality, e.g. LR (Low Resolution), blurry, noisy, etc) and GT image pairs. + + There are three modes: + 1. 'lmdb': Use lmdb files. + If opt['io_backend'] == lmdb. + 2. 'meta_info': Use meta information file to generate paths. + If opt['io_backend'] != lmdb and opt['meta_info'] is not None. + 3. 'folder': Scan folders to generate paths. + The rest. + + Args: + opt (dict): Config for train datasets. It contains the following keys: + dataroot_gt (str): Data root path for gt. + dataroot_lq (str): Data root path for lq. + meta_info (str): Path for meta information file. + io_backend (dict): IO backend type and other kwarg. + filename_tmpl (str): Template for each filename. Note that the template excludes the file extension. + Default: '{}'. + gt_size (int): Cropped patched size for gt patches. + use_hflip (bool): Use horizontal flips. + use_rot (bool): Use rotation (use vertical flip and transposing h + and w for implementation). + + scale (bool): Scale, which will be added automatically. + phase (str): 'train' or 'val'. + """ + + def __init__(self, opt): + super(RealESRGANPairedDataset, self).__init__() + self.opt = opt + self.file_client = None + self.io_backend_opt = opt['io_backend'] + # mean and std for normalizing the input images + self.mean = opt['mean'] if 'mean' in opt else None + self.std = opt['std'] if 'std' in opt else None + + self.gt_folder, self.lq_folder = opt['dataroot_gt'], opt['dataroot_lq'] + self.filename_tmpl = opt['filename_tmpl'] if 'filename_tmpl' in opt else '{}' + + # file client (lmdb io backend) + if self.io_backend_opt['type'] == 'lmdb': + self.io_backend_opt['db_paths'] = [self.lq_folder, self.gt_folder] + self.io_backend_opt['client_keys'] = ['lq', 'gt'] + self.paths = paired_paths_from_lmdb([self.lq_folder, self.gt_folder], ['lq', 'gt']) + elif 'meta_info' in self.opt and self.opt['meta_info'] is not None: + # disk backend with meta_info + # Each line in the meta_info describes the relative path to an image + with open(self.opt['meta_info']) as fin: + paths = [line.strip() for line in fin] + self.paths = [] + for path in paths: + gt_path, lq_path = path.split(', ') + gt_path = os.path.join(self.gt_folder, gt_path) + lq_path = os.path.join(self.lq_folder, lq_path) + self.paths.append(dict([('gt_path', gt_path), ('lq_path', lq_path)])) + else: + # disk backend + # it will scan the whole folder to get meta info + # it will be time-consuming for folders with too many files. 
It is recommended using an extra meta txt file + self.paths = paired_paths_from_folder([self.lq_folder, self.gt_folder], ['lq', 'gt'], self.filename_tmpl) + + def __getitem__(self, index): + if self.file_client is None: + self.file_client = FileClient(self.io_backend_opt.pop('type'), **self.io_backend_opt) + + scale = self.opt['scale'] + + # Load gt and lq images. Dimension order: HWC; channel order: BGR; + # image range: [0, 1], float32. + gt_path = self.paths[index]['gt_path'] + img_bytes = self.file_client.get(gt_path, 'gt') + img_gt = imfrombytes(img_bytes, float32=True) + lq_path = self.paths[index]['lq_path'] + img_bytes = self.file_client.get(lq_path, 'lq') + img_lq = imfrombytes(img_bytes, float32=True) + + # augmentation for training + if self.opt['phase'] == 'train': + gt_size = self.opt['gt_size'] + # random crop + img_gt, img_lq = paired_random_crop(img_gt, img_lq, gt_size, scale, gt_path) + # flip, rotation + img_gt, img_lq = augment([img_gt, img_lq], self.opt['use_hflip'], self.opt['use_rot']) + + # BGR to RGB, HWC to CHW, numpy to tensor + img_gt, img_lq = img2tensor([img_gt, img_lq], bgr2rgb=True, float32=True) + # normalize + if self.mean is not None or self.std is not None: + normalize(img_lq, self.mean, self.std, inplace=True) + normalize(img_gt, self.mean, self.std, inplace=True) + + return {'lq': img_lq, 'gt': img_gt, 'lq_path': lq_path, 'gt_path': gt_path} + + def __len__(self): + return len(self.paths) diff --git a/Real-ESRGAN/realesrgan/models/__init__.py b/Real-ESRGAN/realesrgan/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a03a8bb535b4381ce9b81a7c2a1901f1b5379181 --- /dev/null +++ b/Real-ESRGAN/realesrgan/models/__init__.py @@ -0,0 +1,10 @@ +import importlib +from basicsr.utils import scandir +from os import path as osp + +# automatically scan and import model modules for registry +# scan all the files that end with '_model.py' under the model folder +model_folder = osp.dirname(osp.abspath(__file__)) +model_filenames = [osp.splitext(osp.basename(v))[0] for v in scandir(model_folder) if v.endswith('_model.py')] +# import all the model modules +_model_modules = [importlib.import_module(f'realesrgan.models.{file_name}') for file_name in model_filenames] diff --git a/Real-ESRGAN/realesrgan/models/realesrgan_model.py b/Real-ESRGAN/realesrgan/models/realesrgan_model.py new file mode 100644 index 0000000000000000000000000000000000000000..5b6f792473b92ab621d07061159e8c9e76456bb9 --- /dev/null +++ b/Real-ESRGAN/realesrgan/models/realesrgan_model.py @@ -0,0 +1,258 @@ +import numpy as np +import random +import torch +from basicsr.data.degradations import random_add_gaussian_noise_pt, random_add_poisson_noise_pt +from basicsr.data.transforms import paired_random_crop +from basicsr.models.srgan_model import SRGANModel +from basicsr.utils import DiffJPEG, USMSharp +from basicsr.utils.img_process_util import filter2D +from basicsr.utils.registry import MODEL_REGISTRY +from collections import OrderedDict +from torch.nn import functional as F + + +@MODEL_REGISTRY.register() +class RealESRGANModel(SRGANModel): + """RealESRGAN Model for Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data. + + It mainly performs: + 1. randomly synthesize LQ images in GPU tensors + 2. optimize the networks with GAN training. 
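+
+    The synthetic pipeline in ``feed_data`` applies blur, random resize, noise and
+    JPEG compression twice, adds a final sinc filter, and keeps a pool of
+    ``queue_size`` recent training pairs to diversify degradations within a batch.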
+ """ + + def __init__(self, opt): + super(RealESRGANModel, self).__init__(opt) + self.jpeger = DiffJPEG(differentiable=False).cuda() # simulate JPEG compression artifacts + self.usm_sharpener = USMSharp().cuda() # do usm sharpening + self.queue_size = opt.get('queue_size', 180) + + @torch.no_grad() + def _dequeue_and_enqueue(self): + """It is the training pair pool for increasing the diversity in a batch. + + Batch processing limits the diversity of synthetic degradations in a batch. For example, samples in a + batch could not have different resize scaling factors. Therefore, we employ this training pair pool + to increase the degradation diversity in a batch. + """ + # initialize + b, c, h, w = self.lq.size() + if not hasattr(self, 'queue_lr'): + assert self.queue_size % b == 0, f'queue size {self.queue_size} should be divisible by batch size {b}' + self.queue_lr = torch.zeros(self.queue_size, c, h, w).cuda() + _, c, h, w = self.gt.size() + self.queue_gt = torch.zeros(self.queue_size, c, h, w).cuda() + self.queue_ptr = 0 + if self.queue_ptr == self.queue_size: # the pool is full + # do dequeue and enqueue + # shuffle + idx = torch.randperm(self.queue_size) + self.queue_lr = self.queue_lr[idx] + self.queue_gt = self.queue_gt[idx] + # get first b samples + lq_dequeue = self.queue_lr[0:b, :, :, :].clone() + gt_dequeue = self.queue_gt[0:b, :, :, :].clone() + # update the queue + self.queue_lr[0:b, :, :, :] = self.lq.clone() + self.queue_gt[0:b, :, :, :] = self.gt.clone() + + self.lq = lq_dequeue + self.gt = gt_dequeue + else: + # only do enqueue + self.queue_lr[self.queue_ptr:self.queue_ptr + b, :, :, :] = self.lq.clone() + self.queue_gt[self.queue_ptr:self.queue_ptr + b, :, :, :] = self.gt.clone() + self.queue_ptr = self.queue_ptr + b + + @torch.no_grad() + def feed_data(self, data): + """Accept data from dataloader, and then add two-order degradations to obtain LQ images. 
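+
+        The degradation hyper-parameters (resize_prob, resize_range, noise_range,
+        jpeg_range, their '2'-suffixed second-stage counterparts, scale and gt_size)
+        are read from ``self.opt``; tests/data/test_realesrgan_model.yml in this PR
+        is a complete example configuration.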
+ """ + if self.is_train and self.opt.get('high_order_degradation', True): + # training data synthesis + self.gt = data['gt'].to(self.device) + self.gt_usm = self.usm_sharpener(self.gt) + + self.kernel1 = data['kernel1'].to(self.device) + self.kernel2 = data['kernel2'].to(self.device) + self.sinc_kernel = data['sinc_kernel'].to(self.device) + + ori_h, ori_w = self.gt.size()[2:4] + + # ----------------------- The first degradation process ----------------------- # + # blur + out = filter2D(self.gt_usm, self.kernel1) + # random resize + updown_type = random.choices(['up', 'down', 'keep'], self.opt['resize_prob'])[0] + if updown_type == 'up': + scale = np.random.uniform(1, self.opt['resize_range'][1]) + elif updown_type == 'down': + scale = np.random.uniform(self.opt['resize_range'][0], 1) + else: + scale = 1 + mode = random.choice(['area', 'bilinear', 'bicubic']) + out = F.interpolate(out, scale_factor=scale, mode=mode) + # add noise + gray_noise_prob = self.opt['gray_noise_prob'] + if np.random.uniform() < self.opt['gaussian_noise_prob']: + out = random_add_gaussian_noise_pt( + out, sigma_range=self.opt['noise_range'], clip=True, rounds=False, gray_prob=gray_noise_prob) + else: + out = random_add_poisson_noise_pt( + out, + scale_range=self.opt['poisson_scale_range'], + gray_prob=gray_noise_prob, + clip=True, + rounds=False) + # JPEG compression + jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range']) + out = torch.clamp(out, 0, 1) # clamp to [0, 1], otherwise JPEGer will result in unpleasant artifacts + out = self.jpeger(out, quality=jpeg_p) + + # ----------------------- The second degradation process ----------------------- # + # blur + if np.random.uniform() < self.opt['second_blur_prob']: + out = filter2D(out, self.kernel2) + # random resize + updown_type = random.choices(['up', 'down', 'keep'], self.opt['resize_prob2'])[0] + if updown_type == 'up': + scale = np.random.uniform(1, self.opt['resize_range2'][1]) + elif updown_type == 'down': + scale = np.random.uniform(self.opt['resize_range2'][0], 1) + else: + scale = 1 + mode = random.choice(['area', 'bilinear', 'bicubic']) + out = F.interpolate( + out, size=(int(ori_h / self.opt['scale'] * scale), int(ori_w / self.opt['scale'] * scale)), mode=mode) + # add noise + gray_noise_prob = self.opt['gray_noise_prob2'] + if np.random.uniform() < self.opt['gaussian_noise_prob2']: + out = random_add_gaussian_noise_pt( + out, sigma_range=self.opt['noise_range2'], clip=True, rounds=False, gray_prob=gray_noise_prob) + else: + out = random_add_poisson_noise_pt( + out, + scale_range=self.opt['poisson_scale_range2'], + gray_prob=gray_noise_prob, + clip=True, + rounds=False) + + # JPEG compression + the final sinc filter + # We also need to resize images to desired sizes. We group [resize back + sinc filter] together + # as one operation. + # We consider two orders: + # 1. [resize back + sinc filter] + JPEG compression + # 2. JPEG compression + [resize back + sinc filter] + # Empirically, we find other combinations (sinc + JPEG + Resize) will introduce twisted lines. 
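+            # Illustrative size bookkeeping (comment only): with opt['scale'] = 4 and
+            # a 256x256 GT batch, both branches below bring ``out`` to 64x64
+            # (ori_h // scale, ori_w // scale); paired_random_crop afterwards cuts
+            # matching gt_size and gt_size // 4 patches from the GT and LQ tensors.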
+ if np.random.uniform() < 0.5: + # resize back + the final sinc filter + mode = random.choice(['area', 'bilinear', 'bicubic']) + out = F.interpolate(out, size=(ori_h // self.opt['scale'], ori_w // self.opt['scale']), mode=mode) + out = filter2D(out, self.sinc_kernel) + # JPEG compression + jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range2']) + out = torch.clamp(out, 0, 1) + out = self.jpeger(out, quality=jpeg_p) + else: + # JPEG compression + jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range2']) + out = torch.clamp(out, 0, 1) + out = self.jpeger(out, quality=jpeg_p) + # resize back + the final sinc filter + mode = random.choice(['area', 'bilinear', 'bicubic']) + out = F.interpolate(out, size=(ori_h // self.opt['scale'], ori_w // self.opt['scale']), mode=mode) + out = filter2D(out, self.sinc_kernel) + + # clamp and round + self.lq = torch.clamp((out * 255.0).round(), 0, 255) / 255. + + # random crop + gt_size = self.opt['gt_size'] + (self.gt, self.gt_usm), self.lq = paired_random_crop([self.gt, self.gt_usm], self.lq, gt_size, + self.opt['scale']) + + # training pair pool + self._dequeue_and_enqueue() + # sharpen self.gt again, as we have changed the self.gt with self._dequeue_and_enqueue + self.gt_usm = self.usm_sharpener(self.gt) + self.lq = self.lq.contiguous() # for the warning: grad and param do not obey the gradient layout contract + else: + # for paired training or validation + self.lq = data['lq'].to(self.device) + if 'gt' in data: + self.gt = data['gt'].to(self.device) + self.gt_usm = self.usm_sharpener(self.gt) + + def nondist_validation(self, dataloader, current_iter, tb_logger, save_img): + # do not use the synthetic process during validation + self.is_train = False + super(RealESRGANModel, self).nondist_validation(dataloader, current_iter, tb_logger, save_img) + self.is_train = True + + def optimize_parameters(self, current_iter): + # usm sharpening + l1_gt = self.gt_usm + percep_gt = self.gt_usm + gan_gt = self.gt_usm + if self.opt['l1_gt_usm'] is False: + l1_gt = self.gt + if self.opt['percep_gt_usm'] is False: + percep_gt = self.gt + if self.opt['gan_gt_usm'] is False: + gan_gt = self.gt + + # optimize net_g + for p in self.net_d.parameters(): + p.requires_grad = False + + self.optimizer_g.zero_grad() + self.output = self.net_g(self.lq) + + l_g_total = 0 + loss_dict = OrderedDict() + if (current_iter % self.net_d_iters == 0 and current_iter > self.net_d_init_iters): + # pixel loss + if self.cri_pix: + l_g_pix = self.cri_pix(self.output, l1_gt) + l_g_total += l_g_pix + loss_dict['l_g_pix'] = l_g_pix + # perceptual loss + if self.cri_perceptual: + l_g_percep, l_g_style = self.cri_perceptual(self.output, percep_gt) + if l_g_percep is not None: + l_g_total += l_g_percep + loss_dict['l_g_percep'] = l_g_percep + if l_g_style is not None: + l_g_total += l_g_style + loss_dict['l_g_style'] = l_g_style + # gan loss + fake_g_pred = self.net_d(self.output) + l_g_gan = self.cri_gan(fake_g_pred, True, is_disc=False) + l_g_total += l_g_gan + loss_dict['l_g_gan'] = l_g_gan + + l_g_total.backward() + self.optimizer_g.step() + + # optimize net_d + for p in self.net_d.parameters(): + p.requires_grad = True + + self.optimizer_d.zero_grad() + # real + real_d_pred = self.net_d(gan_gt) + l_d_real = self.cri_gan(real_d_pred, True, is_disc=True) + loss_dict['l_d_real'] = l_d_real + loss_dict['out_d_real'] = torch.mean(real_d_pred.detach()) + l_d_real.backward() + # fake + fake_d_pred = self.net_d(self.output.detach().clone()) # clone for pt1.9 + l_d_fake = 
self.cri_gan(fake_d_pred, False, is_disc=True) + loss_dict['l_d_fake'] = l_d_fake + loss_dict['out_d_fake'] = torch.mean(fake_d_pred.detach()) + l_d_fake.backward() + self.optimizer_d.step() + + if self.ema_decay > 0: + self.model_ema(decay=self.ema_decay) + + self.log_dict = self.reduce_loss_dict(loss_dict) diff --git a/Real-ESRGAN/realesrgan/models/realesrnet_model.py b/Real-ESRGAN/realesrgan/models/realesrnet_model.py new file mode 100644 index 0000000000000000000000000000000000000000..29ae508b923830c9220b6889472b214170c657ee --- /dev/null +++ b/Real-ESRGAN/realesrgan/models/realesrnet_model.py @@ -0,0 +1,188 @@ +import numpy as np +import random +import torch +from basicsr.data.degradations import random_add_gaussian_noise_pt, random_add_poisson_noise_pt +from basicsr.data.transforms import paired_random_crop +from basicsr.models.sr_model import SRModel +from basicsr.utils import DiffJPEG, USMSharp +from basicsr.utils.img_process_util import filter2D +from basicsr.utils.registry import MODEL_REGISTRY +from torch.nn import functional as F + + +@MODEL_REGISTRY.register() +class RealESRNetModel(SRModel): + """RealESRNet Model for Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data. + + It is trained without GAN losses. + It mainly performs: + 1. randomly synthesize LQ images in GPU tensors + 2. optimize the networks with GAN training. + """ + + def __init__(self, opt): + super(RealESRNetModel, self).__init__(opt) + self.jpeger = DiffJPEG(differentiable=False).cuda() # simulate JPEG compression artifacts + self.usm_sharpener = USMSharp().cuda() # do usm sharpening + self.queue_size = opt.get('queue_size', 180) + + @torch.no_grad() + def _dequeue_and_enqueue(self): + """It is the training pair pool for increasing the diversity in a batch. + + Batch processing limits the diversity of synthetic degradations in a batch. For example, samples in a + batch could not have different resize scaling factors. Therefore, we employ this training pair pool + to increase the degradation diversity in a batch. + """ + # initialize + b, c, h, w = self.lq.size() + if not hasattr(self, 'queue_lr'): + assert self.queue_size % b == 0, f'queue size {self.queue_size} should be divisible by batch size {b}' + self.queue_lr = torch.zeros(self.queue_size, c, h, w).cuda() + _, c, h, w = self.gt.size() + self.queue_gt = torch.zeros(self.queue_size, c, h, w).cuda() + self.queue_ptr = 0 + if self.queue_ptr == self.queue_size: # the pool is full + # do dequeue and enqueue + # shuffle + idx = torch.randperm(self.queue_size) + self.queue_lr = self.queue_lr[idx] + self.queue_gt = self.queue_gt[idx] + # get first b samples + lq_dequeue = self.queue_lr[0:b, :, :, :].clone() + gt_dequeue = self.queue_gt[0:b, :, :, :].clone() + # update the queue + self.queue_lr[0:b, :, :, :] = self.lq.clone() + self.queue_gt[0:b, :, :, :] = self.gt.clone() + + self.lq = lq_dequeue + self.gt = gt_dequeue + else: + # only do enqueue + self.queue_lr[self.queue_ptr:self.queue_ptr + b, :, :, :] = self.lq.clone() + self.queue_gt[self.queue_ptr:self.queue_ptr + b, :, :, :] = self.gt.clone() + self.queue_ptr = self.queue_ptr + b + + @torch.no_grad() + def feed_data(self, data): + """Accept data from dataloader, and then add two-order degradations to obtain LQ images. 
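+
+        Unlike RealESRGANModel, when ``opt['gt_usm']`` is true the ground-truth tensor
+        itself is USM-sharpened before the degradations are applied.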
+ """ + if self.is_train and self.opt.get('high_order_degradation', True): + # training data synthesis + self.gt = data['gt'].to(self.device) + # USM sharpen the GT images + if self.opt['gt_usm'] is True: + self.gt = self.usm_sharpener(self.gt) + + self.kernel1 = data['kernel1'].to(self.device) + self.kernel2 = data['kernel2'].to(self.device) + self.sinc_kernel = data['sinc_kernel'].to(self.device) + + ori_h, ori_w = self.gt.size()[2:4] + + # ----------------------- The first degradation process ----------------------- # + # blur + out = filter2D(self.gt, self.kernel1) + # random resize + updown_type = random.choices(['up', 'down', 'keep'], self.opt['resize_prob'])[0] + if updown_type == 'up': + scale = np.random.uniform(1, self.opt['resize_range'][1]) + elif updown_type == 'down': + scale = np.random.uniform(self.opt['resize_range'][0], 1) + else: + scale = 1 + mode = random.choice(['area', 'bilinear', 'bicubic']) + out = F.interpolate(out, scale_factor=scale, mode=mode) + # add noise + gray_noise_prob = self.opt['gray_noise_prob'] + if np.random.uniform() < self.opt['gaussian_noise_prob']: + out = random_add_gaussian_noise_pt( + out, sigma_range=self.opt['noise_range'], clip=True, rounds=False, gray_prob=gray_noise_prob) + else: + out = random_add_poisson_noise_pt( + out, + scale_range=self.opt['poisson_scale_range'], + gray_prob=gray_noise_prob, + clip=True, + rounds=False) + # JPEG compression + jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range']) + out = torch.clamp(out, 0, 1) # clamp to [0, 1], otherwise JPEGer will result in unpleasant artifacts + out = self.jpeger(out, quality=jpeg_p) + + # ----------------------- The second degradation process ----------------------- # + # blur + if np.random.uniform() < self.opt['second_blur_prob']: + out = filter2D(out, self.kernel2) + # random resize + updown_type = random.choices(['up', 'down', 'keep'], self.opt['resize_prob2'])[0] + if updown_type == 'up': + scale = np.random.uniform(1, self.opt['resize_range2'][1]) + elif updown_type == 'down': + scale = np.random.uniform(self.opt['resize_range2'][0], 1) + else: + scale = 1 + mode = random.choice(['area', 'bilinear', 'bicubic']) + out = F.interpolate( + out, size=(int(ori_h / self.opt['scale'] * scale), int(ori_w / self.opt['scale'] * scale)), mode=mode) + # add noise + gray_noise_prob = self.opt['gray_noise_prob2'] + if np.random.uniform() < self.opt['gaussian_noise_prob2']: + out = random_add_gaussian_noise_pt( + out, sigma_range=self.opt['noise_range2'], clip=True, rounds=False, gray_prob=gray_noise_prob) + else: + out = random_add_poisson_noise_pt( + out, + scale_range=self.opt['poisson_scale_range2'], + gray_prob=gray_noise_prob, + clip=True, + rounds=False) + + # JPEG compression + the final sinc filter + # We also need to resize images to desired sizes. We group [resize back + sinc filter] together + # as one operation. + # We consider two orders: + # 1. [resize back + sinc filter] + JPEG compression + # 2. JPEG compression + [resize back + sinc filter] + # Empirically, we find other combinations (sinc + JPEG + Resize) will introduce twisted lines. 
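+            # Note (comment only): both branches below yield the same final LQ size of
+            # (ori_h // scale, ori_w // scale); they differ only in whether JPEG
+            # compression runs before or after the [resize back + sinc filter] step,
+            # with the JPEG quality drawn per sample from opt['jpeg_range2'].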
+ if np.random.uniform() < 0.5: + # resize back + the final sinc filter + mode = random.choice(['area', 'bilinear', 'bicubic']) + out = F.interpolate(out, size=(ori_h // self.opt['scale'], ori_w // self.opt['scale']), mode=mode) + out = filter2D(out, self.sinc_kernel) + # JPEG compression + jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range2']) + out = torch.clamp(out, 0, 1) + out = self.jpeger(out, quality=jpeg_p) + else: + # JPEG compression + jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range2']) + out = torch.clamp(out, 0, 1) + out = self.jpeger(out, quality=jpeg_p) + # resize back + the final sinc filter + mode = random.choice(['area', 'bilinear', 'bicubic']) + out = F.interpolate(out, size=(ori_h // self.opt['scale'], ori_w // self.opt['scale']), mode=mode) + out = filter2D(out, self.sinc_kernel) + + # clamp and round + self.lq = torch.clamp((out * 255.0).round(), 0, 255) / 255. + + # random crop + gt_size = self.opt['gt_size'] + self.gt, self.lq = paired_random_crop(self.gt, self.lq, gt_size, self.opt['scale']) + + # training pair pool + self._dequeue_and_enqueue() + self.lq = self.lq.contiguous() # for the warning: grad and param do not obey the gradient layout contract + else: + # for paired training or validation + self.lq = data['lq'].to(self.device) + if 'gt' in data: + self.gt = data['gt'].to(self.device) + self.gt_usm = self.usm_sharpener(self.gt) + + def nondist_validation(self, dataloader, current_iter, tb_logger, save_img): + # do not use the synthetic process during validation + self.is_train = False + super(RealESRNetModel, self).nondist_validation(dataloader, current_iter, tb_logger, save_img) + self.is_train = True diff --git a/Real-ESRGAN/realesrgan/train.py b/Real-ESRGAN/realesrgan/train.py new file mode 100644 index 0000000000000000000000000000000000000000..ca9de771059eeb2a60f7cb83b59918e497bc10de --- /dev/null +++ b/Real-ESRGAN/realesrgan/train.py @@ -0,0 +1,11 @@ +# flake8: noqa +import os.path as osp +from basicsr.train import train_pipeline + +import realesrgan.archs +import realesrgan.data +import realesrgan.models + +if __name__ == '__main__': + root_path = osp.abspath(osp.join(__file__, osp.pardir, osp.pardir)) + train_pipeline(root_path) diff --git a/Real-ESRGAN/realesrgan/utils.py b/Real-ESRGAN/realesrgan/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..bd4c34f8ed355f8cf790ba7f4564755dec8bc372 --- /dev/null +++ b/Real-ESRGAN/realesrgan/utils.py @@ -0,0 +1,313 @@ +import cv2 +import math +import numpy as np +import os +import queue +import threading +import torch +from basicsr.utils.download_util import load_file_from_url +from torch.nn import functional as F + +ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + + +class RealESRGANer(): + """A helper class for upsampling images with RealESRGAN. + + Args: + scale (int): Upsampling scale factor used in the networks. It is usually 2 or 4. + model_path (str): The path to the pretrained model. It can be urls (will first download it automatically). + model (nn.Module): The defined network. Default: None. + tile (int): As too large images result in the out of GPU memory issue, so this tile option will first crop + input images into tiles, and then process each of them. Finally, they will be merged into one image. + 0 denotes for do not use tile. Default: 0. + tile_pad (int): The pad size for each tile, to remove border artifacts. Default: 10. + pre_pad (int): Pad the input images to avoid border artifacts. Default: 10. 
+ half (float): Whether to use half precision during inference. Default: False. + """ + + def __init__(self, + scale, + model_path, + dni_weight=None, + model=None, + tile=0, + tile_pad=10, + pre_pad=10, + half=False, + device=None, + gpu_id=None): + self.scale = scale + self.tile_size = tile + self.tile_pad = tile_pad + self.pre_pad = pre_pad + self.mod_scale = None + self.half = half + + # initialize model + if gpu_id: + self.device = torch.device( + f'cuda:{gpu_id}' if torch.cuda.is_available() else 'cpu') if device is None else device + else: + self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') if device is None else device + + if isinstance(model_path, list): + # dni + assert len(model_path) == len(dni_weight), 'model_path and dni_weight should have the save length.' + loadnet = self.dni(model_path[0], model_path[1], dni_weight) + else: + # if the model_path starts with https, it will first download models to the folder: weights + if model_path.startswith('https://'): + model_path = load_file_from_url( + url=model_path, model_dir=os.path.join(ROOT_DIR, 'weights'), progress=True, file_name=None) + loadnet = torch.load(model_path, map_location=torch.device('cpu')) + + # prefer to use params_ema + if 'params_ema' in loadnet: + keyname = 'params_ema' + else: + keyname = 'params' + model.load_state_dict(loadnet[keyname], strict=True) + + model.eval() + self.model = model.to(self.device) + if self.half: + self.model = self.model.half() + + def dni(self, net_a, net_b, dni_weight, key='params', loc='cpu'): + """Deep network interpolation. + + ``Paper: Deep Network Interpolation for Continuous Imagery Effect Transition`` + """ + net_a = torch.load(net_a, map_location=torch.device(loc)) + net_b = torch.load(net_b, map_location=torch.device(loc)) + for k, v_a in net_a[key].items(): + net_a[key][k] = dni_weight[0] * v_a + dni_weight[1] * net_b[key][k] + return net_a + + def pre_process(self, img): + """Pre-process, such as pre-pad and mod pad, so that the images can be divisible + """ + img = torch.from_numpy(np.transpose(img, (2, 0, 1))).float() + self.img = img.unsqueeze(0).to(self.device) + if self.half: + self.img = self.img.half() + + # pre_pad + if self.pre_pad != 0: + self.img = F.pad(self.img, (0, self.pre_pad, 0, self.pre_pad), 'reflect') + # mod pad for divisible borders + if self.scale == 2: + self.mod_scale = 2 + elif self.scale == 1: + self.mod_scale = 4 + if self.mod_scale is not None: + self.mod_pad_h, self.mod_pad_w = 0, 0 + _, _, h, w = self.img.size() + if (h % self.mod_scale != 0): + self.mod_pad_h = (self.mod_scale - h % self.mod_scale) + if (w % self.mod_scale != 0): + self.mod_pad_w = (self.mod_scale - w % self.mod_scale) + self.img = F.pad(self.img, (0, self.mod_pad_w, 0, self.mod_pad_h), 'reflect') + + def process(self): + # model inference + self.output = self.model(self.img) + + def tile_process(self): + """It will first crop input images to tiles, and then process each tile. + Finally, all the processed tiles are merged into one images. 
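+
+        The input is split into ceil(width / tile_size) x ceil(height / tile_size)
+        tiles; each tile is expanded by ``tile_pad`` pixels on every side before
+        inference, and that padding is cropped away again when the upscaled tile is
+        pasted into the output, which hides seams between tiles.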
+ + Modified from: https://github.com/ata4/esrgan-launcher + """ + batch, channel, height, width = self.img.shape + output_height = height * self.scale + output_width = width * self.scale + output_shape = (batch, channel, output_height, output_width) + + # start with black image + self.output = self.img.new_zeros(output_shape) + tiles_x = math.ceil(width / self.tile_size) + tiles_y = math.ceil(height / self.tile_size) + + # loop over all tiles + for y in range(tiles_y): + for x in range(tiles_x): + # extract tile from input image + ofs_x = x * self.tile_size + ofs_y = y * self.tile_size + # input tile area on total image + input_start_x = ofs_x + input_end_x = min(ofs_x + self.tile_size, width) + input_start_y = ofs_y + input_end_y = min(ofs_y + self.tile_size, height) + + # input tile area on total image with padding + input_start_x_pad = max(input_start_x - self.tile_pad, 0) + input_end_x_pad = min(input_end_x + self.tile_pad, width) + input_start_y_pad = max(input_start_y - self.tile_pad, 0) + input_end_y_pad = min(input_end_y + self.tile_pad, height) + + # input tile dimensions + input_tile_width = input_end_x - input_start_x + input_tile_height = input_end_y - input_start_y + tile_idx = y * tiles_x + x + 1 + input_tile = self.img[:, :, input_start_y_pad:input_end_y_pad, input_start_x_pad:input_end_x_pad] + + # upscale tile + try: + with torch.no_grad(): + output_tile = self.model(input_tile) + except RuntimeError as error: + print('Error', error) + print(f'\tTile {tile_idx}/{tiles_x * tiles_y}') + + # output tile area on total image + output_start_x = input_start_x * self.scale + output_end_x = input_end_x * self.scale + output_start_y = input_start_y * self.scale + output_end_y = input_end_y * self.scale + + # output tile area without padding + output_start_x_tile = (input_start_x - input_start_x_pad) * self.scale + output_end_x_tile = output_start_x_tile + input_tile_width * self.scale + output_start_y_tile = (input_start_y - input_start_y_pad) * self.scale + output_end_y_tile = output_start_y_tile + input_tile_height * self.scale + + # put tile into output image + self.output[:, :, output_start_y:output_end_y, + output_start_x:output_end_x] = output_tile[:, :, output_start_y_tile:output_end_y_tile, + output_start_x_tile:output_end_x_tile] + + def post_process(self): + # remove extra pad + if self.mod_scale is not None: + _, _, h, w = self.output.size() + self.output = self.output[:, :, 0:h - self.mod_pad_h * self.scale, 0:w - self.mod_pad_w * self.scale] + # remove prepad + if self.pre_pad != 0: + _, _, h, w = self.output.size() + self.output = self.output[:, :, 0:h - self.pre_pad * self.scale, 0:w - self.pre_pad * self.scale] + return self.output + + @torch.no_grad() + def enhance(self, img, outscale=None, alpha_upsampler='realesrgan'): + h_input, w_input = img.shape[0:2] + # img: numpy + img = img.astype(np.float32) + if np.max(img) > 256: # 16-bit image + max_range = 65535 + print('\tInput is a 16-bit image') + else: + max_range = 255 + img = img / max_range + if len(img.shape) == 2: # gray image + img_mode = 'L' + img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB) + elif img.shape[2] == 4: # RGBA image with alpha channel + img_mode = 'RGBA' + alpha = img[:, :, 3] + img = img[:, :, 0:3] + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + if alpha_upsampler == 'realesrgan': + alpha = cv2.cvtColor(alpha, cv2.COLOR_GRAY2RGB) + else: + img_mode = 'RGB' + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + + # ------------------- process image (without the alpha channel) ------------------- # + 
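+        # Usage sketch (illustrative; the file paths are placeholders, and the RRDBNet
+        # arguments follow scripts/pytorch2onnx.py in this PR):
+        #     from basicsr.archs.rrdbnet_arch import RRDBNet
+        #     net = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
+        #     upsampler = RealESRGANer(scale=4, model_path='experiments/pretrained_models/RealESRGAN_x4plus.pth',
+        #                              model=net, tile=400, half=False)
+        #     img = cv2.imread('input.png', cv2.IMREAD_UNCHANGED)
+        #     output, img_mode = upsampler.enhance(img, outscale=4)
+        #     cv2.imwrite('output.png', output)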
self.pre_process(img) + if self.tile_size > 0: + self.tile_process() + else: + self.process() + output_img = self.post_process() + output_img = output_img.data.squeeze().float().cpu().clamp_(0, 1).numpy() + output_img = np.transpose(output_img[[2, 1, 0], :, :], (1, 2, 0)) + if img_mode == 'L': + output_img = cv2.cvtColor(output_img, cv2.COLOR_BGR2GRAY) + + # ------------------- process the alpha channel if necessary ------------------- # + if img_mode == 'RGBA': + if alpha_upsampler == 'realesrgan': + self.pre_process(alpha) + if self.tile_size > 0: + self.tile_process() + else: + self.process() + output_alpha = self.post_process() + output_alpha = output_alpha.data.squeeze().float().cpu().clamp_(0, 1).numpy() + output_alpha = np.transpose(output_alpha[[2, 1, 0], :, :], (1, 2, 0)) + output_alpha = cv2.cvtColor(output_alpha, cv2.COLOR_BGR2GRAY) + else: # use the cv2 resize for alpha channel + h, w = alpha.shape[0:2] + output_alpha = cv2.resize(alpha, (w * self.scale, h * self.scale), interpolation=cv2.INTER_LINEAR) + + # merge the alpha channel + output_img = cv2.cvtColor(output_img, cv2.COLOR_BGR2BGRA) + output_img[:, :, 3] = output_alpha + + # ------------------------------ return ------------------------------ # + if max_range == 65535: # 16-bit image + output = (output_img * 65535.0).round().astype(np.uint16) + else: + output = (output_img * 255.0).round().astype(np.uint8) + + if outscale is not None and outscale != float(self.scale): + output = cv2.resize( + output, ( + int(w_input * outscale), + int(h_input * outscale), + ), interpolation=cv2.INTER_LANCZOS4) + + return output, img_mode + + +class PrefetchReader(threading.Thread): + """Prefetch images. + + Args: + img_list (list[str]): A image list of image paths to be read. + num_prefetch_queue (int): Number of prefetch queue. 
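+
+    Example (illustrative):
+        reader = PrefetchReader(img_list, num_prefetch_queue=4)
+        reader.start()
+        for img in reader:
+            ...  # images arrive in the order of img_list; iteration stops after the last one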
+ """ + + def __init__(self, img_list, num_prefetch_queue): + super().__init__() + self.que = queue.Queue(num_prefetch_queue) + self.img_list = img_list + + def run(self): + for img_path in self.img_list: + img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED) + self.que.put(img) + + self.que.put(None) + + def __next__(self): + next_item = self.que.get() + if next_item is None: + raise StopIteration + return next_item + + def __iter__(self): + return self + + +class IOConsumer(threading.Thread): + + def __init__(self, opt, que, qid): + super().__init__() + self._queue = que + self.qid = qid + self.opt = opt + + def run(self): + while True: + msg = self._queue.get() + if isinstance(msg, str) and msg == 'quit': + break + + output = msg['output'] + save_path = msg['save_path'] + cv2.imwrite(save_path, output) + print(f'IO worker {self.qid} is done.') diff --git a/Real-ESRGAN/requirements.txt b/Real-ESRGAN/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..e93a8836e6f07453b5ced639212feebd8c3aaf5f --- /dev/null +++ b/Real-ESRGAN/requirements.txt @@ -0,0 +1,9 @@ +basicsr>=1.4.2 +facexlib>=0.2.5 +gfpgan>=1.3.5 +numpy +opencv-python +Pillow +torch>=1.7 +torchvision +tqdm diff --git a/Real-ESRGAN/scripts/extract_subimages.py b/Real-ESRGAN/scripts/extract_subimages.py new file mode 100644 index 0000000000000000000000000000000000000000..4511de3495ec27f1745bf8a7f9d7f9d561303995 --- /dev/null +++ b/Real-ESRGAN/scripts/extract_subimages.py @@ -0,0 +1,135 @@ +import argparse +import cv2 +import numpy as np +import os +import sys +from basicsr.utils import scandir +from multiprocessing import Pool +from os import path as osp +from tqdm import tqdm + + +def main(args): + """A multi-thread tool to crop large images to sub-images for faster IO. + + opt (dict): Configuration dict. It contains: + n_thread (int): Thread number. + compression_level (int): CV_IMWRITE_PNG_COMPRESSION from 0 to 9. A higher value means a smaller size + and longer compression time. Use 0 for faster CPU decompression. Default: 3, same in cv2. + input_folder (str): Path to the input folder. + save_folder (str): Path to save folder. + crop_size (int): Crop size. + step (int): Step for overlapped sliding window. + thresh_size (int): Threshold size. Patches whose size is lower than thresh_size will be dropped. + + Usage: + For each folder, run this script. + Typically, there are GT folder and LQ folder to be processed for DIV2K dataset. + After process, each sub_folder should have the same number of subimages. + Remember to modify opt configurations according to your settings. + """ + + opt = {} + opt['n_thread'] = args.n_thread + opt['compression_level'] = args.compression_level + opt['input_folder'] = args.input + opt['save_folder'] = args.output + opt['crop_size'] = args.crop_size + opt['step'] = args.step + opt['thresh_size'] = args.thresh_size + extract_subimages(opt) + + +def extract_subimages(opt): + """Crop images to subimages. + + Args: + opt (dict): Configuration dict. It contains: + input_folder (str): Path to the input folder. + save_folder (str): Path to save folder. + n_thread (int): Thread number. + """ + input_folder = opt['input_folder'] + save_folder = opt['save_folder'] + if not osp.exists(save_folder): + os.makedirs(save_folder) + print(f'mkdir {save_folder} ...') + else: + print(f'Folder {save_folder} already exists. 
Exit.') + sys.exit(1) + + # scan all images + img_list = list(scandir(input_folder, full_path=True)) + + pbar = tqdm(total=len(img_list), unit='image', desc='Extract') + pool = Pool(opt['n_thread']) + for path in img_list: + pool.apply_async(worker, args=(path, opt), callback=lambda arg: pbar.update(1)) + pool.close() + pool.join() + pbar.close() + print('All processes done.') + + +def worker(path, opt): + """Worker for each process. + + Args: + path (str): Image path. + opt (dict): Configuration dict. It contains: + crop_size (int): Crop size. + step (int): Step for overlapped sliding window. + thresh_size (int): Threshold size. Patches whose size is lower than thresh_size will be dropped. + save_folder (str): Path to save folder. + compression_level (int): for cv2.IMWRITE_PNG_COMPRESSION. + + Returns: + process_info (str): Process information displayed in progress bar. + """ + crop_size = opt['crop_size'] + step = opt['step'] + thresh_size = opt['thresh_size'] + img_name, extension = osp.splitext(osp.basename(path)) + + # remove the x2, x3, x4 and x8 in the filename for DIV2K + img_name = img_name.replace('x2', '').replace('x3', '').replace('x4', '').replace('x8', '') + + img = cv2.imread(path, cv2.IMREAD_UNCHANGED) + + h, w = img.shape[0:2] + h_space = np.arange(0, h - crop_size + 1, step) + if h - (h_space[-1] + crop_size) > thresh_size: + h_space = np.append(h_space, h - crop_size) + w_space = np.arange(0, w - crop_size + 1, step) + if w - (w_space[-1] + crop_size) > thresh_size: + w_space = np.append(w_space, w - crop_size) + + index = 0 + for x in h_space: + for y in w_space: + index += 1 + cropped_img = img[x:x + crop_size, y:y + crop_size, ...] + cropped_img = np.ascontiguousarray(cropped_img) + cv2.imwrite( + osp.join(opt['save_folder'], f'{img_name}_s{index:03d}{extension}'), cropped_img, + [cv2.IMWRITE_PNG_COMPRESSION, opt['compression_level']]) + process_info = f'Processing {img_name} ...' + return process_info + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--input', type=str, default='datasets/DF2K/DF2K_HR', help='Input folder') + parser.add_argument('--output', type=str, default='datasets/DF2K/DF2K_HR_sub', help='Output folder') + parser.add_argument('--crop_size', type=int, default=480, help='Crop size') + parser.add_argument('--step', type=int, default=240, help='Step for overlapped sliding window') + parser.add_argument( + '--thresh_size', + type=int, + default=0, + help='Threshold size. 
Patches whose size is lower than thresh_size will be dropped.') + parser.add_argument('--n_thread', type=int, default=20, help='Thread number.') + parser.add_argument('--compression_level', type=int, default=3, help='Compression level') + args = parser.parse_args() + + main(args) diff --git a/Real-ESRGAN/scripts/generate_meta_info.py b/Real-ESRGAN/scripts/generate_meta_info.py new file mode 100644 index 0000000000000000000000000000000000000000..bf2e1488038929ceffb490234879668189f904b3 --- /dev/null +++ b/Real-ESRGAN/scripts/generate_meta_info.py @@ -0,0 +1,58 @@ +import argparse +import cv2 +import glob +import os + + +def main(args): + txt_file = open(args.meta_info, 'w') + for folder, root in zip(args.input, args.root): + img_paths = sorted(glob.glob(os.path.join(folder, '*'))) + for img_path in img_paths: + status = True + if args.check: + # read the image once for check, as some images may have errors + try: + img = cv2.imread(img_path) + except (IOError, OSError) as error: + print(f'Read {img_path} error: {error}') + status = False + if img is None: + status = False + print(f'Img is None: {img_path}') + if status: + # get the relative path + img_name = os.path.relpath(img_path, root) + print(img_name) + txt_file.write(f'{img_name}\n') + + +if __name__ == '__main__': + """Generate meta info (txt file) for only Ground-Truth images. + + It can also generate meta info from several folders into one txt file. + """ + parser = argparse.ArgumentParser() + parser.add_argument( + '--input', + nargs='+', + default=['datasets/DF2K/DF2K_HR', 'datasets/DF2K/DF2K_multiscale'], + help='Input folder, can be a list') + parser.add_argument( + '--root', + nargs='+', + default=['datasets/DF2K', 'datasets/DF2K'], + help='Folder root, should have the length as input folders') + parser.add_argument( + '--meta_info', + type=str, + default='datasets/DF2K/meta_info/meta_info_DF2Kmultiscale.txt', + help='txt path for meta info') + parser.add_argument('--check', action='store_true', help='Read image to check whether it is ok') + args = parser.parse_args() + + assert len(args.input) == len(args.root), ('Input folder and folder root should have the same length, but got ' + f'{len(args.input)} and {len(args.root)}.') + os.makedirs(os.path.dirname(args.meta_info), exist_ok=True) + + main(args) diff --git a/Real-ESRGAN/scripts/generate_meta_info_pairdata.py b/Real-ESRGAN/scripts/generate_meta_info_pairdata.py new file mode 100644 index 0000000000000000000000000000000000000000..0976665f92ceddbab60b1061b7c9dbe725ccff0f --- /dev/null +++ b/Real-ESRGAN/scripts/generate_meta_info_pairdata.py @@ -0,0 +1,49 @@ +import argparse +import glob +import os + + +def main(args): + txt_file = open(args.meta_info, 'w') + # sca images + img_paths_gt = sorted(glob.glob(os.path.join(args.input[0], '*'))) + img_paths_lq = sorted(glob.glob(os.path.join(args.input[1], '*'))) + + assert len(img_paths_gt) == len(img_paths_lq), ('GT folder and LQ folder should have the same length, but got ' + f'{len(img_paths_gt)} and {len(img_paths_lq)}.') + + for img_path_gt, img_path_lq in zip(img_paths_gt, img_paths_lq): + # get the relative paths + img_name_gt = os.path.relpath(img_path_gt, args.root[0]) + img_name_lq = os.path.relpath(img_path_lq, args.root[1]) + print(f'{img_name_gt}, {img_name_lq}') + txt_file.write(f'{img_name_gt}, {img_name_lq}\n') + + +if __name__ == '__main__': + """This script is used to generate meta info (txt file) for paired images. 
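+
+    Each output line has the form '<gt_relpath>, <lq_relpath>', e.g.
+    'gt/baboon.png, lq/baboon.png' (see tests/data/meta_info_pair.txt in this PR).
+    Typical invocation, using the argparse defaults below (illustrative):
+        python scripts/generate_meta_info_pairdata.py \
+            --input datasets/DF2K/DIV2K_train_HR_sub datasets/DF2K/DIV2K_train_LR_bicubic_X4_sub \
+            --meta_info datasets/DF2K/meta_info/meta_info_DIV2K_sub_pair.txt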
+ """ + parser = argparse.ArgumentParser() + parser.add_argument( + '--input', + nargs='+', + default=['datasets/DF2K/DIV2K_train_HR_sub', 'datasets/DF2K/DIV2K_train_LR_bicubic_X4_sub'], + help='Input folder, should be [gt_folder, lq_folder]') + parser.add_argument('--root', nargs='+', default=[None, None], help='Folder root, will use the ') + parser.add_argument( + '--meta_info', + type=str, + default='datasets/DF2K/meta_info/meta_info_DIV2K_sub_pair.txt', + help='txt path for meta info') + args = parser.parse_args() + + assert len(args.input) == 2, 'Input folder should have two elements: gt folder and lq folder' + assert len(args.root) == 2, 'Root path should have two elements: root for gt folder and lq folder' + os.makedirs(os.path.dirname(args.meta_info), exist_ok=True) + for i in range(2): + if args.input[i].endswith('/'): + args.input[i] = args.input[i][:-1] + if args.root[i] is None: + args.root[i] = os.path.dirname(args.input[i]) + + main(args) diff --git a/Real-ESRGAN/scripts/generate_multiscale_DF2K.py b/Real-ESRGAN/scripts/generate_multiscale_DF2K.py new file mode 100644 index 0000000000000000000000000000000000000000..67bd28a16e7388bfbb799d7b0dc22ec7b86c779c --- /dev/null +++ b/Real-ESRGAN/scripts/generate_multiscale_DF2K.py @@ -0,0 +1,48 @@ +import argparse +import glob +import os +from PIL import Image + + +def main(args): + # For DF2K, we consider the following three scales, + # and the smallest image whose shortest edge is 400 + scale_list = [0.75, 0.5, 1 / 3] + shortest_edge = 400 + + path_list = sorted(glob.glob(os.path.join(args.input, '*'))) + for path in path_list: + print(path) + basename = os.path.splitext(os.path.basename(path))[0] + + img = Image.open(path) + width, height = img.size + for idx, scale in enumerate(scale_list): + print(f'\t{scale:.2f}') + rlt = img.resize((int(width * scale), int(height * scale)), resample=Image.LANCZOS) + rlt.save(os.path.join(args.output, f'{basename}T{idx}.png')) + + # save the smallest image which the shortest edge is 400 + if width < height: + ratio = height / width + width = shortest_edge + height = int(width * ratio) + else: + ratio = width / height + height = shortest_edge + width = int(height * ratio) + rlt = img.resize((int(width), int(height)), resample=Image.LANCZOS) + rlt.save(os.path.join(args.output, f'{basename}T{idx+1}.png')) + + +if __name__ == '__main__': + """Generate multi-scale versions for GT images with LANCZOS resampling. + It is now used for DF2K dataset (DIV2K + Flickr 2K) + """ + parser = argparse.ArgumentParser() + parser.add_argument('--input', type=str, default='datasets/DF2K/DF2K_HR', help='Input folder') + parser.add_argument('--output', type=str, default='datasets/DF2K/DF2K_multiscale', help='Output folder') + args = parser.parse_args() + + os.makedirs(args.output, exist_ok=True) + main(args) diff --git a/Real-ESRGAN/scripts/pytorch2onnx.py b/Real-ESRGAN/scripts/pytorch2onnx.py new file mode 100644 index 0000000000000000000000000000000000000000..5cd80cfb3efe9602fbd4d64f214988e7540165b1 --- /dev/null +++ b/Real-ESRGAN/scripts/pytorch2onnx.py @@ -0,0 +1,36 @@ +import argparse +import torch +import torch.onnx +from basicsr.archs.rrdbnet_arch import RRDBNet + + +def main(args): + # An instance of the model + model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4) + if args.params: + keyname = 'params' + else: + keyname = 'params_ema' + model.load_state_dict(torch.load(args.input)[keyname]) + # set the train mode to false since we will only run the forward pass. 
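+    # Note (comment only): the export below traces a fixed 1x3x64x64 example input,
+    # so the exported graph's input signature is fixed to that shape; supporting
+    # variable sizes would require passing ``dynamic_axes`` to torch.onnx.export,
+    # which this script does not do.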
+ model.train(False) + model.cpu().eval() + + # An example input + x = torch.rand(1, 3, 64, 64) + # Export the model + with torch.no_grad(): + torch_out = torch.onnx._export(model, x, args.output, opset_version=11, export_params=True) + print(torch_out.shape) + + +if __name__ == '__main__': + """Convert pytorch model to onnx models""" + parser = argparse.ArgumentParser() + parser.add_argument( + '--input', type=str, default='experiments/pretrained_models/RealESRGAN_x4plus.pth', help='Input model path') + parser.add_argument('--output', type=str, default='realesrgan-x4.onnx', help='Output onnx path') + parser.add_argument('--params', action='store_false', help='Use params instead of params_ema') + args = parser.parse_args() + + main(args) diff --git a/Real-ESRGAN/setup.cfg b/Real-ESRGAN/setup.cfg new file mode 100644 index 0000000000000000000000000000000000000000..02dc60311b0310fce17053fb2b4a700fa49a1056 --- /dev/null +++ b/Real-ESRGAN/setup.cfg @@ -0,0 +1,33 @@ +[flake8] +ignore = + # line break before binary operator (W503) + W503, + # line break after binary operator (W504) + W504, +max-line-length=120 + +[yapf] +based_on_style = pep8 +column_limit = 120 +blank_line_before_nested_class_or_def = true +split_before_expression_after_opening_paren = true + +[isort] +line_length = 120 +multi_line_output = 0 +known_standard_library = pkg_resources,setuptools +known_first_party = realesrgan +known_third_party = PIL,basicsr,cv2,numpy,pytest,torch,torchvision,tqdm,yaml +no_lines_before = STDLIB,LOCALFOLDER +default_section = THIRDPARTY + +[codespell] +skip = .git,./docs/build +count = +quiet-level = 3 + +[aliases] +test=pytest + +[tool:pytest] +addopts=tests/ diff --git a/Real-ESRGAN/setup.py b/Real-ESRGAN/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..f9d00bc3bb98ff2d39d3fe75c6c19fe673be2bcd --- /dev/null +++ b/Real-ESRGAN/setup.py @@ -0,0 +1,107 @@ +#!/usr/bin/env python + +from setuptools import find_packages, setup + +import os +import subprocess +import time + +version_file = 'realesrgan/version.py' + + +def readme(): + with open('README.md', encoding='utf-8') as f: + content = f.read() + return content + + +def get_git_hash(): + + def _minimal_ext_cmd(cmd): + # construct minimal environment + env = {} + for k in ['SYSTEMROOT', 'PATH', 'HOME']: + v = os.environ.get(k) + if v is not None: + env[k] = v + # LANGUAGE is used on win32 + env['LANGUAGE'] = 'C' + env['LANG'] = 'C' + env['LC_ALL'] = 'C' + out = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env).communicate()[0] + return out + + try: + out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD']) + sha = out.strip().decode('ascii') + except OSError: + sha = 'unknown' + + return sha + + +def get_hash(): + if os.path.exists('.git'): + sha = get_git_hash()[:7] + else: + sha = 'unknown' + + return sha + + +def write_version_py(): + content = """# GENERATED VERSION FILE +# TIME: {} +__version__ = '{}' +__gitsha__ = '{}' +version_info = ({}) +""" + sha = get_hash() + with open('VERSION', 'r') as f: + SHORT_VERSION = f.read().strip() + VERSION_INFO = ', '.join([x if x.isdigit() else f'"{x}"' for x in SHORT_VERSION.split('.')]) + + version_file_str = content.format(time.asctime(), SHORT_VERSION, sha, VERSION_INFO) + with open(version_file, 'w') as f: + f.write(version_file_str) + + +def get_version(): + with open(version_file, 'r') as f: + exec(compile(f.read(), version_file, 'exec')) + return locals()['__version__'] + + +def get_requirements(filename='requirements.txt'): + here = 
os.path.dirname(os.path.realpath(__file__)) + with open(os.path.join(here, filename), 'r') as f: + requires = [line.replace('\n', '') for line in f.readlines()] + return requires + + +if __name__ == '__main__': + write_version_py() + setup( + name='realesrgan', + version=get_version(), + description='Real-ESRGAN aims at developing Practical Algorithms for General Image Restoration', + long_description=readme(), + long_description_content_type='text/markdown', + author='Xintao Wang', + author_email='xintao.wang@outlook.com', + keywords='computer vision, pytorch, image restoration, super-resolution, esrgan, real-esrgan', + url='https://github.com/xinntao/Real-ESRGAN', + include_package_data=True, + packages=find_packages(exclude=('options', 'datasets', 'experiments', 'results', 'tb_logger', 'wandb')), + classifiers=[ + 'Development Status :: 4 - Beta', + 'License :: OSI Approved :: Apache Software License', + 'Operating System :: OS Independent', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + ], + license='BSD-3-Clause License', + setup_requires=['cython', 'numpy'], + install_requires=get_requirements(), + zip_safe=False) diff --git a/Real-ESRGAN/tests/data/gt.lmdb/data.mdb b/Real-ESRGAN/tests/data/gt.lmdb/data.mdb new file mode 100644 index 0000000000000000000000000000000000000000..0d23371db74063a4ec5b7941407f91aecdfa002a --- /dev/null +++ b/Real-ESRGAN/tests/data/gt.lmdb/data.mdb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6ad059ec1e8bc17d7ce4c4425fd5fa6004aaea1cd068f9544d48d6934bbd0ea4 +size 757760 diff --git a/Real-ESRGAN/tests/data/gt.lmdb/lock.mdb b/Real-ESRGAN/tests/data/gt.lmdb/lock.mdb new file mode 100644 index 0000000000000000000000000000000000000000..37b3f72fa44829db318abca1f9495d73d7d6e071 Binary files /dev/null and b/Real-ESRGAN/tests/data/gt.lmdb/lock.mdb differ diff --git a/Real-ESRGAN/tests/data/gt.lmdb/meta_info.txt b/Real-ESRGAN/tests/data/gt.lmdb/meta_info.txt new file mode 100644 index 0000000000000000000000000000000000000000..7d1f6a4461dde4deb7fa4b981bec5df3a3c6b70f --- /dev/null +++ b/Real-ESRGAN/tests/data/gt.lmdb/meta_info.txt @@ -0,0 +1,2 @@ +baboon.png (480,500,3) 1 +comic.png (360,240,3) 1 diff --git a/Real-ESRGAN/tests/data/gt/baboon.png b/Real-ESRGAN/tests/data/gt/baboon.png new file mode 100644 index 0000000000000000000000000000000000000000..6cdd28421e78e331539de53ce9b5f9c57208f5d9 --- /dev/null +++ b/Real-ESRGAN/tests/data/gt/baboon.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6e3458ee0eb0e1f2ed7e15ca886352cfb7f7072e9abf8fda97d11f158cd10a1b +size 544501 diff --git a/Real-ESRGAN/tests/data/gt/comic.png b/Real-ESRGAN/tests/data/gt/comic.png new file mode 100644 index 0000000000000000000000000000000000000000..d911efcca230c94021cf72d4c6f8ce25058c77ad --- /dev/null +++ b/Real-ESRGAN/tests/data/gt/comic.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:72d65ca500b0e99d3ef3e00ffdc9aed83263577f0315dbfe1d29e683492bb1dd +size 199679 diff --git a/Real-ESRGAN/tests/data/lq.lmdb/data.mdb b/Real-ESRGAN/tests/data/lq.lmdb/data.mdb new file mode 100644 index 0000000000000000000000000000000000000000..c0162153452f63afbc798e99bfcdc1a6866caa0a Binary files /dev/null and b/Real-ESRGAN/tests/data/lq.lmdb/data.mdb differ diff --git a/Real-ESRGAN/tests/data/lq.lmdb/lock.mdb b/Real-ESRGAN/tests/data/lq.lmdb/lock.mdb new file mode 100644 index 
0000000000000000000000000000000000000000..c3b69ed59644c8337389f82010234aab8f688b09 Binary files /dev/null and b/Real-ESRGAN/tests/data/lq.lmdb/lock.mdb differ diff --git a/Real-ESRGAN/tests/data/lq.lmdb/meta_info.txt b/Real-ESRGAN/tests/data/lq.lmdb/meta_info.txt new file mode 100644 index 0000000000000000000000000000000000000000..1abe5c1cf510e19f37423d5011e79272af3102fc --- /dev/null +++ b/Real-ESRGAN/tests/data/lq.lmdb/meta_info.txt @@ -0,0 +1,2 @@ +baboon.png (120,125,3) 1 +comic.png (80,60,3) 1 diff --git a/Real-ESRGAN/tests/data/lq/baboon.png b/Real-ESRGAN/tests/data/lq/baboon.png new file mode 100644 index 0000000000000000000000000000000000000000..bbd201245f3bb1736bc35820eb28f0d59eef766f Binary files /dev/null and b/Real-ESRGAN/tests/data/lq/baboon.png differ diff --git a/Real-ESRGAN/tests/data/lq/comic.png b/Real-ESRGAN/tests/data/lq/comic.png new file mode 100644 index 0000000000000000000000000000000000000000..c4e38ab76ecb80deb84fdc8f16f5afa009d95ddd Binary files /dev/null and b/Real-ESRGAN/tests/data/lq/comic.png differ diff --git a/Real-ESRGAN/tests/data/meta_info_gt.txt b/Real-ESRGAN/tests/data/meta_info_gt.txt new file mode 100644 index 0000000000000000000000000000000000000000..aa18deec3c1a5fcc169bffc84f92e8df8903640a --- /dev/null +++ b/Real-ESRGAN/tests/data/meta_info_gt.txt @@ -0,0 +1,2 @@ +baboon.png +comic.png diff --git a/Real-ESRGAN/tests/data/meta_info_pair.txt b/Real-ESRGAN/tests/data/meta_info_pair.txt new file mode 100644 index 0000000000000000000000000000000000000000..883afdc13aa18c7a39d247817dcd39696cc2adb6 --- /dev/null +++ b/Real-ESRGAN/tests/data/meta_info_pair.txt @@ -0,0 +1,2 @@ +gt/baboon.png, lq/baboon.png +gt/comic.png, lq/comic.png diff --git a/Real-ESRGAN/tests/data/test_realesrgan_dataset.yml b/Real-ESRGAN/tests/data/test_realesrgan_dataset.yml new file mode 100644 index 0000000000000000000000000000000000000000..83f4315ff4fc0c844b48b50aa2d51be1862f726b --- /dev/null +++ b/Real-ESRGAN/tests/data/test_realesrgan_dataset.yml @@ -0,0 +1,28 @@ +name: Demo +type: RealESRGANDataset +dataroot_gt: tests/data/gt +meta_info: tests/data/meta_info_gt.txt +io_backend: + type: disk + +blur_kernel_size: 21 +kernel_list: ['iso', 'aniso', 'generalized_iso', 'generalized_aniso', 'plateau_iso', 'plateau_aniso'] +kernel_prob: [0.45, 0.25, 0.12, 0.03, 0.12, 0.03] +sinc_prob: 1 +blur_sigma: [0.2, 3] +betag_range: [0.5, 4] +betap_range: [1, 2] + +blur_kernel_size2: 21 +kernel_list2: ['iso', 'aniso', 'generalized_iso', 'generalized_aniso', 'plateau_iso', 'plateau_aniso'] +kernel_prob2: [0.45, 0.25, 0.12, 0.03, 0.12, 0.03] +sinc_prob2: 1 +blur_sigma2: [0.2, 1.5] +betag_range2: [0.5, 4] +betap_range2: [1, 2] + +final_sinc_prob: 1 + +gt_size: 128 +use_hflip: True +use_rot: False diff --git a/Real-ESRGAN/tests/data/test_realesrgan_model.yml b/Real-ESRGAN/tests/data/test_realesrgan_model.yml new file mode 100644 index 0000000000000000000000000000000000000000..3969a707cec33d7975cb172990daaa3b45e1791b --- /dev/null +++ b/Real-ESRGAN/tests/data/test_realesrgan_model.yml @@ -0,0 +1,115 @@ +scale: 4 +num_gpu: 1 +manual_seed: 0 +is_train: True +dist: False + +# ----------------- options for synthesizing training data ----------------- # +# USM the ground-truth +l1_gt_usm: True +percep_gt_usm: True +gan_gt_usm: False + +# the first degradation process +resize_prob: [0.2, 0.7, 0.1] # up, down, keep +resize_range: [0.15, 1.5] +gaussian_noise_prob: 1 +noise_range: [1, 30] +poisson_scale_range: [0.05, 3] +gray_noise_prob: 1 +jpeg_range: [30, 95] + +# the second degradation process 
+second_blur_prob: 1 +resize_prob2: [0.3, 0.4, 0.3] # up, down, keep +resize_range2: [0.3, 1.2] +gaussian_noise_prob2: 1 +noise_range2: [1, 25] +poisson_scale_range2: [0.05, 2.5] +gray_noise_prob2: 1 +jpeg_range2: [30, 95] + +gt_size: 32 +queue_size: 1 + +# network structures +network_g: + type: RRDBNet + num_in_ch: 3 + num_out_ch: 3 + num_feat: 4 + num_block: 1 + num_grow_ch: 2 + +network_d: + type: UNetDiscriminatorSN + num_in_ch: 3 + num_feat: 2 + skip_connection: True + +# path +path: + pretrain_network_g: ~ + param_key_g: params_ema + strict_load_g: true + resume_state: ~ + +# training settings +train: + ema_decay: 0.999 + optim_g: + type: Adam + lr: !!float 1e-4 + weight_decay: 0 + betas: [0.9, 0.99] + optim_d: + type: Adam + lr: !!float 1e-4 + weight_decay: 0 + betas: [0.9, 0.99] + + scheduler: + type: MultiStepLR + milestones: [400000] + gamma: 0.5 + + total_iter: 400000 + warmup_iter: -1 # no warm up + + # losses + pixel_opt: + type: L1Loss + loss_weight: 1.0 + reduction: mean + # perceptual loss (content and style losses) + perceptual_opt: + type: PerceptualLoss + layer_weights: + # before relu + 'conv1_2': 0.1 + 'conv2_2': 0.1 + 'conv3_4': 1 + 'conv4_4': 1 + 'conv5_4': 1 + vgg_type: vgg19 + use_input_norm: true + perceptual_weight: !!float 1.0 + style_weight: 0 + range_norm: false + criterion: l1 + # gan loss + gan_opt: + type: GANLoss + gan_type: vanilla + real_label_val: 1.0 + fake_label_val: 0.0 + loss_weight: !!float 1e-1 + + net_d_iters: 1 + net_d_init_iters: 0 + + +# validation settings +val: + val_freq: !!float 5e3 + save_img: False diff --git a/Real-ESRGAN/tests/data/test_realesrgan_paired_dataset.yml b/Real-ESRGAN/tests/data/test_realesrgan_paired_dataset.yml new file mode 100644 index 0000000000000000000000000000000000000000..c80a3540e3df1c4d6239d8aadca54fd9060a4acf --- /dev/null +++ b/Real-ESRGAN/tests/data/test_realesrgan_paired_dataset.yml @@ -0,0 +1,13 @@ +name: Demo +type: RealESRGANPairedDataset +scale: 4 +dataroot_gt: tests/data +dataroot_lq: tests/data +meta_info: tests/data/meta_info_pair.txt +io_backend: + type: disk + +phase: train +gt_size: 128 +use_hflip: True +use_rot: False diff --git a/Real-ESRGAN/tests/data/test_realesrnet_model.yml b/Real-ESRGAN/tests/data/test_realesrnet_model.yml new file mode 100644 index 0000000000000000000000000000000000000000..4e0d42c1ed2efe60c9b0d1d7726cae8858c9aea2 --- /dev/null +++ b/Real-ESRGAN/tests/data/test_realesrnet_model.yml @@ -0,0 +1,75 @@ +scale: 4 +num_gpu: 1 +manual_seed: 0 +is_train: True +dist: False + +# ----------------- options for synthesizing training data ----------------- # +gt_usm: True # USM the ground-truth + +# the first degradation process +resize_prob: [0.2, 0.7, 0.1] # up, down, keep +resize_range: [0.15, 1.5] +gaussian_noise_prob: 1 +noise_range: [1, 30] +poisson_scale_range: [0.05, 3] +gray_noise_prob: 1 +jpeg_range: [30, 95] + +# the second degradation process +second_blur_prob: 1 +resize_prob2: [0.3, 0.4, 0.3] # up, down, keep +resize_range2: [0.3, 1.2] +gaussian_noise_prob2: 1 +noise_range2: [1, 25] +poisson_scale_range2: [0.05, 2.5] +gray_noise_prob2: 1 +jpeg_range2: [30, 95] + +gt_size: 32 +queue_size: 1 + +# network structures +network_g: + type: RRDBNet + num_in_ch: 3 + num_out_ch: 3 + num_feat: 4 + num_block: 1 + num_grow_ch: 2 + +# path +path: + pretrain_network_g: ~ + param_key_g: params_ema + strict_load_g: true + resume_state: ~ + +# training settings +train: + ema_decay: 0.999 + optim_g: + type: Adam + lr: !!float 2e-4 + weight_decay: 0 + betas: [0.9, 0.99] + + scheduler: + type: 
MultiStepLR + milestones: [1000000] + gamma: 0.5 + + total_iter: 1000000 + warmup_iter: -1 # no warm up + + # losses + pixel_opt: + type: L1Loss + loss_weight: 1.0 + reduction: mean + + +# validation settings +val: + val_freq: !!float 5e3 + save_img: False diff --git a/Real-ESRGAN/tests/test_dataset.py b/Real-ESRGAN/tests/test_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..ff00d7301584be21b44314cb5d31ce1842c0c61f --- /dev/null +++ b/Real-ESRGAN/tests/test_dataset.py @@ -0,0 +1,151 @@ +import pytest +import yaml + +from realesrgan.data.realesrgan_dataset import RealESRGANDataset +from realesrgan.data.realesrgan_paired_dataset import RealESRGANPairedDataset + + +def test_realesrgan_dataset(): + + with open('tests/data/test_realesrgan_dataset.yml', mode='r') as f: + opt = yaml.load(f, Loader=yaml.FullLoader) + + dataset = RealESRGANDataset(opt) + assert dataset.io_backend_opt['type'] == 'disk' # io backend + assert len(dataset) == 2 # whether to read correct meta info + assert dataset.kernel_list == [ + 'iso', 'aniso', 'generalized_iso', 'generalized_aniso', 'plateau_iso', 'plateau_aniso' + ] # correct initialization the degradation configurations + assert dataset.betag_range2 == [0.5, 4] + + # test __getitem__ + result = dataset.__getitem__(0) + # check returned keys + expected_keys = ['gt', 'kernel1', 'kernel2', 'sinc_kernel', 'gt_path'] + assert set(expected_keys).issubset(set(result.keys())) + # check shape and contents + assert result['gt'].shape == (3, 400, 400) + assert result['kernel1'].shape == (21, 21) + assert result['kernel2'].shape == (21, 21) + assert result['sinc_kernel'].shape == (21, 21) + assert result['gt_path'] == 'tests/data/gt/baboon.png' + + # ------------------ test lmdb backend -------------------- # + opt['dataroot_gt'] = 'tests/data/gt.lmdb' + opt['io_backend']['type'] = 'lmdb' + + dataset = RealESRGANDataset(opt) + assert dataset.io_backend_opt['type'] == 'lmdb' # io backend + assert len(dataset.paths) == 2 # whether to read correct meta info + assert dataset.kernel_list == [ + 'iso', 'aniso', 'generalized_iso', 'generalized_aniso', 'plateau_iso', 'plateau_aniso' + ] # correct initialization the degradation configurations + assert dataset.betag_range2 == [0.5, 4] + + # test __getitem__ + result = dataset.__getitem__(1) + # check returned keys + expected_keys = ['gt', 'kernel1', 'kernel2', 'sinc_kernel', 'gt_path'] + assert set(expected_keys).issubset(set(result.keys())) + # check shape and contents + assert result['gt'].shape == (3, 400, 400) + assert result['kernel1'].shape == (21, 21) + assert result['kernel2'].shape == (21, 21) + assert result['sinc_kernel'].shape == (21, 21) + assert result['gt_path'] == 'comic' + + # ------------------ test with sinc_prob = 0 -------------------- # + opt['dataroot_gt'] = 'tests/data/gt.lmdb' + opt['io_backend']['type'] = 'lmdb' + opt['sinc_prob'] = 0 + opt['sinc_prob2'] = 0 + opt['final_sinc_prob'] = 0 + dataset = RealESRGANDataset(opt) + result = dataset.__getitem__(0) + # check returned keys + expected_keys = ['gt', 'kernel1', 'kernel2', 'sinc_kernel', 'gt_path'] + assert set(expected_keys).issubset(set(result.keys())) + # check shape and contents + assert result['gt'].shape == (3, 400, 400) + assert result['kernel1'].shape == (21, 21) + assert result['kernel2'].shape == (21, 21) + assert result['sinc_kernel'].shape == (21, 21) + assert result['gt_path'] == 'baboon' + + # ------------------ lmdb backend should have paths ends with lmdb -------------------- # + with pytest.raises(ValueError): 
+ opt['dataroot_gt'] = 'tests/data/gt' + opt['io_backend']['type'] = 'lmdb' + dataset = RealESRGANDataset(opt) + + +def test_realesrgan_paired_dataset(): + + with open('tests/data/test_realesrgan_paired_dataset.yml', mode='r') as f: + opt = yaml.load(f, Loader=yaml.FullLoader) + + dataset = RealESRGANPairedDataset(opt) + assert dataset.io_backend_opt['type'] == 'disk' # io backend + assert len(dataset) == 2 # whether to read correct meta info + + # test __getitem__ + result = dataset.__getitem__(0) + # check returned keys + expected_keys = ['gt', 'lq', 'gt_path', 'lq_path'] + assert set(expected_keys).issubset(set(result.keys())) + # check shape and contents + assert result['gt'].shape == (3, 128, 128) + assert result['lq'].shape == (3, 32, 32) + assert result['gt_path'] == 'tests/data/gt/baboon.png' + assert result['lq_path'] == 'tests/data/lq/baboon.png' + + # ------------------ test lmdb backend -------------------- # + opt['dataroot_gt'] = 'tests/data/gt.lmdb' + opt['dataroot_lq'] = 'tests/data/lq.lmdb' + opt['io_backend']['type'] = 'lmdb' + + dataset = RealESRGANPairedDataset(opt) + assert dataset.io_backend_opt['type'] == 'lmdb' # io backend + assert len(dataset) == 2 # whether to read correct meta info + + # test __getitem__ + result = dataset.__getitem__(1) + # check returned keys + expected_keys = ['gt', 'lq', 'gt_path', 'lq_path'] + assert set(expected_keys).issubset(set(result.keys())) + # check shape and contents + assert result['gt'].shape == (3, 128, 128) + assert result['lq'].shape == (3, 32, 32) + assert result['gt_path'] == 'comic' + assert result['lq_path'] == 'comic' + + # ------------------ test paired_paths_from_folder -------------------- # + opt['dataroot_gt'] = 'tests/data/gt' + opt['dataroot_lq'] = 'tests/data/lq' + opt['io_backend'] = dict(type='disk') + opt['meta_info'] = None + + dataset = RealESRGANPairedDataset(opt) + assert dataset.io_backend_opt['type'] == 'disk' # io backend + assert len(dataset) == 2 # whether to read correct meta info + + # test __getitem__ + result = dataset.__getitem__(0) + # check returned keys + expected_keys = ['gt', 'lq', 'gt_path', 'lq_path'] + assert set(expected_keys).issubset(set(result.keys())) + # check shape and contents + assert result['gt'].shape == (3, 128, 128) + assert result['lq'].shape == (3, 32, 32) + + # ------------------ test normalization -------------------- # + dataset.mean = [0.5, 0.5, 0.5] + dataset.std = [0.5, 0.5, 0.5] + # test __getitem__ + result = dataset.__getitem__(0) + # check returned keys + expected_keys = ['gt', 'lq', 'gt_path', 'lq_path'] + assert set(expected_keys).issubset(set(result.keys())) + # check shape and contents + assert result['gt'].shape == (3, 128, 128) + assert result['lq'].shape == (3, 32, 32) diff --git a/Real-ESRGAN/tests/test_discriminator_arch.py b/Real-ESRGAN/tests/test_discriminator_arch.py new file mode 100644 index 0000000000000000000000000000000000000000..51838642c269edd32956d90befe7dc521f3d0ddb --- /dev/null +++ b/Real-ESRGAN/tests/test_discriminator_arch.py @@ -0,0 +1,19 @@ +import torch + +from realesrgan.archs.discriminator_arch import UNetDiscriminatorSN + + +def test_unetdiscriminatorsn(): + """Test arch: UNetDiscriminatorSN.""" + + # model init and forward (cpu) + net = UNetDiscriminatorSN(num_in_ch=3, num_feat=4, skip_connection=True) + img = torch.rand((1, 3, 32, 32), dtype=torch.float32) + output = net(img) + assert output.shape == (1, 1, 32, 32) + + # model init and forward (gpu) + if torch.cuda.is_available(): + net.cuda() + output = net(img.cuda()) + assert 
output.shape == (1, 1, 32, 32) diff --git a/Real-ESRGAN/tests/test_model.py b/Real-ESRGAN/tests/test_model.py new file mode 100644 index 0000000000000000000000000000000000000000..51d8b1498368fdbc9c08c110a5795a91a2a1f678 --- /dev/null +++ b/Real-ESRGAN/tests/test_model.py @@ -0,0 +1,126 @@ +import torch +import yaml +from basicsr.archs.rrdbnet_arch import RRDBNet +from basicsr.data.paired_image_dataset import PairedImageDataset +from basicsr.losses.losses import GANLoss, L1Loss, PerceptualLoss + +from realesrgan.archs.discriminator_arch import UNetDiscriminatorSN +from realesrgan.models.realesrgan_model import RealESRGANModel +from realesrgan.models.realesrnet_model import RealESRNetModel + + +def test_realesrnet_model(): + with open('tests/data/test_realesrnet_model.yml', mode='r') as f: + opt = yaml.load(f, Loader=yaml.FullLoader) + + # build model + model = RealESRNetModel(opt) + # test attributes + assert model.__class__.__name__ == 'RealESRNetModel' + assert isinstance(model.net_g, RRDBNet) + assert isinstance(model.cri_pix, L1Loss) + assert isinstance(model.optimizers[0], torch.optim.Adam) + + # prepare data + gt = torch.rand((1, 3, 32, 32), dtype=torch.float32) + kernel1 = torch.rand((1, 5, 5), dtype=torch.float32) + kernel2 = torch.rand((1, 5, 5), dtype=torch.float32) + sinc_kernel = torch.rand((1, 5, 5), dtype=torch.float32) + data = dict(gt=gt, kernel1=kernel1, kernel2=kernel2, sinc_kernel=sinc_kernel) + model.feed_data(data) + # check dequeue + model.feed_data(data) + # check data shape + assert model.lq.shape == (1, 3, 8, 8) + assert model.gt.shape == (1, 3, 32, 32) + + # change probability to test if-else + model.opt['gaussian_noise_prob'] = 0 + model.opt['gray_noise_prob'] = 0 + model.opt['second_blur_prob'] = 0 + model.opt['gaussian_noise_prob2'] = 0 + model.opt['gray_noise_prob2'] = 0 + model.feed_data(data) + # check data shape + assert model.lq.shape == (1, 3, 8, 8) + assert model.gt.shape == (1, 3, 32, 32) + + # ----------------- test nondist_validation -------------------- # + # construct dataloader + dataset_opt = dict( + name='Demo', + dataroot_gt='tests/data/gt', + dataroot_lq='tests/data/lq', + io_backend=dict(type='disk'), + scale=4, + phase='val') + dataset = PairedImageDataset(dataset_opt) + dataloader = torch.utils.data.DataLoader(dataset=dataset, batch_size=1, shuffle=False, num_workers=0) + assert model.is_train is True + model.nondist_validation(dataloader, 1, None, False) + assert model.is_train is True + + +def test_realesrgan_model(): + with open('tests/data/test_realesrgan_model.yml', mode='r') as f: + opt = yaml.load(f, Loader=yaml.FullLoader) + + # build model + model = RealESRGANModel(opt) + # test attributes + assert model.__class__.__name__ == 'RealESRGANModel' + assert isinstance(model.net_g, RRDBNet) # generator + assert isinstance(model.net_d, UNetDiscriminatorSN) # discriminator + assert isinstance(model.cri_pix, L1Loss) + assert isinstance(model.cri_perceptual, PerceptualLoss) + assert isinstance(model.cri_gan, GANLoss) + assert isinstance(model.optimizers[0], torch.optim.Adam) + assert isinstance(model.optimizers[1], torch.optim.Adam) + + # prepare data + gt = torch.rand((1, 3, 32, 32), dtype=torch.float32) + kernel1 = torch.rand((1, 5, 5), dtype=torch.float32) + kernel2 = torch.rand((1, 5, 5), dtype=torch.float32) + sinc_kernel = torch.rand((1, 5, 5), dtype=torch.float32) + data = dict(gt=gt, kernel1=kernel1, kernel2=kernel2, sinc_kernel=sinc_kernel) + model.feed_data(data) + # check dequeue + model.feed_data(data) + # check data shape + assert 
model.lq.shape == (1, 3, 8, 8) + assert model.gt.shape == (1, 3, 32, 32) + + # change probability to test if-else + model.opt['gaussian_noise_prob'] = 0 + model.opt['gray_noise_prob'] = 0 + model.opt['second_blur_prob'] = 0 + model.opt['gaussian_noise_prob2'] = 0 + model.opt['gray_noise_prob2'] = 0 + model.feed_data(data) + # check data shape + assert model.lq.shape == (1, 3, 8, 8) + assert model.gt.shape == (1, 3, 32, 32) + + # ----------------- test nondist_validation -------------------- # + # construct dataloader + dataset_opt = dict( + name='Demo', + dataroot_gt='tests/data/gt', + dataroot_lq='tests/data/lq', + io_backend=dict(type='disk'), + scale=4, + phase='val') + dataset = PairedImageDataset(dataset_opt) + dataloader = torch.utils.data.DataLoader(dataset=dataset, batch_size=1, shuffle=False, num_workers=0) + assert model.is_train is True + model.nondist_validation(dataloader, 1, None, False) + assert model.is_train is True + + # ----------------- test optimize_parameters -------------------- # + model.feed_data(data) + model.optimize_parameters(1) + assert model.output.shape == (1, 3, 32, 32) + assert isinstance(model.log_dict, dict) + # check returned keys + expected_keys = ['l_g_pix', 'l_g_percep', 'l_g_gan', 'l_d_real', 'out_d_real', 'l_d_fake', 'out_d_fake'] + assert set(expected_keys).issubset(set(model.log_dict.keys())) diff --git a/Real-ESRGAN/tests/test_utils.py b/Real-ESRGAN/tests/test_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..c37fe06b0fdbbc7b5f87c98434d3d990baae5335 --- /dev/null +++ b/Real-ESRGAN/tests/test_utils.py @@ -0,0 +1,87 @@ +import numpy as np +from basicsr.archs.rrdbnet_arch import RRDBNet + +from realesrgan.utils import RealESRGANer + + +def test_realesrganer(): + # initialize with default model + restorer = RealESRGANer( + scale=4, + model_path='experiments/pretrained_models/RealESRGAN_x4plus.pth', + model=None, + tile=10, + tile_pad=10, + pre_pad=2, + half=False) + assert isinstance(restorer.model, RRDBNet) + assert restorer.half is False + # initialize with user-defined model + model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4) + restorer = RealESRGANer( + scale=4, + model_path='experiments/pretrained_models/RealESRGAN_x4plus_anime_6B.pth', + model=model, + tile=10, + tile_pad=10, + pre_pad=2, + half=True) + # test attribute + assert isinstance(restorer.model, RRDBNet) + assert restorer.half is True + + # ------------------ test pre_process ---------------- # + img = np.random.random((12, 12, 3)).astype(np.float32) + restorer.pre_process(img) + assert restorer.img.shape == (1, 3, 14, 14) + # with modcrop + restorer.scale = 1 + restorer.pre_process(img) + assert restorer.img.shape == (1, 3, 16, 16) + + # ------------------ test process ---------------- # + restorer.process() + assert restorer.output.shape == (1, 3, 64, 64) + + # ------------------ test post_process ---------------- # + restorer.mod_scale = 4 + output = restorer.post_process() + assert output.shape == (1, 3, 60, 60) + + # ------------------ test tile_process ---------------- # + restorer.scale = 4 + img = np.random.random((12, 12, 3)).astype(np.float32) + restorer.pre_process(img) + restorer.tile_process() + assert restorer.output.shape == (1, 3, 64, 64) + + # ------------------ test enhance ---------------- # + img = np.random.random((12, 12, 3)).astype(np.float32) + result = restorer.enhance(img, outscale=2) + assert result[0].shape == (24, 24, 3) + assert result[1] == 'RGB' + + # ------------------ test 
enhance with 16-bit image---------------- # + img = np.random.random((4, 4, 3)).astype(np.uint16) + 512 + result = restorer.enhance(img, outscale=2) + assert result[0].shape == (8, 8, 3) + assert result[1] == 'RGB' + + # ------------------ test enhance with gray image---------------- # + img = np.random.random((4, 4)).astype(np.float32) + result = restorer.enhance(img, outscale=2) + assert result[0].shape == (8, 8) + assert result[1] == 'L' + + # ------------------ test enhance with RGBA---------------- # + img = np.random.random((4, 4, 4)).astype(np.float32) + result = restorer.enhance(img, outscale=2) + assert result[0].shape == (8, 8, 4) + assert result[1] == 'RGBA' + + # ------------------ test enhance with RGBA, alpha_upsampler---------------- # + restorer.tile_size = 0 + img = np.random.random((4, 4, 4)).astype(np.float32) + result = restorer.enhance(img, outscale=2, alpha_upsampler=None) + assert result[0].shape == (8, 8, 4) + assert result[1] == 'RGBA' diff --git a/Real-ESRGAN/weights/README.md b/Real-ESRGAN/weights/README.md new file mode 100644 index 0000000000000000000000000000000000000000..7b1294617509bcc8ee2ad17785bf5b63964412bc --- /dev/null +++ b/Real-ESRGAN/weights/README.md @@ -0,0 +1,3 @@ +# Weights + +Put the downloaded weights to this folder. diff --git a/Status.py b/Status.py new file mode 100644 index 0000000000000000000000000000000000000000..295a2a92bc64e02545e245c99d0c99766b480176 --- /dev/null +++ b/Status.py @@ -0,0 +1,52 @@ +import psutil +import time +import GPUtil +from rich.live import Live +from rich.progress import Progress, BarColumn, ProgressColumn +from rich.console import Console +from rich.text import Text + +class PercentageColumn(ProgressColumn): + def render(self, task): + return Text(f"{task.percentage:>5.1f}%") + +def main(): + console = Console() + progress = Progress( + "[progress.description]{task.description}", + BarColumn(bar_width=None), + PercentageColumn(), + ) + + # Create tasks for CPU and GPUs + cpu_task = progress.add_task("CPU Utilization", total=100) + gpu_tasks = [] + gpus = GPUtil.getGPUs() + for gpu in gpus: + gpu_task = progress.add_task(f"GPU {gpu.id} Utilization", total=100) + vram_task = progress.add_task(f"GPU {gpu.id} VRAM Usage", total=100) + gpu_tasks.append((gpu.id, gpu_task, vram_task)) + + with Live(progress, refresh_per_second=10): + try: + while True: + # Update CPU utilization + cpu_utilization = psutil.cpu_percent(interval=None) + progress.update(cpu_task, completed=cpu_utilization) + + # Update GPU info + gpus = GPUtil.getGPUs() + for gpu in gpus: + gpu_util = gpu.load * 100 + mem_util = gpu.memoryUtil * 100 + for gid, gpu_task_id, vram_task_id in gpu_tasks: + if gid == gpu.id: + progress.update(gpu_task_id, completed=gpu_util) + progress.update(vram_task_id, completed=mem_util) + + time.sleep(0.1) + except KeyboardInterrupt: + pass + +if __name__ == '__main__': + main() diff --git a/SwitcherAI/__init__.py b/SwitcherAI/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SwitcherAI/__pycache__/__init__.cpython-311.pyc b/SwitcherAI/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..88c18e9a860e5b00cf41e3abd1d8cd88d97d6aaf Binary files /dev/null and b/SwitcherAI/__pycache__/__init__.cpython-311.pyc differ diff --git a/SwitcherAI/__pycache__/choices.cpython-311.pyc b/SwitcherAI/__pycache__/choices.cpython-311.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..083e4efeb57c7e4208c93b1b376e87e6774b07cd Binary files /dev/null and b/SwitcherAI/__pycache__/choices.cpython-311.pyc differ diff --git a/SwitcherAI/__pycache__/core.cpython-311.pyc b/SwitcherAI/__pycache__/core.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b7a78dc2c816104abd0b30fc5e19e5bb499642e1 Binary files /dev/null and b/SwitcherAI/__pycache__/core.cpython-311.pyc differ diff --git a/SwitcherAI/__pycache__/face_analyser.cpython-311.pyc b/SwitcherAI/__pycache__/face_analyser.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c12ebb7afdb1abf6d3c37a03c4aa37dd7af5cbde Binary files /dev/null and b/SwitcherAI/__pycache__/face_analyser.cpython-311.pyc differ diff --git a/SwitcherAI/__pycache__/face_reference.cpython-311.pyc b/SwitcherAI/__pycache__/face_reference.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..795395ba0f33d024f0a5980998cc746e2fda713b Binary files /dev/null and b/SwitcherAI/__pycache__/face_reference.cpython-311.pyc differ diff --git a/SwitcherAI/__pycache__/globals.cpython-311.pyc b/SwitcherAI/__pycache__/globals.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5fb76b30b67488362573665ab1f5f8738c598422 Binary files /dev/null and b/SwitcherAI/__pycache__/globals.cpython-311.pyc differ diff --git a/SwitcherAI/__pycache__/metadata.cpython-311.pyc b/SwitcherAI/__pycache__/metadata.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f366cc9a40a1607f63296a9747310dbc85c35f20 Binary files /dev/null and b/SwitcherAI/__pycache__/metadata.cpython-311.pyc differ diff --git a/SwitcherAI/__pycache__/predictor.cpython-311.pyc b/SwitcherAI/__pycache__/predictor.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c6665a859a6cf3c40aa27a2b4b4e411003448cab Binary files /dev/null and b/SwitcherAI/__pycache__/predictor.cpython-311.pyc differ diff --git a/SwitcherAI/__pycache__/typing.cpython-311.pyc b/SwitcherAI/__pycache__/typing.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b2490e0f33cc8301314afe27dc73e5dab499faef Binary files /dev/null and b/SwitcherAI/__pycache__/typing.cpython-311.pyc differ diff --git a/SwitcherAI/__pycache__/utilities.cpython-311.pyc b/SwitcherAI/__pycache__/utilities.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b66aa64b63c375264901b09a4cfeef104c842bc7 Binary files /dev/null and b/SwitcherAI/__pycache__/utilities.cpython-311.pyc differ diff --git a/SwitcherAI/__pycache__/wording.cpython-311.pyc b/SwitcherAI/__pycache__/wording.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bfca97db9bd8bec8bad19bd9dfb78aa73781e8ac Binary files /dev/null and b/SwitcherAI/__pycache__/wording.cpython-311.pyc differ diff --git a/SwitcherAI/capturer.py b/SwitcherAI/capturer.py new file mode 100644 index 0000000000000000000000000000000000000000..ebdda023ce38fcce2867cb9231fe28e70b62b3c2 --- /dev/null +++ b/SwitcherAI/capturer.py @@ -0,0 +1,22 @@ +from typing import Optional +import cv2 + +from SwitcherAI.typing import Frame + + +def get_video_frame(video_path : str, frame_number : int = 0) -> Optional[Frame]: + capture = cv2.VideoCapture(video_path) + frame_total = capture.get(cv2.CAP_PROP_FRAME_COUNT) + capture.set(cv2.CAP_PROP_POS_FRAMES, min(frame_total, frame_number - 1)) + has_frame, frame = capture.read() + capture.release() + if 
has_frame: + return frame + return None + + +def get_video_frame_total(video_path : str) -> int: + capture = cv2.VideoCapture(video_path) + video_frame_total = int(capture.get(cv2.CAP_PROP_FRAME_COUNT)) + capture.release() + return video_frame_total diff --git a/SwitcherAI/choices.py b/SwitcherAI/choices.py new file mode 100644 index 0000000000000000000000000000000000000000..9f2230fefa60b66afc09bfa39429a478d141b255 --- /dev/null +++ b/SwitcherAI/choices.py @@ -0,0 +1,13 @@ +from typing import List + +from SwitcherAI.typing import FaceRecognition, FaceAnalyserDirection, FaceAnalyserAge, FaceAnalyserGender, TempFrameFormat, OutputVideoEncoder, LipSyncerModel + +face_recognition : List[FaceRecognition] = [ 'reference', 'many' ] +face_analyser_direction : List[FaceAnalyserDirection] = [ 'left-right', 'right-left', 'top-bottom', 'bottom-top', 'small-large', 'large-small'] +face_analyser_age : List[FaceAnalyserAge] = [ 'child', 'teen', 'adult', 'senior' ] +face_analyser_gender : List[FaceAnalyserGender] = [ 'male', 'female' ] +temp_frame_format : List[TempFrameFormat] = [ 'jpg', 'png' ] +output_video_encoder : List[OutputVideoEncoder] = [ 'libx264', 'libx265', 'libvpx-vp9', 'h264_nvenc', 'hevc_nvenc' ] + +# Lip syncer model choices +lip_syncer_models : List[LipSyncerModel] = [ 'wav2lip_96', 'wav2lip_gan_96' ] \ No newline at end of file diff --git a/SwitcherAI/core.py b/SwitcherAI/core.py new file mode 100644 index 0000000000000000000000000000000000000000..f734692ac35e8deba7fa3057efb9a4e9debfd069 --- /dev/null +++ b/SwitcherAI/core.py @@ -0,0 +1,348 @@ +#!/usr/bin/env python3 +import sqlite3 +import os +# single thread doubles cuda performance +os.environ['OMP_NUM_THREADS'] = '1' +# reduce tensorflow log level +os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' +import sys +import warnings +from typing import List +import platform +import signal +import shutil +import argparse +from time import time +import onnxruntime +import tensorflow + +import SwitcherAI.choices +import SwitcherAI.globals +from SwitcherAI import wording, metadata +from SwitcherAI.predictor import predict_image, predict_video +from SwitcherAI.processors.frame.core import get_frame_processors_modules +from SwitcherAI.utilities import is_image, is_video, detect_fps, create_video, extract_frames, get_temp_frame_paths, restore_audio, create_temp, move_temp, clear_temp, normalize_output_path, list_module_names, decode_execution_providers, encode_execution_providers + +warnings.filterwarnings('ignore', category = FutureWarning, module = 'insightface') +warnings.filterwarnings('ignore', category = UserWarning, module = 'torchvision') + + +def parse_args() -> None: + signal.signal(signal.SIGINT, lambda signal_number, frame: destroy()) + program = argparse.ArgumentParser(formatter_class = lambda prog: argparse.HelpFormatter(prog, max_help_position = 120)) + program.add_argument('-s', '--source', help = wording.get('source_help'), dest = 'source_path') + program.add_argument('-t', '--target', help = wording.get('target_help'), dest = 'target_path') + program.add_argument('-o', '--output', help = wording.get('output_help'), dest = 'output_path') + program.add_argument('--frame-processors', help = wording.get('frame_processors_help').format(choices = ', '.join(list_module_names('SwitcherAI/processors/frame/modules'))), dest = 'frame_processors', default = ['face_swapper'], nargs='+') + program.add_argument('--ui-layouts', help = wording.get('ui_layouts_help').format(choices = ', '.join(list_module_names('SwitcherAI/uis/layouts'))), dest = 
'ui_layouts', default = ['default'], nargs='+') + program.add_argument('--keep-fps', help = wording.get('keep_fps_help'), dest = 'keep_fps', action='store_true') + program.add_argument('--keep-temp', help = wording.get('keep_temp_help'), dest = 'keep_temp', action='store_true') + program.add_argument('--skip-audio', help = wording.get('skip_audio_help'), dest = 'skip_audio', action='store_true') + program.add_argument('--face-recognition', help = wording.get('face_recognition_help'), dest = 'face_recognition', default = 'reference', choices = SwitcherAI.choices.face_recognition) + program.add_argument('--face-analyser-direction', help = wording.get('face_analyser_direction_help'), dest = 'face_analyser_direction', default = 'left-right', choices = SwitcherAI.choices.face_analyser_direction) + program.add_argument('--face-analyser-age', help = wording.get('face_analyser_age_help'), dest = 'face_analyser_age', choices = SwitcherAI.choices.face_analyser_age) + program.add_argument('--face-analyser-gender', help = wording.get('face_analyser_gender_help'), dest = 'face_analyser_gender', choices = SwitcherAI.choices.face_analyser_gender) + program.add_argument('--reference-face-position', help = wording.get('reference_face_position_help'), dest = 'reference_face_position', type = int, default = 0) + program.add_argument('--reference-face-distance', help = wording.get('reference_face_distance_help'), dest = 'reference_face_distance', type = float, default = 1.5) + program.add_argument('--reference-frame-number', help = wording.get('reference_frame_number_help'), dest = 'reference_frame_number', type = int, default = 0) + program.add_argument('--trim-frame-start', help = wording.get('trim_frame_start_help'), dest = 'trim_frame_start', type = int) + program.add_argument('--trim-frame-end', help = wording.get('trim_frame_end_help'), dest = 'trim_frame_end', type = int) + program.add_argument('--temp-frame-format', help = wording.get('temp_frame_format_help'), dest = 'temp_frame_format', default = 'jpg', choices = SwitcherAI.choices.temp_frame_format) + program.add_argument('--temp-frame-quality', help = wording.get('temp_frame_quality_help'), dest = 'temp_frame_quality', type = int, default = 100, choices = range(101), metavar = '[0-100]') + program.add_argument('--output-video-encoder', help = wording.get('output_video_encoder_help'), dest = 'output_video_encoder', default = 'libx264', choices = SwitcherAI.choices.output_video_encoder) + program.add_argument('--output-video-quality', help = wording.get('output_video_quality_help'), dest = 'output_video_quality', type = int, default = 100, choices = range(101), metavar = '[0-100]') + program.add_argument('--max-memory', help = wording.get('max_memory_help'), dest = 'max_memory', type = int) + program.add_argument('--execution-providers', help = wording.get('execution_providers_help').format(choices = 'cpu'), dest = 'execution_providers', default = ['cpu'], choices = suggest_execution_providers_choices(), nargs='+') + program.add_argument('--execution-thread-count', help = wording.get('execution_thread_count_help'), dest = 'execution_thread_count', type = int, default = suggest_execution_thread_count_default()) + program.add_argument('--execution-queue-count', help = wording.get('execution_queue_count_help'), dest = 'execution_queue_count', type = int, default = 1) + + # Lip sync arguments + program.add_argument('--source-paths', help = 'Source paths including audio files for lip sync', dest = 'source_paths', nargs='+') + 
program.add_argument('--lip-syncer-model', help = 'Lip syncer model to use', dest = 'lip_syncer_model', default = 'wav2lip_gan_96', choices = ['wav2lip_96', 'wav2lip_gan_96']) + + program.add_argument('-v', '--version', action='version', version = metadata.get('name') + ' ' + metadata.get('version')) + + args = program.parse_args() + + SwitcherAI.globals.source_path = args.source_path + SwitcherAI.globals.target_path = args.target_path + SwitcherAI.globals.output_path = normalize_output_path(SwitcherAI.globals.source_path, SwitcherAI.globals.target_path, args.output_path) + SwitcherAI.globals.headless = SwitcherAI.globals.source_path is not None and SwitcherAI.globals.target_path is not None and SwitcherAI.globals.output_path is not None + SwitcherAI.globals.frame_processors = args.frame_processors + SwitcherAI.globals.ui_layouts = args.ui_layouts + SwitcherAI.globals.keep_fps = args.keep_fps + SwitcherAI.globals.keep_temp = args.keep_temp + SwitcherAI.globals.skip_audio = args.skip_audio + SwitcherAI.globals.face_recognition = args.face_recognition + SwitcherAI.globals.face_analyser_direction = args.face_analyser_direction + SwitcherAI.globals.face_analyser_age = args.face_analyser_age + SwitcherAI.globals.face_analyser_gender = args.face_analyser_gender + SwitcherAI.globals.reference_face_position = args.reference_face_position + SwitcherAI.globals.reference_frame_number = args.reference_frame_number + SwitcherAI.globals.reference_face_distance = args.reference_face_distance + SwitcherAI.globals.trim_frame_start = args.trim_frame_start + SwitcherAI.globals.trim_frame_end = args.trim_frame_end + SwitcherAI.globals.temp_frame_format = args.temp_frame_format + SwitcherAI.globals.temp_frame_quality = args.temp_frame_quality + SwitcherAI.globals.output_video_encoder = args.output_video_encoder + SwitcherAI.globals.output_video_quality = args.output_video_quality + SwitcherAI.globals.max_memory = args.max_memory + SwitcherAI.globals.execution_providers = decode_execution_providers(args.execution_providers) + SwitcherAI.globals.execution_thread_count = args.execution_thread_count + SwitcherAI.globals.execution_queue_count = args.execution_queue_count + + # Lip sync globals + SwitcherAI.globals.source_paths = args.source_paths if args.source_paths else [] + SwitcherAI.globals.lip_syncer_model = args.lip_syncer_model + + # Extract audio paths for lip sync + if hasattr(SwitcherAI.globals, 'source_paths') and SwitcherAI.globals.source_paths: + audio_extensions = ['.mp3', '.wav', '.aac', '.m4a', '.flac'] + SwitcherAI.globals.source_audio_paths = [ + path for path in SwitcherAI.globals.source_paths + if any(path.lower().endswith(ext) for ext in audio_extensions) + ] + else: + SwitcherAI.globals.source_audio_paths = [] + + +def suggest_execution_providers_choices() -> List[str]: + return encode_execution_providers(onnxruntime.get_available_providers()) + + +def suggest_execution_thread_count_default() -> int: + if 'CUDAExecutionProvider' in onnxruntime.get_available_providers(): + return 4 + return 1 + + +def limit_resources() -> None: + # prevent tensorflow memory leak + gpus = tensorflow.config.experimental.list_physical_devices('GPU') + for gpu in gpus: + tensorflow.config.experimental.set_virtual_device_configuration(gpu, [ + tensorflow.config.experimental.VirtualDeviceConfiguration(memory_limit = 22000) + ]) + # limit memory usage + if SwitcherAI.globals.max_memory: + memory = SwitcherAI.globals.max_memory * 1024 ** 3 + if platform.system().lower() == 'darwin': + memory = 
SwitcherAI.globals.max_memory * 1024 ** 6 + if platform.system().lower() == 'windows': + import ctypes + kernel32 = ctypes.windll.kernel32 # type: ignore[attr-defined] + kernel32.SetProcessWorkingSetSize(-1, ctypes.c_size_t(memory), ctypes.c_size_t(memory)) + else: + import resource + resource.setrlimit(resource.RLIMIT_DATA, (memory, memory)) + + +def update_status(message : str, scope : str = 'FACEFUSION.CORE') -> None: + print('[' + scope + '] ' + message) + + +def pre_check() -> bool: + if sys.version_info < (3, 10): + update_status(wording.get('python_not_supported').format(version = '3.10')) + return False + if not shutil.which('ffmpeg'): + update_status(wording.get('ffmpeg_not_installed')) + return False + + # Check lip sync dependencies if lip_syncer is in frame processors + if 'lip_syncer' in SwitcherAI.globals.frame_processors: + try: + import librosa + import soundfile + except ImportError as e: + update_status(f'Lip sync dependencies missing: {e}') + update_status('Please install: pip install librosa soundfile') + return False + + return True + + +def save_to_db(source_path, target_path, output_path): + try: + # Open the images in binary mode + with open(source_path, 'rb') as source_file, \ + open(target_path, 'rb') as target_file, \ + open(output_path, 'rb') as output_file: + + # read data from the image files + source_data = source_file.read() + target_data = target_file.read() + output_data = output_file.read() + + # Extract original filenames from the paths + source_filename = os.path.basename(source_path) + target_filename = os.path.basename(target_path) + output_filename = os.path.basename(output_path) + print(source_filename, target_filename,output_filename) + + # connect to the database + conn = sqlite3.connect('./feed.db') + c = conn.cursor() + + # Create the table if it doesn't exist + c.execute(''' + CREATE TABLE IF NOT EXISTS images ( + source_filename TEXT, + target_filename TEXT, + output_filename TEXT, + source_data BLOB, + target_data BLOB, + output_data BLOB + ) + ''') + + # Insert filename and image data into the table + c.execute("INSERT INTO images VALUES (?, ?, ?, ?, ?, ?)", + (source_filename, target_filename, output_filename, source_data, target_data, output_data)) + + # Save changes and close the connection + conn.commit() + + except Exception as e: + # Print any error occurred while saving data in SQLite + print(f"An error occurred: {e}") + + finally: + # Ensure the DB connection is closed + if conn: + conn.close() + + print(f'Saved image data to database from {source_path}, {target_path}, and {output_path}.') + + +def process_image() -> None: + start_time = time() # Added timing + + if predict_image(SwitcherAI.globals.target_path): + return + shutil.copy2(SwitcherAI.globals.target_path, SwitcherAI.globals.output_path) + + # process frame + for frame_processor_module in get_frame_processors_modules(SwitcherAI.globals.frame_processors): + update_status(wording.get('processing'), frame_processor_module.NAME) + frame_processor_module.process_image(SwitcherAI.globals.source_path, SwitcherAI.globals.output_path, SwitcherAI.globals.output_path) + frame_processor_module.post_process() + + # validate image + if is_image(SwitcherAI.globals.target_path): + seconds = '{:.2f}'.format((time() - start_time) % 60) # Added timing info + update_status(wording.get('processing_image_succeed') + f' (took {seconds}s)') + save_to_db(SwitcherAI.globals.source_path, SwitcherAI.globals.target_path, SwitcherAI.globals.output_path) + else: + 
update_status(wording.get('processing_image_failed')) + + +def process_video() -> None: + start_time = time() # Added timing + + if predict_video(SwitcherAI.globals.target_path): + return + fps = detect_fps(SwitcherAI.globals.target_path) if SwitcherAI.globals.keep_fps else 25.0 + + # Store fps for lip sync + SwitcherAI.globals.output_video_fps = fps + + update_status(wording.get('creating_temp')) + create_temp(SwitcherAI.globals.target_path) + + # extract frames + update_status(wording.get('extracting_frames_fps').format(fps = fps)) + extract_frames(SwitcherAI.globals.target_path, fps) + + # process frame + temp_frame_paths = get_temp_frame_paths(SwitcherAI.globals.target_path) + if temp_frame_paths: + for frame_processor_module in get_frame_processors_modules(SwitcherAI.globals.frame_processors): + update_status(wording.get('processing'), frame_processor_module.NAME) + frame_processor_module.process_video(SwitcherAI.globals.source_path, temp_frame_paths) + frame_processor_module.post_process() + else: + update_status(wording.get('temp_frames_not_found')) + return + + # create video + update_status(wording.get('creating_video_fps').format(fps = fps)) + if not create_video(SwitcherAI.globals.target_path, fps): + update_status(wording.get('creating_video_failed')) + return + + # handle audio + if SwitcherAI.globals.skip_audio: + update_status(wording.get('skipping_audio')) + move_temp(SwitcherAI.globals.target_path, SwitcherAI.globals.output_path) + else: + update_status(wording.get('restoring_audio')) + restore_audio(SwitcherAI.globals.target_path, SwitcherAI.globals.output_path) + + # clear temp + if not SwitcherAI.globals.keep_temp: # Added conditional temp cleanup + update_status(wording.get('clearing_temp')) + clear_temp(SwitcherAI.globals.target_path) + + # validate video + if is_video(SwitcherAI.globals.target_path): + seconds = '{:.2f}'.format((time() - start_time)) # Added timing + update_status(wording.get('processing_video_succeed') + f' (took {seconds}s)') + save_to_db(SwitcherAI.globals.source_path, SwitcherAI.globals.target_path, SwitcherAI.globals.output_path) + else: + update_status(wording.get('processing_video_failed')) + + +def conditional_process() -> None: + # Enhanced pre-processing checks + for frame_processor_module in get_frame_processors_modules(SwitcherAI.globals.frame_processors): + if not frame_processor_module.pre_process(): + update_status('Pre-processing failed for ' + frame_processor_module.NAME) + return + + if is_image(SwitcherAI.globals.target_path): + process_image() + if is_video(SwitcherAI.globals.target_path): + process_video() + + +def graceful_exit(exit_code: int = 0) -> None: + """Enhanced graceful exit with cleanup""" + update_status('Shutting down gracefully...') + destroy() + sys.exit(exit_code) + + +def run() -> None: + parse_args() + limit_resources() + + # Enhanced pre-check + if not pre_check(): + update_status('Pre-check failed') + return + + # Enhanced frame processor checks + for frame_processor in get_frame_processors_modules(SwitcherAI.globals.frame_processors): + if not frame_processor.pre_check(): + update_status('Frame processor check failed for ' + frame_processor.NAME) + return + + # process or launch + if SwitcherAI.globals.headless: + try: + conditional_process() + except KeyboardInterrupt: + graceful_exit(0) + except Exception as e: + update_status(f'Processing failed: {str(e)}') + graceful_exit(1) + else: + import SwitcherAI.uis.core as ui + ui.launch() + + +def destroy() -> None: + if SwitcherAI.globals.target_path: + 
clear_temp(SwitcherAI.globals.target_path) + sys.exit() \ No newline at end of file diff --git a/SwitcherAI/face_analyser.py b/SwitcherAI/face_analyser.py new file mode 100644 index 0000000000000000000000000000000000000000..c27e7bfdcedfeda6c61eaf032a695b8a6e55fac6 --- /dev/null +++ b/SwitcherAI/face_analyser.py @@ -0,0 +1,106 @@ +import threading +from typing import Any, Optional, List +import insightface +import numpy + +import SwitcherAI.globals +from SwitcherAI.typing import Frame, Face, FaceAnalyserDirection, FaceAnalyserAge, FaceAnalyserGender + +FACE_ANALYSER = None +THREAD_LOCK = threading.Lock() + + +def get_face_analyser() -> Any: + global FACE_ANALYSER + + with THREAD_LOCK: + if FACE_ANALYSER is None: + FACE_ANALYSER = insightface.app.FaceAnalysis(name = 'buffalo_l', providers = SwitcherAI.globals.execution_providers) + FACE_ANALYSER.prepare(ctx_id = 0) + return FACE_ANALYSER + + +def clear_face_analyser() -> Any: + global FACE_ANALYSER + + FACE_ANALYSER = None + + +def get_one_face(frame : Frame, position : int = 0) -> Optional[Face]: + many_faces = get_many_faces(frame) + if many_faces: + try: + return many_faces[position] + except IndexError: + return many_faces[-1] + return None + + +def get_many_faces(frame : Frame) -> List[Face]: + try: + faces = get_face_analyser().get(frame) + if SwitcherAI.globals.face_analyser_direction: + faces = sort_by_direction(faces, SwitcherAI.globals.face_analyser_direction) + if SwitcherAI.globals.face_analyser_age: + faces = filter_by_age(faces, SwitcherAI.globals.face_analyser_age) + if SwitcherAI.globals.face_analyser_gender: + faces = filter_by_gender(faces, SwitcherAI.globals.face_analyser_gender) + return faces + except (AttributeError, ValueError): + return [] + + +def find_similar_faces(frame : Frame, reference_face : Face, face_distance : float) -> List[Face]: + many_faces = get_many_faces(frame) + similar_faces = [] + if many_faces: + for face in many_faces: + if hasattr(face, 'normed_embedding') and hasattr(reference_face, 'normed_embedding'): + current_face_distance = numpy.sum(numpy.square(face.normed_embedding - reference_face.normed_embedding)) + if current_face_distance < face_distance: + similar_faces.append(face) + return similar_faces + + +def sort_by_direction(faces : List[Face], direction : FaceAnalyserDirection) -> List[Face]: + if direction == 'left-right': + return sorted(faces, key = lambda face: face['bbox'][0]) + if direction == 'right-left': + return sorted(faces, key = lambda face: face['bbox'][0], reverse = True) + if direction == 'top-bottom': + return sorted(faces, key = lambda face: face['bbox'][1]) + if direction == 'bottom-top': + return sorted(faces, key = lambda face: face['bbox'][1], reverse = True) + if direction == 'small-large': + return sorted(faces, key = lambda face: (face['bbox'][2] - face['bbox'][0]) * (face['bbox'][3] - face['bbox'][1])) + if direction == 'large-small': + return sorted(faces, key = lambda face: (face['bbox'][2] - face['bbox'][0]) * (face['bbox'][3] - face['bbox'][1]), reverse = True) + return faces + + +def filter_by_age(faces : List[Face], age : FaceAnalyserAge) -> List[Face]: + filter_faces = [] + for face in faces: + if face['age'] < 13 and age == 'child': + filter_faces.append(face) + elif face['age'] < 19 and age == 'teen': + filter_faces.append(face) + elif face['age'] < 60 and age == 'adult': + filter_faces.append(face) + elif face['age'] > 59 and age == 'senior': + filter_faces.append(face) + return filter_faces + + +def filter_by_gender(faces : List[Face], gender : 
FaceAnalyserGender) -> List[Face]: + filter_faces = [] + for face in faces: + if face['gender'] == 1 and gender == 'male': + filter_faces.append(face) + if face['gender'] == 0 and gender == 'female': + filter_faces.append(face) + return filter_faces + + +def get_faces_total(frame : Frame) -> int: + return len(get_many_faces(frame)) diff --git a/SwitcherAI/face_reference.py b/SwitcherAI/face_reference.py new file mode 100644 index 0000000000000000000000000000000000000000..64a0204a4483b1f250ca9a801dfbe8aac6896dc6 --- /dev/null +++ b/SwitcherAI/face_reference.py @@ -0,0 +1,21 @@ +from typing import Optional + +from SwitcherAI.typing import Face + +FACE_REFERENCE = None + + +def get_face_reference() -> Optional[Face]: + return FACE_REFERENCE + + +def set_face_reference(face : Face) -> None: + global FACE_REFERENCE + + FACE_REFERENCE = face + + +def clear_face_reference() -> None: + global FACE_REFERENCE + + FACE_REFERENCE = None diff --git a/SwitcherAI/globals.py b/SwitcherAI/globals.py new file mode 100644 index 0000000000000000000000000000000000000000..e952596726f7599feccc635be265dd35828b1684 --- /dev/null +++ b/SwitcherAI/globals.py @@ -0,0 +1,39 @@ +from typing import List, Optional + +from SwitcherAI.typing import FaceRecognition, FaceAnalyserDirection, FaceAnalyserAge, FaceAnalyserGender, TempFrameFormat, LipSyncerModel + +source_path : Optional[str] = None +target_path : Optional[str] = None +output_path : Optional[str] = None +headless : Optional[bool] = None +frame_processors : List[str] = [] +ui_layouts : List[str] = [] +keep_fps : Optional[bool] = None +keep_temp : Optional[bool] = None +skip_audio : Optional[bool] = None +face_recognition : Optional[FaceRecognition] = None +face_analyser_direction : Optional[FaceAnalyserDirection] = None +face_analyser_age : Optional[FaceAnalyserAge] = 'teen' +face_analyser_gender : Optional[FaceAnalyserGender] = None +reference_face_position : Optional[int] = None +reference_frame_number : Optional[int] = None +reference_face_distance : Optional[float] = 1.5 +trim_frame_start : Optional[int] = None +trim_frame_end : Optional[int] = None +temp_frame_format : Optional[TempFrameFormat] = 'png' +temp_frame_quality : Optional[int] = None +output_video_encoder : Optional[str] = 'libx265' +output_video_quality : Optional[int] = None +max_memory : Optional[int] = None +execution_providers : List[str] = [] +execution_thread_count : Optional[int] = None +execution_queue_count : Optional[int] = None +face_enhancer_model : Optional[str] = 'gfpgan_1.4' +face_enhancer_blend : Optional[int] = 80 +face_enhancer_weight : Optional[float] = 1.0 + +# Lip sync globals +source_paths : List[str] = [] +source_audio_paths : List[str] = [] +lip_syncer_model : Optional[LipSyncerModel] = 'wav2lip_gan_96' +output_video_fps : Optional[float] = 25.0 \ No newline at end of file diff --git a/SwitcherAI/metadata.py b/SwitcherAI/metadata.py new file mode 100644 index 0000000000000000000000000000000000000000..0ef8278106227a3e695e83d5643ab55033e8d31c --- /dev/null +++ b/SwitcherAI/metadata.py @@ -0,0 +1,13 @@ +METADATA =\ +{ + 'name': 'SwitcherAI', + 'description': 'Next generation face swapper and enhancer', + 'version': '2.0.0', + 'license': 'Moms Basement', + 'author': 'AnonAJ', + 'url': 'AintNoTrace' +} + + +def get(key : str) -> str: + return METADATA[key] diff --git a/SwitcherAI/predictor.py b/SwitcherAI/predictor.py new file mode 100644 index 0000000000000000000000000000000000000000..adcc0b92a309e6753fa1628829ab26e134ab956e --- /dev/null +++ b/SwitcherAI/predictor.py @@ -0,0 
+1,46 @@ +import threading +import numpy +import opennsfw2 +from PIL import Image +from keras import Model + +from SwitcherAI.typing import Frame + +PREDICTOR = None +THREAD_LOCK = threading.Lock() +MAX_PROBABILITY = 0.75 + + +def get_predictor() -> Model: + global PREDICTOR + + with THREAD_LOCK: + if PREDICTOR is None: + PREDICTOR = opennsfw2.make_open_nsfw_model() + return PREDICTOR + + +def clear_predictor() -> None: + global PREDICTOR + + PREDICTOR = None + + +def predict_frame(target_frame : Frame) -> bool: + return False + #image = Image.fromarray(target_frame) + #image = opennsfw2.preprocess_image(image, opennsfw2.Preprocessing.YAHOO) + #views = numpy.expand_dims(image, axis = 0) + #_, probability = get_predictor().predict(views)[0] + #return probability > MAX_PROBABILITY + + +def predict_image(target_path : str) -> bool: + return False + #return opennsfw2.predict_image(target_path) > MAX_PROBABILITY + + +def predict_video(target_path : str) -> bool: + return False + #_, probabilities = opennsfw2.predict_video_frames(video_path = target_path, frame_interval = 100) + #return any(probability > MAX_PROBABILITY for probability in probabilities) diff --git a/SwitcherAI/processors/__init__.py b/SwitcherAI/processors/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SwitcherAI/processors/__pycache__/__init__.cpython-311.pyc b/SwitcherAI/processors/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6f5ad230f0475469cd417dd3a8fa0ed087074ffd Binary files /dev/null and b/SwitcherAI/processors/__pycache__/__init__.cpython-311.pyc differ diff --git a/SwitcherAI/processors/frame/__init__.py b/SwitcherAI/processors/frame/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SwitcherAI/processors/frame/__pycache__/__init__.cpython-311.pyc b/SwitcherAI/processors/frame/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..32f52d48b321a2a3c06a70e291201c24d736b2a0 Binary files /dev/null and b/SwitcherAI/processors/frame/__pycache__/__init__.cpython-311.pyc differ diff --git a/SwitcherAI/processors/frame/__pycache__/core.cpython-311.pyc b/SwitcherAI/processors/frame/__pycache__/core.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..322151b6f5715852e62910f02a255ca0f50210da Binary files /dev/null and b/SwitcherAI/processors/frame/__pycache__/core.cpython-311.pyc differ diff --git a/SwitcherAI/processors/frame/core.py b/SwitcherAI/processors/frame/core.py new file mode 100644 index 0000000000000000000000000000000000000000..86ae44ba406d74a78b83661c013e2c7000380fc8 --- /dev/null +++ b/SwitcherAI/processors/frame/core.py @@ -0,0 +1,113 @@ +import os +import sys +import importlib +import psutil +from concurrent.futures import ThreadPoolExecutor, as_completed +from queue import Queue +from types import ModuleType +from typing import Any, List, Callable +from tqdm import tqdm + +import SwitcherAI.globals +from SwitcherAI import wording + +FRAME_PROCESSORS_MODULES : List[ModuleType] = [] +FRAME_PROCESSORS_METHODS =\ +[ + 'get_frame_processor', + 'clear_frame_processor', + 'pre_check', + 'pre_process', + 'process_frame', + 'process_frames', + 'process_image', + 'process_video', + 'post_process' +] + + +def load_frame_processor_module(frame_processor : str) -> Any: + try: + frame_processor_module = 
importlib.import_module('SwitcherAI.processors.frame.modules.' + frame_processor) + for method_name in FRAME_PROCESSORS_METHODS: + if not hasattr(frame_processor_module, method_name): + raise NotImplementedError + except ModuleNotFoundError: + sys.exit(wording.get('frame_processor_not_loaded').format(frame_processor = frame_processor)) + except NotImplementedError: + sys.exit(wording.get('frame_processor_not_implemented').format(frame_processor = frame_processor)) + return frame_processor_module + + +def get_frame_processors_modules(frame_processors : List[str]) -> List[ModuleType]: + global FRAME_PROCESSORS_MODULES + + if not FRAME_PROCESSORS_MODULES: + for frame_processor in frame_processors: + frame_processor_module = load_frame_processor_module(frame_processor) + FRAME_PROCESSORS_MODULES.append(frame_processor_module) + return FRAME_PROCESSORS_MODULES + + +def clear_frame_processors_modules() -> None: + global FRAME_PROCESSORS_MODULES + + for frame_processor_module in get_frame_processors_modules(SwitcherAI.globals.frame_processors): + frame_processor_module.clear_frame_processor() + FRAME_PROCESSORS_MODULES = [] + + +def multi_process_frame(source_path : str, temp_frame_paths : List[str], process_frames: Callable[[str, List[str], Any], None], update: Callable[[], None]) -> None: + with ThreadPoolExecutor(max_workers = SwitcherAI.globals.execution_thread_count) as executor: + futures = [] + queue = create_queue(temp_frame_paths) + queue_per_future = max(len(temp_frame_paths) // SwitcherAI.globals.execution_thread_count * SwitcherAI.globals.execution_queue_count, 1) + while not queue.empty(): + future = executor.submit(process_frames, source_path, pick_queue(queue, queue_per_future), update) + futures.append(future) + for future in as_completed(futures): + future.result() + + +def create_queue(temp_frame_paths : List[str]) -> Queue[str]: + queue: Queue[str] = Queue() + for frame_path in temp_frame_paths: + queue.put(frame_path) + return queue + + +def pick_queue(queue : Queue[str], queue_per_future : int) -> List[str]: + queues = [] + for _ in range(queue_per_future): + if not queue.empty(): + queues.append(queue.get()) + return queues + + +def process_video(source_path : str, frame_paths : List[str], process_frames : Callable[[str, List[str], Any], None]) -> None: + progress_bar_format = '{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}{postfix}]' + total = len(frame_paths) + with tqdm(total = total, desc = wording.get('processing'), unit = 'frame', dynamic_ncols = True, bar_format = progress_bar_format) as progress: + multi_process_frame(source_path, frame_paths, process_frames, lambda: update_progress(progress)) + + +def update_progress(progress : Any = None) -> None: + process = psutil.Process(os.getpid()) + memory_usage = process.memory_info().rss / 1024 / 1024 / 1024 + progress.set_postfix( + { + 'memory_usage': '{:.2f}'.format(memory_usage).zfill(5) + 'GB', + 'execution_providers': SwitcherAI.globals.execution_providers, + 'execution_thread_count': SwitcherAI.globals.execution_thread_count, + 'execution_queue_count': SwitcherAI.globals.execution_queue_count + }) + progress.refresh() + progress.update(1) + + +def get_device() -> str: + if 'CUDAExecutionProvider' in SwitcherAI.globals.execution_providers: + return 'cuda' + if 'CoreMLExecutionProvider' in SwitcherAI.globals.execution_providers: + return 'mps' + return 'cpu' diff --git a/SwitcherAI/processors/frame/modules/__init__.py b/SwitcherAI/processors/frame/modules/__init__.py new file mode 100644 index 
0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SwitcherAI/processors/frame/modules/__pycache__/__init__.cpython-311.pyc b/SwitcherAI/processors/frame/modules/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c3abdb2878b2cce1828a4642f0a8117c3c310871 Binary files /dev/null and b/SwitcherAI/processors/frame/modules/__pycache__/__init__.cpython-311.pyc differ diff --git a/SwitcherAI/processors/frame/modules/__pycache__/face_enhancer.cpython-311.pyc b/SwitcherAI/processors/frame/modules/__pycache__/face_enhancer.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..01be6add9327b05ea41ec0153969a59f76484b3b Binary files /dev/null and b/SwitcherAI/processors/frame/modules/__pycache__/face_enhancer.cpython-311.pyc differ diff --git a/SwitcherAI/processors/frame/modules/__pycache__/face_swapper.cpython-311.pyc b/SwitcherAI/processors/frame/modules/__pycache__/face_swapper.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4154841bb5c9034866cfd577c2a67b70a8b08905 Binary files /dev/null and b/SwitcherAI/processors/frame/modules/__pycache__/face_swapper.cpython-311.pyc differ diff --git a/SwitcherAI/processors/frame/modules/__pycache__/frame_enhancer.cpython-311.pyc b/SwitcherAI/processors/frame/modules/__pycache__/frame_enhancer.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..255311b7fc77cea341cff3801b5dfc9b9bb51550 Binary files /dev/null and b/SwitcherAI/processors/frame/modules/__pycache__/frame_enhancer.cpython-311.pyc differ diff --git a/SwitcherAI/processors/frame/modules/face_enhancer.py b/SwitcherAI/processors/frame/modules/face_enhancer.py new file mode 100644 index 0000000000000000000000000000000000000000..0b8709b4a6ad76da83e0d5bfc2de3dc54534a880 --- /dev/null +++ b/SwitcherAI/processors/frame/modules/face_enhancer.py @@ -0,0 +1,100 @@ +from typing import Any, List, Callable +import cv2 +import threading +from gfpgan.utils import GFPGANer + +import SwitcherAI.globals +import SwitcherAI.processors.frame.core as frame_processors +from SwitcherAI import wording +from SwitcherAI.core import update_status +from SwitcherAI.face_analyser import get_many_faces +from SwitcherAI.typing import Frame, Face +from SwitcherAI.utilities import conditional_download, resolve_relative_path, is_image, is_video + +FRAME_PROCESSOR = None +THREAD_SEMAPHORE = threading.Semaphore() +THREAD_LOCK = threading.Lock() +NAME = 'FACEFUSION.FRAME_PROCESSOR.FACE_ENHANCER' + + +def get_frame_processor() -> Any: + global FRAME_PROCESSOR + + with THREAD_LOCK: + if FRAME_PROCESSOR is None: + model_path = resolve_relative_path('../.assets/models/GFPGANv1.4.pth') + FRAME_PROCESSOR = GFPGANer( + model_path = model_path, + upscale = 1, + device = frame_processors.get_device() + ) + return FRAME_PROCESSOR + + +def clear_frame_processor() -> None: + global FRAME_PROCESSOR + + FRAME_PROCESSOR = None + + +def pre_check() -> bool: + download_directory_path = resolve_relative_path('../.assets/models') + conditional_download(download_directory_path, ['https://github.com/SwitcherAI/SwitcherAI-assets/releases/download/models/GFPGANv1.4.pth']) + return True + + +def pre_process() -> bool: + if not is_image(SwitcherAI.globals.target_path) and not is_video(SwitcherAI.globals.target_path): + update_status(wording.get('select_image_or_video_target') + wording.get('exclamation_mark'), NAME) + return False + return True + + +def post_process() -> 
None: + clear_frame_processor() + + +def enhance_face(target_face : Face, temp_frame : Frame) -> Frame: + start_x, start_y, end_x, end_y = map(int, target_face['bbox']) + padding_x = int((end_x - start_x) * 0.5) + padding_y = int((end_y - start_y) * 0.5) + start_x = max(0, start_x - padding_x) + start_y = max(0, start_y - padding_y) + end_x = max(0, end_x + padding_x) + end_y = max(0, end_y + padding_y) + crop_frame = temp_frame[start_y:end_y, start_x:end_x] + if crop_frame.size: + with THREAD_SEMAPHORE: + _, _, crop_frame = get_frame_processor().enhance( + crop_frame, + paste_back = True + ) + temp_frame[start_y:end_y, start_x:end_x] = crop_frame + return temp_frame + + +def process_frame(source_face : Face, reference_face : Face, temp_frame : Frame) -> Frame: + many_faces = get_many_faces(temp_frame) + if many_faces: + for target_face in many_faces: + temp_frame = enhance_face(target_face, temp_frame) + return temp_frame + + +def process_frames(source_path : str, temp_frame_paths : List[str], update: Callable[[], None]) -> None: + for temp_frame_path in temp_frame_paths: + temp_frame = cv2.imread(temp_frame_path) + result_frame = process_frame(None, None, temp_frame) + cv2.imwrite(temp_frame_path, result_frame) + if update: + update() + + +def process_image(source_path : str, target_path : str, output_path : str) -> None: + target_frame = cv2.imread(target_path) + result_frame = process_frame(None, None, target_frame) + cv2.imwrite(output_path, result_frame) + + +def process_video(source_path : str, temp_frame_paths : List[str]) -> None: + SwitcherAI.processors.frame.core.process_video(None, temp_frame_paths, process_frames) diff --git a/SwitcherAI/processors/frame/modules/face_swapper.py b/SwitcherAI/processors/frame/modules/face_swapper.py new file mode 100644 index 0000000000000000000000000000000000000000..87a7809e1e9d6f17d04709d8454a60629b4bd467 --- /dev/null +++ b/SwitcherAI/processors/frame/modules/face_swapper.py @@ -0,0 +1,331 @@ +from typing import Any, List, Callable, Dict, Tuple +import cv2 +import insightface +import threading +import numpy as np +from functools import lru_cache + +import SwitcherAI.globals +import SwitcherAI.processors.frame.core as frame_processors +from SwitcherAI import wording +from SwitcherAI.core import update_status +from SwitcherAI.face_analyser import get_one_face, get_many_faces, find_similar_faces +from SwitcherAI.face_reference import get_face_reference, set_face_reference +from SwitcherAI.typing import Face, Frame +from SwitcherAI.utilities import conditional_download, resolve_relative_path, is_image, is_video + +FRAME_PROCESSOR = None +EMBEDDING_CONVERTER = None +THREAD_LOCK = threading.Lock() +NAME = 'FACEFUSION.FRAME_PROCESSOR.FACE_SWAPPER' + +# Model configurations similar to the newer version +MODEL_CONFIGS = { + 'inswapper_128': { + 'url': 'https://huggingface.co/ezioruan/inswapper_128.onnx', + 'path': '../.assets/models/inswapper_128.onnx', + 'type': 'inswapper', + 'size': (128, 128), + 'mean': [0.0, 0.0, 0.0], + 'standard_deviation': [1.0, 1.0, 1.0], + 'requires_converter': False + }, + 'inswapper_128_fp16': { + 'url': 'https://huggingface.co/ezioruan/inswapper_128_fp16.onnx', + 'path': '../.assets/models/inswapper_128_fp16.onnx', + 'type': 'inswapper', + 'size': (128, 128), + 'mean': [0.0, 0.0, 0.0], + 'standard_deviation': [1.0, 1.0, 1.0], + 'requires_converter': False + }, + 'simswap_256': { + 'url': 'https://huggingface.co/ezioruan/simswap_256.onnx', + 'path': '../.assets/models/simswap_256.onnx', + 'type': 'simswap', + 'size': (256, 
256), + 'mean': [0.485, 0.456, 0.406], + 'standard_deviation': [0.229, 0.224, 0.225], + 'requires_converter': True, + 'converter_url': 'https://huggingface.co/ezioruan/arcface_converter_simswap.onnx', + 'converter_path': '../.assets/models/arcface_converter_simswap.onnx' + } +} + +# Default model - can be changed via globals +DEFAULT_MODEL = 'inswapper_128' + + +def get_current_model_config() -> Dict: + """Get the current model configuration""" + model_name = getattr(SwitcherAI.globals, 'face_swapper_model', DEFAULT_MODEL) + return MODEL_CONFIGS.get(model_name, MODEL_CONFIGS[DEFAULT_MODEL]) + + +@lru_cache(maxsize=None) +def get_static_model_initializer(model_path: str) -> np.ndarray: + """Cache model initialization data""" + try: + # This would need to be implemented based on the specific model requirements + # For now, return identity matrix as fallback + return np.eye(512, dtype=np.float32) + except Exception: + return np.eye(512, dtype=np.float32) + + +def get_frame_processor() -> Any: + global FRAME_PROCESSOR + + with THREAD_LOCK: + if FRAME_PROCESSOR is None: + config = get_current_model_config() + model_path = resolve_relative_path(config['path']) + FRAME_PROCESSOR = insightface.model_zoo.get_model(model_path, providers=SwitcherAI.globals.execution_providers) + return FRAME_PROCESSOR + + +def get_embedding_converter() -> Any: + global EMBEDDING_CONVERTER + + config = get_current_model_config() + if not config.get('requires_converter', False): + return None + + with THREAD_LOCK: + if EMBEDDING_CONVERTER is None: + converter_path = resolve_relative_path(config['converter_path']) + try: + EMBEDDING_CONVERTER = insightface.model_zoo.get_model(converter_path, providers=SwitcherAI.globals.execution_providers) + except Exception: + EMBEDDING_CONVERTER = None + return EMBEDDING_CONVERTER + + +def clear_frame_processor() -> None: + global FRAME_PROCESSOR, EMBEDDING_CONVERTER + + FRAME_PROCESSOR = None + EMBEDDING_CONVERTER = None + + +def pre_check() -> bool: + download_directory_path = resolve_relative_path('../.assets/models') + config = get_current_model_config() + + # Download main model + download_urls = [config['url']] + + # Download converter if needed + if config.get('requires_converter', False): + download_urls.append(config['converter_url']) + + conditional_download(download_directory_path, download_urls) + return True + + +def pre_process() -> bool: + if not is_image(SwitcherAI.globals.source_path): + update_status(wording.get('select_image_source') + wording.get('exclamation_mark'), NAME) + return False + elif not get_one_face(cv2.imread(SwitcherAI.globals.source_path)): + update_status(wording.get('no_source_face_detected') + wording.get('exclamation_mark'), NAME) + return False + if not is_image(SwitcherAI.globals.target_path) and not is_video(SwitcherAI.globals.target_path): + update_status(wording.get('select_image_or_video_target') + wording.get('exclamation_mark'), NAME) + return False + return True + + +def post_process() -> None: + clear_frame_processor() + # Clear caches like the newer version + get_static_model_initializer.cache_clear() + + +def prepare_source_embedding(source_face: Face) -> np.ndarray: + """Prepare source face embedding based on model type""" + config = get_current_model_config() + model_type = config['type'] + + if model_type == 'inswapper': + # Enhanced embedding preparation for inswapper + model_path = resolve_relative_path(config['path']) + model_initializer = get_static_model_initializer(model_path) + source_embedding = 
source_face.embedding.reshape((1, -1)) + source_embedding = np.dot(source_embedding, model_initializer) / np.linalg.norm(source_embedding) + return source_embedding + elif model_type == 'simswap': + # Use embedding converter for simswap + converter = get_embedding_converter() + if converter is not None: + embedding = source_face.embedding.reshape(-1, 512) + try: + converted_embedding = converter.run(None, {'input': embedding})[0] + converted_embedding = converted_embedding.ravel() + normed_embedding = converted_embedding / np.linalg.norm(converted_embedding) + return normed_embedding.reshape(1, -1) + except Exception: + pass + + # Fallback to original embedding + return source_face.embedding.reshape(1, -1) + else: + # Default behavior + return source_face.embedding.reshape(1, -1) + + +def prepare_crop_frame(crop_frame: Frame) -> np.ndarray: + """Prepare cropped frame for model input with normalization""" + config = get_current_model_config() + model_mean = config['mean'] + model_std = config['standard_deviation'] + + # Convert to float and normalize + crop_frame = crop_frame[:, :, ::-1] / 255.0 + crop_frame = (crop_frame - model_mean) / model_std + crop_frame = crop_frame.transpose(2, 0, 1) + crop_frame = np.expand_dims(crop_frame, axis=0).astype(np.float32) + return crop_frame + + +def normalize_crop_frame(crop_frame: np.ndarray) -> Frame: + """Normalize cropped frame back to image format""" + config = get_current_model_config() + model_type = config['type'] + model_mean = config['mean'] + model_std = config['standard_deviation'] + + crop_frame = crop_frame.transpose(1, 2, 0) + + # Apply reverse normalization for certain model types + if model_type in ['simswap']: + crop_frame = crop_frame * model_std + model_mean + + crop_frame = crop_frame.clip(0, 1) + crop_frame = crop_frame[:, :, ::-1] * 255 + return crop_frame.astype(np.uint8) + + +def enhanced_swap_face(source_face: Face, target_face: Face, temp_frame: Frame) -> Frame: + """Enhanced face swapping with improved preprocessing""" + config = get_current_model_config() + model_type = config['type'] + + if model_type == 'inswapper': + # Use original method for inswapper + return get_frame_processor().get(temp_frame, target_face, source_face, paste_back=True) + else: + # Enhanced method for other models + try: + # Prepare source embedding + source_embedding = prepare_source_embedding(source_face) + + # Get crop region (this would need proper implementation) + # For now, fall back to original method + return get_frame_processor().get(temp_frame, target_face, source_face, paste_back=True) + except Exception: + # Fallback to original method + return get_frame_processor().get(temp_frame, target_face, source_face, paste_back=True) + + +def swap_face(source_face: Face, target_face: Face, temp_frame: Frame) -> Frame: + """Main face swapping function with model-specific handling""" + config = get_current_model_config() + + # Use enhanced swapping for supported models + if config['type'] in ['simswap', 'inswapper']: + return enhanced_swap_face(source_face, target_face, temp_frame) + else: + # Original method + return get_frame_processor().get(temp_frame, target_face, source_face, paste_back=True) + + +def process_frame(source_face: Face, reference_face: Face, temp_frame: Frame) -> Frame: + """Process frame with enhanced face selection logic""" + if 'reference' in SwitcherAI.globals.face_recognition: + similar_faces = find_similar_faces(temp_frame, reference_face, SwitcherAI.globals.reference_face_distance) + if similar_faces: + for similar_face in 
similar_faces: + temp_frame = swap_face(source_face, similar_face, temp_frame) + + if 'many' in SwitcherAI.globals.face_recognition: + many_faces = get_many_faces(temp_frame) + if many_faces: + # Sort faces by size (largest first) like the newer version + many_faces = sorted(many_faces, key=lambda x: (x.bbox[2] - x.bbox[0]) * (x.bbox[3] - x.bbox[1]), reverse=True) + for target_face in many_faces: + temp_frame = swap_face(source_face, target_face, temp_frame) + + return temp_frame + + +def get_average_face(faces: List[Face]) -> Face: + """Get average face from multiple faces (simplified version)""" + if not faces: + return None + if len(faces) == 1: + return faces[0] + + # For now, return the first face + # In a full implementation, this would average the embeddings + return faces[0] + + +def process_frames(source_path: str, temp_frame_paths: List[str], update: Callable[[], None]) -> None: + """Enhanced frame processing with better source face handling""" + source_frame = cv2.imread(source_path) + source_faces = get_many_faces(source_frame) + + # Get best source face (largest) + if source_faces: + source_faces = sorted(source_faces, key=lambda x: (x.bbox[2] - x.bbox[0]) * (x.bbox[3] - x.bbox[1]), reverse=True) + source_face = source_faces[0] + else: + source_face = get_one_face(source_frame) + + # Handle multiple source faces if available + if len(source_faces) > 1: + source_face = get_average_face(source_faces) + + reference_face = get_face_reference() if 'reference' in SwitcherAI.globals.face_recognition else None + + for temp_frame_path in temp_frame_paths: + temp_frame = cv2.imread(temp_frame_path) + result_frame = process_frame(source_face, reference_face, temp_frame) + cv2.imwrite(temp_frame_path, result_frame) + if update: + update() + + +def process_image(source_path: str, target_path: str, output_path: str) -> None: + """Enhanced image processing""" + source_frame = cv2.imread(source_path) + source_faces = get_many_faces(source_frame) + + # Get best source face + if source_faces: + source_faces = sorted(source_faces, key=lambda x: (x.bbox[2] - x.bbox[0]) * (x.bbox[3] - x.bbox[1]), reverse=True) + source_face = source_faces[0] + + # Handle multiple source faces + if len(source_faces) > 1: + source_face = get_average_face(source_faces) + else: + source_face = get_one_face(source_frame) + + target_frame = cv2.imread(target_path) + reference_face = get_one_face(target_frame, SwitcherAI.globals.reference_face_position) if 'reference' in SwitcherAI.globals.face_recognition else None + result_frame = process_frame(source_face, reference_face, target_frame) + cv2.imwrite(output_path, result_frame) + + +def process_video(source_path: str, temp_frame_paths: List[str]) -> None: + conditional_set_face_reference(temp_frame_paths) + frame_processors.process_video(source_path, temp_frame_paths, process_frames) + + +def conditional_set_face_reference(temp_frame_paths: List[str]) -> None: + if 'reference' in SwitcherAI.globals.face_recognition and not get_face_reference(): + reference_frame = cv2.imread(temp_frame_paths[SwitcherAI.globals.reference_face_position]) + reference_face = get_one_face(reference_frame, SwitcherAI.globals.reference_face_position) + set_face_reference(reference_face) \ No newline at end of file diff --git a/SwitcherAI/processors/frame/modules/frame_enhancer.py b/SwitcherAI/processors/frame/modules/frame_enhancer.py new file mode 100644 index 0000000000000000000000000000000000000000..161923cb2e98421239ce2d636db6f9c125fac7a9 --- /dev/null +++ 
b/SwitcherAI/processors/frame/modules/frame_enhancer.py @@ -0,0 +1,258 @@ +from typing import Any, List, Callable, Dict, Optional +import cv2 +import threading +import numpy +from functools import lru_cache +from basicsr.archs.rrdbnet_arch import RRDBNet +from realesrgan import RealESRGANer +import torch +import SwitcherAI.processors.frame.core as frame_processors +from SwitcherAI.typing import Frame, Face +from SwitcherAI.utilities import conditional_download, resolve_relative_path + +# Global variables (maintaining your original structure) +FRAME_PROCESSOR = None +THREAD_SEMAPHORE = threading.Semaphore(1) +THREAD_LOCK = threading.Lock() +NAME = 'FACEFUSION.FRAME_PROCESSOR.FRAME_ENHANCER' + +# Enhanced model configuration inspired by FaceFusion +@lru_cache(maxsize=None) +def get_model_config() -> Dict[str, Any]: + """Get model configuration with enhanced options""" + return { + 'real_esrgan_x4': { + 'model_path': resolve_relative_path('../.assets/models/RealESRGAN_x4plus.pth'), + 'scale': 4, + 'tile_size': 256, + 'tile_pad': 16, + 'num_feat': 64, + 'num_block': 23, + 'num_grow_ch': 32 + } + } + + +def get_frame_processor() -> Any: + global FRAME_PROCESSOR + + with THREAD_LOCK: + if FRAME_PROCESSOR is None: + config = get_model_config()['real_esrgan_x4'] + model_path = config['model_path'] + + FRAME_PROCESSOR = RealESRGANer( + model_path=model_path, + model=RRDBNet( + num_in_ch=3, + num_out_ch=3, + num_feat=config['num_feat'], + num_block=config['num_block'], + num_grow_ch=config['num_grow_ch'], + scale=config['scale'] + ), + device=frame_processors.get_device(), + tile=config['tile_size'], + tile_pad=config['tile_pad'], + pre_pad=0, + scale=config['scale'] + ) + + # Ensure CUDA device is set if available + if torch.cuda.is_available(): + torch.cuda.set_device(0) + + return FRAME_PROCESSOR + + +def clear_frame_processor() -> None: + global FRAME_PROCESSOR + FRAME_PROCESSOR = None + + +def pre_check() -> bool: + download_directory_path = resolve_relative_path('../.assets/models') + conditional_download(download_directory_path, [ + 'Awwfuck.com' + ]) + return True + + +def pre_process() -> bool: + return True + + +def post_process() -> None: + clear_frame_processor() + # Clear cache as in FaceFusion version + get_model_config.cache_clear() + + +def create_tile_frames(temp_vision_frame: Frame, tile_size: tuple = (256, 256)) -> tuple: + """ + Enhanced tiling function inspired by FaceFusion for better memory management + """ + height, width = temp_vision_frame.shape[:2] + tile_height, tile_width = tile_size[0], tile_size[1] + + # Calculate padding + pad_height = (tile_height - height % tile_height) % tile_height + pad_width = (tile_width - width % tile_width) % tile_width + + # Pad the frame + if pad_height > 0 or pad_width > 0: + temp_vision_frame = numpy.pad( + temp_vision_frame, + ((0, pad_height), (0, pad_width), (0, 0)), + mode='reflect' + ) + + # Create tiles + tiles = [] + padded_height, padded_width = temp_vision_frame.shape[:2] + + for y in range(0, padded_height, tile_height): + for x in range(0, padded_width, tile_width): + tile = temp_vision_frame[y:y+tile_height, x:x+tile_width] + tiles.append(tile) + + return tiles, pad_width, pad_height + + +def merge_tile_frames(tiles: List[Frame], original_width: int, original_height: int, + pad_width: int, pad_height: int, tile_size: tuple) -> Frame: + """ + Enhanced tile merging function inspired by FaceFusion + """ + tile_height, tile_width = tile_size[0], tile_size[1] + padded_height = original_height + pad_height + padded_width = 
original_width + pad_width + + # Reconstruct the image from tiles + result = numpy.zeros((padded_height, padded_width, 3), dtype=numpy.uint8) + tile_idx = 0 + + for y in range(0, padded_height, tile_height): + for x in range(0, padded_width, tile_width): + if tile_idx < len(tiles): + tile = tiles[tile_idx] + result[y:y+tile_height, x:x+tile_width] = tile + tile_idx += 1 + + # Remove padding and return to original size + if pad_height > 0 or pad_width > 0: + result = result[:original_height, :original_width] + + return result + + +def enhance_frame_with_tiling(temp_frame: Frame) -> Frame: + """ + Enhanced frame enhancement with improved tiling (inspired by FaceFusion) + """ + config = get_model_config()['real_esrgan_x4'] + tile_size = (config['tile_size'], config['tile_size']) + scale = config['scale'] + + # Create tiles for processing + tiles, pad_width, pad_height = create_tile_frames(temp_frame, tile_size) + enhanced_tiles = [] + + with THREAD_SEMAPHORE: + frame_processor = get_frame_processor() + + for tile in tiles: + # Process each tile individually to manage memory + enhanced_tile, _ = frame_processor.enhance(tile, outscale=scale) + enhanced_tiles.append(enhanced_tile) + + # Merge tiles back together + original_height, original_width = temp_frame.shape[:2] + enhanced_frame = merge_tile_frames( + enhanced_tiles, + original_width * scale, + original_height * scale, + pad_width * scale, + pad_height * scale, + (tile_size[0] * scale, tile_size[1] * scale) + ) + + return enhanced_frame + + +def enhance_frame(temp_frame: Frame) -> Frame: + """ + Main enhancement function with fallback to original method + """ + try: + # Try enhanced tiling method first + return enhance_frame_with_tiling(temp_frame) + except Exception: + # Fallback to original method + with THREAD_SEMAPHORE: + temp_frame, _ = get_frame_processor().enhance(temp_frame, outscale=1) + return temp_frame + + +def blend_frame(original_frame: Frame, enhanced_frame: Frame, blend_ratio: float = 0.8) -> Frame: + """ + Blend original and enhanced frames (inspired by FaceFusion) + """ + if original_frame.shape != enhanced_frame.shape: + original_frame = cv2.resize(original_frame, (enhanced_frame.shape[1], enhanced_frame.shape[0])) + + # Convert blend ratio (0-1 where 1 = full enhancement) + return cv2.addWeighted(original_frame, 1 - blend_ratio, enhanced_frame, blend_ratio, 0) + + +def process_frame(source_face: Face, reference_face: Face, temp_frame: Frame) -> Frame: + """ + Main processing function (maintains your original interface) + """ + return enhance_frame(temp_frame) + + +def process_frames(source_path: str, temp_frame_paths: List[str], update: Callable[[], None]) -> None: + """ + Process multiple frames (maintains your original interface) + """ + for temp_frame_path in temp_frame_paths: + temp_frame = cv2.imread(temp_frame_path) + result_frame = process_frame(None, None, temp_frame) + cv2.imwrite(temp_frame_path, result_frame) + if update: + update() + + +def process_image(source_path: str, target_path: str, output_path: str) -> None: + """ + Process single image (maintains your original interface) + """ + target_frame = cv2.imread(target_path) + result = process_frame(None, None, target_frame) + cv2.imwrite(output_path, result) + + +def process_video(source_path: str, temp_frame_paths: List[str]) -> None: + """ + Process video frames (maintains your original interface) + """ + frame_processors.process_video(None, temp_frame_paths, process_frames) + + +# Additional utility functions inspired by FaceFusion +def 
get_model_scale() -> int: + """Get the current model's scale factor""" + return get_model_config()['real_esrgan_x4']['scale'] + + +def prepare_frame(frame: Frame) -> Frame: + """Prepare frame for processing""" + if frame.dtype != numpy.uint8: + frame = frame.astype(numpy.uint8) + return frame + + +def normalize_frame(frame: Frame) -> Frame: + """Normalize frame after processing""" + return numpy.clip(frame, 0, 255).astype(numpy.uint8) \ No newline at end of file diff --git a/SwitcherAI/processors/frame/modules/lip_syncer.py b/SwitcherAI/processors/frame/modules/lip_syncer.py new file mode 100644 index 0000000000000000000000000000000000000000..371319973b73b1f3e74cda1292317ffa070bc7fa --- /dev/null +++ b/SwitcherAI/processors/frame/modules/lip_syncer.py @@ -0,0 +1,303 @@ +from typing import Any, List, Callable +import cv2 +import threading +import numpy as np +import os + +# Environment fixes +os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION'] = 'python' +os.environ['NO_ALBUMENTATIONS_UPDATE'] = '1' + +import SwitcherAI.globals +import SwitcherAI.processors.frame.core as frame_processors +from SwitcherAI import wording +from SwitcherAI.core import update_status +from SwitcherAI.face_analyser import get_many_faces, get_one_face +from SwitcherAI.typing import Frame, Face +from SwitcherAI.utilities import conditional_download, resolve_relative_path, is_image, is_video + +# Global variables matching the pattern +FRAME_PROCESSOR = None +THREAD_SEMAPHORE = threading.Semaphore() +THREAD_LOCK = threading.Lock() +NAME = 'FACEFUSION.FRAME_PROCESSOR.LIP_SYNCER' + +def get_frame_processor() -> Any: + """Get the lip sync processor - using ONNX Runtime like FaceFusion""" + global FRAME_PROCESSOR + + with THREAD_LOCK: + if FRAME_PROCESSOR is None: + try: + # Get the model name from globals + model_name = getattr(SwitcherAI.globals, 'lip_syncer_model', 'wav2lip_gan_96') + model_path = resolve_relative_path(f'../.assets/models/{model_name}.onnx') + + print(f"[{NAME}] Loading model: {model_path}") + + if os.path.exists(model_path): + # Load ONNX model like FaceFusion does + import onnxruntime + + providers = getattr(SwitcherAI.globals, 'execution_providers', ['CPUExecutionProvider']) + FRAME_PROCESSOR = onnxruntime.InferenceSession(model_path, providers=providers) + + print(f"[{NAME}] ONNX model loaded successfully") + + else: + print(f"[{NAME}] Model file not found: {model_path}") + FRAME_PROCESSOR = None + + except ImportError: + print(f"[{NAME}] onnxruntime not available, using passthrough mode") + FRAME_PROCESSOR = None + except Exception as e: + print(f"[{NAME}] Error loading ONNX model: {e}") + FRAME_PROCESSOR = None + + return FRAME_PROCESSOR + +def clear_frame_processor() -> None: + """Clear the frame processor""" + global FRAME_PROCESSOR + FRAME_PROCESSOR = None + +def pre_check() -> bool: + """Pre-check for lip syncer requirements""" + print(f"[{NAME}] Pre-check starting...") + + try: + # Check if we need to download models + download_directory_path = resolve_relative_path('../.assets/models') + + # Get model name from globals + model_name = getattr(SwitcherAI.globals, 'lip_syncer_model', 'wav2lip_gan_96') + model_path = os.path.join(download_directory_path, f'{model_name}.onnx') + + if not os.path.exists(model_path): + print(f"[{NAME}] Model not found: {model_path}") + + # Model download URLs + model_urls = { + 'wav2lip_96': ['Awwfuck.com'], + 'wav2lip_gan_96': ['Awwfuck.com'] + } + + if model_name in model_urls: + print(f"[{NAME}] Attempting to download {model_name}") + 
conditional_download(download_directory_path, model_urls[model_name]) + + print(f"[{NAME}] Pre-check passed") + return True + + except Exception as e: + print(f"[{NAME}] Pre-check error: {e}") + return True + +def pre_process() -> bool: + """Pre-process initialization""" + print(f"[{NAME}] Pre-processing...") + + # Check target type like FaceFusion does + if not is_image(SwitcherAI.globals.target_path) and not is_video(SwitcherAI.globals.target_path): + update_status(wording.get('select_image_or_video_target') + wording.get('exclamation_mark'), NAME) + return False + + print(f"[{NAME}] Pre-processing completed") + return True + +def post_process() -> None: + """Post-process cleanup""" + clear_frame_processor() + print(f"[{NAME}] Post-processing completed") + +def prepare_audio_frame(audio_frame: np.ndarray) -> np.ndarray: + """Prepare audio frame like FaceFusion - convert mel spectrogram properly""" + # FaceFusion audio preprocessing + audio_frame = np.maximum(np.exp(-5 * np.log(10)), audio_frame) + audio_frame = np.log10(audio_frame) * 1.6 + 3.2 + audio_frame = audio_frame.clip(-4, 4).astype(np.float32) + audio_frame = np.expand_dims(audio_frame, axis=(0, 1)) + return audio_frame + +def prepare_crop_frame(crop_vision_frame: np.ndarray) -> np.ndarray: + """Prepare crop frame like FaceFusion""" + crop_vision_frame = np.expand_dims(crop_vision_frame, axis=0) + prepare_vision_frame = crop_vision_frame.copy() + prepare_vision_frame[:, 48:] = 0 # Mask bottom half + crop_vision_frame = np.concatenate((prepare_vision_frame, crop_vision_frame), axis=3) + crop_vision_frame = crop_vision_frame.transpose(0, 3, 1, 2).astype('float32') / 255.0 + return crop_vision_frame + +def normalize_close_frame(crop_vision_frame: np.ndarray) -> np.ndarray: + """Normalize frame like FaceFusion""" + crop_vision_frame = crop_vision_frame[0].transpose(1, 2, 0) + crop_vision_frame = crop_vision_frame.clip(0, 1) * 255 + crop_vision_frame = crop_vision_frame.astype(np.uint8) + return crop_vision_frame + +def forward(temp_audio_frame: np.ndarray, close_vision_frame: np.ndarray) -> np.ndarray: + """Forward pass through model like FaceFusion""" + lip_syncer = get_frame_processor() + if lip_syncer is None: + return close_vision_frame + + try: + with THREAD_SEMAPHORE: + # Get input names from the model + input_names = [inp.name for inp in lip_syncer.get_inputs()] + + # Create input dictionary - FaceFusion uses 'source' and 'target' + inputs = {} + for name in input_names: + if 'source' in name.lower() or 'audio' in name.lower() or 'mel' in name.lower(): + inputs[name] = temp_audio_frame + elif 'target' in name.lower() or 'video' in name.lower() or 'frame' in name.lower(): + inputs[name] = close_vision_frame + + # Run inference + close_vision_frame = lip_syncer.run(None, inputs)[0] + + return close_vision_frame + + except Exception as e: + print(f"[{NAME}] Forward pass error: {e}") + return close_vision_frame + +def sync_lip(target_face: Face, temp_audio_frame: np.ndarray, temp_vision_frame: Frame) -> Frame: + """Main lip sync function following FaceFusion's approach""" + try: + # For now, create dummy audio frame if none provided + if temp_audio_frame is None: + # Create empty mel spectrogram (80 features x 16 frames) + temp_audio_frame = np.zeros((80, 16), dtype=np.float32) + + # Prepare audio frame + temp_audio_frame = prepare_audio_frame(temp_audio_frame) + + # Extract face region using face landmarks + if hasattr(target_face, 'bbox'): + bbox = target_face.bbox + x1, y1, x2, y2 = map(int, bbox) + + # Ensure coordinates are 
within frame bounds + h, w = temp_vision_frame.shape[:2] + x1 = max(0, min(x1, w-1)) + y1 = max(0, min(y1, h-1)) + x2 = max(0, min(x2, w-1)) + y2 = max(0, min(y2, h-1)) + + if x2 <= x1 or y2 <= y1: + return temp_vision_frame + + # Extract and resize face region to 96x96 + face_region = temp_vision_frame[y1:y2, x1:x2] + close_vision_frame = cv2.resize(face_region, (96, 96)) + + # Prepare crop frame + close_vision_frame = prepare_crop_frame(close_vision_frame) + + # Forward pass + close_vision_frame = forward(temp_audio_frame, close_vision_frame) + + # Normalize output + close_vision_frame = normalize_close_frame(close_vision_frame) + + # Resize back and paste + close_vision_frame = cv2.resize(close_vision_frame, (x2-x1, y2-y1)) + + # Simple paste back + result_frame = temp_vision_frame.copy() + result_frame[y1:y2, x1:x2] = close_vision_frame + + return result_frame + + return temp_vision_frame + + except Exception as e: + print(f"[{NAME}] Lip sync error: {e}") + return temp_vision_frame + +def process_frame(source_face: Face, reference_face: Face, temp_frame: Frame) -> Frame: + """Process a single frame""" + try: + # Get all faces in the frame + many_faces = get_many_faces(temp_frame) + + if not many_faces: + return temp_frame + + # Process each face with lip sync + result_frame = temp_frame + + for target_face in many_faces: + # Create dummy audio frame for now + temp_audio_frame = np.zeros((80, 16), dtype=np.float32) + result_frame = sync_lip(target_face, temp_audio_frame, result_frame) + + return result_frame + + except Exception as e: + print(f"[{NAME}] Error processing frame: {e}") + return temp_frame + +def process_frames(source_path: str, temp_frame_paths: List[str], update: Callable[[], None]) -> None: + """Process multiple frames""" + total_frames = len(temp_frame_paths) + print(f"[{NAME}] Processing {total_frames} frames") + + for i, temp_frame_path in enumerate(temp_frame_paths): + try: + # Read frame + temp_frame = cv2.imread(temp_frame_path) + if temp_frame is None: + continue + + # Process frame + result_frame = process_frame(None, None, temp_frame) + + # Save processed frame + cv2.imwrite(temp_frame_path, result_frame) + + # Update progress + if update: + update() + + # Progress feedback + if i % 100 == 0: + print(f"[{NAME}] Progress: {i}/{total_frames} frames") + + except Exception as e: + print(f"[{NAME}] Error processing frame {i}: {e}") + continue + + print(f"[{NAME}] Frame processing completed") + +def process_image(source_path: str, target_path: str, output_path: str) -> None: + """Process a single image""" + try: + print(f"[{NAME}] Processing image: {os.path.basename(target_path)}") + + # Read image + target_frame = cv2.imread(target_path) + if target_frame is None: + import shutil + shutil.copy2(target_path, output_path) + return + + # Process frame + result_frame = process_frame(None, None, target_frame) + + # Save result + cv2.imwrite(output_path, result_frame) + print(f"[{NAME}] Image processing completed") + + except Exception as e: + print(f"[{NAME}] Error processing image: {e}") + # Fallback: copy original + import shutil + shutil.copy2(target_path, output_path) + +def process_video(source_path: str, temp_frame_paths: List[str]) -> None: + """Process video using the frame processor core""" + frame_processors.process_video(source_path, temp_frame_paths, process_frames) \ No newline at end of file diff --git a/SwitcherAI/typing.py b/SwitcherAI/typing.py new file mode 100644 index 0000000000000000000000000000000000000000..97a182760647d9ad656baf38ec60ae73549491d7 
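Note: the four frame-processor modules above (face_enhancer, face_swapper, frame_enhancer, lip_syncer) all expose the same surface that SwitcherAI.processors.frame.core and the UI components call into: pre_check, pre_process, process_frame, process_frames, process_image, process_video and post_process. A minimal pass-through skeleton of that contract is sketched below for orientation only; the module name and its do-nothing behaviour are illustrative, not part of this changeset, and it assumes the loader requires nothing beyond these callables. The Face and Frame aliases it imports come from SwitcherAI/typing.py, which is added next.

from typing import List, Callable
import cv2

import SwitcherAI.processors.frame.core as frame_processors
from SwitcherAI.typing import Face, Frame

NAME = 'FACEFUSION.FRAME_PROCESSOR.PASSTHROUGH'  # hypothetical module name, for illustration only


def pre_check() -> bool:
    # no models to download for a pass-through processor
    return True


def pre_process() -> bool:
    return True


def post_process() -> None:
    # nothing is cached, so nothing to clear
    pass


def process_frame(source_face : Face, reference_face : Face, temp_frame : Frame) -> Frame:
    # real modules swap, enhance or lip-sync here; this one returns the frame untouched
    return temp_frame


def process_frames(source_path : str, temp_frame_paths : List[str], update : Callable[[], None]) -> None:
    for temp_frame_path in temp_frame_paths:
        temp_frame = cv2.imread(temp_frame_path)
        cv2.imwrite(temp_frame_path, process_frame(None, None, temp_frame))
        if update:
            update()


def process_image(source_path : str, target_path : str, output_path : str) -> None:
    cv2.imwrite(output_path, process_frame(None, None, cv2.imread(target_path)))


def process_video(source_path : str, temp_frame_paths : List[str]) -> None:
    frame_processors.process_video(None, temp_frame_paths, process_frames)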
--- /dev/null +++ b/SwitcherAI/typing.py @@ -0,0 +1,14 @@ +from typing import Any, Literal +from insightface.app.common import Face +import numpy + +Face = Face +Frame = numpy.ndarray[Any, Any] + +FaceRecognition = Literal[ 'reference', 'many' ] +FaceAnalyserDirection = Literal[ 'left-right', 'right-left', 'top-bottom', 'bottom-top', 'small-large', 'large-small' ] +FaceAnalyserAge = Literal[ 'child', 'teen', 'adult', 'senior' ] +FaceAnalyserGender = Literal[ 'male', 'female' ] +TempFrameFormat = Literal[ 'jpg', 'png' ] +OutputVideoEncoder = Literal[ 'libx264', 'libx265', 'libvpx-vp9', 'h264_nvenc', 'hevc_nvenc' ] +LipSyncerModel = Literal[ 'wav2lip_96', 'wav2lip_gan_96' ] \ No newline at end of file diff --git a/SwitcherAI/uis/__init__.py b/SwitcherAI/uis/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SwitcherAI/uis/components/SwitcherAI_uis_components___init__.py b/SwitcherAI/uis/components/SwitcherAI_uis_components___init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/SwitcherAI/uis/components/SwitcherAI_uis_components_about.py b/SwitcherAI/uis/components/SwitcherAI_uis_components_about.py new file mode 100644 index 0000000000000000000000000000000000000000..9d55c362b485aff453fc467c1f421477e8fb7301 --- /dev/null +++ b/SwitcherAI/uis/components/SwitcherAI_uis_components_about.py @@ -0,0 +1,13 @@ +from typing import Optional +import gradio + +from SwitcherAI import metadata + +ABOUT_HTML : Optional[gradio.HTML] = None + + +def render() -> None: + global ABOUT_HTML + + with gradio.Box(): + ABOUT_HTML = gradio.HTML('
' + metadata.get('name') + ' ' + metadata.get('version') + '
') diff --git a/SwitcherAI/uis/components/SwitcherAI_uis_components_benchmark.py b/SwitcherAI/uis/components/SwitcherAI_uis_components_benchmark.py new file mode 100644 index 0000000000000000000000000000000000000000..f6df7710f9667474b823cb74088f690034d1731f --- /dev/null +++ b/SwitcherAI/uis/components/SwitcherAI_uis_components_benchmark.py @@ -0,0 +1,116 @@ +from typing import Any, Optional, List +import time +import tempfile +import statistics +import gradio + +import SwitcherAI.globals +from SwitcherAI import wording +from SwitcherAI.capturer import get_video_frame_total +from SwitcherAI.core import conditional_process +from SwitcherAI.uis.typing import Update +from SwitcherAI.utilities import normalize_output_path, clear_temp + +BENCHMARK_RESULT_DATAFRAME : Optional[gradio.Dataframe] = None +BENCHMARK_CYCLES_SLIDER : Optional[gradio.Button] = None +BENCHMARK_START_BUTTON : Optional[gradio.Button] = None +BENCHMARK_CLEAR_BUTTON : Optional[gradio.Button] = None + + +def render() -> None: + global BENCHMARK_RESULT_DATAFRAME + global BENCHMARK_CYCLES_SLIDER + global BENCHMARK_START_BUTTON + global BENCHMARK_CLEAR_BUTTON + + with gradio.Box(): + BENCHMARK_RESULT_DATAFRAME = gradio.Dataframe( + label = wording.get('benchmark_result_dataframe_label'), + headers = + [ + 'target_path', + 'benchmark_cycles', + 'average_run', + 'fastest_run', + 'slowest_run', + 'relative_fps' + ], + col_count = (6, 'fixed'), + row_count = (7, 'fixed'), + datatype = + [ + 'str', + 'number', + 'number', + 'number', + 'number', + 'number' + ] + ) + BENCHMARK_CYCLES_SLIDER = gradio.Slider( + label = wording.get('benchmark_cycles_slider_label'), + minimum = 1, + step = 1, + value = 3, + maximum = 10 + ) + with gradio.Row(): + BENCHMARK_START_BUTTON = gradio.Button(wording.get('start_button_label')) + BENCHMARK_CLEAR_BUTTON = gradio.Button(wording.get('clear_button_label')) + + +def listen() -> None: + BENCHMARK_START_BUTTON.click(update, inputs = BENCHMARK_CYCLES_SLIDER, outputs = BENCHMARK_RESULT_DATAFRAME) + BENCHMARK_CLEAR_BUTTON.click(clear, outputs = BENCHMARK_RESULT_DATAFRAME) + + +def update(benchmark_cycles : int) -> Update: + SwitcherAI.globals.source_path = '.assets/examples/source.jpg' + target_paths =\ + [ + '.assets/examples/target-240p.mp4', + '.assets/examples/target-360p.mp4', + '.assets/examples/target-540p.mp4', + '.assets/examples/target-720p.mp4', + '.assets/examples/target-1080p.mp4', + '.assets/examples/target-1440p.mp4', + '.assets/examples/target-2160p.mp4' + ] + value = [ benchmark(target_path, benchmark_cycles) for target_path in target_paths ] + return gradio.update(value = value) + + +def benchmark(target_path : str, benchmark_cycles : int) -> List[Any]: + process_times = [] + total_fps = 0.0 + for i in range(benchmark_cycles + 1): + SwitcherAI.globals.target_path = target_path + SwitcherAI.globals.output_path = normalize_output_path(SwitcherAI.globals.source_path, SwitcherAI.globals.target_path, tempfile.gettempdir()) + video_frame_total = get_video_frame_total(SwitcherAI.globals.target_path) + start_time = time.perf_counter() + conditional_process() + end_time = time.perf_counter() + process_time = end_time - start_time + fps = video_frame_total / process_time + if i > 0: + process_times.append(process_time) + total_fps += fps + average_run = round(statistics.mean(process_times), 2) + fastest_run = round(min(process_times), 2) + slowest_run = round(max(process_times), 2) + relative_fps = round(total_fps / benchmark_cycles, 2) + return\ + [ + SwitcherAI.globals.target_path, + 
benchmark_cycles, + average_run, + fastest_run, + slowest_run, + relative_fps + ] + + +def clear() -> Update: + if SwitcherAI.globals.target_path: + clear_temp(SwitcherAI.globals.target_path) + return gradio.update(value = None) diff --git a/SwitcherAI/uis/components/SwitcherAI_uis_components_execution.py b/SwitcherAI/uis/components/SwitcherAI_uis_components_execution.py new file mode 100644 index 0000000000000000000000000000000000000000..ca71f4cf178763d47adc7aae092f07d6eaecece8 --- /dev/null +++ b/SwitcherAI/uis/components/SwitcherAI_uis_components_execution.py @@ -0,0 +1,64 @@ +from typing import List, Optional +import gradio +import onnxruntime + +import SwitcherAI.globals +from SwitcherAI import wording +from SwitcherAI.face_analyser import clear_face_analyser +from SwitcherAI.processors.frame.core import clear_frame_processors_modules +from SwitcherAI.uis.typing import Update +from SwitcherAI.utilities import encode_execution_providers, decode_execution_providers + +EXECUTION_PROVIDERS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None +EXECUTION_THREAD_COUNT_SLIDER : Optional[gradio.Slider] = None +EXECUTION_QUEUE_COUNT_SLIDER : Optional[gradio.Slider] = None + + +def render() -> None: + global EXECUTION_PROVIDERS_CHECKBOX_GROUP + global EXECUTION_THREAD_COUNT_SLIDER + global EXECUTION_QUEUE_COUNT_SLIDER + + with gradio.Box(): + EXECUTION_PROVIDERS_CHECKBOX_GROUP = gradio.CheckboxGroup( + label = wording.get('execution_providers_checkbox_group_label'), + choices = encode_execution_providers(onnxruntime.get_available_providers()), + value = encode_execution_providers(SwitcherAI.globals.execution_providers) + ) + EXECUTION_THREAD_COUNT_SLIDER = gradio.Slider( + label = wording.get('execution_thread_count_slider_label'), + value = SwitcherAI.globals.execution_thread_count, + step = 1, + minimum = 1, + maximum = 128 + ) + EXECUTION_QUEUE_COUNT_SLIDER = gradio.Slider( + label = wording.get('execution_queue_count_slider_label'), + value = SwitcherAI.globals.execution_queue_count, + step = 1, + minimum = 1, + maximum = 16 + ) + + +def listen() -> None: + EXECUTION_PROVIDERS_CHECKBOX_GROUP.change(update_execution_providers, inputs = EXECUTION_PROVIDERS_CHECKBOX_GROUP, outputs = EXECUTION_PROVIDERS_CHECKBOX_GROUP) + EXECUTION_THREAD_COUNT_SLIDER.change(update_execution_thread_count, inputs = EXECUTION_THREAD_COUNT_SLIDER, outputs = EXECUTION_THREAD_COUNT_SLIDER) + EXECUTION_QUEUE_COUNT_SLIDER.change(update_execution_queue_count, inputs = EXECUTION_QUEUE_COUNT_SLIDER, outputs = EXECUTION_QUEUE_COUNT_SLIDER) + + +def update_execution_providers(execution_providers : List[str]) -> Update: + clear_face_analyser() + clear_frame_processors_modules() + SwitcherAI.globals.execution_providers = decode_execution_providers(execution_providers) + return gradio.update(value = execution_providers) + + +def update_execution_thread_count(execution_thread_count : int = 1) -> Update: + SwitcherAI.globals.execution_thread_count = execution_thread_count + return gradio.update(value = execution_thread_count) + + +def update_execution_queue_count(execution_queue_count : int = 1) -> Update: + SwitcherAI.globals.execution_queue_count = execution_queue_count + return gradio.update(value = execution_queue_count) diff --git a/SwitcherAI/uis/components/SwitcherAI_uis_components_face_analyser.py b/SwitcherAI/uis/components/SwitcherAI_uis_components_face_analyser.py new file mode 100644 index 0000000000000000000000000000000000000000..cd24c1874ebc7aa8f8bfc2784ace8952aae5deba --- /dev/null +++ 
b/SwitcherAI/uis/components/SwitcherAI_uis_components_face_analyser.py @@ -0,0 +1,54 @@ +from typing import Optional + +import gradio + +import SwitcherAI.choices +import SwitcherAI.globals +from SwitcherAI import wording +from SwitcherAI.uis import core as ui +from SwitcherAI.uis.typing import Update + +FACE_ANALYSER_DIRECTION_DROPDOWN : Optional[gradio.Dropdown] = None +FACE_ANALYSER_AGE_DROPDOWN : Optional[gradio.Dropdown] = None +FACE_ANALYSER_GENDER_DROPDOWN : Optional[gradio.Dropdown] = None + + +def render() -> None: + global FACE_ANALYSER_DIRECTION_DROPDOWN + global FACE_ANALYSER_AGE_DROPDOWN + global FACE_ANALYSER_GENDER_DROPDOWN + + with gradio.Box(): + with gradio.Row(): + FACE_ANALYSER_DIRECTION_DROPDOWN = gradio.Dropdown( + label = wording.get('face_analyser_direction_dropdown_label'), + choices = SwitcherAI.choices.face_analyser_direction, + value = SwitcherAI.globals.face_analyser_direction + ) + FACE_ANALYSER_AGE_DROPDOWN = gradio.Dropdown( + label = wording.get('face_analyser_age_dropdown_label'), + choices = ['none'] + SwitcherAI.choices.face_analyser_age, + value = SwitcherAI.globals.face_analyser_age or 'none' + ) + FACE_ANALYSER_GENDER_DROPDOWN = gradio.Dropdown( + label = wording.get('face_analyser_gender_dropdown_label'), + choices = ['none'] + SwitcherAI.choices.face_analyser_gender, + value = SwitcherAI.globals.face_analyser_gender or 'none' + ) + ui.register_component('face_analyser_direction_dropdown', FACE_ANALYSER_DIRECTION_DROPDOWN) + ui.register_component('face_analyser_age_dropdown', FACE_ANALYSER_AGE_DROPDOWN) + ui.register_component('face_analyser_gender_dropdown', FACE_ANALYSER_GENDER_DROPDOWN) + + +def listen() -> None: + FACE_ANALYSER_DIRECTION_DROPDOWN.select(lambda value: update_dropdown('face_analyser_direction', value), inputs = FACE_ANALYSER_DIRECTION_DROPDOWN, outputs = FACE_ANALYSER_DIRECTION_DROPDOWN) + FACE_ANALYSER_AGE_DROPDOWN.select(lambda value: update_dropdown('face_analyser_age', value), inputs = FACE_ANALYSER_AGE_DROPDOWN, outputs = FACE_ANALYSER_AGE_DROPDOWN) + FACE_ANALYSER_GENDER_DROPDOWN.select(lambda value: update_dropdown('face_analyser_gender', value), inputs = FACE_ANALYSER_GENDER_DROPDOWN, outputs = FACE_ANALYSER_GENDER_DROPDOWN) + + +def update_dropdown(name : str, value : str) -> Update: + if value == 'none': + setattr(SwitcherAI.globals, name, None) + else: + setattr(SwitcherAI.globals, name, value) + return gradio.update(value = value) diff --git a/SwitcherAI/uis/components/SwitcherAI_uis_components_face_selector.py b/SwitcherAI/uis/components/SwitcherAI_uis_components_face_selector.py new file mode 100644 index 0000000000000000000000000000000000000000..c5f9bf9096d6abd9398fcd6e38969590e9105276 --- /dev/null +++ b/SwitcherAI/uis/components/SwitcherAI_uis_components_face_selector.py @@ -0,0 +1,133 @@ +from typing import List, Optional, Tuple, Any, Dict +from time import sleep + +import cv2 +import gradio + +import SwitcherAI.choices +import SwitcherAI.globals +from SwitcherAI import wording +from SwitcherAI.capturer import get_video_frame +from SwitcherAI.face_analyser import get_many_faces +from SwitcherAI.face_reference import clear_face_reference +from SwitcherAI.typing import Frame, FaceRecognition +from SwitcherAI.uis import core as ui +from SwitcherAI.uis.typing import ComponentName, Update +from SwitcherAI.utilities import is_image, is_video + +FACE_RECOGNITION_DROPDOWN : Optional[gradio.Dropdown] = None +REFERENCE_FACE_POSITION_GALLERY : Optional[gradio.Gallery] = None +REFERENCE_FACE_DISTANCE_SLIDER : 
Optional[gradio.Slider] = None + + +def render() -> None: + global FACE_RECOGNITION_DROPDOWN + global REFERENCE_FACE_POSITION_GALLERY + global REFERENCE_FACE_DISTANCE_SLIDER + + with gradio.Box(): + reference_face_gallery_args: Dict[str, Any] = { + 'label': wording.get('reference_face_gallery_label'), + 'height': 120, + 'object_fit': 'cover', + 'columns': 10, + 'allow_preview': False, + 'visible': 'reference' in SwitcherAI.globals.face_recognition + } + if is_image(SwitcherAI.globals.target_path): + reference_frame = cv2.imread(SwitcherAI.globals.target_path) + reference_face_gallery_args['value'] = extract_gallery_frames(reference_frame) + if is_video(SwitcherAI.globals.target_path): + reference_frame = get_video_frame(SwitcherAI.globals.target_path, SwitcherAI.globals.reference_frame_number) + reference_face_gallery_args['value'] = extract_gallery_frames(reference_frame) + FACE_RECOGNITION_DROPDOWN = gradio.Dropdown( + label = wording.get('face_recognition_dropdown_label'), + choices = SwitcherAI.choices.face_recognition, + value = SwitcherAI.globals.face_recognition + ) + REFERENCE_FACE_POSITION_GALLERY = gradio.Gallery(**reference_face_gallery_args) + REFERENCE_FACE_DISTANCE_SLIDER = gradio.Slider( + label = wording.get('reference_face_distance_slider_label'), + value = SwitcherAI.globals.reference_face_distance, + maximum = 3, + step = 0.05, + visible = 'reference' in SwitcherAI.globals.face_recognition + ) + ui.register_component('face_recognition_dropdown', FACE_RECOGNITION_DROPDOWN) + ui.register_component('reference_face_position_gallery', REFERENCE_FACE_POSITION_GALLERY) + ui.register_component('reference_face_distance_slider', REFERENCE_FACE_DISTANCE_SLIDER) + + +def listen() -> None: + FACE_RECOGNITION_DROPDOWN.select(update_face_recognition, inputs = FACE_RECOGNITION_DROPDOWN, outputs = [ REFERENCE_FACE_POSITION_GALLERY, REFERENCE_FACE_DISTANCE_SLIDER ]) + REFERENCE_FACE_POSITION_GALLERY.select(clear_and_update_face_reference_position) + REFERENCE_FACE_DISTANCE_SLIDER.change(update_reference_face_distance, inputs = REFERENCE_FACE_DISTANCE_SLIDER) + update_component_names : List[ComponentName] =\ + [ + 'target_file', + 'preview_frame_slider' + ] + for component_name in update_component_names: + component = ui.get_component(component_name) + if component: + component.change(update_face_reference_position, outputs = REFERENCE_FACE_POSITION_GALLERY) + select_component_names : List[ComponentName] =\ + [ + 'face_analyser_direction_dropdown', + 'face_analyser_age_dropdown', + 'face_analyser_gender_dropdown' + ] + for component_name in select_component_names: + component = ui.get_component(component_name) + if component: + component.select(update_face_reference_position, outputs = REFERENCE_FACE_POSITION_GALLERY) + + +def update_face_recognition(face_recognition : FaceRecognition) -> Tuple[Update, Update]: + if face_recognition == 'reference': + SwitcherAI.globals.face_recognition = face_recognition + return gradio.update(visible = True), gradio.update(visible = True) + if face_recognition == 'many': + SwitcherAI.globals.face_recognition = face_recognition + return gradio.update(visible = False), gradio.update(visible = False) + + +def clear_and_update_face_reference_position(event: gradio.SelectData) -> Update: + clear_face_reference() + return update_face_reference_position(event.index) + + +def update_face_reference_position(reference_face_position : int = 0) -> Update: + sleep(0.2) + gallery_frames = [] + SwitcherAI.globals.reference_face_position = reference_face_position + if 
is_image(SwitcherAI.globals.target_path): + reference_frame = cv2.imread(SwitcherAI.globals.target_path) + gallery_frames = extract_gallery_frames(reference_frame) + if is_video(SwitcherAI.globals.target_path): + reference_frame = get_video_frame(SwitcherAI.globals.target_path, SwitcherAI.globals.reference_frame_number) + gallery_frames = extract_gallery_frames(reference_frame) + if gallery_frames: + return gradio.update(value = gallery_frames) + return gradio.update(value = None) + + +def update_reference_face_distance(reference_face_distance : float) -> Update: + SwitcherAI.globals.reference_face_distance = reference_face_distance + return gradio.update(value = reference_face_distance) + + +def extract_gallery_frames(reference_frame : Frame) -> List[Frame]: + crop_frames = [] + faces = get_many_faces(reference_frame) + for face in faces: + start_x, start_y, end_x, end_y = map(int, face['bbox']) + padding_x = int((end_x - start_x) * 0.25) + padding_y = int((end_y - start_y) * 0.25) + start_x = max(0, start_x - padding_x) + start_y = max(0, start_y - padding_y) + end_x = max(0, end_x + padding_x) + end_y = max(0, end_y + padding_y) + crop_frame = reference_frame[start_y:end_y, start_x:end_x] + crop_frames.append(ui.normalize_frame(crop_frame)) + return crop_frames diff --git a/SwitcherAI/uis/components/SwitcherAI_uis_components_output.py b/SwitcherAI/uis/components/SwitcherAI_uis_components_output.py new file mode 100644 index 0000000000000000000000000000000000000000..5d9bf2c5cc8afaaf9e054b844067952bc3d1d144 --- /dev/null +++ b/SwitcherAI/uis/components/SwitcherAI_uis_components_output.py @@ -0,0 +1,55 @@ +from typing import Tuple, Optional +import gradio + +import SwitcherAI.globals +from SwitcherAI import wording +from SwitcherAI.core import conditional_process +from SwitcherAI.uis.typing import Update +from SwitcherAI.utilities import is_image, is_video, normalize_output_path, clear_temp + +OUTPUT_START_BUTTON : Optional[gradio.Button] = None +OUTPUT_CLEAR_BUTTON : Optional[gradio.Button] = None +OUTPUT_IMAGE : Optional[gradio.Image] = None +OUTPUT_VIDEO : Optional[gradio.Video] = None + + +def render() -> None: + global OUTPUT_START_BUTTON + global OUTPUT_CLEAR_BUTTON + global OUTPUT_IMAGE + global OUTPUT_VIDEO + + with gradio.Row(): + with gradio.Box(): + OUTPUT_IMAGE = gradio.Image( + label = wording.get('output_image_or_video_label'), + visible = False + ) + OUTPUT_VIDEO = gradio.Video( + label = wording.get('output_image_or_video_label') + ) + with gradio.Row(): + OUTPUT_START_BUTTON = gradio.Button(wording.get('start_button_label')) + OUTPUT_CLEAR_BUTTON = gradio.Button(wording.get('clear_button_label')) + + +def listen() -> None: + OUTPUT_START_BUTTON.click(update, outputs = [ OUTPUT_IMAGE, OUTPUT_VIDEO ]) + OUTPUT_CLEAR_BUTTON.click(clear, outputs = [ OUTPUT_IMAGE, OUTPUT_VIDEO ]) + + +def update() -> Tuple[Update, Update]: + SwitcherAI.globals.output_path = normalize_output_path(SwitcherAI.globals.source_path, SwitcherAI.globals.target_path, '.') + if SwitcherAI.globals.output_path: + conditional_process() + if is_image(SwitcherAI.globals.output_path): + return gradio.update(value = SwitcherAI.globals.output_path, visible = True), gradio.update(value = None, visible = False) + if is_video(SwitcherAI.globals.output_path): + return gradio.update(value = None, visible = False), gradio.update(value = SwitcherAI.globals.output_path, visible = True) + return gradio.update(value = None, visible = False), gradio.update(value = None, visible = False) + + +def clear() -> Tuple[Update, 
Update]: + if SwitcherAI.globals.target_path: + clear_temp(SwitcherAI.globals.target_path) + return gradio.update(value = None), gradio.update(value = None) diff --git a/SwitcherAI/uis/components/SwitcherAI_uis_components_output_settings.py b/SwitcherAI/uis/components/SwitcherAI_uis_components_output_settings.py new file mode 100644 index 0000000000000000000000000000000000000000..ebc17e850b900419721fd82669b8027fefcacccf --- /dev/null +++ b/SwitcherAI/uis/components/SwitcherAI_uis_components_output_settings.py @@ -0,0 +1,43 @@ +from typing import Optional +import gradio + +import SwitcherAI.choices +import SwitcherAI.globals +from SwitcherAI import wording +from SwitcherAI.typing import OutputVideoEncoder +from SwitcherAI.uis.typing import Update + +OUTPUT_VIDEO_ENCODER_DROPDOWN : Optional[gradio.Dropdown] = None +OUTPUT_VIDEO_QUALITY_SLIDER : Optional[gradio.Slider] = None + + +def render() -> None: + global OUTPUT_VIDEO_ENCODER_DROPDOWN + global OUTPUT_VIDEO_QUALITY_SLIDER + + with gradio.Box(): + OUTPUT_VIDEO_ENCODER_DROPDOWN = gradio.Dropdown( + label = wording.get('output_video_encoder_dropdown_label'), + choices = SwitcherAI.choices.output_video_encoder, + value = SwitcherAI.globals.output_video_encoder + ) + OUTPUT_VIDEO_QUALITY_SLIDER = gradio.Slider( + label = wording.get('output_video_quality_slider_label'), + value = SwitcherAI.globals.output_video_quality, + step = 1 + ) + + +def listen() -> None: + OUTPUT_VIDEO_ENCODER_DROPDOWN.select(update_output_video_encoder, inputs = OUTPUT_VIDEO_ENCODER_DROPDOWN, outputs = OUTPUT_VIDEO_ENCODER_DROPDOWN) + OUTPUT_VIDEO_QUALITY_SLIDER.change(update_output_video_quality, inputs = OUTPUT_VIDEO_QUALITY_SLIDER, outputs = OUTPUT_VIDEO_QUALITY_SLIDER) + + +def update_output_video_encoder(output_video_encoder: OutputVideoEncoder) -> Update: + SwitcherAI.globals.output_video_encoder = output_video_encoder + return gradio.update(value = output_video_encoder) + + +def update_output_video_quality(output_video_quality : int) -> Update: + SwitcherAI.globals.output_video_quality = output_video_quality + return gradio.update(value = output_video_quality) diff --git a/SwitcherAI/uis/components/SwitcherAI_uis_components_preview.py b/SwitcherAI/uis/components/SwitcherAI_uis_components_preview.py new file mode 100644 index 0000000000000000000000000000000000000000..53690c8cbdfb8bebd78bc7b1966e64be0c805113 --- /dev/null +++ b/SwitcherAI/uis/components/SwitcherAI_uis_components_preview.py @@ -0,0 +1,121 @@ +from time import sleep +from typing import Any, Dict, Tuple, List, Optional +import cv2 +import gradio + +import SwitcherAI.globals +from SwitcherAI import wording +from SwitcherAI.capturer import get_video_frame, get_video_frame_total +from SwitcherAI.face_analyser import get_one_face +from SwitcherAI.face_reference import get_face_reference, set_face_reference +from SwitcherAI.predictor import predict_frame +from SwitcherAI.processors.frame.core import load_frame_processor_module +from SwitcherAI.typing import Frame +from SwitcherAI.uis import core as ui +from SwitcherAI.uis.typing import ComponentName, Update +from SwitcherAI.utilities import is_video, is_image + +PREVIEW_IMAGE : Optional[gradio.Image] = None +PREVIEW_FRAME_SLIDER : Optional[gradio.Slider] = None + + +def render() -> None: + global PREVIEW_IMAGE + global PREVIEW_FRAME_SLIDER + + with gradio.Box(): + preview_image_args: Dict[str, Any] = { + 'label': wording.get('preview_image_label') + } + preview_frame_slider_args: Dict[str, Any] = { + 'label': wording.get('preview_frame_slider_label'), + 
'step': 1, + 'visible': False + } + if is_image(SwitcherAI.globals.target_path): + target_frame = cv2.imread(SwitcherAI.globals.target_path) + preview_frame = extract_preview_frame(target_frame) + preview_image_args['value'] = ui.normalize_frame(preview_frame) + if is_video(SwitcherAI.globals.target_path): + temp_frame = get_video_frame(SwitcherAI.globals.target_path, SwitcherAI.globals.reference_frame_number) + preview_frame = extract_preview_frame(temp_frame) + preview_image_args['value'] = ui.normalize_frame(preview_frame) + preview_image_args['visible'] = True + preview_frame_slider_args['value'] = SwitcherAI.globals.reference_frame_number + preview_frame_slider_args['maximum'] = get_video_frame_total(SwitcherAI.globals.target_path) + preview_frame_slider_args['visible'] = True + PREVIEW_IMAGE = gradio.Image(**preview_image_args) + PREVIEW_FRAME_SLIDER = gradio.Slider(**preview_frame_slider_args) + ui.register_component('preview_frame_slider', PREVIEW_FRAME_SLIDER) + + +def listen() -> None: + PREVIEW_FRAME_SLIDER.change(update, inputs = PREVIEW_FRAME_SLIDER, outputs = [ PREVIEW_IMAGE, PREVIEW_FRAME_SLIDER ]) + update_component_names : List[ComponentName] =\ + [ + 'source_file', + 'target_file', + 'face_recognition_dropdown', + 'reference_face_distance_slider', + 'frame_processors_checkbox_group' + ] + for component_name in update_component_names: + component = ui.get_component(component_name) + if component: + component.change(update, inputs = PREVIEW_FRAME_SLIDER, outputs = [ PREVIEW_IMAGE, PREVIEW_FRAME_SLIDER ]) + select_component_names : List[ComponentName] =\ + [ + 'reference_face_position_gallery', + 'face_analyser_direction_dropdown', + 'face_analyser_age_dropdown', + 'face_analyser_gender_dropdown' + ] + for component_name in select_component_names: + component = ui.get_component(component_name) + if component: + component.select(update, inputs = PREVIEW_FRAME_SLIDER, outputs = [ PREVIEW_IMAGE, PREVIEW_FRAME_SLIDER ]) + + +def update(frame_number : int = 0) -> Tuple[Update, Update]: + sleep(0.1) + if is_image(SwitcherAI.globals.target_path): + target_frame = cv2.imread(SwitcherAI.globals.target_path) + preview_frame = extract_preview_frame(target_frame) + return gradio.update(value = ui.normalize_frame(preview_frame)), gradio.update(value = None, maximum = None, visible = False) + if is_video(SwitcherAI.globals.target_path): + SwitcherAI.globals.reference_frame_number = frame_number + video_frame_total = get_video_frame_total(SwitcherAI.globals.target_path) + temp_frame = get_video_frame(SwitcherAI.globals.target_path, SwitcherAI.globals.reference_frame_number) + preview_frame = extract_preview_frame(temp_frame) + return gradio.update(value = ui.normalize_frame(preview_frame)), gradio.update(maximum = video_frame_total, visible = True) + return gradio.update(value = None), gradio.update(value = None, maximum = None, visible = False) + + +def extract_preview_frame(temp_frame : Frame) -> Frame: + if predict_frame(temp_frame): + return cv2.GaussianBlur(temp_frame, (99, 99), 0) + source_face = get_one_face(cv2.imread(SwitcherAI.globals.source_path)) if SwitcherAI.globals.source_path else None + temp_frame = reduce_preview_frame(temp_frame) + if 'reference' in SwitcherAI.globals.face_recognition and not get_face_reference(): + reference_frame = get_video_frame(SwitcherAI.globals.target_path, SwitcherAI.globals.reference_frame_number) + reference_face = get_one_face(reference_frame, SwitcherAI.globals.reference_face_position) + set_face_reference(reference_face) + reference_face = 
get_face_reference() if 'reference' in SwitcherAI.globals.face_recognition else None + for frame_processor in SwitcherAI.globals.frame_processors: + frame_processor_module = load_frame_processor_module(frame_processor) + if frame_processor_module.pre_process(): + temp_frame = frame_processor_module.process_frame( + source_face, + reference_face, + temp_frame + ) + return temp_frame + + +def reduce_preview_frame(temp_frame : Frame, max_height : int = 480) -> Frame: + height, width = temp_frame.shape[:2] + if height > max_height: + scale = max_height / height + max_width = int(width * scale) + temp_frame = cv2.resize(temp_frame, (max_width, max_height)) + return temp_frame diff --git a/SwitcherAI/uis/components/SwitcherAI_uis_components_processors.py b/SwitcherAI/uis/components/SwitcherAI_uis_components_processors.py new file mode 100644 index 0000000000000000000000000000000000000000..e0e97037843b416caebaa6a87f579e7ad735f264 --- /dev/null +++ b/SwitcherAI/uis/components/SwitcherAI_uis_components_processors.py @@ -0,0 +1,41 @@ +from typing import List, Optional +import gradio + +import SwitcherAI.globals +from SwitcherAI import wording +from SwitcherAI.processors.frame.core import load_frame_processor_module, clear_frame_processors_modules +from SwitcherAI.uis import core as ui +from SwitcherAI.uis.typing import Update +from SwitcherAI.utilities import list_module_names + +FRAME_PROCESSORS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None + + +def render() -> None: + global FRAME_PROCESSORS_CHECKBOX_GROUP + + with gradio.Box(): + FRAME_PROCESSORS_CHECKBOX_GROUP = gradio.CheckboxGroup( + label = wording.get('frame_processors_checkbox_group_label'), + choices = sort_frame_processors(SwitcherAI.globals.frame_processors), + value = SwitcherAI.globals.frame_processors + ) + ui.register_component('frame_processors_checkbox_group', FRAME_PROCESSORS_CHECKBOX_GROUP) + + +def listen() -> None: + FRAME_PROCESSORS_CHECKBOX_GROUP.change(update_frame_processors, inputs = FRAME_PROCESSORS_CHECKBOX_GROUP, outputs = FRAME_PROCESSORS_CHECKBOX_GROUP) + + +def update_frame_processors(frame_processors : List[str]) -> Update: + clear_frame_processors_modules() + SwitcherAI.globals.frame_processors = frame_processors + for frame_processor in SwitcherAI.globals.frame_processors: + frame_processor_module = load_frame_processor_module(frame_processor) + frame_processor_module.pre_check() + return gradio.update(value = frame_processors, choices = sort_frame_processors(frame_processors)) + + +def sort_frame_processors(frame_processors : List[str]) -> list[str]: + frame_processors_names = list_module_names('SwitcherAI/processors/frame/modules') + return sorted(frame_processors_names, key = lambda frame_processor : frame_processors.index(frame_processor) if frame_processor in frame_processors else len(frame_processors)) diff --git a/SwitcherAI/uis/components/SwitcherAI_uis_components_settings.py b/SwitcherAI/uis/components/SwitcherAI_uis_components_settings.py new file mode 100644 index 0000000000000000000000000000000000000000..08b0edfde98d72af8d58e4ef69d6dad0314a2fa1 --- /dev/null +++ b/SwitcherAI/uis/components/SwitcherAI_uis_components_settings.py @@ -0,0 +1,41 @@ +from typing import Optional +import gradio + +import SwitcherAI.globals +from SwitcherAI import wording +from SwitcherAI.uis.typing import Update + +KEEP_FPS_CHECKBOX : Optional[gradio.Checkbox] = None +KEEP_TEMP_CHECKBOX : Optional[gradio.Checkbox] = None +SKIP_AUDIO_CHECKBOX : Optional[gradio.Checkbox] = None + + +def render() -> None: + global 
KEEP_FPS_CHECKBOX + global KEEP_TEMP_CHECKBOX + global SKIP_AUDIO_CHECKBOX + + with gradio.Box(): + KEEP_FPS_CHECKBOX = gradio.Checkbox( + label = wording.get('keep_fps_checkbox_label'), + value = SwitcherAI.globals.keep_fps + ) + KEEP_TEMP_CHECKBOX = gradio.Checkbox( + label = wording.get('keep_temp_checkbox_label'), + value = SwitcherAI.globals.keep_temp + ) + SKIP_AUDIO_CHECKBOX = gradio.Checkbox( + label = wording.get('skip_audio_checkbox_label'), + value = SwitcherAI.globals.skip_audio + ) + + +def listen() -> None: + KEEP_FPS_CHECKBOX.change(lambda value: update_checkbox('keep_fps', value), inputs = KEEP_FPS_CHECKBOX, outputs = KEEP_FPS_CHECKBOX) + KEEP_TEMP_CHECKBOX.change(lambda value: update_checkbox('keep_temp', value), inputs = KEEP_TEMP_CHECKBOX, outputs = KEEP_TEMP_CHECKBOX) + SKIP_AUDIO_CHECKBOX.change(lambda value: update_checkbox('skip_audio', value), inputs = SKIP_AUDIO_CHECKBOX, outputs = SKIP_AUDIO_CHECKBOX) + + +def update_checkbox(name : str, value: bool) -> Update: + setattr(SwitcherAI.globals, name, value) + return gradio.update(value = value) diff --git a/SwitcherAI/uis/components/SwitcherAI_uis_components_source.py b/SwitcherAI/uis/components/SwitcherAI_uis_components_source.py new file mode 100644 index 0000000000000000000000000000000000000000..db232df00800597450ed6eff5e81f7bc97ca0d7e --- /dev/null +++ b/SwitcherAI/uis/components/SwitcherAI_uis_components_source.py @@ -0,0 +1,48 @@ +from typing import Any, IO, Optional +import gradio + +import SwitcherAI.globals +from SwitcherAI import wording +from SwitcherAI.uis import core as ui +from SwitcherAI.uis.typing import Update +from SwitcherAI.utilities import is_image + +SOURCE_FILE : Optional[gradio.File] = None +SOURCE_IMAGE : Optional[gradio.Image] = None + + +def render() -> None: + global SOURCE_FILE + global SOURCE_IMAGE + + with gradio.Box(): + is_source_image = is_image(SwitcherAI.globals.source_path) + SOURCE_FILE = gradio.File( + file_count = 'single', + file_types= + [ + '.png', + '.jpg', + '.webp' + ], + label = wording.get('source_file_label'), + value = SwitcherAI.globals.source_path if is_source_image else None + ) + ui.register_component('source_file', SOURCE_FILE) + SOURCE_IMAGE = gradio.Image( + value = SOURCE_FILE.value['name'] if is_source_image else None, + visible = is_source_image, + show_label = False + ) + + +def listen() -> None: + SOURCE_FILE.change(update, inputs = SOURCE_FILE, outputs = SOURCE_IMAGE) + + +def update(file: IO[Any]) -> Update: + if file and is_image(file.name): + SwitcherAI.globals.source_path = file.name + return gradio.update(value = file.name, visible = True) + SwitcherAI.globals.source_path = None + return gradio.update(value = None, visible = False) diff --git a/SwitcherAI/uis/components/SwitcherAI_uis_components_target.py b/SwitcherAI/uis/components/SwitcherAI_uis_components_target.py new file mode 100644 index 0000000000000000000000000000000000000000..a07a4cbc7047555471bab0d85b5ee2bb67615839 --- /dev/null +++ b/SwitcherAI/uis/components/SwitcherAI_uis_components_target.py @@ -0,0 +1,62 @@ +from typing import Any, IO, Tuple, Optional +import gradio + +import SwitcherAI.globals +from SwitcherAI import wording +from SwitcherAI.face_reference import clear_face_reference +from SwitcherAI.uis import core as ui +from SwitcherAI.uis.typing import Update +from SwitcherAI.utilities import is_image, is_video + +TARGET_FILE : Optional[gradio.File] = None +TARGET_IMAGE : Optional[gradio.Image] = None +TARGET_VIDEO : Optional[gradio.Video] = None + + +def render() -> None: + 
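# build the shared file picker plus an image preview and a video preview; only the preview matching the current target type starts visible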
global TARGET_FILE + global TARGET_IMAGE + global TARGET_VIDEO + + with gradio.Box(): + is_target_image = is_image(SwitcherAI.globals.target_path) + is_target_video = is_video(SwitcherAI.globals.target_path) + TARGET_FILE = gradio.File( + label = wording.get('target_file_label'), + file_count = 'single', + file_types = + [ + '.png', + '.jpg', + '.webp', + '.mp4' + ], + value = SwitcherAI.globals.target_path if is_target_image or is_target_video else None + ) + TARGET_IMAGE = gradio.Image( + value = TARGET_FILE.value['name'] if is_target_image else None, + visible = is_target_image, + show_label = False + ) + TARGET_VIDEO = gradio.Video( + value = TARGET_FILE.value['name'] if is_target_video else None, + visible = is_target_video, + show_label = False + ) + ui.register_component('target_file', TARGET_FILE) + + +def listen() -> None: + TARGET_FILE.change(update, inputs = TARGET_FILE, outputs = [ TARGET_IMAGE, TARGET_VIDEO ]) + + +def update(file : IO[Any]) -> Tuple[Update, Update]: + clear_face_reference() + if file and is_image(file.name): + SwitcherAI.globals.target_path = file.name + return gradio.update(value = file.name, visible = True), gradio.update(value = None, visible = False) + if file and is_video(file.name): + SwitcherAI.globals.target_path = file.name + return gradio.update(value = None, visible = False), gradio.update(value = file.name, visible = True) + SwitcherAI.globals.target_path = None + return gradio.update(value = None, visible = False), gradio.update(value = None, visible = False) diff --git a/SwitcherAI/uis/components/SwitcherAI_uis_components_temp_frame.py b/SwitcherAI/uis/components/SwitcherAI_uis_components_temp_frame.py new file mode 100644 index 0000000000000000000000000000000000000000..4c7cc012cb672faef8b103ef2199fc1d0a11d82c --- /dev/null +++ b/SwitcherAI/uis/components/SwitcherAI_uis_components_temp_frame.py @@ -0,0 +1,44 @@ +from typing import Optional +import gradio + +import SwitcherAI.choices +import SwitcherAI.globals +from SwitcherAI import wording +from SwitcherAI.typing import TempFrameFormat + +from SwitcherAI.uis.typing import Update + +TEMP_FRAME_FORMAT_DROPDOWN : Optional[gradio.Dropdown] = None +TEMP_FRAME_QUALITY_SLIDER : Optional[gradio.Slider] = None + + +def render() -> None: + global TEMP_FRAME_FORMAT_DROPDOWN + global TEMP_FRAME_QUALITY_SLIDER + + with gradio.Box(): + TEMP_FRAME_FORMAT_DROPDOWN = gradio.Dropdown( + label = wording.get('temp_frame_format_dropdown_label'), + choices = SwitcherAI.choices.temp_frame_format, + value = SwitcherAI.globals.temp_frame_format + ) + TEMP_FRAME_QUALITY_SLIDER = gradio.Slider( + label = wording.get('temp_frame_quality_slider_label'), + value = SwitcherAI.globals.temp_frame_quality, + step = 1 + ) + + +def listen() -> None: + TEMP_FRAME_FORMAT_DROPDOWN.select(update_temp_frame_format, inputs = TEMP_FRAME_FORMAT_DROPDOWN, outputs = TEMP_FRAME_FORMAT_DROPDOWN) + TEMP_FRAME_QUALITY_SLIDER.change(update_temp_frame_quality, inputs = TEMP_FRAME_QUALITY_SLIDER, outputs = TEMP_FRAME_QUALITY_SLIDER) + + +def update_temp_frame_format(temp_frame_format : TempFrameFormat) -> Update: + SwitcherAI.globals.temp_frame_format = temp_frame_format + return gradio.update(value = temp_frame_format) + + +def update_temp_frame_quality(temp_frame_quality : int) -> Update: + SwitcherAI.globals.temp_frame_quality = temp_frame_quality + return gradio.update(value = temp_frame_quality) diff --git a/SwitcherAI/uis/components/SwitcherAI_uis_components_trim_frame.py b/SwitcherAI/uis/components/SwitcherAI_uis_components_trim_frame.py 
new file mode 100644 index 0000000000000000000000000000000000000000..63ea022b2f14bb375ab8a43887830a7d19fb464f --- /dev/null +++ b/SwitcherAI/uis/components/SwitcherAI_uis_components_trim_frame.py @@ -0,0 +1,65 @@ +from time import sleep +from typing import Any, Dict, Tuple, Optional + +import gradio + +import SwitcherAI.globals +from SwitcherAI import wording +from SwitcherAI.capturer import get_video_frame_total +from SwitcherAI.uis import core as ui +from SwitcherAI.uis.typing import Update +from SwitcherAI.utilities import is_video + +TRIM_FRAME_START_SLIDER : Optional[gradio.Slider] = None +TRIM_FRAME_END_SLIDER : Optional[gradio.Slider] = None + + +def render() -> None: + global TRIM_FRAME_START_SLIDER + global TRIM_FRAME_END_SLIDER + + with gradio.Box(): + trim_frame_start_slider_args : Dict[str, Any] = { + 'label': wording.get('trim_frame_start_slider_label'), + 'step': 1, + 'visible': False + } + trim_frame_end_slider_args : Dict[str, Any] = { + 'label': wording.get('trim_frame_end_slider_label'), + 'step': 1, + 'visible': False + } + if is_video(SwitcherAI.globals.target_path): + video_frame_total = get_video_frame_total(SwitcherAI.globals.target_path) + trim_frame_start_slider_args['value'] = SwitcherAI.globals.trim_frame_start or 0 + trim_frame_start_slider_args['maximum'] = video_frame_total + trim_frame_start_slider_args['visible'] = True + trim_frame_end_slider_args['value'] = SwitcherAI.globals.trim_frame_end or video_frame_total + trim_frame_end_slider_args['maximum'] = video_frame_total + trim_frame_end_slider_args['visible'] = True + with gradio.Row(): + TRIM_FRAME_START_SLIDER = gradio.Slider(**trim_frame_start_slider_args) + TRIM_FRAME_END_SLIDER = gradio.Slider(**trim_frame_end_slider_args) + + +def listen() -> None: + target_file = ui.get_component('target_file') + if target_file: + target_file.change(remote_update, outputs = [ TRIM_FRAME_START_SLIDER, TRIM_FRAME_END_SLIDER ]) + TRIM_FRAME_START_SLIDER.change(lambda value : update_number('trim_frame_start', int(value)), inputs = TRIM_FRAME_START_SLIDER, outputs = TRIM_FRAME_START_SLIDER) + TRIM_FRAME_END_SLIDER.change(lambda value : update_number('trim_frame_end', int(value)), inputs = TRIM_FRAME_END_SLIDER, outputs = TRIM_FRAME_END_SLIDER) + + +def remote_update() -> Tuple[Update, Update]: + sleep(0.1) + if is_video(SwitcherAI.globals.target_path): + video_frame_total = get_video_frame_total(SwitcherAI.globals.target_path) + SwitcherAI.globals.trim_frame_start = 0 + SwitcherAI.globals.trim_frame_end = video_frame_total + return gradio.update(value = 0, maximum = video_frame_total, visible = True), gradio.update(value = video_frame_total, maximum = video_frame_total, visible = True) + return gradio.update(value = None, maximum = None, visible = False), gradio.update(value = None, maximum = None, visible = False) + + +def update_number(name : str, value : int) -> Update: + setattr(SwitcherAI.globals, name, value) + return gradio.update(value = value) diff --git a/SwitcherAI/uis/core.py b/SwitcherAI/uis/core.py new file mode 100644 index 0000000000000000000000000000000000000000..f4343db62fc31072fedee2ec56ba9b6045efc10a --- /dev/null +++ b/SwitcherAI/uis/core.py @@ -0,0 +1,67 @@ +from typing import Dict, Optional, Any +import importlib +import sys +import cv2 +import gradio + +import SwitcherAI.globals +from SwitcherAI import metadata, wording +from SwitcherAI.typing import Frame +from SwitcherAI.uis.typing import Component, ComponentName + +COMPONENTS: Dict[ComponentName, Component] = {} +UI_LAYOUT_METHODS =\ +[ + 
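# hooks every layout module must expose; load_ui_layout_module() verifies them and launch() calls them in this order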
'pre_check', + 'render', + 'listen' +] + + +def launch() -> None: + with gradio.Blocks(theme = get_theme(), title = metadata.get('name') + ' ' + metadata.get('version')) as ui: + for ui_layout in SwitcherAI.globals.ui_layouts: + ui_layout_module = load_ui_layout_module(ui_layout) + ui_layout_module.pre_check() + ui_layout_module.render() + ui_layout_module.listen() + ui.launch(debug=True, show_api=True) + + +def load_ui_layout_module(ui_layout : str) -> Any: + try: + ui_layout_module = importlib.import_module('SwitcherAI.uis.layouts.' + ui_layout) + for method_name in UI_LAYOUT_METHODS: + if not hasattr(ui_layout_module, method_name): + raise NotImplementedError + except ModuleNotFoundError: + sys.exit(wording.get('ui_layout_not_loaded').format(ui_layout = ui_layout)) + except NotImplementedError: + sys.exit(wording.get('ui_layout_not_implemented').format(ui_layout = ui_layout)) + return ui_layout_module + + +def get_theme() -> gradio.Theme: + return gradio.themes.Soft( + primary_hue = gradio.themes.colors.red, + secondary_hue = gradio.themes.colors.gray, + font = gradio.themes.GoogleFont('Inter') + ).set( + background_fill_primary = '*neutral_50', + block_label_text_size = '*text_sm', + block_title_text_size = '*text_sm' + ) + + +def get_component(name: ComponentName) -> Optional[Component]: + if name in COMPONENTS: + return COMPONENTS[name] + return None + + +def register_component(name: ComponentName, component: Component) -> None: + COMPONENTS[name] = component + + +def normalize_frame(frame : Frame) -> Frame: + return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) diff --git a/SwitcherAI/uis/layouts/default.py b/SwitcherAI/uis/layouts/default.py new file mode 100644 index 0000000000000000000000000000000000000000..58514fa3aee30bff7095cfc442bff61cc896477e --- /dev/null +++ b/SwitcherAI/uis/layouts/default.py @@ -0,0 +1,44 @@ +import gradio + +from SwitcherAI.uis.components import about, processors, execution, temp_frame, settings, source, target, preview, trim_frame, face_analyser, face_selector, output_settings, output + + +def pre_check() -> bool: + return True + + +def render() -> gradio.Blocks: + with gradio.Blocks() as layout: + with gradio.Row(): + with gradio.Column(scale = 2): + about.render() + processors.render() + execution.render() + temp_frame.render() + settings.render() + with gradio.Column(scale = 2): + source.render() + target.render() + output_settings.render() + output.render() + with gradio.Column(scale = 3): + #preview.render() + trim_frame.render() + face_selector.render() + face_analyser.render() + return layout + + +def listen() -> None: + processors.listen() + execution.listen() + settings.listen() + temp_frame.listen() + source.listen() + target.listen() + #preview.listen() + trim_frame.listen() + face_selector.listen() + face_analyser.listen() + output_settings.listen() + output.listen() diff --git a/SwitcherAI/uis/typing.py b/SwitcherAI/uis/typing.py new file mode 100644 index 0000000000000000000000000000000000000000..4abe384f07c4b90504e47291674905f85a5b8f52 --- /dev/null +++ b/SwitcherAI/uis/typing.py @@ -0,0 +1,18 @@ +from typing import Literal, Dict, Any +import gradio + +Component = gradio.File or gradio.Image or gradio.Video or gradio.Slider +ComponentName = Literal\ +[ + 'source_file', + 'target_file', + 'preview_frame_slider', + 'face_recognition_dropdown', + 'reference_face_position_gallery', + 'reference_face_distance_slider', + 'face_analyser_direction_dropdown', + 'face_analyser_age_dropdown', + 'face_analyser_gender_dropdown', + 
'frame_processors_checkbox_group' +] +Update = Dict[Any, Any] diff --git a/SwitcherAI/utilities.py b/SwitcherAI/utilities.py new file mode 100644 index 0000000000000000000000000000000000000000..4cdb664c46e6305070ccb4c8605854b67354e275 --- /dev/null +++ b/SwitcherAI/utilities.py @@ -0,0 +1,190 @@ +import glob +import mimetypes +import os +import platform +import shutil +import ssl +import subprocess +import tempfile +import urllib +from pathlib import Path +from typing import List, Optional + +import onnxruntime +from tqdm import tqdm + +import SwitcherAI.globals +from SwitcherAI import wording + +TEMP_DIRECTORY_PATH = r"D:\Switcher\Temp\SwitcherAI\resize-vid" +TEMP_OUTPUT_NAME = 'temp.mp4' + +# monkey patch ssl +if platform.system().lower() == 'darwin': + ssl._create_default_https_context = ssl._create_unverified_context + + +def run_ffmpeg(args : List[str]) -> bool: + commands = [ 'ffmpeg', '-hide_banner', '-loglevel', 'error' ] + commands.extend(args) + try: + subprocess.check_output(commands, stderr = subprocess.STDOUT) + return True + except subprocess.CalledProcessError: + return False + + +def detect_fps(target_path : str) -> Optional[float]: + commands = [ 'ffprobe', '-v', 'error', '-select_streams', 'v:0', '-show_entries', 'stream=r_frame_rate', '-of', 'default=noprint_wrappers = 1:nokey = 1', target_path ] + output = subprocess.check_output(commands).decode().strip().split('/') + try: + numerator, denominator = map(int, output) + return numerator / denominator + except (ValueError, ZeroDivisionError): + return None + + +def extract_frames(target_path : str, fps : float) -> bool: + temp_directory_path = get_temp_directory_path(target_path) + temp_frame_quality = round(31 - (SwitcherAI.globals.temp_frame_quality * 0.31)) + trim_frame_start = SwitcherAI.globals.trim_frame_start + trim_frame_end = SwitcherAI.globals.trim_frame_end + commands = [ '-hwaccel', 'auto', '-i', target_path, '-q:v', str(temp_frame_quality), '-pix_fmt', 'rgb24', ] + if trim_frame_start is not None and trim_frame_end is not None: + commands.extend([ '-vf', 'trim=start_frame=' + str(trim_frame_start) + ':end_frame=' + str(trim_frame_end) + ',fps=' + str(fps) ]) + elif trim_frame_start is not None: + commands.extend([ '-vf', 'trim=start_frame=' + str(trim_frame_start) + ',fps=' + str(fps) ]) + elif trim_frame_end is not None: + commands.extend([ '-vf', 'trim=end_frame=' + str(trim_frame_end) + ',fps=' + str(fps) ]) + else: + commands.extend([ '-vf', 'fps=' + str(fps) ]) + commands.extend([os.path.join(temp_directory_path, '%04d.' + SwitcherAI.globals.temp_frame_format)]) + return run_ffmpeg(commands) + + +def create_video(target_path : str, fps : float) -> bool: + temp_output_path = get_temp_output_path(target_path) + temp_directory_path = get_temp_directory_path(target_path) + output_video_quality = round(51 - (SwitcherAI.globals.output_video_quality * 0.5)) + commands = [ '-hwaccel', 'auto', '-r', str(fps), '-i', os.path.join(temp_directory_path, '%04d.' 
+ SwitcherAI.globals.temp_frame_format), '-c:v', SwitcherAI.globals.output_video_encoder ] + if SwitcherAI.globals.output_video_encoder in [ 'libx264', 'libx265', 'libvpx' ]: + commands.extend([ '-crf', str(output_video_quality) ]) + if SwitcherAI.globals.output_video_encoder in [ 'h264_nvenc', 'hevc_nvenc' ]: + commands.extend([ '-cq', str(output_video_quality) ]) + commands.extend([ '-pix_fmt', 'yuv420p', '-vf', 'colorspace=bt709:iall=bt601-6-625', '-y', temp_output_path ]) + return run_ffmpeg(commands) + + +def restore_audio(target_path : str, output_path : str) -> None: + fps = detect_fps(target_path) + trim_frame_start = SwitcherAI.globals.trim_frame_start + trim_frame_end = SwitcherAI.globals.trim_frame_end + temp_output_path = get_temp_output_path(target_path) + commands = [ '-hwaccel', 'auto', '-i', temp_output_path, '-i', target_path ] + if trim_frame_start is None and trim_frame_end is None: + commands.extend([ '-c:a', 'copy' ]) + else: + if trim_frame_start is not None: + start_time = trim_frame_start / fps + commands.extend([ '-ss', str(start_time) ]) + else: + commands.extend([ '-ss', '0' ]) + if trim_frame_end is not None: + end_time = trim_frame_end / fps + commands.extend([ '-to', str(end_time) ]) + commands.extend([ '-c:a', 'aac' ]) + commands.extend([ '-map', '0:v:0', '-map', '1:a:0', '-y', output_path ]) + done = run_ffmpeg(commands) + if not done: + move_temp(target_path, output_path) + + +def get_temp_frame_paths(target_path : str) -> List[str]: + temp_directory_path = get_temp_directory_path(target_path) + return glob.glob((os.path.join(glob.escape(temp_directory_path), '*.' + SwitcherAI.globals.temp_frame_format))) + + +def get_temp_directory_path(target_path : str) -> str: + target_name, _ = os.path.splitext(os.path.basename(target_path)) + return os.path.join(TEMP_DIRECTORY_PATH, target_name) + + +def get_temp_output_path(target_path : str) -> str: + temp_directory_path = get_temp_directory_path(target_path) + return os.path.join(temp_directory_path, TEMP_OUTPUT_NAME) + + +def normalize_output_path(source_path : str, target_path : str, output_path : str) -> Optional[str]: + if source_path and target_path and output_path: + source_name, _ = os.path.splitext(os.path.basename(source_path)) + target_name, target_extension = os.path.splitext(os.path.basename(target_path)) + if os.path.isdir(output_path): + return os.path.join(output_path, source_name + '-' + target_name + target_extension) + return output_path + + +def create_temp(target_path : str) -> None: + temp_directory_path = get_temp_directory_path(target_path) + Path(temp_directory_path).mkdir(parents = True, exist_ok = True) + + +def move_temp(target_path : str, output_path : str) -> None: + temp_output_path = get_temp_output_path(target_path) + if os.path.isfile(temp_output_path): + if os.path.isfile(output_path): + os.remove(output_path) + shutil.move(temp_output_path, output_path) + + +def clear_temp(target_path : str) -> None: + temp_directory_path = get_temp_directory_path(target_path) + parent_directory_path = os.path.dirname(temp_directory_path) + if not SwitcherAI.globals.keep_temp and os.path.isdir(temp_directory_path): + shutil.rmtree(temp_directory_path) + if os.path.exists(parent_directory_path) and not os.listdir(parent_directory_path): + os.rmdir(parent_directory_path) + + +def is_image(image_path : str) -> bool: + if image_path and os.path.isfile(image_path): + mimetype, _ = mimetypes.guess_type(image_path) + return bool(mimetype and mimetype.startswith('image/')) + return False + + +def 
is_video(video_path : str) -> bool: + if video_path and os.path.isfile(video_path): + mimetype, _ = mimetypes.guess_type(video_path) + return bool(mimetype and mimetype.startswith('video/')) + return False + + +def conditional_download(download_directory_path : str, urls : List[str]) -> None: + if not os.path.exists(download_directory_path): + os.makedirs(download_directory_path) + for url in urls: + download_file_path = os.path.join(download_directory_path, os.path.basename(url)) + if not os.path.exists(download_file_path): + request = urllib.request.urlopen(url) # type: ignore[attr-defined] + total = int(request.headers.get('Content-Length', 0)) + with tqdm(total = total, desc = wording.get('downloading'), unit = 'B', unit_scale = True, unit_divisor = 1024) as progress: + urllib.request.urlretrieve(url, download_file_path, reporthook = lambda count, block_size, total_size: progress.update(block_size)) # type: ignore[attr-defined] + + +def resolve_relative_path(path : str) -> str: + return os.path.abspath(os.path.join(os.path.dirname(__file__), path)) + + +def list_module_names(path : str) -> Optional[List[str]]: + if os.path.exists(path): + files = os.listdir(path) + return [Path(file).stem for file in files if not Path(file).stem.startswith('__')] + return None + + +def encode_execution_providers(execution_providers : List[str]) -> List[str]: + return [execution_provider.replace('ExecutionProvider', '').lower() for execution_provider in execution_providers] + + +def decode_execution_providers(execution_providers : List[str]) -> List[str]: + return [provider for provider, encoded_execution_provider in zip(onnxruntime.get_available_providers(), encode_execution_providers(onnxruntime.get_available_providers())) if any(execution_provider in encoded_execution_provider for execution_provider in execution_providers)] diff --git a/SwitcherAI/wording.py b/SwitcherAI/wording.py new file mode 100644 index 0000000000000000000000000000000000000000..1d70363ea7546eeb3b3ec224eb04848db727718e --- /dev/null +++ b/SwitcherAI/wording.py @@ -0,0 +1,88 @@ +WORDING =\ +{ + 'python_not_supported': 'Python version is not supported, upgrade to {version} or higher', + 'ffmpeg_not_installed': 'FFMpeg is not installed', + 'source_help': 'select a source image', + 'target_help': 'select a target image or video', + 'output_help': 'specify the output file or directory', + 'frame_processors_help': 'choose from the available frame processors (choices: {choices}, ...)', + 'ui_layouts_help': 'choose from the available ui layouts (choices: {choices}, ...)', + 'keep_fps_help': 'preserve the frames per second (fps) of the target', + 'keep_temp_help': 'retain temporary frames after processing', + 'skip_audio_help': 'omit audio from the target', + 'face_recognition_help': 'specify the method for face recognition', + 'face_analyser_direction_help': 'specify the direction used for face analysis', + 'face_analyser_age_help': 'specify the age used for face analysis', + 'face_analyser_gender_help': 'specify the gender used for face analysis', + 'reference_face_position_help': 'specify the position of the reference face', + 'reference_face_distance_help': 'specify the distance between the reference face and the target face', + 'reference_frame_number_help': 'specify the number of the reference frame', + 'trim_frame_start_help': 'specify the start frame for extraction', + 'trim_frame_end_help': 'specify the end frame for extraction', + 'temp_frame_format_help': 'specify the image format used for frame extraction', + 
'temp_frame_quality_help': 'specify the image quality used for frame extraction', + 'output_video_encoder_help': 'specify the encoder used for the output video', + 'output_video_quality_help': 'specify the quality used for the output video', + 'max_memory_help': 'specify the maximum amount of ram to be used (in gb)', + 'execution_providers_help': 'choose from the available execution providers (choices: {choices}, ...)', + 'execution_thread_count_help': 'specify the number of execution threads', + 'execution_queue_count_help': 'specify the number of execution queries', + 'creating_temp': 'Creating temporary resources', + 'extracting_frames_fps': 'Extracting frames with {fps} FPS', + 'processing': 'Processing', + 'downloading': 'Downloading', + 'temp_frames_not_found': 'Temporary frames not found', + 'creating_video_fps': 'Creating video with {fps} FPS', + 'creating_video_failed': 'Creating video failed', + 'skipping_audio': 'Skipping audio', + 'restoring_audio': 'Restoring audio', + 'clearing_temp': 'Clearing temporary resources', + 'processing_image_succeed': 'Processing to image succeed', + 'processing_image_failed': 'Processing to image failed', + 'processing_video_succeed': 'Processing to video succeed', + 'processing_video_failed': 'Processing to video failed', + 'select_image_source': 'Select an image for source path', + 'select_image_or_video_target': 'Select an image or video for target path', + 'no_source_face_detected': 'No source face detected', + 'frame_processor_not_loaded': 'Frame processor {frame_processor} could not be loaded', + 'frame_processor_not_implemented': 'Frame processor {frame_processor} not implemented correctly', + 'ui_layout_not_loaded': 'UI layout {ui_layout} could not be loaded', + 'ui_layout_not_implemented': 'UI layout {ui_layout} not implemented correctly', + 'start_button_label': 'START', + 'clear_button_label': 'CLEAR', + 'benchmark_result_dataframe_label': 'BENCHMARK RESULT', + 'benchmark_cycles_slider_label': 'BENCHMARK CYCLES', + 'execution_providers_checkbox_group_label': 'EXECUTION PROVIDERS', + 'execution_thread_count_slider_label': 'EXECUTION THREAD COUNT', + 'execution_queue_count_slider_label': 'EXECUTION QUEUE COUNT', + 'face_analyser_direction_dropdown_label': 'FACE ANALYSER DIRECTION', + 'face_analyser_age_dropdown_label': 'FACE ANALYSER AGE', + 'face_analyser_gender_dropdown_label': 'FACE ANALYSER GENDER', + 'reference_face_gallery_label': 'REFERENCE FACE', + 'face_recognition_dropdown_label': 'FACE RECOGNITION', + 'reference_face_distance_slider_label': 'REFERENCE FACE DISTANCE', + 'output_image_or_video_label': 'OUTPUT', + 'output_video_encoder_dropdown_label': 'OUTPUT VIDEO ENCODER', + 'output_video_quality_slider_label': 'OUTPUT VIDEO QUALITY', + 'preview_image_label': 'PREVIEW', + 'preview_frame_slider_label': 'PREVIEW FRAME', + 'frame_processors_checkbox_group_label': 'FRAME PROCESSORS', + 'keep_fps_checkbox_label': 'KEEP FPS', + 'keep_temp_checkbox_label': 'KEEP TEMP', + 'skip_audio_checkbox_label': 'SKIP AUDIO', + 'temp_frame_format_dropdown_label': 'TEMP FRAME FORMAT', + 'temp_frame_quality_slider_label': 'TEMP FRAME QUALITY', + 'trim_frame_start_slider_label': 'TRIM FRAME START', + 'trim_frame_end_slider_label': 'TRIM FRAME END', + 'source_file_label': 'SOURCE', + 'target_file_label': 'TARGET', + 'point': '.', + 'comma': ',', + 'colon': ':', + 'question_mark': '?', + 'exclamation_mark': '!' 
+} + + +def get(key : str) -> str: + return WORDING[key] diff --git a/Temp/New Text Document.txt b/Temp/New Text Document.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/app.py b/app.py new file mode 100644 index 0000000000000000000000000000000000000000..177a088ad873f9fa5a3968448e96703972c6f559 --- /dev/null +++ b/app.py @@ -0,0 +1,1859 @@ +""" +🎭 Advanced Face Swap Studio - Cross-Environment Compatible Version +================================================================ + +✅ FEATURES: +- Professional 3-column layout optimized for all screen sizes +- GPU/CPU processing with automatic detection +- Batch processing mode for multiple videos +- Lip sync integration (beta) +- Real-time processing monitor +- Cross-platform compatibility (Windows, Linux, macOS) + +🔧 COMPATIBILITY: +- HuggingFace Spaces ready +- Google Colab compatible +- Local development friendly +- Graceful degradation for missing dependencies +- Automatic environment detection and configuration + +📋 REQUIREMENTS: +- gradio, torch, onnxruntime (core) +- moviepy (optional - for video processing) +- SwitcherAI modules (optional - for enhancement) + +🚀 USAGE: +- Local: python app.py +- HuggingFace: Upload and run as Space +- Colab: Upload and execute in notebook +""" + +import os +import sys +import zipfile +import time + +# Get the directory where app.py is located +BASE_DIR = os.path.dirname(os.path.abspath(__file__)) + +# Create unique instance ID for multi-instance support +INSTANCE_ID = f"instance_{os.getpid()}_{int(time.time() % 10000)}" +INSTANCE_TEMP_DIR = os.path.join(BASE_DIR, "Temp", INSTANCE_ID) +INSTANCE_OUTPUT_DIR = os.path.join(BASE_DIR, "output", INSTANCE_ID) + +# Create instance-specific directories +os.makedirs(INSTANCE_TEMP_DIR, exist_ok=True) +os.makedirs(INSTANCE_OUTPUT_DIR, exist_ok=True) + +# Set up environment variables using relative paths (removed Conda and CUDA paths) +os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE" +os.environ["TF_FORCE_GPU_ALLOW_GROWTH"] = "TRUE" +os.environ["GRADIO_TEMP_DIR"] = INSTANCE_TEMP_DIR + +import gradio as gr +import subprocess as sp +import uuid +import time +import shutil +try: + from moviepy.editor import * + MOVIEPY_AVAILABLE = True + print("✅ MoviePy loaded successfully") +except ImportError as e: + print(f"⚠️ MoviePy not available: {e}") + print("🔄 Some video processing features may be limited") + MOVIEPY_AVAILABLE = False +import gc # Import garbage collector + +# Add relative paths to sys.path +sys.path.append(os.path.join(BASE_DIR, "SwitcherAI", "processors", "frame", "modules")) + +# Try to import optional enhancement modules (graceful degradation) +try: + import face_enhancer + import frame_enhancer + ENHANCEMENT_AVAILABLE = True + print("✅ Enhancement modules loaded successfully") +except ImportError as e: + print(f"⚠️ Enhancement modules not available: {e}") + print("🔄 App will run in basic mode without enhancement features") + ENHANCEMENT_AVAILABLE = False + +sys.path.append(BASE_DIR) # Add base directory + +import onnxruntime as ort +import torch +import shlex + +def find_available_port(start_port=7860, max_attempts=10): + """Find an available port starting from start_port""" + import socket + + for i in range(max_attempts): + port = start_port + i + try: + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + s.bind(('localhost', port)) + print(f"🌐 Found available port: {port}") + return port + except OSError: + continue + + print(f"⚠️ Could not find available port in 
range {start_port}-{start_port + max_attempts}") + return start_port # Fallback to original port + +def get_instance_file_path(filename): + """Get instance-specific file path to avoid conflicts""" + return os.path.join(INSTANCE_TEMP_DIR, filename) + +def get_available_gpus(): + """Get list of available CUDA devices with enhanced debugging""" + print("\n🔍 Starting GPU Detection...") + available_gpus = [] + + # Check CUDA availability first + cuda_available = torch.cuda.is_available() + print(f"🚀 CUDA Available: {cuda_available}") + + if not cuda_available: + print("❌ CUDA not available - returning CPU only") + return ["CPU Only"] + + # Get device count + device_count = torch.cuda.device_count() + print(f"🔢 Total CUDA devices detected: {device_count}") + + if device_count == 0: + print("❌ No CUDA devices found despite CUDA being available") + return ["CPU Only"] + + # Check environment variables that might limit GPU visibility + cuda_visible = os.environ.get('CUDA_VISIBLE_DEVICES') + if cuda_visible is not None: + print(f"🌍 CUDA_VISIBLE_DEVICES: {cuda_visible}") + + # Detect each GPU + for i in range(device_count): + try: + print(f"\n--- Checking GPU {i} ---") + + # Get device properties + props = torch.cuda.get_device_properties(i) + gpu_name = props.name + gpu_memory = props.total_memory / (1024**3) # GB + + print(f" Name: {gpu_name}") + print(f" Memory: {gpu_memory:.1f}GB") + print(f" Compute Capability: {props.major}.{props.minor}") + + # Test device accessibility + try: + # Save current device + current_device = torch.cuda.current_device() if torch.cuda.is_available() else None + + # Test the device + torch.cuda.set_device(i) + test_tensor = torch.tensor([1.0], device=f'cuda:{i}') + + # Successful - add to list + gpu_entry = f"GPU {i}: {gpu_name} ({gpu_memory:.1f}GB)" + available_gpus.append(gpu_entry) + print(f" Status: ✅ Accessible") + print(f" Added: {gpu_entry}") + + # Cleanup + del test_tensor + + # Restore previous device if it existed + if current_device is not None: + torch.cuda.set_device(current_device) + + except Exception as device_error: + print(f" Status: ❌ Not accessible - {device_error}") + # Still add it to the list but mark as problematic + gpu_entry = f"GPU {i}: {gpu_name} (⚠️ Issues)" + available_gpus.append(gpu_entry) + print(f" Added with warning: {gpu_entry}") + + except Exception as e: + print(f" ❌ Error detecting GPU {i}: {e}") + # Add as unknown GPU + available_gpus.append(f"GPU {i}: Unknown GPU (Error)") + + # Always add CPU option + available_gpus.append("CPU Only") + + # Final summary + print(f"\n📊 GPU Detection Summary:") + print(f" Total devices found: {len(available_gpus)}") + for i, gpu in enumerate(available_gpus): + print(f" {i+1}. 
{gpu}") + + print("✅ GPU detection complete\n") + return available_gpus + +def set_gpu_device(gpu_selection): + """Set the CUDA device based on user selection""" + if gpu_selection.startswith("GPU"): + try: + gpu_id = gpu_selection.split(":")[0].split(" ")[1] + os.environ["CUDA_VISIBLE_DEVICES"] = gpu_id + print(f"Set CUDA_VISIBLE_DEVICES to: {gpu_id}") + return gpu_id + except (IndexError, ValueError) as e: + print(f"⚠️ Error parsing GPU selection '{gpu_selection}': {e}") + os.environ["CUDA_VISIBLE_DEVICES"] = "" + print("Falling back to CPU mode") + return "cpu" + else: + os.environ["CUDA_VISIBLE_DEVICES"] = "" + print("Using CPU mode") + return "cpu" + +# Get the port early for display in UI +INSTANCE_PORT = find_available_port(7860) if not os.getenv('SPACE_ID') else 7860 + +def check_environment(): + """Check environment and display compatibility status""" + print("\n🔍 ENVIRONMENT CHECK:") + print("=" * 50) + + # Check Python version + import sys + python_version = sys.version_info + print(f"🐍 Python: {python_version.major}.{python_version.minor}.{python_version.micro}") + + # Check key dependencies + dependencies = { + 'gradio': '✅ Available', + 'torch': '✅ Available' if torch.__version__ else '❌ Not Available', + 'onnxruntime': '✅ Available', + 'moviepy': '✅ Available' if MOVIEPY_AVAILABLE else '⚠️ Limited functionality', + 'enhancement_modules': '✅ Available' if ENHANCEMENT_AVAILABLE else '⚠️ Basic mode only' + } + + for dep, status in dependencies.items(): + print(f"📦 {dep}: {status}") + + # Check CUDA availability + if torch.cuda.is_available(): + print(f"🚀 CUDA: ✅ Available ({torch.cuda.device_count()} device(s))") + for i in range(torch.cuda.device_count()): + try: + print(f" └─ GPU {i}: {torch.cuda.get_device_name(i)}") + except: + print(f" └─ GPU {i}: Unknown GPU") + else: + print("🚀 CUDA: ⚠️ Not available (CPU mode only)") + + print("=" * 50) + return True + +def create_requirements_file(): + """Create a requirements.txt file for easy deployment""" + requirements = [ + "gradio>=4.0.0", + "torch>=2.0.0", + "onnxruntime>=1.15.0", + "moviepy>=1.0.3", + "opencv-python>=4.8.0", + "numpy>=1.24.0", + "Pillow>=9.5.0" + ] + + req_path = os.path.join(BASE_DIR, "requirements.txt") + try: + with open(req_path, 'w') as f: + f.write('\n'.join(requirements)) + print(f"📝 Created requirements.txt at: {req_path}") + except Exception as e: + print(f"⚠️ Could not create requirements.txt: {e}") + +def create_multi_instance_scripts(): + """Create helper scripts for running multiple instances""" + + # Windows batch script + batch_script = """@echo off +echo Starting Face Swap Studio Instance... +echo Instance will auto-detect available port starting from 7860 +echo. +python app.py +pause +""" + + # Linux/Mac shell script + shell_script = """#!/bin/bash +echo "Starting Face Swap Studio Instance..." 
+echo "Instance will auto-detect available port starting from 7860" +echo "" +python3 app.py +""" + + try: + # Create Windows script + with open(os.path.join(BASE_DIR, "launch_instance.bat"), 'w') as f: + f.write(batch_script) + + # Create Linux/Mac script + script_path = os.path.join(BASE_DIR, "launch_instance.sh") + with open(script_path, 'w') as f: + f.write(shell_script) + + # Make shell script executable + try: + os.chmod(script_path, 0o755) + except: + pass # Windows doesn't support chmod + + print("📝 Created multi-instance launch scripts:") + print(" - launch_instance.bat (Windows)") + print(" - launch_instance.sh (Linux/Mac)") + + except Exception as e: + print(f"⚠️ Could not create launch scripts: {e}") + +# Create launch scripts +create_multi_instance_scripts() + +# Create requirements file for deployment +create_requirements_file() + +# Run environment check +check_environment() + +# Get available GPUs for the dropdown +AVAILABLE_GPUS = get_available_gpus() + +# Print available GPUs to console for debugging +print("\n" + "="*60) +print("🖥️ GPU INITIALIZATION FOR DROPDOWN") +print("="*60) +print(f"📊 Final GPU List for Dropdown ({len(AVAILABLE_GPUS)} items):") +for i, gpu in enumerate(AVAILABLE_GPUS): + print(f" [{i}] {gpu}") +print(f"🎯 Default selection: {AVAILABLE_GPUS[0] if AVAILABLE_GPUS else 'None'}") +print(f"📋 List contents: {AVAILABLE_GPUS}") +print(f"🔢 Total choices for dropdown: {len(AVAILABLE_GPUS)}") + +# Verify CUDA one more time +print(f"\n🚀 CUDA Status:") +print(f" Available: {torch.cuda.is_available()}") +if torch.cuda.is_available(): + print(f" Device count: {torch.cuda.device_count()}") + for i in range(torch.cuda.device_count()): + try: + name = torch.cuda.get_device_name(i) + print(f" GPU {i}: {name}") + except: + print(f" GPU {i}: Error getting name") +print("="*60 + "\n") + +# Create a simple GPU test function +def debug_gpu_choices(): + """Debug function to show what GPUs are available""" + print("🔍 Debug GPU Choices Called:") + print(f" AVAILABLE_GPUS: {AVAILABLE_GPUS}") + print(f" Length: {len(AVAILABLE_GPUS)}") + return AVAILABLE_GPUS + +def on_gpu_selection_change(selected_gpu): + """Handle GPU selection change - for debugging""" + print(f"🖥️ GPU Selection Changed: {selected_gpu}") + return selected_gpu + +def refresh_gpu_list(): + """Refresh the GPU list and return updated choices""" + global AVAILABLE_GPUS + print("🔄 Refreshing GPU list...") + AVAILABLE_GPUS = get_available_gpus() + + return gr.update( + choices=AVAILABLE_GPUS, + value=AVAILABLE_GPUS[0] if AVAILABLE_GPUS else "CPU Only", + interactive=True, + allow_custom_value=False + ) + +def debug_gpu_dropdown(): + """Debug the GPU dropdown and return status""" + global AVAILABLE_GPUS + print("🔍 GPU Debug Button Clicked") + print(f" Current AVAILABLE_GPUS: {AVAILABLE_GPUS}") + print(f" Length: {len(AVAILABLE_GPUS)}") + + # Force complete refresh of GPU detection + print("🔄 Force refreshing GPU detection...") + AVAILABLE_GPUS = get_available_gpus() + + debug_info = f"✅ DEBUG RESULTS:\n" + debug_info += f"• CUDA Available: {torch.cuda.is_available()}\n" + debug_info += f"• Device Count: {torch.cuda.device_count() if torch.cuda.is_available() else 0}\n" + debug_info += f"• Detected Options: {len(AVAILABLE_GPUS)}\n" + + for i, gpu in enumerate(AVAILABLE_GPUS): + debug_info += f" [{i}] {gpu}\n" + + # Create completely new dropdown configuration + dropdown_update = gr.update( + choices=AVAILABLE_GPUS, + value=AVAILABLE_GPUS[0] if AVAILABLE_GPUS else "CPU Only", + interactive=True, + visible=True + ) + + 
print(f"🔄 Updated dropdown with {len(AVAILABLE_GPUS)} choices") + return dropdown_update, debug_info + +# Global variable to track current process for cancellation +current_process = None +last_output_path = None +last_batch_mode = False + +def create_batch_zip(): + """Create a zip file of all output files""" + try: + if not os.path.exists(INSTANCE_OUTPUT_DIR): + print(f"❌ Output directory does not exist: {INSTANCE_OUTPUT_DIR}") + return None + + files = os.listdir(INSTANCE_OUTPUT_DIR) + if not files: + print("❌ No files found in output directory") + return None + + zip_path = os.path.join(INSTANCE_OUTPUT_DIR, f"batch_results_{INSTANCE_ID}.zip") + + # Remove old zip if exists + if os.path.exists(zip_path): + os.remove(zip_path) + print("🗑️ Removed old zip file") + + with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zipf: + for file in files: + if not file.endswith('.zip'): # Don't zip existing zips + file_path = os.path.join(INSTANCE_OUTPUT_DIR, file) + if os.path.isfile(file_path): + zipf.write(file_path, file) + print(f"📦 Added to zip: {file}") + + zip_size = os.path.getsize(zip_path) / (1024 * 1024) # MB + print(f"✅ Batch zip created: {zip_path} ({zip_size:.1f}MB)") + return zip_path + + except Exception as e: + print(f"❌ Error creating batch zip: {e}") + return None + +def get_instance_downloads(): + """Get download file(s) from the current instance output directory""" + try: + print(f"🔍 Checking downloads in: {INSTANCE_OUTPUT_DIR}") # Debug + + if not os.path.exists(INSTANCE_OUTPUT_DIR): + print(f"❌ Output directory does not exist: {INSTANCE_OUTPUT_DIR}") + return None, "📁 No output directory found for this instance" + + # Get all video and zip files from the instance output directory + files = [] + all_files = os.listdir(INSTANCE_OUTPUT_DIR) + print(f"📂 Files in output directory: {all_files}") # Debug + + for file in all_files: + if file.lower().endswith(('.mp4', '.avi', '.mov', '.mkv', '.zip')): + full_path = os.path.join(INSTANCE_OUTPUT_DIR, file) + files.append(full_path) + print(f"✅ Found downloadable file: {file}") # Debug + + if not files: + print("❌ No downloadable files found") # Debug + return None, "📁 No completed files found in output directory" + + # If only one file, return it directly + if len(files) == 1: + file_name = os.path.basename(files[0]) + file_size = os.path.getsize(files[0]) / (1024 * 1024) # MB + # Normalize path for current OS + normalized_path = os.path.normpath(files[0]) + print(f"📥 Returning single file: {normalized_path}") # Debug + return normalized_path, f"📥 Ready to download: {file_name} ({file_size:.1f}MB)" + + # If multiple files, create a zip + zip_path = os.path.join(INSTANCE_OUTPUT_DIR, f"all_results_{INSTANCE_ID}.zip") + print(f"📦 Creating zip file: {zip_path}") # Debug + + # Remove old zip if exists + if os.path.exists(zip_path): + os.remove(zip_path) + print("🗑️ Removed old zip file") # Debug + + with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zipf: + for file_path in files: + if not file_path.endswith('.zip'): # Don't zip existing zips + file_name = os.path.basename(file_path) + zipf.write(file_path, file_name) + print(f"📦 Added to zip: {file_name}") # Debug + + zip_size = os.path.getsize(zip_path) / (1024 * 1024) # MB + print(f"✅ Zip created successfully: {zip_size:.1f}MB") # Debug + # Normalize path for current OS + normalized_zip_path = os.path.normpath(zip_path) + return normalized_zip_path, f"📦 Ready to download: {len(files)} files ({zip_size:.1f}MB total)" + + except Exception as e: + error_msg = f"❌ Error 
accessing downloads: {str(e)}" + print(error_msg) # Debug + return None, error_msg + +def handle_download_click(): + """Handle download button click and return file + status""" + download_file_path, status_message = get_instance_downloads() + print(f"🔍 Download click - File: {download_file_path}, Status: {status_message}") + + if download_file_path and os.path.exists(download_file_path): + # Show download component and hide download button temporarily + return ( + download_file_path, # Set file for download + status_message, # Update status + gr.update(visible=True), # Show download file component + gr.update(visible=False) # Hide download button temporarily + ) + else: + return ( + None, + status_message, + gr.update(visible=False), # Keep download component hidden + gr.update(visible=True) # Keep download button visible + ) + +def reset_download_ui(): + """Reset download UI after download completes""" + # Called when download file component changes (indicating download started) + return ( + gr.update(visible=False), # Hide download file component + gr.update(visible=True), # Show download button again + "📥 Download completed! Ready for next download." + ) + +def check_downloads_status(): + """Check and return download status for the UI""" + download_file, status_message = get_instance_downloads() + return status_message + +def reset_to_defaults(): + """Reset all settings to their default values""" + return ( + None, # source_image + None, # target_video + ['face_swapper', 'face_enhancer'], # frame_processor + 'top-bottom', # face_analyser_direction + 'reference', # face_recognition + 'female', # face_analyser_gender + 'adult', # face_analyser_age + False, # skip_audio + True, # keep_fps + False, # keep_temp + 'wav2lip_gan_96', # lip_syncer_model + False, # enable_lip_sync + False, # use_folder_mode + AVAILABLE_GPUS[0] if AVAILABLE_GPUS else "CPU Only", # gpu_selection + "🔧 Configuration reset to defaults. Ready for new processing session!\n", # cli_output + "🔄 RESET CONFIGURATION" # button text + ) + +def cancel_processing(): + """Cancel the current processing operation""" + global current_process + if current_process and current_process.poll() is None: + try: + current_process.terminate() + current_process.wait(timeout=10) # Wait up to 10 seconds + except sp.TimeoutExpired: # subprocess is imported as sp above + current_process.kill() # Force kill if it doesn't terminate + current_process.wait() + except Exception as e: + print(f"⚠️ Error cancelling process: {e}") + return "⏹️ Processing operation cancelled by user request.\n🔄 System ready for new configuration.\n" + return "⚠️ No active processing session found.\n" + +def compress_video_if_needed(input_path, max_size_mb=1000): + """Compress video if it exceeds the size limit""" + if not MOVIEPY_AVAILABLE: + print("⚠️ MoviePy not available - skipping compression") + return input_path + + try: + file_size_mb = os.path.getsize(input_path) / (1024 * 1024) + + if file_size_mb > max_size_mb: + print(f"Video size ({file_size_mb:.1f}MB) exceeds limit. 
Compressing...") + compressed_path = input_path.replace('.mp4', '_compressed.mp4') + + clip = VideoFileClip(input_path) + # Reduce quality for compression + clip.write_videofile( + compressed_path, + fps=24, # Reduce FPS + bitrate="2000k", # Reduce bitrate + audio_codec='aac' + ) + clip.close() + return compressed_path + except Exception as e: + print(f"❌ Compression failed: {e}") + + return input_path + +def resize_video(file, export, fps): + """Resize video with fallback if MoviePy unavailable""" + if not MOVIEPY_AVAILABLE: + print("⚠️ MoviePy not available - copying video without resizing") + shutil.copy(file, export) + return + + # Get the Convert directory path + convert_dir = os.path.join(BASE_DIR, "Convert") + + # Compress if needed before processing (only for single video mode) + if not file.startswith(convert_dir): + file = compress_video_if_needed(file) + + try: + # Load the video without applying crossfade blending to maintain sharpness in frames + clip = VideoFileClip(file) + # Write the video at the original resolution and fps, without blending + clip.write_videofile(export, fps=fps, audio_codec='aac') + clip.close() + except Exception as e: + print(f"❌ Video processing failed: {e}") + shutil.copy(file, export) + +def extract_audio(video_path, audio_path): + """Extract audio from video file""" + if not MOVIEPY_AVAILABLE: + print("⚠️ MoviePy not available - cannot extract audio") + return False + + try: + clip = VideoFileClip(video_path) + if clip.audio is not None: + clip.audio.write_audiofile(audio_path, logger=None) + clip.close() + return True + else: + clip.close() + return False + except Exception as e: + print(f"❌ Error extracting audio: {e}") + return False + +def cleanup_temp_files(): + """Clean up temporary files from previous runs""" + temp_files = [ + get_instance_file_path('source-image.jpg'), + get_instance_file_path('resize-vid.mp4'), + get_instance_file_path('target-audio.wav') + ] + + cleanup_count = 0 + + # Remove specific temp files for this instance + for temp_file in temp_files: + if os.path.exists(temp_file): + try: + os.remove(temp_file) + print(f"🧹 Cleaned up: {temp_file}") + cleanup_count += 1 + except Exception as e: + print(f"⚠️ Could not remove {temp_file}: {e}") + + # Clean up old instance directories (older than 1 hour) + temp_base_dir = os.path.join(BASE_DIR, "Temp") + if os.path.exists(temp_base_dir): + try: + for instance_dir in os.listdir(temp_base_dir): + instance_path = os.path.join(temp_base_dir, instance_dir) + if os.path.isdir(instance_path) and instance_dir.startswith('instance_'): + # Check if directory is old enough to clean up + if time.time() - os.path.getctime(instance_path) > 3600: # 1 hour + try: + shutil.rmtree(instance_path) + print(f"🧹 Cleaned up old instance: {instance_path}") + cleanup_count += 1 + except Exception as e: + print(f"⚠️ Could not remove {instance_path}: {e}") + except Exception as e: + print(f"⚠️ Could not access temp directory: {e}") + + if cleanup_count > 0: + print(f"✅ Startup cleanup completed: {cleanup_count} items removed") + else: + print("✨ Startup cleanup: No temp files found to remove") + +# Run cleanup on startup +cleanup_temp_files() + +os.makedirs(INSTANCE_OUTPUT_DIR, exist_ok=True) + +def run_single_video(source_image, target_video, frame_processor, face_analyser_direction, + face_recognition, face_analyser_gender, face_analyser_age, skip_audio, + keep_fps, keep_temp, lip_syncer_model, enable_lip_sync, gpu_selection): + """Process a single uploaded video""" + global last_output_path, last_batch_mode 
+ last_batch_mode = False + + # Set GPU device + set_gpu_device(gpu_selection) + + print(f'🎬 Processing target video: {target_video}') + + # Saving the uploaded image and video with instance-specific paths + new_source = get_instance_file_path('source-image.jpg') + new_target = get_instance_file_path('resize-vid.mp4') + + # Copy the files locally + shutil.copy(source_image, new_source) + resize_video(file=target_video, export=new_target, fps=30) + + if not os.path.exists(new_source): + return "❌ Source image file does not exist", "" + if not os.path.exists(new_target): + return "❌ Target video file does not exist", "" + + # Extract the original filenames of the source image and target video + source_image_name = os.path.splitext(os.path.basename(source_image))[0] + target_video_name = os.path.splitext(os.path.basename(target_video))[0] + + selected_frame_processors = ' '.join(frame_processor) + + # Handle audio extraction for lip sync from the TARGET video itself + audio_source_path = None + if enable_lip_sync: + audio_source_path = get_instance_file_path('target-audio.wav') + if not extract_audio(new_target, audio_source_path): + print(f"⚠️ Warning: Could not extract audio from {target_video}. Skipping lip sync.") + enable_lip_sync = False + + # Add lip sync suffix to filename if enabled + suffix = "_lipsynced" if enable_lip_sync else "" + output_filename = f"{source_image_name}_{target_video_name}{suffix}.mp4" + output_path = os.path.join(INSTANCE_OUTPUT_DIR, output_filename) + + os.makedirs(INSTANCE_OUTPUT_DIR, exist_ok=True) + + # Determine execution provider based on GPU selection + if gpu_selection.startswith("GPU"): + execution_provider = "cuda" + else: + execution_provider = "cpu" + + # Construct command as a single string and use shlex.split to handle it + cmd = ( + f"python run.py --execution-providers {execution_provider} " + f"--execution-thread-count 8 " # Changed from 16 to 8 + f"--reference-face-distance 1.5 " + f"-s {shlex.quote(new_source)} -t {shlex.quote(new_target)} -o {shlex.quote(output_path)} " + f"--frame-processors {selected_frame_processors} " + f"--face-analyser-direction {face_analyser_direction} " + ) + + # Add lip sync parameters if enabled + if enable_lip_sync and audio_source_path: + cmd += f"--source-paths {shlex.quote(audio_source_path)} " + cmd += f"--lip-syncer-model {lip_syncer_model} " + # Ensure lip_syncer is in frame processors + if 'lip_syncer' not in frame_processor: + frame_processor_with_lip = list(frame_processor) + ['lip_syncer'] + cmd = cmd.replace(f"--frame-processors {selected_frame_processors}", + f"--frame-processors {' '.join(frame_processor_with_lip)}") + + if face_recognition != 'none': + cmd += f"--face-recognition {face_recognition} " + if face_analyser_gender != 'none': + cmd += f"--face-analyser-gender {face_analyser_gender} " + + # Add the face_analyser_age parameter + cmd += f"--face-analyser-age {face_analyser_age} " + + if skip_audio and not enable_lip_sync: # Don't skip audio if lip syncing + cmd += "--skip-audio " + if keep_fps: + cmd += "--keep-fps " + if keep_temp: + cmd += "--keep-temp " + + try: + print("Started command...", cmd) + start_time = time.time() + + # Use shlex.split(cmd) to safely handle spaces in paths + global current_process + current_process = sp.Popen(shlex.split(cmd), stdout=sp.PIPE, stderr=sp.STDOUT, text=True, bufsize=1, universal_newlines=True) + process = current_process + + output_lines = [] + cli_output = "" + + while True: + output = process.stdout.readline() + if output == '' and process.poll() is 
not None: + break + if output: + line = output.strip() + print(line) + output_lines.append(line) + + # Build up CLI output for display + cli_output += line + "\n" + + # Keep only last 50 lines to prevent memory issues + if len(output_lines) > 50: + output_lines = output_lines[-50:] + cli_output = "\n".join(output_lines[-50:]) + "\n" + + # Yield intermediate results to update the interface + yield None, cli_output + + rc = process.poll() + end_time = time.time() + execution_time = end_time - start_time + + final_output = cli_output + f"\n\nCommand execution time: {execution_time:.2f} seconds" + + if rc != 0: + return f"An error occurred during command execution.", final_output + + # Clean up to free GPU memory + del process + if torch.cuda.is_available(): + torch.cuda.empty_cache() + gc.collect() + + # Clean up temporary audio file + if audio_source_path and os.path.exists(audio_source_path): + os.remove(audio_source_path) + + # Set the last output path for downloading + last_output_path = output_path + return output_path, final_output + + except Exception as e: + # Clean up temporary audio file in case of error + if audio_source_path and os.path.exists(audio_source_path): + os.remove(audio_source_path) + return f"An error occurred: {str(e)}", cli_output + +def run_folder_batch(source_image, frame_processor, face_analyser_direction, face_recognition, + face_analyser_gender, skip_audio, keep_fps, keep_temp, lip_syncer_model, enable_lip_sync, gpu_selection): + """Process all videos in Convert folder""" + global last_output_path, last_batch_mode + last_batch_mode = True + + # Set GPU device + set_gpu_device(gpu_selection) + + video_directory = os.path.join(BASE_DIR, "Convert") + + # Create Convert directory if it doesn't exist + os.makedirs(video_directory, exist_ok=True) + + video_files = [os.path.join(video_directory, f) for f in os.listdir(video_directory) if f.lower().endswith(('.mp4', '.avi', '.mov', '.mkv'))] + + if not video_files: + yield None, f"📁 No video files found in the directory: {video_directory}" + return + + new_source = get_instance_file_path('source-image.jpg') + shutil.copy(source_image, new_source) + + if not os.path.exists(new_source): + yield None, "❌ Source image file does not exist" + return + + # Extract the original filename of the source image + source_image_name = os.path.splitext(os.path.basename(source_image))[0] + + cli_output = f"📊 Found {len(video_files)} videos to process in {video_directory}\n" + cli_output += f"🎯 Source image: {source_image_name}\n" + cli_output += f"🖥️ GPU Selection: {gpu_selection}\n" + cli_output += f"📁 Instance Output: {INSTANCE_OUTPUT_DIR}\n\n" + yield None, cli_output + + successful_videos = 0 + failed_videos = 0 + + for i, target_video in enumerate(video_files, 1): + current_video_output = f"[{i}/{len(video_files)}] 🎬 Processing: {os.path.basename(target_video)}\n" + cli_output += current_video_output + print(f"[{i}/{len(video_files)}] Processing: {os.path.basename(target_video)}") # Console output + yield None, cli_output + + new_target = get_instance_file_path('resize-vid.mp4') + + try: + resize_video(file=target_video, export=new_target, fps=30) + except Exception as e: + error_msg = f"❌ Error resizing video {target_video}: {e}\n" + cli_output += error_msg + print(error_msg.strip()) # Console output + failed_videos += 1 + yield None, cli_output + continue # Proceed to next video + + if not os.path.exists(new_target): + error_msg = f"❌ Target video file {target_video} does not exist after resizing.\n" + cli_output += error_msg + 
print(error_msg.strip()) # Console output + failed_videos += 1 + yield None, cli_output + continue # Proceed to next video + + target_video_name = os.path.splitext(os.path.basename(target_video))[0] + + # Handle audio extraction for lip sync from the TARGET video itself + audio_source_path = None + if enable_lip_sync: + audio_source_path = get_instance_file_path('target-audio.wav') + if not extract_audio(new_target, audio_source_path): + warning_msg = f"⚠️ Warning: Could not extract audio from {target_video}. Skipping lip sync.\n" + cli_output += warning_msg + print(warning_msg.strip()) # Console output + yield None, cli_output + enable_lip_sync = False + + # Add lip sync suffix to filename if enabled + suffix = "_lipsynced" if enable_lip_sync else "" + output_filename = f"{source_image_name}_{target_video_name}{suffix}.mp4" + output_path = os.path.join(INSTANCE_OUTPUT_DIR, output_filename) + + os.makedirs(INSTANCE_OUTPUT_DIR, exist_ok=True) + + # Determine execution provider based on GPU selection + if gpu_selection.startswith("GPU"): + execution_provider = "cuda" + else: + execution_provider = "cpu" + + # Construct command as a single string and use shlex.split to handle it + cmd = ( + f"python run.py --execution-providers {execution_provider} " + f"--execution-thread-count 8 " # Changed from 16 to 8 + f"--reference-face-distance 1.5 " + f"-s {shlex.quote(new_source)} -t {shlex.quote(new_target)} -o {shlex.quote(output_path)} " + f"--frame-processors {' '.join(frame_processor)} " + f"--face-analyser-direction {face_analyser_direction} " + ) + + # Add lip sync parameters if enabled + if enable_lip_sync and audio_source_path: + cmd += f"--source-paths {shlex.quote(audio_source_path)} " + cmd += f"--lip-syncer-model {lip_syncer_model} " + # Ensure lip_syncer is in frame processors + if 'lip_syncer' not in frame_processor: + frame_processor_with_lip = list(frame_processor) + ['lip_syncer'] + else: + frame_processor_with_lip = frame_processor + # Update the command with the new frame processors + cmd = cmd.replace(f"--frame-processors {' '.join(frame_processor)}", + f"--frame-processors {' '.join(frame_processor_with_lip)}") + + if face_recognition != 'none': + cmd += f"--face-recognition {face_recognition} " + if face_analyser_gender != 'none': + cmd += f"--face-analyser-gender {face_analyser_gender} " + + if skip_audio and not enable_lip_sync: # Don't skip audio if lip syncing + cmd += "--skip-audio " + if keep_fps: + cmd += "--keep-fps " + if keep_temp: + cmd += "--keep-temp " + + try: + cmd_msg = f"Starting processing with command...\n" + cli_output += cmd_msg + print("Starting processing...") # Console output + yield None, cli_output + + start_time = time.time() + # Use shlex.split(cmd) to safely handle spaces in paths + global current_process + current_process = sp.Popen(shlex.split(cmd), stdout=sp.PIPE, stderr=sp.STDOUT, text=True, bufsize=1, universal_newlines=True) + process = current_process + + line_count = 0 + last_update_time = time.time() + + while True: + output = process.stdout.readline() + if output == '' and process.poll() is not None: + break + if output: + line = output.strip() + print(line) # Always show in console + + # Only update web interface every 10 lines or every 2 seconds to prevent slowdown + line_count += 1 + current_time = time.time() + + if line_count % 10 == 0 or current_time - last_update_time > 2: + cli_output += line + "\n" + # Keep only last 50 lines to prevent memory issues + lines = cli_output.split('\n') + if len(lines) > 50: + cli_output = 
'\n'.join(lines[-50:]) + yield None, cli_output + last_update_time = current_time + + rc = process.poll() + end_time = time.time() + execution_time = end_time - start_time + + if rc != 0: + error_msg = f"An error occurred during command execution for {target_video}.\n" + cli_output += error_msg + print(error_msg.strip()) # Console output + failed_videos += 1 + yield None, cli_output + # Ensure the process is terminated + try: + process.kill() + process.wait() + except: + pass + continue # Proceed to next video + else: + success_msg = f"Processing completed for {target_video} in {execution_time:.2f} seconds.\n\n" + cli_output += success_msg + print(f"Processing completed for {os.path.basename(target_video)} in {execution_time:.2f} seconds.") # Console output + successful_videos += 1 + yield None, cli_output + + # Clean up to free GPU memory + del process + if torch.cuda.is_available(): + torch.cuda.empty_cache() + gc.collect() + + except Exception as e: + error_msg = f"An error occurred while processing {target_video}: {str(e)}\n" + cli_output += error_msg + print(error_msg.strip()) # Console output + failed_videos += 1 + yield None, cli_output + continue # Proceed to next video + + # Clean up temporary audio file + if audio_source_path and os.path.exists(audio_source_path): + try: + os.remove(audio_source_path) + except Exception as e: + print(f"⚠️ Could not remove audio file: {e}") + + final_msg = f"\n=== BATCH PROCESSING COMPLETE ===\n" + final_msg += f"✅ Successfully processed: {successful_videos} videos\n" + final_msg += f"❌ Failed: {failed_videos} videos\n" + final_msg += f"📁 Total videos: {len(video_files)}\n" + final_msg += f"🗂️ Check the output folder for results: {INSTANCE_OUTPUT_DIR}" + cli_output += final_msg + print(f"=== BATCH PROCESSING COMPLETE === Successfully processed: {successful_videos}/{len(video_files)} videos") # Console output + + # Set up for batch download + if successful_videos > 0: + last_output_path = create_batch_zip() + if last_output_path: + cli_output += f"\n📦 Batch zip created: {os.path.basename(last_output_path)}" + else: + cli_output += f"\n⚠️ Warning: Could not create batch zip file" + + yield None, cli_output + +def handle_button_action(button_text, source_image, target_video, frame_processor, face_analyser_direction, + face_recognition, face_analyser_gender, face_analyser_age, skip_audio, + keep_fps, keep_temp, lip_syncer_model, enable_lip_sync, use_folder_mode, gpu_selection): + """Handle the multi-purpose button actions""" + global last_output_path, last_batch_mode + + if "RESET" in button_text: + # Reset to defaults + return reset_to_defaults() + elif "CANCEL" in button_text: + # Cancel processing + cancel_msg = cancel_processing() + return ( + source_image, target_video, frame_processor, face_analyser_direction, face_recognition, + face_analyser_gender, face_analyser_age, skip_audio, keep_fps, keep_temp, + lip_syncer_model, enable_lip_sync, use_folder_mode, gpu_selection, + cancel_msg, "🔄 RESET CONFIGURATION" + ) + elif "DOWNLOAD" in button_text: + # Download results + if last_batch_mode and last_output_path: + # Return the zip file for batch download + return ( + source_image, target_video, frame_processor, face_analyser_direction, face_recognition, + face_analyser_gender, face_analyser_age, skip_audio, keep_fps, keep_temp, + lip_syncer_model, enable_lip_sync, use_folder_mode, gpu_selection, + "📥 Batch processing complete! 
Click download to get your zipped results!", "📥 DOWNLOAD BATCH RESULTS" + ) + elif not last_batch_mode and last_output_path: + # Return the single file for download + return ( + source_image, target_video, frame_processor, face_analyser_direction, face_recognition, + face_analyser_gender, face_analyser_age, skip_audio, keep_fps, keep_temp, + lip_syncer_model, enable_lip_sync, use_folder_mode, gpu_selection, + "📥 Processing complete! Click download to get your enhanced video!", "📥 DOWNLOAD VIDEO" + ) + + # Default return (shouldn't reach here normally) + return ( + source_image, target_video, frame_processor, face_analyser_direction, face_recognition, + face_analyser_gender, face_analyser_age, skip_audio, keep_fps, keep_temp, + lip_syncer_model, enable_lip_sync, use_folder_mode, gpu_selection, + "", "🔄 RESET CONFIGURATION" + ) + +def run_processing(source_image, target_video, frame_processor, face_analyser_direction, face_recognition, + face_analyser_gender, face_analyser_age, skip_audio, keep_fps, keep_temp, + lip_syncer_model, enable_lip_sync, use_folder_mode, gpu_selection): + """Main processing function""" + + if use_folder_mode: + # Folder batch mode + for _, cli_output in run_folder_batch( + source_image, frame_processor, face_analyser_direction, face_recognition, + face_analyser_gender, skip_audio, keep_fps, keep_temp, lip_syncer_model, enable_lip_sync, gpu_selection + ): + yield cli_output, "⏹️ CANCEL PROCESSING" + # Processing complete + yield cli_output + "\n\n✅ Batch processing completed successfully!", "📥 DOWNLOAD BATCH RESULTS" + else: + # Single video mode + output_path = None + for video_result, cli_output in run_single_video( + source_image, target_video, frame_processor, face_analyser_direction, face_recognition, + face_analyser_gender, face_analyser_age, skip_audio, keep_fps, keep_temp, + lip_syncer_model, enable_lip_sync, gpu_selection + ): + if video_result and not video_result.startswith("An error occurred"): + output_path = video_result + yield cli_output, "⏹️ CANCEL PROCESSING" + + # Processing complete + if output_path and os.path.exists(output_path): + yield cli_output + "\n\n🎉 Video processing completed successfully!", "📥 DOWNLOAD VIDEO" + else: + yield cli_output + "\n\n✅ I did what I was told!", "🔄 RESET CONFIGURATION" + +def get_download_file(): + """Get the appropriate file for download""" + global last_output_path, last_batch_mode + if last_output_path and os.path.exists(last_output_path): + return last_output_path + return None + +def get_theme() -> gr.Theme: + return gr.themes.Monochrome( + primary_hue=gr.themes.colors.teal, + secondary_hue=gr.themes.colors.gray, + font=gr.themes.GoogleFont('Inter') + ).set( + background_fill_primary="#1f1f1f", + background_fill_secondary="#2d2d2d", + block_label_text_size="*text_sm", + block_title_text_size="*text_md" + ) + +def toggle_lip_sync_visibility(enable_lip_sync): + """Toggle visibility of lip sync related components""" + return { + lip_syncer_model_dropdown: gr.update(visible=enable_lip_sync) + } + +def toggle_folder_mode(use_folder_mode): + """Toggle visibility of target video upload based on folder mode""" + return { + target_video: gr.update(visible=not use_folder_mode), + face_analyser_age_dropdown: gr.update(visible=not use_folder_mode) + } + +with gr.Blocks(theme=get_theme(), css=""" + .gradio-container { + max-width: 1800px !important; + margin: 0 auto !important; + background: linear-gradient(135deg, #0f0f23 0%, #1a1a2e 50%, #16213e 100%); + min-height: 100vh; + padding: 0.3rem !important; + } + 
.main-header { + text-align: center; + margin-bottom: 0.6rem; + padding: 0.8rem; + background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); + border-radius: 12px; + color: white; + box-shadow: 0 10px 20px rgba(102, 126, 234, 0.3); + position: relative; + overflow: hidden; + } + .main-header h1 { + font-size: 1.8rem !important; + margin: 0 !important; + } + .main-header p { + font-size: 0.9rem !important; + margin: 0.2rem 0 0 0 !important; + } + .section-header { + font-weight: 600; + font-size: 0.95rem; + margin-bottom: 0.6rem; + color: #667eea; + background: linear-gradient(90deg, #667eea, #764ba2); + -webkit-background-clip: text; + -webkit-text-fill-color: transparent; + background-clip: text; + border-bottom: 2px solid #667eea; + padding-bottom: 0.3rem; + position: relative; + } + .section-header::after { + content: ''; + position: absolute; + bottom: -2px; + left: 0; + width: 30px; + height: 2px; + background: linear-gradient(90deg, #667eea, #764ba2); + border-radius: 2px; + } + .control-panel { + background: linear-gradient(135deg, rgba(102, 126, 234, 0.1) 0%, rgba(118, 75, 162, 0.1) 100%); + border-radius: 12px; + padding: 0.8rem; + margin-bottom: 0.5rem; + border: 2px solid rgba(102, 126, 234, 0.2); + box-shadow: 0 8px 20px rgba(102, 126, 234, 0.1); + backdrop-filter: blur(10px); + position: relative; + overflow: hidden; + height: fit-content; + } + .control-panel::before { + content: ''; + position: absolute; + top: 0; + left: 0; + right: 0; + height: 1px; + background: linear-gradient(90deg, transparent, rgba(255,255,255,0.3), transparent); + } + .button-row { + display: flex; + gap: 1rem; + justify-content: center; + margin: 0.8rem 0; + } + #action-buttons { + margin-top: 0.5rem !important; + } + #action-buttons .gr-button { + width: 100% !important; + margin: 0.2rem 0 !important; + } + #download-btn { + background: linear-gradient(135deg, #10b981, #059669) !important; + border: none !important; + color: white !important; + font-weight: 600 !important; + transition: all 0.3s ease !important; + } + #download-btn:hover { + background: linear-gradient(135deg, #059669, #047857) !important; + transform: translateY(-1px) !important; + box-shadow: 0 4px 12px rgba(16, 185, 129, 0.4) !important; + } + + /* Download status styling */ + .download-status { + font-size: 0.8rem !important; + background: rgba(16, 185, 129, 0.1) !important; + border: 1px solid rgba(16, 185, 129, 0.3) !important; + border-radius: 6px !important; + margin-top: 0.3rem !important; + } + + /* Download file component styling when visible */ + .download-component { + background: rgba(16, 185, 129, 0.1) !important; + border: 2px solid rgba(16, 185, 129, 0.4) !important; + border-radius: 8px !important; + padding: 0.5rem !important; + margin-top: 0.3rem !important; + } + + .processing-log { + margin-top: 0.5rem; + background: linear-gradient(135deg, rgba(15, 15, 35, 0.9) 0%, rgba(26, 26, 46, 0.9) 100%); + border-radius: 12px; + padding: 0.8rem; + border: 2px solid rgba(102, 126, 234, 0.3); + box-shadow: inset 0 2px 10px rgba(0,0,0,0.3), 0 8px 20px rgba(102, 126, 234, 0.1); + height: fit-content; + } + /* Enhanced form styling */ + .gr-form { + background: transparent !important; + } + .gr-box { + border-radius: 8px !important; + border: 1px solid rgba(102, 126, 234, 0.2) !important; + background: rgba(255, 255, 255, 0.02) !important; + margin: 0.2rem 0 !important; + padding: 0.3rem !important; + } + .gr-button { + border-radius: 10px !important; + font-weight: 600 !important; + text-transform: uppercase 
!important; + letter-spacing: 0.5px !important; + transition: all 0.3s ease !important; + box-shadow: 0 4px 15px rgba(0,0,0,0.3) !important; + padding: 0.5rem 1.2rem !important; + font-size: 0.85rem !important; + } + .gr-button:hover { + transform: translateY(-1px) !important; + box-shadow: 0 6px 20px rgba(0,0,0,0.4) !important; + } + .gr-button-primary { + background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important; + border: none !important; + } + .gr-button-secondary { + background: linear-gradient(135deg, #434343 0%, #000000 100%) !important; + border: 1px solid rgba(102, 126, 234, 0.3) !important; + color: white !important; + } + /* Configuration grid styling */ + .config-section { + background: rgba(255, 255, 255, 0.03); + border-radius: 10px; + padding: 0.6rem; + border: 1px solid rgba(102, 126, 234, 0.15); + margin: 0.2rem; + } + .config-section h4 { + font-size: 0.85rem !important; + margin: 0 0 0.4rem 0 !important; + } + /* Compact textbox styling */ + .gr-textbox { + background: rgba(15, 15, 35, 0.8) !important; + border: 1px solid rgba(102, 126, 234, 0.3) !important; + border-radius: 8px !important; + color: #e2e8f0 !important; + font-size: 0.8rem !important; + } + /* Compact file upload styling */ + .gr-file { + border: 2px dashed rgba(102, 126, 234, 0.4) !important; + border-radius: 10px !important; + background: rgba(102, 126, 234, 0.05) !important; + transition: all 0.3s ease !important; + min-height: 85px !important; + max-height: 90px !important; + } + .gr-file:hover { + border-color: rgba(102, 126, 234, 0.6) !important; + background: rgba(102, 126, 234, 0.1) !important; + } + .gr-file .gr-file-label { + font-size: 0.75rem !important; + line-height: 1.2 !important; + } + .gr-file .upload-container { + padding: 0.8rem !important; + } + /* Simplified dropdown styling - remove complex CSS that might interfere */ + .gr-dropdown { + background: rgba(255, 255, 255, 0.05) !important; + border: 1px solid rgba(102, 126, 234, 0.3) !important; + border-radius: 6px !important; + font-size: 0.8rem !important; + } + + /* Let Gradio handle dropdown positioning naturally */ + #main-gpu-dropdown { + position: relative !important; + } + + /* GPU debug styling */ + .gpu-debug { + font-size: 0.75rem !important; + background: rgba(255, 165, 0, 0.1) !important; + border: 1px solid rgba(255, 165, 0, 0.3) !important; + border-radius: 6px !important; + margin-top: 0.3rem !important; + } + /* Compact checkbox styling */ + .gr-checkbox { + font-size: 0.8rem !important; + } + /* Make everything more compact */ + .gr-group { + gap: 0.3rem !important; + } + .gr-column { + gap: 0.3rem !important; + } + .gr-row { + gap: 0.5rem !important; + } + /* Text size adjustments */ + .gr-label { + font-size: 0.8rem !important; + } + .gr-info { + font-size: 0.7rem !important; + } + /* Ultra compact mode for smaller screens */ + @media (max-height: 900px) { + .processing-log .gr-textbox { + max-height: 200px !important; + } + .main-header { + padding: 0.6rem !important; + margin-bottom: 0.4rem !important; + } + .control-panel { + padding: 0.6rem !important; + margin-bottom: 0.3rem !important; + } + .gr-file { + min-height: 70px !important; + max-height: 75px !important; + } + } + @media (max-height: 800px) { + .processing-log .gr-textbox { + max-height: 150px !important; + } + .main-header h1 { + font-size: 1.5rem !important; + } + .gr-file { + min-height: 65px !important; + max-height: 70px !important; + } + } + @media (max-height: 700px) { + .gr-file { + min-height: 60px !important; + max-height: 
65px !important; + } + .control-panel { + padding: 0.4rem !important; + } + } +""") as ui: + + with gr.Column(elem_classes="main-header"): + gr.Markdown(f""" + # 🎭 Advanced Face Swap Studio + **Professional-grade AI face swapping technology** + *Instance: {INSTANCE_ID} | Port: {INSTANCE_PORT}* + """, elem_classes="main-header") + + with gr.Tabs(): + # Main processing tab + with gr.Tab("🎭 Face Swap", id="main"): + with gr.Row(): + # Left Column - Source Input + Action Buttons + with gr.Column(scale=2): + with gr.Group(elem_classes="control-panel"): + gr.HTML('
<div class="section-header">📸 Source Input</div>
') + + gr.HTML('

<h4>🎯 Face Source</h4>

') + source_image = gr.File( + label="Upload Source Image", + file_types=["image"], + file_count="single", + height=85 + ) + gr.HTML('

Clear image with the face to use

') + + # Target video upload (visible by default) + gr.HTML('

<h4>🎬 Target Video</h4>

') + target_video = gr.File( + label="Upload Target Video", + file_types=["video"], + file_count="single", + visible=True, + height=85 + ) + gr.HTML('

Video where faces will be replaced

') + + # Action Buttons moved to left column + with gr.Group(elem_classes="control-panel", elem_id="action-buttons"): + gr.HTML('
<div class="section-header">🎮 Controls</div>
') + with gr.Row(): + start_button = gr.Button( + "🚀 LAUNCH PROCESSING", + variant="primary", + size="lg", + elem_id="start-btn" + ) + with gr.Row(): + action_button = gr.Button( + "🔄 RESET CONFIGURATION", + variant="secondary", + size="lg", + elem_id="action-btn" + ) + with gr.Row(): + download_button = gr.Button( + "📥 DOWNLOAD RESULTS", + variant="secondary", + size="lg", + elem_id="download-btn", + visible=True + ) + with gr.Row(): + download_status = gr.Textbox( + label="📥 Download Status", + value="startup_status", # Show startup status + interactive=False, + visible=True, + lines=2, + elem_classes="download-status" + ) + + # Download file component - now visible when needed + download_file = gr.File( + label="📥 Click to Download", + visible=False, + file_count="single", + file_types=None, # Allow all file types + interactive=False, + elem_classes="download-component" + ) + + # Middle Column - Core Processing Configuration + with gr.Column(scale=3): + with gr.Group(elem_classes="control-panel"): + gr.HTML('
<div class="section-header">⚙️ Processing Configuration</div>
') + + # Main configuration in a clean grid layout + with gr.Row(): + with gr.Column(scale=1, elem_classes="config-section"): + gr.HTML('

<h4>🎭 Frame Processing</h4>

') + # Get available frame processors based on what's installed + available_processors = ['face_swapper'] + if ENHANCEMENT_AVAILABLE: + available_processors.extend(['face_enhancer', 'frame_enhancer']) + + frame_processor_checkbox = gr.CheckboxGroup( + choices=available_processors, + label='Active Processors', + value=['face_swapper'] + (['face_enhancer'] if ENHANCEMENT_AVAILABLE else []), + visible=True, + info="⚠️ frame_enhancer increases processing time" if ENHANCEMENT_AVAILABLE else "🔧 Basic mode - enhancement modules not available" + ) + + # Lip sync controls + enable_lip_sync = gr.Checkbox( + label="🎵 Enable Lip Sync", + value=False, + info="⚠️ Beta feature" + ) + + lip_syncer_model_dropdown = gr.Dropdown( + label='Lip Sync Model', + choices=['wav2lip_96', 'wav2lip_gan_96'], + value='wav2lip_gan_96', + visible=False, + scale=1 + ) + + with gr.Column(scale=1, elem_classes="config-section"): + gr.HTML('

<h4>🔍 Face Analysis</h4>

') + + face_recognition_dropdown = gr.Dropdown( + label='Recognition Mode', + choices=['none', 'reference', 'many'], + value='reference', + visible=True + ) + + face_analyser_direction_dropdown = gr.Dropdown( + label='Analysis Direction', + choices=['left-right', 'right-left', 'top-bottom', 'bottom-top', 'small-large', 'large-small'], + value='top-bottom', + visible=True + ) + + face_analyser_gender_dropdown = gr.Dropdown( + label='Target Gender', + choices=['none', 'male', 'female'], + value='female', + visible=True + ) + + face_analyser_age_dropdown = gr.Dropdown( + label='Target Age Group', + choices=['child', 'teen', 'adult', 'senior'], + value='adult', + visible=True + ) + + # Hidden option + keep_temp = gr.Checkbox( + label="🗂️ Keep Temp Files", + value=False, + visible=False + ) + + # Right Column - Processing Log + Processing Options + with gr.Column(scale=3): + with gr.Group(elem_classes="processing-log"): + gr.HTML('
<div class="section-header">🖥️ Real-time Monitor</div>
') + + cli_output = gr.Textbox( + label="📊 Live Processing Output", + lines=12, + max_lines=15, + interactive=False, + show_copy_button=True, + container=True, + placeholder=f"🔧 System ready. Configure settings and click 'Launch Processing'...\n\n⚡ Real-time progress updates\n📈 Performance metrics\n🎯 Processing logs\n✨ Completion notifications\n\n🆔 Instance: {INSTANCE_ID}\n📁 Output: {INSTANCE_OUTPUT_DIR}\n📥 Download button scans output folder automatically", + elem_id="processing-monitor" + ) + + # Processing options moved to right column + with gr.Group(elem_classes="control-panel"): + gr.HTML('
<div class="section-header">🛠️ Processing Options</div>
') + with gr.Row(): + with gr.Column(): + # Simple GPU selection - mirroring working test + gpu_selection_dropdown = gr.Dropdown( + label="🖥️ Compute Device", + choices=AVAILABLE_GPUS, + value=AVAILABLE_GPUS[0] if AVAILABLE_GPUS else "CPU Only", + info="Select your GPU or CPU for processing", + interactive=True, + allow_custom_value=False, + elem_id="main-gpu-dropdown" + ) + + gpu_debug_btn = gr.Button("🔍 Debug GPU", size="sm") + + gpu_debug_output = gr.Textbox( + label="🔍 GPU Status", + value=f"Detected: {len(AVAILABLE_GPUS)} options → {', '.join(AVAILABLE_GPUS)}", + interactive=False, + lines=2, + elem_classes="gpu-debug" + ) + + skip_audio = gr.Checkbox( + label="🔇 Skip Audio", + value=False, + info="Video only processing" + ) + with gr.Column(): + use_folder_mode = gr.Checkbox( + label="📁 Batch Mode", + value=False, + info="Process ./Convert folder" + ) + keep_fps = gr.Checkbox( + label="🎬 Preserve FPS", + value=True, + info="Keep original frame rate" + ) + + # GPU Test Tab + with gr.Tab("🔧 GPU Test", id="test"): + gr.Markdown("## GPU Dropdown Test") + gr.Markdown("This tab tests if the GPU dropdown works correctly") + + test_gpu_dropdown = gr.Dropdown( + label="Test GPU Selection", + choices=AVAILABLE_GPUS, + value=AVAILABLE_GPUS[0] if AVAILABLE_GPUS else "CPU Only", + interactive=True, + allow_custom_value=False, + info="This should show all your GPUs as a proper dropdown" + ) + + test_output = gr.Textbox( + label="Selected GPU", + value=f"Current: {AVAILABLE_GPUS[0] if AVAILABLE_GPUS else 'None'}", + interactive=False + ) + + test_status = gr.Textbox( + label="GPU Detection Status", + value=f"Detected {len(AVAILABLE_GPUS)} options: {', '.join(AVAILABLE_GPUS)}", + interactive=False, + lines=3 + ) + + def test_gpu_change(selected): + print(f"🧪 Test GPU Selected: {selected}") + return f"You selected: {selected}" + + test_gpu_dropdown.change( + test_gpu_change, + inputs=[test_gpu_dropdown], + outputs=[test_output] + ) + + # Toggle lip sync components visibility + enable_lip_sync.change( + toggle_lip_sync_visibility, + inputs=[enable_lip_sync], + outputs=[lip_syncer_model_dropdown] + ) + + # Toggle folder mode visibility + use_folder_mode.change( + toggle_folder_mode, + inputs=[use_folder_mode], + outputs=[target_video, face_analyser_age_dropdown] + ) + + # GPU selection change handler for debugging + gpu_selection_dropdown.change( + on_gpu_selection_change, + inputs=[gpu_selection_dropdown], + outputs=[] + ) + + # GPU debug button + gpu_debug_btn.click( + debug_gpu_dropdown, + inputs=[], + outputs=[gpu_selection_dropdown, gpu_debug_output] + ) + + # Main processing button + start_button.click( + run_processing, + inputs=[ + source_image, + target_video, + frame_processor_checkbox, + face_analyser_direction_dropdown, + face_recognition_dropdown, + face_analyser_gender_dropdown, + face_analyser_age_dropdown, + skip_audio, + keep_fps, + keep_temp, + lip_syncer_model_dropdown, + enable_lip_sync, + use_folder_mode, + gpu_selection_dropdown + ], + outputs=[cli_output, action_button] + ) + + # Multi-purpose action button + action_button.click( + handle_button_action, + inputs=[ + action_button, + source_image, + target_video, + frame_processor_checkbox, + face_analyser_direction_dropdown, + face_recognition_dropdown, + face_analyser_gender_dropdown, + face_analyser_age_dropdown, + skip_audio, + keep_fps, + keep_temp, + lip_syncer_model_dropdown, + enable_lip_sync, + use_folder_mode, + gpu_selection_dropdown + ], + outputs=[ + source_image, + target_video, + frame_processor_checkbox, + 
face_analyser_direction_dropdown, + face_recognition_dropdown, + face_analyser_gender_dropdown, + face_analyser_age_dropdown, + skip_audio, + keep_fps, + keep_temp, + lip_syncer_model_dropdown, + enable_lip_sync, + use_folder_mode, + gpu_selection_dropdown, + cli_output, + action_button + ] + ) + + # FIXED: Download button functionality with proper download component management + download_button.click( + handle_download_click, + inputs=[], + outputs=[download_file, download_status, download_file, download_button], + show_progress=True + ) + + # FIXED: Reset download UI when download component changes (download completes) + download_file.change( + reset_download_ui, + inputs=[], + outputs=[download_file, download_button, download_status] + ) + +# Print system information for verification +print(f"📁 Base directory: {BASE_DIR}") +print(f"🆔 Instance ID: {INSTANCE_ID}") +print(f"📂 Convert directory: {os.path.join(BASE_DIR, 'Convert')}") +print(f"🗂️ Instance temp: {INSTANCE_TEMP_DIR}") +print(f"📤 Instance output: {INSTANCE_OUTPUT_DIR}") +print(f"🖥️ Available GPUs: {AVAILABLE_GPUS}") +print(f"🔧 Enhancement modules: {'✅ Available' if ENHANCEMENT_AVAILABLE else '❌ Not Available'}") + +# Determine if running in HuggingFace Spaces or similar cloud environment +def get_launch_config(): + """Get appropriate launch configuration based on environment""" + if os.getenv('SPACE_ID'): # HuggingFace Spaces + return { + 'server_name': "0.0.0.0", + 'server_port': 7860, # HF Spaces always use 7860 + 'share': False, + 'debug': False + } + elif os.getenv('COLAB_GPU'): # Google Colab + return { + 'server_name': "127.0.0.1", + 'server_port': INSTANCE_PORT, + 'share': True, + 'debug': False + } + else: # Local development - use pre-determined port + return { + 'server_name': "127.0.0.1", + 'server_port': INSTANCE_PORT, + 'share': False, + 'debug': True + } + +# Launch configuration for better cross-platform compatibility +launch_config = get_launch_config() +print(f"🚀 Launching on port: {launch_config['server_port']}") +print(f"🔗 Access URL: http://localhost:{launch_config['server_port']}") + +ui.launch( + max_file_size="2100mb", + **launch_config +) \ No newline at end of file diff --git a/del.py b/del.py new file mode 100644 index 0000000000000000000000000000000000000000..d0e8f29496fc132c2c04590b0b37e365a2664817 --- /dev/null +++ b/del.py @@ -0,0 +1,9 @@ +import shutil +import gradio as gr + +def delt(text): + txt = text + shutil.rmtree("./output") + return "Removed successfully..." 
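# Note (illustrative sketch, not part of del.py): shutil.rmtree("./output") raises
# FileNotFoundError when the folder is already gone, and it leaves no output
# directory behind for the next run to write into. A more defensive variant,
# assuming "import os" is added at the top of the script, would be:
#
#     if os.path.isdir("./output"):
#         shutil.rmtree("./output")           # drop all previous results
#     os.makedirs("./output", exist_ok=True)  # recreate an empty results folder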
+ +gr.Interface(delt, "text","text").launch(debug=True) \ No newline at end of file diff --git a/gfpgan/weights/detection_Resnet50_Final.pth b/gfpgan/weights/detection_Resnet50_Final.pth new file mode 100644 index 0000000000000000000000000000000000000000..16546738ce0a00a9fd47585e0fc52744d31cc117 --- /dev/null +++ b/gfpgan/weights/detection_Resnet50_Final.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6d1de9c2944f2ccddca5f5e010ea5ae64a39845a86311af6fdf30841b0a5a16d +size 109497761 diff --git a/gfpgan/weights/parsing_parsenet.pth b/gfpgan/weights/parsing_parsenet.pth new file mode 100644 index 0000000000000000000000000000000000000000..1ac2efc50360a79c9905dbac57d9d99cbfbe863c --- /dev/null +++ b/gfpgan/weights/parsing_parsenet.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3d558d8d0e42c20224f13cf5a29c79eba2d59913419f945545d8cf7b72920de2 +size 85331193 diff --git a/launch_instance.bat b/launch_instance.bat new file mode 100644 index 0000000000000000000000000000000000000000..17bec616186aa213daa4665b17d4cd0aa2a601f8 --- /dev/null +++ b/launch_instance.bat @@ -0,0 +1,6 @@ +@echo off +echo Starting Face Swap Studio Instance... +echo Instance will auto-detect available port starting from 7860 +echo. +python app.py +pause diff --git a/launch_instance.sh b/launch_instance.sh new file mode 100644 index 0000000000000000000000000000000000000000..5ccddaa1f9a8dee33311acf42010078abc5d0ce6 --- /dev/null +++ b/launch_instance.sh @@ -0,0 +1,5 @@ +#!/bin/bash +echo "Starting Face Swap Studio Instance..." +echo "Instance will auto-detect available port starting from 7860" +echo "" +python3 app.py diff --git a/output/New Text Document.txt b/output/New Text Document.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/requirements - Copy.txt b/requirements - Copy.txt new file mode 100644 index 0000000000000000000000000000000000000000..bfc0136e29d3d05742a447259e9f599fbdb34c75 --- /dev/null +++ b/requirements - Copy.txt @@ -0,0 +1,45 @@ +# === CORE ML/AI STACK === +transformers==4.46.2 +diffusers==0.31.0 +accelerate==1.7.0 +safetensors==0.4.5 +tokenizers==0.20.3 +huggingface-hub==0.32.5 + +# === WEB/API FRAMEWORKS === +gradio==5.33.1 +fastapi==0.115.12 +uvicorn==0.34.3 + +# === COMPUTER VISION === +opencv-python==4.10.0.84 +pillow==11.2.1 +albumentations==2.0.8 +scikit-image==0.25.2 + +# === SCIENTIFIC/DATA SCIENCE === +numpy==1.26.4 +scipy==1.15.3 +scikit-learn==1.7.0 +pandas==2.2.2 +matplotlib==3.9.2 +onnx==1.18.0 +onnxruntime-gpu==1.19.2 + +# === TRAINING/MONITORING === +pytorch-lightning==2.5.1 +tensorflow==2.18.0 +tensorboard==2.18.0 + +# === UTILITIES === +requests==2.32.4 +tqdm==4.67.1 +psutil==7.0.0 +moviepy==1.0.3 +opennsfw2==0.14.0 + +--extra-index-url https://download.pytorch.org/whl/cu118 +torch==2.0.1+cu118 +torchvision==0.15.2+cu118 +torchaudio==2.0.2+cu118 +torchmetrics==1.7.2 \ No newline at end of file diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..bfc0136e29d3d05742a447259e9f599fbdb34c75 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,45 @@ +# === CORE ML/AI STACK === +transformers==4.46.2 +diffusers==0.31.0 +accelerate==1.7.0 +safetensors==0.4.5 +tokenizers==0.20.3 +huggingface-hub==0.32.5 + +# === WEB/API FRAMEWORKS === +gradio==5.33.1 +fastapi==0.115.12 +uvicorn==0.34.3 + +# === COMPUTER VISION === +opencv-python==4.10.0.84 +pillow==11.2.1 +albumentations==2.0.8 +scikit-image==0.25.2 + +# === 
SCIENTIFIC/DATA SCIENCE === +numpy==1.26.4 +scipy==1.15.3 +scikit-learn==1.7.0 +pandas==2.2.2 +matplotlib==3.9.2 +onnx==1.18.0 +onnxruntime-gpu==1.19.2 + +# === TRAINING/MONITORING === +pytorch-lightning==2.5.1 +tensorflow==2.18.0 +tensorboard==2.18.0 + +# === UTILITIES === +requests==2.32.4 +tqdm==4.67.1 +psutil==7.0.0 +moviepy==1.0.3 +opennsfw2==0.14.0 + +--extra-index-url https://download.pytorch.org/whl/cu118 +torch==2.0.1+cu118 +torchvision==0.15.2+cu118 +torchaudio==2.0.2+cu118 +torchmetrics==1.7.2 \ No newline at end of file diff --git a/requirements_working_backup.txt b/requirements_working_backup.txt new file mode 100644 index 0000000000000000000000000000000000000000..43b33c2bb7f402d0c4e486fe35e2be7d05850881 Binary files /dev/null and b/requirements_working_backup.txt differ diff --git a/run.py b/run.py new file mode 100644 index 0000000000000000000000000000000000000000..2e75a5ddf4035f48e2ff7c7a5bd7565704308723 --- /dev/null +++ b/run.py @@ -0,0 +1,8 @@ +#!/usr/bin/env python3 +import sys +sys.path.append("D:/Switcher") # Ensure this path points to where SwitcherAI is located + +from SwitcherAI import core + +if __name__ == '__main__': + core.run() diff --git a/test.mp4 b/test.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..7130db795f1526013a585a459ff7b47d084555b8 --- /dev/null +++ b/test.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:91f6e1a8394205e7ac70cc1b348d1fb0dbbb5bff11774f2ec8894261f2d49b1d +size 1527004 diff --git a/test.py b/test.py new file mode 100644 index 0000000000000000000000000000000000000000..5c524474c329097549aec79a2832043194b5fc7e --- /dev/null +++ b/test.py @@ -0,0 +1,83 @@ +#!/usr/bin/env python3 +""" +Simple Gradio Dropdown Test +Test basic dropdown functionality with GPU detection +""" + +import gradio as gr +import torch +import os + +def get_test_gpus(): + """Simple GPU detection for testing""" + gpus = [] + + if torch.cuda.is_available(): + device_count = torch.cuda.device_count() + print(f"CUDA devices found: {device_count}") + + for i in range(device_count): + try: + name = torch.cuda.get_device_name(i) + memory = torch.cuda.get_device_properties(i).total_memory / (1024**3) + gpu_option = f"GPU {i}: {name} ({memory:.1f}GB)" + gpus.append(gpu_option) + print(f"Added: {gpu_option}") + except Exception as e: + print(f"Error with GPU {i}: {e}") + gpus.append(f"GPU {i}: Error") + + gpus.append("CPU Only") + + print(f"Final GPU list: {gpus}") + return gpus + +def on_gpu_change(selected_gpu): + """Handle GPU selection change""" + print(f"Selected GPU: {selected_gpu}") + return f"You selected: {selected_gpu}" + +def refresh_gpus(): + """Refresh GPU list""" + new_gpus = get_test_gpus() + print(f"Refreshed GPUs: {new_gpus}") + return gr.update(choices=new_gpus, value=new_gpus[0]) + +# Get initial GPU list +gpu_list = get_test_gpus() + +# Create simple interface +with gr.Blocks(title="GPU Dropdown Test") as demo: + gr.Markdown("# GPU Dropdown Test") + + with gr.Row(): + gpu_dropdown = gr.Dropdown( + label="Select GPU", + choices=gpu_list, + value=gpu_list[0] if gpu_list else None, + interactive=True, + allow_custom_value=False, + scale=4 + ) + + refresh_btn = gr.Button("🔄 Refresh", scale=1) + + output_text = gr.Textbox( + label="Selected", + value=f"Current selection: {gpu_list[0] if gpu_list else 'None'}", + interactive=False + ) + + status_text = gr.Textbox( + label="Status", + value=f"Detected {len(gpu_list)} options: {', '.join(gpu_list)}", + interactive=False, + lines=2 + ) + + # Event handlers + 
gpu_dropdown.change(on_gpu_change, inputs=[gpu_dropdown], outputs=[output_text]) + refresh_btn.click(refresh_gpus, outputs=[gpu_dropdown]) + +if __name__ == "__main__": + demo.launch(debug=True) \ No newline at end of file
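For context, the GPU labels built by get_test_gpus follow the pattern "GPU <index>: <name> (<mem>GB)", and the main app only checks whether the selected label starts with "GPU" when choosing between the cuda and cpu execution providers. The repo's set_gpu_device helper is not included in this diff, so the snippet below is only a minimal sketch of one plausible mapping from a dropdown label to a device; select_device_from_label is a hypothetical name, not an existing function. Pinning CUDA_VISIBLE_DEVICES before spawning run.py would restrict the child process to the chosen card.

import os

def select_device_from_label(label: str) -> str:
    """Map a dropdown label such as 'GPU 1: NVIDIA RTX 3090 (24.0GB)' to a provider."""
    if label.startswith("GPU"):
        index = label.split(":", 1)[0].split()[1]   # "GPU 1: ..." -> "1"
        os.environ["CUDA_VISIBLE_DEVICES"] = index  # expose only the selected card
        return "cuda"
    os.environ["CUDA_VISIBLE_DEVICES"] = ""         # "CPU Only": hide every GPU
    return "cpu"

if __name__ == "__main__":
    print(select_device_from_label("GPU 0: NVIDIA GeForce RTX 3090 (24.0GB)"))  # -> cuda
    print(select_device_from_label("CPU Only"))                                 # -> cpu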