| Column | Type | Min | Max |
|---|---|---|---|
| repo_name | string (length) | 7 | 71 |
| file_path | string (length) | 5 | 118 |
| context | list | – | – |
| import_statement | string (length) | 45 | 12.5k |
| token_num | int64 | 641 | 99.4k |
| cropped_code | string (length) | 44 | 17k |
| all_code | string (length) | 43 | 754k |
| next_line | string (length) | 2 | 330 |
| gold_snippet_index | int64 | 0 | 68 |
| created_at | string (length) | 25 | 25 |
| level | string (9 classes) | – | – |
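The records below follow the column layout above. A minimal sketch of reading such records, assuming they are stored as JSON Lines in a hypothetical local file `samples.jsonl` (the dataset's actual storage format and location are not shown here):

```python
import json

def iter_records(path="samples.jsonl"):
    """Yield one record per line; each record carries the columns listed above."""
    with open(path, encoding="utf-8") as f:
        for line in f:
            yield json.loads(line)

for rec in iter_records():
    # The bare scalar fields: token count, gold snippet index, and context-length level.
    print(rec["repo_name"], rec["file_path"], rec["token_num"],
          rec["gold_snippet_index"], rec["level"])
    break
```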
repo_name: timapage/pyqt6-yolov8
file_path: main.py
[ { "identifier": "CameraCaptureThread", "path": "src/qt/stream/video_capture.py", "snippet": "class CameraCaptureThread(QThread):\n send_video_info = pyqtSignal(dict)\n send_frame = pyqtSignal(list)\n def __init__(self):\n super(CameraCaptureThread, self).__init__()\n self.thread_n...
from src.qt.stream.video_capture import CameraCaptureThread from src.qt.stream.visualize import VideoVisualizationThread from src.qt.stream.ai_worker import AiWorkerThread from src.ui.main_window import Ui_MainWindow from src.qt.video.video_worker import FileProcessThread from PyQt6 import QtGui, QtWidgets from PyQt6.QtCore import Qt import sys import numpy as np
token_num: 12,247
class MainWindow(QtWidgets.QMainWindow, Ui_MainWindow): def __init__(self, parent=None): super(MainWindow, self).__init__(parent) self.setupUi(self) self.ai_thread = AiWorkerThread() self.camera_thread = CameraCaptureThread() self.display_thread = VideoVisualizationThread()
class MainWindow(QtWidgets.QMainWindow, Ui_MainWindow): def __init__(self, parent=None): super(MainWindow, self).__init__(parent) self.setupUi(self) self.ai_thread = AiWorkerThread() self.camera_thread = CameraCaptureThread() self.display_thread = VideoVisualizationThread()
next_line: self.file_process_thread = FileProcessThread()
gold_snippet_index: 4
created_at: 2023-10-18 09:21:01+00:00
level: 16k
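Reading the record above, the fields appear to describe a next-line completion task: the in-file context (`import_statement` plus `cropped_code`) together with a list of cross-file `context` snippets, where `gold_snippet_index` marks the snippet relevant to predicting `next_line`. A sketch of that interpretation follows; the field semantics are inferred from the column names and previews, not from documented tooling:

```python
def build_prompt(rec: dict) -> str:
    """Assemble a completion prompt from one record (assumed field semantics)."""
    # Pick the cross-file snippet flagged as gold; each context entry is assumed to
    # carry "identifier", "path" and "snippet" keys, as in the truncated preview above.
    gold = rec["context"][rec["gold_snippet_index"]]
    cross_file = f'# {gold["path"]}\n{gold["snippet"]}\n\n'
    # The model would then be asked to continue with rec["next_line"], e.g.
    # "self.file_process_thread = FileProcessThread()" for this record.
    return cross_file + rec["import_statement"] + "\n" + rec["cropped_code"]
```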
repo_name: S-LoRA/S-LoRA
file_path: slora/models/llama/model.py
[ { "identifier": "LlamaPreLayerInfer", "path": "slora/models/llama/layer_infer/pre_layer_infer.py", "snippet": "class LlamaPreLayerInfer(PreLayerInferTpl):\n \"\"\"\n \"\"\"\n\n def __init__(self, tp_rank, world_size, network_config, mode):\n super().__init__(tp_rank, world_size, network_...
import os import json import torch from slora.models.llama.layer_infer.pre_layer_infer import LlamaPreLayerInfer from slora.models.llama.layer_infer.post_layer_infer import LlamaPostLayerInfer from slora.models.llama.layer_infer.transformer_layer_infer import LlamaTransformerLayerInfer from slora.models.llama.layer_weights.pre_and_post_layer_weight import LlamaPreAndPostLayerWeight from slora.models.llama.layer_weights.transformer_layer_weight import LlamaTransformerLayerWeight from slora.models.llama.infer_struct import LlamaInferStateInfo from slora.common.mem_allocator import MemoryAllocator from slora.common.int8kv_mem_manager import INT8KVMemoryManager from slora.common.basemodel import TpPartBaseModel
token_num: 11,586
# from slora.common.mem_manager import MemoryManager class LlamaTpPartModel(TpPartBaseModel): # weight class pre_and_post_weight_class = LlamaPreAndPostLayerWeight transformer_weight_class = LlamaTransformerLayerWeight # infer class pre_layer_infer_class = LlamaPreLayerInfer post_layer_infer_class = LlamaPostLayerInfer
# from slora.common.mem_manager import MemoryManager class LlamaTpPartModel(TpPartBaseModel): # weight class pre_and_post_weight_class = LlamaPreAndPostLayerWeight transformer_weight_class = LlamaTransformerLayerWeight # infer class pre_layer_infer_class = LlamaPreLayerInfer post_layer_infer_class = LlamaPostLayerInfer
next_line: transformer_layer_infer_class = LlamaTransformerLayerInfer
gold_snippet_index: 2
created_at: 2023-11-05 04:08:36+00:00
level: 16k
repo_name: fleet-ai/context
file_path: cli.py
[ { "identifier": "print_markdown", "path": "utils/utils.py", "snippet": "def print_markdown(message):\n for line in message.split(\"\\n\"):\n line = line.strip()\n if line == \"\":\n print(\"\")\n elif line == \"---\":\n rprint(Rule(style=\"white\"))\n ...
import os import openai import sys import argparse import traceback from getpass import getpass from rich import print as rprint from utils.utils import print_markdown, print_exception, extract_code_blocks, print_help from utils.stream import TextStream from utils.ai import ( retrieve_context, construct_prompt, get_remote_chat_response, get_other_chat_response, ) from constants.cli import ARGUMENTS, LIBRARIES, OPENAI_MODELS from constants.ai import MODELS_TO_TOKENS
token_num: 13,073
if library not in LIBRARIES: rprint( "Library not found. Please refer to the list of available libraries." ) return filters["library_name"] = args.libraries # Get context window if model in OPENAI_MODELS: context_window = MODELS_TO_TOKENS[model] else: context_window = args.context_window # If local model requested, use LMStudio api_key = "" if args.local: model = "local-model" print_markdown( f"""--- **You are using a local model.** We're working with LM Studio to provide access to local models for you. Download and start your model to get started. Instructions: 1. Download LM Studio. You can find the download link here: https://lmstudio.ai 2. Open LM Studio and download your model of choice. 3. Click the **↔ icon** on the very left sidebar 4. Select your model and click "Start Server" Note that your context window is set to {context_window}. To change this, run `context --context_window <context window>`. ---""" ) else: openrouter_key = os.environ.get("OPENROUTER_API_KEY") openai_key = os.environ.get("OPENAI_API_KEY") # Get the OpenAI API key, if not found if model in OPENAI_MODELS and not openai_key: print_markdown( """--- !!!**OpenAI API key not found.** Please provide a key to proceed. --- """ ) openai_key = getpass("OpenAI API key: ") os.environ["OPENAI_API_KEY"] = openai_key print_markdown( """ --- **Tip**: To save this key for later, run `export OPENAI_API_KEY=<your key>` on mac/linux or `setx OPENAI_API_KEY <your key>` on windows. For non-OpenAI models, you should set `OPENROUTER_API_KEY`, and optionally `OPENROUTER_APP_URL` and `OPENROUTER_APP_TITLE`. ---""" ) # Otherwise, grab the openrouter key, if not found elif model not in OPENAI_MODELS and not openrouter_key: print_markdown( """--- !!!**OpenRouter API key not found.** Please provide a key to proceed. --- """ ) api_key = getpass("OpenRouter API key: ") os.environ["OPENROUTER_API_KEY"] = api_key print_markdown( f""" --- **Tip**: To save this key for later, run `export OPENROUTER_API_KEY=<your key>` on mac/linux or `setx OPENROUTER_API_KEY <your key>` on windows. You can optionally set `OPENROUTER_APP_URL` and `OPENROUTER_APP_TITLE`, too. Note that your context window is set to {context_window}. To change this, run `context --context_window <context window>`. ---""" ) if model == "gpt-4-1106-preview": print_markdown( """!!!Welcome to Fleet Context! Generate and run code using the most up-to-date libraries. *Warning*: You are using gpt-4-turbo, which is not yet stable and is rate limited at 100 requests per day. Please use with caution. """ ) else: print_markdown( """!!!Welcome to Fleet Context! Generate and run code using the most up-to-date libraries. """ ) messages = [] while True: try: query = input("> ") query = query.strip() if not query: continue if query.lower() == "exit": rprint("Exiting. Goodbye!") break messages.append({"role": "user", "content": query}) rag_context = retrieve_context(query, k=k, filters=filters)
# pylint: disable=E0401 # pylint: disable=W0122 # pylint: disable=W0718 def main(): parser = argparse.ArgumentParser(description="Fleet Data Retriever", add_help=False) parser.add_argument("help", nargs="?", default=argparse.SUPPRESS) # Add arguments for arg in ARGUMENTS: if arg["type"] == bool: default = arg["default"] if "default" in arg else None parser.add_argument( f'-{arg["nickname"]}', f'--{arg["name"]}', dest=arg["name"], help=arg["help_text"], action="store_true", default=default, ) elif arg["type"] == list: choices = arg["choices"] if "choices" in arg else None default = arg["default"] if "default" in arg else None parser.add_argument( f'-{arg["nickname"]}', f'--{arg["name"]}', dest=arg["name"], help=arg["help_text"], type=str, nargs="+", choices=choices, default=default, ) else: choices = arg["choices"] if "choices" in arg else None default = arg["default"] if "default" in arg else None parser.add_argument( f'-{arg["nickname"]}', f'--{arg["name"]}', dest=arg["name"], help=arg["help_text"], type=arg["type"], choices=choices, default=default, ) # Hit the retrieve endpoint args = parser.parse_args() k = args.k_value model = args.model cite_sources = args.cite_sources filters = {} if getattr(args, "help", None) is not None: print_help() return # If library specified, match library name to uuid if args.libraries: for library in args.libraries: if library not in LIBRARIES: rprint( "Library not found. Please refer to the list of available libraries." ) return filters["library_name"] = args.libraries # Get context window if model in OPENAI_MODELS: context_window = MODELS_TO_TOKENS[model] else: context_window = args.context_window # If local model requested, use LMStudio api_key = "" if args.local: model = "local-model" print_markdown( f"""--- **You are using a local model.** We're working with LM Studio to provide access to local models for you. Download and start your model to get started. Instructions: 1. Download LM Studio. You can find the download link here: https://lmstudio.ai 2. Open LM Studio and download your model of choice. 3. Click the **↔ icon** on the very left sidebar 4. Select your model and click "Start Server" Note that your context window is set to {context_window}. To change this, run `context --context_window <context window>`. ---""" ) else: openrouter_key = os.environ.get("OPENROUTER_API_KEY") openai_key = os.environ.get("OPENAI_API_KEY") # Get the OpenAI API key, if not found if model in OPENAI_MODELS and not openai_key: print_markdown( """--- !!!**OpenAI API key not found.** Please provide a key to proceed. --- """ ) openai_key = getpass("OpenAI API key: ") os.environ["OPENAI_API_KEY"] = openai_key print_markdown( """ --- **Tip**: To save this key for later, run `export OPENAI_API_KEY=<your key>` on mac/linux or `setx OPENAI_API_KEY <your key>` on windows. For non-OpenAI models, you should set `OPENROUTER_API_KEY`, and optionally `OPENROUTER_APP_URL` and `OPENROUTER_APP_TITLE`. ---""" ) # Otherwise, grab the openrouter key, if not found elif model not in OPENAI_MODELS and not openrouter_key: print_markdown( """--- !!!**OpenRouter API key not found.** Please provide a key to proceed. --- """ ) api_key = getpass("OpenRouter API key: ") os.environ["OPENROUTER_API_KEY"] = api_key print_markdown( f""" --- **Tip**: To save this key for later, run `export OPENROUTER_API_KEY=<your key>` on mac/linux or `setx OPENROUTER_API_KEY <your key>` on windows. You can optionally set `OPENROUTER_APP_URL` and `OPENROUTER_APP_TITLE`, too. 
Note that your context window is set to {context_window}. To change this, run `context --context_window <context window>`. ---""" ) if model == "gpt-4-1106-preview": print_markdown( """!!!Welcome to Fleet Context! Generate and run code using the most up-to-date libraries. *Warning*: You are using gpt-4-turbo, which is not yet stable and is rate limited at 100 requests per day. Please use with caution. """ ) else: print_markdown( """!!!Welcome to Fleet Context! Generate and run code using the most up-to-date libraries. """ ) messages = [] while True: try: query = input("> ") query = query.strip() if not query: continue if query.lower() == "exit": rprint("Exiting. Goodbye!") break messages.append({"role": "user", "content": query}) rag_context = retrieve_context(query, k=k, filters=filters)
next_line: prompts = construct_prompt(
gold_snippet_index: 6
created_at: 2023-11-02 07:07:13+00:00
level: 16k
repo_name: ForceFledgling/proxyhub
file_path: proxyhub/api.py
[ { "identifier": "Checker", "path": "proxyhub/checker.py", "snippet": "class Checker:\n \"\"\"Proxy checker.\"\"\"\n\n def __init__(\n self,\n judges,\n max_tries=3,\n timeout=8,\n verify_ssl=False,\n strict=False,\n dnsbl=None,\n real_ext_ip=...
import asyncio import io import signal import warnings from collections import Counter, defaultdict from functools import partial from pprint import pprint from .checker import Checker from .errors import ResolveError from .providers import PROVIDERS, Provider from .proxy import Proxy from .resolver import Resolver from .server import Server from .utils import IPPortPatternLine, log
token_num: 12,780
# Pause between grabbing cycles; in seconds. GRAB_PAUSE = 180 # The maximum number of providers that are parsed concurrently MAX_CONCURRENT_PROVIDERS = 3 class Broker: """The Broker. | One broker to rule them all, one broker to find them, | One broker to bring them all and in the darkness bind them. :param asyncio.Queue queue: (optional) Queue of found/checked proxies :param int timeout: (optional) Timeout of a request in seconds :param int max_conn: (optional) The maximum number of concurrent checks of proxies :param int max_tries: (optional) The maximum number of attempts to check a proxy :param list judges: (optional) Urls of pages that show HTTP headers and IP address. Or :class:`~proxyhub.judge.Judge` objects :param list providers: (optional) Urls of pages where to find proxies. Or :class:`~proxyhub.providers.Provider` objects :param bool verify_ssl: (optional) Flag indicating whether to check the SSL certificates. Set to True to check ssl certifications :param loop: (optional) asyncio compatible event loop :param stop_broker_on_sigint: (optional) whether set SIGINT signal on broker object. Useful for a thread other than main thread. .. deprecated:: 0.2.0 Use :attr:`max_conn` and :attr:`max_tries` instead of :attr:`max_concurrent_conn` and :attr:`attempts_conn`. """ def __init__( self, queue=None, timeout=8, max_conn=200, max_tries=3, judges=None, providers=None, verify_ssl=False, loop=None, stop_broker_on_sigint=True, **kwargs, ): self._loop = loop or asyncio.get_event_loop_policy().get_event_loop() self._proxies = queue or asyncio.Queue()
# Pause between grabbing cycles; in seconds. GRAB_PAUSE = 180 # The maximum number of providers that are parsed concurrently MAX_CONCURRENT_PROVIDERS = 3 class Broker: """The Broker. | One broker to rule them all, one broker to find them, | One broker to bring them all and in the darkness bind them. :param asyncio.Queue queue: (optional) Queue of found/checked proxies :param int timeout: (optional) Timeout of a request in seconds :param int max_conn: (optional) The maximum number of concurrent checks of proxies :param int max_tries: (optional) The maximum number of attempts to check a proxy :param list judges: (optional) Urls of pages that show HTTP headers and IP address. Or :class:`~proxyhub.judge.Judge` objects :param list providers: (optional) Urls of pages where to find proxies. Or :class:`~proxyhub.providers.Provider` objects :param bool verify_ssl: (optional) Flag indicating whether to check the SSL certificates. Set to True to check ssl certifications :param loop: (optional) asyncio compatible event loop :param stop_broker_on_sigint: (optional) whether set SIGINT signal on broker object. Useful for a thread other than main thread. .. deprecated:: 0.2.0 Use :attr:`max_conn` and :attr:`max_tries` instead of :attr:`max_concurrent_conn` and :attr:`attempts_conn`. """ def __init__( self, queue=None, timeout=8, max_conn=200, max_tries=3, judges=None, providers=None, verify_ssl=False, loop=None, stop_broker_on_sigint=True, **kwargs, ): self._loop = loop or asyncio.get_event_loop_policy().get_event_loop() self._proxies = queue or asyncio.Queue()
next_line: self._resolver = Resolver(loop=self._loop)
gold_snippet_index: 5
created_at: 2023-11-05 13:28:57+00:00
level: 16k
repo_name: TheFunny/ArisuAutoSweeper
file_path: module/device/method/minitouch.py
[ { "identifier": "Config", "path": "module/base/decorator.py", "snippet": "class Config:\n \"\"\"\n Decorator that calls different function with a same name according to config.\n\n func_list likes:\n func_list = {\n 'func1': [\n {'options': {'ENABLE': True}, 'func': 1},\n ...
import asyncio import json import re import socket import time import websockets from functools import wraps from typing import List from adbutils.errors import AdbError from uiautomator2 import _Service from module.base.decorator import Config, cached_property, del_cached_property from module.base.timer import Timer from module.base.utils import * from module.device.connection import Connection from module.device.method.utils import RETRY_TRIES, retry_sleep, handle_adb_error from module.exception import RequestHumanTakeover, ScriptError from module.logger import logger
token_num: 12,343
def to_atx_agent(self) -> List[str]: return [command.to_atx_agent(self.max_x, self.max_y) for command in self.commands] def send(self): return self.device.minitouch_send(builder=self) class MinitouchNotInstalledError(Exception): pass class MinitouchOccupiedError(Exception): pass class U2Service(_Service): def __init__(self, name, u2obj): self.name = name self.u2obj = u2obj self.service_url = self.u2obj.path2url("/services/" + name) def retry(func): @wraps(func) def retry_wrapper(self, *args, **kwargs): """ Args: self (Minitouch): """ init = None for _ in range(RETRY_TRIES): try: if callable(init): retry_sleep(_) init() return func(self, *args, **kwargs) # Can't handle except RequestHumanTakeover: break # When adb server was killed except ConnectionResetError as e: logger.error(e) def init(): self.adb_reconnect() # Emulator closed except ConnectionAbortedError as e: logger.error(e) def init(): self.adb_reconnect() # MinitouchNotInstalledError: Received empty data from minitouch except MinitouchNotInstalledError as e: logger.error(e) def init(): self.install_uiautomator2() if self._minitouch_port: self.adb_forward_remove(f'tcp:{self._minitouch_port}') del_cached_property(self, 'minitouch_builder') # MinitouchOccupiedError: Timeout when connecting to minitouch except MinitouchOccupiedError as e: logger.error(e) def init(): self.restart_atx() if self._minitouch_port: self.adb_forward_remove(f'tcp:{self._minitouch_port}') del_cached_property(self, 'minitouch_builder') # AdbError except AdbError as e: if handle_adb_error(e): def init(): self.adb_reconnect() else: break except BrokenPipeError as e: logger.error(e) def init(): del_cached_property(self, 'minitouch_builder') # Unknown, probably a trucked image except Exception as e: logger.exception(e) def init(): pass logger.critical(f'Retry {func.__name__}() failed') raise RequestHumanTakeover return retry_wrapper class Minitouch(Connection): _minitouch_port: int = 0 _minitouch_client: socket.socket _minitouch_pid: int _minitouch_ws: websockets.WebSocketClientProtocol max_x: int max_y: int @cached_property def minitouch_builder(self): self.minitouch_init() return CommandBuilder(self) @Config.when(DEVICE_OVER_HTTP=False) def minitouch_init(self): logger.hr('MiniTouch init') max_x, max_y = 1280, 720 max_contacts = 2 max_pressure = 50 self.get_orientation() self._minitouch_port = self.adb_forward("localabstract:minitouch") # No need, minitouch already started by uiautomator2 # self.adb_shell([self.config.MINITOUCH_FILEPATH_REMOTE])
def random_normal_distribution(a, b, n=5): output = np.mean(np.random.uniform(a, b, size=n)) return output def random_theta(): theta = np.random.uniform(0, 2 * np.pi) return np.array([np.sin(theta), np.cos(theta)]) def random_rho(dis): return random_normal_distribution(-dis, dis) def insert_swipe(p0, p3, speed=15, min_distance=10): """ Insert way point from start to end. First generate a cubic bézier curve Args: p0: Start point. p3: End point. speed: Average move speed, pixels per 10ms. min_distance: Returns: list[list[int]]: List of points. Examples: > insert_swipe((400, 400), (600, 600), speed=20) [[400, 400], [406, 406], [416, 415], [429, 428], [444, 442], [462, 459], [481, 478], [504, 500], [527, 522], [545, 540], [560, 557], [573, 570], [584, 582], [592, 590], [597, 596], [600, 600]] """ p0 = np.array(p0) p3 = np.array(p3) # Random control points in Bézier curve distance = np.linalg.norm(p3 - p0) p1 = 2 / 3 * p0 + 1 / 3 * p3 + random_theta() * random_rho(distance * 0.1) p2 = 1 / 3 * p0 + 2 / 3 * p3 + random_theta() * random_rho(distance * 0.1) # Random `t` on Bézier curve, sparse in the middle, dense at start and end segments = max(int(distance / speed) + 1, 5) lower = random_normal_distribution(-85, -60) upper = random_normal_distribution(80, 90) theta = np.arange(lower + 0., upper + 0.0001, (upper - lower) / segments) ts = np.sin(theta / 180 * np.pi) ts = np.sign(ts) * abs(ts) ** 0.9 ts = (ts - min(ts)) / (max(ts) - min(ts)) # Generate cubic Bézier curve points = [] prev = (-100, -100) for t in ts: point = p0 * (1 - t) ** 3 + 3 * p1 * t * (1 - t) ** 2 + 3 * p2 * t ** 2 * (1 - t) + p3 * t ** 3 point = point.astype(int).tolist() if np.linalg.norm(np.subtract(point, prev)) < min_distance: continue points.append(point) prev = point # Delete nearing points if len(points[1:]): distance = np.linalg.norm(np.subtract(points[1:], points[0]), axis=1) mask = np.append(True, distance > min_distance) points = np.array(points)[mask].tolist() else: points = [p0, p3] return points class Command: def __init__( self, operation: str, contact: int = 0, x: int = 0, y: int = 0, ms: int = 10, pressure: int = 100 ): """ See https://github.com/openstf/minitouch#writable-to-the-socket Args: operation: c, r, d, m, u, w contact: x: y: ms: pressure: """ self.operation = operation self.contact = contact self.x = x self.y = y self.ms = ms self.pressure = pressure def to_minitouch(self) -> str: """ String that write into minitouch socket """ if self.operation == 'c': return f'{self.operation}\n' elif self.operation == 'r': return f'{self.operation}\n' elif self.operation == 'd': return f'{self.operation} {self.contact} {self.x} {self.y} {self.pressure}\n' elif self.operation == 'm': return f'{self.operation} {self.contact} {self.x} {self.y} {self.pressure}\n' elif self.operation == 'u': return f'{self.operation} {self.contact}\n' elif self.operation == 'w': return f'{self.operation} {self.ms}\n' else: return '' def to_atx_agent(self, max_x=1280, max_y=720) -> str: """ Dict that send to atx-agent, $DEVICE_URL/minitouch See https://github.com/openatx/atx-agent#minitouch%E6%93%8D%E4%BD%9C%E6%96%B9%E6%B3%95 """ x, y = self.x / max_x, self.y / max_y if self.operation == 'c': out = dict(operation=self.operation) elif self.operation == 'r': out = dict(operation=self.operation) elif self.operation == 'd': out = dict(operation=self.operation, index=self.contact, pressure=self.pressure, xP=x, yP=y) elif self.operation == 'm': out = dict(operation=self.operation, index=self.contact, pressure=self.pressure, xP=x, yP=y) elif 
self.operation == 'u': out = dict(operation=self.operation, index=self.contact) elif self.operation == 'w': out = dict(operation=self.operation, milliseconds=self.ms) else: out = dict() return json.dumps(out) class CommandBuilder: """Build command str for minitouch. You can use this, to custom actions as you wish:: with safe_connection(_DEVICE_ID) as connection: builder = CommandBuilder() builder.down(0, 400, 400, 50) builder.commit() builder.move(0, 500, 500, 50) builder.commit() builder.move(0, 800, 400, 50) builder.commit() builder.up(0) builder.commit() builder.publish(connection) """ DEFAULT_DELAY = 0.05 max_x = 1280 max_y = 720 def __init__(self, device, contact=0, handle_orientation=True): """ Args: device: """ self.device = device self.commands = [] self.delay = 0 self.contact = contact self.handle_orientation = handle_orientation @property def orientation(self): if self.handle_orientation: return self.device.orientation else: return 0 def convert(self, x, y): max_x, max_y = self.device.max_x, self.device.max_y orientation = self.orientation if orientation == 0: pass elif orientation == 1: x, y = 720 - y, x max_x, max_y = max_y, max_x elif orientation == 2: x, y = 1280 - x, 720 - y elif orientation == 3: x, y = y, 1280 - x max_x, max_y = max_y, max_x else: raise ScriptError(f'Invalid device orientation: {orientation}') self.max_x, self.max_y = max_x, max_y if not self.device.config.DEVICE_OVER_HTTP: # Maximum X and Y coordinates may, but usually do not, match the display size. x, y = int(x / 1280 * max_x), int(y / 720 * max_y) else: # When over http, max_x and max_y are default to 1280 and 720, skip matching display size x, y = int(x), int(y) return x, y def commit(self): """ add minitouch command: 'c\n' """ self.commands.append(Command('c')) return self def reset(self): """ add minitouch command: 'r\n' """ self.commands.append(Command('r')) return self def wait(self, ms=10): """ add minitouch command: 'w <ms>\n' """ self.commands.append(Command('w', ms=ms)) self.delay += ms return self def up(self): """ add minitouch command: 'u <contact>\n' """ self.commands.append(Command('u', contact=self.contact)) return self def down(self, x, y, pressure=100): """ add minitouch command: 'd <contact> <x> <y> <pressure>\n' """ x, y = self.convert(x, y) self.commands.append(Command('d', x=x, y=y, contact=self.contact, pressure=pressure)) return self def move(self, x, y, pressure=100): """ add minitouch command: 'm <contact> <x> <y> <pressure>\n' """ x, y = self.convert(x, y) self.commands.append(Command('m', x=x, y=y, contact=self.contact, pressure=pressure)) return self def clear(self): """ clear current commands """ self.commands = [] self.delay = 0 def to_minitouch(self) -> str: return ''.join([command.to_minitouch() for command in self.commands]) def to_atx_agent(self) -> List[str]: return [command.to_atx_agent(self.max_x, self.max_y) for command in self.commands] def send(self): return self.device.minitouch_send(builder=self) class MinitouchNotInstalledError(Exception): pass class MinitouchOccupiedError(Exception): pass class U2Service(_Service): def __init__(self, name, u2obj): self.name = name self.u2obj = u2obj self.service_url = self.u2obj.path2url("/services/" + name) def retry(func): @wraps(func) def retry_wrapper(self, *args, **kwargs): """ Args: self (Minitouch): """ init = None for _ in range(RETRY_TRIES): try: if callable(init): retry_sleep(_) init() return func(self, *args, **kwargs) # Can't handle except RequestHumanTakeover: break # When adb server was killed except 
ConnectionResetError as e: logger.error(e) def init(): self.adb_reconnect() # Emulator closed except ConnectionAbortedError as e: logger.error(e) def init(): self.adb_reconnect() # MinitouchNotInstalledError: Received empty data from minitouch except MinitouchNotInstalledError as e: logger.error(e) def init(): self.install_uiautomator2() if self._minitouch_port: self.adb_forward_remove(f'tcp:{self._minitouch_port}') del_cached_property(self, 'minitouch_builder') # MinitouchOccupiedError: Timeout when connecting to minitouch except MinitouchOccupiedError as e: logger.error(e) def init(): self.restart_atx() if self._minitouch_port: self.adb_forward_remove(f'tcp:{self._minitouch_port}') del_cached_property(self, 'minitouch_builder') # AdbError except AdbError as e: if handle_adb_error(e): def init(): self.adb_reconnect() else: break except BrokenPipeError as e: logger.error(e) def init(): del_cached_property(self, 'minitouch_builder') # Unknown, probably a trucked image except Exception as e: logger.exception(e) def init(): pass logger.critical(f'Retry {func.__name__}() failed') raise RequestHumanTakeover return retry_wrapper class Minitouch(Connection): _minitouch_port: int = 0 _minitouch_client: socket.socket _minitouch_pid: int _minitouch_ws: websockets.WebSocketClientProtocol max_x: int max_y: int @cached_property def minitouch_builder(self): self.minitouch_init() return CommandBuilder(self) @Config.when(DEVICE_OVER_HTTP=False) def minitouch_init(self): logger.hr('MiniTouch init') max_x, max_y = 1280, 720 max_contacts = 2 max_pressure = 50 self.get_orientation() self._minitouch_port = self.adb_forward("localabstract:minitouch") # No need, minitouch already started by uiautomator2 # self.adb_shell([self.config.MINITOUCH_FILEPATH_REMOTE])
next_line: retry_timeout = Timer(2).start()
gold_snippet_index: 3
created_at: 2023-11-01 07:09:45+00:00
level: 16k
repo_name: BrianPugh/cyclopts
file_path: tests/test_help.py
[ { "identifier": "App", "path": "cyclopts/core.py", "snippet": "class App:\n _name: Optional[Tuple[str, ...]] = field(default=None, alias=\"name\", converter=optional_to_tuple_converter)\n\n _help: Optional[str] = field(default=None, alias=\"help\")\n\n usage: Optional[str] = field(default=None)...
import inspect import sys import attrs import pytest from enum import Enum from textwrap import dedent from typing import List, Literal, Optional, Union from typing_extensions import Annotated from typing import Annotated from cyclopts import App, Group, Parameter from cyclopts.help import ( HelpEntry, HelpPanel, create_parameter_help_panel, format_command_entries, format_doc, format_usage, ) from cyclopts.resolve import ResolvedCommand
token_num: 11,381
"""Docstring for foo. This should not be shown. """ pass panel = HelpPanel(title="Commands", format="command") panel.entries.extend(format_command_entries((app["foo"],))) with console.capture() as capture: console.print(panel) actual = capture.get() assert actual == ( "╭─ Commands ─────────────────────────────────────────────────────────╮\n" "│ foo Docstring for foo. │\n" "╰────────────────────────────────────────────────────────────────────╯\n" ) def test_format_commands_no_show(app, console): @app.command def foo(): """Docstring for foo.""" pass @app.command(show=False) def bar(): """Should not be shown.""" pass panel = HelpPanel(title="Commands", format="command") panel.entries.extend(format_command_entries((app,))) with console.capture() as capture: app.help_print([], console=console) actual = capture.get() expected = dedent( """\ Usage: app COMMAND App Help String Line 1. ╭─ Commands ─────────────────────────────────────────────────────────╮ │ foo Docstring for foo. │ │ --help,-h Display this message and exit. │ │ --version Display application version. │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_format_commands_explicit_help(app, console): @app.command(help="Docstring for foo.") def foo(): """Should not be shown.""" pass panel = HelpPanel(title="Commands", format="command") panel.entries.extend(format_command_entries((app["foo"],))) with console.capture() as capture: console.print(panel) actual = capture.get() assert actual == ( "╭─ Commands ─────────────────────────────────────────────────────────╮\n" "│ foo Docstring for foo. │\n" "╰────────────────────────────────────────────────────────────────────╯\n" ) def test_format_commands_explicit_name(app, console): @app.command(name="bar") def foo(): """Docstring for bar. This should not be shown. """ pass panel = HelpPanel(title="Commands", format="command") panel.entries.extend(format_command_entries((app["bar"],))) with console.capture() as capture: console.print(panel) actual = capture.get() assert actual == ( "╭─ Commands ─────────────────────────────────────────────────────────╮\n" "│ bar Docstring for bar. │\n" "╰────────────────────────────────────────────────────────────────────╯\n" ) def test_help_empty(console): app = App(name="foo", version_flags=[], help_flags=[]) with console.capture() as capture: app.help_print(console=console) actual = capture.get() assert actual == "Usage: foo\n\n" @pytest.fixture def capture_format_group_parameters(console, default_function_groups): def inner(cmd): command = ResolvedCommand(cmd, *default_function_groups) with console.capture() as capture: group, iparams = command.groups_iparams[0] cparams = [command.iparam_to_cparam[x] for x in iparams] console.print(create_parameter_help_panel(group, iparams, cparams)) return capture.get() return inner def test_help_format_group_parameters(capture_format_group_parameters): def cmd(
if sys.version_info < (3, 9): else: @pytest.fixture def app(): return App( name="app", help="App Help String Line 1.", ) def test_empty_help_panel_rich_silent(console): help_panel = HelpPanel(format="command", title="test") with console.capture() as capture: console.print(help_panel) actual = capture.get() assert actual == "" def test_help_default_action(app, console): """No command should default to help.""" with console.capture() as capture: app([], console=console) actual = capture.get() expected = dedent( """\ Usage: app COMMAND App Help String Line 1. ╭─ Commands ─────────────────────────────────────────────────────────╮ │ --help,-h Display this message and exit. │ │ --version Display application version. │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_custom_usage(app, console): app.usage = "My custom usage." with console.capture() as capture: app([], console=console) actual = capture.get() expected = dedent( """\ My custom usage. App Help String Line 1. ╭─ Commands ─────────────────────────────────────────────────────────╮ │ --help,-h Display this message and exit. │ │ --version Display application version. │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_custom_usage_subapp(app, console): app.command(App(name="foo", usage="My custom usage.")) with console.capture() as capture: app(["foo", "--help"], console=console) actual = capture.get() expected = dedent( """\ My custom usage. ╭─ Commands ─────────────────────────────────────────────────────────╮ │ --help,-h Display this message and exit. │ │ --version Display application version. │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_default_help_flags(console): """Standard help flags.""" app = App(name="app", help="App Help String Line 1.") with console.capture() as capture: app(["--help"], console=console) actual = capture.get() expected = dedent( """\ Usage: app COMMAND App Help String Line 1. ╭─ Commands ─────────────────────────────────────────────────────────╮ │ --help,-h Display this message and exit. │ │ --version Display application version. │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_format_usage_empty(console): app = App( name="app", help="App Help String Line 1.", help_flags=[], version_flags=[], ) with console.capture() as capture: console.print(format_usage(app, [])) actual = capture.get() assert actual == "Usage: app\n\n" def test_help_format_usage_command(app, console): @app.command def foo(): pass with console.capture() as capture: console.print(format_usage(app, [])) actual = capture.get() assert actual == "Usage: app COMMAND\n\n" def test_format_commands_docstring(app, console): @app.command def foo(): """Docstring for foo. This should not be shown. """ pass panel = HelpPanel(title="Commands", format="command") panel.entries.extend(format_command_entries((app["foo"],))) with console.capture() as capture: console.print(panel) actual = capture.get() assert actual == ( "╭─ Commands ─────────────────────────────────────────────────────────╮\n" "│ foo Docstring for foo. 
│\n" "╰────────────────────────────────────────────────────────────────────╯\n" ) def test_format_commands_no_show(app, console): @app.command def foo(): """Docstring for foo.""" pass @app.command(show=False) def bar(): """Should not be shown.""" pass panel = HelpPanel(title="Commands", format="command") panel.entries.extend(format_command_entries((app,))) with console.capture() as capture: app.help_print([], console=console) actual = capture.get() expected = dedent( """\ Usage: app COMMAND App Help String Line 1. ╭─ Commands ─────────────────────────────────────────────────────────╮ │ foo Docstring for foo. │ │ --help,-h Display this message and exit. │ │ --version Display application version. │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_format_commands_explicit_help(app, console): @app.command(help="Docstring for foo.") def foo(): """Should not be shown.""" pass panel = HelpPanel(title="Commands", format="command") panel.entries.extend(format_command_entries((app["foo"],))) with console.capture() as capture: console.print(panel) actual = capture.get() assert actual == ( "╭─ Commands ─────────────────────────────────────────────────────────╮\n" "│ foo Docstring for foo. │\n" "╰────────────────────────────────────────────────────────────────────╯\n" ) def test_format_commands_explicit_name(app, console): @app.command(name="bar") def foo(): """Docstring for bar. This should not be shown. """ pass panel = HelpPanel(title="Commands", format="command") panel.entries.extend(format_command_entries((app["bar"],))) with console.capture() as capture: console.print(panel) actual = capture.get() assert actual == ( "╭─ Commands ─────────────────────────────────────────────────────────╮\n" "│ bar Docstring for bar. │\n" "╰────────────────────────────────────────────────────────────────────╯\n" ) def test_help_empty(console): app = App(name="foo", version_flags=[], help_flags=[]) with console.capture() as capture: app.help_print(console=console) actual = capture.get() assert actual == "Usage: foo\n\n" @pytest.fixture def capture_format_group_parameters(console, default_function_groups): def inner(cmd): command = ResolvedCommand(cmd, *default_function_groups) with console.capture() as capture: group, iparams = command.groups_iparams[0] cparams = [command.iparam_to_cparam[x] for x in iparams] console.print(create_parameter_help_panel(group, iparams, cparams)) return capture.get() return inner def test_help_format_group_parameters(capture_format_group_parameters): def cmd(
next_line: foo: Annotated[str, Parameter(help="Docstring for foo.")],
gold_snippet_index: 2
created_at: 2023-11-03 02:24:25+00:00
level: 16k
repo_name: RoboFlamingo/RoboFlamingo
file_path: robot_flamingo/models/factory.py
[ { "identifier": "BCFlamingo", "path": "robot_flamingo/models/flamingo_bc.py", "snippet": "class BCFlamingo(nn.Module):\n def __init__(\n self,\n vision_encoder: nn.Module,\n lang_encoder: nn.Module,\n eoc_token_id: int,\n media_token_id: int,\n vis_dim: int,\...
from logging import debug from transformers import AutoModelForCausalLM, AutoTokenizer, AutoConfig from typing import Optional from robot_flamingo.models.flamingo_bc import BCFlamingo from robot_flamingo.models.flamingo_mpt import MPTFlamingo from open_flamingo.src.flamingo_lm import FlamingoLMMixin from open_flamingo.src.utils import extend_instance from open_flamingo.src.factory import _infer_decoder_layers_attr_name import open_clip
token_num: 13,277
def create_model_and_transforms( clip_vision_encoder_path: str, clip_vision_encoder_pretrained: str, lang_encoder_path: str, tokenizer_path: str, cross_attn_every_n_layers: int = 1, use_local_files: bool = False, decoder_layers_attr_name: str = None, # this is the window size sampled from the episode window_size: int = 32, freeze_embed: bool = False, train_params = -1, use_gripper=False, use_state=False, last_action=False, fusion_mode='', pad_length=-1, debug=False, sep_resampler=False, sep_lm_head=False, unfreeze_vit=False, return_feature=False, multi_step_action=1, llm_name='llama_9b', pooling='max', residual=False, tcp_rel=False, replan=-1, decoder_type='lstm', hidden_size=None, freeze_sampler=False, fwd_pred=False, fwd_pred_hand=False, no_image_patch=False, global_latent=1, refresh=-1, **flamingo_kwargs, ): """ Initialize a Flamingo model from a pretrained vision encoder and language encoder. Appends special tokens to the tokenizer and freezes backbones. Args: clip_vision_encoder_path (str): path to pretrained clip model (e.g. "ViT-B-32") clip_vision_encoder_pretrained (str): name of pretraining dataset for clip model (e.g. "laion2b_s32b_b79k") lang_encoder_path (str): path to pretrained language encoder tokenizer_path (str): path to pretrained tokenizer cross_attn_every_n_layers (int, optional): determines how often to add a cross-attention layer. Defaults to 1. use_local_files (bool, optional): whether to use local files. Defaults to False. decoder_layers_attr_name (str, optional): name of the decoder layers attribute. Defaults to None. Returns: Flamingo: Flamingo model from pretrained vision and language encoders Image processor: Pipeline to preprocess input images Tokenizer: A tokenizer for the language model """ vision_encoder, _, image_processor = open_clip.create_model_and_transforms( clip_vision_encoder_path, pretrained=clip_vision_encoder_pretrained ) # set the vision encoder to output the visual features vision_encoder.visual.output_tokens = True text_tokenizer = AutoTokenizer.from_pretrained( tokenizer_path, local_files_only=use_local_files ) # add Flamingo special tokens to the tokenizer text_tokenizer.add_special_tokens( {"additional_special_tokens": ["<|endofchunk|>", "<image>"]} ) if text_tokenizer.pad_token is None: # Issue: GPT models don't have a pad token, which we use to # modify labels for the loss. text_tokenizer.add_special_tokens({"pad_token": "<PAD>"}) if debug: # Load the local checkpoint into a model instance. lang_encoder = AutoModelForCausalLM.from_pretrained(lang_encoder_path, ignore_keys=["config"], trust_remote_code=True) # Set the `init_weights` parameter to `False` to prevent the model from loading the pretrained weights. 
lang_encoder.init_weights(False) else: print(lang_encoder_path) lang_encoder = AutoModelForCausalLM.from_pretrained( lang_encoder_path, local_files_only=use_local_files, trust_remote_code=True ) # print(lang_encoder_path) # if llm_name == 'llama': # lang_encoder = AutoModelForCausalLM.from_pretrained( # lang_encoder_path, local_files_only=use_local_files # ) # else: # # name = 'mosaicml/mpt-7b' # config = { # "model_type": "auto", # "add_lm_head": True, # } # lang_encoder = AutoModelForCausalLM.from_pretrained( # lang_encoder_path, local_files_only=use_local_files # ) # hacks for MPT-1B, which doesn't have a get_input_embeddings method if "mpt-1b-redpajama-200b" in lang_encoder_path: class EmbeddingFnMixin: def get_input_embeddings(self): return self.transformer.wte def set_input_embeddings(self, new_embeddings): self.transformer.wte = new_embeddings extend_instance(lang_encoder, EmbeddingFnMixin) extend_instance(lang_encoder, FlamingoLMMixin) if decoder_layers_attr_name is None: decoder_layers_attr_name = _infer_decoder_layers_attr_name(lang_encoder) lang_encoder.set_decoder_layers_attr_name(decoder_layers_attr_name) # print(lang_encoder.base_model_prefix) # print(getattr(lang_encoder, lang_encoder.base_model_prefix, lang_encoder)) # print(lang_encoder) lang_encoder.resize_token_embeddings(len(text_tokenizer)) if 'llama' in llm_name: Model_fn = BCFlamingo elif 'mpt' in llm_name:
mpt_dict = { "mpt_3b": { "lang_encoder_path": "path_to/mpt-1b-redpajama-200b", "tokenizer_path": "path_to/mpt-1b-redpajama-200b", "cross_attn_every_n_layers": 1, "openflamingo_checkpoint": "path_to/OpenFlamingo-3B-vitl-mpt1b/checkpoint.pt" }, "mpt_dolly_3b": { "lang_encoder_path": "path_to/mpt-1b-redpajama-200b-dolly", "tokenizer_path": "path_to/mpt-1b-redpajama-200b-dolly", "cross_attn_every_n_layers": 1, "openflamingo_checkpoint": "path_to/OpenFlamingo-3B-vitl-mpt1b-langinstruct/checkpoint.pt" }, "mpt_4b": { "lang_encoder_path": "path_to/RedPajama-INCITE-Instruct-3B-v1", "tokenizer_path": "path_to/RedPajama-INCITE-Instruct-3B-v1", "cross_attn_every_n_layers": 2, "openflamingo_checkpoint": "path_to/OpenFlamingo-4B-vitl-rpj3b-langinstruct/checkpoint.pt" }, "mpt_base_4b": { "lang_encoder_path": "path_to/RedPajama-INCITE-Base-3B-v1", "tokenizer_path": "path_to/RedPajama-INCITE-Base-3B-v1", "cross_attn_every_n_layers": 2, "openflamingo_checkpoint": "path_to/OpenFlamingo-4B-vitl-rpj3b/checkpoint.pt" }, "mpt_9b": { "lang_encoder_path": "path_to/mpt-7b", "tokenizer_path": "path_to/mpt-7b", "cross_attn_every_n_layers": 4, "openflamingo_checkpoint": "path_to/OpenFlamingo-9B-vitl-mpt7b/checkpoint.pt" }, "llama_9b": { "lang_encoder_path": "path_to/llama-7b-hf-jxu124", "tokenizer_path": "path_to/llama-7b-hf-jxu124", "cross_attn_every_n_layers": 4, "openflamingo_checkpoint": "path_to/OpenFlamingo-9B/checkpoint.pt" } } def get_transforms( clip_vision_encoder_path: str = "ViT-L-14", clip_vision_encoder_pretrained: str = "openai", tokenizer_path: str = "path_to/llama-7b-hf-jxu124", use_local_files: bool = False, ): vision_encoder, _, image_processor = open_clip.create_model_and_transforms( clip_vision_encoder_path, pretrained=clip_vision_encoder_pretrained ) text_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased") # add Flamingo special tokens to the tokenizer text_tokenizer.add_special_tokens( {"additional_special_tokens": ["<|endofchunk|>", "<image>"]} ) if text_tokenizer.pad_token is None: # Issue: GPT models don't have a pad token, which we use to # modify labels for the loss. text_tokenizer.add_special_tokens({"pad_token": "<PAD>"}) return image_processor, text_tokenizer def create_model_and_transforms( clip_vision_encoder_path: str, clip_vision_encoder_pretrained: str, lang_encoder_path: str, tokenizer_path: str, cross_attn_every_n_layers: int = 1, use_local_files: bool = False, decoder_layers_attr_name: str = None, # this is the window size sampled from the episode window_size: int = 32, freeze_embed: bool = False, train_params = -1, use_gripper=False, use_state=False, last_action=False, fusion_mode='', pad_length=-1, debug=False, sep_resampler=False, sep_lm_head=False, unfreeze_vit=False, return_feature=False, multi_step_action=1, llm_name='llama_9b', pooling='max', residual=False, tcp_rel=False, replan=-1, decoder_type='lstm', hidden_size=None, freeze_sampler=False, fwd_pred=False, fwd_pred_hand=False, no_image_patch=False, global_latent=1, refresh=-1, **flamingo_kwargs, ): """ Initialize a Flamingo model from a pretrained vision encoder and language encoder. Appends special tokens to the tokenizer and freezes backbones. Args: clip_vision_encoder_path (str): path to pretrained clip model (e.g. "ViT-B-32") clip_vision_encoder_pretrained (str): name of pretraining dataset for clip model (e.g. 
"laion2b_s32b_b79k") lang_encoder_path (str): path to pretrained language encoder tokenizer_path (str): path to pretrained tokenizer cross_attn_every_n_layers (int, optional): determines how often to add a cross-attention layer. Defaults to 1. use_local_files (bool, optional): whether to use local files. Defaults to False. decoder_layers_attr_name (str, optional): name of the decoder layers attribute. Defaults to None. Returns: Flamingo: Flamingo model from pretrained vision and language encoders Image processor: Pipeline to preprocess input images Tokenizer: A tokenizer for the language model """ vision_encoder, _, image_processor = open_clip.create_model_and_transforms( clip_vision_encoder_path, pretrained=clip_vision_encoder_pretrained ) # set the vision encoder to output the visual features vision_encoder.visual.output_tokens = True text_tokenizer = AutoTokenizer.from_pretrained( tokenizer_path, local_files_only=use_local_files ) # add Flamingo special tokens to the tokenizer text_tokenizer.add_special_tokens( {"additional_special_tokens": ["<|endofchunk|>", "<image>"]} ) if text_tokenizer.pad_token is None: # Issue: GPT models don't have a pad token, which we use to # modify labels for the loss. text_tokenizer.add_special_tokens({"pad_token": "<PAD>"}) if debug: # Load the local checkpoint into a model instance. lang_encoder = AutoModelForCausalLM.from_pretrained(lang_encoder_path, ignore_keys=["config"], trust_remote_code=True) # Set the `init_weights` parameter to `False` to prevent the model from loading the pretrained weights. lang_encoder.init_weights(False) else: print(lang_encoder_path) lang_encoder = AutoModelForCausalLM.from_pretrained( lang_encoder_path, local_files_only=use_local_files, trust_remote_code=True ) # print(lang_encoder_path) # if llm_name == 'llama': # lang_encoder = AutoModelForCausalLM.from_pretrained( # lang_encoder_path, local_files_only=use_local_files # ) # else: # # name = 'mosaicml/mpt-7b' # config = { # "model_type": "auto", # "add_lm_head": True, # } # lang_encoder = AutoModelForCausalLM.from_pretrained( # lang_encoder_path, local_files_only=use_local_files # ) # hacks for MPT-1B, which doesn't have a get_input_embeddings method if "mpt-1b-redpajama-200b" in lang_encoder_path: class EmbeddingFnMixin: def get_input_embeddings(self): return self.transformer.wte def set_input_embeddings(self, new_embeddings): self.transformer.wte = new_embeddings extend_instance(lang_encoder, EmbeddingFnMixin) extend_instance(lang_encoder, FlamingoLMMixin) if decoder_layers_attr_name is None: decoder_layers_attr_name = _infer_decoder_layers_attr_name(lang_encoder) lang_encoder.set_decoder_layers_attr_name(decoder_layers_attr_name) # print(lang_encoder.base_model_prefix) # print(getattr(lang_encoder, lang_encoder.base_model_prefix, lang_encoder)) # print(lang_encoder) lang_encoder.resize_token_embeddings(len(text_tokenizer)) if 'llama' in llm_name: Model_fn = BCFlamingo elif 'mpt' in llm_name:
next_line: Model_fn = MPTFlamingo
gold_snippet_index: 1
created_at: 2023-11-02 01:36:23+00:00
level: 16k
repo_name: bigai-nlco/langsuite
file_path: langsuite/envs/teach/libs/teach/dataset/dataset.py
[ { "identifier": "Definitions", "path": "langsuite/envs/teach/libs/teach/dataset/definitions.py", "snippet": "class Definitions:\n def __init__(self, definitions=None, simulator=\"THOR\", version=\"2.0\"):\n self.simulator = simulator\n self.version = version\n if definitions is N...
import json import os from collections import OrderedDict from langsuite.envs.teach.libs.teach.dataset.definitions import Definitions from langsuite.envs.teach.libs.teach.dataset.task import Task from langsuite.envs.teach.libs.teach.dataset.task_THOR import Task_THOR
token_num: 14,080
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: MIT-0 from __future__ import annotations class Dataset: def __init__( self, task_type=None, definitions=None, comments="", version=None, tasks=None ): self.version = "2.0" if version is None else version self.task_type = task_type self.comments = comments self.definitions = ( Definitions(definitions=definitions, version=version) if definitions is None else definitions ) self.tasks = tasks if tasks is not None else [] def add_task(self, task): self.tasks.append(task) def to_dict(self): _dict = OrderedDict() _dict["version"] = self.version _dict["task_type"] = self.task_type _dict["comments"] = self.comments _dict["definitions"] = self.definitions.to_dict() _dict["tasks"] = [x.to_dict() for x in self.tasks] return _dict @classmethod def from_dict( cls, dataset_dict, process_init_state=True, version="2.0" ) -> "Dataset": definitions = Definitions(dataset_dict["definitions"]) if version == "2.0": tasks = [ Task_THOR.from_dict(task_dict, definitions, process_init_state) for task_dict in dataset_dict.get("tasks") ] else: tasks = [
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: MIT-0 from __future__ import annotations class Dataset: def __init__( self, task_type=None, definitions=None, comments="", version=None, tasks=None ): self.version = "2.0" if version is None else version self.task_type = task_type self.comments = comments self.definitions = ( Definitions(definitions=definitions, version=version) if definitions is None else definitions ) self.tasks = tasks if tasks is not None else [] def add_task(self, task): self.tasks.append(task) def to_dict(self): _dict = OrderedDict() _dict["version"] = self.version _dict["task_type"] = self.task_type _dict["comments"] = self.comments _dict["definitions"] = self.definitions.to_dict() _dict["tasks"] = [x.to_dict() for x in self.tasks] return _dict @classmethod def from_dict( cls, dataset_dict, process_init_state=True, version="2.0" ) -> "Dataset": definitions = Definitions(dataset_dict["definitions"]) if version == "2.0": tasks = [ Task_THOR.from_dict(task_dict, definitions, process_init_state) for task_dict in dataset_dict.get("tasks") ] else: tasks = [
next_line: Task.from_dict(task_dict, definitions, process_init_state)
gold_snippet_index: 1
created_at: 2023-11-01 01:47:00+00:00
level: 16k
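If the `next_line` field is used as the reference when scoring a model, a whitespace-normalized exact match is one plausible metric; the benchmark's actual metric (for example, edit similarity) may differ, so this is only an illustrative sketch:

```python
def exact_match(prediction: str, reference: str) -> bool:
    """Score a generated line against the reference next_line (strict string match)."""
    return prediction.strip() == reference.strip()

# Hypothetical usage with a record `rec` read as shown earlier:
# exact_match(model_output, rec["next_line"])
```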
repo_name: radekd91/inferno
file_path: inferno/datasets/VideoDatasetBase.py
[ { "identifier": "KeypointNormalization", "path": "inferno/transforms/keypoints.py", "snippet": "class KeypointNormalization(KeypointTransform):\n\n def __init__(self, scale_x=1., scale_y=1.):\n super().__init__(scale_x, scale_y)\n\n def forward(self, points):\n # normalization the wa...
import torch import numpy as np import imgaug import pandas as pd import torch.nn.functional as F import subprocess import traceback import cv2 import timeit import cv2 import librosa import mediapipe as mp import plotly.express as px import plotly.graph_objects as go import pandas as pd from inferno.transforms.keypoints import KeypointNormalization, KeypointScale from inferno.utils.MediaPipeFaceOccluder import MediaPipeFaceOccluder, sizes_to_bb_batch from pathlib import Path from scipy.io import wavfile from python_speech_features import logfbank from inferno.datasets.FaceDataModuleBase import FaceDataModuleBase from inferno.datasets.IO import (load_and_process_segmentation, process_segmentation, load_segmentation, load_segmentation_list, load_segmentation_list_v2, load_reconstruction_list, load_emotion_list, load_reconstruction_list_v2, load_emotion_list_v2, ) from inferno.datasets.ImageDatasetHelpers import bbox2point, bbpoint_warp from inferno.utils.FaceDetector import load_landmark from skvideo.io import vread, vreader, FFmpegReader from inferno.layers.losses.MediaPipeLandmarkLosses import MEDIAPIPE_LANDMARK_NUMBER from decord import VideoReader, cpu from inferno.models.mica.MicaInputProcessing import MicaInputProcessor from munch import Munch from inferno.models.temporal.Preprocessors import FlamePreprocessor from tqdm import auto from inferno.utils.MediaPipeLandmarkDetector import np2mediapipe from inferno.utils.DecaUtils import tensor_vis_landmarks
token_num: 13,194
else: # landmarks are saved per frame landmark_validity = np.ones((len(landmarks), 1), dtype=np.float32) for i in range(start_frame, sequence_length + start_frame): landmark_path = landmarks_dir / f"{i:05d}_000.pkl" landmark_type, landmark = load_landmark(landmark_path) landmarks += [landmark] if len(landmark) == 0: # dropped detection landmark = [0, 0] landmark_validity[li] = 0. elif len(landmark) > 1: # multiple faces detected landmarks[li] = landmarks[li][0] # just take the first one for now else: landmark[li] = landmarks[li][0] landmarks = np.stack(landmarks, axis=0) # if landmark_type == "mediapipe" and self.align_images: # # # [WARNING] mediapipe landmarks coordinates are saved in the scale [0.0-1.0] (for absolute they need to be multiplied by img size) # # # landmarks -= 0.5 # # landmarks -= 1. # landmarks *= 2 # # # landmarks *= 2 # landmarks -= 1 # pad landmarks with zeros if necessary to match the desired video length if landmarks.shape[0] < sequence_length: landmarks = np.concatenate([landmarks, np.zeros( (sequence_length - landmarks.shape[0], *landmarks.shape[1:]), dtype=landmarks.dtype)], axis=0) landmark_validity = np.concatenate([landmark_validity, np.zeros((sequence_length - landmark_validity.shape[0], 1), dtype=landmark_validity.dtype)], axis=0) landmark_dict[landmark_type] = landmarks.astype(np.float32) landmark_validity_dict[landmark_type] = landmark_validity sample["landmarks"] = landmark_dict sample["landmarks_validity"] = landmark_validity_dict return sample def _path_to_segmentations(self, index): return (Path(self.output_dir) / f"segmentations_{self.segmentation_source}" / self.segmentation_type / self.video_list[self.video_indices[index]]).with_suffix("") def _read_segmentations(self, index, start_frame=None, end_frame=None): segmentations_dir = self._path_to_segmentations(index) if (segmentations_dir / "segmentations.hdf5").exists(): # if random access hdf5 exists (newest), let's use it segmentations, seg_types, seg_names = load_segmentation_list_v2(segmentations_dir / "segmentations.hdf5", start_frame, end_frame) elif (segmentations_dir / "segmentations.pkl").exists(): # segmentations are saved in a single pickle (no random access) segmentations, seg_types, seg_names = load_segmentation_list(segmentations_dir / "segmentations.pkl") if start_frame is not None and end_frame is not None: segmentations = segmentations[start_frame: end_frame] seg_types = seg_types[start_frame: end_frame] seg_names = seg_names[start_frame: end_frame] if isinstance(segmentations, list): segmentations = np.stack(segmentations, axis=0) if segmentations.ndim == 4: # T, C=1, W, H segmentations = segmentations[:,0,...] 
if isinstance(seg_types[0], bytes): seg_types = [seg_type.decode("utf-8") for seg_type in seg_types] if isinstance(seg_names[0], bytes): seg_names = [seg_name.decode("utf-8") for seg_name in seg_names] return segmentations, seg_types, seg_names def _retrieve_segmentations(self, index, start_frame, end_frame): if not self.preload_videos: # segmentations_dir = self._path_to_segmentations(index) # if (segmentations_dir / "segmentations.hdf5").exists(): # random access hdf5 exists, let's use it # segmentations, seg_types, seg_names = load_segmentation_list_v2(segmentations_dir / "segmentations.hdf5", start_frame, end_frame) # elif (segmentations_dir / "segmentations.pkl").exists(): # segmentations are saved in a single pickle # seg_images, seg_types, seg_names = load_segmentation_list(segmentations_dir / "segmentations.pkl") # segmentations = seg_images[start_frame: end_frame] # if isinstance(seg_images, list): # segmentations = np.stack(seg_images, axis=0) # if seg_images.ndim == 4: # T, C=1, W, H # segmentations = segmentations[:,0,...] segmentations, seg_types, seg_names = self._read_segmentations(index, start_frame, end_frame) return segmentations, seg_types, seg_names else: video_path = str(self._get_video_path(index)) segmentations, seg_types, seg_names = self.seg_cache[video_path] segmentations = segmentations[start_frame: end_frame] seg_types = seg_types[start_frame: end_frame] seg_names = seg_names[start_frame: end_frame] return segmentations, seg_types, seg_names def _load_reconstructions(self, index, rec_type, appearance=False, start_frame=None, end_frame=None): reconstructions_dir = self._path_to_reconstructions(index, rec_type) if (reconstructions_dir / "shape_pose_cam.hdf5").exists(): # random access hdf5 exists, let's use it shape_pose_cam = load_reconstruction_list_v2(reconstructions_dir / "shape_pose_cam.hdf5", start_frame=start_frame, end_frame=end_frame) if appearance: appearance = load_reconstruction_list_v2(reconstructions_dir / "appearance.hdf5", start_frame=start_frame, end_frame=end_frame) else: appearance = None elif (reconstructions_dir / "shape_pose_cam.pkl").exists(): # reconstructions are saved in a single pickle shape_pose_cam = load_reconstruction_list(reconstructions_dir / "shape_pose_cam.pkl", start_frame=start_frame, end_frame=end_frame) if appearance: appearance = load_reconstruction_list(reconstructions_dir / "appearance.pkl", start_frame=start_frame, end_frame=end_frame) else: appearance = None ## should no longer be necessary as the start/end frame is now handled in the load_reconstruction_list function # if start_frame is not None and end_frame is not None: # shape_pose_cam = {key: shape_pose_cam[key][:, start_frame: end_frame] for key in shape_pose_cam.keys()} # if appearance is not None: # appearance = {key: appearance[key][:, start_frame: end_frame] for key in appearance.keys()} else: raise RuntimeError(f"Reconstruction file not found in {reconstructions_dir}") # for key in shape_pose_cam.keys(): # shape_pose_cam[key] = np.copy(shape_pose_cam[key]) # for key in appearance.keys(): # appearance[key] = np.copy(appearance[key]) return shape_pose_cam, appearance def _load_emotions(self, index, features=False, start_frame=None, end_frame=None): emotions_dir = self._path_to_emotions(index) if (emotions_dir / "emotions.hdf5").exists(): # random access hdf5 exists, let's use it
""" Author: Radek Danecek Copyright (c) 2023, Radek Danecek All rights reserved. # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is # holder of all proprietary rights on this computer program. # Using this computer program means that you agree to the terms # in the LICENSE file included with this software distribution. # Any use not explicitly granted by the LICENSE is prohibited. # # Copyright©2022 Max-Planck-Gesellschaft zur Förderung # der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute # for Intelligent Systems. All rights reserved. # # For comments or questions, please email us at emote@tue.mpg.de # For commercial licensing contact, please contact ps-license@tuebingen.mpg.de """ class AbstractVideoDataset(torch.utils.data.Dataset): def __init__(self) -> None: super().__init__() def _augment_sequence_sample(self, index, sample): raise NotImplementedError() def visualize_sample(self, sample): raise NotImplementedError() class VideoDatasetBase(AbstractVideoDataset): def __init__(self, root_path, output_dir, video_list, video_metas, video_indices, # audio_paths, audio_metas, sequence_length, audio_noise_prob=0.0, stack_order_audio=4, audio_normalization="layer_norm", landmark_types="mediapipe", segmentation_type = "bisenet", landmark_source = "original", segmentation_source = "original", occlusion_length=0, occlusion_probability_mouth = 0.0, occlusion_probability_left_eye = 0.0, occlusion_probability_right_eye = 0.0, occlusion_probability_face = 0.0, image_size=None, ## the image size that the dataset will output transforms : imgaug.augmenters.Augmenter = None, hack_length=False, use_original_video=True, include_processed_audio = True, include_raw_audio = True, temporal_split_start=None, # if temporally splitting the video (train, val, test), this is the start of the split temporal_split_end=None, # if temporally splitting the video (train, val, test), this is the end of the split preload_videos=False, # cache all videos in memory (recommended for smaller datasets) inflate_by_video_size=False, include_filename=False, # if True includes the filename of the video in the sample align_images = True, use_audio=True, #if True, includes audio in the sample reconstruction_type = None, return_global_pose = False, return_appearance = False, average_shape_decode = False, emotion_type = None, return_emotion_feature=False, read_video=True, read_audio=True, original_image_size=None, ## the processed videos may be different in size and if they are, the landmarks will be, too. This is to remember return_mica_images=False, ) -> None: super().__init__() self.root_path = root_path self.output_dir = output_dir self.video_list = video_list self.video_indices = video_indices self.video_metas = video_metas self.sequence_length = sequence_length or 1 # self.audio_paths = audio_paths self.audio_metas = audio_metas self.audio_noise_prob = audio_noise_prob self.image_size = image_size self.original_image_size = original_image_size or image_size self.scale = 1.25 # if the video is 25 fps and the audio is 16 kHz, stack_order_audio corresponds to 4 # (i.e. 
4 consecutive filterbanks will be concatenated to sync with the visual frame) self.stack_order_audio = stack_order_audio self.audio_normalization = audio_normalization self.landmark_types = landmark_types if isinstance(self.landmark_types, str): self.landmark_types = [self.landmark_types] self.landmark_source = landmark_source if isinstance(self.landmark_source, str): self.landmark_source = [self.landmark_source] * len(self.landmark_types) assert len(self.landmark_types) == len(self.landmark_source), "landmark_types and landmark_source must have the same length" self.segmentation_type = segmentation_type self.segmentation_source = segmentation_source self.landmark_normalizer = KeypointNormalization() # postprocesses final landmarks to be in [-1, 1] self.occluder = MediaPipeFaceOccluder() self.occlusion_probability_mouth = occlusion_probability_mouth self.occlusion_probability_left_eye = occlusion_probability_left_eye self.occlusion_probability_right_eye = occlusion_probability_right_eye self.occlusion_probability_face = occlusion_probability_face self.occlusion_length = occlusion_length if isinstance(self.occlusion_length, int): self.occlusion_length = [self.occlusion_length, self.occlusion_length+1] # self.occlusion_length = [20, 30] self.occlusion_length = sorted(self.occlusion_length) self.include_processed_audio = include_processed_audio self.include_raw_audio = include_raw_audio # self.align_images = True # self.align_images = False self.align_images = align_images self.use_audio = use_audio self.use_original_video = use_original_video self.transforms = transforms or imgaug.augmenters.Resize((image_size, image_size)) self.hack_length = hack_length if self.hack_length == "auto": if self._true_len() < 64: # hacks the length for supersmall test datasets self.hack_length = (64 // self._true_len()) if 64 % self._true_len() != 0: self.hack_length += 1 self.hack_length = float(self.hack_length) # useful hack to repeat the elements in the dataset for really small datasets else: self.hack_length = False assert self.occlusion_length[0] >= 0 # assert self.occlusion_length[1] <= self.sequence_length + 1 self.temporal_split_start = temporal_split_start self.temporal_split_end = temporal_split_end self.preload_videos = preload_videos self.inflate_by_video_size = inflate_by_video_size # self.read_video = True self.read_video = read_video self.read_audio = read_audio self.reconstruction_type = reconstruction_type if self.reconstruction_type is not None: if isinstance(self.reconstruction_type, str): self.reconstruction_type = [self.reconstruction_type] assert isinstance(self.reconstruction_type, list), "reconstruction_type must be a list or None" self.return_global_pose = return_global_pose self.return_appearance = return_appearance self.average_shape_decode = average_shape_decode self.emotion_type = emotion_type self.return_emotion_feature = return_emotion_feature self.video_cache = {} self.audio_cache = {} self.seg_cache = {} self.lmk_cache = {} self.rec_cache = {} self.emo_cache = {} if self.preload_videos: self._preload_videos() self.video_sample_indices = None if self.inflate_by_video_size: self._inflate_by_video_size() self.include_filename = include_filename # if True, face alignment will not crash if invalid. 
By default this should be False to avoid silent data errors self._allow_alignment_fail = False self.return_mica_image = return_mica_images if not self.read_video: assert not bool(self.return_mica_image), "return_mica_image is only supported when read_video is True" if bool(self.return_mica_image): if self.return_mica_image is True: self.return_mica_image = "fan" self.mica_preprocessor = MicaInputProcessor(self.return_mica_image) @property def invalid_cutoff(self): max_cutoff = 0 for rec_type in self.reconstruction_type: if rec_type == "spectre": max_cutoff = max(max_cutoff, 2 ) elif rec_type in ["emoca", "deca"] or "emoca" in rec_type.lower() or "emica" in rec_type.lower() or "edeca" in rec_type.lower(): max_cutoff = max(max_cutoff, 0 ) else: raise ValueError(f"Invalid reconstruction type: '{rec_type}'") return max_cutoff def _load_flame(self): if self.reconstruction_type is not None: flame_cfg = Munch() flame_cfg.type = "flame" flame_cfg.flame = Munch({ "flame_model_path": "/ps/scratch/rdanecek/data/FLAME/geometry/generic_model.pkl" , "n_shape": 100 , # n_exp: 100 "n_exp": 50, "flame_lmk_embedding_path": "/ps/scratch/rdanecek/data/FLAME/geometry/landmark_embedding.npy" }) flame_cfg.use_texture = False self.flame = FlamePreprocessor(flame_cfg) # prep = prep.to("cuda") def _preload_videos(self): # indices = np.unique(self.video_indices) for i in auto.tqdm( range(len(self.video_indices)), desc="Preloading videos" ): video_path = str(self._get_video_path(i)) if self.read_video: if video_path not in self.video_cache: self.video_cache[video_path] = vread(video_path) self.seg_cache[video_path] = self._read_segmentations(i) if self.read_audio: if video_path not in self.audio_cache: self.audio_cache[i] = self._read_audio(i) for lmk_type, lmk_source in zip(self.landmark_types, self.landmark_source): if i not in self.lmk_cache: self.lmk_cache[i] = {} if lmk_type not in self.lmk_cache[i]: self.lmk_cache[i][lmk_type] = {} # if lmk_source not in self.lmk_cache[i][lmk_type]: self.lmk_cache[i][lmk_type][lmk_source] = self._read_landmarks(i, lmk_type, lmk_source) if self.reconstruction_type is not None: for rec_type in self.reconstruction_type: shape_pose_cam, appearance = self._load_reconstructions(i, rec_type, self.return_appearance) if i not in self.rec_cache: self.rec_cache[i] = {} video_dict = self.rec_cache[i] if rec_type not in video_dict: video_dict[rec_type] = {} self.rec_cache[i][rec_type]["shape_pose_cam"] = shape_pose_cam self.rec_cache[i][rec_type]["appearance"] = appearance if self.emotion_type is not None: emotions, features = self._load_emotions(i, features=self.return_emotion_feature) if i not in self.emo_cache: self.emo_cache[i] = {} self.emo_cache[i]["emotions"] = emotions self.emo_cache[i]["features"] = features print("Video cache loaded") def _inflate_by_video_size(self): assert isinstance( self.sequence_length, int), "'sequence_length' must be an integer when inflating by video size" inflated_video_indices = [] video_sample_indices = [] for i in range(len(self.video_indices)): # for i in self.video_indices: idx = self.video_indices[i] num_frames = self._get_num_frames(i) if self.temporal_split_start is not None and self.temporal_split_end is not None: num_frames = int((self.temporal_split_end - self.temporal_split_start) * num_frames) num_samples_in_video = num_frames // self.sequence_length if num_frames % self.sequence_length != 0: num_samples_in_video += 1 num_samples_in_video = max(1, num_samples_in_video) inflated_video_indices += [idx] * num_samples_in_video 
video_sample_indices += list(range(num_samples_in_video)) self.video_indices = np.array(inflated_video_indices, dtype=np.int32) self.video_sample_indices = np.array(video_sample_indices, dtype=np.int32) def __getitem__(self, index): # max_attempts = 10 max_attempts = 50 for i in range(max_attempts): try: return self._getitem(index) except AssertionError as e: if not hasattr(self, "num_total_failed_attempts"): self.num_total_failed_attempts = 0 old_index = index index = np.random.randint(0, self.__len__()) tb = traceback.format_exc() if self.num_total_failed_attempts % 50 == 0: print(f"[ERROR] AssertionError in {self.__class__.__name__} dataset while retrieving sample {old_index}, retrying with new index {index}") print(f"In total, there has been {self.num_total_failed_attempts} failed attempts. This number should be very small. If it's not, check the data.") print("See the exception message for more details.") print(tb) self.num_total_failed_attempts += 1 print("[ERROR] Failed to retrieve sample after {} attempts".format(max_attempts)) raise RuntimeError("Failed to retrieve sample after {} attempts".format(max_attempts)) def _getitem(self, index): time = False if time: start_time = timeit.default_timer() if self.hack_length: index = index % self._true_len() # 1) VIDEO # load the video sample, start_frame, num_read_frames, video_fps, num_frames, num_available_frames = self._get_video(index) if time: video_read_time = timeit.default_timer() - start_time # 2) AUDIO if self.read_audio: sample = self._get_audio(index, start_frame, num_read_frames, video_fps, num_frames, sample) if time: audio_read_time = timeit.default_timer() - start_time - video_read_time # 3) LANDMARKS sample = self._get_landmarks(index, start_frame, num_read_frames, video_fps, num_frames, sample) if time: lmk_read_time = timeit.default_timer() - start_time - video_read_time - audio_read_time # 4) SEGMENTATIONS if self.read_video: sample = self._get_segmentations(index, start_frame, num_read_frames, video_fps, num_frames, sample) if time: seg_read_time = timeit.default_timer() - start_time - video_read_time - audio_read_time - lmk_read_time # 5) FACE ALIGNMENT IF ANY if self.read_video: sample = self._align_faces(index, sample) if time: face_align_time = timeit.default_timer() - start_time - video_read_time - audio_read_time - lmk_read_time - seg_read_time # 6) GEOMETRY if self.reconstruction_type is not None: sample = self._get_reconstructions(index, start_frame, num_read_frames, video_fps, num_frames, sample) if time: geom_read_time = timeit.default_timer() - start_time - video_read_time - audio_read_time - lmk_read_time - seg_read_time - face_align_time # 7) EMOTION if self.emotion_type is not None: sample = self._get_emotions(index, start_frame, num_read_frames, video_fps, num_frames, sample) if time: emo_read_time = timeit.default_timer() - start_time - video_read_time - audio_read_time - lmk_read_time - seg_read_time - face_align_time - geom_read_time # 8) AUGMENTATION if self.read_video: sample = self._augment_sequence_sample(index, sample) if time: aug_time = timeit.default_timer() - start_time - video_read_time - audio_read_time - lmk_read_time - seg_read_time - face_align_time - geom_read_time - emo_read_time # TO TORCH sample = to_torch(sample) # AUDIO NORMALIZATION (if any), this is a remnant from av-hubert and is not being used anywhere, will be removed in the future if self.read_audio: if self.include_processed_audio: if self.audio_normalization is not None: if self.audio_normalization == "layer_norm": 
sample["audio"] = F.layer_norm(sample["audio"], sample["audio"].shape[1:]) else: raise ValueError(f"Unsupported audio normalization {self.audio_normalization}") # audio_process_time = timeit.default_timer() - start_time - video_read_time - audio_read_time - lmk_read_time - seg_read_time - face_align_time - geom_read_time - emo_read_time - aug_time if self.read_video: # T,H,W,C to T,C,H,W sample["video"] = sample["video"].permute(0, 3, 1, 2) if "video_masked" in sample.keys(): sample["video_masked"] = sample["video_masked"].permute(0, 3, 1, 2) # sample["segmenation"] = sample["segmenation"].permute(0, 2, 1) # sample["segmentation_masked"] = sample["segmentation_masked"].permute(0, 2, 1) if self.return_mica_image: fan_landmarks = None landmarks_validity = None if "landmarks" in sample.keys(): if isinstance(sample["landmarks"], dict): if "fan3d" in sample["landmarks"].keys(): fan_landmarks = sample["landmarks"]["fan3d"] landmarks_validity = sample["landmarks_validity"]["fan3d"] elif "fan" in sample["landmarks"].keys(): fan_landmarks = sample["landmarks"]["fan"] landmarks_validity = sample["landmarks_validity"]["fan"] elif isinstance(sample["landmarks"], (np.ndarray, torch.Tensor)): if sample["landmarks"].shape[1] == 68: fan_landmarks = sample["landmarks"] landmarks_validity = sample["landmarks_validity"] sample["mica_video"] = self.mica_preprocessor(sample["video"], fan_landmarks, landmarks_validity=landmarks_validity) sample["mica_video_masked"] = self.mica_preprocessor(sample["video_masked"], fan_landmarks, landmarks_validity=landmarks_validity) # # normalize landmarks # if self.landmark_normalizer is not None: # if isinstance(self.landmark_normalizer, KeypointScale): # raise NotImplementedError("Landmark normalization is deprecated") # self.landmark_normalizer.set_scale( # img.shape[0] / input_img_shape[0], # img.shape[1] / input_img_shape[1]) # elif isinstance(self.landmark_normalizer, KeypointNormalization): # self.landmark_normalizer.set_scale(sample["video"].shape[2], sample["video"].shape[3]) # else: # raise ValueError(f"Unsupported landmark normalizer type: {type(self.landmark_normalizer)}") # for key in sample["landmarks"].keys(): # sample["landmarks"][key] = self.landmark_normalizer(sample["landmarks"][key]) if time: print(f"Video read time: {video_read_time:.2f} s") print(f"Audio read time: {audio_read_time:.2f} s") print(f"Landmark read time: {lmk_read_time:.2f} s") print(f"Segmentation read time: {seg_read_time:.2f} s") print(f"Face alignment time: {face_align_time:.2f} s") print(f"Geometry read time: {geom_read_time:.2f} s") print(f"Emotion read time: {emo_read_time:.2f} s") print(f"Augmentation time: {aug_time:.2f} s") # print(f"Audio process time: {audio_process_time:.2f} s") print(f"Total read time: {timeit.default_timer() - start_time:.2f} s") return sample def _get_video_path(self, index): if self.use_original_video: video_path = self.root_path / self.video_list[self.video_indices[index]] else: video_path = Path(self.output_dir) / "videos_aligned" / self.video_list[self.video_indices[index]] return video_path def _get_audio_path(self, index): audio_path = (Path(self.output_dir) / "audio" / self.video_list[self.video_indices[index]]).with_suffix(".wav") return audio_path def _get_num_frames(self, index): video_meta = self.video_metas[self.video_indices[index]] # print("Video path: ", video_path) # num video frames num_frames = video_meta["num_frames"] video_path = self._get_video_path(index) if num_frames == 0: # use ffprobe to get the number of frames num_frames = 
int(subprocess.check_output(["ffprobe", "-v", "error", "-select_streams", "v:0", "-count_packets", "-show_entries", "stream=nb_read_packets", "-of", "csv=p=0", str(video_path)])) if num_frames == 0: _vr = FFmpegReader(str(video_path)) num_frames = _vr.getShape()[0] del _vr return num_frames def _get_sample_length(self, index): if isinstance(self.sequence_length, int): # if sequence length set, use it return self.sequence_length elif isinstance(self.sequence_length, str): # otherwise use the one from the metadata if self.sequence_length == "all": if self.temporal_split_start is not None and self.temporal_split_end is not None: num_frames = self._get_num_frames(index) temporal_split_start_frame = int(self.temporal_split_start * num_frames) temporal_split_end_frame = int(self.temporal_split_end * num_frames) return temporal_split_end_frame - temporal_split_start_frame else: num_frames = self._get_num_frames(index) else: raise ValueError(f"Unsupported sequence length value: '{self.sequence_length}'") return num_frames raise def _get_video(self, index): video_path = self._get_video_path(index) video_meta = self.video_metas[self.video_indices[index]] # print("Video path: ", video_path) # num video frames num_frames = self._get_num_frames(index) assert num_frames > 0, "Number of frames is 0 for video {}".format(video_path) video_fps = video_meta["fps"] n1, n2 = video_fps.split("/") n1 = int(n1) n2 = int(n2) assert n1 % n2 == 0 video_fps = n1 // n2 # assert num_frames >= self.sequence_length, f"Video {video_path} has only {num_frames} frames, but sequence length is {self.sequence_length}" # TODO: handle the case when sequence length is longer than the video length sequence_length = self._get_sample_length(index) # pick the starting video frame if self.temporal_split_start is not None and self.temporal_split_end is not None: temporal_split_start = int(self.temporal_split_start * num_frames) temporal_split_end = int(self.temporal_split_end * num_frames) num_available_frames = temporal_split_end - temporal_split_start # start_frame = np.random.randint(temporal_split_start, temporal_split_end - sequence_length) else: temporal_split_start = 0 temporal_split_end = num_frames num_available_frames = num_frames if num_available_frames <= sequence_length: start_frame = temporal_split_start else: if self.video_sample_indices is None: # one video is one sample start_frame = np.random.randint(temporal_split_start, temporal_split_end - sequence_length) else: # one video is multiple samples (as many as the sequence length allows without repetition) start_frame = temporal_split_start + (self.video_sample_indices[index] * sequence_length) # start_frame = np.random.randint(0, num_frames - sequence_length) sample = {} if self.include_filename: sample["filename"] = str(video_path) sample["fps"] = video_fps # include the fps in the sample # TODO: picking the starting frame should probably be done a bit more robustly # (e.g. by ensuring the sequence has at least some valid landmarks) ... 
# maybe the video should be skipped altogether if it can't provide that # load the frames # frames = [] # for i in range(start_frame, start_frame + sequence_length): # frame_path = video_path / f"frame_{i:04d}.jpg" # frame = imread(str(frame_path)) # frames.append(frame) assert video_path.is_file(), f"Video {video_path} does not exist" num_read_frames = self._get_sample_length(index) num_read_frames_ = self._get_sample_length(index) if self.read_video: num_read_frames = 0 try: if not self.preload_videos: # import timeit # start_time = timeit.default_timer() # frames = vread(video_path.as_posix()) # end_time = timeit.default_timer() # print(f"Video read time: {end_time - start_time:.2f} s") # from decord import VideoReader # from decord import cpu, gpu # start_time = timeit.default_timer() vr = VideoReader(video_path.as_posix(), ctx=cpu(0), width=self.image_size, height=self.image_size) if len(vr) < sequence_length: sequence_length_ = len(vr) else: sequence_length_ = sequence_length frames = vr.get_batch(range(start_frame,(start_frame + sequence_length_))) frames = frames.asnumpy() if sequence_length_ < sequence_length: # pad with zeros if video shorter than sequence length frames = np.concatenate([frames, np.zeros((sequence_length - frames.shape[0], frames.shape[1], frames.shape[2], frames.shape[3]), dtype=frames.dtype)]) # end_time = timeit.default_timer() # print(f"Video read time: {end_time - start_time:.2f} s") else: frames = self.video_cache[video_path.as_posix()] assert len(frames) == num_frames, f"Video {video_path} has {len(frames)} frames, but meta says it has {num_frames}" frames = frames[start_frame:(start_frame + sequence_length)] num_read_frames = frames.shape[0] # # plot frames # import matplotlib.pyplot as plt # frame_idx = 0 # plt.figure() # plt.imshow(frames[frame_idx]) # plt.show() if frames.shape[0] < sequence_length: # pad with zeros if video shorter than sequence length frames = np.concatenate([frames, np.zeros((sequence_length - frames.shape[0], frames.shape[1], frames.shape[2]), dtype=frames.dtype)]) except ValueError: # reader = vreader(video_path.as_posix()) # create an opencv video reader reader = cv2.VideoCapture(video_path.as_posix()) fi = 0 frames = [] while fi < start_frame: fi += 1 # _ = next(reader) _, frame = reader.read() for i in range(sequence_length): # frames.append(next(reader)) if reader.isOpened(): _, frame = reader.read() if frame is None: # frame = np.zeros((self.image_size, self.image_size, 3), dtype=np.uint8) frame = np.zeros_like(frames[0]) frames.append(frame) continue num_read_frames += 1 # bgr to rgb frame = frame[:, :, ::-1] else: # if we ran out of frames, pad with black frame = np.zeros_like(frames[0]) frames.append(frame) reader.release() frames = np.stack(frames, axis=0) frames = frames.astype(np.float32) / 255.0 # sample = { sample["video"] = frames sample["frame_indices"] = np.arange(start_frame, start_frame + sequence_length, dtype=np.int32) if num_read_frames_ != num_read_frames: print(f"[Warning]: read {num_read_frames} frames instead of {num_read_frames_} for video {video_path}") return sample, start_frame, num_read_frames, video_fps, num_frames, num_available_frames def _read_audio(self, index): # audio_path = (Path(self.output_dir) / "audio" / self.video_list[self.video_indices[index]]).with_suffix(".wav") audio_path = self._get_audio_path(index) # audio_meta = self.audio_metas[self.video_indices[index]] # load the audio # if self.include_raw_audio: sampling_rate = 16000 wavdata, sampling_rate = librosa.load(audio_path, 
sr=sampling_rate) # wavdata, sampling_rate = librosa.load(audio_path, sr=sampling_rate) if wavdata.ndim > 1: wavdata = librosa.to_mono(wavdata) wavdata = (wavdata.astype(np.float64) * 32768.0).astype(np.int16) return wavdata, sampling_rate def _get_audio(self, index, start_frame, num_read_frames, video_fps, num_frames, sample): if self.preload_videos: wavdata, sampling_rate = self.audio_cache[index] else: wavdata, sampling_rate = self._read_audio(index) sequence_length = self._get_sample_length(index) # audio augmentation if np.random.rand() < self.audio_noise_prob: wavdata = self.add_noise(wavdata) if self.include_processed_audio: # sampling_rate, wavdata = wavfile.read(audio_path.as_posix()) # assert samplerate == 16000 and len(wavdata.shape) == 1 audio_feats = logfbank(wavdata, samplerate=sampling_rate).astype(np.float32) # [T (num audio frames), F (num filters)] # the audio feats frequency (and therefore num frames) is too high, so we stack them together to match num visual frames audio_feats = stacker(audio_feats, self.stack_order_audio) # audio_feats = audio_feats[start_frame:(start_frame + sequence_length)] audio_feats = audio_feats[start_frame:(start_frame + num_read_frames)] # temporal pad with zeros if necessary to match the desired video length if audio_feats.shape[0] < sequence_length: # concatente with zeros audio_feats = np.concatenate([audio_feats, np.zeros((sequence_length - audio_feats.shape[0], audio_feats.shape[1]), dtype=audio_feats.dtype)], axis=0) # stack the frames and audio feats together sample["audio"] = audio_feats if self.include_raw_audio: assert sampling_rate % video_fps == 0 wav_per_frame = sampling_rate // video_fps wavdata_ = np.zeros((num_frames, wav_per_frame), dtype=wavdata.dtype) wavdata_ = wavdata_.reshape(-1) if wavdata.size > wavdata_.size: wavdata_[...] 
= wavdata[:wavdata_.size] else: wavdata_[:wavdata.size] = wavdata wavdata_ = wavdata_.reshape((num_frames, wav_per_frame)) wavdata_ = wavdata_[start_frame:(start_frame + num_read_frames)] if wavdata_.shape[0] < sequence_length: # concatente with zeros wavdata_ = np.concatenate([wavdata_, np.zeros((sequence_length - wavdata_.shape[0], wavdata_.shape[1]), dtype=wavdata_.dtype)], axis=0) wavdata_ = wavdata_.astype(np.float64) / np.int16(np.iinfo(np.int16).max) # wavdata_ = np.zeros((sequence_length, samplerate // video_fps), dtype=wavdata.dtype) # wavdata_ = np.zeros((n * frames.shape[0]), dtype=wavdata.dtype) # wavdata_[:wavdata.shape[0]] = wavdata # wavdata_ = wavdata_.reshape((frames.shape[0], -1)) sample["raw_audio"] = wavdata_ sample["samplerate"] = sampling_rate return sample def _path_to_landmarks(self, index, landmark_type, landmark_source): return (Path(self.output_dir) / f"landmarks_{landmark_source}" / landmark_type / self.video_list[self.video_indices[index]]).with_suffix("") def _read_landmarks(self, index, landmark_type, landmark_source): landmarks_dir = self._path_to_landmarks(index, landmark_type, landmark_source) landmark_list = FaceDataModuleBase.load_landmark_list(landmarks_dir / f"landmarks_{landmark_source}.pkl") return landmark_list def _get_landmarks(self, index, start_frame, num_read_frames, video_fps, num_frames, sample): sequence_length = self._get_sample_length(index) landmark_dict = {} landmark_validity_dict = {} for lti, landmark_type in enumerate(self.landmark_types): landmark_source = self.landmark_source[lti] landmarks_dir = self._path_to_landmarks(index, landmark_type, landmark_source) landmarks = [] if (landmarks_dir / "landmarks.pkl").exists(): # landmarks are saved per video in a single file # landmark_list = FaceDataModuleBase.load_landmark_list(landmarks_dir / "landmarks.pkl") # landmark_list = FaceDataModuleBase.load_landmark_list(landmarks_dir / "landmarks_original.pkl") if not self.preload_videos: # landmark_list = FaceDataModuleBase.load_landmark_list(landm?arks_dir / f"landmarks_{landmark_source}.pkl") landmark_list = self._read_landmarks(index, landmark_type, landmark_source) # landmark_types = FaceDataModuleBase.load_landmark_list(landmarks_dir / "landmark_types.pkl") else: landmark_list = self.lmk_cache[index][landmark_type][landmark_source] # landmark_types = self.lmk_cache[index]["landmark_types"] landmarks = landmark_list[start_frame: sequence_length + start_frame] landmark_validity = np.ones((len(landmarks), 1), dtype=np.float32) for li in range(len(landmarks)): if len(landmarks[li]) == 0: # dropped detection if landmark_type == "mediapipe": # [WARNING] mediapipe landmarks coordinates are saved in the scale [0.0-1.0] (for absolute they need to be multiplied by img size) landmarks[li] = np.zeros((MEDIAPIPE_LANDMARK_NUMBER, 3)) elif landmark_type in ["fan", "kpt68"]: landmarks[li] = np.zeros((68, 2)) else: raise ValueError(f"Unknown landmark type '{landmark_type}'") landmark_validity[li] = 0. 
elif len(landmarks[li]) > 1: # multiple faces detected landmarks[li] = landmarks[li][0] # just take the first one for now else: \ landmarks[li] = landmarks[li][0] # # pad landmarks with zeros if necessary to match the desired video length # # if landmarks.shape[0] < sequence_length: # if len(landmarks) < sequence_length: # # concatente with zeros # landmarks += [np.zeros((landmarks.shape[1]))] * (sequence_length - len(landmarks)) # landmarks = np.concatenate([landmarks, np.zeros((sequence_length - landmarks.shape[0], landmarks.shape[1]))], axis=0) # landmark_validity = np.concatenate([landmark_validity, np.zeros((sequence_length - landmark_validity.shape[0]), dtype=np.bool)], axis=0) else: # landmarks are saved per frame landmark_validity = np.ones((len(landmarks), 1), dtype=np.float32) for i in range(start_frame, sequence_length + start_frame): landmark_path = landmarks_dir / f"{i:05d}_000.pkl" landmark_type, landmark = load_landmark(landmark_path) landmarks += [landmark] if len(landmark) == 0: # dropped detection landmark = [0, 0] landmark_validity[li] = 0. elif len(landmark) > 1: # multiple faces detected landmarks[li] = landmarks[li][0] # just take the first one for now else: landmark[li] = landmarks[li][0] landmarks = np.stack(landmarks, axis=0) # if landmark_type == "mediapipe" and self.align_images: # # # [WARNING] mediapipe landmarks coordinates are saved in the scale [0.0-1.0] (for absolute they need to be multiplied by img size) # # # landmarks -= 0.5 # # landmarks -= 1. # landmarks *= 2 # # # landmarks *= 2 # landmarks -= 1 # pad landmarks with zeros if necessary to match the desired video length if landmarks.shape[0] < sequence_length: landmarks = np.concatenate([landmarks, np.zeros( (sequence_length - landmarks.shape[0], *landmarks.shape[1:]), dtype=landmarks.dtype)], axis=0) landmark_validity = np.concatenate([landmark_validity, np.zeros((sequence_length - landmark_validity.shape[0], 1), dtype=landmark_validity.dtype)], axis=0) landmark_dict[landmark_type] = landmarks.astype(np.float32) landmark_validity_dict[landmark_type] = landmark_validity sample["landmarks"] = landmark_dict sample["landmarks_validity"] = landmark_validity_dict return sample def _path_to_segmentations(self, index): return (Path(self.output_dir) / f"segmentations_{self.segmentation_source}" / self.segmentation_type / self.video_list[self.video_indices[index]]).with_suffix("") def _read_segmentations(self, index, start_frame=None, end_frame=None): segmentations_dir = self._path_to_segmentations(index) if (segmentations_dir / "segmentations.hdf5").exists(): # if random access hdf5 exists (newest), let's use it segmentations, seg_types, seg_names = load_segmentation_list_v2(segmentations_dir / "segmentations.hdf5", start_frame, end_frame) elif (segmentations_dir / "segmentations.pkl").exists(): # segmentations are saved in a single pickle (no random access) segmentations, seg_types, seg_names = load_segmentation_list(segmentations_dir / "segmentations.pkl") if start_frame is not None and end_frame is not None: segmentations = segmentations[start_frame: end_frame] seg_types = seg_types[start_frame: end_frame] seg_names = seg_names[start_frame: end_frame] if isinstance(segmentations, list): segmentations = np.stack(segmentations, axis=0) if segmentations.ndim == 4: # T, C=1, W, H segmentations = segmentations[:,0,...] 
if isinstance(seg_types[0], bytes): seg_types = [seg_type.decode("utf-8") for seg_type in seg_types] if isinstance(seg_names[0], bytes): seg_names = [seg_name.decode("utf-8") for seg_name in seg_names] return segmentations, seg_types, seg_names def _retrieve_segmentations(self, index, start_frame, end_frame): if not self.preload_videos: # segmentations_dir = self._path_to_segmentations(index) # if (segmentations_dir / "segmentations.hdf5").exists(): # random access hdf5 exists, let's use it # segmentations, seg_types, seg_names = load_segmentation_list_v2(segmentations_dir / "segmentations.hdf5", start_frame, end_frame) # elif (segmentations_dir / "segmentations.pkl").exists(): # segmentations are saved in a single pickle # seg_images, seg_types, seg_names = load_segmentation_list(segmentations_dir / "segmentations.pkl") # segmentations = seg_images[start_frame: end_frame] # if isinstance(seg_images, list): # segmentations = np.stack(seg_images, axis=0) # if seg_images.ndim == 4: # T, C=1, W, H # segmentations = segmentations[:,0,...] segmentations, seg_types, seg_names = self._read_segmentations(index, start_frame, end_frame) return segmentations, seg_types, seg_names else: video_path = str(self._get_video_path(index)) segmentations, seg_types, seg_names = self.seg_cache[video_path] segmentations = segmentations[start_frame: end_frame] seg_types = seg_types[start_frame: end_frame] seg_names = seg_names[start_frame: end_frame] return segmentations, seg_types, seg_names def _load_reconstructions(self, index, rec_type, appearance=False, start_frame=None, end_frame=None): reconstructions_dir = self._path_to_reconstructions(index, rec_type) if (reconstructions_dir / "shape_pose_cam.hdf5").exists(): # random access hdf5 exists, let's use it shape_pose_cam = load_reconstruction_list_v2(reconstructions_dir / "shape_pose_cam.hdf5", start_frame=start_frame, end_frame=end_frame) if appearance: appearance = load_reconstruction_list_v2(reconstructions_dir / "appearance.hdf5", start_frame=start_frame, end_frame=end_frame) else: appearance = None elif (reconstructions_dir / "shape_pose_cam.pkl").exists(): # reconstructions are saved in a single pickle shape_pose_cam = load_reconstruction_list(reconstructions_dir / "shape_pose_cam.pkl", start_frame=start_frame, end_frame=end_frame) if appearance: appearance = load_reconstruction_list(reconstructions_dir / "appearance.pkl", start_frame=start_frame, end_frame=end_frame) else: appearance = None ## should no longer be necessary as the start/end frame is now handled in the load_reconstruction_list function # if start_frame is not None and end_frame is not None: # shape_pose_cam = {key: shape_pose_cam[key][:, start_frame: end_frame] for key in shape_pose_cam.keys()} # if appearance is not None: # appearance = {key: appearance[key][:, start_frame: end_frame] for key in appearance.keys()} else: raise RuntimeError(f"Reconstruction file not found in {reconstructions_dir}") # for key in shape_pose_cam.keys(): # shape_pose_cam[key] = np.copy(shape_pose_cam[key]) # for key in appearance.keys(): # appearance[key] = np.copy(appearance[key]) return shape_pose_cam, appearance def _load_emotions(self, index, features=False, start_frame=None, end_frame=None): emotions_dir = self._path_to_emotions(index) if (emotions_dir / "emotions.hdf5").exists(): # random access hdf5 exists, let's use it
emotions = load_emotion_list_v2(emotions_dir / "emotions.hdf5", start_frame, end_frame)
13
2023-11-07 20:13:32+00:00
16k
hxz393/ConfigCenterComparer
ui/action_compare.py
[ { "identifier": "get_resource_path", "path": "lib/get_resource_path.py", "snippet": "def get_resource_path(relative_path: Union[str, os.PathLike]) -> Optional[str]:\n \"\"\"\n 获取资源的绝对路径。这个函数适用于 PyInstaller 打包后的可执行文件。\n\n :type relative_path: Union[str, os.PathLike]\n :param relative_path: 相对...
import logging from typing import Dict, Optional, List from PyQt5.QtCore import QObject, pyqtSignal from PyQt5.QtGui import QIcon from PyQt5.QtWidgets import QAction from lib.get_resource_path import get_resource_path from lib.log_time import log_time from ui.config_manager import ConfigManager from ui.dialog_comparison import DialogComparison from ui.global_signals import global_signals from ui.lang_manager import LangManager from ui.message_show import message_show from ui.table_main import TableMain
12167
""" 本文件包含用于处理和比较配置数据的类和函数。 该模块主要包含 `ActionCompare` 类,用于在用户界面中处理数据比较的逻辑。该类提供了对比配置数据、更新界面语言、重组数据等功能,方便用户进行环境配置的对比分析。 :author: assassing :contact: https://github.com/hxz393 :copyright: Copyright 2023, hxz393. 保留所有权利。 """ logger = logging.getLogger(__name__) class ActionCompare(QObject): """ 提供数据比较功能的类。 该类负责处理用户界面中的数据对比逻辑,包括初始化UI组件、更新语言设置、执行数据对比等功能。它还负责处理各种事件和信号,并更新用户界面状态。 :param lang_manager: 语言管理器实例,用于处理界面语言更新。 :type lang_manager: LangManager :param config_manager: 配置管理器,用于获取网络测试相关配置。 :type config_manager: ConfigManager :param table: 主界面表格实例,提供数据获取和显示功能。 :type table: TableMain """ status_updated = pyqtSignal(str) def __init__(self, lang_manager: LangManager, config_manager: ConfigManager, table: TableMain): super().__init__() self.lang_manager = lang_manager self.lang_manager.lang_updated.connect(self.update_lang) self.config_manager = config_manager self.table = table self.initUI() def initUI(self) -> None: """ 初始化用户界面组件。 :rtype: None :return: 无返回值。 """ self.action_compare = QAction(QIcon(get_resource_path('media/icons8-diff-files-26')), 'Compare') self.action_compare.setShortcut('F8') # 为了记录运行时间,使用匿名函数 self.action_compare.triggered.connect(lambda checked=False: self.compare()) self.update_lang() def update_lang(self) -> None: """ 更新界面语言设置。 :rtype: None :return: 无返回值。 """ self.lang = self.lang_manager.get_lang() self.action_compare.setText(self.lang['ui.action_compare_1']) self.action_compare.setStatusTip(self.lang['ui.action_compare_2'])
""" 本文件包含用于处理和比较配置数据的类和函数。 该模块主要包含 `ActionCompare` 类,用于在用户界面中处理数据比较的逻辑。该类提供了对比配置数据、更新界面语言、重组数据等功能,方便用户进行环境配置的对比分析。 :author: assassing :contact: https://github.com/hxz393 :copyright: Copyright 2023, hxz393. 保留所有权利。 """ logger = logging.getLogger(__name__) class ActionCompare(QObject): """ 提供数据比较功能的类。 该类负责处理用户界面中的数据对比逻辑,包括初始化UI组件、更新语言设置、执行数据对比等功能。它还负责处理各种事件和信号,并更新用户界面状态。 :param lang_manager: 语言管理器实例,用于处理界面语言更新。 :type lang_manager: LangManager :param config_manager: 配置管理器,用于获取网络测试相关配置。 :type config_manager: ConfigManager :param table: 主界面表格实例,提供数据获取和显示功能。 :type table: TableMain """ status_updated = pyqtSignal(str) def __init__(self, lang_manager: LangManager, config_manager: ConfigManager, table: TableMain): super().__init__() self.lang_manager = lang_manager self.lang_manager.lang_updated.connect(self.update_lang) self.config_manager = config_manager self.table = table self.initUI() def initUI(self) -> None: """ 初始化用户界面组件。 :rtype: None :return: 无返回值。 """ self.action_compare = QAction(QIcon(get_resource_path('media/icons8-diff-files-26')), 'Compare') self.action_compare.setShortcut('F8') # 为了记录运行时间,使用匿名函数 self.action_compare.triggered.connect(lambda checked=False: self.compare()) self.update_lang() def update_lang(self) -> None: """ 更新界面语言设置。 :rtype: None :return: 无返回值。 """ self.lang = self.lang_manager.get_lang() self.action_compare.setText(self.lang['ui.action_compare_1']) self.action_compare.setStatusTip(self.lang['ui.action_compare_2'])
@log_time
1
2023-11-07 01:02:38+00:00
16k
pytorch-labs/ao
test/test.py
[ { "identifier": "DynamicallyPerAxisQuantizedLinear", "path": "torchao/quantization/dynamic_quant.py", "snippet": "class DynamicallyPerAxisQuantizedLinear(torch.nn.Linear):\n \"\"\"\n This class is a replacement for `torch.nn.Linear`. It implements a\n quantized matmul using int8 dynamic symmetr...
import copy import unittest import torch import torch.nn as nn import os from torch._inductor.utils import run_and_get_code from torch._dynamo import config from torch.ao.quantization import MinMaxObserver, QConfigMapping from torchao.quantization.dynamic_quant import ( DynamicallyPerAxisQuantizedLinear, ) from torchao.quantization.quant_api import ( apply_dynamic_quant, apply_weight_only_int8_quant, change_linear_weights_to_int8_dqtensors, change_linear_weights_to_int8_woqtensors, change_linear_weights_to_int4_woqtensors, _replace_with_custom_fn_if_matches_filter, ) from torchao.quantization.quant_primitives import ( dequantize_per_channel, dequantize_per_tensor, dynamically_quantize_per_channel, dynamically_quantize_per_tensor, quant_int8_dynamic_linear, quant_int8_dynamic_per_token_linear, quantize_activation_per_token_absmax, safe_int_mm, ) from torchao.quantization.smoothquant import ( get_scale, smooth_fq_linear_to_inference, SmoothFakeDynamicallyQuantizedLinear, swap_linear_with_smooth_fq_linear, ) from torchao.quantization.subclass import ( Int8DynamicallyQuantizedLinearWeight, Int8WeightOnlyQuantizedLinearWeight, Int4WeightOnlyQuantizedLinearWeight ) from torchao.quantization.utils import ( _apply_logging_hook, compute_error, compute_error as SQNR, _fqn_to_op_to_shape_to_count, LoggingTensorMode, ) from torch.ao.quantization.quantize_fx import convert_to_reference_fx, prepare_fx from transformers import ( # type: ignore[import-untyped] DistilBertModel, DistilBertTokenizer, )
11462
x = torch.randn(32, 32, dtype=dtype, device=device) y_calib_eager_t = lin_eager_t(x) y_calib_opt_t = lin_opt_t(x) y_calib_opt = lin_opt(x) torch.testing.assert_close(y_calib_eager_t, y_calib_opt_t) torch.testing.assert_close(y_calib_eager_t, y_calib_opt) lin_eager_t.to_inference() lin_opt_t.to_inference() lin_opt.to_inference() torch.testing.assert_close(lin_eager_t.W_int_repr, lin_opt_t.W_int_repr) torch.testing.assert_close(lin_eager_t.W_int_repr, lin_opt.W_int_repr) lin_opt_t = torch.compile(lin_opt_t, mode="max-autotune") lin_opt = torch.compile(lin_opt, mode="max-autotune") y_ref = lin_ref(x) y_eager = lin_eager_t(x) y_opt_t = lin_opt_t(x) y_opt = lin_opt(x) if not torch.any(torch.isinf(y_ref)) and torch.any(torch.isinf(y_eager)): # eager mode torch._int_mm is sometimes buggy, when this happens # we can't really compare the compiled version against it properly print("eager mode torch._int_mm known bad, test is inconclusive") return sqnr_ref_eager = compute_error(y_ref, y_eager) sqnr_eager_opt_t = compute_error(y_eager, y_opt_t) sqnr_eager_opt = compute_error(y_eager, y_opt) # since torch.compile for a torch.half model can # change numerics significantly, we can only test for a high SQNR here # and not for closeness self.assertTrue(sqnr_eager_opt_t >= 45.0) self.assertTrue(sqnr_eager_opt >= 45.0) # y_opt_t and y_opt should be equivalent torch.testing.assert_close(y_opt_t, y_opt) def test_selective_torch_compile(self): m = nn.Sequential( nn.Linear(4, 4), nn.Sequential( nn.Linear(4, 4), nn.Linear(4, 4), ), nn.Linear(4, 4), ) x = torch.randn(4, 4) y_ref = m(x) _replace_with_custom_fn_if_matches_filter( m, lambda mod: torch.compile(mod), lambda mod, fqn: isinstance(mod, nn.Linear) and fqn != "1.0", ) self.assertTrue(isinstance(m[0], torch._dynamo.eval_frame.OptimizedModule)) self.assertTrue(isinstance(m[1][0], nn.Linear)) self.assertTrue(isinstance(m[1][1], torch._dynamo.eval_frame.OptimizedModule)) self.assertTrue(isinstance(m[2], torch._dynamo.eval_frame.OptimizedModule)) y = m(x) torch.testing.assert_close(y, y_ref) def test_debug_x_absmax(self): m = nn.Sequential(nn.Linear(3, 4)) x0 = torch.randn(4, 5, 3) y0 = m(x0) swap_linear_with_smooth_fq_linear(m) # no calibration, straight to inference, should not crash smooth_fq_linear_to_inference(m, debug_skip_calibration=True) y1 = m(x0) class PythonQuantPrimitivesUnitTest(unittest.TestCase): def _test_dynamic_quant_per_tensor_numerics_impl( self, qmin, qmax, int_dtype, qint_dtype, float_dtype, device, qscheme ): x = torch.randn(256, dtype=float_dtype, device=device) y_vals, y_scale, y_zero_point = dynamically_quantize_per_tensor( x, qmin, qmax, int_dtype, qscheme ) # reference # quantize_per_tensor_dynamic doesn't work for half, so we cast there and back x_for_ref = x.half().float() if float_dtype == torch.float16 else x # quantize_per_tensor_dynamic doesn't support qscheme, so we just do dynamic # quant manually with observers + static quant obs = MinMaxObserver( dtype=qint_dtype, qscheme=qscheme, quant_min=qmin, quant_max=qmax ).to(device) obs(x_for_ref) ref_scale, ref_zero_point = obs.calculate_qparams() y_ref = torch.quantize_per_tensor( x_for_ref, ref_scale, ref_zero_point, qint_dtype ) # y_ref = torch.quantize_per_tensor_dynamic(x_for_ref, qint_dtype, False) # print(y_ref) if float_dtype == torch.float: assert torch.equal(y_vals, y_ref.int_repr()) else: # numerics are not exactly aligned yet, off-by-one probably due # to rounding assert torch.max(torch.abs(y_vals - y_ref.int_repr())).item() <= 1 torch.testing.assert_close( y_scale, 
torch.tensor([y_ref.q_scale()], device=device, dtype=float_dtype) ) if y_zero_point is not None: assert torch.equal( y_zero_point, torch.tensor([y_ref.q_zero_point()], device=device) ) else: self.assertTrue(y_ref.q_zero_point() == 0) # dequantize and check again
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # mypy: ignore-errors torch.manual_seed(0) config.cache_size_limit = 100 class SmoothquantUnitTest(unittest.TestCase): # first, let's reproduce the graphic from the paper, Figure 4, to ensure # we are calculating the scales correctly def test_figure_4(self): X = torch.FloatTensor([1, -16, 2, 6, -2, 8, -1, -9]).reshape(1, 2, 4) W = torch.FloatTensor([2, 1, -2, 1, -1, -1, 2, -1, -2, -1, -1, 1]).reshape(4, 3) X_mul_W = torch.matmul(X, W) smoothquant_scale = get_scale( torch.amax(torch.abs(X), dim=(0, 1)), torch.amax(torch.abs(W), dim=1), alpha=0.5, ) # reproduce scaled calculation X_scaled = X / smoothquant_scale.reshape(1, 1, -1) W_scaled = torch.matmul(torch.diag(smoothquant_scale), W) X_scaled_mul_scaled_W = torch.matmul(X_scaled, W_scaled) assert torch.allclose(X_mul_W, X_scaled_mul_scaled_W), "not close!" assert X_mul_W.shape == X_scaled_mul_scaled_W.shape # next, run the above test on a sample of representative inputs def test_tensors(self): x_shape = (1, 5, 7) w_shape = (7, 9) for i in range(3): X = torch.randn(x_shape) * 10 W = torch.randn(w_shape) s = get_scale( torch.amax(torch.abs(X), dim=(0, 1)), torch.amax(torch.abs(W), dim=1), alpha=0.5, ) Y = torch.matmul(X, W) Y_ref = torch.matmul( X / s.reshape(1, 1, -1), torch.matmul(torch.diag(s), W), ) assert torch.allclose(Y, Y_ref, atol=1e-3, rtol=1e-3), "not close!" def _test_smooth_linear_impl(self, x_shape, lin_shape, device): # so we can use the full range torch.backends.quantized.engine = "qnnpack" x = torch.randn(*x_shape, device=device) * 9 + 10 lin_fp32 = nn.Linear(*lin_shape, device=device) # misc: ignore lin_smooth = SmoothFakeDynamicallyQuantizedLinear.from_float( copy.deepcopy(lin_fp32), alpha=0.25 ) lin_smooth_skip_scaling = SmoothFakeDynamicallyQuantizedLinear.from_float( copy.deepcopy(lin_fp32), alpha=0.25 ) lin_fp32_copy = copy.deepcopy(lin_fp32) # assignment: ignore lin_fp32_copy.qconfig = torch.ao.quantization.QConfig( # assignment: ignore activation=None, weight=torch.ao.quantization.default_per_channel_weight_observer, ) lin_dynamic_q = torch.ao.nn.quantized.dynamic.Linear.from_float( lin_fp32_copy.cpu() ) y_ref = lin_fp32(x) # calibrate the smoothquant versions y_smooth_nocalib = lin_smooth(x) _ = lin_smooth_skip_scaling(x) lin_smooth.to_inference() lin_smooth_skip_scaling.debug_skip_scaling = True lin_smooth_skip_scaling.to_inference() # verify that with scaling turned off, numerics match quantized version y_smooth_fq_only = lin_smooth_skip_scaling(x) y_smooth_fq = lin_smooth(x) y_dynamic_q = lin_dynamic_q(x.cpu()).to(device) # print('y_ref', y_ref) # print('y_smooth_nocalib', y_smooth_nocalib) # print('y_smooth_fq', y_smooth_fq) # print('y_smooth_fq_only', y_smooth_fq_only) # print('y_dynamic_q', y_dynamic_q) sqnr_smooth_fq = compute_error(y_ref, y_smooth_fq) sqnr_dynamic_q = compute_error(y_ref, y_dynamic_q) sqnr_fq = compute_error(y_smooth_fq_only, y_dynamic_q) # print('sqnr_smooth', sqnr_smooth_fq, 'sqnr_dynamic', sqnr_dynamic_q, 'sqnr_fq', sqnr_fq) assert torch.allclose( y_ref, y_smooth_nocalib ), "y_ref not close to y_smooth_nocalib" # after https://github.com/pytorch-labs/ao_benchmarks/pull/32, # numerics do not match exactly between production c++ code # and this Python code # assert torch.allclose( # y_smooth_fq_only, y_dynamic_q, # atol=torch.max(y_smooth_fq_only).item()*0.01, # rtol=0.00001), \ # 
'y_smooth_fq_only not close to y_dynamic_q' self.assertTrue(sqnr_smooth_fq.item() >= 40.0) self.assertTrue(sqnr_dynamic_q.item() >= 40.0) self.assertTrue(sqnr_fq.item() >= 40.0) def test_smooth_linear_cpu(self): self._test_smooth_linear_impl((1, 5, 3), (3, 4), "cpu") def test_smooth_linear_cuda(self): if not torch.cuda.is_available(): print("no cuda, skip") return self._test_smooth_linear_impl((1, 32, 32), (32, 16), "cuda") def test_smooth_linear_edge_cases(self): # so we can use the full range torch.backends.quantized.engine = "qnnpack" lin_fp32 = nn.Linear(3, 4) lin_smooth = SmoothFakeDynamicallyQuantizedLinear.from_float( lin_fp32, alpha=0.25 ) # test different ranks x0 = torch.randn(4, 5, 3) x1 = torch.randn(1, 8, 5, 3) x2 = torch.randn(2, 3, 7, 5, 3) # calibrate _ = lin_smooth(x0) _ = lin_smooth(x1) _ = lin_smooth(x2) # inference lin_smooth.to_inference() _ = lin_smooth(x0) _ = lin_smooth(x1) _ = lin_smooth(x2) def test_swap(self): m = nn.Sequential( nn.Sequential(nn.Linear(4, 4), nn.ReLU(), nn.Linear(4, 4)), nn.Linear(4, 4), ) m_copy = copy.deepcopy(m) swap_linear_with_smooth_fq_linear(m_copy, skip_fqn_list=["0.2"]) # verify all linears are swapped assert isinstance(m_copy[0][0], SmoothFakeDynamicallyQuantizedLinear) assert isinstance(m_copy[0][1], nn.ReLU) # this one was skipped assert isinstance(m_copy[0][2], nn.Linear) assert isinstance(m_copy[1], SmoothFakeDynamicallyQuantizedLinear) # verify results do not change without smoothing x = torch.randn(4, 4) y_ref = m(x) y = m_copy(x) assert torch.allclose(y_ref, y) def test_weight_t_and_non_t_numerics_match(self): # verify that numerics match whether weight is stored # in transposed format (for cuBLAS) vs non-transposed format # (for torch.compile) if not torch.cuda.is_available(): print("no cuda, skip") return dtype = torch.half device = "cuda" lin_ref = nn.Linear(32, 16, dtype=dtype, device=device) lin_eager_t = copy.deepcopy(lin_ref) lin_opt_t = copy.deepcopy(lin_eager_t) lin_opt = copy.deepcopy(lin_eager_t) lin_eager_t = SmoothFakeDynamicallyQuantizedLinear.from_float(lin_eager_t) lin_opt_t = SmoothFakeDynamicallyQuantizedLinear.from_float(lin_opt_t) lin_opt = SmoothFakeDynamicallyQuantizedLinear.from_float(lin_opt) lin_opt.store_w_int_repr_t = False x = torch.randn(32, 32, dtype=dtype, device=device) y_calib_eager_t = lin_eager_t(x) y_calib_opt_t = lin_opt_t(x) y_calib_opt = lin_opt(x) torch.testing.assert_close(y_calib_eager_t, y_calib_opt_t) torch.testing.assert_close(y_calib_eager_t, y_calib_opt) lin_eager_t.to_inference() lin_opt_t.to_inference() lin_opt.to_inference() torch.testing.assert_close(lin_eager_t.W_int_repr, lin_opt_t.W_int_repr) torch.testing.assert_close(lin_eager_t.W_int_repr, lin_opt.W_int_repr) lin_opt_t = torch.compile(lin_opt_t, mode="max-autotune") lin_opt = torch.compile(lin_opt, mode="max-autotune") y_ref = lin_ref(x) y_eager = lin_eager_t(x) y_opt_t = lin_opt_t(x) y_opt = lin_opt(x) if not torch.any(torch.isinf(y_ref)) and torch.any(torch.isinf(y_eager)): # eager mode torch._int_mm is sometimes buggy, when this happens # we can't really compare the compiled version against it properly print("eager mode torch._int_mm known bad, test is inconclusive") return sqnr_ref_eager = compute_error(y_ref, y_eager) sqnr_eager_opt_t = compute_error(y_eager, y_opt_t) sqnr_eager_opt = compute_error(y_eager, y_opt) # since torch.compile for a torch.half model can # change numerics significantly, we can only test for a high SQNR here # and not for closeness self.assertTrue(sqnr_eager_opt_t >= 45.0) 
self.assertTrue(sqnr_eager_opt >= 45.0) # y_opt_t and y_opt should be equivalent torch.testing.assert_close(y_opt_t, y_opt) def test_selective_torch_compile(self): m = nn.Sequential( nn.Linear(4, 4), nn.Sequential( nn.Linear(4, 4), nn.Linear(4, 4), ), nn.Linear(4, 4), ) x = torch.randn(4, 4) y_ref = m(x) _replace_with_custom_fn_if_matches_filter( m, lambda mod: torch.compile(mod), lambda mod, fqn: isinstance(mod, nn.Linear) and fqn != "1.0", ) self.assertTrue(isinstance(m[0], torch._dynamo.eval_frame.OptimizedModule)) self.assertTrue(isinstance(m[1][0], nn.Linear)) self.assertTrue(isinstance(m[1][1], torch._dynamo.eval_frame.OptimizedModule)) self.assertTrue(isinstance(m[2], torch._dynamo.eval_frame.OptimizedModule)) y = m(x) torch.testing.assert_close(y, y_ref) def test_debug_x_absmax(self): m = nn.Sequential(nn.Linear(3, 4)) x0 = torch.randn(4, 5, 3) y0 = m(x0) swap_linear_with_smooth_fq_linear(m) # no calibration, straight to inference, should not crash smooth_fq_linear_to_inference(m, debug_skip_calibration=True) y1 = m(x0) class PythonQuantPrimitivesUnitTest(unittest.TestCase): def _test_dynamic_quant_per_tensor_numerics_impl( self, qmin, qmax, int_dtype, qint_dtype, float_dtype, device, qscheme ): x = torch.randn(256, dtype=float_dtype, device=device) y_vals, y_scale, y_zero_point = dynamically_quantize_per_tensor( x, qmin, qmax, int_dtype, qscheme ) # reference # quantize_per_tensor_dynamic doesn't work for half, so we cast there and back x_for_ref = x.half().float() if float_dtype == torch.float16 else x # quantize_per_tensor_dynamic doesn't support qscheme, so we just do dynamic # quant manually with observers + static quant obs = MinMaxObserver( dtype=qint_dtype, qscheme=qscheme, quant_min=qmin, quant_max=qmax ).to(device) obs(x_for_ref) ref_scale, ref_zero_point = obs.calculate_qparams() y_ref = torch.quantize_per_tensor( x_for_ref, ref_scale, ref_zero_point, qint_dtype ) # y_ref = torch.quantize_per_tensor_dynamic(x_for_ref, qint_dtype, False) # print(y_ref) if float_dtype == torch.float: assert torch.equal(y_vals, y_ref.int_repr()) else: # numerics are not exactly aligned yet, off-by-one probably due # to rounding assert torch.max(torch.abs(y_vals - y_ref.int_repr())).item() <= 1 torch.testing.assert_close( y_scale, torch.tensor([y_ref.q_scale()], device=device, dtype=float_dtype) ) if y_zero_point is not None: assert torch.equal( y_zero_point, torch.tensor([y_ref.q_zero_point()], device=device) ) else: self.assertTrue(y_ref.q_zero_point() == 0) # dequantize and check again
x_dq = dequantize_per_tensor(y_vals, y_scale, y_zero_point, float_dtype)
8
2023-11-03 21:27:36+00:00
16k
intellerce/controlanimate
animatediff/pipelines/controlanimation_pipeline.py
[ { "identifier": "UNet3DConditionModel", "path": "animatediff/models/unet.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Opti...
import inspect import numpy as np import torch import PIL import math from typing import Callable, List, Optional, Union, Dict, Any from dataclasses import dataclass from tqdm import tqdm from diffusers.utils import is_accelerate_available from packaging import version from transformers import CLIPTextModel, CLIPTokenizer from diffusers.configuration_utils import FrozenDict from diffusers.models import AutoencoderKL from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, UniPCMultistepScheduler ) from diffusers.utils import BaseOutput from diffusers.utils import ( deprecate, logging, # replace_example_docstring, # scale_lora_layers, # unscale_lora_layers, ) from einops import rearrange from ..models.unet import UNet3DConditionModel from ..utils.util import preprocess_image from diffusers.utils.torch_utils import randn_tensor from modules.controlresiduals_pipeline import MultiControlNetResidualsPipeline from diffusers.image_processor import VaeImageProcessor from diffusers.loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin from diffusers.models.lora import adjust_lora_scale_text_encoder from diffusers.utils.torch_utils import randn_tensor from diffusers import SchedulerMixin, ConfigMixin from typing import Any, Dict, List, Optional, Tuple, Union from diffusers.configuration_utils import register_to_config from accelerate import cpu_offload
12605
# check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, callback_steps): if not isinstance(prompt, str) and not isinstance(prompt, list): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) def prepare_latents(self, input_frames, batch_size, num_channels_latents, video_length, height, width, dtype, device, generator, latent_timestep, overlaps, strength, latents=None, last_output_frames = None, use_lcm = False, use_img2img = False, ): shape = (batch_size, num_channels_latents, video_length, height // self.vae_scale_factor, width // self.vae_scale_factor) latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) # torch.randn(shape, generator=generator, dtype=dtype) #.to(device) , device=rand_device last_output_frames_latents = [] if overlaps > 0 or strength < 1 or use_lcm: frames_latents = [] if input_frames is not None: for i , frame in enumerate(input_frames): image = frame image = self.image_processor.preprocess(image) if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): raise ValueError( f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" ) image = image.to(device=device, dtype=dtype) frame_latents = self.vae.encode(image).latent_dist.sample(generator) frame_latents = self.vae.config.scaling_factor * frame_latents frames_latents.append(frame_latents) if last_output_frames is not None: for image in last_output_frames: image = self.image_processor.preprocess(image) if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): raise ValueError( f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" ) image = image.to(device=device, dtype=dtype) frame_latents = self.vae.encode(image).latent_dist.sample(generator) frame_latents = self.vae.config.scaling_factor * frame_latents last_output_frames_latents.append(frame_latents) latents = latents.to(device) if use_lcm: frames_latents_tensor = torch.stack(frames_latents,dim=2) latents = self.scheduler.add_noise(frames_latents_tensor, latents.to(device), latent_timestep) elif last_output_frames is not None and strength < 1.0: # for i in range(len(last_output_frames_latents)): for i in range(video_length): if i < len(last_output_frames): latents[:, :, i, :, :] = self.scheduler.add_noise(last_output_frames_latents[i], latents[:, :, i, :, :].to(device), latent_timestep) else: if not use_img2img: latents[:, :, i, :, :] = self.scheduler.add_noise(last_output_frames_latents[-1], latents[:, :, i, :, :].to(device), latent_timestep) else: latents[:, :, i, :, :] = self.scheduler.add_noise(frames_latents[i], latents[:, :, i, :, :].to(device), latent_timestep) latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler if strength >= 1 and not use_lcm: latents = latents * self.scheduler.init_noise_sigma return latents def get_timesteps(self, num_inference_steps, 
strength, device): # get the original timestep using init_timestep init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] return timesteps, num_inference_steps - t_start @torch.no_grad() def __call__( self, video_length: Optional[int], input_frames: list = None, prompt: Union[str, List[str]] = None, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, strength: float = 0.5, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_videos_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "tensor", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: Optional[int] = 1, overlaps: int = 0,
# Adapted from https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py logger = logging.get_logger(__name__) # pylint: disable=invalid-name # For LCM: @dataclass class AnimationPipelineOutput(BaseOutput): videos: Union[torch.Tensor, np.ndarray] class ControlAnimationPipeline(DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin): _optional_components = [] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet3DConditionModel, scheduler: Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, UniPCMultistepScheduler, None # LCM ], ): super().__init__() scheduler = ( scheduler if scheduler is not None else LCMScheduler( beta_start=0.00085, beta_end=0.0120, beta_schedule="scaled_linear", prediction_type="epsilon" ) ) if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["steps_offset"] = 1 scheduler._internal_dict = FrozenDict(new_config) if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." " `clip_sample` should be set to False in the configuration file. Please make sure to update the" " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" ) deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["clip_sample"] = False scheduler._internal_dict = FrozenDict(new_config) is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( version.parse(unet.config._diffusers_version).base_version ) < version.parse("0.9.0.dev0") is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = ( "The configuration file of the unet has set the default `sample_size` to smaller than" " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" " in the config might lead to incorrect results in future versions. 
If you have downloaded this" " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" " the `unet/config.json` file" ) deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config["sample_size"] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True) self.control_image_processor = VaeImageProcessor( vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False ) self.latents = None self.frames_latents = None self.prev_frames_latents = None self.ip_adapter = None def enable_vae_slicing(self): self.vae.enable_slicing() def disable_vae_slicing(self): self.vae.disable_slicing() def enable_sequential_cpu_offload(self, gpu_id=0): if is_accelerate_available(): else: raise ImportError("Please install accelerate via `pip install accelerate`") device = torch.device(f"cuda:{gpu_id}") for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]: if cpu_offloaded_model is not None: cpu_offload(cpu_offloaded_model, device) @property def _execution_device(self): if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"): return self.device for module in self.unet.modules(): if ( hasattr(module, "_hf_hook") and hasattr(module._hf_hook, "execution_device") and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device) return self.device def _encode_prompt(self, prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt): batch_size = len(prompt) if isinstance(prompt, list) else 1 text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None text_embeddings = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, ) text_embeddings = text_embeddings[0] # duplicate text embeddings for each generation per prompt, using mps friendly method bs_embed, seq_len, _ = text_embeddings.shape text_embeddings = text_embeddings.repeat(1, num_videos_per_prompt, 1) text_embeddings = text_embeddings.view(bs_embed * num_videos_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." 
) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt max_length = text_input_ids.shape[-1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None uncond_embeddings = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) uncond_embeddings = uncond_embeddings[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = uncond_embeddings.shape[1] uncond_embeddings = uncond_embeddings.repeat(1, num_videos_per_prompt, 1) uncond_embeddings = uncond_embeddings.view(batch_size * num_videos_per_prompt, seq_len, -1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) return text_embeddings def encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, lora_scale: Optional[float] = None, clip_skip: Optional[int] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. lora_scale (`float`, *optional*): A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. 
""" # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, LoraLoaderMixin): self._lora_scale = lora_scale # dynamically adjust the LoRA scale # if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) # else: # scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: # textual inversion: procecss multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode( untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True ) # Access the `hidden_states` first, that contains a tuple of # all the hidden states from the encoder layers. Then index into # the tuple to access the hidden states from the desired layer. prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] # We also need to apply the final LayerNorm here to not mess with the # representations. The `last_hidden_states` that we typically use for # obtaining the final prompt representations passes through the LayerNorm # layer. prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." 
) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt # textual inversion: procecss multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) # if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers # unscale_lora_layers(self.text_encoder, lora_scale) return prompt_embeds, negative_prompt_embeds def get_w_embedding(self, w, embedding_dim=512, dtype=torch.float32): """ see https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 Args: timesteps: torch.Tensor: generate embedding vectors at these timesteps embedding_dim: int: dimension of the embeddings to generate dtype: data type of the generated embeddings Returns: embedding vectors with shape `(len(timesteps), embedding_dim)` """ assert len(w.shape) == 1 w = w * 1000.0 half_dim = embedding_dim // 2 emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) emb = w.to(dtype)[:, None] * emb[None, :] emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: # zero pad emb = torch.nn.functional.pad(emb, (0, 1)) assert emb.shape == (w.shape[0], embedding_dim) return emb def decode_latents(self, latents): video_length = latents.shape[2] latents = 1 / self.vae.config.scaling_factor * latents # 0.18215 * latents latents = rearrange(latents, "b c f h w -> (b f) c h w") # video = self.vae.decode(latents).sample video = [] for frame_idx in tqdm(range(latents.shape[0])): video.append(self.vae.decode(latents[frame_idx:frame_idx+1]).sample) video = torch.cat(video) video = rearrange(video, "(b f) c h w -> b c f h w", f=video_length) video = (video / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16 video = video.cpu().float().numpy() return video def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, callback_steps): if not isinstance(prompt, str) and not isinstance(prompt, list): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) def prepare_latents(self, input_frames, batch_size, num_channels_latents, video_length, height, width, dtype, device, generator, latent_timestep, overlaps, strength, latents=None, last_output_frames = None, use_lcm = False, use_img2img = False, ): shape = (batch_size, num_channels_latents, video_length, height // self.vae_scale_factor, width // self.vae_scale_factor) latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) # torch.randn(shape, generator=generator, dtype=dtype) #.to(device) , device=rand_device last_output_frames_latents = [] if overlaps > 0 or strength < 1 or use_lcm: frames_latents = [] if input_frames is not None: for i , frame in enumerate(input_frames): image = frame image = self.image_processor.preprocess(image) if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): raise ValueError( f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" ) image = image.to(device=device, dtype=dtype) frame_latents = self.vae.encode(image).latent_dist.sample(generator) frame_latents = self.vae.config.scaling_factor * frame_latents frames_latents.append(frame_latents) if last_output_frames is not None: for image in last_output_frames: image = self.image_processor.preprocess(image) if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): raise ValueError( f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" ) image = image.to(device=device, dtype=dtype) frame_latents = self.vae.encode(image).latent_dist.sample(generator) frame_latents = self.vae.config.scaling_factor * frame_latents last_output_frames_latents.append(frame_latents) latents = latents.to(device) if use_lcm: frames_latents_tensor = torch.stack(frames_latents,dim=2) latents = self.scheduler.add_noise(frames_latents_tensor, latents.to(device), latent_timestep) elif last_output_frames is not None and strength < 1.0: # for i in range(len(last_output_frames_latents)): for i in range(video_length): if i < len(last_output_frames): latents[:, :, i, :, :] = self.scheduler.add_noise(last_output_frames_latents[i], latents[:, :, i, :, :].to(device), latent_timestep) else: if not use_img2img: latents[:, :, i, :, :] = self.scheduler.add_noise(last_output_frames_latents[-1], latents[:, :, i, :, :].to(device), latent_timestep) else: latents[:, :, i, :, :] = self.scheduler.add_noise(frames_latents[i], latents[:, :, i, :, :].to(device), latent_timestep) 
latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler if strength >= 1 and not use_lcm: latents = latents * self.scheduler.init_noise_sigma return latents def get_timesteps(self, num_inference_steps, strength, device): # get the original timestep using init_timestep init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] return timesteps, num_inference_steps - t_start @torch.no_grad() def __call__( self, video_length: Optional[int], input_frames: list = None, prompt: Union[str, List[str]] = None, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, strength: float = 0.5, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_videos_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "tensor", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: Optional[int] = 1, overlaps: int = 0,
multicontrolnetresiduals_pipeline: Optional[MultiControlNetResidualsPipeline] = None,
2
2023-11-04 01:35:44+00:00
16k
Zaczero/openstreetmap-ng
src/controllers/api/0.6/changeset_comment.py
[ { "identifier": "api_user", "path": "src/lib/auth.py", "snippet": "def api_user(*require_scopes: Scope | ExtendedScope) -> User:\n \"\"\"\n Dependency for authenticating the api user.\n \"\"\"\n\n return Security(\n _get_user,\n scopes=tuple(s.value for s in require_scopes),\n ...
from typing import Annotated
from fastapi import APIRouter, Form
from pydantic import PositiveInt
from src.lib.auth import api_user
from src.lib.format.format06 import Format06
from src.limits import CHANGESET_COMMENT_BODY_MAX_LENGTH
from src.models.db.user import User
from src.models.scope import ExtendedScope, Scope
from src.services.changeset_comment_service import ChangesetCommentService
11,053
router = APIRouter()


@router.post('/changeset/{changeset_id}/subscribe')
async def changeset_subscribe(
    changeset_id: PositiveInt,
router = APIRouter()


@router.post('/changeset/{changeset_id}/subscribe')
async def changeset_subscribe(
    changeset_id: PositiveInt,
_: Annotated[User, api_user(Scope.write_api)],
3
2023-11-04 01:12:13+00:00
16k
codefuse-ai/Collinear-Constrained-Attention
train/trainer/atorch_trainer.py
[ { "identifier": "print_rank_0", "path": "utils/common_utils.py", "snippet": "TASK2ID = {}\nID2TASK = {}\n L = args.num_hidden_layers\n V = args.vocab_size\ndef get_rank():\ndef get_local_rank():\ndef is_main_process():\ndef is_local_main_process():\ndef print_rank_0(*message):\ndef get_world_size(...
import datetime
import json
import logging
import math
import os
import random
import re
import shutil
import time
import warnings
import gc
import numpy as np
import atorch
import torch
from functools import partial
from pathlib import Path
from deepspeed.ops.adam import DeepSpeedCPUAdam
from torch.distributed.fsdp import FullStateDictConfig
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp import StateDictType
from torch.optim.lr_scheduler import LambdaLR, CosineAnnealingLR, CosineAnnealingWarmRestarts
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from torch.utils.tensorboard import SummaryWriter
from tqdm.auto import tqdm
from transformers import get_scheduler as get_scheduler_trans
from transformers.modeling_utils import PreTrainedModel, unwrap_model
from transformers.trainer import (
    OPTIMIZER_NAME,
    SCHEDULER_NAME,
    TRAINER_STATE_NAME,
    TRAINING_ARGS_NAME
)
from transformers.trainer_pt_utils import reissue_pt_warnings
from transformers.trainer_utils import (
    PREFIX_CHECKPOINT_DIR,
)
from transformers.utils import WEIGHTS_NAME
from torch.nn import CrossEntropyLoss
from utils.common_utils import print_rank_0, get_tflops_megatron, get_computation_speed, TASK2ID, ID2TASK, EarlyStopping, logger
from utils.auto_accelerate_utils import FAMO, get_ltor_masks_and_position_ids, SelfPacedStatus
from atorch.auto import auto_accelerate
from atorch.utils.version import torch_version
from model.gpt_neox.modeling_gpt_neox import GPTNeoXLayer, GPTNeoXAttention, GPTNeoXMLP
from model.llama.modeling_llama import LlamaDecoderLayer, LlamaAttention, LlamaMLP
from model.glm.modeling_glm import GLMBlock
from torch.cuda.amp import GradScaler
from apex.optimizers import FusedSGD
from model.peft.modeling_peft import PeftModel
12,545
# labels=batch['labels'], ) # self paced loss # ema_tmp: L_valid_ema at step t-1 if (self.global_steps + 1) % self.args.selfpaced_interval == 0 and (self.args.weighted_loss_mode.startswith('famo_valid_ema') or self.args.weighted_loss_mode == 'selfpaced'): ema_tmp = 0.001 * self.valid_task_loss_prev + (1 - 0.001) * self.ema_valid_task_loss_prev self.ratio_valid_task_loss_prev = (ema_tmp - self.ema_valid_task_loss_prev ) / (self.ema_valid_task_loss_prev + 1e-6) self.ema_valid_task_loss_prev = ema_tmp self.ratio_valid_task_loss_prev = torch.where(self.ratio_valid_task_loss_prev > 0, -1e20, self.ratio_valid_task_loss_prev) self.famo.ratio_valid_task_loss_prev = self.ratio_valid_task_loss_prev if self.args.weighted_loss_mode == 'selfpaced': self.selfpaced_status.update(self.global_steps + 1, epoch + 1, 'train', self.ratio_valid_task_loss_prev) loss, task_loss, task_num, self.famo, self.selfpaced_status = self.loss_func(outputs, batch, self.args.weighted_loss_mode, self.famo, self.selfpaced_status) # print(f'rank: {self.rank}, loss: {loss}, task loss: {task_loss}') if self.args.weighted_loss_mode.startswith('famo'): self.famo.print_loss = self.famo.print_loss / self.args.gradient_accumulation_steps loss_tensor = torch.zeros( [1], device=self.famo.print_loss.device, dtype=self.famo.print_loss.dtype) loss_tensor[0] = self.famo.print_loss.item() else: loss = loss / self.args.gradient_accumulation_steps loss_tensor = torch.zeros( [1], device=loss.device, dtype=loss.dtype) loss_tensor[0] = loss.item() torch.distributed.all_reduce(loss_tensor) torch.distributed.all_reduce(task_loss, op=torch.distributed.ReduceOp.SUM) torch.distributed.all_reduce(task_num, op=torch.distributed.ReduceOp.SUM) reduce_loss = loss_tensor.sum() / torch.distributed.get_world_size() if has_inf_or_nan(reduce_loss): print_rank_0(f'There have nan loss.') self.skipped_steps += 1 skipped = True else: self.accumulated_loss += reduce_loss.item() mean_task_loss = task_loss / torch.distributed.get_world_size() self.accumulated_task_loss += mean_task_loss.cpu().numpy() self.accumulated_task_num += task_num.cpu().numpy() loss.backward() self.global_steps += 1 self.famo.global_steps += 1 if step % self.args.gradient_accumulation_steps == 0 or step == len(self.train_dataloader) - 1: if self.args.max_grad_norm is not None and self.args.max_grad_norm > 0: # 如果是fp16,需要unscale。如果是bf16,self.optimizer里没有unscale这个方法 try: self.optimizer.unscale_() except Exception: pass if isinstance(self.model, FSDP): self.model.clip_grad_norm_(self.args.max_grad_norm) else: torch.nn.utils.clip_grad_norm_( self.model.parameters(), self.args.max_grad_norm) self.optimizer.step() overflow = hasattr(self.optimizer, "step_was_skipped") and self.optimizer.step_was_skipped if skipped != overflow: print(f'skipped != overflow!!!!!!!!!!!!!!!!') if not overflow: self.lr_scheduler.step() # if not skipped: # self.optimizer.step() # self.lr_scheduler.step() self.optimizer.zero_grad() if (self.args.weighted_loss_mode.startswith('famo_valid') or self.args.weighted_loss_mode == 'selfpaced') and not skipped: # if self.args.weighted_loss_mode.startswith('famo_valid') and not skipped and step % self.args.famo_interval == 0: # delete caches # gc.collect() # torch.cuda.empty_cache() # self.model.eval() # if (step // self.args.famo_interval) % self.valid_dataloader_length == 0: if step % self.valid_dataloader_length == 0: valid_iterator = iter(self.valid_dataloader) valid_step = step % self.valid_dataloader_length if self.famo_resume: self.famo_resume = False for i in 
range(valid_step): v_batch = next(valid_iterator) v_batch = next(valid_iterator) if self.args.weighted_loss_mode == 'selfpaced': self.valid_task_loss_prev = self.self_paced_evaluate(self.global_steps, epoch + 1, v_batch, self.ratio_valid_task_loss_prev) else: with torch.autocast(device_type='cuda', dtype=torch.bfloat16): valid_outputs = self.model( input_ids=v_batch['input_ids'].to(self.device), attention_mask=v_batch['attention_mask'].to(self.device), position_ids=v_batch['position_ids'].to(self.device) ) valid_loss, valid_task_loss, valid_task_num, _, _ = self.loss_func(valid_outputs, v_batch, self.args.weighted_loss_mode) if self.args.weighted_loss_mode.startswith('famo_valid_ema'): torch.distributed.all_reduce(valid_task_loss, op=torch.distributed.ReduceOp.SUM) valid_task_loss /= torch.distributed.get_world_size() self.valid_task_loss_prev = valid_task_loss.clone().detach() # if self.famo.first_valid_step and self.args.resume_from_checkpoint == 'true': if self.args.resume_from_checkpoint == 'true': self.ema_valid_task_loss_prev = self.valid_task_loss_prev # self.scaler.scale(valid_loss).backward() valid_loss.backward() valid_loss_item = valid_loss.item() if self.args.max_grad_norm is not None and self.args.max_grad_norm > 0: # 如果是fp16,需要unscale。如果是bf16,self.optimizer里没有unscale这个方法 try: self.optimizer.unscale_() except Exception: pass if isinstance(self.model, FSDP): self.model.clip_grad_norm_(self.args.max_grad_norm) else: torch.nn.utils.clip_grad_norm_( self.model.parameters(), self.args.max_grad_norm) # print_rank_0(f'valid_loss {valid_loss.item()}, task loss: {valid_task_loss}, valid step: {valid_step}') self.famo.update(valid_task_loss) self.optimizer.zero_grad() step_time = time.time() - step_start
#!/usr/bin/env python # coding=utf-8 # Copyright (c) 2023 Ant Group. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. HYPER_PARAMETER_NAME = 'hyper_parameters.json' ATORCH_CHECKPOINT_NAME = 'atorch_checkpoint.bin' EPOCH_CHECKPOINT_NAME = 'epoch' FAMO_CHECKPOINT_NAME = 'famo_checkpoint' EMA_CHECKPOINT_NAME = 'ema_checkpoint' # logger = logging.getLogger(__name__) def is_local_main_process(): return atorch.local_rank() == 0 def is_global_main_process(): return atorch.rank() == 0 def has_inf_or_nan(x): try: # if x is half, the .float() incurs an additional deep copy, but it's necessary if # Pytorch's .sum() creates a one-element tensor of the same type as x # (which is true for some recent version of pytorch). cpu_sum = float(x.float().sum()) # More efficient version that can be used if .sum() returns a Python scalar # cpu_sum = float(x.sum()) except RuntimeError as instance: # We want to check if inst is actually an overflow exception. # RuntimeError could come from a different error. # If so, we still want the exception to propagate. if "value cannot be converted" not in instance.args[0]: raise return True else: if cpu_sum == float('inf') or cpu_sum == -float('inf') or cpu_sum != cpu_sum: return True return False def count_model_params(model): trainable_params = 0 all_params = 0 for param in model.parameters(): num_params = param.numel() all_params += num_params if param.requires_grad: trainable_params += num_params return all_params, trainable_params class AtorchArguments: def __init__(self, **kwargs): for key, value in kwargs.items(): setattr(self, key, value) def get_linear_schedule_with_log_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1): def lr_lambda(current_step: int): inverse_log_warm_up = 1.0 / math.log(num_warmup_steps) if current_step == 0: return 0.0 if current_step < num_warmup_steps: return inverse_log_warm_up * math.log(current_step) return max( 0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps)) ) return LambdaLR(optimizer, lr_lambda, last_epoch) def get_scheduler(name, optimizer, num_warmup_steps, num_training_steps): scheduler_map = { 'log_warmup_linear_decay': get_linear_schedule_with_log_warmup} try: lr_scheduler = get_scheduler_trans( name, optimizer, num_warmup_steps, num_training_steps) return lr_scheduler except Exception: schedule_func = scheduler_map[name] return schedule_func(optimizer, num_warmup_steps, num_training_steps) class AtorchTrainer: def __init__(self, model, args, train_dataset, valid_dataset, tokenizer=None, callbacks=None, no_save_atorch_checkpoint=None, save_pytorch_model_bin_checkpoint=True, train_peft=False, rank=0, max_shard_size='10GB', files_to_save=None, args_to_save=None, data_collator=None, my_loss_func=None, **kwargs, ): self.args = args self.TASK2ID = TASK2ID self.ID2TASK = ID2TASK print('in atorch trainer') print(TASK2ID) print(ID2TASK) self.model = model self.no_save_atorch_checkpoint = no_save_atorch_checkpoint self.save_pytorch_model_bin_checkpoint = 
save_pytorch_model_bin_checkpoint self.train_peft = train_peft self.rank = rank self.kwargs = kwargs self.train_dataset = train_dataset self.valid_dataset = valid_dataset self.tokenizer = tokenizer self.max_shard_size = max_shard_size self.files_to_save = files_to_save self.args_to_save = args_to_save self.best_metric = None self.best_model_checkpoint = None self.no_save_base_model = True self.device = f"cuda:{atorch.local_rank()}" self.famo = FAMO(n_tasks=len(TASK2ID), device=self.device, mode=self.args.weighted_loss_mode) self.famo_resume = False self.selfpaced_status = SelfPacedStatus(args.selfpaced_interval) self.total_train_batch_size = self.args.per_device_train_batch_size * \ self.args.gradient_accumulation_steps * \ atorch.world_size() self.data_collator = data_collator self.my_loss_func = my_loss_func if self.args.early_stopping_patience > 0: print(f'early_stopping_patience: {self.args.early_stopping_patience}') patience = self.args.early_stopping_patience self.early_stopping = EarlyStopping(patience, verbose=True) self.train_dataloader_args = { "shuffle": True, "batch_size": self.total_train_batch_size, "pin_memory": True, "collate_fn": data_collator, "drop_last": True, "num_workers": self.args.num_workers, # "persistent_workers": args.num_workers > 0, } self.valid_dataloader = DataLoader( valid_dataset, sampler=DistributedSampler(valid_dataset, shuffle=True), batch_size=args.per_device_valid_batch_size, pin_memory=True, collate_fn=data_collator ) self.valid_dataloader_length = len(self.valid_dataloader) if self.args.resume_from_checkpoint == 'true': self.resume_checkpoint_dir = self.get_last_checkpoint( self.args.output_dir) self.atorch_args = AtorchArguments( lr=args.learning_rate, weight_decay=args.weight_decay, adam_eps=args.adam_epsilon, adam_beta1=args.adam_beta1, adam_beta2=args.adam_beta2) self.atorch_init() self.num_update_steps_per_epoch = math.ceil( len(self.train_dataloader) / self.args.gradient_accumulation_steps) print(f'number of update steps per epoch: {self.num_update_steps_per_epoch}') if self.args.max_steps == -1: self.args.max_steps = int( self.args.num_train_epochs * self.num_update_steps_per_epoch) else: self.args.num_train_epochs = math.ceil( self.args.max_steps / self.num_update_steps_per_epoch) # self.args.warmup_steps = self.args.get_warmup_steps( # self.args.max_steps) # 找不到get_warmup_steps custom_lr_scheduler_type = self.kwargs.get( 'custom_lr_scheduler_type', None) self.lr_scheduler = get_scheduler( name=custom_lr_scheduler_type if custom_lr_scheduler_type else self.args.lr_scheduler_type, optimizer=self.optimizer, num_warmup_steps=self.args.num_warmup_steps, num_training_steps=self.args.max_steps, ) print_rank_0(f'lr_scheduler{self.lr_scheduler}') if self.args.resume_from_checkpoint == 'true': with warnings.catch_warnings(record=True): self.lr_scheduler.load_state_dict(torch.load( os.path.join(self.resume_checkpoint_dir, SCHEDULER_NAME))) self._load_rng_state(self.resume_checkpoint_dir) torch.distributed.barrier() now_datetime = datetime.datetime.now() timestr = datetime.datetime.strftime(now_datetime, '%Y%m%d-%H%M%S') self.log_dir = os.path.join(self.args.output_dir, 'runs', timestr) self.summary_writer = None if torch.distributed.get_rank() == 0: self.summary_writer = SummaryWriter(log_dir=self.log_dir) def get_last_checkpoint(self, folder): _re_checkpoint = re.compile(r"^" + PREFIX_CHECKPOINT_DIR + r"\-(\d+)") content = sorted(os.listdir(folder)) checkpoints = [ path for path in content if _re_checkpoint.search(path) is not None and 
os.path.isdir(os.path.join(folder, path)) ] if len(checkpoints) == 0: return return os.path.join(folder, max(checkpoints, key=lambda x: int(_re_checkpoint.search(x).groups()[0]))) def _load_rng_state(self, resume_checkpoint_dir): # Load RNG states from `checkpoint` if resume_checkpoint_dir is None: return if self.args.world_size > 1: rng_file = os.path.join( resume_checkpoint_dir, f"rng_state_{self.rank}.pth") if not os.path.isfile(rng_file): logger.info( f"Didn't find an RNG file for process {self.rnak}, if you are resuming a training that " "wasn't launched in a distributed fashion, reproducibility is not guaranteed." ) return else: rng_file = os.path.join(resume_checkpoint_dir, "rng_state.pth") if not os.path.isfile(rng_file): logger.info( "Didn't find an RNG file, if you are resuming a training that was launched in a distributed " "fashion, reproducibility is not guaranteed." ) return checkpoint_rng_state = torch.load(rng_file) random.setstate(checkpoint_rng_state["python"]) np.random.set_state(checkpoint_rng_state["numpy"]) torch.random.set_rng_state(checkpoint_rng_state["cpu"]) if torch.cuda.is_available(): if self.args.local_rank != -1: torch.cuda.random.set_rng_state(checkpoint_rng_state["cuda"]) else: try: torch.cuda.random.set_rng_state_all( checkpoint_rng_state["cuda"]) except Exception as e: logger.info( f"Didn't manage to set back the RNG states of the GPU because of the following error:\n {e}" "\nThis won't yield the same results as if the training had not been interrupted." ) def load_atorch_model_state(self, model_state_dict, **kwargs): print('resume atorch model state') if self.is_rank0(): self.model.load_state_dict(model_state_dict) # 在 rank 0 加载完毕后,再通过sync_module_states分发参数 torch.distributed.barrier() # self.model = FSDP(self.model, sync_module_states=True, **kwargs) def load_atorch_optim_state(self, optim_state_dict): print('resume optimizer state') optim_state_dict = FSDP.scatter_full_optim_state_dict( optim_state_dict, self.model) # may be removed after PyTorch 2.2 def move_optim_state_to_cpu(optim_state_dict): for k in optim_state_dict: if isinstance(optim_state_dict[k], torch.Tensor): optim_state_dict[k] = optim_state_dict[k].cpu() elif isinstance(optim_state_dict[k], dict): move_optim_state_to_cpu(optim_state_dict[k]) move_optim_state_to_cpu(optim_state_dict) self.optimizer.load_state_dict(optim_state_dict) def load_famo_state(self): print_rank_0(f'loading famo checkpoint') self.famo_resume = True famo_dir = os.path.join(self.resume_checkpoint_dir, 'famo_checkpoint/') if not os.path.exists(famo_dir): print_rank_0(f'can not find the famo checkpoint dir!') else: famo_state_name = FAMO_CHECKPOINT_NAME + f'_rank_{self.rank}.pth' famo_checkpoint_state = torch.load(os.path.join(famo_dir, famo_state_name)) w_opt_state = famo_checkpoint_state['w_opt_state'] self.famo.prev_train_loss = famo_checkpoint_state['prev_train_loss'].to(self.famo.device) self.famo.prev_valid_loss = famo_checkpoint_state['prev_valid_loss'].to(self.famo.device) self.famo.first_train_step = famo_checkpoint_state['first_train_step'] self.famo.first_valid_step = famo_checkpoint_state['first_valid_step'] self.famo.ratio_valid_task_loss_prev = famo_checkpoint_state['ratio_valid_task_loss_prev'].to(self.famo.device) self.famo.w = famo_checkpoint_state['w'].to(self.famo.device) self.famo.w_opt.load_state_dict(w_opt_state) print_rank_0(f'prev_train_loss: {self.famo.prev_train_loss}') print_rank_0(f'prev_valid_loss: {self.famo.prev_valid_loss}') print_rank_0(f'first_train_step: {self.famo.first_train_step}') 
print_rank_0(f'first_valid_step: {self.famo.first_valid_step}') print_rank_0(f'ratio_valid_task_loss_prev: {self.famo.ratio_valid_task_loss_prev}') print_rank_0(f'w: {self.famo.w}') print_rank_0(f'load famo checkpoint successfully') def atorch_init(self): assert torch_version() >= (2, 0, 0), "use pt2.0 for use orig param if fsdp" if self.args.model_type == 'gpt_neox': # wrap_class = (GPTNeoXAttention, GPTNeoXMLP) wrap_class = (GPTNeoXLayer,) elif self.args.model_type == 'llama': # wrap_class = (LlamaAttention, LlamaMLP) wrap_class = (LlamaDecoderLayer,) elif self.args.model_type == 'glm': wrap_class = (GLMBlock,) parallel_mode = [] if self.args.dp: # p_mode = ([("data", torch.distributed.get_world_size())], None) parallel_mode.append(("data", self.args.dp)) if self.args.tp: parallel_mode.append(("tensor_parallel", self.args.tp)) strategy = [ # ("parallel_mode", p_mode), ("parallel_mode", (parallel_mode, None)), "module_replace", # ("fsdp", fsdp_config), # ("amp_native", {"dtype": torch.bfloat16}) if self.args.bf16 else "amp_native", # ("checkpoint", wrap_class), ] if self.args.peft_type is None or self.args.peft_type == 'lora': cpu_offload = False if self.args.total_model_param < 1e9 else True fsdp_config = { "atorch_wrap_cls": wrap_class, "sync_module_states": True, "use_orig_params": True, "limit_all_gathers": True, # "cpu_offload": True, } print(fsdp_config) fsdp_opt = ("fsdp", fsdp_config) strategy.append(fsdp_opt) self.args.atorch_opt = "fsdp" else: num_all_params, num_trainable_params = count_model_params(self.model) if num_all_params < 11e9 or self.args.peft_type == "qlora": # For GLM-10B logger.info( f"Found using {self.args.peft_type} method. The peft model has {num_all_params} and only " f"{num_trainable_params} params are trainable({100 * num_trainable_params / num_all_params}%)" ". Set atorch opt to DistributedDataParallel.") self.args.atorch_opt = "ddp" if self.args.bf16 or self.args.fp16: if self.args.bf16: amp_config = {"dtype": torch.bfloat16, "skip_if_nonfinite": True} # amp_config = {"dtype": torch.bfloat16} if self.args.peft_type == "qlora": # The dtype of grads is bf16 when using qlora # atorch scaler does not support bf16 grads. 
amp_config["skip_if_nonfinite"] = False elif self.args.fp16: amp_config = {"dtype": torch.float16} strategy.append(("amp_native", amp_config)) # strategy.append(("half", "bf16")) if self.args.checkpoint_activations: strategy.append(("checkpoint", wrap_class)) print(f"Manually loaded auto acc strategy: {strategy}") def prepare_input(batch, device): # DEBUG: GLM NoneType batch = {k: v.to(device=device, non_blocking=True) if v is not None else None for k, v in batch.items()} return batch def optim_param_func(model, args): no_decay = ["bias", "LayerNorm.weight", "layernorm.weight"] optimizer_grouped_parameters = [ { "params": [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay) ], "weight_decay": args.weight_decay, }, { "params": [ p for n, p in model.named_parameters() if any(nd in n for nd in no_decay) ], "weight_decay": 0.0, }, ] return optimizer_grouped_parameters # load fsdp checkpoint参数 if self.args.resume_from_checkpoint == 'true': logger.info(f'Resume training from {self.resume_checkpoint_dir}') if self.is_rank0(): sd = torch.load(os.path.join( self.resume_checkpoint_dir, ATORCH_CHECKPOINT_NAME), map_location='cpu') model_state_dict, optim_state_dict = sd['model_state_dict'], sd['optimizer_state_dict'] else: model_state_dict, optim_state_dict = None, None torch.distributed.barrier() # other rank waiting ########## self.load_atorch_model_state(model_state_dict) ########## if self.is_rank0(): print(f'GPU mem before fsdp:') print(torch.cuda.memory_summary(device=self.device, abbreviated=False)) optim_func = torch.optim.AdamW print(f'optimizer before fsdp: {optim_func}') ddp_find_unused_parameters = None if self.args.atorch_opt == "ddp" and not (self.args.peft_type in ["lora", "qlora"] and self.args.checkpoint_activations): ddp_find_unused_parameters = True status, result, best_strategy = auto_accelerate( self.model, optim_func, self.train_dataset, dataloader_args=self.train_dataloader_args, loss_func=self.my_loss_func, prepare_input=prepare_input, optim_args={ "lr": self.atorch_args.lr, "weight_decay": self.atorch_args.weight_decay, "eps": self.atorch_args.adam_eps, "betas": (self.atorch_args.adam_beta1, self.atorch_args.adam_beta2), }, optim_param_func=partial( optim_param_func, args=self.atorch_args), load_strategy=strategy, ignore_dryrun_on_load_strategy=True, find_unused_parameters=ddp_find_unused_parameters, ) assert ( status ), f"auto_accelerate failed. 
status: {status}, result: {result}, best_strategy: {best_strategy}" print(f"Best strategy is: {best_strategy}") self.model = result.model self.optimizer = result.optim print(f'optimizer after fsdp: {self.optimizer}') self.loss_func = result.loss_func self.train_dataloader = result.dataloader self.prepare_input = result.prepare_input if self.args.resume_from_checkpoint == 'true': self.load_atorch_optim_state(optim_state_dict) if self.args.weighted_loss_mode.startswith('famo_valid'): self.load_famo_state() print(f"atorch use optimizer: {self.optimizer}") if self.is_rank0(): print(f'GPU mem after fsdp:') print(torch.cuda.memory_summary(device=self.device, abbreviated=False)) def evaluate(self): logger.info(f"Start evaluation") if self.is_rank0(): print(torch.cuda.memory_summary(device=self.device, abbreviated=False)) print(f'valid dataset length is: {len(self.valid_dataset)}') print(f'valid dataloader length is: {len(self.valid_dataloader)}') print(f'per device batch size: {self.args.per_device_valid_batch_size}') progress_bar = tqdm(range(len(self.valid_dataloader)), disable=not is_local_main_process(), smoothing=0) self.model.eval() losses = [] accumulated_task_loss_np = np.zeros(len(self.ID2TASK)) accumulated_task_num_np = np.zeros(len(self.ID2TASK)) accumulated_step = 0 for step, batch in enumerate(self.valid_dataloader): # if step >= self.args.valid_iters: if step >= self.args.valid_iters and (self.args.total_model_param >= 1e9 or self.args.train_mode == 'sst'): break with torch.no_grad(): # batch = {k: v.to(self.device) for k, v in batch.items()} # batch = self.prepare_input(batch, self.device) # outputs = self.model(**batch) outputs = self.model( input_ids=batch['input_ids'].to(self.device), attention_mask=batch['attention_mask'].to(self.device), position_ids=batch['position_ids'].to(self.device) ) # loss = outputs["loss"] loss, task_loss, task_num, _, _ = self.loss_func(outputs, batch, self.args.weighted_loss_mode) repeated_loss = loss.repeat( self.args.per_device_valid_batch_size) if repeated_loss.ndim == 0: repeated_loss = repeated_loss.clone()[None] output_tensors = [repeated_loss.clone() for _ in range(atorch.world_size())] torch.distributed.all_gather(output_tensors, repeated_loss) for tensor in output_tensors: if torch.isnan(tensor).any() or torch.isinf(tensor).any(): accumulated_step -= 1 continue losses.append(torch.cat(output_tensors, dim=0).cpu()) task_loss = task_loss.cpu().numpy() task_num = task_num.cpu().numpy() accumulated_task_loss_np += task_loss accumulated_task_num_np += task_num accumulated_step += 1 progress_bar.update(1) losses = torch.cat(losses) losses = losses[: len(self.valid_dataset)] mean_loss = torch.mean(losses).item() accumulated_task_loss = torch.tensor(accumulated_task_loss_np).to(self.device) accumulated_task_num = torch.tensor(accumulated_task_num_np).to(self.device) torch.distributed.all_reduce(accumulated_task_loss, op=torch.distributed.ReduceOp.SUM) torch.distributed.all_reduce(accumulated_task_num, op=torch.distributed.ReduceOp.SUM) accumulated_task_loss /= torch.distributed.get_world_size() valid_task_loss = accumulated_task_loss / (accumulated_step - 1) logs = {'valid_loss': mean_loss} per_task_valid_loss = {self.ID2TASK[i]+'_loss': valid_task_loss[i].item() for i in range(len(self.ID2TASK))} logs.update(per_task_valid_loss) if is_global_main_process(): logger.info('log point') for i in range(len(self.ID2TASK)): if accumulated_task_num[i] != 0: logger.info(f"{self.ID2TASK[i]}_loss: {valid_task_loss[i]}, sample nums: {accumulated_task_num[i]}") 
self.log(logs, step=self.global_steps, phase='Evaluation') metrics = {'valid_loss': mean_loss, 'valid_task_loss': valid_task_loss} logger.info(f"Finish evaluation") if self.is_rank0(): print(torch.cuda.memory_summary(device=self.device, abbreviated=False)) return metrics def log(self, logs, step, phase='Train'): if not self.summary_writer: return logger.info(json.dumps(logs)) for key, value in logs.items(): self.summary_writer.add_scalar(f'{phase}/{key}', value, step) def _sorted_checkpoints( self, output_dir=None, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, checkpoint_name_pattern='([0-9]+)', use_mtime=False ): ordering_and_checkpoint_path = [] glob_checkpoints = [str(x) for x in Path(output_dir).glob( f"{checkpoint_prefix}-*") if os.path.isdir(x)] for path in glob_checkpoints: if use_mtime: ordering_and_checkpoint_path.append( (os.path.getmtime(path), path)) else: regex_match = re.search( f".*{checkpoint_prefix}-({checkpoint_name_pattern})", path) if regex_match is not None and regex_match.groups() is not None: ordering_and_checkpoint_path.append( (int(regex_match.groups()[0]), path)) checkpoints_sorted = sorted(ordering_and_checkpoint_path) checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted] # Make sure we don't delete the best model. if self.best_model_checkpoint is not None: best_model_index = checkpoints_sorted.index(str(Path(self.best_model_checkpoint))) # for i in range(best_model_index, len(checkpoints_sorted) - 2): for i in range(best_model_index, len(checkpoints_sorted) - 1): checkpoints_sorted[i], checkpoints_sorted[i + 1] = checkpoints_sorted[i + 1], checkpoints_sorted[i] print_rank_0(f'checkpoints sorted list: {checkpoints_sorted}') return checkpoints_sorted def _rotate_checkpoints( self, use_mtime=False, output_dir=None, prefix=PREFIX_CHECKPOINT_DIR, checkpoint_name_pattern='.*') -> None: if self.args.save_total_limit is None or self.args.save_total_limit <= 0: return # Check if we should delete older checkpoint(s) checkpoints_sorted = self._sorted_checkpoints( use_mtime=use_mtime, output_dir=output_dir, checkpoint_prefix=prefix, checkpoint_name_pattern=checkpoint_name_pattern) if len(checkpoints_sorted) <= self.args.save_total_limit: return # If save_total_limit=1 with load_best_model_at_end=True, we could end up deleting the last checkpoint, which # we don't do to allow resuming. save_total_limit = self.args.save_total_limit number_of_checkpoints_to_delete = max( 0, len(checkpoints_sorted) - save_total_limit) checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete] for checkpoint in checkpoints_to_be_deleted: logger.info( f"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit") shutil.rmtree(checkpoint, ignore_errors=True) def _clean_atorch_checkpoints(self, output_dir=None, prefix=PREFIX_CHECKPOINT_DIR): # Check if we should delete older checkpoint(s) checkpoints_sorted = self._sorted_checkpoints( output_dir=output_dir, checkpoint_prefix=prefix, checkpoint_name_pattern='([0-9]+)') # If save_total_limit=1 with load_best_model_at_end=True, we could end up deleting the last checkpoint, which # we don't do to allow resuming. 
for checkpoint in checkpoints_sorted[:-1]: logger.info( f"Deleting older atorch checkpoint [{checkpoint}] due to self.args.save_total_limit") try: os.remove(os.path.join(checkpoint, ATORCH_CHECKPOINT_NAME)) except Exception: continue def _save_peft_model(self, output_dir, state_dict=None): logger.info(f"Start saving peft model to {output_dir}") output_dir = output_dir if output_dir is not None else self.args.output_dir os.makedirs(output_dir, exist_ok=True) model = unwrap_model(self.model) if isinstance(model, PeftModel): if state_dict is None: state_dict = model.state_dict() model.save_pretrained( output_dir, state_dict=state_dict, is_main_process=self.is_rank0()) else: if state_dict is None: state_dict = self.model.state_dict() if self.is_rank0(): torch.save(state_dict, os.path.join( output_dir, "pytorch_model.bin")) logger.info(f"Saving peft model done.") def _save_model(self, output_dir=None, state_dict=None): # If we are executing this function, we are the process zero, so we don't check for that. output_dir = output_dir if output_dir is not None else self.args.output_dir os.makedirs(output_dir, exist_ok=True) logger.info(f"Saving model checkpoint to {output_dir}") # Save a trained model and configuration using `save_pretrained()`. # They can then be reloaded using `from_pretrained()` if not isinstance(self.model, PreTrainedModel): if isinstance(unwrap_model(self.model), PreTrainedModel): print_rank_0('save in not pretrained model~~~~~~~') if state_dict is None: state_dict = self.model.state_dict() # state_dict = {key: value.bfloat16() if self.args.bf16 else value.half() for key, value in self.model.state_dict().items()} model = unwrap_model(self.model) model.save_pretrained( output_dir, state_dict=state_dict, max_shard_size=self.max_shard_size, is_main_process=self.is_rank0()) # unwrap_model(self.model).save_pretrained( # output_dir, state_dict=state_dict, max_shard_size=self.max_shard_size) elif isinstance(unwrap_model(self.model), PeftModel): if state_dict is None: state_dict = unwrap_model(self.model).base_model.model.state_dict() # state_dict = {key: value.bfloat16() if self.args.bf16 else value.half() for key, value in state_dict.items()} # Filter the peft params ... 
param_keys = list(state_dict.keys()) base_model_state_dict = {} for key in param_keys: if LORA_KEY in key: # state_dict.pop(key) continue elif PEFT_PARAM_PREFIX in key: # value = state_dict.pop(key) value = state_dict[key] new_key = key.replace(PEFT_PARAM_PREFIX, "") base_model_state_dict[new_key] = value else: base_model_state_dict[key] = value if self.is_rank0(): torch.save(base_model_state_dict, os.path.join(output_dir, WEIGHTS_NAME)) else: logger.info( "Trainer.model is not a `PreTrainedModel`, only saving its state dict.") if state_dict is None: state_dict = self.model.state_dict() # state_dict = {key: value.bfloat16() if self.args.bf16 else value.half() for key, value in self.model.state_dict().items()} torch.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME)) else: print(f'save in pretrained model!!!!!!') if state_dict is None: state_dict = self.model.state_dict() # state_dict = {key: value.bfloat16() if self.args.bf16 else value.half() for key, value in self.model.state_dict().items()} self.model.save_pretrained( output_dir, state_dict=state_dict, max_shard_size=self.max_shard_size) # if self.tokenizer is not None and self.args.model_type == 'glm': # TODO: 需要适配加载tokenizer,主要是gpt2_multi_task_dataset的encode部分里的tokenizer.tokenize函数 if self.tokenizer is not None: self.tokenizer.save_pretrained(output_dir) # Good practice: save your training arguments together with the trained model torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME)) def is_rank0(self): return self.rank == 0 def _save_atorch_checkpoint(self, output_dir): # StateDictType.FULL_STATE_DICT得到完整的模型状态。 # FullStateDictConfig指定保存到CPU,仅rank0保存 save_policy = FullStateDictConfig(offload_to_cpu=True, rank0_only=True) with FSDP.state_dict_type(self.model, StateDictType.FULL_STATE_DICT, save_policy): model_state_dict = self.model.state_dict() optim_state_dict = FSDP.full_optim_state_dict(self.model, self.optimizer) # may be removed after PyTorch 2.2 if self.is_rank0(): os.makedirs(output_dir, exist_ok=True) torch.save( { "model_state_dict": model_state_dict, "optimizer_state_dict": optim_state_dict, "global_steps": self.global_steps, }, os.path.join(output_dir, ATORCH_CHECKPOINT_NAME), ) torch.distributed.barrier() # other rank waiting def _save_famo_checkpoint(self, output_dir): famo_dir = os.path.join(output_dir, 'famo_checkpoint/') os.makedirs(famo_dir, exist_ok=True) if self.famo.w_opt is not None: optim_state = self.famo.w_opt.state_dict() famo_state = {'prev_train_loss': self.famo.prev_train_loss, 'prev_valid_loss': self.famo.prev_valid_loss, 'first_train_step': self.famo.first_train_step, 'first_valid_step': self.famo.first_valid_step, 'ratio_valid_task_loss_prev': self.famo.ratio_valid_task_loss_prev, 'w': self.famo.w, 'w_opt_state': optim_state, } famo_state_name = FAMO_CHECKPOINT_NAME + f'_rank_{self.rank}.pth' torch.save(famo_state, os.path.join(famo_dir, famo_state_name)) # if self.is_rank0(): # torch.save(famo_state, os.path.join(output_dir, FAMO_CHECKPOINT_NAME)) torch.distributed.barrier() # other rank waiting def save(self, suffix=None, metrics=None): logger.info('Save start') if self.is_rank0(): print(torch.cuda.memory_summary(device=self.device, abbreviated=False)) if not self.save_pytorch_model_bin_checkpoint: return if suffix is None: checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.global_steps}" else: checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{suffix}" run_dir = self.args.output_dir output_dir = os.path.join(run_dir, checkpoint_folder) # self._save_model(output_dir) # 获取要存的state_dict, 
每个rank都要调用 if isinstance(self.model, FSDP): save_policy = FullStateDictConfig(offload_to_cpu=atorch.world_size() > 1, rank0_only=atorch.world_size() > 1) with FSDP.state_dict_type(self.model, StateDictType.FULL_STATE_DICT, save_policy): model_state_dict = self.model.state_dict() optim_state_dict = FSDP.full_optim_state_dict(self.model, self.optimizer) # may be removed after PyTorch 2.2 else: model_state_dict = unwrap_model(self.model).state_dict() optim_state_dict = self.optimizer.state_dict() if not self.no_save_atorch_checkpoint: if self.args.peft_type is None or not self.no_save_base_model: if self.is_rank0(): os.makedirs(output_dir, exist_ok=True) torch.save( { "model_state_dict": model_state_dict, "optimizer_state_dict": optim_state_dict, "global_steps": self.global_steps, }, os.path.join(output_dir, ATORCH_CHECKPOINT_NAME), ) torch.distributed.barrier() # other rank waiting if self.args.peft_type is not None: print(f'no_save_base_model: {self.no_save_base_model}') if not self.no_save_base_model: self._save_model(output_dir=output_dir) self._save_peft_model(output_dir=output_dir) else: self._save_model(output_dir=output_dir) # if not self.no_save_atorch_checkpoint: # self._save_atorch_checkpoint(output_dir) # else: # torch.save(self.optimizer.state_dict(), # os.path.join(output_dir, OPTIMIZER_NAME)) with warnings.catch_warnings(record=True) as caught_warnings: torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME)) reissue_pt_warnings(caught_warnings) # Save RNG state in non-distributed training rng_states = { "python": random.getstate(), "numpy": np.random.get_state(), "cpu": torch.random.get_rng_state(), } if torch.cuda.is_available(): if self.args.local_rank == -1: # In non distributed, we save the global CUDA RNG state (will take care of DataParallel) rng_states["cuda"] = torch.cuda.random.get_rng_state_all() else: rng_states["cuda"] = torch.cuda.random.get_rng_state() os.makedirs(output_dir, exist_ok=True) if torch.distributed.get_world_size() <= 1: torch.save(rng_states, os.path.join(output_dir, "rng_state.pth")) else: # torch.save(rng_states, os.path.join( # output_dir, f"rng_state_{self.args.process_index}.pth")) # process_index = rank torch.save(rng_states, os.path.join( output_dir, f"rng_state_{self.rank}.pth")) if self.args_to_save: json.dump(self.args_to_save, open(os.path.join(output_dir, HYPER_PARAMETER_NAME), 'w'), ensure_ascii=False, indent=2) # save state state = {'global_steps': self.global_steps} json.dump(state, open(os.path.join( output_dir, TRAINER_STATE_NAME), 'w'), ensure_ascii=False, indent=2) # if self.args.weighted_loss_mode == "selfpaced" or self.args.weighted_loss_mode.startswith('famo_valid_ema'): # ema_dir = os.path.join(output_dir, 'ema_checkpoint/') # os.makedirs(ema_dir, exist_ok=True) # ema_state = {'valid_task_loss_prev': self.valid_task_loss_prev, # 'ema_valid_task_loss_prev': self.ema_valid_task_loss_prev, # 'ratio_valid_task_loss_prev': self.ratio_valid_task_loss_prev} # ema_state_name = EMA_CHECKPOINT_NAME + f'_rank_{self.rank}.pth' # torch.save(ema_state, os.path.join(ema_dir, ema_state_name)) # torch.distributed.barrier() # other rank waiting # print_rank_0(f'ema state to save: {ema_state}') # if self.files_to_save: # for name in self.files_to_save: # if not os.path.exists(name): # continue # try: # if os.path.isfile(name): # shutil.copy(name, output_dir) # elif os.path.isdir(name): # shutil.copytree(name, os.path.join( # output_dir, os.path.basename(name))) # except Exception: # continue # Determine the new best 
metric / best model checkpoint if metrics is not None and self.args.metric_for_best_model is not None: metric_to_check = self.args.metric_for_best_model if not metric_to_check.startswith("valid_"): metric_to_check = f"valid_{metric_to_check}" metric_value = metrics[metric_to_check] operator = np.greater if self.args.greater_is_better == 'true' else np.less if ( self.best_metric is None or self.best_model_checkpoint is None or operator(metric_value, self.best_metric) ): self.best_metric = metric_value self.best_model_checkpoint = output_dir print_rank_0(f'current best model checkpoint is: {self.best_model_checkpoint}, valid_loss: {self.best_metric}') if self.args.weighted_loss_mode.startswith('famo_valid'): self._save_famo_checkpoint(output_dir) if self.is_rank0(): if self.args.extra_save_by_epoch: print('extra_save_by_epoch') # 如果是每个epoch extra save的,那么每个epoch的checkpoint不会删除,不受save_total_limit的影响, # 而对按step存的,则会只保留save_total_limit个 self._rotate_checkpoints( output_dir=run_dir, prefix=PREFIX_CHECKPOINT_DIR, checkpoint_name_pattern='([0-9]+)$') else: self._rotate_checkpoints( output_dir=run_dir, prefix=PREFIX_CHECKPOINT_DIR) # 只保留最新一个checkpoint的atorch checkpoint self._clean_atorch_checkpoints( output_dir=run_dir, prefix=PREFIX_CHECKPOINT_DIR) print(torch.cuda.memory_summary(device=self.device, abbreviated=False)) torch.distributed.barrier() logger.info('Save finished') def self_paced_evaluate(self, global_steps, current_epoch, v_batch, ratio_valid_task_loss_prev): self.model.eval() # v_batch = next(valid_iterator) with torch.no_grad(): valid_outputs = self.model( input_ids=v_batch['input_ids'], attention_mask=v_batch['attention_mask'], position_ids=v_batch['position_ids'] ) self.selfpaced_status.update(global_steps, current_epoch, 'valid', ratio_valid_task_loss_prev) _, valid_task_loss, valid_task_num, _, self.selfpaced_status = self.loss_func(valid_outputs, v_batch, self.args.weighted_loss_mode, self.famo, self.selfpaced_status) torch.distributed.all_reduce(valid_task_loss, op=torch.distributed.ReduceOp.SUM) valid_task_loss /= torch.distributed.get_world_size() # print_rank_0(f"self paced valid loss: {valid_task_loss}") return valid_task_loss def train(self, **kwargs): logger.info("***** Running training *****") logger.info(f" Num examples = {len(self.train_dataset)}") logger.info(f" Num Epochs = {self.args.num_train_epochs}") logger.info( f" Instantaneous batch size per device = {self.args.per_device_train_batch_size}") logger.info( f" Total train batch size (w. 
parallel, distributed & accumulation) = {self.total_train_batch_size}") logger.info( f" Gradient Accumulation steps = {self.args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {self.args.max_steps}") progress_bar = tqdm(range(self.args.max_steps), disable=not is_local_main_process(), smoothing=0) training_time = 0 self.global_steps = 0 start_epoch = 0 steps_trained_in_current_epoch = 0 exit_flag = False if self.args.resume_from_checkpoint == 'true': state = json.load( open(os.path.join(self.resume_checkpoint_dir, TRAINER_STATE_NAME), 'r')) self.global_steps = state.get('global_steps', 0) # progress_bar.update(self.global_steps) progress_bar = tqdm(range(self.args.max_steps), disable=not is_local_main_process(), initial=self.global_steps, smoothing=0) start_epoch = self.global_steps // self.num_update_steps_per_epoch steps_trained_in_current_epoch = self.global_steps % self.num_update_steps_per_epoch steps_trained_in_current_epoch *= self.args.gradient_accumulation_steps print(f'Start training at step {self.global_steps}') self.last_step_logged = self.global_steps self.skipped_steps = 0 self.accumulated_loss = 0 self.accumulated_task_loss = np.zeros(len(self.ID2TASK)) self.accumulated_task_num = np.zeros(len(self.ID2TASK)) self.args.selfpaced_interval = 20 if self.args.total_model_param >= 1e9 else 20 self.args.famo_interval = 10 self.train_task_loss_prev = None self.valid_task_loss_prev = None # L_valid at step t-1 self.ema_valid_task_loss_prev = None # L_valid_ema at step t-2 self.ratio_valid_task_loss_prev = torch.zeros(len(self.ID2TASK)).to(self.device) # ema ratio at step t-1 if self.args.weighted_loss_mode.startswith('famo_valid_ema'): if self.args.resume_from_checkpoint == 'true': # TODO pass # ema_dir = os.path.join(self.resume_checkpoint_dir, 'ema_checkpoint/') # if not os.path.exists(ema_dir): # print_rank_0(f'can not find the famo checkpoint dir!') # else: # ema_state_name = EMA_CHECKPOINT_NAME + f'_rank_{self.rank}.pth' # ema_checkpoint_state = torch.load(os.path.join(ema_dir, ema_state_name)) # self.valid_task_loss_prev = ema_checkpoint_state['valid_task_loss_prev'].to(self.device) # L_valid at step t-1 # self.ema_valid_task_loss_prev = ema_checkpoint_state['ema_valid_task_loss_prev'].to(self.device) # L_valid_ema at step t-2 # self.ratio_valid_task_loss_prev = ema_checkpoint_state['ratio_valid_task_loss_prev'].to(self.device) # ema ratio at step t-1 # print_rank_0(f'trainer state from resume: {ema_checkpoint_state}') else: metrics = self.evaluate() self.ema_valid_task_loss_prev = metrics['valid_task_loss'] self.famo.global_steps = self.global_steps if self.args.weighted_loss_mode == "selfpaced": self.ema_valid_task_loss_prev = self.self_paced_evaluate(self.global_steps, start_epoch + 1, next(iter(self.valid_dataloader)), self.ratio_valid_task_loss_prev) for epoch in range(start_epoch, int(self.args.num_train_epochs)): self.train_dataloader.set_epoch(epoch) self.model.train() start_time = time.time() valid_iterator = iter(self.valid_dataloader) for step, batch in enumerate(self.train_dataloader): if step == 0: print_rank_0(f"step 1 batch shape: {batch['input_ids'].shape},\n" f"last 10 tokens: {batch['input_ids'][:, -10:]}") # f"last 10 loss mask: {batch['loss_mask'][:, -10:]}" print_rank_0(f"first 1000 tokens") for pt in range(10): print_rank_0(f"{batch['input_ids'][:, 10 * pt:10 * pt + 10]}") # print_rank_0(f"{batch['loss_mask'][:, 10 * pt:10 * pt + 10]}") # self.global_steps += 1 skipped = False self.model.train() step_start = time.time() if 
steps_trained_in_current_epoch and step < steps_trained_in_current_epoch: continue steps_trained_in_current_epoch = 0 # 恢复到上一次的steps in current epoch后,需要置零,否则后面的每个epoch都会跳过前面的steps # batch = self.prepare_input(batch, self.device) outputs = self.model( input_ids=batch['input_ids'].to(self.device), attention_mask=batch['attention_mask'].to(self.device), position_ids=batch['position_ids'].to(self.device), # labels=batch['labels'], ) # self paced loss # ema_tmp: L_valid_ema at step t-1 if (self.global_steps + 1) % self.args.selfpaced_interval == 0 and (self.args.weighted_loss_mode.startswith('famo_valid_ema') or self.args.weighted_loss_mode == 'selfpaced'): ema_tmp = 0.001 * self.valid_task_loss_prev + (1 - 0.001) * self.ema_valid_task_loss_prev self.ratio_valid_task_loss_prev = (ema_tmp - self.ema_valid_task_loss_prev ) / (self.ema_valid_task_loss_prev + 1e-6) self.ema_valid_task_loss_prev = ema_tmp self.ratio_valid_task_loss_prev = torch.where(self.ratio_valid_task_loss_prev > 0, -1e20, self.ratio_valid_task_loss_prev) self.famo.ratio_valid_task_loss_prev = self.ratio_valid_task_loss_prev if self.args.weighted_loss_mode == 'selfpaced': self.selfpaced_status.update(self.global_steps + 1, epoch + 1, 'train', self.ratio_valid_task_loss_prev) loss, task_loss, task_num, self.famo, self.selfpaced_status = self.loss_func(outputs, batch, self.args.weighted_loss_mode, self.famo, self.selfpaced_status) # print(f'rank: {self.rank}, loss: {loss}, task loss: {task_loss}') if self.args.weighted_loss_mode.startswith('famo'): self.famo.print_loss = self.famo.print_loss / self.args.gradient_accumulation_steps loss_tensor = torch.zeros( [1], device=self.famo.print_loss.device, dtype=self.famo.print_loss.dtype) loss_tensor[0] = self.famo.print_loss.item() else: loss = loss / self.args.gradient_accumulation_steps loss_tensor = torch.zeros( [1], device=loss.device, dtype=loss.dtype) loss_tensor[0] = loss.item() torch.distributed.all_reduce(loss_tensor) torch.distributed.all_reduce(task_loss, op=torch.distributed.ReduceOp.SUM) torch.distributed.all_reduce(task_num, op=torch.distributed.ReduceOp.SUM) reduce_loss = loss_tensor.sum() / torch.distributed.get_world_size() if has_inf_or_nan(reduce_loss): print_rank_0(f'There have nan loss.') self.skipped_steps += 1 skipped = True else: self.accumulated_loss += reduce_loss.item() mean_task_loss = task_loss / torch.distributed.get_world_size() self.accumulated_task_loss += mean_task_loss.cpu().numpy() self.accumulated_task_num += task_num.cpu().numpy() loss.backward() self.global_steps += 1 self.famo.global_steps += 1 if step % self.args.gradient_accumulation_steps == 0 or step == len(self.train_dataloader) - 1: if self.args.max_grad_norm is not None and self.args.max_grad_norm > 0: # 如果是fp16,需要unscale。如果是bf16,self.optimizer里没有unscale这个方法 try: self.optimizer.unscale_() except Exception: pass if isinstance(self.model, FSDP): self.model.clip_grad_norm_(self.args.max_grad_norm) else: torch.nn.utils.clip_grad_norm_( self.model.parameters(), self.args.max_grad_norm) self.optimizer.step() overflow = hasattr(self.optimizer, "step_was_skipped") and self.optimizer.step_was_skipped if skipped != overflow: print(f'skipped != overflow!!!!!!!!!!!!!!!!') if not overflow: self.lr_scheduler.step() # if not skipped: # self.optimizer.step() # self.lr_scheduler.step() self.optimizer.zero_grad() if (self.args.weighted_loss_mode.startswith('famo_valid') or self.args.weighted_loss_mode == 'selfpaced') and not skipped: # if self.args.weighted_loss_mode.startswith('famo_valid') and not skipped 
and step % self.args.famo_interval == 0: # delete caches # gc.collect() # torch.cuda.empty_cache() # self.model.eval() # if (step // self.args.famo_interval) % self.valid_dataloader_length == 0: if step % self.valid_dataloader_length == 0: valid_iterator = iter(self.valid_dataloader) valid_step = step % self.valid_dataloader_length if self.famo_resume: self.famo_resume = False for i in range(valid_step): v_batch = next(valid_iterator) v_batch = next(valid_iterator) if self.args.weighted_loss_mode == 'selfpaced': self.valid_task_loss_prev = self.self_paced_evaluate(self.global_steps, epoch + 1, v_batch, self.ratio_valid_task_loss_prev) else: with torch.autocast(device_type='cuda', dtype=torch.bfloat16): valid_outputs = self.model( input_ids=v_batch['input_ids'].to(self.device), attention_mask=v_batch['attention_mask'].to(self.device), position_ids=v_batch['position_ids'].to(self.device) ) valid_loss, valid_task_loss, valid_task_num, _, _ = self.loss_func(valid_outputs, v_batch, self.args.weighted_loss_mode) if self.args.weighted_loss_mode.startswith('famo_valid_ema'): torch.distributed.all_reduce(valid_task_loss, op=torch.distributed.ReduceOp.SUM) valid_task_loss /= torch.distributed.get_world_size() self.valid_task_loss_prev = valid_task_loss.clone().detach() # if self.famo.first_valid_step and self.args.resume_from_checkpoint == 'true': if self.args.resume_from_checkpoint == 'true': self.ema_valid_task_loss_prev = self.valid_task_loss_prev # self.scaler.scale(valid_loss).backward() valid_loss.backward() valid_loss_item = valid_loss.item() if self.args.max_grad_norm is not None and self.args.max_grad_norm > 0: # 如果是fp16,需要unscale。如果是bf16,self.optimizer里没有unscale这个方法 try: self.optimizer.unscale_() except Exception: pass if isinstance(self.model, FSDP): self.model.clip_grad_norm_(self.args.max_grad_norm) else: torch.nn.utils.clip_grad_norm_( self.model.parameters(), self.args.max_grad_norm) # print_rank_0(f'valid_loss {valid_loss.item()}, task loss: {valid_task_loss}, valid step: {valid_step}') self.famo.update(valid_task_loss) self.optimizer.zero_grad() step_time = time.time() - step_start
step_tflops = get_tflops_megatron(self.args.total_model_param, self.args.hidden_size, self.args.num_hidden_layers,
0
2023-11-02 01:37:01+00:00
16k
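Each record in this dump pairs a repository file's import block and a large excerpt of its preceding code with the single source line that follows, plus bookkeeping values (a token count, a snippet index, a timestamp, and a context-length bucket such as 16k). As a minimal, hedged sketch of how one such record could be turned into a next-line completion example — the dict keys "imports", "preceding_code", and "target_line" below are hypothetical names chosen for illustration, not guaranteed by the dump itself:

def build_completion_example(record: dict) -> tuple[str, str]:
    # Prompt: the file's imports followed by the code that precedes the held-out line.
    prompt = record["imports"].rstrip() + "\n\n" + record["preceding_code"].rstrip() + "\n"
    # Target: the single line the model is expected to produce next.
    target = record["target_line"].strip()
    return prompt, target

# Hypothetical usage with toy values shaped like the records in this dump:
example = {
    "imports": "import torch",
    "preceding_code": "x = torch.ones(2, 2)",
    "target_line": "y = x.sum()",
}
prompt, target = build_completion_example(example)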
bytedance/cryostar
projects/star/train_density.py
[ { "identifier": "StarfileDataSet", "path": "cryostar/utils/dataio.py", "snippet": "class StarfileDataSet(Dataset):\n\n def __init__(self, cfg: StarfileDatasetConfig):\n super().__init__()\n self.cfg = cfg\n self.df = starfile.read(Path(cfg.starfile_path))\n\n if \"optics\"...
import os
import os.path as osp
import einops
import lightning.pytorch as pl
import numpy as np
import torch
from lightning.pytorch.strategies import DDPStrategy
from lightning.pytorch.utilities import rank_zero_only
from torch.utils.data import DataLoader
from tqdm import tqdm
from mmengine import mkdir_or_exist
from cryostar.utils.dataio import StarfileDataSet, StarfileDatasetConfig
from cryostar.nerf.volume_utils import ImplicitFourierVolume
from cryostar.utils.transforms import SpatialGridTranslate, FourierGridTranslate
from cryostar.utils.ctf_utils import CTFRelion, CTFCryoDRGN
from cryostar.utils.fft_utils import (fourier_to_primal_2d, primal_to_fourier_2d)
from cryostar.utils.latent_space_utils import sample_along_pca, get_nearest_point, cluster_kmeans
from cryostar.utils.misc import (pl_init_exp, create_circular_mask, log_to_current, pretty_dict)
from cryostar.utils.losses import calc_kl_loss
from cryostar.utils.ml_modules import VAEEncoder, reparameterize
from cryostar.utils.mrc_tools import save_mrc
from miscs import infer_ctf_params_from_config
10,998
log_to_current = rank_zero_only(log_to_current) TASK_NAME = "density" class CryoModel(pl.LightningModule): def __init__(self, cfg, dataset): super().__init__() self.cfg = cfg self.dataset = dataset self.z_dim = cfg.model.z_dim self.history_saved_dirs = [] if cfg.extra_input_data_attr.given_z is None and self.z_dim != 0: if cfg.model.enc_space == "real": self.encoder = VAEEncoder(self.cfg.data_process.down_side_shape**2, cfg.model.hidden, self.z_dim, num_hidden_layers=4) elif cfg.model.enc_space == "fourier": self.encoder = VAEEncoder(2 * self.cfg.data_process.down_side_shape**2, cfg.model.hidden, self.z_dim, num_hidden_layers=4) else: raise NotImplementedError if cfg.model.shift_method == "interp": self.translate = SpatialGridTranslate(self.cfg.data_process.down_side_shape, ) log_to_current("We will deprecate `model.shift_method=interp` in a future version, use `model.shift_method=fft` instead.") elif cfg.model.shift_method == "fft": self.f_translate = FourierGridTranslate(self.cfg.data_process.down_side_shape, ) else: raise NotImplementedError ctf_params = infer_ctf_params_from_config(cfg) if cfg.model.ctf == "v1": self.ctf = CTFRelion(**ctf_params, num_particles=len(dataset)) log_to_current("We will deprecate `model.ctf=v1` in a future version, use `model.ctf=v2` instead.") elif cfg.model.ctf == "v2": self.ctf = CTFCryoDRGN(**ctf_params, num_particles=len(dataset)) else: raise NotImplementedError log_to_current(ctf_params) self.vol = ImplicitFourierVolume( self.z_dim, self.cfg.data_process.down_side_shape, self.cfg.loss.mask_rad_for_image_loss, { "net_type": cfg.model.net_type, "pe_dim": self.cfg.data_process.down_side_shape, "D": self.cfg.data_process.down_side_shape, "pe_type": cfg.model.pe_type, "force_symmetry": False, "hidden": cfg.model.hidden, }) mask = create_circular_mask(self.cfg.data_process.down_side_shape, self.cfg.data_process.down_side_shape, None, self.cfg.data_process.down_side_shape // 2 * self.cfg.loss.mask_rad_for_image_loss,) self.register_buffer("mask", torch.from_numpy(mask)) if cfg.extra_input_data_attr.given_z is not None: self.register_buffer("given_z", torch.from_numpy(np.load(cfg.extra_input_data_attr.given_z))) if getattr(self.cfg.extra_input_data_attr, "ckpt_path", None) is not None: log_to_current(f"load checkpoint from {self.cfg.extra_input_data_attr.ckpt_path}") state_dict = torch.load(self.cfg.extra_input_data_attr.ckpt_path, map_location=self.device) self.vol.load_state_dict(state_dict) def _get_save_dir(self): save_dir = os.path.join(self.cfg.work_dir, f"{self.current_epoch:04d}_{self.global_step:07d}") mkdir_or_exist(save_dir) return save_dir def process_image(self, batch): R = batch["rotmat"] bsz = len(R) trans = torch.cat([ batch["shiftY"].float().reshape(bsz, 1, 1) / self.cfg.data_process.down_apix, batch["shiftX"].float().reshape(bsz, 1, 1) / self.cfg.data_process.down_apix ], dim=2) proj_in = batch["proj"].to(self.device) if self.cfg.model.shift_method == "interp": proj = self.translate.transform(proj_in.squeeze(1), trans.to(self.device)) elif self.cfg.model.shift_method == "fft": fproj = primal_to_fourier_2d(proj_in) fproj = self.f_translate.transform(fproj.squeeze(1), trans.to(self.device)) proj = fourier_to_primal_2d(fproj) if self.cfg.model.shift_data: return proj, proj else: return proj_in, proj def training_step(self, batch, batch_idx): R = batch["rotmat"] bsz = len(R) proj_in, proj_out = self.process_image(batch) f_proj_in = primal_to_fourier_2d(proj_in) if self.z_dim != 0: if self.cfg.extra_input_data_attr.given_z is not None: z = 
self.given_z[batch["idx"]].reshape(bsz, -1) kld_loss = 0.0 else: if self.cfg.model.enc_space == "fourier": enc_input = einops.rearrange(torch.view_as_real(f_proj_in), "b 1 ny nx c2 -> b (1 ny nx c2)", c2=2) elif self.cfg.model.enc_space == "real": enc_input = einops.rearrange(proj_in, "b 1 ny nx -> b (1 ny nx)") mu, log_var = self.encoder(enc_input)
z = reparameterize(mu, log_var)
15
2023-11-06 07:15:26+00:00
16k
UMass-Foundation-Model/CoVLM
transformers/src/transformers/models/sew/configuration_sew.py
[ { "identifier": "PretrainedConfig", "path": "transformers/src/transformers/configuration_utils.py", "snippet": "class PretrainedConfig(PushToHubMixin):\n r\"\"\"\n Base class for all configuration classes. Handles a few parameters common to all models' configurations as well as\n methods for lo...
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
12,428
# coding=utf-8
# Copyright 2021 ASAPP Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" SEW model configuration"""

logger = logging.get_logger(__name__)

SEW_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
    # See all SEW models at https://huggingface.co/models?filter=sew
}
class SEWConfig(PretrainedConfig):
0
2023-11-07 04:23:57+00:00
16k
HKU-BAL/ClairS-TO
src/realign_reads.py
[ { "identifier": "subprocess_popen", "path": "shared/utils.py", "snippet": "BASIC_BASES = set(\"ACGTU\")\nWARNING = '\\033[93m'\nERROR = '\\033[91m'\nENDC = '\\033[0m'\ndef log_error(log):\ndef log_warning(log):\ndef is_file_exists(file_name, suffix=\"\"):\ndef is_folder_exists(folder_name, suffix=\"\"):...
import sys
import os
import shlex
import ctypes
import re
import subprocess
import shared.param as param
from subprocess import PIPE
from argparse import ArgumentParser, SUPPRESS
from collections import defaultdict
from shared.utils import subprocess_popen, reference_sequence_from, IUPAC_base_to_ACGT_base_dict as BASE2ACGT, log_error
from shared.interval_tree import bed_tree_from
from shared.intervaltree.intervaltree import IntervalTree
14,016
start = reference_position end = reference_position + advance for ins_idx in range(start, end): pileup[ins_idx]["X"] += 1 # deletion consumes reference reference_position += advance # reset advance advance = 0 yield chunk_start, chunk_end yield None, None def reads_realignment(args): POS = args.pos args.ctg_start = POS - args.realign_flanking_window args.ctg_end = POS + args.realign_flanking_window bed_file_path = args.bed_fn extend_bed = args.extend_bed fasta_file_path = args.ref_fn ctg_name = args.ctg_name ctg_start = args.ctg_start ctg_end = args.ctg_end samtools_execute_command = args.samtools bam_file_path = args.bam_fn min_mq = args.min_mq min_coverage = args.min_coverage is_bed_file_given = bed_file_path is not None is_ctg_name_given = ctg_name is not None read_fn = args.read_fn global test_pos test_pos = None is_ctg_range_given = is_ctg_name_given and ctg_start is not None and ctg_end is not None ref_regions = [] reads_regions = [] reference_start, reference_end = None, None if is_ctg_range_given: extend_start = ctg_start - max_window_size extend_end = ctg_end + max_window_size reads_regions.append(region_from(ctg_name=ctg_name, ctg_start=extend_start, ctg_end=extend_end)) reference_start, reference_end = ctg_start - expandReferenceRegion, ctg_end + expandReferenceRegion reference_start = 1 if reference_start < 1 else reference_start ref_regions.append(region_from(ctg_name=ctg_name, ctg_start=reference_start, ctg_end=reference_end)) elif is_ctg_name_given: reads_regions.append(region_from(ctg_name=ctg_name)) ref_regions.append(region_from(ctg_name=ctg_name)) reference_start = 1 reference_sequence = reference_sequence_from( samtools_execute_command=samtools_execute_command, fasta_file_path=fasta_file_path, regions=ref_regions ) if reference_sequence is None or len(reference_sequence) == 0: sys.exit("[ERROR] Failed to load reference sequence from file ({}).".format(fasta_file_path)) tree = bed_tree_from(bed_file_path=bed_file_path) if is_bed_file_given and ctg_name not in tree: sys.exit("[ERROR] ctg_name({}) not exists in bed file({}).".format(ctg_name, bed_file_path)) bed_option = ' -L {}'.format(extend_bed) if extend_bed else "" bed_option = ' -L {}'.format(bed_file_path) if is_bed_file_given else bed_option mq_option = ' -q {}'.format(min_mq) if min_mq > 0 else "" samtools_view_command = "{} view -h {} {}".format(samtools_execute_command, bam_file_path, " ".join(reads_regions)) + mq_option + bed_option samtools_view_process = subprocess_popen( shlex.split(samtools_view_command) ) if read_fn and read_fn == 'PIPE': save_file_fp = TensorStdout(sys.stdout) elif read_fn: save_file_fp = subprocess_popen(shlex.split("{} view -bh - -o {}".format(samtools_execute_command, read_fn + ( '.{}_{}'.format(ctg_start, ctg_end) if is_ctg_range_given and not test_pos else ""))), stdin=PIPE, stdout=PIPE) reference_start_0_based = 0 if reference_start is None else (reference_start - 1) header = [] add_header = False aligned_reads = defaultdict() pileup = defaultdict(lambda: {"X": 0}) samtools_view_generator = samtools_view_generator_from(samtools_view_process=samtools_view_process, aligned_reads=aligned_reads, pileup=pileup, ctg_name=ctg_name, reference_sequence=reference_sequence, reference_start_0_based=reference_start_0_based, header=header, center_pos=POS) pre_aligned_reads = defaultdict() while True: chunk_start, chunk_end = next(samtools_view_generator) if chunk_start is None: break if not add_header: save_file_fp.stdin.write(''.join(header)) add_header = True variant_allele_list = [[position, 
pileup[position]["X"]] for position in list(pileup.keys())] candidate_position_list = [(position, support_allele_count) for position, support_allele_count in variant_allele_list if support_allele_count >= min_coverage and position >= chunk_start - region_expansion_in_bp - 1 and position <= chunk_end + region_expansion_in_bp - 1] candidate_position_list.sort(key=(lambda x: x[0])) candidate_position_list = [item for item in candidate_position_list if item[0] >= POS - args.max_distance and item[0] < POS + args.max_distance] if not len(aligned_reads) or not len(candidate_position_list): continue if len(pre_aligned_reads): # update the read in previous chunk for read_name, read in pre_aligned_reads.items(): aligned_reads[read_name] = read region_dict = {} split_region_size = max_window_size
# BSD 3-Clause License # # Copyright 2023 The University of Hong Kong, Department of Computer Science # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. realign_chunk_size = 5000 min_dbg_mapping_quality = min_dbg_base_quality = 20 region_expansion_in_bp = expand_align_ref_region = 20 min_windows_distance = expand_align_ref_region * 4 max_window_size = max_region_reads_num = 1000 expandReferenceRegion = 100000 realigner_mod = os.path.join(*(os.path.split(__file__)[:-1] + ('realign/realigner',))) dbg_mod = os.path.join(*(os.path.split(__file__)[:-1] + ('realign/debruijn_graph',))) if not os.path.exists(realigner_mod) or not os.path.exists(dbg_mod): # try to find modules in clair3 python_path = subprocess.run('which python', stdout=subprocess.PIPE, shell=True).stdout.decode().rstrip() conda_prefix = os.path.dirname(os.path.dirname(python_path)) clair3_realign_path = os.path.join(conda_prefix, 'bin', 'preprocess', 'realign') clair3_realigner_mod = os.path.join(clair3_realign_path, 'realigner') clair3_dbg_mod = os.path.join(clair3_realign_path, 'debruijn_graph') if os.path.exists(clair3_realigner_mod) and os.path.exists(clair3_dbg_mod): realigner_mod = clair3_realigner_mod dbg_mod = clair3_dbg_mod else: print(log_error("[ERROR] `realigner` or `debruijn_graph` submodule not found in conda environment, pls install clair3-illumina package!")) sys.exit(1) realigner = ctypes.cdll.LoadLibrary(realigner_mod) dbg = ctypes.cdll.LoadLibrary(dbg_mod) class StructPointer(ctypes.Structure): _fields_ = [("position", ctypes.c_int * max_region_reads_num), ("cigar_string", ctypes.c_char_p * max_region_reads_num), ] class DBGPointer(ctypes.Structure): _fields_ = [("consensus_size", ctypes.c_int), ("consensus", ctypes.c_char_p * 200), ] # Read class for storing read information cigar_indel_re = r"(\d+)(D)" cigarRe = r"(\d+)([MIDNSHP=X])" graph_min_mapping_quality = 14 def get_len(seq, cigar): if 'D' not in cigar: return len(seq) indel_length = 0 for m in re.finditer(cigar_indel_re, cigar): indel_length += int(m.group(1)) return len(seq) + indel_length def print_ed(s1, s2): match_str = "" for x, 
y in zip(s1, s2): if x == y: match_str += "|" else: match_str += " " print(s1) print(match_str) print(s2) class Read(object): def __init__(self, read_start, seq, cigar, mapping_quality, base_quality, strand, raw_base_quality=None, unalign=False, read_name=None, read_id=None, flag=None, RNEXT=0, PNEXT=0, TLEN=0, phasing=None): self.read_start = read_start self.cigar = cigar self.mapping_quality = mapping_quality self.seq = seq self.base_quality = base_quality self.read_id = read_id self.read_end = self.read_start + get_len(seq, cigar) self.strand = strand self.graph_mq = True if self.mapping_quality >= graph_min_mapping_quality else False self.raw_base_quality = raw_base_quality self.read_name = read_name self.region = {} self.region_cigar = None self.region_start = None self.flag = str(flag) self.RNEXT = RNEXT self.PNEXT = PNEXT self.TLEN = PNEXT self.test_pos = None self.best_cigar = cigar self.best_pos = read_start self.best_align_score = None self.phasing = phasing def set_realign_flag(self): self.unalign = True def count_align_score(self, cigar): score = 0 for m in re.finditer(cigarRe, cigar): l, op, = int(m.group(1)), m.group(2) if op in 'MX=S': continue elif op in 'ID': score += l return score def set_realignment_info(self, region_start, realignment_cigar, realignment_start): realignment_cigar = realignment_cigar.replace('X', 'M') if realignment_cigar == self.cigar and realignment_start == self.read_start: return if self.best_align_score and realignment_cigar == self.best_cigar and realignment_start == self.best_pos: return realignment_align_score = self.count_align_score(realignment_cigar) if not self.best_align_score or realignment_align_score >= self.best_align_score: self.best_cigar = realignment_cigar self.best_pos = realignment_start self.best_align_score = realignment_align_score def decode_region(self, region_str): if region_str == '-' or '-' not in region_str: return region_str = region_str.rstrip().split('_') for region in region_str: region, cigar, pos = region.split('-') region, pos = int(region), int(pos) self.region[region] = [cigar, pos] def byte(x): return bytes(x, encoding="utf8") def find_max_overlap_index(query_region, search_regions): def overlap_length(region1, region2): return max(0, (min(region1[1], region2[1]) - max(region1[0], region2[0]))) overlap_lengths = [overlap_length(query_region, search_region) for search_region in search_regions] argmax = max(range(len(search_regions)), key=lambda idx: overlap_lengths[idx]) return None if overlap_lengths[argmax] == 0 else argmax def get_reference_seq(sequence, start, end, reference_start_0_based): if end < start: end, start = start, end return sequence[start - reference_start_0_based: end - reference_start_0_based] def phredscore2raw_score(qual): return ord(qual) - 33 def evc_base_from(base): return base if base == "N" else BASE2ACGT[base] def region_from(ctg_name, ctg_start=None, ctg_end=None): """ 1-based region string [start, end] """ if ctg_name is None: return "" if (ctg_start is None) != (ctg_end is None): return "" if ctg_start is None and ctg_end is None: return "{}".format(ctg_name) return "{}:{}-{}".format(ctg_name, ctg_start, ctg_end) class TensorStdout(object): def __init__(self, handle): self.stdin = handle def __del__(self): self.stdin.close() def get_halpotype_tag(samtools_view_columns): found_hp_tag = False tag = [c for c in samtools_view_columns if 'HP:i:' in c] if not len(tag) or len(tag[0]) < 6 or not tag[0][5].isdigit(): return None return tag[0][5] def 
is_too_many_soft_clipped_bases_for_a_read_from(CIGAR): soft_clipped_bases = 0 total_alignment_positions = 0 advance = 0 for c in str(CIGAR): if c.isdigit(): advance = advance * 10 + int(c) continue if c == "S": soft_clipped_bases += advance total_alignment_positions += advance advance = 0 # skip a read less than 55% aligned return 1.0 - float(soft_clipped_bases) / (total_alignment_positions + 1) < 0.55 def samtools_view_generator_from(samtools_view_process, aligned_reads, pileup, ctg_name, reference_sequence, reference_start_0_based, header, center_pos=None): CHUNK_SIZE = realign_chunk_size chunk_start, chunk_end = None, None for row_id, row in enumerate(samtools_view_process.stdout): if row[0] == '@': header.append(row) continue columns = row.strip().split() RNAME = columns[2] if RNAME != ctg_name: continue read_name = columns[0] FLAG = int(columns[1]) POS = int(columns[3]) - 1 # switch from 1-base to 0-base to match sequence index MAPQ = int(columns[4]) CIGAR = columns[5] SEQ = columns[9].upper() # uppercase for SEQ (regexp is \*|[A-Za-z=.]+) RNEXT = columns[6] PNEXT = columns[7] TLEN = columns[8] reference_position = POS query_position = 0 raw_base_quality = columns[10] QUAL = [phredscore2raw_score(item) for item in raw_base_quality] STRAND = (16 == (FLAG & 16)) HP_TAG = get_halpotype_tag(columns[11:]) read_name += "_" + str(int(STRAND)) # distinguish two strand if chunk_start is None: chunk_start = POS chunk_end = chunk_start + CHUNK_SIZE if POS >= chunk_end + region_expansion_in_bp: yield chunk_start, chunk_end chunk_start += CHUNK_SIZE chunk_end += CHUNK_SIZE read = Read(read_start=POS, seq=SEQ, cigar=CIGAR, mapping_quality=MAPQ, base_quality=QUAL, strand=STRAND, raw_base_quality=raw_base_quality, read_name=read_name, flag=FLAG, PNEXT=PNEXT, RNEXT=RNEXT, TLEN=TLEN, phasing=HP_TAG) if CIGAR == "*" or is_too_many_soft_clipped_bases_for_a_read_from(CIGAR): continue aligned_reads[read_name] = read if MAPQ < min_dbg_mapping_quality: continue advance = 0 for c in str(CIGAR): if c.isdigit(): advance = advance * 10 + int(c) continue if c == '=': reference_position += advance query_position += advance elif c == "M" or c == 'X': for _ in range(advance): if QUAL[query_position] >= min_dbg_base_quality: reference_base = reference_sequence[reference_position - reference_start_0_based] # 0 base query_base = SEQ[query_position] if reference_base in 'ACGT' and query_base != reference_base: pileup[reference_position]['X'] += 1 reference_position += 1 query_position += 1 elif c == "I" or c == 'S': pre_base = reference_sequence[reference_position - reference_start_0_based - 1] ins_base_quality = QUAL[query_position: query_position + advance] out_of_region = reference_position < chunk_start - region_expansion_in_bp or reference_position > chunk_end + region_expansion_in_bp if not out_of_region and pre_base in 'ACGT' and ( sum([True for bq in ins_base_quality if bq < min_dbg_base_quality]) == 0): # skip the bad seq start = reference_position - advance end = reference_position + advance for ins_idx in range(start, end): pileup[ins_idx]["X"] += 1 # insertion consumes query query_position += advance elif c == "D": out_of_region = reference_position < chunk_start - region_expansion_in_bp or reference_position > chunk_end + region_expansion_in_bp pre_base = reference_sequence[reference_position - reference_start_0_based - 1] # 0-base if not out_of_region and pre_base in 'ACGT': start = reference_position end = reference_position + advance for ins_idx in range(start, end): pileup[ins_idx]["X"] += 1 # deletion 
consumes reference reference_position += advance # reset advance advance = 0 yield chunk_start, chunk_end yield None, None def reads_realignment(args): POS = args.pos args.ctg_start = POS - args.realign_flanking_window args.ctg_end = POS + args.realign_flanking_window bed_file_path = args.bed_fn extend_bed = args.extend_bed fasta_file_path = args.ref_fn ctg_name = args.ctg_name ctg_start = args.ctg_start ctg_end = args.ctg_end samtools_execute_command = args.samtools bam_file_path = args.bam_fn min_mq = args.min_mq min_coverage = args.min_coverage is_bed_file_given = bed_file_path is not None is_ctg_name_given = ctg_name is not None read_fn = args.read_fn global test_pos test_pos = None is_ctg_range_given = is_ctg_name_given and ctg_start is not None and ctg_end is not None ref_regions = [] reads_regions = [] reference_start, reference_end = None, None if is_ctg_range_given: extend_start = ctg_start - max_window_size extend_end = ctg_end + max_window_size reads_regions.append(region_from(ctg_name=ctg_name, ctg_start=extend_start, ctg_end=extend_end)) reference_start, reference_end = ctg_start - expandReferenceRegion, ctg_end + expandReferenceRegion reference_start = 1 if reference_start < 1 else reference_start ref_regions.append(region_from(ctg_name=ctg_name, ctg_start=reference_start, ctg_end=reference_end)) elif is_ctg_name_given: reads_regions.append(region_from(ctg_name=ctg_name)) ref_regions.append(region_from(ctg_name=ctg_name)) reference_start = 1 reference_sequence = reference_sequence_from( samtools_execute_command=samtools_execute_command, fasta_file_path=fasta_file_path, regions=ref_regions ) if reference_sequence is None or len(reference_sequence) == 0: sys.exit("[ERROR] Failed to load reference sequence from file ({}).".format(fasta_file_path)) tree = bed_tree_from(bed_file_path=bed_file_path) if is_bed_file_given and ctg_name not in tree: sys.exit("[ERROR] ctg_name({}) not exists in bed file({}).".format(ctg_name, bed_file_path)) bed_option = ' -L {}'.format(extend_bed) if extend_bed else "" bed_option = ' -L {}'.format(bed_file_path) if is_bed_file_given else bed_option mq_option = ' -q {}'.format(min_mq) if min_mq > 0 else "" samtools_view_command = "{} view -h {} {}".format(samtools_execute_command, bam_file_path, " ".join(reads_regions)) + mq_option + bed_option samtools_view_process = subprocess_popen( shlex.split(samtools_view_command) ) if read_fn and read_fn == 'PIPE': save_file_fp = TensorStdout(sys.stdout) elif read_fn: save_file_fp = subprocess_popen(shlex.split("{} view -bh - -o {}".format(samtools_execute_command, read_fn + ( '.{}_{}'.format(ctg_start, ctg_end) if is_ctg_range_given and not test_pos else ""))), stdin=PIPE, stdout=PIPE) reference_start_0_based = 0 if reference_start is None else (reference_start - 1) header = [] add_header = False aligned_reads = defaultdict() pileup = defaultdict(lambda: {"X": 0}) samtools_view_generator = samtools_view_generator_from(samtools_view_process=samtools_view_process, aligned_reads=aligned_reads, pileup=pileup, ctg_name=ctg_name, reference_sequence=reference_sequence, reference_start_0_based=reference_start_0_based, header=header, center_pos=POS) pre_aligned_reads = defaultdict() while True: chunk_start, chunk_end = next(samtools_view_generator) if chunk_start is None: break if not add_header: save_file_fp.stdin.write(''.join(header)) add_header = True variant_allele_list = [[position, pileup[position]["X"]] for position in list(pileup.keys())] candidate_position_list = [(position, support_allele_count) for position, 
support_allele_count in variant_allele_list if support_allele_count >= min_coverage and position >= chunk_start - region_expansion_in_bp - 1 and position <= chunk_end + region_expansion_in_bp - 1] candidate_position_list.sort(key=(lambda x: x[0])) candidate_position_list = [item for item in candidate_position_list if item[0] >= POS - args.max_distance and item[0] < POS + args.max_distance] if not len(aligned_reads) or not len(candidate_position_list): continue if len(pre_aligned_reads): # update the read in previous chunk for read_name, read in pre_aligned_reads.items(): aligned_reads[read_name] = read region_dict = {} split_region_size = max_window_size
region_tree = IntervalTree()
2
2023-11-07 04:39:16+00:00
16k
the-siesta-group/edfio
tests/test_edfplus_header.py
[ { "identifier": "AnonymizedDateError", "path": "edfio/edf.py", "snippet": "class AnonymizedDateError(ValueError):\n \"\"\"Raised when trying to access an anonymized startdate or birthdate.\"\"\"" }, { "identifier": "Edf", "path": "edfio/edf.py", "snippet": "class Edf:\n \"\"\"Pytho...
import datetime
import numpy as np
import pytest
from edfio import AnonymizedDateError, Edf, EdfSignal, Patient, Recording
13,683
@pytest.fixture()
def patient():
    return Patient._from_str("MCH-0234567 F 02-MAY-1951 Haagse_Harry")


@pytest.fixture()
def recording():
    return Recording._from_str(
        "Startdate 02-MAR-2002 EMG561 BK/JOP Sony. MNC R Median Nerve."
    )


@pytest.fixture()
def edf(patient, recording):
return Edf([EdfSignal(np.arange(10), 1)], patient=patient, recording=recording)
1
2023-11-09 09:53:27+00:00
16k
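If a model's guess for a held-out line (for instance the edfio fixture body quoted a few lines above) were to be scored, exact match after whitespace stripping and a simple edit-similarity ratio are the obvious lightweight checks. The snippet below is a hedged illustration only, not code taken from any of the repositories quoted in this dump:

import difflib

def exact_match(pred: str, ref: str) -> bool:
    # Ignore surrounding whitespace so indentation-only differences do not count.
    return pred.strip() == ref.strip()

def edit_similarity(pred: str, ref: str) -> float:
    # SequenceMatcher ratio in [0, 1]; 1.0 means the strings are identical.
    return difflib.SequenceMatcher(None, pred.strip(), ref.strip()).ratio()

ref = "return Edf([EdfSignal(np.arange(10), 1)], patient=patient, recording=recording)"
assert exact_match(ref, ref) and edit_similarity(ref, ref) == 1.0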
sb-ai-lab/HypEx
hypex/matcher.py
[ { "identifier": "FaissMatcher", "path": "hypex/algorithms/faiss_matcher.py", "snippet": "class FaissMatcher:\n \"\"\"A class used to match instances using Faiss library.\"\"\"\n\n def __init__(\n self,\n df: pd.DataFrame,\n outcomes: str,\n treatment: st...
import logging
import pickle
import numpy as np
import pandas as pd
from typing import Union
from tqdm.auto import tqdm
from .algorithms.faiss_matcher import FaissMatcher
from .algorithms.no_replacement_matching import MatcherNoReplacement
from .selectors.feature_selector import FeatureSelector
from .selectors.spearman_filter import SpearmanFilter
from .selectors.outliers_filter import OutliersFilter
from .selectors.base_filtration import const_filtration, nan_filtration
from .utils.validators import random_feature
from .utils.validators import random_treatment
from .utils.validators import subset_refuter
from .utils.validators import test_significance
12,387
info_col: Columns with id, date or metadata, not taking part in calculations. Defaults to None weights: weights for numeric columns in order to increase matching quality by weighted feature. By default, is None (all features have the same weight equal to 1). Example: {'feature_1': 10} base_filtration: To use or not base filtration of features in order to remove all constant or almost all constant, bool. Default is False. generate_report: Flag to create report. Defaults to True report_feat_select_dir: Folder for report files. Defaults to "report_feature_selector" timeout: Limit work time of code LAMA. Defaults to 600 n_threads: Maximum number of threads. Defaults to 1 n_folds: Number of folds for cross-validation. Defaults to 4 verbose: Flag to show process stages. Defaults to 2 use_algos: List of names of LAMA algorithms for feature selection. Defaults to ["lgb"] same_target_threshold: Threshold for correlation coefficient filter (Spearman). Default to 0.7 interquartile_coeff: Percent for drop outliers. Default to 1.5 drop_outliers_by_percentile: Flag to drop outliers by custom percentiles. Defaults to True min_percentile: Minimum percentile to drop outliers. Defaults to 0.02 max_percentile: Maximum percentile to drop outliers. Defaults to 0.98 n_neighbors: Number of neighbors to match (in fact you may see more then n matches as every match may have more then one neighbor with the same distance). Default value is 1. silent: Write logs in debug mode pbar: Display progress bar while get index """ if use_algos is None: use_algos = USE_ALGOS self.input_data = input_data if outcome is None: outcome = list() self.outcomes = outcome if type(outcome) == list else [outcome] self.treatment = treatment self.group_col = group_col self.info_col = info_col self.outcome_type = outcome_type self.weights = weights self.generate_report = generate_report self.report_feat_select_dir = report_feat_select_dir self.timeout = timeout self.n_threads = n_threads self.n_folds = n_folds self.verbose = verbose self.use_algos = use_algos self.same_target_threshold = same_target_threshold self.interquartile_coeff = interquartile_coeff self.mode_percentile = drop_outliers_by_percentile self.min_percentile = min_percentile self.max_percentile = max_percentile self.base_filtration = base_filtration self.features_importance = None self.matcher = None self.val_dict = None self.pval_dict = None self.new_treatment = None self.validate = None self.dropped_features = [] self.n_neighbors = n_neighbors self.silent = silent self.pbar = pbar self._preprocessing_data() def _convert_categorical_to_dummy(self): """Converts categorical variables to dummy variables. Returns: Data with categorical variables converted to dummy variables. 
""" info_col = self.info_col if self.info_col is not None else [] group_col = [self.group_col] if self.group_col is not None else [] columns_to_drop = info_col + group_col if columns_to_drop is not None: data = self.input_data.drop(columns=columns_to_drop) else: data = self.input_data dummy_data = pd.get_dummies(data, drop_first=True, dtype=np.uint8) return dummy_data def _preprocessing_data(self): """Converts categorical features into dummy variables.""" info_col = self.info_col if self.info_col is not None else [] group_col = [self.group_col] if self.group_col is not None else [] columns_to_drop = info_col + group_col + self.outcomes + [self.treatment] if self.base_filtration: filtered_features = nan_filtration(self.input_data.drop(columns=columns_to_drop)) self.dropped_features = [f for f in self.input_data.columns if f not in filtered_features + columns_to_drop] self.input_data = self.input_data[filtered_features + columns_to_drop] nan_counts = self.input_data.isna().sum().sum() if nan_counts != 0: self._log(f"Number of NaN values filled with zeros: {nan_counts}", silent=False) self.input_data = self.input_data.fillna(0) if self.group_col is not None: group_col = self.input_data[[self.group_col]] if self.info_col is not None: info_col = self.input_data[self.info_col] self.input_data = self._convert_categorical_to_dummy() if self.group_col is not None: self.input_data = pd.concat([self.input_data, group_col], axis=1) if self.info_col is not None: self.input_data = pd.concat([self.input_data, info_col], axis=1) if self.base_filtration:
"""Base Matcher class.""" REPORT_FEAT_SELECT_DIR = "report_feature_selector" REPORT_PROP_MATCHER_DIR = "report_matcher" NAME_REPORT = "lama_interactive_report.html" N_THREADS = 1 N_FOLDS = 4 RANDOM_STATE = 123 TEST_SIZE = 0.2 TIMEOUT = 600 VERBOSE = 2 USE_ALGOS = ["lgb"] PROP_SCORES_COLUMN = "prop_scores" GENERATE_REPORT = True SAME_TARGET_THRESHOLD = 0.7 OUT_INTER_COEFF = 1.5 OUT_MODE_PERCENT = True OUT_MIN_PERCENT = 0.02 OUT_MAX_PERCENT = 0.98 logger = logging.getLogger("hypex") console_out = logging.StreamHandler() logging.basicConfig( handlers=(console_out,), format="[%(asctime)s | %(name)s | %(levelname)s]: %(message)s", datefmt="%d.%m.%Y %H:%M:%S", level=logging.INFO, ) class Matcher: """Class for compile full pipeline of Matching in Causal Inference task. Matcher steps: - Read, analyze data - Feature selection via LightAutoML - Converting a dataset with features to another space via Cholesky decomposition In the new space, the distance L2 becomes equivalent to the Mahalanobis distance. This allows us to use faiss to search for nearest objects, which can search only by L2 metric, but without violating the methodology of matching, for which it is important to count by the Mahalanobis distance - Finding the nearest neighbors for each unit (with duplicates) using faiss. For each of the control group, neighbors from the target group are matched and vice versa. - Calculation bias - Creating matched df (Wide df with pairs) - Calculation metrics: ATE, ATT, ATC, p-value, and сonfidence intervals - Calculation quality: PS-test, KS test, SMD test - Returns metrics as dataframe, quality results as dict of df's and df_matched - After receiving the result, the result should be validated using :func:`~hypex.matcher.Matcher.validate_result` Example: Common usecase - base pipeline for matching >>> # Base info >>> treatment = "treatment" # Column name with info about 'treatment' 0 or 1 >>> target = "target" # Column name with target >>> >>> # Optional >>> info_col = ["user_id", 'address'] # Columns that will not participate in the match and are informative. >>> group_col = "CatCol" # Column name for strict comparison (for a categorical feature) >>> >>> # Matching >>> model = Matcher(data, outcome=target, treatment=treatment, info_col=info_col, group_col=group_col) >>> features = model.lama_feature_select() # Feature selection via lama >>> results, quality, df_matched = model.estimate(features=some_features) # Performs matching >>> >>> model.validate_result() """ def __init__( self, input_data: pd.DataFrame, treatment: str, outcome: Union[str, list] = None, outcome_type: str = "numeric", group_col: str = None, info_col: list = None, weights: dict = None, base_filtration: bool = False, generate_report: bool = GENERATE_REPORT, report_feat_select_dir: str = REPORT_FEAT_SELECT_DIR, timeout: int = TIMEOUT, n_threads: int = N_THREADS, n_folds: int = N_FOLDS, verbose: bool = VERBOSE, use_algos: list = None, same_target_threshold: float = SAME_TARGET_THRESHOLD, interquartile_coeff: float = OUT_INTER_COEFF, drop_outliers_by_percentile: bool = OUT_MODE_PERCENT, min_percentile: float = OUT_MIN_PERCENT, max_percentile: float = OUT_MAX_PERCENT, n_neighbors: int = 1, silent: bool = True, pbar: bool = True, ): """Initialize the Matcher object. Args: input_data: Input dataframe outcome: Target column treatment: Column determine control and test groups outcome_type: Values type of target column. Defaults to "numeric" group_col: Column for grouping. Defaults to None. 
info_col: Columns with id, date or metadata, not taking part in calculations. Defaults to None weights: weights for numeric columns in order to increase matching quality by weighted feature. By default, is None (all features have the same weight equal to 1). Example: {'feature_1': 10} base_filtration: To use or not base filtration of features in order to remove all constant or almost all constant, bool. Default is False. generate_report: Flag to create report. Defaults to True report_feat_select_dir: Folder for report files. Defaults to "report_feature_selector" timeout: Limit work time of code LAMA. Defaults to 600 n_threads: Maximum number of threads. Defaults to 1 n_folds: Number of folds for cross-validation. Defaults to 4 verbose: Flag to show process stages. Defaults to 2 use_algos: List of names of LAMA algorithms for feature selection. Defaults to ["lgb"] same_target_threshold: Threshold for correlation coefficient filter (Spearman). Default to 0.7 interquartile_coeff: Percent for drop outliers. Default to 1.5 drop_outliers_by_percentile: Flag to drop outliers by custom percentiles. Defaults to True min_percentile: Minimum percentile to drop outliers. Defaults to 0.02 max_percentile: Maximum percentile to drop outliers. Defaults to 0.98 n_neighbors: Number of neighbors to match (in fact you may see more then n matches as every match may have more then one neighbor with the same distance). Default value is 1. silent: Write logs in debug mode pbar: Display progress bar while get index """ if use_algos is None: use_algos = USE_ALGOS self.input_data = input_data if outcome is None: outcome = list() self.outcomes = outcome if type(outcome) == list else [outcome] self.treatment = treatment self.group_col = group_col self.info_col = info_col self.outcome_type = outcome_type self.weights = weights self.generate_report = generate_report self.report_feat_select_dir = report_feat_select_dir self.timeout = timeout self.n_threads = n_threads self.n_folds = n_folds self.verbose = verbose self.use_algos = use_algos self.same_target_threshold = same_target_threshold self.interquartile_coeff = interquartile_coeff self.mode_percentile = drop_outliers_by_percentile self.min_percentile = min_percentile self.max_percentile = max_percentile self.base_filtration = base_filtration self.features_importance = None self.matcher = None self.val_dict = None self.pval_dict = None self.new_treatment = None self.validate = None self.dropped_features = [] self.n_neighbors = n_neighbors self.silent = silent self.pbar = pbar self._preprocessing_data() def _convert_categorical_to_dummy(self): """Converts categorical variables to dummy variables. Returns: Data with categorical variables converted to dummy variables. 
""" info_col = self.info_col if self.info_col is not None else [] group_col = [self.group_col] if self.group_col is not None else [] columns_to_drop = info_col + group_col if columns_to_drop is not None: data = self.input_data.drop(columns=columns_to_drop) else: data = self.input_data dummy_data = pd.get_dummies(data, drop_first=True, dtype=np.uint8) return dummy_data def _preprocessing_data(self): """Converts categorical features into dummy variables.""" info_col = self.info_col if self.info_col is not None else [] group_col = [self.group_col] if self.group_col is not None else [] columns_to_drop = info_col + group_col + self.outcomes + [self.treatment] if self.base_filtration: filtered_features = nan_filtration(self.input_data.drop(columns=columns_to_drop)) self.dropped_features = [f for f in self.input_data.columns if f not in filtered_features + columns_to_drop] self.input_data = self.input_data[filtered_features + columns_to_drop] nan_counts = self.input_data.isna().sum().sum() if nan_counts != 0: self._log(f"Number of NaN values filled with zeros: {nan_counts}", silent=False) self.input_data = self.input_data.fillna(0) if self.group_col is not None: group_col = self.input_data[[self.group_col]] if self.info_col is not None: info_col = self.input_data[self.info_col] self.input_data = self._convert_categorical_to_dummy() if self.group_col is not None: self.input_data = pd.concat([self.input_data, group_col], axis=1) if self.info_col is not None: self.input_data = pd.concat([self.input_data, info_col], axis=1) if self.base_filtration:
filtered_features = const_filtration(self.input_data.drop(columns=columns_to_drop))
5
2023-11-01 08:58:57+00:00
16k
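The hypex record above describes the Matcher preprocessing step: categorical features are one-hot encoded with pandas and remaining NaNs are filled with zeros. A minimal illustrative sketch of that idea on a toy dataframe — the column names and values here are assumptions, not anything taken from the record:

import numpy as np
import pandas as pd

# Toy frame: one categorical column and one numeric column with a missing value.
df = pd.DataFrame({"city": ["a", "b", "a", "b"], "age": [30.0, 41.0, np.nan, 25.0]})

# Fill NaNs with zeros, mirroring the fillna(0) step in _preprocessing_data.
df = df.fillna(0)

# One-hot encode object columns as uint8 dummies, dropping the first level.
dummies = pd.get_dummies(df, drop_first=True, dtype=np.uint8)
print(dummies.columns.tolist())  # ['age', 'city_b']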
tianhaowuhz/human-assisting-dex-grasp
Runners/TrainGFPPO.py
[ { "identifier": "GFPPO", "path": "Algorithms/ppo/gf_ppo_update.py", "snippet": "class GFPPO:\n def __init__(self,\n vec_env,\n cfg_train,\n device='cpu',\n sampler='sequential',\n log_dir='run',\n is_testi...
import isaacgym import condexenvs import torch import os import sys from Algorithms.ppo import GFPPO from utils.config import load_cfg, get_args, set_np_formatting
11,134
sys.path.append(os.path.dirname(os.path.dirname(__file__))) if __name__ == '__main__': set_np_formatting() args = get_args() cfg_train, logdir = load_cfg(args) ''' change for different method ''' cfg_train['setting']['grad_scale'] = 1.0 reward_normalize = False cfg_train['policy']['pointnet_version'] = 'pt' if args.exp_name == 'ours': reward_type = "ori_similarity+height+sr" reward_normalize = False sub_obs_type = "joint+fingertipjoint+wrist+objpcl+gf" cfg_train['setting']['action_type'] = "joint" cfg_train['setting']['sub_action_type'] = "add+jointscale" cfg_train['policy']['pretrain_pointnet'] = True ''' policy ''' cfg_train['policy']['hand_pcl'] = False ''' training setting ''' cfg_train['learn']['nsteps'] = 50 cfg_train['learn']['noptepochs'] = 2 cfg_train['learn']['nminibatches'] = int(args.num_envs*cfg_train['learn']['nsteps']/64) if cfg_train['policy']['pointnet_version'] == 'pt': cfg_train['learn']['optim_stepsize'] = 0.0003 else: cfg_train['learn']['optim_stepsize'] = 0.0003 cfg_train['learn']['desired_kl'] = 0.016 cfg_train['learn']['gamma'] = 0.99 envs = condexenvs.make( seed=args.seed, task="ShadowHandCon", num_envs=args.num_envs, sim_device=f"cuda:{args.run_device_id}", rl_device=f"cuda:{args.run_device_id}", graphics_device_id = -1, headless=args.headless, mode = args.mode, eval_times=args.eval_times, method = args.method, constrained = args.constrained, reward_type = reward_type, reward_normalize = reward_normalize, sub_obs_type = sub_obs_type, dataset_type = args.dataset_type, ) envs.reset(env_init=True) learn_cfg = cfg_train["learn"] is_testing = learn_cfg["test"] # Override resume and testing flags if they are passed as parameters. if args.model_dir != "": chkpt_path = args.model_dir logdir = logdir + "_seed{}".format(args.seed)
sys.path.append(os.path.dirname(os.path.dirname(__file__))) if __name__ == '__main__': set_np_formatting() args = get_args() cfg_train, logdir = load_cfg(args) ''' change for different method ''' cfg_train['setting']['grad_scale'] = 1.0 reward_normalize = False cfg_train['policy']['pointnet_version'] = 'pt' if args.exp_name == 'ours': reward_type = "ori_similarity+height+sr" reward_normalize = False sub_obs_type = "joint+fingertipjoint+wrist+objpcl+gf" cfg_train['setting']['action_type'] = "joint" cfg_train['setting']['sub_action_type'] = "add+jointscale" cfg_train['policy']['pretrain_pointnet'] = True ''' policy ''' cfg_train['policy']['hand_pcl'] = False ''' training setting ''' cfg_train['learn']['nsteps'] = 50 cfg_train['learn']['noptepochs'] = 2 cfg_train['learn']['nminibatches'] = int(args.num_envs*cfg_train['learn']['nsteps']/64) if cfg_train['policy']['pointnet_version'] == 'pt': cfg_train['learn']['optim_stepsize'] = 0.0003 else: cfg_train['learn']['optim_stepsize'] = 0.0003 cfg_train['learn']['desired_kl'] = 0.016 cfg_train['learn']['gamma'] = 0.99 envs = condexenvs.make( seed=args.seed, task="ShadowHandCon", num_envs=args.num_envs, sim_device=f"cuda:{args.run_device_id}", rl_device=f"cuda:{args.run_device_id}", graphics_device_id = -1, headless=args.headless, mode = args.mode, eval_times=args.eval_times, method = args.method, constrained = args.constrained, reward_type = reward_type, reward_normalize = reward_normalize, sub_obs_type = sub_obs_type, dataset_type = args.dataset_type, ) envs.reset(env_init=True) learn_cfg = cfg_train["learn"] is_testing = learn_cfg["test"] # Override resume and testing flags if they are passed as parameters. if args.model_dir != "": chkpt_path = args.model_dir logdir = logdir + "_seed{}".format(args.seed)
runner = GFPPO(vec_env=envs,
0
2023-11-09 06:08:40+00:00
16k
ApolloAuto/apollo-model-centerpoint
tools/create_bevformer_nus_infos.py
[ { "identifier": "NuscenesMVDataset", "path": "paddle3d/datasets/nuscenes/nuscenes_multiview_det.py", "snippet": "class NuscenesMVDataset(NuscenesDetDataset):\n \"\"\"\n Nuscecens dataset for multi-view camera detection task.\n \"\"\"\n DATASET_NAME = \"Nuscenes\"\n\n def __init__(self,\n ...
import argparse import os import pickle import numpy as np import tqdm from pathlib import Path from nuscenes import NuScenes from nuscenes.can_bus.can_bus_api import NuScenesCanBus from nuscenes.utils import splits as nuscenes_split from nuscenes.utils.data_classes import Box as NuScenesBox from nuscenes.utils.geometry_utils import transform_matrix from pyquaternion import Quaternion from paddle3d.datasets.nuscenes import NuscenesMVDataset from paddle3d.datasets.nuscenes.nuscenes_det import NuscenesDetDataset from paddle3d.utils.logger import logger
10,934
rotation = last_pose.pop('orientation') can_bus.extend(pos) can_bus.extend(rotation) for key in last_pose.keys(): can_bus.extend(pose[key]) # 16 elements can_bus.extend([0., 0.]) return np.array(can_bus) def fill_trainval_infos(nusc, nusc_can_bus, train_scenes, val_scenes, test=False, max_sweeps=10): """Generate the train/val infos from the raw data. """ train_nusc_infos = [] val_nusc_infos = [] frame_idx = 0 msg = "Begin to generate a info of nuScenes dataset." for sample_idx in logger.range(len(nusc.sample), msg=msg): sample = nusc.sample[sample_idx] lidar_token = sample['data']['LIDAR_TOP'] sd_rec = nusc.get('sample_data', sample['data']['LIDAR_TOP']) cs_record = nusc.get('calibrated_sensor', sd_rec['calibrated_sensor_token']) pose_record = nusc.get('ego_pose', sd_rec['ego_pose_token']) lidar_path, boxes, _ = nusc.get_sample_data(lidar_token) assert os.path.exists(lidar_path) # absolute path can_bus = _get_can_bus_info(nusc, nusc_can_bus, sample) info = { 'lidar_token': lidar_token, 'lidar_path': nusc.get('sample_data', lidar_token)['filename'], # relative path 'token': sample['token'], 'prev': sample['prev'], 'next': sample['next'], 'can_bus': can_bus, 'frame_idx': frame_idx, # temporal related info 'sweeps': [], 'cams': dict(), 'scene_token': sample['scene_token'], 'lidar2ego_translation': cs_record['translation'], 'lidar2ego_rotation': cs_record['rotation'], 'ego2global_translation': pose_record['translation'], 'ego2global_rotation': pose_record['rotation'], 'timestamp': sample['timestamp'], } if sample['next'] == '': frame_idx = 0 else: frame_idx += 1 l2e_r = info['lidar2ego_rotation'] l2e_t = info['lidar2ego_translation'] e2g_r = info['ego2global_rotation'] e2g_t = info['ego2global_translation'] l2e_r_mat = Quaternion(l2e_r).rotation_matrix e2g_r_mat = Quaternion(e2g_r).rotation_matrix # obtain 6 image's information per frame camera_types = [ 'CAM_FRONT', 'CAM_FRONT_RIGHT', 'CAM_FRONT_LEFT', 'CAM_BACK', 'CAM_BACK_LEFT', 'CAM_BACK_RIGHT', ] for cam in camera_types: cam_token = sample['data'][cam] cam_path, _, cam_intrinsic = nusc.get_sample_data(cam_token) cam_info = obtain_sensor2top(nusc, cam_token, l2e_t, l2e_r_mat, e2g_t, e2g_r_mat, cam) cam_info.update(cam_intrinsic=cam_intrinsic) info['cams'].update({cam: cam_info}) # obtain sweeps for a single key-frame sd_rec = nusc.get('sample_data', sample['data']['LIDAR_TOP']) sweeps = [] while len(sweeps) < max_sweeps: if not sd_rec['prev'] == '': sweep = obtain_sensor2top(nusc, sd_rec['prev'], l2e_t, l2e_r_mat, e2g_t, e2g_r_mat, 'lidar') sweeps.append(sweep) sd_rec = nusc.get('sample_data', sd_rec['prev']) else: break info['sweeps'] = sweeps # obtain annotation if not test: annotations = [ nusc.get('sample_annotation', token) for token in sample['anns'] ] locs = np.array([b.center for b in boxes]).reshape(-1, 3) dims = np.array([b.wlh for b in boxes]).reshape(-1, 3) rots = np.array([b.orientation.yaw_pitch_roll[0] for b in boxes]).reshape(-1, 1) velocity = np.array( [nusc.box_velocity(token)[:2] for token in sample['anns']]) valid_flag = np.array( [(anno['num_lidar_pts'] + anno['num_radar_pts']) > 0 for anno in annotations], dtype=bool).reshape(-1) # convert velo from global to lidar for i in range(len(boxes)): velo = np.array([*velocity[i], 0.0]) velo = velo @ np.linalg.inv(e2g_r_mat).T @ np.linalg.inv( l2e_r_mat).T velocity[i] = velo[:2] names = [b.name for b in boxes] for i in range(len(names)): # NuscenesDetDataset.LABEL_MAP
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ------------------------------------------------------------------------ # Modify from https://github.com/fundamentalvision/BEVFormer/blob/master/tools/create_data.py # Copyright (c) OpenMMLab. All rights reserved. # ------------------------------------------------------------------------ # ------------------------------------------------------------------------ # Modified from mmdetection3d (https://github.com/open-mmlab/mmdetection3d) # Copyright (c) OpenMMLab. All rights reserved. # ------------------------------------------------------------------------ SENSORS = [ 'CAM_FRONT', 'CAM_FRONT_RIGHT', 'CAM_BACK_RIGHT', 'CAM_BACK', 'CAM_BACK_LEFT', 'CAM_FRONT_LEFT' ] def parse_args(): parser = argparse.ArgumentParser( description='Create infos for kitti dataset.') parser.add_argument( '--dataset_root', default='data/nuscenes', help='Path of the dataset.', type=str) parser.add_argument( '--can_bus_root', type=str, default='data/nuscenes', help='specify the root path of nuScenes canbus') parser.add_argument( '--save_dir', default='data/nuscenes', help='Path to save the generated database.', type=str) parser.add_argument( '--mode', default='train', help='mode to generate dataset.', type=str) parser.add_argument( '--num_sweep', default=10, help='nummber of sweep frames between two key frame.', type=int) return parser.parse_args() def is_filepath(x): return isinstance(x, str) or isinstance(x, Path) def get_available_scenes(nusc): """Get available scenes from the input nuscenes class. """ available_scenes = [] logger.info('total scene num: {}'.format(len(nusc.scene))) for scene in nusc.scene: scene_token = scene['token'] scene_rec = nusc.get('scene', scene_token) sample_rec = nusc.get('sample', scene_rec['first_sample_token']) sd_rec = nusc.get('sample_data', sample_rec['data']['LIDAR_TOP']) has_more_frames = True scene_not_exist = False while has_more_frames: lidar_path, boxes, _ = nusc.get_sample_data(sd_rec['token']) lidar_path = str(lidar_path) if os.getcwd() in lidar_path: # path from lyftdataset is absolute path lidar_path = lidar_path.split(f'{os.getcwd()}/')[-1] # relative path if not is_filepath(lidar_path): scene_not_exist = True break else: break if scene_not_exist: continue available_scenes.append(scene) logger.info('exist scene num: {}'.format(len(available_scenes))) return available_scenes def obtain_sensor2top(nusc, sensor_token, l2e_t, l2e_r_mat, e2g_t, e2g_r_mat, sensor_type='lidar'): """Obtain the info with RT matric from general sensor to Top LiDAR. 
""" sd_rec = nusc.get('sample_data', sensor_token) cs_record = nusc.get('calibrated_sensor', sd_rec['calibrated_sensor_token']) pose_record = nusc.get('ego_pose', sd_rec['ego_pose_token']) data_path = str(nusc.get_sample_data_path(sd_rec['token'])) # absolute path if os.getcwd() in data_path: # path from lyftdataset is absolute path data_path = data_path.split(f'{os.getcwd()}/')[-1] # relative path sweep = { 'data_path': nusc.get('sample_data', sd_rec['token'])['filename'], # relative path 'type': sensor_type, 'sample_data_token': sd_rec['token'], 'sensor2ego_translation': cs_record['translation'], 'sensor2ego_rotation': cs_record['rotation'], 'ego2global_translation': pose_record['translation'], 'ego2global_rotation': pose_record['rotation'], 'timestamp': sd_rec['timestamp'] } l2e_r_s = sweep['sensor2ego_rotation'] l2e_t_s = sweep['sensor2ego_translation'] e2g_r_s = sweep['ego2global_rotation'] e2g_t_s = sweep['ego2global_translation'] # obtain the RT from sensor to Top LiDAR # sweep->ego->global->ego'->lidar l2e_r_s_mat = Quaternion(l2e_r_s).rotation_matrix e2g_r_s_mat = Quaternion(e2g_r_s).rotation_matrix R = (l2e_r_s_mat.T @ e2g_r_s_mat.T) @ ( np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T) T = (l2e_t_s @ e2g_r_s_mat.T + e2g_t_s) @ ( np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T) T -= e2g_t @ (np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T ) + l2e_t @ np.linalg.inv(l2e_r_mat).T sweep['sensor2lidar_rotation'] = R.T # points @ R.T + T sweep['sensor2lidar_translation'] = T return sweep def _get_can_bus_info(nusc, nusc_can_bus, sample): scene_name = nusc.get('scene', sample['scene_token'])['name'] sample_timestamp = sample['timestamp'] try: pose_list = nusc_can_bus.get_messages(scene_name, 'pose') except: return np.zeros(18) # server scenes do not have can bus information. can_bus = [] # during each scene, the first timestamp of can_bus may be large than the first sample's timestamp last_pose = pose_list[0] for i, pose in enumerate(pose_list): if pose['utime'] > sample_timestamp: break last_pose = pose _ = last_pose.pop('utime') # useless pos = last_pose.pop('pos') rotation = last_pose.pop('orientation') can_bus.extend(pos) can_bus.extend(rotation) for key in last_pose.keys(): can_bus.extend(pose[key]) # 16 elements can_bus.extend([0., 0.]) return np.array(can_bus) def fill_trainval_infos(nusc, nusc_can_bus, train_scenes, val_scenes, test=False, max_sweeps=10): """Generate the train/val infos from the raw data. """ train_nusc_infos = [] val_nusc_infos = [] frame_idx = 0 msg = "Begin to generate a info of nuScenes dataset." 
for sample_idx in logger.range(len(nusc.sample), msg=msg): sample = nusc.sample[sample_idx] lidar_token = sample['data']['LIDAR_TOP'] sd_rec = nusc.get('sample_data', sample['data']['LIDAR_TOP']) cs_record = nusc.get('calibrated_sensor', sd_rec['calibrated_sensor_token']) pose_record = nusc.get('ego_pose', sd_rec['ego_pose_token']) lidar_path, boxes, _ = nusc.get_sample_data(lidar_token) assert os.path.exists(lidar_path) # absolute path can_bus = _get_can_bus_info(nusc, nusc_can_bus, sample) info = { 'lidar_token': lidar_token, 'lidar_path': nusc.get('sample_data', lidar_token)['filename'], # relative path 'token': sample['token'], 'prev': sample['prev'], 'next': sample['next'], 'can_bus': can_bus, 'frame_idx': frame_idx, # temporal related info 'sweeps': [], 'cams': dict(), 'scene_token': sample['scene_token'], 'lidar2ego_translation': cs_record['translation'], 'lidar2ego_rotation': cs_record['rotation'], 'ego2global_translation': pose_record['translation'], 'ego2global_rotation': pose_record['rotation'], 'timestamp': sample['timestamp'], } if sample['next'] == '': frame_idx = 0 else: frame_idx += 1 l2e_r = info['lidar2ego_rotation'] l2e_t = info['lidar2ego_translation'] e2g_r = info['ego2global_rotation'] e2g_t = info['ego2global_translation'] l2e_r_mat = Quaternion(l2e_r).rotation_matrix e2g_r_mat = Quaternion(e2g_r).rotation_matrix # obtain 6 image's information per frame camera_types = [ 'CAM_FRONT', 'CAM_FRONT_RIGHT', 'CAM_FRONT_LEFT', 'CAM_BACK', 'CAM_BACK_LEFT', 'CAM_BACK_RIGHT', ] for cam in camera_types: cam_token = sample['data'][cam] cam_path, _, cam_intrinsic = nusc.get_sample_data(cam_token) cam_info = obtain_sensor2top(nusc, cam_token, l2e_t, l2e_r_mat, e2g_t, e2g_r_mat, cam) cam_info.update(cam_intrinsic=cam_intrinsic) info['cams'].update({cam: cam_info}) # obtain sweeps for a single key-frame sd_rec = nusc.get('sample_data', sample['data']['LIDAR_TOP']) sweeps = [] while len(sweeps) < max_sweeps: if not sd_rec['prev'] == '': sweep = obtain_sensor2top(nusc, sd_rec['prev'], l2e_t, l2e_r_mat, e2g_t, e2g_r_mat, 'lidar') sweeps.append(sweep) sd_rec = nusc.get('sample_data', sd_rec['prev']) else: break info['sweeps'] = sweeps # obtain annotation if not test: annotations = [ nusc.get('sample_annotation', token) for token in sample['anns'] ] locs = np.array([b.center for b in boxes]).reshape(-1, 3) dims = np.array([b.wlh for b in boxes]).reshape(-1, 3) rots = np.array([b.orientation.yaw_pitch_roll[0] for b in boxes]).reshape(-1, 1) velocity = np.array( [nusc.box_velocity(token)[:2] for token in sample['anns']]) valid_flag = np.array( [(anno['num_lidar_pts'] + anno['num_radar_pts']) > 0 for anno in annotations], dtype=bool).reshape(-1) # convert velo from global to lidar for i in range(len(boxes)): velo = np.array([*velocity[i], 0.0]) velo = velo @ np.linalg.inv(e2g_r_mat).T @ np.linalg.inv( l2e_r_mat).T velocity[i] = velo[:2] names = [b.name for b in boxes] for i in range(len(names)): # NuscenesDetDataset.LABEL_MAP
if names[i] in NuscenesDetDataset.LABEL_MAP:
1
2023-11-08 07:08:03+00:00
16k
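The nuScenes info-generation record above chains calibrated-sensor and ego-pose transforms so that each sensor is expressed in the key frame's top-lidar coordinates. A hedged sketch of the same frame chaining using 4x4 homogeneous matrices; the calibration and pose numbers below are made up for illustration, and only numpy plus pyquaternion's Quaternion(...).rotation_matrix are assumed:

import numpy as np
from pyquaternion import Quaternion

def to_hom(quat, translation):
    # Build a 4x4 homogeneous transform from a rotation quaternion and a translation.
    mat = np.eye(4)
    mat[:3, :3] = quat.rotation_matrix
    mat[:3, 3] = translation
    return mat

# Illustrative calibration / pose values (not taken from the dataset record).
sensor2ego_sweep = to_hom(Quaternion(axis=[0, 0, 1], angle=np.pi / 2), [1.5, 0.0, 1.6])
ego2global_sweep = to_hom(Quaternion(axis=[0, 0, 1], angle=0.10), [100.0, 50.0, 0.0])
ego2global_key   = to_hom(Quaternion(axis=[0, 0, 1], angle=0.12), [102.0, 50.5, 0.0])
lidar2ego_key    = to_hom(Quaternion(axis=[0, 0, 1], angle=0.0), [0.9, 0.0, 1.8])

# sensor -> ego(sweep) -> global -> ego(key) -> lidar: the frame chain obtain_sensor2top walks.
sensor2lidar = (np.linalg.inv(lidar2ego_key) @ np.linalg.inv(ego2global_key)
                @ ego2global_sweep @ sensor2ego_sweep)
# R and T play the role of the sensor-to-lidar rotation/translation the record stores.
R, T = sensor2lidar[:3, :3], sensor2lidar[:3, 3]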
ml4bio/RhoFold
rhofold/model/structure_module.py
[ { "identifier": "Linear", "path": "rhofold/model/primitives.py", "snippet": "class Linear(nn.Linear):\n \"\"\"\n A Linear layer with built-in nonstandard initializations. Called just\n like torch.nn.Linear.\n\n Implements the initializers in 1.11.4, plus some additional ones found\n in th...
import math import torch import torch.nn as nn import torch.nn.functional as F from typing import Optional, Tuple, Sequence from rhofold.model.primitives import Linear, LayerNorm from rhofold.utils.rigid_utils import Rigid from rhofold.utils.tensor_utils import ( dict_multimap, permute_final_dims, flatten_final_dims, ) from einops import rearrange from rhofold.utils.alphabet import RNAAlphabet from rhofold.utils.converter import RNAConverter
10,951
self.layers.append(layer) self.linear_out = Linear(self.c_hidden, self.no_angles * 2) self.relu = nn.ReLU() def forward( self, s: torch.Tensor, s_initial: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor]: """ Args: s: [*, C_hidden] single embedding s_initial: [*, C_hidden] single embedding as of the start of the StructureModule Returns: [*, no_angles, 2] predicted angles """ # [*, C_hidden] s_initial = self.relu(s_initial) s_initial = self.linear_initial(s_initial) s = self.relu(s) s = self.linear_in(s) s = s + s_initial for l in self.layers: s = l(s) s = self.relu(s) # [*, no_angles * 2] s = self.linear_out(s) # [*, no_angles, 2] s = s.view(s.shape[:-1] + (-1, 2)) unnormalized_s = s norm_denom = torch.sqrt( torch.clamp( torch.sum(s ** 2, dim=-1, keepdim=True), min=self.eps, ) ) s = s / norm_denom return unnormalized_s, s class InvariantPointAttention(nn.Module): """ Implements Algorithm 22. """ def __init__( self, c_s: int, c_z: int, c_hidden: int, no_heads: int, no_qk_points: int, no_v_points: int, inf: float = 1e5, eps: float = 1e-8, ): """ Args: c_s: Single representation channel dimension c_z: Pair representation channel dimension c_hidden: Hidden channel dimension no_heads: Number of attention heads no_qk_points: Number of query/key points to generate no_v_points: Number of value points to generate """ super(InvariantPointAttention, self).__init__() self.c_s = c_s self.c_z = c_z self.c_hidden = c_hidden self.no_heads = no_heads self.no_qk_points = no_qk_points self.no_v_points = no_v_points self.inf = inf self.eps = eps hc = self.c_hidden * self.no_heads self.linear_q = Linear(self.c_s, hc) self.linear_kv = Linear(self.c_s, 2 * hc) hpq = self.no_heads * self.no_qk_points * 3 self.linear_q_points = Linear(self.c_s, hpq) hpkv = self.no_heads * (self.no_qk_points + self.no_v_points) * 3 self.linear_kv_points = Linear(self.c_s, hpkv) # hpv = self.no_heads * self.no_v_points * 3 self.linear_b = Linear(self.c_z, self.no_heads) self.head_weights = nn.Parameter(torch.zeros((no_heads))) # ipa_point_weights_init_(self.head_weights) concat_out_dim = self.no_heads * ( self.c_z + self.c_hidden + self.no_v_points * 4 ) self.linear_out = Linear(concat_out_dim, self.c_s) self.softmax = nn.Softmax(dim=-1) self.softplus = nn.Softplus() def forward( self, s: torch.Tensor, z: Optional[torch.Tensor],
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. class RefineNet(nn.Module): """""" def __init__(self, dim = 64, is_pos_emb = True, n_layer = 4, enable = True, **kwargs): """Constructor function.""" super().__init__() self.is_pos_emb = is_pos_emb self.alphabet = RNAAlphabet.from_architecture('RNA') self.embed_tokens = nn.Embedding(len(self.alphabet), dim) self.enable = enable if self.is_pos_emb: self.embed_positions = PosEmbedding(4096, dim, self.alphabet.padding_idx) self.refine_layer0 = ResEGNN(corrections=n_layer, dims_in=dim) self.refine_layer1 = ResEGNN(corrections=n_layer, dims_in=dim) self.refine_layer2 = ResEGNN(corrections=n_layer, dims_in=dim) self.refine_layer3 = ResEGNN(corrections=n_layer, dims_in=dim) def forward(self, tokens, cords): """Perform the forward pass. Args: Returns: """ if not self.enable: return cords tokens = tokens[:, 0, :] tokens = tokens.unsqueeze(-1).repeat(1, 1, 23) b, l, n = tokens.shape cords = cords.reshape([b, l, n, 3]) fea = self.embed_tokens(tokens) b, l, n, _ = fea.shape if self.is_pos_emb: fea += self.embed_positions(tokens.reshape(b * l, n)).view(fea.size()) out = self.refine_layer0(fea.reshape([ b * l, n, -1]), cords.reshape([ b * l, n, -1]), is_fea = True) fea, cords = out[-1] fea = fea.reshape([b, l, n, -1]).transpose(1,2) cords = cords.reshape([b, l, n, -1]).transpose(1,2) out = self.refine_layer1(fea.reshape([ b * n, l, -1]), cords.reshape([ b * n, l, -1]), is_fea = True) fea, cords = out[-1] fea = fea.reshape([b, n, l, -1]).transpose(1,2) cords = cords.reshape([b, n, l, -1]).transpose(1,2) out = self.refine_layer2(fea.reshape([ b * l, n, -1]), cords.reshape([ b * l, n, -1]), is_fea = True) fea, cords = out[-1] fea = fea.reshape([b, l, n, -1]).transpose(1,2) cords = cords.reshape([b, l, n, -1]).transpose(1,2) out = self.refine_layer3(fea.reshape([ b * n, l, -1]), cords.reshape([ b * n, l, -1]), is_fea = True) fea, cords = out[-1] cords = cords.reshape([b, n, l, -1]).transpose(1,2) cords = cords.reshape([b, l * n, 3]) return cords class Swish_(torch.nn.Module): def forward(self, x): return x * x.sigmoid() SiLU = torch.nn.SiLU if hasattr(torch.nn, 'SiLU') else Swish_ class CoorsNorm(torch.nn.Module): def __init__(self, eps=1e-8): super().__init__() self.eps = eps self.fn = torch.nn.LayerNorm(1) def forward(self, coors): norm = coors.norm(dim=-1, keepdim=True) normed_coors = coors / norm.clamp(min=self.eps) phase = self.fn(norm) return phase * normed_coors # classes class EGNN(torch.nn.Module): def __init__( self, dim, m_dim=32, ): super().__init__() ''' # Most of the code in this file is based on egnn-pytorch by lucidrains. 
''' edge_input_dim = (dim * 2) + 1 self.edge_mlp = torch.nn.Sequential( torch.nn.Linear(edge_input_dim, edge_input_dim * 2), SiLU(), torch.nn.Linear(edge_input_dim * 2, m_dim), SiLU() ) self.coors_norm = CoorsNorm() self.node_mlp = torch.nn.Sequential( torch.nn.Linear(dim + m_dim, dim * 2), SiLU(), torch.nn.Linear(dim * 2, dim), ) self.coors_mlp = torch.nn.Sequential( torch.nn.Linear(m_dim, m_dim * 4), SiLU(), torch.nn.Linear(m_dim * 4, 1) ) def forward(self, feats, coors): rel_coors = rearrange(coors, 'b i d -> b i () d') - rearrange(coors, 'b j d -> b () j d') rel_dist = (rel_coors ** 2).sum(dim=-1, keepdim=True) feats_j = rearrange(feats, 'b j d -> b () j d') feats_i = rearrange(feats, 'b i d -> b i () d') feats_i, feats_j = torch.broadcast_tensors(feats_i, feats_j) edge_input = torch.cat((feats_i, feats_j, rel_dist), dim=-1) m_ij = self.edge_mlp(edge_input) coor_weights = self.coors_mlp(m_ij) coor_weights = rearrange(coor_weights, 'b i j () -> b i j') rel_coors = self.coors_norm(rel_coors) scale_factor = 1 / 50.0 coors_out = torch.einsum('b i j, b i j c -> b i c', coor_weights * scale_factor, rel_coors) + coors m_i = m_ij.sum(dim=-2) node_mlp_input = torch.cat((feats, m_i), dim=-1) node_out = self.node_mlp(node_mlp_input) + feats return node_out, coors_out class ResEGNN(torch.nn.Module): def __init__(self, corrections=4, dims_in=41, **kwargs): super().__init__() self.layers = torch.nn.ModuleList([EGNN(dim=dims_in, **kwargs) for _ in range(corrections)]) def forward(self, amino, geom, is_fea = False, keep_last_cords = None): output = [] for layer in self.layers: geom_init = geom amino, geom = layer(amino, geom) if keep_last_cords is not None: geom[:, -keep_last_cords:] = geom_init[:, -keep_last_cords:] output.append([amino, geom]) return output if is_fea else geom class PosEmbedding(nn.Embedding): """ """ def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int): if padding_idx is not None: num_embeddings_ = num_embeddings + padding_idx + 1 else: num_embeddings_ = num_embeddings super().__init__(num_embeddings_, embedding_dim, padding_idx) self.max_positions = num_embeddings def forward(self, input: torch.Tensor): """Input is expected to be of size [bsz x seqlen].""" mask = input.ne(self.padding_idx).int() positions = (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + self.padding_idx return F.embedding( positions, self.weight, self.padding_idx, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.sparse, ) class AngleResnetBlock(nn.Module): def __init__(self, c_hidden): """ Args: c_hidden: Hidden channel dimension """ super(AngleResnetBlock, self).__init__() self.c_hidden = c_hidden self.linear_1 = Linear(self.c_hidden, self.c_hidden) self.linear_2 = Linear(self.c_hidden, self.c_hidden) self.relu = nn.ReLU() def forward(self, a: torch.Tensor) -> torch.Tensor: s_initial = a a = self.relu(a) a = self.linear_1(a) a = self.relu(a) a = self.linear_2(a) return a + s_initial class AngleResnet(nn.Module): """ """ def __init__(self, c_in, c_hidden, no_blocks, no_angles, epsilon): """ Args: c_in: Input channel dimension c_hidden: Hidden channel dimension no_blocks: Number of resnet blocks no_angles: Number of torsion angles to generate epsilon: Small constant for normalization """ super(AngleResnet, self).__init__() self.c_in = c_in self.c_hidden = c_hidden self.no_blocks = no_blocks self.no_angles = no_angles self.eps = epsilon self.linear_in = Linear(self.c_in, self.c_hidden) self.linear_initial = Linear(self.c_in, self.c_hidden) self.layers = nn.ModuleList() 
for _ in range(self.no_blocks): layer = AngleResnetBlock(c_hidden=self.c_hidden) self.layers.append(layer) self.linear_out = Linear(self.c_hidden, self.no_angles * 2) self.relu = nn.ReLU() def forward( self, s: torch.Tensor, s_initial: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor]: """ Args: s: [*, C_hidden] single embedding s_initial: [*, C_hidden] single embedding as of the start of the StructureModule Returns: [*, no_angles, 2] predicted angles """ # [*, C_hidden] s_initial = self.relu(s_initial) s_initial = self.linear_initial(s_initial) s = self.relu(s) s = self.linear_in(s) s = s + s_initial for l in self.layers: s = l(s) s = self.relu(s) # [*, no_angles * 2] s = self.linear_out(s) # [*, no_angles, 2] s = s.view(s.shape[:-1] + (-1, 2)) unnormalized_s = s norm_denom = torch.sqrt( torch.clamp( torch.sum(s ** 2, dim=-1, keepdim=True), min=self.eps, ) ) s = s / norm_denom return unnormalized_s, s class InvariantPointAttention(nn.Module): """ Implements Algorithm 22. """ def __init__( self, c_s: int, c_z: int, c_hidden: int, no_heads: int, no_qk_points: int, no_v_points: int, inf: float = 1e5, eps: float = 1e-8, ): """ Args: c_s: Single representation channel dimension c_z: Pair representation channel dimension c_hidden: Hidden channel dimension no_heads: Number of attention heads no_qk_points: Number of query/key points to generate no_v_points: Number of value points to generate """ super(InvariantPointAttention, self).__init__() self.c_s = c_s self.c_z = c_z self.c_hidden = c_hidden self.no_heads = no_heads self.no_qk_points = no_qk_points self.no_v_points = no_v_points self.inf = inf self.eps = eps hc = self.c_hidden * self.no_heads self.linear_q = Linear(self.c_s, hc) self.linear_kv = Linear(self.c_s, 2 * hc) hpq = self.no_heads * self.no_qk_points * 3 self.linear_q_points = Linear(self.c_s, hpq) hpkv = self.no_heads * (self.no_qk_points + self.no_v_points) * 3 self.linear_kv_points = Linear(self.c_s, hpkv) # hpv = self.no_heads * self.no_v_points * 3 self.linear_b = Linear(self.c_z, self.no_heads) self.head_weights = nn.Parameter(torch.zeros((no_heads))) # ipa_point_weights_init_(self.head_weights) concat_out_dim = self.no_heads * ( self.c_z + self.c_hidden + self.no_v_points * 4 ) self.linear_out = Linear(concat_out_dim, self.c_s) self.softmax = nn.Softmax(dim=-1) self.softplus = nn.Softplus() def forward( self, s: torch.Tensor, z: Optional[torch.Tensor],
r: Rigid,
2
2023-11-01 10:29:08+00:00
16k
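The RhoFold AngleResnet shown above predicts each torsion angle as an unnormalized 2-vector and then projects it onto the unit circle with an epsilon-clamped denominator. A small self-contained sketch of just that normalization step (the shapes are illustrative):

import torch

eps = 1e-8
raw = torch.randn(4, 7, 2)  # [*, no_angles, 2] unnormalized (sin, cos)-like pairs
denom = torch.sqrt(torch.clamp((raw ** 2).sum(dim=-1, keepdim=True), min=eps))
angles = raw / denom        # unit vectors, one per predicted angle
print(torch.allclose(angles.norm(dim=-1), torch.ones(4, 7), atol=1e-5))  # True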
tylerlight071/Project-Cipher
main.py
[ { "identifier": "clear_terminal", "path": "components/common_functions.py", "snippet": "def clear_terminal():\n os.system('cls' if os.name == 'nt' else 'clear')" }, { "identifier": "print_slow", "path": "components/common_functions.py", "snippet": "def print_slow(text, delay=0.00): #...
import msvcrt import os import pickle import sys import time import colorama import pygame from colorama import Fore, Style from components.common_functions import clear_terminal, print_slow, shop_help, help_user, connect_help, mail_help, \ system_help from conversations.calls import intro_call, first_call, second_call, third_call, fourth_call, fifth_call, sixth_call, \ markus_seen_call from conversations.minigame_calls import code_shatter_call from minigames.code_shatter_minigame import code_shatter_minigame from minigames.eye_spy_minigame import port_scanning from systems.level_1.amy.amy_system import AmySystem from systems.level_1.billy.billy_system import BillySystem from systems.level_1.cameras.camera_1 import camera_first from systems.level_1.markus.markus_system import MarkusSystem
13,723
# Set the PYGAME_HIDE_SUPPORT_PROMPT environment variable os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = "1" # Initialize pygame mixer pygame.mixer.init() # Load the bg music file and loop it pygame.mixer.music.load('bg_music.mp3') pygame.mixer.music.play(-1) # sets the volume to 20% (change value to adjust) pygame.mixer.music.set_volume(0.2) # Define the global variables at the module level inventory = [] balance = 300 emails = [] has_read_email = False has_read_file = False has_intro_call = False seen_markus = False evidence = [] amy_system = AmySystem()
# Set the PYGAME_HIDE_SUPPORT_PROMPT environment variable os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = "1" # Initialize pygame mixer pygame.mixer.init() # Load the bg music file and loop it pygame.mixer.music.load('bg_music.mp3') pygame.mixer.music.play(-1) # sets the volume to 20% (change value to adjust) pygame.mixer.music.set_volume(0.2) # Define the global variables at the module level inventory = [] balance = 300 emails = [] has_read_email = False has_read_file = False has_intro_call = False seen_markus = False evidence = [] amy_system = AmySystem()
billy_system = BillySystem()
19
2023-11-06 09:52:13+00:00
16k
ziqi-zhang/TAOISM
python/test/test_bn.py
[ { "identifier": "register_layer", "path": "python/common_net.py", "snippet": "def register_layer(layer, name):\n layer.register_forward_hook(hooking_layer(name))\n layer.register_backward_hook(hooking_layer_backward(name))\n layer_names.append(name)" }, { "identifier": "register_weight_...
import os import sys import numpy as np import torch import torch.distributed as dist from pdb import set_trace as st from torch import optim, nn from python.common_net import register_layer, register_weight_layer, get_layer_weight, get_layer_input, \ get_layer_weight_grad, get_layer_output, get_layer_output_grad, get_layer_input_grad from python.enclave_interfaces import GlobalTensor from python.layers.batch_norm_2d import SecretBatchNorm2dLayer from python.layers.input import SecretInputLayer from python.layers.output import SecretOutputLayer from python.sgx_net import init_communicate, warming_up_cuda, SecretNeuralNetwork, SgdOptimizer from python.utils.basic_utils import ExecutionModeOptions from python.utils.torch_utils import compare_expected_actual, seed_torch
11,022
device_cuda = torch.device("cuda:0") torch.set_printoptions(precision=10) def test_BN(sid=0, master_addr=0, master_port=0, is_compare=False): batch_size = 2 n_img_channel = 256 img_hw = 32 x_shape = [batch_size, n_img_channel, img_hw, img_hw]
device_cuda = torch.device("cuda:0") torch.set_printoptions(precision=10) def test_BN(sid=0, master_addr=0, master_port=0, is_compare=False): batch_size = 2 n_img_channel = 256 img_hw = 32 x_shape = [batch_size, n_img_channel, img_hw, img_hw]
GlobalTensor.init()
8
2023-11-01 10:37:37+00:00
16k
Codra-Ingenierie-Informatique/DataLab
cdl/tests/features/embedded2_unit.py
[ { "identifier": "CDLMainWindow", "path": "cdl/core/gui/main.py", "snippet": "class CDLMainWindow(QW.QMainWindow, AbstractCDLControl, metaclass=CDLMainWindowMeta):\n \"\"\"DataLab main window\n\n Args:\n console: enable internal console\n hide_on_close: True to hide window on close\n ...
from cdl.core.gui.main import CDLMainWindow from cdl.tests.features import embedded1_unit
13,362
# -*- coding: utf-8 -*- # # Licensed under the terms of the BSD 3-Clause # (see cdl/LICENSE for details) """ Application embedded test 2 DataLab main window is simply hidden when closing application. It is shown and raised above other windows when reopening application. """ # guitest: show
# -*- coding: utf-8 -*- # # Licensed under the terms of the BSD 3-Clause # (see cdl/LICENSE for details) """ Application embedded test 2 DataLab main window is simply hidden when closing application. It is shown and raised above other windows when reopening application. """ # guitest: show
class HostWindow(embedded1_unit.AbstractHostWindow):
1
2023-11-09 16:56:03+00:00
16k
lalalamdbf/PLSE_IDRR
src/prompt-tuning/prompt/pipeline_base.py
[ { "identifier": "InputExample", "path": "src/prompt-tuning/prompt/data_utils.py", "snippet": "class InputExample(object):\n \"\"\"A raw input example consisting of segments of text,\n a label for classification task or a target sequence of generation task.\n Other desired information can be pas...
from pickle import FALSE from torch.utils.data.sampler import RandomSampler from transformers.configuration_utils import PretrainedConfig from transformers.generation_utils import GenerationMixin from torch.utils.data import Dataset from typing import * from .data_utils import InputExample, InputFeatures from torch.utils.data._utils.collate import default_collate from tqdm.std import tqdm from transformers.tokenization_utils import PreTrainedTokenizer from transformers.utils.dummy_pt_objects import PreTrainedModel from .utils import TokenizerWrapper from .prompt_base import Template, Verbalizer from collections import defaultdict from collections import namedtuple from torch.utils.data import DataLoader import torch import torch.nn as nn import inspect import numpy as np
10,931
def signature(f): r"""Get the function f 's input arguments. A useful gadget when some function slot might be instantiated into multiple functions. Args: f (:obj:`function`) : the function to get the input arguments. Returns: namedtuple : of args, default, varargs, keywords, respectively.s """ sig = inspect.signature(f) args = [ p.name for p in sig.parameters.values() if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD ] varargs = [ p.name for p in sig.parameters.values() if p.kind == inspect.Parameter.VAR_POSITIONAL ] varargs = varargs[0] if varargs else None keywords = [ p.name for p in sig.parameters.values() if p.kind == inspect.Parameter.VAR_KEYWORD ] keywords = keywords[0] if keywords else None defaults = [ p.default for p in sig.parameters.values() if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD and p.default is not p.empty ] or None argspec = namedtuple('Signature', ['args', 'defaults', 'varargs', 'keywords']) return argspec(args, defaults, varargs, keywords) class PromptDataLoader(object): r""" PromptDataLoader wraps the original dataset. The input data is firstly wrapped with the prompt's template, and then is tokenized by a wrapperd-tokenizer. Args: dataset (:obj:`Dataset` or :obj:`List`): Either a DatasetObject or a list containing the input examples. template (:obj:`Template`): A derived class of :obj:`Template` tokenizer (:obj:`PretrainedTokenizer`): The pretrained tokenizer. tokenizer_wrapper_class (:cls:`TokenizerWrapper`): The class of tokenizer wrapper. max_seq_length (:obj:`int`, optional): The max sequence length of the input ids. It's used to truncate sentences. batch_size (:obj:`int`, optional): The batch_size of data loader teacher_forcing (:obj:`bool`, optional): Whether to fill the mask with target text. Set to true in training generation model. decoder_max_length (:obj:`int`, optional): the decoder maximum length of an encoder-decoder model. predict_eos_token (:obj:`bool`, optional): Whether to predict the <eos> token. Suggest to set to true in generation. truncate_method (:obj:`bool`, optional): the truncate method to use. select from `head`, `tail`, `balanced`. kwargs :Other kwargs that might be passed into a tokenizer wrapper. """ def __init__(self, dataset: Union[Dataset, List], template: Template,
def signature(f): r"""Get the function f 's input arguments. A useful gadget when some function slot might be instantiated into multiple functions. Args: f (:obj:`function`) : the function to get the input arguments. Returns: namedtuple : of args, default, varargs, keywords, respectively.s """ sig = inspect.signature(f) args = [ p.name for p in sig.parameters.values() if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD ] varargs = [ p.name for p in sig.parameters.values() if p.kind == inspect.Parameter.VAR_POSITIONAL ] varargs = varargs[0] if varargs else None keywords = [ p.name for p in sig.parameters.values() if p.kind == inspect.Parameter.VAR_KEYWORD ] keywords = keywords[0] if keywords else None defaults = [ p.default for p in sig.parameters.values() if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD and p.default is not p.empty ] or None argspec = namedtuple('Signature', ['args', 'defaults', 'varargs', 'keywords']) return argspec(args, defaults, varargs, keywords) class PromptDataLoader(object): r""" PromptDataLoader wraps the original dataset. The input data is firstly wrapped with the prompt's template, and then is tokenized by a wrapperd-tokenizer. Args: dataset (:obj:`Dataset` or :obj:`List`): Either a DatasetObject or a list containing the input examples. template (:obj:`Template`): A derived class of :obj:`Template` tokenizer (:obj:`PretrainedTokenizer`): The pretrained tokenizer. tokenizer_wrapper_class (:cls:`TokenizerWrapper`): The class of tokenizer wrapper. max_seq_length (:obj:`int`, optional): The max sequence length of the input ids. It's used to truncate sentences. batch_size (:obj:`int`, optional): The batch_size of data loader teacher_forcing (:obj:`bool`, optional): Whether to fill the mask with target text. Set to true in training generation model. decoder_max_length (:obj:`int`, optional): the decoder maximum length of an encoder-decoder model. predict_eos_token (:obj:`bool`, optional): Whether to predict the <eos> token. Suggest to set to true in generation. truncate_method (:obj:`bool`, optional): the truncate method to use. select from `head`, `tail`, `balanced`. kwargs :Other kwargs that might be passed into a tokenizer wrapper. """ def __init__(self, dataset: Union[Dataset, List], template: Template,
tokenizer_wrapper: Optional[TokenizerWrapper] = None,
2
2023-11-01 08:52:36+00:00
16k
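The signature() helper in the prompt-tuning record above is a thin wrapper over inspect.signature that buckets a function's parameters by kind. A quick standalone illustration of the underlying inspect calls; the demo function is an assumption, not code from the record:

import inspect

def demo(a, b=2, *rest, **options):
    pass

params = inspect.signature(demo).parameters.values()
positional = [p.name for p in params if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD]
var_positional = [p.name for p in params if p.kind == inspect.Parameter.VAR_POSITIONAL]
var_keyword = [p.name for p in params if p.kind == inspect.Parameter.VAR_KEYWORD]
print(positional, var_positional, var_keyword)  # ['a', 'b'] ['rest'] ['options']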
choderalab/chiron
Examples/LJ_mcmove.py
[ { "identifier": "LJPotential", "path": "chiron/potential.py", "snippet": "class LJPotential(NeuralNetworkPotential):\n def __init__(\n self,\n topology: Topology,\n sigma: unit.Quantity = 3.350 * unit.angstroms,\n epsilon: unit.Quantity = 1.0 * unit.kilocalories_per_mole,\...
from openmmtools.testsystems import LennardJonesFluid from chiron.potential import LJPotential from openmm import unit from chiron.states import SamplerState, ThermodynamicState from chiron.neighbors import NeighborListNsqrd, OrthogonalPeriodicSpace from chiron.neighbors import PairList from chiron.reporters import SimulationReporter from chiron.mcmc import MetropolisDisplacementMove import os
13,336
# Use the LennardJonesFluid example from openmmtools to initialize particle positions and topology # For this example, the topology provides the masses for the particles # The default LennardJonesFluid example considers the system to be Argon with 39.9 amu lj_fluid = LennardJonesFluid(reduced_density=0.1, nparticles=1000) # initialize the LennardJones potential in chiron # sigma = 0.34 * unit.nanometer epsilon = 0.238 * unit.kilocalories_per_mole cutoff = 3.0 * sigma lj_potential = LJPotential( lj_fluid.topology, sigma=sigma, epsilon=epsilon, cutoff=cutoff ) # define the sampler state sampler_state = SamplerState( x0=lj_fluid.positions, box_vectors=lj_fluid.system.getDefaultPeriodicBoxVectors() ) # define the thermodynamic state thermodynamic_state = ThermodynamicState( potential=lj_potential, temperature=300 * unit.kelvin ) # define the neighbor list for an orthogonal periodic space skin = 0.5 * unit.nanometer
# Use the LennardJonesFluid example from openmmtools to initialize particle positions and topology # For this example, the topology provides the masses for the particles # The default LennardJonesFluid example considers the system to be Argon with 39.9 amu lj_fluid = LennardJonesFluid(reduced_density=0.1, nparticles=1000) # initialize the LennardJones potential in chiron # sigma = 0.34 * unit.nanometer epsilon = 0.238 * unit.kilocalories_per_mole cutoff = 3.0 * sigma lj_potential = LJPotential( lj_fluid.topology, sigma=sigma, epsilon=epsilon, cutoff=cutoff ) # define the sampler state sampler_state = SamplerState( x0=lj_fluid.positions, box_vectors=lj_fluid.system.getDefaultPeriodicBoxVectors() ) # define the thermodynamic state thermodynamic_state = ThermodynamicState( potential=lj_potential, temperature=300 * unit.kelvin ) # define the neighbor list for an orthogonal periodic space skin = 0.5 * unit.nanometer
nbr_list = NeighborListNsqrd(
3
2023-11-07 18:17:43+00:00
16k
WolfgangFahl/dcm
dcm/dcm_webserver.py
[ { "identifier": "Assessment", "path": "dcm/dcm_assessment.py", "snippet": "class Assessment:\n \"\"\"\n Assessment for CompetenceTree\n \"\"\"\n\n def __init__(\n self,\n webserver: NiceGuiWebserver,\n dcm: DynamicCompetenceMap,\n learner: Learner,\n debug:...
import os from typing import Optional from urllib.parse import urlparse from fastapi import HTTPException from fastapi.responses import HTMLResponse from ngwidgets.file_selector import FileSelector from ngwidgets.input_webserver import InputWebserver from ngwidgets.webserver import WebserverConfig from nicegui import Client, app, ui from pydantic import BaseModel from dcm.dcm_assessment import Assessment from dcm.dcm_chart import DcmChart from dcm.dcm_core import CompetenceTree, DynamicCompetenceMap, Learner from dcm.svg import SVG, SVGConfig from dcm.version import Version
13,264
""" Endpoints to get the description of a competence area Args: tree_id (str): ID of the tree area_id (str): ID of the area aspect_id (str, optional): ID of the aspect. Defaults to None. Returns: HTMLResponse: HTML content of the description. """ path=f"{tree_id}/{aspect_id}/{area_id}" return await self.show_description(path) @app.get("/description/{tree_id}/{aspect_id}") async def get_description_for_aspect( tree_id: str, aspect_id: str = None ) -> HTMLResponse: """ Endpoint to get the description of a competence aspect Args: tree_id (str): ID of the tree area_id (str): ID of the area Returns: HTMLResponse: HTML content of the description. """ path=f"{tree_id}/{aspect_id}" return await self.show_description(path) @app.get("/description/{tree_id}") async def get_description_for_tree( tree_id: str ) -> HTMLResponse: """ Endpoint to get the description of a competence tree Args: tree_id (str): ID of the tree Returns: HTMLResponse: HTML content of the description. """ path=f"{tree_id}" return await self.show_description(path) async def show_description( self, path:str=None ) -> HTMLResponse: """ Show the HTML description of a specific competence element given by the path Args: path(str): the path identifying the element Returns: HTMLResponse: The response object containing the HTML-formatted description. Raises: HTTPException: If the example name provided does not exist in the examples collection. """ path_parts=path.split("/") tree_id=path_parts[0] if tree_id in self.examples: example = self.examples[tree_id] element = example.competence_tree.lookup_by_path(path) if element: content = element.as_html() return HTMLResponse(content=content) else: content = ( f"No element found for {path} in {tree_id}" ) return HTMLResponse(content=content, status_code=404) else: msg = f"unknown competence tree {tree_id}" raise HTTPException(status_code=404, detail=msg) async def render_svg(self, svg_render_request: SVGRenderRequest) -> HTMLResponse: """ render the given request """ r = svg_render_request dcm = DynamicCompetenceMap.from_definition_string( r.name, r.definition, content_class=CompetenceTree, markup=r.markup ) dcm_chart = DcmChart(dcm) svg_markup = dcm_chart.generate_svg_markup( config=r.config, with_java_script=True ) response = HTMLResponse(content=svg_markup) return response def get_basename_without_extension(self, url) -> str: # Parse the URL to get the path component path = urlparse(url).path # Extract the base name (e.g., "example.html" from "/dir/example.html") basename = os.path.basename(path) # Split the base name and extension and return just the base name return os.path.splitext(basename)[0] async def render(self, _click_args=None): """ Renders the json content as an SVG visualization Args: click_args (object): The click event arguments. """ try: input_source = self.input if input_source: name = self.get_basename_without_extension(input_source) ui.notify(f"rendering {name}") definition = self.do_read_input(input_source) # Determine the format based on the file extension markup = "json" if input_source.endswith(".json") else "yaml" if "learner_id" in definition:
""" Created on 2023-11-06 @author: wf """ class SVGRenderRequest(BaseModel): """ A request for rendering an SVG. Attributes: name (str): The name of the render request. definition (str): The string representation of the data to be rendered, in either JSON or YAML format. markup (str): The format of the definition ('json' or 'yaml'). config (SVGConfig): Optional configuration for SVG rendering. Defaults to None, which uses default settings. """ name: str definition: str markup: str config: Optional[SVGConfig] = None class DynamicCompentenceMapWebServer(InputWebserver): """ server to supply Dynamic Competence Map Visualizations """ @classmethod def get_config(cls) -> WebserverConfig: """ get the configuration for this Webserver """ copy_right = "(c)2023-2024 Wolfgang Fahl" config = WebserverConfig( copy_right=copy_right, version=Version(), default_port=8885 ) return config def __init__(self): """Constructs all the necessary attributes for the WebServer object.""" InputWebserver.__init__( self, config=DynamicCompentenceMapWebServer.get_config() ) self.examples = DynamicCompetenceMap.get_examples(markup="yaml") self.dcm = None self.assessment = None @app.post("/svg/") async def render_svg(svg_render_request: SVGRenderRequest) -> HTMLResponse: """ render the given request """ return await self.render_svg(svg_render_request) @app.get("/description/{tree_id}/{aspect_id}/{area_id}/{facet_id}") async def get_description_for_facet( tree_id: str, aspect_id: str = None, area_id:str=None, facet_id: str = None ) -> HTMLResponse: """ Endpoints to get the description of a competence facet Args: tree_id (str): ID of the tree area_id (str): ID of the area aspect_id (str, optional): ID of the aspect. Defaults to None. facet_id (str, optional): ID of the facet. Defaults to None. Returns: HTMLResponse: HTML content of the description. """ path=f"{tree_id}/{aspect_id}/{area_id}/{facet_id}" return await self.show_description(path) @app.get("/description/{tree_id}/{aspect_id}/{area_id}") async def get_description_for_area( tree_id: str, aspect_id: str = None, area_id:str=None ) -> HTMLResponse: """ Endpoints to get the description of a competence area Args: tree_id (str): ID of the tree area_id (str): ID of the area aspect_id (str, optional): ID of the aspect. Defaults to None. Returns: HTMLResponse: HTML content of the description. """ path=f"{tree_id}/{aspect_id}/{area_id}" return await self.show_description(path) @app.get("/description/{tree_id}/{aspect_id}") async def get_description_for_aspect( tree_id: str, aspect_id: str = None ) -> HTMLResponse: """ Endpoint to get the description of a competence aspect Args: tree_id (str): ID of the tree area_id (str): ID of the area Returns: HTMLResponse: HTML content of the description. """ path=f"{tree_id}/{aspect_id}" return await self.show_description(path) @app.get("/description/{tree_id}") async def get_description_for_tree( tree_id: str ) -> HTMLResponse: """ Endpoint to get the description of a competence tree Args: tree_id (str): ID of the tree Returns: HTMLResponse: HTML content of the description. """ path=f"{tree_id}" return await self.show_description(path) async def show_description( self, path:str=None ) -> HTMLResponse: """ Show the HTML description of a specific competence element given by the path Args: path(str): the path identifying the element Returns: HTMLResponse: The response object containing the HTML-formatted description. Raises: HTTPException: If the example name provided does not exist in the examples collection. 
""" path_parts=path.split("/") tree_id=path_parts[0] if tree_id in self.examples: example = self.examples[tree_id] element = example.competence_tree.lookup_by_path(path) if element: content = element.as_html() return HTMLResponse(content=content) else: content = ( f"No element found for {path} in {tree_id}" ) return HTMLResponse(content=content, status_code=404) else: msg = f"unknown competence tree {tree_id}" raise HTTPException(status_code=404, detail=msg) async def render_svg(self, svg_render_request: SVGRenderRequest) -> HTMLResponse: """ render the given request """ r = svg_render_request dcm = DynamicCompetenceMap.from_definition_string( r.name, r.definition, content_class=CompetenceTree, markup=r.markup ) dcm_chart = DcmChart(dcm) svg_markup = dcm_chart.generate_svg_markup( config=r.config, with_java_script=True ) response = HTMLResponse(content=svg_markup) return response def get_basename_without_extension(self, url) -> str: # Parse the URL to get the path component path = urlparse(url).path # Extract the base name (e.g., "example.html" from "/dir/example.html") basename = os.path.basename(path) # Split the base name and extension and return just the base name return os.path.splitext(basename)[0] async def render(self, _click_args=None): """ Renders the json content as an SVG visualization Args: click_args (object): The click event arguments. """ try: input_source = self.input if input_source: name = self.get_basename_without_extension(input_source) ui.notify(f"rendering {name}") definition = self.do_read_input(input_source) # Determine the format based on the file extension markup = "json" if input_source.endswith(".json") else "yaml" if "learner_id" in definition:
content_class = Learner
4
2023-11-06 09:24:24+00:00
16k
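The DCM webserver record above wires several /description/... routes that return HTML for a competence element, or a 404 when the tree id is unknown. A pared-down sketch of that pattern with FastAPI; the EXAMPLES store and its contents are placeholders, not the project's real examples dictionary:

from fastapi import FastAPI, HTTPException
from fastapi.responses import HTMLResponse

app = FastAPI()
EXAMPLES = {"tree1": "<p>Example competence tree</p>"}  # placeholder content

@app.get("/description/{tree_id}", response_class=HTMLResponse)
async def get_description(tree_id: str) -> HTMLResponse:
    if tree_id not in EXAMPLES:
        raise HTTPException(status_code=404, detail=f"unknown competence tree {tree_id}")
    return HTMLResponse(content=EXAMPLES[tree_id])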
shadowpa0327/FLORA
models/build.py
[ { "identifier": "VisionTransformer", "path": "models/deit.py", "snippet": "class VisionTransformer(nn.Module):\n \"\"\" Vision Transformer\n\n A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale`\n - https://arxiv.org/abs/2010.11929\n\n Include...
from .deit import VisionTransformer from .swin_transformer import SwinTransformer from .lr_swin_transformer import LRSwinTransformer from .lr_swin_transformer_subnet import LRSwinTransformer as LRSwinTransformerSubnet from .lr_deit import LRVisionTransformer from .lr_deit_subnet import LRVisionTransformerSubnet
10,828
# -------------------------------------------------------- # TinyViT Model Builder # Copyright (c) 2022 Microsoft # -------------------------------------------------------- def build_model(config): model_type = config.MODEL.TYPE if model_type == 'swin': model = SwinTransformer( img_size=config.DATA.IMG_SIZE, patch_size=config.MODEL.SWIN.PATCH_SIZE, in_chans=config.MODEL.SWIN.IN_CHANS, num_classes=config.MODEL.NUM_CLASSES, embed_dim=config.MODEL.SWIN.EMBED_DIM, depths=config.MODEL.SWIN.DEPTHS, num_heads=config.MODEL.SWIN.NUM_HEADS, window_size=config.MODEL.SWIN.WINDOW_SIZE, mlp_ratio=config.MODEL.SWIN.MLP_RATIO, qkv_bias=config.MODEL.SWIN.QKV_BIAS, qk_scale=config.MODEL.SWIN.QK_SCALE, drop_rate=config.MODEL.DROP_RATE, drop_path_rate=config.MODEL.DROP_PATH_RATE, ape=config.MODEL.SWIN.APE, patch_norm=config.MODEL.SWIN.PATCH_NORM, use_checkpoint=config.TRAIN.USE_CHECKPOINT, fused_window_process=config.FUSED_WINDOW_PROCESS ) elif model_type == 'deit': model = VisionTransformer( img_size=config.DATA.IMG_SIZE, patch_size=config.MODEL.DEIT.PATCH_SIZE, in_chans=config.MODEL.DEIT.IN_CHANS, num_classes=config.MODEL.NUM_CLASSES, embed_dim=config.MODEL.DEIT.EMBED_DIM, depth=config.MODEL.DEIT.DEPTH, num_heads = config.MODEL.DEIT.NUM_HEADS, mlp_ratio = config.MODEL.DEIT.MLP_RATIO, qkv_bias = config.MODEL.DEIT.QKV_BIAS, drop_rate=config.MODEL.DROP_RATE, drop_path_rate=config.MODEL.DROP_PATH_RATE, ) elif model_type == 'lr_swin': model = LRSwinTransformer( img_size=config.DATA.IMG_SIZE, patch_size=config.MODEL.SWIN.PATCH_SIZE, in_chans=config.MODEL.SWIN.IN_CHANS, num_classes=config.MODEL.NUM_CLASSES, embed_dim=config.MODEL.SWIN.EMBED_DIM, depths=config.MODEL.SWIN.DEPTHS, num_heads=config.MODEL.SWIN.NUM_HEADS, window_size=config.MODEL.SWIN.WINDOW_SIZE, mlp_ratio=config.MODEL.SWIN.MLP_RATIO, qkv_bias=config.MODEL.SWIN.QKV_BIAS, qk_scale=config.MODEL.SWIN.QK_SCALE, drop_rate=config.MODEL.DROP_RATE, drop_path_rate=config.MODEL.DROP_PATH_RATE, ape=config.MODEL.SWIN.APE, patch_norm=config.MODEL.SWIN.PATCH_NORM, use_checkpoint=config.TRAIN.USE_CHECKPOINT, fused_window_process=config.FUSED_WINDOW_PROCESS ) elif model_type == 'lr_swin_subnet': model = LRSwinTransformerSubnet( svd_config=config.MODEL.SWIN.SVD_CONFIG, img_size=config.DATA.IMG_SIZE, patch_size=config.MODEL.SWIN.PATCH_SIZE, in_chans=config.MODEL.SWIN.IN_CHANS, num_classes=config.MODEL.NUM_CLASSES, embed_dim=config.MODEL.SWIN.EMBED_DIM, depths=config.MODEL.SWIN.DEPTHS, num_heads=config.MODEL.SWIN.NUM_HEADS, window_size=config.MODEL.SWIN.WINDOW_SIZE, mlp_ratio=config.MODEL.SWIN.MLP_RATIO, qkv_bias=config.MODEL.SWIN.QKV_BIAS, qk_scale=config.MODEL.SWIN.QK_SCALE, drop_rate=config.MODEL.DROP_RATE, drop_path_rate=config.MODEL.DROP_PATH_RATE, ape=config.MODEL.SWIN.APE, patch_norm=config.MODEL.SWIN.PATCH_NORM, use_checkpoint=config.TRAIN.USE_CHECKPOINT, fused_window_process=config.FUSED_WINDOW_PROCESS ) elif model_type == 'lr_deit': model = LRVisionTransformer( img_size=config.DATA.IMG_SIZE, patch_size=config.MODEL.DEIT.PATCH_SIZE, in_chans=config.MODEL.DEIT.IN_CHANS, num_classes=config.MODEL.NUM_CLASSES, embed_dim=config.MODEL.DEIT.EMBED_DIM, depth=config.MODEL.DEIT.DEPTH, num_heads = config.MODEL.DEIT.NUM_HEADS, mlp_ratio = config.MODEL.DEIT.MLP_RATIO, qkv_bias = config.MODEL.DEIT.QKV_BIAS, drop_rate=config.MODEL.DROP_RATE, drop_path_rate=config.MODEL.DROP_PATH_RATE, fused_lr=config.MODEL.DEIT.FUSE_LR, ) elif model_type == 'lr_deit_subnet':
# -------------------------------------------------------- # TinyViT Model Builder # Copyright (c) 2022 Microsoft # -------------------------------------------------------- def build_model(config): model_type = config.MODEL.TYPE if model_type == 'swin': model = SwinTransformer( img_size=config.DATA.IMG_SIZE, patch_size=config.MODEL.SWIN.PATCH_SIZE, in_chans=config.MODEL.SWIN.IN_CHANS, num_classes=config.MODEL.NUM_CLASSES, embed_dim=config.MODEL.SWIN.EMBED_DIM, depths=config.MODEL.SWIN.DEPTHS, num_heads=config.MODEL.SWIN.NUM_HEADS, window_size=config.MODEL.SWIN.WINDOW_SIZE, mlp_ratio=config.MODEL.SWIN.MLP_RATIO, qkv_bias=config.MODEL.SWIN.QKV_BIAS, qk_scale=config.MODEL.SWIN.QK_SCALE, drop_rate=config.MODEL.DROP_RATE, drop_path_rate=config.MODEL.DROP_PATH_RATE, ape=config.MODEL.SWIN.APE, patch_norm=config.MODEL.SWIN.PATCH_NORM, use_checkpoint=config.TRAIN.USE_CHECKPOINT, fused_window_process=config.FUSED_WINDOW_PROCESS ) elif model_type == 'deit': model = VisionTransformer( img_size=config.DATA.IMG_SIZE, patch_size=config.MODEL.DEIT.PATCH_SIZE, in_chans=config.MODEL.DEIT.IN_CHANS, num_classes=config.MODEL.NUM_CLASSES, embed_dim=config.MODEL.DEIT.EMBED_DIM, depth=config.MODEL.DEIT.DEPTH, num_heads = config.MODEL.DEIT.NUM_HEADS, mlp_ratio = config.MODEL.DEIT.MLP_RATIO, qkv_bias = config.MODEL.DEIT.QKV_BIAS, drop_rate=config.MODEL.DROP_RATE, drop_path_rate=config.MODEL.DROP_PATH_RATE, ) elif model_type == 'lr_swin': model = LRSwinTransformer( img_size=config.DATA.IMG_SIZE, patch_size=config.MODEL.SWIN.PATCH_SIZE, in_chans=config.MODEL.SWIN.IN_CHANS, num_classes=config.MODEL.NUM_CLASSES, embed_dim=config.MODEL.SWIN.EMBED_DIM, depths=config.MODEL.SWIN.DEPTHS, num_heads=config.MODEL.SWIN.NUM_HEADS, window_size=config.MODEL.SWIN.WINDOW_SIZE, mlp_ratio=config.MODEL.SWIN.MLP_RATIO, qkv_bias=config.MODEL.SWIN.QKV_BIAS, qk_scale=config.MODEL.SWIN.QK_SCALE, drop_rate=config.MODEL.DROP_RATE, drop_path_rate=config.MODEL.DROP_PATH_RATE, ape=config.MODEL.SWIN.APE, patch_norm=config.MODEL.SWIN.PATCH_NORM, use_checkpoint=config.TRAIN.USE_CHECKPOINT, fused_window_process=config.FUSED_WINDOW_PROCESS ) elif model_type == 'lr_swin_subnet': model = LRSwinTransformerSubnet( svd_config=config.MODEL.SWIN.SVD_CONFIG, img_size=config.DATA.IMG_SIZE, patch_size=config.MODEL.SWIN.PATCH_SIZE, in_chans=config.MODEL.SWIN.IN_CHANS, num_classes=config.MODEL.NUM_CLASSES, embed_dim=config.MODEL.SWIN.EMBED_DIM, depths=config.MODEL.SWIN.DEPTHS, num_heads=config.MODEL.SWIN.NUM_HEADS, window_size=config.MODEL.SWIN.WINDOW_SIZE, mlp_ratio=config.MODEL.SWIN.MLP_RATIO, qkv_bias=config.MODEL.SWIN.QKV_BIAS, qk_scale=config.MODEL.SWIN.QK_SCALE, drop_rate=config.MODEL.DROP_RATE, drop_path_rate=config.MODEL.DROP_PATH_RATE, ape=config.MODEL.SWIN.APE, patch_norm=config.MODEL.SWIN.PATCH_NORM, use_checkpoint=config.TRAIN.USE_CHECKPOINT, fused_window_process=config.FUSED_WINDOW_PROCESS ) elif model_type == 'lr_deit': model = LRVisionTransformer( img_size=config.DATA.IMG_SIZE, patch_size=config.MODEL.DEIT.PATCH_SIZE, in_chans=config.MODEL.DEIT.IN_CHANS, num_classes=config.MODEL.NUM_CLASSES, embed_dim=config.MODEL.DEIT.EMBED_DIM, depth=config.MODEL.DEIT.DEPTH, num_heads = config.MODEL.DEIT.NUM_HEADS, mlp_ratio = config.MODEL.DEIT.MLP_RATIO, qkv_bias = config.MODEL.DEIT.QKV_BIAS, drop_rate=config.MODEL.DROP_RATE, drop_path_rate=config.MODEL.DROP_PATH_RATE, fused_lr=config.MODEL.DEIT.FUSE_LR, ) elif model_type == 'lr_deit_subnet':
model = LRVisionTransformerSubnet(
5
2023-11-03 09:54:45+00:00
16k
Harvard-Ophthalmology-AI-Lab/FairSeg
SAMed/segment_anything/automatic_mask_generator.py
[ { "identifier": "Sam", "path": "SAMed/segment_anything/modeling/sam.py", "snippet": "class Sam(nn.Module):\n mask_threshold: float = 0.0\n image_format: str = \"RGB\"\n\n def __init__(\n self,\n image_encoder: ImageEncoderViT,\n prompt_encoder: PromptEncoder,\n mask_...
import numpy as np import torch import cv2 # type: ignore # noqa: F401 from torchvision.ops.boxes import batched_nms, box_area # type: ignore from typing import Any, Dict, List, Optional, Tuple from .modeling import Sam from .predictor import SamPredictor from .utils.amg import ( MaskData, area_from_rle, batch_iterator, batched_mask_to_box, box_xyxy_to_xywh, build_all_layer_point_grids, calculate_stability_score, coco_encode_rle, generate_crop_boxes, is_box_near_crop_edge, mask_to_rle_pytorch, remove_small_regions, rle_to_mask, uncrop_boxes_xyxy, uncrop_masks, uncrop_points, ) from pycocotools import mask as mask_utils # type: ignore # noqa: F401
11,326
"point_coords": [mask_data["points"][idx].tolist()], "stability_score": mask_data["stability_score"][idx].item(), "crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(), } curr_anns.append(ann) return curr_anns def _generate_masks(self, image: np.ndarray) -> MaskData: orig_size = image.shape[:2] crop_boxes, layer_idxs = generate_crop_boxes( orig_size, self.crop_n_layers, self.crop_overlap_ratio ) # Iterate over image crops data = MaskData() for crop_box, layer_idx in zip(crop_boxes, layer_idxs): crop_data = self._process_crop(image, crop_box, layer_idx, orig_size) data.cat(crop_data) # Remove duplicate masks between crops if len(crop_boxes) > 1: # Prefer masks from smaller crops scores = 1 / box_area(data["crop_boxes"]) scores = scores.to(data["boxes"].device) keep_by_nms = batched_nms( data["boxes"].float(), scores, torch.zeros(len(data["boxes"])), # categories iou_threshold=self.crop_nms_thresh, ) data.filter(keep_by_nms) data.to_numpy() return data def _process_crop( self, image: np.ndarray, crop_box: List[int], crop_layer_idx: int, orig_size: Tuple[int, ...], ) -> MaskData: # Crop the image and calculate embeddings x0, y0, x1, y1 = crop_box cropped_im = image[y0:y1, x0:x1, :] cropped_im_size = cropped_im.shape[:2] self.predictor.set_image(cropped_im) # Get points for this crop points_scale = np.array(cropped_im_size)[None, ::-1] points_for_image = self.point_grids[crop_layer_idx] * points_scale # Generate masks for this crop in batches data = MaskData() for (points,) in batch_iterator(self.points_per_batch, points_for_image): batch_data = self._process_batch(points, cropped_im_size, crop_box, orig_size) data.cat(batch_data) del batch_data self.predictor.reset_image() # Remove duplicates within this crop. keep_by_nms = batched_nms( data["boxes"].float(), data["iou_preds"], torch.zeros(len(data["boxes"])), # categories iou_threshold=self.box_nms_thresh, ) data.filter(keep_by_nms) # Return to the original image frame data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box) data["points"] = uncrop_points(data["points"], crop_box) data["crop_boxes"] = torch.tensor([crop_box for _ in range(len(data["rles"]))]) return data def _process_batch( self, points: np.ndarray, im_size: Tuple[int, ...], crop_box: List[int], orig_size: Tuple[int, ...], ) -> MaskData: orig_h, orig_w = orig_size # Run model on this batch transformed_points = self.predictor.transform.apply_coords(points, im_size) in_points = torch.as_tensor(transformed_points, device=self.predictor.device) in_labels = torch.ones(in_points.shape[0], dtype=torch.int, device=in_points.device) masks, iou_preds, _ = self.predictor.predict_torch( in_points[:, None, :], in_labels[:, None], multimask_output=True, return_logits=True, ) # Serialize predictions and store in MaskData data = MaskData( masks=masks.flatten(0, 1), iou_preds=iou_preds.flatten(0, 1), points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)), ) del masks # Filter by predicted IoU if self.pred_iou_thresh > 0.0: keep_mask = data["iou_preds"] > self.pred_iou_thresh data.filter(keep_mask) # Calculate stability score data["stability_score"] = calculate_stability_score( data["masks"], self.predictor.model.mask_threshold, self.stability_score_offset ) if self.stability_score_thresh > 0.0: keep_mask = data["stability_score"] >= self.stability_score_thresh data.filter(keep_mask) # Threshold masks and calculate boxes data["masks"] = data["masks"] > self.predictor.model.mask_threshold
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. class SamAutomaticMaskGenerator: def __init__( self, model: Sam, points_per_side: Optional[int] = 32, points_per_batch: int = 64, pred_iou_thresh: float = 0.88, stability_score_thresh: float = 0.95, stability_score_offset: float = 1.0, box_nms_thresh: float = 0.7, crop_n_layers: int = 0, crop_nms_thresh: float = 0.7, crop_overlap_ratio: float = 512 / 1500, crop_n_points_downscale_factor: int = 1, point_grids: Optional[List[np.ndarray]] = None, min_mask_region_area: int = 0, output_mode: str = "binary_mask", ) -> None: """ Using a SAM model, generates masks for the entire image. Generates a grid of point prompts over the image, then filters low quality and duplicate masks. The default settings are chosen for SAM with a ViT-H backbone. Arguments: model (Sam): The SAM model to use for mask prediction. points_per_side (int or None): The number of points to be sampled along one side of the image. The total number of points is points_per_side**2. If None, 'point_grids' must provide explicit point sampling. points_per_batch (int): Sets the number of points run simultaneously by the model. Higher numbers may be faster but use more GPU memory. pred_iou_thresh (float): A filtering threshold in [0,1], using the model's predicted mask quality. stability_score_thresh (float): A filtering threshold in [0,1], using the stability of the mask under changes to the cutoff used to binarize the model's mask predictions. stability_score_offset (float): The amount to shift the cutoff when calculated the stability score. box_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks. crops_n_layers (int): If >0, mask prediction will be run again on crops of the image. Sets the number of layers to run, where each layer has 2**i_layer number of image crops. crops_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks between different crops. crop_overlap_ratio (float): Sets the degree to which crops overlap. In the first crop layer, crops will overlap by this fraction of the image length. Later layers with more crops scale down this overlap. crop_n_points_downscale_factor (int): The number of points-per-side sampled in layer n is scaled down by crop_n_points_downscale_factor**n. point_grids (list(np.ndarray) or None): A list over explicit grids of points used for sampling, normalized to [0,1]. The nth grid in the list is used in the nth crop layer. Exclusive with points_per_side. min_mask_region_area (int): If >0, postprocessing will be applied to remove disconnected regions and holes in masks with area smaller than min_mask_region_area. Requires opencv. output_mode (str): The form masks are returned in. Can be 'binary_mask', 'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools. For large resolutions, 'binary_mask' may consume large amounts of memory. """ assert (points_per_side is None) != ( point_grids is None ), "Exactly one of points_per_side or point_grid must be provided." 
if points_per_side is not None: self.point_grids = build_all_layer_point_grids( points_per_side, crop_n_layers, crop_n_points_downscale_factor, ) elif point_grids is not None: self.point_grids = point_grids else: raise ValueError("Can't have both points_per_side and point_grid be None.") assert output_mode in [ "binary_mask", "uncompressed_rle", "coco_rle", ], f"Unknown output_mode {output_mode}." if output_mode == "coco_rle": if min_mask_region_area > 0: self.predictor = SamPredictor(model) self.points_per_batch = points_per_batch self.pred_iou_thresh = pred_iou_thresh self.stability_score_thresh = stability_score_thresh self.stability_score_offset = stability_score_offset self.box_nms_thresh = box_nms_thresh self.crop_n_layers = crop_n_layers self.crop_nms_thresh = crop_nms_thresh self.crop_overlap_ratio = crop_overlap_ratio self.crop_n_points_downscale_factor = crop_n_points_downscale_factor self.min_mask_region_area = min_mask_region_area self.output_mode = output_mode @torch.no_grad() def generate(self, image: np.ndarray) -> List[Dict[str, Any]]: """ Generates masks for the given image. Arguments: image (np.ndarray): The image to generate masks for, in HWC uint8 format. Returns: list(dict(str, any)): A list over records for masks. Each record is a dict containing the following keys: segmentation (dict(str, any) or np.ndarray): The mask. If output_mode='binary_mask', is an array of shape HW. Otherwise, is a dictionary containing the RLE. bbox (list(float)): The box around the mask, in XYWH format. area (int): The area in pixels of the mask. predicted_iou (float): The model's own prediction of the mask's quality. This is filtered by the pred_iou_thresh parameter. point_coords (list(list(float))): The point coordinates input to the model to generate this mask. stability_score (float): A measure of the mask's quality. This is filtered on using the stability_score_thresh parameter. crop_box (list(float)): The crop of the image used to generate the mask, given in XYWH format. 
""" # Generate masks mask_data = self._generate_masks(image) # Filter small disconnected regions and holes in masks if self.min_mask_region_area > 0: mask_data = self.postprocess_small_regions( mask_data, self.min_mask_region_area, max(self.box_nms_thresh, self.crop_nms_thresh), ) # Encode masks if self.output_mode == "coco_rle": mask_data["segmentations"] = [coco_encode_rle(rle) for rle in mask_data["rles"]] elif self.output_mode == "binary_mask": mask_data["segmentations"] = [rle_to_mask(rle) for rle in mask_data["rles"]] else: mask_data["segmentations"] = mask_data["rles"] # Write mask records curr_anns = [] for idx in range(len(mask_data["segmentations"])): ann = { "segmentation": mask_data["segmentations"][idx], "area": area_from_rle(mask_data["rles"][idx]), "bbox": box_xyxy_to_xywh(mask_data["boxes"][idx]).tolist(), "predicted_iou": mask_data["iou_preds"][idx].item(), "point_coords": [mask_data["points"][idx].tolist()], "stability_score": mask_data["stability_score"][idx].item(), "crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(), } curr_anns.append(ann) return curr_anns def _generate_masks(self, image: np.ndarray) -> MaskData: orig_size = image.shape[:2] crop_boxes, layer_idxs = generate_crop_boxes( orig_size, self.crop_n_layers, self.crop_overlap_ratio ) # Iterate over image crops data = MaskData() for crop_box, layer_idx in zip(crop_boxes, layer_idxs): crop_data = self._process_crop(image, crop_box, layer_idx, orig_size) data.cat(crop_data) # Remove duplicate masks between crops if len(crop_boxes) > 1: # Prefer masks from smaller crops scores = 1 / box_area(data["crop_boxes"]) scores = scores.to(data["boxes"].device) keep_by_nms = batched_nms( data["boxes"].float(), scores, torch.zeros(len(data["boxes"])), # categories iou_threshold=self.crop_nms_thresh, ) data.filter(keep_by_nms) data.to_numpy() return data def _process_crop( self, image: np.ndarray, crop_box: List[int], crop_layer_idx: int, orig_size: Tuple[int, ...], ) -> MaskData: # Crop the image and calculate embeddings x0, y0, x1, y1 = crop_box cropped_im = image[y0:y1, x0:x1, :] cropped_im_size = cropped_im.shape[:2] self.predictor.set_image(cropped_im) # Get points for this crop points_scale = np.array(cropped_im_size)[None, ::-1] points_for_image = self.point_grids[crop_layer_idx] * points_scale # Generate masks for this crop in batches data = MaskData() for (points,) in batch_iterator(self.points_per_batch, points_for_image): batch_data = self._process_batch(points, cropped_im_size, crop_box, orig_size) data.cat(batch_data) del batch_data self.predictor.reset_image() # Remove duplicates within this crop. 
keep_by_nms = batched_nms( data["boxes"].float(), data["iou_preds"], torch.zeros(len(data["boxes"])), # categories iou_threshold=self.box_nms_thresh, ) data.filter(keep_by_nms) # Return to the original image frame data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box) data["points"] = uncrop_points(data["points"], crop_box) data["crop_boxes"] = torch.tensor([crop_box for _ in range(len(data["rles"]))]) return data def _process_batch( self, points: np.ndarray, im_size: Tuple[int, ...], crop_box: List[int], orig_size: Tuple[int, ...], ) -> MaskData: orig_h, orig_w = orig_size # Run model on this batch transformed_points = self.predictor.transform.apply_coords(points, im_size) in_points = torch.as_tensor(transformed_points, device=self.predictor.device) in_labels = torch.ones(in_points.shape[0], dtype=torch.int, device=in_points.device) masks, iou_preds, _ = self.predictor.predict_torch( in_points[:, None, :], in_labels[:, None], multimask_output=True, return_logits=True, ) # Serialize predictions and store in MaskData data = MaskData( masks=masks.flatten(0, 1), iou_preds=iou_preds.flatten(0, 1), points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)), ) del masks # Filter by predicted IoU if self.pred_iou_thresh > 0.0: keep_mask = data["iou_preds"] > self.pred_iou_thresh data.filter(keep_mask) # Calculate stability score data["stability_score"] = calculate_stability_score( data["masks"], self.predictor.model.mask_threshold, self.stability_score_offset ) if self.stability_score_thresh > 0.0: keep_mask = data["stability_score"] >= self.stability_score_thresh data.filter(keep_mask) # Threshold masks and calculate boxes data["masks"] = data["masks"] > self.predictor.model.mask_threshold
data["boxes"] = batched_mask_to_box(data["masks"])
5
2023-11-03 17:05:40+00:00
16k
microsoft/PLEX
PLEX/finetuning.py
[ { "identifier": "TrajectoryDataset", "path": "PLEX/util/data.py", "snippet": "class TrajectoryDataset:\n def __init__(self, trajectories, camera_names, contextual):\n self.trajectories = list(trajectories)\n if not globals.full_state_mode:\n self.camera_names = camera_names\n...
import os import torch import argparse import sys from PLEX.util.data import TrajectoryDataset, load_data, setup_batch_sampler, train_val_split from PLEX.util.misc import parse_tasks, setup_essentials, setup_model, set_trainable_params, setup_trainer, run_training from PLEX.util.evaluators import get_success_rate_evaluator, get_validation_error_evaluator from PLEX.util.cmdline import add_common_args, add_conditioning_args from PLEX.util.log import setup_wandb_logging
11,382
def finetune(cmdline_args): os.environ["NCCL_DEBUG"] = "INFO" print("=== Finetuning ===") parser = argparse.ArgumentParser() # Add all relevant command-line arguments add_common_args(parser) add_conditioning_args(parser) parser.add_argument('--finetune_learning_rate', type=float, default=1e-5) parser.add_argument('--finetune_steps_per_iter', type=int, default=100) parser.add_argument('--target_task', type=str, default=None) parser.add_argument('--max_target_trajectories', type=int, default=None) # Parse them and validate them args = parser.parse_args(cmdline_args) args = vars(args) if not args['bc_learning_mode']: assert 'reward' not in args['modalities_to_mask'], "If the model is expected to condition on returns, then they should not be masked out." # NOTE: The arguments below aren't actual command-line arguments. We are just adding them to args[] out of convenience. # Note also that during finetuning we set predicted_inverse_dynamics_loss_weight=1, i.e., **in case the # finetuning trajectories contain actions**, we adapt PLEX's based on the predicted observation latents # from its planner PL rather than based on the actual ("grounded") observation latents contained # in finetuning trajectories. if args['model'] == 'PLEX': args['grounded_inverse_dynamics_loss_weight'] = 0 args['predicted_inverse_dynamics_loss_weight'] = 1 args['future_prediction_loss_weight'] = 1
def finetune(cmdline_args): os.environ["NCCL_DEBUG"] = "INFO" print("=== Finetuning ===") parser = argparse.ArgumentParser() # Add all relevant command-line arguments add_common_args(parser) add_conditioning_args(parser) parser.add_argument('--finetune_learning_rate', type=float, default=1e-5) parser.add_argument('--finetune_steps_per_iter', type=int, default=100) parser.add_argument('--target_task', type=str, default=None) parser.add_argument('--max_target_trajectories', type=int, default=None) # Parse them and validate them args = parser.parse_args(cmdline_args) args = vars(args) if not args['bc_learning_mode']: assert 'reward' not in args['modalities_to_mask'], "If the model is expected to condition on returns, then they should not be masked out." # NOTE: The arguments below aren't actual command-line arguments. We are just adding them to args[] out of convenience. # Note also that during finetuning we set predicted_inverse_dynamics_loss_weight=1, i.e., **in case the # finetuning trajectories contain actions**, we adapt PLEX's based on the predicted observation latents # from its planner PL rather than based on the actual ("grounded") observation latents contained # in finetuning trajectories. if args['model'] == 'PLEX': args['grounded_inverse_dynamics_loss_weight'] = 0 args['predicted_inverse_dynamics_loss_weight'] = 1 args['future_prediction_loss_weight'] = 1
log, log_to_wandb, timer, data_shuffling_rng, device, camera_names, modalities_to_mask, data_dir, common_env_metadata_dict = setup_essentials(args)
5
2023-11-06 09:38:09+00:00
16k
Giftify-Bot/Giftify-Bot
cogs/giveaways/start.py
[ { "identifier": "Giftify", "path": "bot.py", "snippet": "class Giftify(GiftifyHelper, commands.AutoShardedBot):\r\n user: discord.ClientUser\r\n\r\n colour: int = 0xCB3045\r\n __version_info__ = \"1.1.4\"\r\n\r\n def __init__(\r\n self,\r\n *,\r\n log_handler: LogHandler...
import datetime import discord from collections import ChainMap from typing import Dict, List, Optional, Tuple from discord import app_commands from discord.app_commands import Transform from discord.ext import commands from bot import Giftify from models.giveaway_settings import ChannelConfig from models.giveaways import Giveaway, GiveawayAction from utils.exceptions import GiveawayError from utils.transformers import ( BonusRolesTransformer, RolesTransformer, TextChannelsTransformer, TimeTransformer, ) from utils.tree import Interaction
11,054
class GiveawayStart(commands.GroupCog): """A cog for starting giveaways.""" bot: Giftify @app_commands.command(name="start") @app_commands.describe( prize="The prize of the giveaway.", duration="The duration of the giveaway.", winners="The number of winners for the giveaway.", required_roles="The roles required to participate in the giveaway.", blacklisted_roles="The roles not allowed to participate in the giveaway.", bypass_roles="The roles that can bypass participation restrictions", multiplier_roles="Use the format <role:number_of_multiplier_roles_entries> split using a ':' (colon).", messages_required="The number of messages required to join the giveaway.", allowed_message_channels="The channels where users are allowed to send messages.", amari="The amari level required for the giveaway.", weekly_amari="The weekly amari xp required for the giveaway.", no_defaults="Flag to exclude default settings for the giveaway.", image="The attached image for the giveaway.", ping="Whether to ping the server pingrole when the giveaway starts.", donor="The donating member for the giveaway.", message="The message to accompany the giveaway.", ) @app_commands.checks.bot_has_permissions( embed_links=True, send_messages=True, view_channel=True, add_reactions=True ) @app_commands.checks.cooldown(1, 5, key=lambda i: (i.guild, i.user.id)) async def giveaway_start( self, interaction: Interaction,
class GiveawayStart(commands.GroupCog): """A cog for starting giveaways.""" bot: Giftify @app_commands.command(name="start") @app_commands.describe( prize="The prize of the giveaway.", duration="The duration of the giveaway.", winners="The number of winners for the giveaway.", required_roles="The roles required to participate in the giveaway.", blacklisted_roles="The roles not allowed to participate in the giveaway.", bypass_roles="The roles that can bypass participation restrictions", multiplier_roles="Use the format <role:number_of_multiplier_roles_entries> split using a ':' (colon).", messages_required="The number of messages required to join the giveaway.", allowed_message_channels="The channels where users are allowed to send messages.", amari="The amari level required for the giveaway.", weekly_amari="The weekly amari xp required for the giveaway.", no_defaults="Flag to exclude default settings for the giveaway.", image="The attached image for the giveaway.", ping="Whether to ping the server pingrole when the giveaway starts.", donor="The donating member for the giveaway.", message="The message to accompany the giveaway.", ) @app_commands.checks.bot_has_permissions( embed_links=True, send_messages=True, view_channel=True, add_reactions=True ) @app_commands.checks.cooldown(1, 5, key=lambda i: (i.guild, i.user.id)) async def giveaway_start( self, interaction: Interaction,
duration: Transform[datetime.datetime, TimeTransformer],
8
2023-11-09 15:00:15+00:00
16k
Zjy0401/CoCoFormer
train.py
[ { "identifier": "create_jsf_datasets", "path": "dataset/jsf.py", "snippet": "def create_jsf_datasets(dataset_root, max_seq, random_seq=True):\n\n train_root = os.path.join(dataset_root, \"train\")\n # val_root = os.path.join(dataset_root, \"val\")\n test_root = os.path.join(dataset_root, \"test...
import os import csv import shutil import torch import torch.nn as nn import pickle from thop import profile from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from torch.optim import Adam from dataset.jsf import create_jsf_datasets from model.CoCoFormer import CoCoformer, Discriminator, PureTransformer from model.loss import SmoothCrossEntropyLoss from utilities.constants import * from utilities.device import get_device, use_cuda from utilities.lr_scheduling import LrStepTracker, get_lr from utilities.argument_funcs import parse_train_args, print_train_args, write_model_params from utilities.run_model import train_epoch, train_with_adv, eval_model, get_metrics, train_with_pure_transformer, params from tensorboardX import SummaryWriter
10,976
# from dataset.e_piano import create_epiano_datasets, compute_epiano_accuracy, split_train_test CSV_HEADER = ["Epoch", "Learn rate", "Avg Train loss", "Train Accuracy", "Avg Eval loss", "Eval accuracy"] # Baseline is an untrained epoch that we evaluate as a baseline loss and accuracy BASELINE_EPOCH = -1 # main def main(): """ ---------- Author: Damon Gwinn ---------- Entry point. Trains a model specified by command line arguments ---------- """ args = parse_train_args() print_train_args(args) if args.force_cpu: use_cuda(False) print("WARNING: Forced CPU usage, expect model to perform slower") print("") os.makedirs(args.output_dir, exist_ok=True) ##### Output prep ##### params_file = os.path.join(args.output_dir, "model_params.txt") write_model_params(args, params_file) weights_folder = os.path.join(args.output_dir, "weights") os.makedirs(weights_folder, exist_ok=True) results_folder = os.path.join(args.output_dir, "results") os.makedirs(results_folder, exist_ok=True) results_file = os.path.join(results_folder, "results.csv") best_loss_file = os.path.join(results_folder, "best_loss_weights.pickle") best_acc_file = os.path.join(results_folder, "best_acc_weights.pickle") best_text = os.path.join(results_folder, "best_epochs.txt") ##### Tensorboard ##### if args.no_tensorboard: tensorboard_summary = None else: tensorboad_dir = os.path.join(args.output_dir, "tensorboard") tensorboard_summary = SummaryWriter(log_dir=tensorboad_dir) ##### Datasets ##### # train_dataset, val_dataset, test_dataset = create_epiano_datasets(args.input_dir, args.max_sequence) train_dataset, test_dataset = create_jsf_datasets(args.input_dir, args.max_sequence) train_loader = DataLoader(train_dataset, batch_size=args.batch_size * len(args.gpu), num_workers=args.n_workers, shuffle=True) # val_loader = DataLoader(val_dataset, batch_size=args.batch_size, num_workers=args.n_workers) test_loader = DataLoader(test_dataset, batch_size=args.batch_size, num_workers=args.n_workers) ##### read word2event event2word f = open(args.word2event, 'rb') word2event = pickle.load(f) # reverse the vector event2word event2word = {} for key, val in word2event.items(): event2word[val] = key if args.only_Transformer: model = PureTransformer(n_layers=args.n_layers, num_heads=args.num_heads, d_model=args.d_model, dim_feedforward=args.dim_feedforward, dropout=args.dropout, max_sequence=args.max_sequence, rpr=args.rpr, word2event=word2event, event2word=event2word) else: model = CoCoformer(n_layers=args.n_layers, num_heads=args.num_heads, d_model=args.d_model, dim_feedforward=args.dim_feedforward, dropout=args.dropout, max_sequence=args.max_sequence, rpr=args.rpr, word2event=word2event, event2word=event2word) model_disc = Discriminator() if args.gpu[0] != -1: model = torch.nn.DataParallel(model, device_ids=args.gpu) model = model.cuda(device=args.gpu[0]) model_disc = torch.nn.DataParallel(model_disc, device_ids=args.gpu) model_disc = model_disc.cuda(device=args.gpu[0])
# from dataset.e_piano import create_epiano_datasets, compute_epiano_accuracy, split_train_test CSV_HEADER = ["Epoch", "Learn rate", "Avg Train loss", "Train Accuracy", "Avg Eval loss", "Eval accuracy"] # Baseline is an untrained epoch that we evaluate as a baseline loss and accuracy BASELINE_EPOCH = -1 # main def main(): """ ---------- Author: Damon Gwinn ---------- Entry point. Trains a model specified by command line arguments ---------- """ args = parse_train_args() print_train_args(args) if args.force_cpu: use_cuda(False) print("WARNING: Forced CPU usage, expect model to perform slower") print("") os.makedirs(args.output_dir, exist_ok=True) ##### Output prep ##### params_file = os.path.join(args.output_dir, "model_params.txt") write_model_params(args, params_file) weights_folder = os.path.join(args.output_dir, "weights") os.makedirs(weights_folder, exist_ok=True) results_folder = os.path.join(args.output_dir, "results") os.makedirs(results_folder, exist_ok=True) results_file = os.path.join(results_folder, "results.csv") best_loss_file = os.path.join(results_folder, "best_loss_weights.pickle") best_acc_file = os.path.join(results_folder, "best_acc_weights.pickle") best_text = os.path.join(results_folder, "best_epochs.txt") ##### Tensorboard ##### if args.no_tensorboard: tensorboard_summary = None else: tensorboad_dir = os.path.join(args.output_dir, "tensorboard") tensorboard_summary = SummaryWriter(log_dir=tensorboad_dir) ##### Datasets ##### # train_dataset, val_dataset, test_dataset = create_epiano_datasets(args.input_dir, args.max_sequence) train_dataset, test_dataset = create_jsf_datasets(args.input_dir, args.max_sequence) train_loader = DataLoader(train_dataset, batch_size=args.batch_size * len(args.gpu), num_workers=args.n_workers, shuffle=True) # val_loader = DataLoader(val_dataset, batch_size=args.batch_size, num_workers=args.n_workers) test_loader = DataLoader(test_dataset, batch_size=args.batch_size, num_workers=args.n_workers) ##### read word2event event2word f = open(args.word2event, 'rb') word2event = pickle.load(f) # reverse the vector event2word event2word = {} for key, val in word2event.items(): event2word[val] = key if args.only_Transformer: model = PureTransformer(n_layers=args.n_layers, num_heads=args.num_heads, d_model=args.d_model, dim_feedforward=args.dim_feedforward, dropout=args.dropout, max_sequence=args.max_sequence, rpr=args.rpr, word2event=word2event, event2word=event2word) else: model = CoCoformer(n_layers=args.n_layers, num_heads=args.num_heads, d_model=args.d_model, dim_feedforward=args.dim_feedforward, dropout=args.dropout, max_sequence=args.max_sequence, rpr=args.rpr, word2event=word2event, event2word=event2word) model_disc = Discriminator() if args.gpu[0] != -1: model = torch.nn.DataParallel(model, device_ids=args.gpu) model = model.cuda(device=args.gpu[0]) model_disc = torch.nn.DataParallel(model_disc, device_ids=args.gpu) model_disc = model_disc.cuda(device=args.gpu[0])
params(train_loader, model, model_disc)
17
2023-11-01 08:33:08+00:00
16k
tiendatnguyen-vision/Orbit-symmetrize
RotatedMNIST/LPS_orbit/emlp-pytorch/emlp_pytorch/reps/representation.py
[ { "identifier": "Group", "path": "RotatedMNIST/LPS_orbit/emlp-pytorch/emlp_pytorch/groups.py", "snippet": "class Group(nn.Module):\n \"\"\" Abstract Group Object which new groups should inherit from. \"\"\"\n\n def __init__(self):\n super().__init__()\n self.lie_algebra = NotImplemen...
import math import logging import itertools import torch from functools import lru_cache as cache, reduce from collections import defaultdict from plum import dispatch from torch import nn from ..groups import Group from .linear_operator_base import LinearOperator from .linear_operators import ConcatLazy, I, lazify, densify, LazyJVP, LazyPerm, \ LazyDirectSum, LazyKron, LazyKronsum, lazy_direct_matmat, product from .utils import orthogonal_complement, krylov_constraint_solve, get_device
11,093
if rb == 1: return ra if rb == 0: return 0 if ra.concrete(): return SumRep(*(rb*[ra])) return DeferredSumRep(*(rb*[ra])) @dispatch def mul_reps(ra: int, rb): # pylint: disable=function-redefined """ Product of a scalar and a representation. """ return mul_reps(rb, ra) # pylint: disable=W1114:arguments-out-of-order class ScalarRep(Rep): """ The trivial representation of the group G. """ def __init__(self, G=None): super().__init__() self.G = G self.is_permutation = True def forward(self, G): self.G = G return self def size(self): return 1 def canonicalize(self): return self, torch.zeros(1, dtype=torch.long) def __repr__(self): return "V⁰" def t(self): return self def rho(self, M): return torch.eye(1, device=self.G.device) def drho(self, A): return 0*torch.eye(1, device=self.G.device) def __hash__(self): return 0 def __eq__(self, other): return isinstance(other, ScalarRep) def __mul__(self, other): if isinstance(other, int): return super().__mul__(other) return other def __rmul__(self, other): if isinstance(other, int): return super().__rmul__(other) return other def concrete(self): return True class Base(Rep): """ Base representation V of a group.""" def __init__(self, G=None): super().__init__() self.G = G if G is not None: self.is_permutation = G.is_permutation def forward(self, G): return self.__class__(G) def rho(self, M): if isinstance(self.G, Group) and isinstance(M, dict): M = M[self.G] return M def drho(self, A): if isinstance(self.G, Group) and isinstance(A, dict): A = A[self.G] return A def size(self): assert self.G is not None, f"must know G to find size for rep={self}" return self.G.d def __repr__(self): return "V" def __hash__(self): return hash((type(self), self.G)) def __eq__(self, other): return type(other) is type(self) and self.G == other.G def __lt__(self, other): if isinstance(other, Dual): return True return super().__lt__(other) class Dual(Rep): """ Dual representation V*, rho*, drho*.""" def __init__(self, rep): super().__init__() self.rep = rep self.G = rep.G if hasattr(rep, "is_permutation"): self.is_permutation = rep.is_permutation def forward(self, G): return self.rep(G).t() def rho(self, M): rho = self.rep.rho(M)
""" The base Representation class. """ class Rep(nn.Module): """ The base Representation class. Representation objects formalize the vector space V on which the group acts, the group representation matrix ρ(g), and the Lie Algebra representation dρ(A) in a single object. Representations act as types for vectors coming from V. These types can be manipulated and transformed with the built in operators ⊕,⊗,dual, as well as incorporating custom representations. Representation objects should be immutable. At minimum, new representations need to implement ``rho``, ``__str__``.""" def __init__(self): super().__init__() self.is_permutation = False self._size = None self.G = None def rho(self, M): """ Group representation of the matrix M of shape (d,d)""" raise NotImplementedError def drho(self, A): """ Lie Algebra representation of the matrix A of shape (d,d)""" In = torch.eye(A.size(0), dtype=A.dtype, device=A.device) return LazyJVP(self.rho, In, A) def forward(self, G): """ Instantiate (nonconcrete) representation with a symmetry group (forward) """ raise NotImplementedError def __str__(self): return repr(self) def __repr__(self): raise NotImplementedError def __eq__(self, other): if type(self) is not type(other): # pylint: disable=unidiomatic-typecheck return False return self.__hash__() == other.__hash__() def __hash__(self): raise NotImplementedError def size(self): """ Dimension dim(V) of the representation """ if self._size is not None: return self._size if self.concrete() and isinstance(self.G, Group): self._size = self.rho(self.G.sample()).size(-1) return self._size raise NotImplementedError def canonicalize(self): """ An optional method to convert the representation into a canonical form in order to reuse equivalent solutions in the solver. Should return both the canonically ordered representation, along with a permutation which can be applied to vectors of the current representation to achieve that ordering. """ # return canonicalized rep return self, torch.arange(self.size()) def rho_dense(self, M): """ A convenience function which returns rho(M) as a dense matrix.""" return densify(self.rho(M)) def drho_dense(self, A): """ A convenience function which returns drho(A) as a dense matrix.""" return densify(self.drho(A)) def constraint_matrix(self): """ Constructs the equivariance constrant matrix (lazily) by concatenating the constraints (ρ(hᵢ)-I) for i=1,...M and dρ(Aₖ) for k=1,..,D from the generators of the symmetry group. """ n = self.size() constraints = [] constraints.extend([lazify(self.rho(h)).to(self.G.device)-I(n, device=self.G.device) \ for h in self.G.discrete_generators]) constraints.extend([lazify(self.drho(A)).to(self.G.device) for A in self.G.lie_algebra]) return ConcatLazy(constraints) if constraints else lazify( torch.zeros((1, n), device=self.G.device)) solcache = {} def equivariant_basis(self): """ Computes the equivariant solution basis for the given representation of size N. Canonicalizes problems and caches solutions for reuse. 
Output [Q (N,r)] """ if self == Scalar: return torch.ones((1, 1), device=self.G.device) canon_rep, perm = self.canonicalize() invperm = torch.argsort(perm) if canon_rep not in self.solcache: logging.info("%r cache miss", canon_rep) logging.info("Solving basis for %r%s", self, f", for G={self.G}" if self.G is not None else "") C_lazy = canon_rep.constraint_matrix() if C_lazy.size(0)*C_lazy.size(1) > 3e7: # Too large to use SVD result = krylov_constraint_solve(C_lazy) else: C_dense = C_lazy.to_dense() result = orthogonal_complement(C_dense) self.solcache[canon_rep] = result return self.solcache[canon_rep][invperm] def equivariant_projector(self): """ Computes the (lazy) projection matrix P=QQᵀ that projects to the equivariant basis.""" Q = self.equivariant_basis() Q_lazy = lazify(Q) P = Q_lazy@Q_lazy.H() return P def concrete(self): """ Concreteness """ return isinstance(self.G, Group) def __add__(self, other): """ Direct sum (⊕) of representations. """ if isinstance(other, int): if other == 0: return self return self+other*Scalar if both_concrete(self, other): return SumRep(self, other) return DeferredSumRep(self, other) def __radd__(self, other): if isinstance(other, int): if other == 0: return self return other*Scalar+self return NotImplemented def __mul__(self, other): """ Tensor sum (⊗) of representations. """ return mul_reps(self, other) def __rmul__(self, other): return mul_reps(other, self) def __pow__(self, other): """ Iterated tensor product. """ assert isinstance(other, int), \ f"Power only supported for integers, not {type(other)}" assert other >= 0, f"Negative powers {other} not supported" return reduce(lambda a, b: a*b, other*[self], Scalar) def __rshift__(self, other): """ Linear maps from self -> other """ return other*self.t() def __lshift__(self, other): """ Linear maps from other -> self """ return self*other.t() def __lt__(self, other): """ less than defined to disambiguate ordering multiple different representations. Canonical ordering is determined first by Group, then by size, then by hash""" if other == Scalar: return False try: if self.G < other.G: return True if self.G > other.G: return False except (AttributeError, TypeError): pass if self.size() < other.size(): return True if self.size() > other.size(): return False return hash(self) < hash(other) # For sorting purposes only def t(self): """ Dual representation V*, rho*, drho*.""" if isinstance(self.G, Group) and self.G.is_orthogonal: return self return Dual(self) @dispatch def mul_reps(ra, rb: int): """ Product of a scalar and a representation. """ if rb == 1: return ra if rb == 0: return 0 if ra.concrete(): return SumRep(*(rb*[ra])) return DeferredSumRep(*(rb*[ra])) @dispatch def mul_reps(ra: int, rb): # pylint: disable=function-redefined """ Product of a scalar and a representation. """ return mul_reps(rb, ra) # pylint: disable=W1114:arguments-out-of-order class ScalarRep(Rep): """ The trivial representation of the group G. 
""" def __init__(self, G=None): super().__init__() self.G = G self.is_permutation = True def forward(self, G): self.G = G return self def size(self): return 1 def canonicalize(self): return self, torch.zeros(1, dtype=torch.long) def __repr__(self): return "V⁰" def t(self): return self def rho(self, M): return torch.eye(1, device=self.G.device) def drho(self, A): return 0*torch.eye(1, device=self.G.device) def __hash__(self): return 0 def __eq__(self, other): return isinstance(other, ScalarRep) def __mul__(self, other): if isinstance(other, int): return super().__mul__(other) return other def __rmul__(self, other): if isinstance(other, int): return super().__rmul__(other) return other def concrete(self): return True class Base(Rep): """ Base representation V of a group.""" def __init__(self, G=None): super().__init__() self.G = G if G is not None: self.is_permutation = G.is_permutation def forward(self, G): return self.__class__(G) def rho(self, M): if isinstance(self.G, Group) and isinstance(M, dict): M = M[self.G] return M def drho(self, A): if isinstance(self.G, Group) and isinstance(A, dict): A = A[self.G] return A def size(self): assert self.G is not None, f"must know G to find size for rep={self}" return self.G.d def __repr__(self): return "V" def __hash__(self): return hash((type(self), self.G)) def __eq__(self, other): return type(other) is type(self) and self.G == other.G def __lt__(self, other): if isinstance(other, Dual): return True return super().__lt__(other) class Dual(Rep): """ Dual representation V*, rho*, drho*.""" def __init__(self, rep): super().__init__() self.rep = rep self.G = rep.G if hasattr(rep, "is_permutation"): self.is_permutation = rep.is_permutation def forward(self, G): return self.rep(G).t() def rho(self, M): rho = self.rep.rho(M)
rhoinvt = rho.invt() if isinstance(rho, LinearOperator) else torch.linalg.inv(rho).t()
1
2023-11-01 07:19:02+00:00
16k
mbreuss/consistency_trajectory_models_toy_task
ctm_train.py
[ { "identifier": "ConsistencyTrajectoryModel", "path": "ctm/ctm.py", "snippet": "class ConsistencyTrajectoryModel(nn.Module):\n\n def __init__(\n self, \n data_dim: int,\n cond_dim: int,\n sampler_type: str,\n sigma_data: float,\n sigma...
from tqdm import tqdm from ctm.ctm import ConsistencyTrajectoryModel from ctm.toy_tasks.data_generator import DataGenerator from ctm.visualization.vis_utils import plot_main_figure
11,448
""" Discrete consistency distillation training of the consistency model on a toy task. We train a diffusion model and the consistency model at the same time and iteratively update the weights of the consistency model and the diffusion model. """ if __name__ == "__main__": device = 'cpu' n_sampling_steps = 10 use_pretraining = True
""" Discrete consistency distillation training of the consistency model on a toy task. We train a diffusion model and the consistency model at the same time and iteratively update the weights of the consistency model and the diffusion model. """ if __name__ == "__main__": device = 'cpu' n_sampling_steps = 10 use_pretraining = True
cm = ConsistencyTrajectoryModel(
0
2023-11-07 15:30:11+00:00
16k
awslabs/optimizing-multitask-training-through-dynamic-pipelines
dynapipe/pipe/data_loader.py
[ { "identifier": "ProfileBasedCostModelWithRC", "path": "dynapipe/data_opt/cost_models.py", "snippet": "class ProfileBasedCostModelWithRC(object):\n \"\"\"\n Wrapper class for multiple ProfileBasedCostModel objects, one for each\n tensor parallel degree and recomputation method.\n \"\"\"\n\n ...
import json import logging import multiprocessing as mp import os import time import traceback import torch import pickle from dataclasses import dataclass, field, fields from queue import Empty from typing import List, Optional from torch.utils.data import DataLoader as PTDataLoader from dynapipe.data_opt.cost_models import ProfileBasedCostModelWithRC from dynapipe.data_opt.optimizer import DataAssignmentOptimizer from dynapipe.model import DynaPipeCluster, TransformerModelSpec from dynapipe.pipe.instructions import ( deserialize_list_of_eps, serialize_list_of_eps, ) from dynapipe.schedule_opt.execution_planner import ExecutionPlanner from dynapipe.utils.logger import create_logger, logger from .kv_redis import RedisKVStore from .utils import validate_device_assignment
12,407
return kv_store, host, port def _checked_delete_key(kv_store: RedisKVStore, key: str, logger=None): result = kv_store.delete_key(key) if not result: raise RuntimeError( "Internal error: failed to delete key " "{}.".format(key) ) if logger is not None: logger.debug("Deleted key: {}".format(key)) def _get_from_shared_kv_store( kv_store: RedisKVStore, key: str, reader_idx: int, n_total_readers: int, decode: bool = True, logger=None, ): reader_count_key = key + "_rc" reader_ack_key = key + "_r{}_ack".format(reader_idx) # wait for reader ack if logger is not None: logger.debug("Waiting for reader ack key: {}".format(reader_ack_key)) kv_store.get(reader_ack_key) if logger is not None: logger.debug( "Got reader ack key: {}, waiting for data key: {}".format( reader_ack_key, key ) ) data = kv_store.get(key) if logger is not None: logger.debug("Removing reader ack key: {}".format(reader_ack_key)) # remove reader ack _checked_delete_key(kv_store, reader_ack_key, logger=logger) # get reader count reader_count = kv_store.add(reader_count_key, 1) if reader_count == n_total_readers: if logger is not None: logger.debug( "Last reader, reset reader count: {}".format(reader_count_key) ) # reset reader count result_readers = kv_store.add(reader_count_key, -n_total_readers) assert result_readers == 0 if logger is not None: logger.debug("Last reader, remove data key: {}".format(key)) # remove data key _checked_delete_key(kv_store, key, logger=logger) if logger is not None: logger.debug("Last reader, set ack key: {}".format(key + "_ack")) # set all reader ack keys keys_to_reset = [ key + "_r{}_ack".format(i) for i in range(n_total_readers) ] if logger is not None: logger.debug("Last reader, reset keys: {}".format(keys_to_reset)) for reset_key in keys_to_reset: val = kv_store.add(reset_key, 1) # make sure the key is set got_val = int(kv_store.get(reset_key).decode()) if not val == got_val: raise RuntimeError( "Failed to set reader ack key: {}".format(reset_key) ) if logger is not None: logger.debug("Set reader ack key: {}".format(reset_key)) # set data ack key kv_store.add(key + "_ack", 1) if decode: return data.decode() return data def _put_to_shared_kv_store( kv_store: RedisKVStore, key: str, data, logger=None ): # put execution plan into local kv store ack_key = key + "_ack" if logger is not None: logger.debug("Wait for data ack key: {}".format(ack_key)) # wait for ack key kv_store.get(ack_key) # remove ack key _checked_delete_key(kv_store, ack_key, logger=logger) if logger is not None: logger.debug("Set data key: {}".format(key)) # set data key kv_store.set(key, data) @dataclass class WorkerData: round_seqlen_multiple: Optional[int] = None logger: Optional[logging.Logger] = None kv_store: Optional[RedisKVStore] = None processed_batches: Optional[int] = None kv_buffer_size: Optional[int] = None seqlen_offset: Optional[int] = 0 def check_initialized(self): cls_fields = fields(self.__class__) for fld in cls_fields: if getattr(self, fld.name) is None: raise RuntimeError( "Worker data not initialized: {}".format(fld.name) ) @dataclass class PreprocessingWorkerData(WorkerData): # required at initialization: node_rank: Optional[int] = None profile_path: Optional[str] = None # filled later in worker init: dataopt: Optional[DataAssignmentOptimizer] = None
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 MANAGER_PROCESS_TIMEOUT = 1 RECEIVER_PROCESS_TIMEOUT = 1 KVSTORE_TIMEOUT = 1800 # 30 minutes # ONLY USED FOR DEBUG PURPOSES DEBUG_USE_DUMMY_EP = False DEBUG_DUMP_EP_STATS = os.getenv( "DYNAPIPE_DEBUG_DUMP_EP_STATS", "False" ).lower() in ("true", "1", "t") DEBUG_DUMP_EP_PREFIX = os.environ.get("DYNAPIPE_DEBUG_DUMP_EP_PREFIX", None) if DEBUG_DUMP_EP_STATS and DEBUG_DUMP_EP_PREFIX is None: raise ValueError( "DYNAPIPE_DEBUG_DUMP_EP_PREFIX must be set if " "DYNAPIPE_DEBUG_DUMP_EP_STATS is set." ) _kvstore_handle = None def _init_kv_store(is_master, logger=None): host = os.environ.get("DYNAPIPE_KV_HOST", "localhost") port = os.environ.get("DYNAPIPE_KV_PORT", 29500) if logger is not None: logger.debug( "Init kv store, is_master: {}, host: {}, port: {}".format( is_master, host, port ) ) # kv_store = torch.distributed.TCPStore( # "127.0.0.1", # port, # is_master=is_master, # timeout=timedelta(seconds=KVSTORE_TIMEOUT), # ) kv_store = RedisKVStore(host, port, is_master=is_master) return kv_store, host, port def _checked_delete_key(kv_store: RedisKVStore, key: str, logger=None): result = kv_store.delete_key(key) if not result: raise RuntimeError( "Internal error: failed to delete key " "{}.".format(key) ) if logger is not None: logger.debug("Deleted key: {}".format(key)) def _get_from_shared_kv_store( kv_store: RedisKVStore, key: str, reader_idx: int, n_total_readers: int, decode: bool = True, logger=None, ): reader_count_key = key + "_rc" reader_ack_key = key + "_r{}_ack".format(reader_idx) # wait for reader ack if logger is not None: logger.debug("Waiting for reader ack key: {}".format(reader_ack_key)) kv_store.get(reader_ack_key) if logger is not None: logger.debug( "Got reader ack key: {}, waiting for data key: {}".format( reader_ack_key, key ) ) data = kv_store.get(key) if logger is not None: logger.debug("Removing reader ack key: {}".format(reader_ack_key)) # remove reader ack _checked_delete_key(kv_store, reader_ack_key, logger=logger) # get reader count reader_count = kv_store.add(reader_count_key, 1) if reader_count == n_total_readers: if logger is not None: logger.debug( "Last reader, reset reader count: {}".format(reader_count_key) ) # reset reader count result_readers = kv_store.add(reader_count_key, -n_total_readers) assert result_readers == 0 if logger is not None: logger.debug("Last reader, remove data key: {}".format(key)) # remove data key _checked_delete_key(kv_store, key, logger=logger) if logger is not None: logger.debug("Last reader, set ack key: {}".format(key + "_ack")) # set all reader ack keys keys_to_reset = [ key + "_r{}_ack".format(i) for i in range(n_total_readers) ] if logger is not None: logger.debug("Last reader, reset keys: {}".format(keys_to_reset)) for reset_key in keys_to_reset: val = kv_store.add(reset_key, 1) # make sure the key is set got_val = int(kv_store.get(reset_key).decode()) if not val == got_val: raise RuntimeError( "Failed to set reader ack key: {}".format(reset_key) ) if logger is not None: logger.debug("Set reader ack key: {}".format(reset_key)) # set data ack key kv_store.add(key + "_ack", 1) if decode: return data.decode() return data def _put_to_shared_kv_store( kv_store: RedisKVStore, key: str, data, logger=None ): # put execution plan into local kv store ack_key = key + "_ack" if logger is not None: logger.debug("Wait for data ack key: {}".format(ack_key)) # wait for ack key kv_store.get(ack_key) # remove ack key _checked_delete_key(kv_store, 
ack_key, logger=logger) if logger is not None: logger.debug("Set data key: {}".format(key)) # set data key kv_store.set(key, data) @dataclass class WorkerData: round_seqlen_multiple: Optional[int] = None logger: Optional[logging.Logger] = None kv_store: Optional[RedisKVStore] = None processed_batches: Optional[int] = None kv_buffer_size: Optional[int] = None seqlen_offset: Optional[int] = 0 def check_initialized(self): cls_fields = fields(self.__class__) for fld in cls_fields: if getattr(self, fld.name) is None: raise RuntimeError( "Worker data not initialized: {}".format(fld.name) ) @dataclass class PreprocessingWorkerData(WorkerData): # required at initialization: node_rank: Optional[int] = None profile_path: Optional[str] = None # filled later in worker init: dataopt: Optional[DataAssignmentOptimizer] = None
exec_planner: Optional[ExecutionPlanner] = None
6
2023-11-08 07:58:20+00:00
16k
SqueezeAILab/LLMCompiler
src/llm_compiler/llm_compiler.py
[ { "identifier": "AsyncStatsCallbackHandler", "path": "src/callbacks/callbacks.py", "snippet": "class AsyncStatsCallbackHandler(AsyncCallbackHandler):\n \"\"\"Collect useful stats about the run.\n Add more stats as needed.\"\"\"\n\n def __init__(self, stream: bool = False) -> None:\n supe...
import asyncio from typing import Any, Dict, List, Mapping, Optional, Sequence, Union, cast from langchain.callbacks.manager import ( AsyncCallbackManagerForChainRun, CallbackManagerForChainRun, ) from langchain.llms import BaseLLM from langchain.prompts.base import StringPromptValue from src.callbacks.callbacks import AsyncStatsCallbackHandler from src.chains.chain import Chain from src.llm_compiler.constants import JOINNER_REPLAN from src.llm_compiler.planner import Planner from src.llm_compiler.task_fetching_unit import Task, TaskFetchingUnit from src.tools.base import StructuredTool, Tool from src.utils.logger_utils import log
12,299
return stats def reset_all_stats(self): if self.planner_callback: self.planner_callback.reset() if self.executor_callback: self.executor_callback.reset() @property def input_keys(self) -> List[str]: return [self.input_key] @property def output_keys(self) -> List[str]: return [self.output_key] # TODO(sk): move all join related functions to a separate class def _parse_joinner_output(self, raw_answer: str) -> str: """We expect the joinner output format to be: ``` Thought: xxx Action: Finish/Replan(yyy) ``` Returns: thought (xxx) answer (yyy) is_replan (True/False) """ thought, answer, is_replan = "", "", False # default values raw_answers = raw_answer.split("\n") for ans in raw_answers: if ans.startswith("Action:"): answer = ans[ans.find("(") + 1 : ans.find(")")] is_replan = JOINNER_REPLAN in ans elif ans.startswith("Thought:"): thought = ans.split("Thought:")[1].strip() return thought, answer, is_replan def _generate_context_for_replanner( self, tasks: Mapping[int, Task], joinner_thought: str ) -> str: """Formatted like this: ``` 1. action 1 Observation: xxx 2. action 2 Observation: yyy ... Thought: joinner_thought ``` """ previous_plan_and_observations = "\n".join( [ task.get_though_action_observation( include_action=True, include_action_idx=True ) for task in tasks.values() if not task.is_join ] ) joinner_thought = f"Thought: {joinner_thought}" context = "\n\n".join([previous_plan_and_observations, joinner_thought]) return context def _format_contexts(self, contexts: Sequence[str]) -> str: """contexts is a list of context each context is formatted as the description of _generate_context_for_replanner """ formatted_contexts = "" for context in contexts: formatted_contexts += f"Previous Plan:\n\n{context}\n\n" formatted_contexts += "Current Plan:\n\n" return formatted_contexts async def join( self, input_query: str, agent_scratchpad: str, is_final: bool ) -> str: if is_final: joinner_prompt = self.joinner_prompt_final else: joinner_prompt = self.joinner_prompt prompt = ( f"{joinner_prompt}\n" # Instructions and examples f"Question: {input_query}\n\n" # User input query f"{agent_scratchpad}\n" # T-A-O # "---\n" ) log("Joining prompt:\n", prompt, block=True) response = await self.agent.arun( prompt, callbacks=[self.executor_callback] if self.benchmark else None ) raw_answer = cast(str, response.generations[0][0].message.content) log("Question: \n", input_query, block=True) log("Raw Answer: \n", raw_answer, block=True) thought, answer, is_replan = self._parse_joinner_output(raw_answer) if is_final: # If final, we don't need to replan is_replan = False return thought, answer, is_replan def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ): raise NotImplementedError("LLMCompiler is async only.") async def _acall( self, inputs: Dict[str, Any], run_manager: Optional[AsyncCallbackManagerForChainRun] = None, ) -> Dict[str, Any]: contexts = [] joinner_thought = "" agent_scratchpad = "" for i in range(self.max_replans): is_first_iter = i == 0 is_final_iter = i == self.max_replans - 1
class LLMCompilerAgent: """Self defined agent for LLM Compiler.""" def __init__(self, llm: BaseLLM) -> None: self.llm = llm async def arun(self, prompt: str, callbacks=None) -> str: return await self.llm.agenerate_prompt( prompts=[StringPromptValue(text=prompt)], stop=None, callbacks=callbacks, ) class LLMCompiler(Chain, extra="allow"): """LLMCompuler Engine.""" """The step container to use.""" input_key: str = "input" output_key: str = "output" def __init__( self, tools: Sequence[Union[Tool, StructuredTool]], planner_llm: BaseLLM, planner_example_prompt: str, planner_example_prompt_replan: Optional[str], planner_stop: Optional[list[str]], planner_stream: bool, agent_llm: BaseLLM, joinner_prompt: str, joinner_prompt_final: Optional[str], max_replans: int, benchmark: bool, **kwargs, ) -> None: """ Args: tools: List of tools to use. max_replans: Maximum number of replans to do. benchmark: Whether to collect benchmark stats. Planner Args: planner_llm: LLM to use for planning. planner_example_prompt: Example prompt for planning. planner_example_prompt_replan: Example prompt for replanning. Assign this if you want to use different example prompt for replanning. If not assigned, default to `planner_example_prompt`. planner_stop: Stop tokens for planning. planner_stream: Whether to stream the planning. Agent Args: agent_llm: LLM to use for agent. joinner_prompt: Prompt to use for joinner. joinner_prompt_final: Prompt to use for joinner at the final replanning iter. If not assigned, default to `joinner_prompt`. """ super().__init__(**kwargs) if not planner_example_prompt_replan: log( "Replan example prompt not specified, using the same prompt as the planner." ) planner_example_prompt_replan = planner_example_prompt self.planner = Planner( llm=planner_llm, example_prompt=planner_example_prompt, example_prompt_replan=planner_example_prompt_replan, tools=tools, stop=planner_stop, ) self.agent = LLMCompilerAgent(agent_llm) self.joinner_prompt = joinner_prompt self.joinner_prompt_final = joinner_prompt_final or joinner_prompt self.planner_stream = planner_stream self.max_replans = max_replans # callbacks self.benchmark = benchmark if benchmark: self.planner_callback = AsyncStatsCallbackHandler(stream=planner_stream) self.executor_callback = AsyncStatsCallbackHandler(stream=False) else: self.planner_callback = None self.executor_callback = None def get_all_stats(self): stats = {} if self.benchmark: stats["planner"] = self.planner_callback.get_stats() stats["executor"] = self.executor_callback.get_stats() stats["total"] = { k: v + stats["executor"][k] for k, v in stats["planner"].items() } return stats def reset_all_stats(self): if self.planner_callback: self.planner_callback.reset() if self.executor_callback: self.executor_callback.reset() @property def input_keys(self) -> List[str]: return [self.input_key] @property def output_keys(self) -> List[str]: return [self.output_key] # TODO(sk): move all join related functions to a separate class def _parse_joinner_output(self, raw_answer: str) -> str: """We expect the joinner output format to be: ``` Thought: xxx Action: Finish/Replan(yyy) ``` Returns: thought (xxx) answer (yyy) is_replan (True/False) """ thought, answer, is_replan = "", "", False # default values raw_answers = raw_answer.split("\n") for ans in raw_answers: if ans.startswith("Action:"): answer = ans[ans.find("(") + 1 : ans.find(")")] is_replan = JOINNER_REPLAN in ans elif ans.startswith("Thought:"): thought = ans.split("Thought:")[1].strip() return thought, answer, is_replan def 
_generate_context_for_replanner( self, tasks: Mapping[int, Task], joinner_thought: str ) -> str: """Formatted like this: ``` 1. action 1 Observation: xxx 2. action 2 Observation: yyy ... Thought: joinner_thought ``` """ previous_plan_and_observations = "\n".join( [ task.get_though_action_observation( include_action=True, include_action_idx=True ) for task in tasks.values() if not task.is_join ] ) joinner_thought = f"Thought: {joinner_thought}" context = "\n\n".join([previous_plan_and_observations, joinner_thought]) return context def _format_contexts(self, contexts: Sequence[str]) -> str: """contexts is a list of context each context is formatted as the description of _generate_context_for_replanner """ formatted_contexts = "" for context in contexts: formatted_contexts += f"Previous Plan:\n\n{context}\n\n" formatted_contexts += "Current Plan:\n\n" return formatted_contexts async def join( self, input_query: str, agent_scratchpad: str, is_final: bool ) -> str: if is_final: joinner_prompt = self.joinner_prompt_final else: joinner_prompt = self.joinner_prompt prompt = ( f"{joinner_prompt}\n" # Instructions and examples f"Question: {input_query}\n\n" # User input query f"{agent_scratchpad}\n" # T-A-O # "---\n" ) log("Joining prompt:\n", prompt, block=True) response = await self.agent.arun( prompt, callbacks=[self.executor_callback] if self.benchmark else None ) raw_answer = cast(str, response.generations[0][0].message.content) log("Question: \n", input_query, block=True) log("Raw Answer: \n", raw_answer, block=True) thought, answer, is_replan = self._parse_joinner_output(raw_answer) if is_final: # If final, we don't need to replan is_replan = False return thought, answer, is_replan def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ): raise NotImplementedError("LLMCompiler is async only.") async def _acall( self, inputs: Dict[str, Any], run_manager: Optional[AsyncCallbackManagerForChainRun] = None, ) -> Dict[str, Any]: contexts = [] joinner_thought = "" agent_scratchpad = "" for i in range(self.max_replans): is_first_iter = i == 0 is_final_iter = i == self.max_replans - 1
task_fetching_unit = TaskFetchingUnit()
5
2023-12-06 21:12:54+00:00
16k
bytedance/ImageDream
threestudio/models/geometry/tetrahedra_sdf_grid.py
[ { "identifier": "BaseExplicitGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseExplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Flo...
from dataclasses import dataclass, field from threestudio.models.geometry.base import ( BaseExplicitGeometry, BaseGeometry, contract_to_unisphere, ) from threestudio.models.geometry.implicit_sdf import ImplicitSDF from threestudio.models.geometry.implicit_volume import ImplicitVolume from threestudio.models.isosurface import MarchingTetrahedraHelper from threestudio.models.mesh import Mesh from threestudio.models.networks import get_encoding, get_mlp from threestudio.utils.ops import scale_tensor from threestudio.utils.typing import * import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import threestudio
14,365
nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, ) self.mesh: Optional[Mesh] = None def initialize_shape(self) -> None: raise NotImplementedError def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1) enc = self.encoding(points.view(-1, self.cfg.n_input_dims)) features = self.feature_network(enc).view( *points.shape[:-1], self.cfg.n_feature_dims ) return {"features": features} @staticmethod @torch.no_grad() def create_from( other: BaseGeometry, cfg: Optional[Union[dict, DictConfig]] = None, copy_net: bool = True, **kwargs, ) -> "TetrahedraSDFGrid": if isinstance(other, TetrahedraSDFGrid): instance = TetrahedraSDFGrid(cfg, **kwargs) assert instance.cfg.isosurface_resolution == other.cfg.isosurface_resolution instance.isosurface_bbox = other.isosurface_bbox.clone() instance.sdf.data = other.sdf.data.clone() if ( instance.cfg.isosurface_deformable_grid and other.cfg.isosurface_deformable_grid ): assert ( instance.deformation is not None and other.deformation is not None ) instance.deformation.data = other.deformation.data.clone() if ( not instance.cfg.geometry_only and not other.cfg.geometry_only and copy_net ): instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance elif isinstance(other, ImplicitVolume): instance = TetrahedraSDFGrid(cfg, **kwargs) if other.cfg.isosurface_method != "mt": other.cfg.isosurface_method = "mt" threestudio.warn( f"Override isosurface_method of the source geometry to 'mt'" ) if other.cfg.isosurface_resolution != instance.cfg.isosurface_resolution: other.cfg.isosurface_resolution = instance.cfg.isosurface_resolution threestudio.warn( f"Override isosurface_resolution of the source geometry to {instance.cfg.isosurface_resolution}" ) mesh = other.isosurface() instance.isosurface_bbox = mesh.extras["bbox"] instance.sdf.data = ( mesh.extras["grid_level"].to(instance.sdf.data).clamp(-1, 1) ) if not instance.cfg.geometry_only and copy_net: instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, ) self.mesh: Optional[Mesh] = None def initialize_shape(self) -> None: raise NotImplementedError def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1) enc = self.encoding(points.view(-1, self.cfg.n_input_dims)) features = self.feature_network(enc).view( *points.shape[:-1], self.cfg.n_feature_dims ) return {"features": features} @staticmethod @torch.no_grad() def create_from( other: BaseGeometry, cfg: Optional[Union[dict, DictConfig]] = None, copy_net: bool = True, **kwargs, ) -> "TetrahedraSDFGrid": if 
isinstance(other, TetrahedraSDFGrid): instance = TetrahedraSDFGrid(cfg, **kwargs) assert instance.cfg.isosurface_resolution == other.cfg.isosurface_resolution instance.isosurface_bbox = other.isosurface_bbox.clone() instance.sdf.data = other.sdf.data.clone() if ( instance.cfg.isosurface_deformable_grid and other.cfg.isosurface_deformable_grid ): assert ( instance.deformation is not None and other.deformation is not None ) instance.deformation.data = other.deformation.data.clone() if ( not instance.cfg.geometry_only and not other.cfg.geometry_only and copy_net ): instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance elif isinstance(other, ImplicitVolume): instance = TetrahedraSDFGrid(cfg, **kwargs) if other.cfg.isosurface_method != "mt": other.cfg.isosurface_method = "mt" threestudio.warn( f"Override isosurface_method of the source geometry to 'mt'" ) if other.cfg.isosurface_resolution != instance.cfg.isosurface_resolution: other.cfg.isosurface_resolution = instance.cfg.isosurface_resolution threestudio.warn( f"Override isosurface_resolution of the source geometry to {instance.cfg.isosurface_resolution}" ) mesh = other.isosurface() instance.isosurface_bbox = mesh.extras["bbox"] instance.sdf.data = ( mesh.extras["grid_level"].to(instance.sdf.data).clamp(-1, 1) ) if not instance.cfg.geometry_only and copy_net: instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance
elif isinstance(other, ImplicitSDF):
3
2023-12-13 21:09:37+00:00
16k
allenai/unified-io-2
t5x/models_test.py
[ { "identifier": "decoding", "path": "t5x/decoding.py", "snippet": "NEG_INF = np.array(-1.0e7)\nMIN_TEMPERATURE = np.array(1e-4)\nclass DecodingState:\nclass SamplingLoopState:\nclass BeamState:\ndef _is_tracer(value: Any):\ndef temperature_sample(\n inputs: jnp.ndarray,\n cache: Mapping[str, jnp.n...
import functools import flax import jax import jax.numpy as jnp import numpy as np import t5.data.tasks # pylint:disable=unused-import import tensorflow as tf from unittest import mock from absl import logging from absl.testing import absltest from absl.testing import parameterized from flax import traverse_util from t5x import decoding from t5x import models from t5x import partitioning from t5x import test_utils from t5x import trainer as trainer_lib from t5x import utils
12,627
@parameterized.named_parameters( dict(testcase_name='no_force_decoding', prompt_with_targets=False), dict(testcase_name='force_decoding', prompt_with_targets=True), ) def test_prompt_with_targets(self, prompt_with_targets): batch_size, encoder_len, max_decode_len, emb_dim = 2, 3, 4, 5 batch = { 'encoder_input_tokens': np.zeros((batch_size, encoder_len), dtype=np.int32), 'decoder_input_tokens': np.full([batch_size, max_decode_len], 2, dtype=np.int32) } # These dummy logits represent the probability distribution where all the # probability mass is in one item (i.e., degenerate distribution). For # batch element 0, it is vocabulary index 3. # We test `_predict_step` to avoid having to define a task and its # vocabulary. dummy_logits = jnp.expand_dims( jnp.array([[-1e7, -1e7, -1e7, 0, -1e7], [-1e7, -1e7, -1e7, -1e7, 0]]), axis=1) mock_decode_fn = mock.Mock() mock_decode_fn.return_value = (np.full([batch_size, max_decode_len, 1], 3, dtype=np.int32), np.full([batch_size, 1], 1.0, dtype=np.float32)) class MockModule: def __init__(self): self.dtype = jnp.float32 def apply(self, *args, method=None, **kwargs): del args, kwargs if method is None: # use for module.`__call__` return (dummy_logits, {'cache': {}}) else: return method() def encode(self): return jnp.zeros((batch_size, encoder_len, emb_dim)) def decode(self): return (dummy_logits, {'cache': {}}) def mock_init(self): self.module = MockModule() self.module.scan_layers = False self._input_vocabulary = mock.Mock(eos_id=1) self._output_vocabulary = mock.Mock(eos_id=1) self._decode_fn = mock_decode_fn with mock.patch.object( models.EncoderDecoderModel, '__init__', new=mock_init): model = models.EncoderDecoderModel() model.predict_batch_with_aux({}, batch, prompt_with_targets=prompt_with_targets) if prompt_with_targets: expected_inputs = batch['decoder_input_tokens'] else: expected_inputs = np.zeros([batch_size, max_decode_len], dtype=np.int32) assert mock_decode_fn.call_count == 1 # Look at the kwargs call list for inputs, assert_called_with doesn't # work well with np.array comparison. np.testing.assert_array_equal(mock_decode_fn.mock_calls[0][2]['inputs'], expected_inputs) def test_predict_batch_loop_and_caches_are_equal(self): vocab_size = 50 lengths = np.array([[2], [3]]) batch_size, beam_size, encoder_len, max_decode_len = 2, 2, 3, 7 batch = { 'encoder_input_tokens': np.zeros((batch_size, encoder_len), dtype=np.int32), 'decoder_target_tokens': np.zeros((batch_size, encoder_len), dtype=np.int32), 'decoder_input_tokens': np.concatenate( [ np.expand_dims( np.concatenate( [[0], np.arange(9, 9 + lengths[0][0], dtype=np.int32), np.zeros((max_decode_len - lengths[0][0] - 1), dtype=np.int32)]), axis=0), # First element np.expand_dims( np.concatenate( [[0], np.arange(3, 3 + lengths[1][0], dtype=np.int32), np.zeros((max_decode_len - lengths[1][0] - 1), dtype=np.int32)]), axis=0) # Second element ], axis=0), } model = test_utils.get_t5_test_model(vocab_size=50) module = model.module params = module.init( jax.random.PRNGKey(0), jnp.ones((batch_size, encoder_len)), jnp.ones((batch_size, max_decode_len)), jnp.ones((batch_size, max_decode_len)), enable_dropout=False)['params'] def mock_init(self): self.module = module # Set the EOS token to be larger then the vocabulary size. This forces the # model to decode all the way to `max_decode_length`, allowing us to test # behavior when one element reaches the end before the others. self._output_vocabulary = mock.Mock(eos_id=vocab_size + 12)
# Copyright 2022 The T5X Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for t5x.models.""" # Parse absl flags test_srcdir and test_tmpdir. jax.config.parse_flags_with_absl() PartitionSpec = partitioning.PartitionSpec class ModelsTest(parameterized.TestCase): def test_remove_prefix(self): sequences = np.array([[1, 2, 3, 4, 5, 6, 7, 0], [6, 7, 8, 9, 10, 11, 0, 0]]) prefix_lengths = np.array([2, 4]) expected = [[3, 4, 5, 6, 7, 0, 0, 0], [10, 11, 0, 0, 0, 0, 0, 0]] remove_prefix = jax.jit(models.remove_prefix) actual = remove_prefix(sequences, prefix_lengths) np.testing.assert_array_equal(actual, expected) def test_remove_prefix_zero_len_prefix(self): sequences = np.array([[1, 2, 3, 4, 5, 6, 7, 0], [6, 7, 8, 9, 10, 11, 0, 0]]) prefix_lengths = np.array([0, 0]) remove_prefix = jax.jit(models.remove_prefix) actual = remove_prefix(sequences, prefix_lengths) # The expected output is the original sequences. np.testing.assert_array_equal(actual, sequences) BATCH_SIZE, ENCODER_LEN, MAX_DECODE_LEN, EMBED_DIM = 2, 3, 4, 5 class EncoderDecoderModelTest(parameterized.TestCase): @parameterized.named_parameters( dict( testcase_name='no_types', shapes={ 'encoder_input_tokens': [1, 512], 'decoder_input_tokens': [1, 62] }, types=None), dict( testcase_name='int32', shapes={ 'encoder_input_tokens': [1, 512], 'decoder_input_tokens': [1, 62] }, types={ 'encoder_input_tokens': jnp.int32, 'decoder_input_tokens': jnp.int32 }), dict( testcase_name='float32', shapes={ 'encoder_input_tokens': [1, 512], 'decoder_input_tokens': [1, 62], 'encoder_positions': [1, 512], 'decoder_positions': [1, 62], }, types={ 'encoder_input_tokens': jnp.int32, 'decoder_input_tokens': jnp.int32, 'encoder_positions': jnp.int32, 'decoder_positions': jnp.int32 }), dict( testcase_name='float32_segment_ids', shapes={ 'encoder_input_tokens': [1, 512], 'decoder_input_tokens': [1, 62], 'encoder_segment_ids': [1, 512], 'decoder_segment_ids': [1, 62], }, types={ 'encoder_input_tokens': jnp.int32, 'decoder_input_tokens': jnp.int32, 'encoder_segment_ids': jnp.int32, 'decoder_segment_ids': jnp.int32 }), ) def test_get_initial_variables_shapes_and_types(self, shapes, types): mock_transformer = mock.Mock() mock_transformer.init.return_value = {'params': {}} mock_optimizer_def = mock.Mock() rng = mock.Mock() def mock_init(self): self.module = mock_transformer self.optimizer_def = mock_optimizer_def with mock.patch.object( models.EncoderDecoderModel, '__init__', new=mock_init): model = models.EncoderDecoderModel() model.get_initial_variables(rng, shapes, types) if types is None: encoder_input = jnp.ones( shapes['encoder_input_tokens'], dtype=jnp.float32) decoder_input = jnp.ones( shapes['decoder_input_tokens'], dtype=jnp.float32) else: encoder_input = jnp.ones( shapes['encoder_input_tokens'], dtype=types['encoder_input_tokens']) decoder_input = jnp.ones( shapes['decoder_input_tokens'], dtype=types['decoder_input_tokens']) # Using `.assert_called_once_with` doesn't work because the simple # comparison it does for the array arguments fail (truth value of 
an array # is ambiguous). called_with = mock_transformer.init.call_args self.assertEqual(called_with[0][0], rng) np.testing.assert_allclose(called_with[0][1], encoder_input) np.testing.assert_allclose(called_with[0][2], decoder_input) np.testing.assert_allclose(called_with[0][3], decoder_input) if 'encoder_positions' in shapes: encoder_positions = jnp.ones( shapes['encoder_positions'], dtype=types['encoder_positions']) np.testing.assert_allclose(called_with[1]['encoder_positions'], encoder_positions) else: self.assertIsNone(called_with[1]['encoder_positions']) if 'decoder_positions' in shapes: decoder_positions = jnp.ones( shapes['decoder_positions'], dtype=types['decoder_positions']) np.testing.assert_allclose(called_with[1]['decoder_positions'], decoder_positions) else: self.assertIsNone(called_with[1]['decoder_positions']) if 'encoder_segment_ids' in shapes: encoder_positions = jnp.ones( shapes['encoder_segment_ids'], dtype=types['encoder_segment_ids']) np.testing.assert_allclose(called_with[1]['encoder_segment_ids'], encoder_positions) else: self.assertIsNone(called_with[1]['encoder_segment_ids']) if 'decoder_segment_ids' in shapes: decoder_segment_ids = jnp.ones( shapes['decoder_segment_ids'], dtype=types['decoder_segment_ids']) np.testing.assert_allclose(called_with[1]['decoder_segment_ids'], decoder_segment_ids) else: self.assertIsNone(called_with[1]['decoder_segment_ids']) self.assertFalse(called_with[1]['decode']) self.assertFalse(called_with[1]['enable_dropout']) @parameterized.named_parameters( dict(testcase_name='no_force_decoding', prompt_with_targets=False), dict(testcase_name='force_decoding', prompt_with_targets=True), ) def test_prompt_with_targets(self, prompt_with_targets): batch_size, encoder_len, max_decode_len, emb_dim = 2, 3, 4, 5 batch = { 'encoder_input_tokens': np.zeros((batch_size, encoder_len), dtype=np.int32), 'decoder_input_tokens': np.full([batch_size, max_decode_len], 2, dtype=np.int32) } # These dummy logits represent the probability distribution where all the # probability mass is in one item (i.e., degenerate distribution). For # batch element 0, it is vocabulary index 3. # We test `_predict_step` to avoid having to define a task and its # vocabulary. 
dummy_logits = jnp.expand_dims( jnp.array([[-1e7, -1e7, -1e7, 0, -1e7], [-1e7, -1e7, -1e7, -1e7, 0]]), axis=1) mock_decode_fn = mock.Mock() mock_decode_fn.return_value = (np.full([batch_size, max_decode_len, 1], 3, dtype=np.int32), np.full([batch_size, 1], 1.0, dtype=np.float32)) class MockModule: def __init__(self): self.dtype = jnp.float32 def apply(self, *args, method=None, **kwargs): del args, kwargs if method is None: # use for module.`__call__` return (dummy_logits, {'cache': {}}) else: return method() def encode(self): return jnp.zeros((batch_size, encoder_len, emb_dim)) def decode(self): return (dummy_logits, {'cache': {}}) def mock_init(self): self.module = MockModule() self.module.scan_layers = False self._input_vocabulary = mock.Mock(eos_id=1) self._output_vocabulary = mock.Mock(eos_id=1) self._decode_fn = mock_decode_fn with mock.patch.object( models.EncoderDecoderModel, '__init__', new=mock_init): model = models.EncoderDecoderModel() model.predict_batch_with_aux({}, batch, prompt_with_targets=prompt_with_targets) if prompt_with_targets: expected_inputs = batch['decoder_input_tokens'] else: expected_inputs = np.zeros([batch_size, max_decode_len], dtype=np.int32) assert mock_decode_fn.call_count == 1 # Look at the kwargs call list for inputs, assert_called_with doesn't # work well with np.array comparison. np.testing.assert_array_equal(mock_decode_fn.mock_calls[0][2]['inputs'], expected_inputs) def test_predict_batch_loop_and_caches_are_equal(self): vocab_size = 50 lengths = np.array([[2], [3]]) batch_size, beam_size, encoder_len, max_decode_len = 2, 2, 3, 7 batch = { 'encoder_input_tokens': np.zeros((batch_size, encoder_len), dtype=np.int32), 'decoder_target_tokens': np.zeros((batch_size, encoder_len), dtype=np.int32), 'decoder_input_tokens': np.concatenate( [ np.expand_dims( np.concatenate( [[0], np.arange(9, 9 + lengths[0][0], dtype=np.int32), np.zeros((max_decode_len - lengths[0][0] - 1), dtype=np.int32)]), axis=0), # First element np.expand_dims( np.concatenate( [[0], np.arange(3, 3 + lengths[1][0], dtype=np.int32), np.zeros((max_decode_len - lengths[1][0] - 1), dtype=np.int32)]), axis=0) # Second element ], axis=0), } model = test_utils.get_t5_test_model(vocab_size=50) module = model.module params = module.init( jax.random.PRNGKey(0), jnp.ones((batch_size, encoder_len)), jnp.ones((batch_size, max_decode_len)), jnp.ones((batch_size, max_decode_len)), enable_dropout=False)['params'] def mock_init(self): self.module = module # Set the EOS token to be larger then the vocabulary size. This forces the # model to decode all the way to `max_decode_length`, allowing us to test # behavior when one element reaches the end before the others. self._output_vocabulary = mock.Mock(eos_id=vocab_size + 12)
self._decode_fn = decoding.beam_search
0
2023-12-12 20:23:33+00:00
16k
zju3dv/EasyVolcap
tests/headless_opengl_tests.py
[ { "identifier": "eglContextManager", "path": "easyvolcap/utils/egl_utils.py", "snippet": "class eglContextManager:\n # Manages the creation and destruction of an EGL context\n # Will resize if the size of the window changes\n # Will also manage gl.Viewport to render different parts of the scree...
from easyvolcap.utils.egl_utils import eglContextManager # must be imported before OpenGL.GL from os.path import join, dirname from easyvolcap.utils.console_utils import * from easyvolcap.utils.gl_utils import Quad, Mesh from easyvolcap.utils.viewer_utils import Camera from easyvolcap.utils.data_utils import save_image from easyvolcap.utils.gl_utils import common_opengl_options, linearize_depth from easyvolcap.utils.test_utils import my_tests import OpenGL.GL as gl import os import cv2 import torch import numpy as np
11,678
from __future__ import absolute_import, division, print_function # fmt: off # fmt: on WIDTH, HEIGHT = 512, 512 eglctx = eglContextManager(HEIGHT, WIDTH) # will create a new context
from __future__ import absolute_import, division, print_function # fmt: off # fmt: on WIDTH, HEIGHT = 512, 512 eglctx = eglContextManager(HEIGHT, WIDTH) # will create a new context
common_opengl_options() # common init
5
2023-12-07 08:53:42+00:00
16k
alibaba/animate-anything
utils/lora_handler.py
[ { "identifier": "UNet3DConditionModel", "path": "models/unet_3d_condition_mask.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin):\n r\"\"\"\n UNet3DConditionModel is a conditional 2D UNet model that takes in a noisy sample, conditional state, and a timestep\n and returns samp...
import os import torch import uuid from logging import warnings from typing import Union from types import SimpleNamespace from models.unet_3d_condition_mask import UNet3DConditionModel from transformers import CLIPTextModel from utils.convert_diffusers_to_original_ms_text_to_video import convert_unet_state_dict, convert_text_enc_state_dict_v20 from .lora import ( extract_lora_ups_down, inject_trainable_lora_extended, save_lora_weight, train_patch_pipe, monkeypatch_or_replace_lora, monkeypatch_or_replace_lora_extended ) from stable_lora.lora import ( activate_lora_train, add_lora_to, save_lora, load_lora, set_mode_group )
11,099
FILE_BASENAMES = ['unet', 'text_encoder'] LORA_FILE_TYPES = ['.pt', '.safetensors'] CLONE_OF_SIMO_KEYS = ['model', 'loras', 'target_replace_module', 'r'] STABLE_LORA_KEYS = ['model', 'target_module', 'search_class', 'r', 'dropout', 'lora_bias'] lora_versions = dict( stable_lora = "stable_lora", cloneofsimo = "cloneofsimo" ) lora_func_types = dict( loader = "loader", injector = "injector" ) lora_args = dict( model = None, loras = None, target_replace_module = [], target_module = [], r = 4, search_class = [torch.nn.Linear], dropout = 0, lora_bias = 'none' ) LoraVersions = SimpleNamespace(**lora_versions) LoraFuncTypes = SimpleNamespace(**lora_func_types) LORA_VERSIONS = [LoraVersions.stable_lora, LoraVersions.cloneofsimo] LORA_FUNC_TYPES = [LoraFuncTypes.loader, LoraFuncTypes.injector] def filter_dict(_dict, keys=[]): if len(keys) == 0: assert "Keys cannot empty for filtering return dict." for k in keys: if k not in lora_args.keys(): assert f"{k} does not exist in available LoRA arguments" return {k: v for k, v in _dict.items() if k in keys} class LoraHandler(object): def __init__( self, version: LORA_VERSIONS = LoraVersions.cloneofsimo, use_unet_lora: bool = False, use_text_lora: bool = False, save_for_webui: bool = False, only_for_webui: bool = False, lora_bias: str = 'none', unet_replace_modules: list = ['UNet3DConditionModel'], text_encoder_replace_modules: list = ['CLIPEncoderLayer'] ): self.version = version self.lora_loader = self.get_lora_func(func_type=LoraFuncTypes.loader) self.lora_injector = self.get_lora_func(func_type=LoraFuncTypes.injector) self.lora_bias = lora_bias self.use_unet_lora = use_unet_lora self.use_text_lora = use_text_lora self.save_for_webui = save_for_webui self.only_for_webui = only_for_webui self.unet_replace_modules = unet_replace_modules self.text_encoder_replace_modules = text_encoder_replace_modules self.use_lora = any([use_text_lora, use_unet_lora]) if self.use_lora: print(f"Using LoRA Version: {self.version}") def is_cloneofsimo_lora(self): return self.version == LoraVersions.cloneofsimo def is_stable_lora(self): return self.version == LoraVersions.stable_lora def get_lora_func(self, func_type: LORA_FUNC_TYPES = LoraFuncTypes.loader): if self.is_cloneofsimo_lora(): if func_type == LoraFuncTypes.loader: return monkeypatch_or_replace_lora_extended if func_type == LoraFuncTypes.injector: return inject_trainable_lora_extended if self.is_stable_lora(): if func_type == LoraFuncTypes.loader: return load_lora if func_type == LoraFuncTypes.injector:
FILE_BASENAMES = ['unet', 'text_encoder'] LORA_FILE_TYPES = ['.pt', '.safetensors'] CLONE_OF_SIMO_KEYS = ['model', 'loras', 'target_replace_module', 'r'] STABLE_LORA_KEYS = ['model', 'target_module', 'search_class', 'r', 'dropout', 'lora_bias'] lora_versions = dict( stable_lora = "stable_lora", cloneofsimo = "cloneofsimo" ) lora_func_types = dict( loader = "loader", injector = "injector" ) lora_args = dict( model = None, loras = None, target_replace_module = [], target_module = [], r = 4, search_class = [torch.nn.Linear], dropout = 0, lora_bias = 'none' ) LoraVersions = SimpleNamespace(**lora_versions) LoraFuncTypes = SimpleNamespace(**lora_func_types) LORA_VERSIONS = [LoraVersions.stable_lora, LoraVersions.cloneofsimo] LORA_FUNC_TYPES = [LoraFuncTypes.loader, LoraFuncTypes.injector] def filter_dict(_dict, keys=[]): if len(keys) == 0: assert "Keys cannot empty for filtering return dict." for k in keys: if k not in lora_args.keys(): assert f"{k} does not exist in available LoRA arguments" return {k: v for k, v in _dict.items() if k in keys} class LoraHandler(object): def __init__( self, version: LORA_VERSIONS = LoraVersions.cloneofsimo, use_unet_lora: bool = False, use_text_lora: bool = False, save_for_webui: bool = False, only_for_webui: bool = False, lora_bias: str = 'none', unet_replace_modules: list = ['UNet3DConditionModel'], text_encoder_replace_modules: list = ['CLIPEncoderLayer'] ): self.version = version self.lora_loader = self.get_lora_func(func_type=LoraFuncTypes.loader) self.lora_injector = self.get_lora_func(func_type=LoraFuncTypes.injector) self.lora_bias = lora_bias self.use_unet_lora = use_unet_lora self.use_text_lora = use_text_lora self.save_for_webui = save_for_webui self.only_for_webui = only_for_webui self.unet_replace_modules = unet_replace_modules self.text_encoder_replace_modules = text_encoder_replace_modules self.use_lora = any([use_text_lora, use_unet_lora]) if self.use_lora: print(f"Using LoRA Version: {self.version}") def is_cloneofsimo_lora(self): return self.version == LoraVersions.cloneofsimo def is_stable_lora(self): return self.version == LoraVersions.stable_lora def get_lora_func(self, func_type: LORA_FUNC_TYPES = LoraFuncTypes.loader): if self.is_cloneofsimo_lora(): if func_type == LoraFuncTypes.loader: return monkeypatch_or_replace_lora_extended if func_type == LoraFuncTypes.injector: return inject_trainable_lora_extended if self.is_stable_lora(): if func_type == LoraFuncTypes.loader: return load_lora if func_type == LoraFuncTypes.injector:
return add_lora_to
10
2023-12-07 08:26:29+00:00
16k
octo-models/octo
scripts/finetune.py
[ { "identifier": "make_single_dataset", "path": "octo/data/dataset.py", "snippet": "def make_single_dataset(\n dataset_kwargs: dict,\n *,\n train: bool,\n traj_transform_kwargs: dict = {},\n frame_transform_kwargs: dict = {},\n) -> dl.DLataset:\n \"\"\"Creates a single dataset from kwar...
import datetime import imp import os import flax import jax import optax import tensorflow as tf import tqdm import wandb from functools import partial from absl import app, flags, logging from flax.traverse_util import flatten_dict from jax.sharding import Mesh, NamedSharding, PartitionSpec from ml_collections import config_flags, ConfigDict from octo.data.dataset import make_single_dataset from octo.model.octo_model import OctoModel from octo.utils.jax_utils import initialize_compilation_cache from octo.utils.spec import ModuleSpec from octo.utils.train_callbacks import ( RolloutVisualizationCallback, SaveCallback, ValidationCallback, VisualizationCallback, ) from octo.utils.train_utils import ( check_config_diff, create_optimizer, format_name_with_config, merge_params, process_text, Timer, TrainState, ) from jax_smi import initialise_tracking # type: ignore
11,508
wandb_id, ) wandb.config.update(dict(save_dir=save_dir), allow_val_change=True) logging.info("Saving to %s", save_dir) save_callback = SaveCallback(save_dir) # Add window_size to top of config, to make eval easier new_config = ConfigDict(model.config) new_config["window_size"] = example_batch["observation"]["pad_mask"].shape[1] model = model.replace(config=new_config) # Save finetuning config since it's not saved by SaveCallback, i.e. as part of model.save_pretrained() with open( tf.io.gfile.join(save_dir, "finetune_config.json"), "w" ) as config_file: config_file.write(FLAGS.config.to_json_best_effort()) else: save_dir = None save_callback = SaveCallback(None) logging.warning("save_dir not passed in, not saving checkpoints") example_batch_spec = jax.tree_map( lambda arr: (arr.shape, str(arr.dtype)), example_batch ) wandb.config.update( dict(example_batch_spec=example_batch_spec), allow_val_change=True ) ######### # # Define loss, train_step, and eval_step # ######### def loss_fn(params, batch, rng, train=True): bound_module = model.module.bind({"params": params}, rngs={"dropout": rng}) transformer_embeddings = bound_module.octo_transformer( batch["observation"], batch["task"], batch["observation"]["pad_mask"], train=train, ) action_loss, action_metrics = bound_module.heads["action"].loss( transformer_embeddings, # Action head knows to pull out the action readout_key batch["action"], pad_mask=batch["observation"]["pad_mask"], train=train, ) return action_loss, action_metrics # Data parallelism # Model is replicated across devices, data is split across devices @partial( jax.jit, in_shardings=[replicated_sharding, dp_sharding], ) def train_step(state, batch): rng, dropout_rng = jax.random.split(state.rng) (loss, info), grads = jax.value_and_grad(loss_fn, has_aux=True)( state.model.params, batch, dropout_rng, train=True ) # Gradient Metrics (TODO: Does the finetuner need these?) ### grad_norm = optax.global_norm(grads) updates, _ = state.tx.update(grads, state.opt_state, state.model.params) update_norm = optax.global_norm(updates) info.update( { "grad_norm": grad_norm, "update_norm": update_norm, "param_norm": param_norm_callable(state.model.params), "learning_rate": lr_callable(state.step), } ) # End Debug Metrics # new_state = state.apply_gradients(grads=grads, rng=rng) return new_state, info ######### # # Build validation & visualization callbacks # ######### if FLAGS.config.modality == "image_conditioned": modes_to_evaluate = ["image_conditioned"] elif FLAGS.config.modality == "text_conditioned": modes_to_evaluate = ["text_conditioned"] elif FLAGS.config.modality == "multimodal": modes_to_evaluate = ["image_conditioned", "text_conditioned"] else: modes_to_evaluate = ["base"] dataset_kwargs_list = [FLAGS.config.dataset_kwargs] val_callback = ValidationCallback( loss_fn=loss_fn, process_batch_fn=process_batch, text_processor=text_processor, val_dataset_kwargs_list=dataset_kwargs_list, dataset_kwargs=FLAGS.config, modes_to_evaluate=modes_to_evaluate, **FLAGS.config.val_kwargs, ) viz_callback = VisualizationCallback( text_processor=text_processor, val_dataset_kwargs_list=dataset_kwargs_list, dataset_kwargs=FLAGS.config, modes_to_evaluate=modes_to_evaluate, **FLAGS.config.viz_kwargs, ) ######### # # Optionally build visualizers for sim env evals # ######### if "rollout_kwargs" in FLAGS.config:
try: initialise_tracking() except ImportError: pass FLAGS = flags.FLAGS flags.DEFINE_string("name", "experiment", "Experiment name.") flags.DEFINE_bool("debug", False, "Debug config (no wandb logging)") default_config_file = os.path.join( os.path.dirname(__file__), "configs/finetune_config.py" ) config_flags.DEFINE_config_file( "config", default_config_file, "File path to the training hyperparameter configuration.", lock_config=False, ) def main(_): initialize_compilation_cache() devices = jax.devices() logging.info( f""" Octo Finetuning Script ====================== Pretrained model: {FLAGS.config.pretrained_path} Finetuning Dataset: {FLAGS.config.dataset_kwargs.name} Data dir: {FLAGS.config.dataset_kwargs.data_dir} Task Modality: {FLAGS.config.modality} Finetuning Mode: {FLAGS.config.finetuning_mode} # Devices: {jax.device_count()} Batch size: {FLAGS.config.batch_size} ({FLAGS.config.batch_size // len(devices) } per device) # Steps: {FLAGS.config.num_steps} """ ) ######### # # Setup Jax Data Parallelism # ######### assert ( FLAGS.config.batch_size % len(devices) == 0 ), f"Batch size ({FLAGS.config.batch_size}) must be divisible by the number of devices ({len(devices)})" assert ( FLAGS.config.viz_kwargs.eval_batch_size % len(devices) == 0 ), f"Eval batch size ({FLAGS.config.viz_kwargs.eval_batch_size}) must be divisible by the number of devices ({len(devices)})" # create a 1D mesh with a single axis named "batch" mesh = Mesh(jax.devices(), axis_names="batch") # Our batches will be data-parallel sharded -- each device will get a slice of the batch dp_sharding = NamedSharding(mesh, PartitionSpec("batch")) # Our model will be replicated across devices (we are only doing data parallelism, not model parallelism) replicated_sharding = NamedSharding(mesh, PartitionSpec()) # prevent tensorflow from using GPU memory since it's only used for data loading tf.config.set_visible_devices([], "GPU") ######### # # Setup WandB # ######### name = format_name_with_config( FLAGS.name, FLAGS.config.to_dict(), ) wandb_id = "{name}_{time}".format( name=name, time=datetime.datetime.now().strftime("%Y%m%d_%H%M%S"), ) wandb.init( config=FLAGS.config.to_dict(), id=wandb_id, name=name, mode="disabled" if FLAGS.debug else None, **FLAGS.config.wandb, ) ######### # # Load Pretrained model + optionally modify config # ######### pretrained_model = OctoModel.load_pretrained( FLAGS.config.pretrained_path, step=FLAGS.config.pretrained_step, ) flat_config = flax.traverse_util.flatten_dict( pretrained_model.config, keep_empty_nodes=True ) for d_key in flax.traverse_util.flatten_dict( FLAGS.config.get("config_delete_keys", ConfigDict()).to_dict() ): for c_key in list(flat_config.keys()): if ".".join(c_key).startswith(".".join(d_key)): del flat_config[c_key] config = ConfigDict(flax.traverse_util.unflatten_dict(flat_config)) config.update(FLAGS.config.get("update_config", ConfigDict())) config = config.to_dict() check_config_diff(config, pretrained_model.config) ######### # # Setup Data Loader # ######### # create text processor if config["text_processor"] is None: text_processor = None else: text_processor = ModuleSpec.instantiate(config["text_processor"])() def process_batch(batch): batch = process_text(batch, text_processor) del batch["dataset_name"] return batch # load standardize_fn from `path/to/file.py:fn_name` format if ( standardize_fn := FLAGS.config["dataset_kwargs"].get("standardize_fn", None) ) is not None: path, name = standardize_fn.split(":") # imp is deprecated, but it's also what ml_collections uses 
standardize_fn = getattr(imp.load_source("standardize_fn", path), name) del FLAGS.config["dataset_kwargs"]["standardize_fn"] FLAGS.config["dataset_kwargs"]["standardize_fn"] = standardize_fn dataset = make_single_dataset( FLAGS.config.dataset_kwargs, traj_transform_kwargs=FLAGS.config.traj_transform_kwargs, frame_transform_kwargs=FLAGS.config.frame_transform_kwargs, train=True, ) train_data_iter = ( dataset.repeat() .unbatch() .shuffle(FLAGS.config.shuffle_buffer_size) .batch(FLAGS.config.batch_size) .iterator() ) train_data_iter = map(process_batch, train_data_iter) example_batch = next(train_data_iter) ######### # # Load Pretrained Model # ######### rng = jax.random.PRNGKey(FLAGS.config.seed) rng, init_rng = jax.random.split(rng) model = OctoModel.from_config( config, example_batch, text_processor, rng=init_rng, dataset_statistics=dataset.dataset_statistics, ) merged_params = merge_params(model.params, pretrained_model.params) model = model.replace(params=merged_params) del pretrained_model ######### # # Setup Optimizer and Train State # ######### params = model.params if FLAGS.config.optimizer.frozen_keys is None: FLAGS.config.optimizer.frozen_keys = model.config["optimizer"]["frozen_keys"] tx, lr_callable, param_norm_callable = create_optimizer( params, **FLAGS.config.optimizer.to_dict(), ) train_state = TrainState.create( model=model, tx=tx, rng=rng, ) ######### # # Save all metadata # ######### if FLAGS.config.save_dir is not None: save_dir = tf.io.gfile.join( FLAGS.config.save_dir, FLAGS.config.wandb.project, FLAGS.config.wandb.group or "", wandb_id, ) wandb.config.update(dict(save_dir=save_dir), allow_val_change=True) logging.info("Saving to %s", save_dir) save_callback = SaveCallback(save_dir) # Add window_size to top of config, to make eval easier new_config = ConfigDict(model.config) new_config["window_size"] = example_batch["observation"]["pad_mask"].shape[1] model = model.replace(config=new_config) # Save finetuning config since it's not saved by SaveCallback, i.e. as part of model.save_pretrained() with open( tf.io.gfile.join(save_dir, "finetune_config.json"), "w" ) as config_file: config_file.write(FLAGS.config.to_json_best_effort()) else: save_dir = None save_callback = SaveCallback(None) logging.warning("save_dir not passed in, not saving checkpoints") example_batch_spec = jax.tree_map( lambda arr: (arr.shape, str(arr.dtype)), example_batch ) wandb.config.update( dict(example_batch_spec=example_batch_spec), allow_val_change=True ) ######### # # Define loss, train_step, and eval_step # ######### def loss_fn(params, batch, rng, train=True): bound_module = model.module.bind({"params": params}, rngs={"dropout": rng}) transformer_embeddings = bound_module.octo_transformer( batch["observation"], batch["task"], batch["observation"]["pad_mask"], train=train, ) action_loss, action_metrics = bound_module.heads["action"].loss( transformer_embeddings, # Action head knows to pull out the action readout_key batch["action"], pad_mask=batch["observation"]["pad_mask"], train=train, ) return action_loss, action_metrics # Data parallelism # Model is replicated across devices, data is split across devices @partial( jax.jit, in_shardings=[replicated_sharding, dp_sharding], ) def train_step(state, batch): rng, dropout_rng = jax.random.split(state.rng) (loss, info), grads = jax.value_and_grad(loss_fn, has_aux=True)( state.model.params, batch, dropout_rng, train=True ) # Gradient Metrics (TODO: Does the finetuner need these?) 
### grad_norm = optax.global_norm(grads) updates, _ = state.tx.update(grads, state.opt_state, state.model.params) update_norm = optax.global_norm(updates) info.update( { "grad_norm": grad_norm, "update_norm": update_norm, "param_norm": param_norm_callable(state.model.params), "learning_rate": lr_callable(state.step), } ) # End Debug Metrics # new_state = state.apply_gradients(grads=grads, rng=rng) return new_state, info ######### # # Build validation & visualization callbacks # ######### if FLAGS.config.modality == "image_conditioned": modes_to_evaluate = ["image_conditioned"] elif FLAGS.config.modality == "text_conditioned": modes_to_evaluate = ["text_conditioned"] elif FLAGS.config.modality == "multimodal": modes_to_evaluate = ["image_conditioned", "text_conditioned"] else: modes_to_evaluate = ["base"] dataset_kwargs_list = [FLAGS.config.dataset_kwargs] val_callback = ValidationCallback( loss_fn=loss_fn, process_batch_fn=process_batch, text_processor=text_processor, val_dataset_kwargs_list=dataset_kwargs_list, dataset_kwargs=FLAGS.config, modes_to_evaluate=modes_to_evaluate, **FLAGS.config.val_kwargs, ) viz_callback = VisualizationCallback( text_processor=text_processor, val_dataset_kwargs_list=dataset_kwargs_list, dataset_kwargs=FLAGS.config, modes_to_evaluate=modes_to_evaluate, **FLAGS.config.viz_kwargs, ) ######### # # Optionally build visualizers for sim env evals # ######### if "rollout_kwargs" in FLAGS.config:
rollout_callback = RolloutVisualizationCallback(
4
2023-12-13 09:58:56+00:00
16k
LinShan-Bin/OccNeRF
run.py
[ { "identifier": "Runer", "path": "runner.py", "snippet": "class Runer:\n\n def __init__(self, options):\n\n self.opt = options\n self.opt.B = self.opt.batch_size // self.opt.cam_N\n if self.opt.debug:\n self.opt.voxels_size = [8, 128, 128]\n self.opt.render_...
import torch import numpy as np from runner import Runer from options import MonodepthOptions
13,641
# Copyright Niantic 2019. Patent Pending. All rights reserved. # # This software is licensed under the terms of the Monodepth2 licence # which allows for non-commercial use only, the full terms of which are made # available in the LICENSE file. from __future__ import absolute_import, division, print_function options = MonodepthOptions() opts = options.parse() def setup_seed(seed): torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) np.random.seed(seed) torch.backends.cudnn.deterministic = True if __name__ == "__main__": setup_seed(42)
# Copyright Niantic 2019. Patent Pending. All rights reserved. # # This software is licensed under the terms of the Monodepth2 licence # which allows for non-commercial use only, the full terms of which are made # available in the LICENSE file. from __future__ import absolute_import, division, print_function options = MonodepthOptions() opts = options.parse() def setup_seed(seed): torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) np.random.seed(seed) torch.backends.cudnn.deterministic = True if __name__ == "__main__": setup_seed(42)
trainer = Runer(opts)
0
2023-12-14 15:00:21+00:00
16k
modelscope/richdreamer
extern/ldm_zero123/models/diffusion/ddpm.py
[ { "identifier": "AutoencoderKL", "path": "extern/ldm_zero123/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(\n self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"i...
import itertools import numpy as np import pytorch_lightning as pl import torch import torch.nn as nn from contextlib import contextmanager, nullcontext from einops import rearrange, repeat from functools import partial from omegaconf import ListConfig from pytorch_lightning.utilities.rank_zero import rank_zero_only from torch.optim.lr_scheduler import LambdaLR from torchvision.utils import make_grid from tqdm import tqdm from extern.ldm_zero123.models.autoencoder import (AutoencoderKL, IdentityFirstStage, VQModelInterface,) from extern.ldm_zero123.models.diffusion.ddim import DDIMSampler from extern.ldm_zero123.modules.attention import CrossAttention from extern.ldm_zero123.modules.diffusionmodules.util import ( extract_into_tensor, make_beta_schedule, noise_like,) from extern.ldm_zero123.modules.distributions.distributions import ( DiagonalGaussianDistribution, normal_kl,) from extern.ldm_zero123.modules.ema import LitEma from extern.ldm_zero123.util import (count_params, default, exists, instantiate_from_config, isimage, ismap, log_txt_as_img, mean_flat,)
12,580
new_param[i, j] = old_param[ i % old_shape[0], j % old_shape[1] ] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = ( self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(sd, strict=False) ) print( f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys" ) if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor( self.log_one_minus_alphas_cumprod, t, x_start.shape ) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor( self.posterior_log_variance_clipped, t, x_t.shape ) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1.0, 1.0) model_mean, posterior_variance, posterior_log_variance = self.q_posterior( x_start=x_recon, x_t=x, t=t ) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance( x=x, t=t, clip_denoised=clip_denoised ) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm( reversed(range(0, self.num_timesteps)), desc="Sampling t", total=self.num_timesteps, ): img = self.p_sample( img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised, ) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, 
return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop( (batch_size, channels, image_size, image_size), return_intermediates=return_intermediates, ) def q_sample(self, x_start, t, noise=None):
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {"concat": "c_concat", "crossattn": "c_crossattn", "adm": "y"} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__( self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0.0, v_posterior=0.0, # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1.0, conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0.0, make_it_fit=False, ucg_training=None, ): super().__init__() assert parameterization in [ "eps", "x0", ], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print( f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode" ) self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? 
self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if ckpt_path is not None: self.init_from_ckpt( ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet ) self.register_schedule( given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s, ) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule( self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, ): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule( beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s, ) alphas = 1.0 - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1]) (timesteps,) = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert ( alphas_cumprod.shape[0] == self.num_timesteps ), "alphas have to be defined for each timestep" to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer("betas", to_torch(betas)) self.register_buffer("alphas_cumprod", to_torch(alphas_cumprod)) self.register_buffer("alphas_cumprod_prev", to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer("sqrt_alphas_cumprod", to_torch(np.sqrt(alphas_cumprod))) self.register_buffer( "sqrt_one_minus_alphas_cumprod", to_torch(np.sqrt(1.0 - alphas_cumprod)) ) self.register_buffer( "log_one_minus_alphas_cumprod", to_torch(np.log(1.0 - alphas_cumprod)) ) self.register_buffer( "sqrt_recip_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod)) ) self.register_buffer( "sqrt_recipm1_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod - 1)) ) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * ( 1.0 - alphas_cumprod_prev ) / (1.0 - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. 
- alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer("posterior_variance", to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer( "posterior_log_variance_clipped", to_torch(np.log(np.maximum(posterior_variance, 1e-20))), ) self.register_buffer( "posterior_mean_coef1", to_torch(betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod)), ) self.register_buffer( "posterior_mean_coef2", to_torch( (1.0 - alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - alphas_cumprod) ), ) if self.parameterization == "eps": lvlb_weights = self.betas**2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod) ) elif self.parameterization == "x0": lvlb_weights = ( 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2.0 * 1 - torch.Tensor(alphas_cumprod)) ) else: raise NotImplementedError("mu not supported") # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] self.register_buffer("lvlb_weights", lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) if self.make_it_fit: n_params = len( [ name for name, _ in itertools.chain( self.named_parameters(), self.named_buffers() ) ] ) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params, ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[ i % old_shape[0], j % old_shape[1] ] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = ( self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(sd, strict=False) ) print( f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys" ) if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. 
:return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor( self.log_one_minus_alphas_cumprod, t, x_start.shape ) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor( self.posterior_log_variance_clipped, t, x_t.shape ) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1.0, 1.0) model_mean, posterior_variance, posterior_log_variance = self.q_posterior( x_start=x_recon, x_t=x, t=t ) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance( x=x, t=t, clip_denoised=clip_denoised ) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm( reversed(range(0, self.num_timesteps)), desc="Sampling t", total=self.num_timesteps, ): img = self.p_sample( img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised, ) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop( (batch_size, channels, image_size, image_size), return_intermediates=return_intermediates, ) def q_sample(self, x_start, t, noise=None):
noise = default(noise, lambda: torch.randn_like(x_start))
12
2023-12-06 07:53:11+00:00
16k
rehg-lab/RAVE
annotator/oneformer/detectron2/export/caffe2_patch.py
[ { "identifier": "poolers", "path": "annotator/oneformer/detectron2/modeling/poolers.py", "snippet": "def assign_boxes_to_levels(\r\n box_lists: List[Boxes],\r\n min_level: int,\r\n max_level: int,\r\n canonical_box_size: int,\r\n canonical_level: int,\r\n):\r\ndef _convert_boxes_to_pooler...
import contextlib import torch from unittest import mock from annotator.oneformer.detectron2.modeling import poolers from annotator.oneformer.detectron2.modeling.proposal_generator import rpn from annotator.oneformer.detectron2.modeling.roi_heads import keypoint_head, mask_head from annotator.oneformer.detectron2.modeling.roi_heads.fast_rcnn import FastRCNNOutputLayers from .c10 import ( Caffe2Compatible, Caffe2FastRCNNOutputsInference, Caffe2KeypointRCNNInference, Caffe2MaskRCNNInference, Caffe2ROIPooler, Caffe2RPN, )
11,187
# Copyright (c) Facebook, Inc. and its affiliates. class GenericMixin(object): pass class Caffe2CompatibleConverter(object): """ A GenericUpdater which implements the `create_from` interface, by modifying module object and assign it with another class replaceCls. """ def __init__(self, replaceCls): self.replaceCls = replaceCls def create_from(self, module): # update module's class to the new class assert isinstance(module, torch.nn.Module) if issubclass(self.replaceCls, GenericMixin): # replaceCls should act as mixin, create a new class on-the-fly new_class = type( "{}MixedWith{}".format(self.replaceCls.__name__, module.__class__.__name__), (self.replaceCls, module.__class__), {}, # {"new_method": lambda self: ...}, ) module.__class__ = new_class else: # replaceCls is complete class, this allow arbitrary class swap module.__class__ = self.replaceCls # initialize Caffe2Compatible if isinstance(module, Caffe2Compatible): module.tensor_mode = False return module def patch(model, target, updater, *args, **kwargs): """ recursively (post-order) update all modules with the target type and its subclasses, make a initialization/composition/inheritance/... via the updater.create_from. """ for name, module in model.named_children(): model._modules[name] = patch(module, target, updater, *args, **kwargs) if isinstance(model, target): return updater.create_from(model, *args, **kwargs) return model def patch_generalized_rcnn(model): ccc = Caffe2CompatibleConverter model = patch(model, rpn.RPN, ccc(Caffe2RPN)) model = patch(model, poolers.ROIPooler, ccc(Caffe2ROIPooler)) return model @contextlib.contextmanager def mock_fastrcnn_outputs_inference( tensor_mode, check=True, box_predictor_type=FastRCNNOutputLayers ): with mock.patch.object( box_predictor_type, "inference", autospec=True, side_effect=Caffe2FastRCNNOutputsInference(tensor_mode), ) as mocked_func: yield if check: assert mocked_func.call_count > 0 @contextlib.contextmanager def mock_mask_rcnn_inference(tensor_mode, patched_module, check=True): with mock.patch( "{}.mask_rcnn_inference".format(patched_module), side_effect=Caffe2MaskRCNNInference() ) as mocked_func: yield if check: assert mocked_func.call_count > 0 @contextlib.contextmanager def mock_keypoint_rcnn_inference(tensor_mode, patched_module, use_heatmap_max_keypoint, check=True): with mock.patch( "{}.keypoint_rcnn_inference".format(patched_module),
# Copyright (c) Facebook, Inc. and its affiliates. class GenericMixin(object): pass class Caffe2CompatibleConverter(object): """ A GenericUpdater which implements the `create_from` interface, by modifying module object and assign it with another class replaceCls. """ def __init__(self, replaceCls): self.replaceCls = replaceCls def create_from(self, module): # update module's class to the new class assert isinstance(module, torch.nn.Module) if issubclass(self.replaceCls, GenericMixin): # replaceCls should act as mixin, create a new class on-the-fly new_class = type( "{}MixedWith{}".format(self.replaceCls.__name__, module.__class__.__name__), (self.replaceCls, module.__class__), {}, # {"new_method": lambda self: ...}, ) module.__class__ = new_class else: # replaceCls is complete class, this allow arbitrary class swap module.__class__ = self.replaceCls # initialize Caffe2Compatible if isinstance(module, Caffe2Compatible): module.tensor_mode = False return module def patch(model, target, updater, *args, **kwargs): """ recursively (post-order) update all modules with the target type and its subclasses, make a initialization/composition/inheritance/... via the updater.create_from. """ for name, module in model.named_children(): model._modules[name] = patch(module, target, updater, *args, **kwargs) if isinstance(model, target): return updater.create_from(model, *args, **kwargs) return model def patch_generalized_rcnn(model): ccc = Caffe2CompatibleConverter model = patch(model, rpn.RPN, ccc(Caffe2RPN)) model = patch(model, poolers.ROIPooler, ccc(Caffe2ROIPooler)) return model @contextlib.contextmanager def mock_fastrcnn_outputs_inference( tensor_mode, check=True, box_predictor_type=FastRCNNOutputLayers ): with mock.patch.object( box_predictor_type, "inference", autospec=True, side_effect=Caffe2FastRCNNOutputsInference(tensor_mode), ) as mocked_func: yield if check: assert mocked_func.call_count > 0 @contextlib.contextmanager def mock_mask_rcnn_inference(tensor_mode, patched_module, check=True): with mock.patch( "{}.mask_rcnn_inference".format(patched_module), side_effect=Caffe2MaskRCNNInference() ) as mocked_func: yield if check: assert mocked_func.call_count > 0 @contextlib.contextmanager def mock_keypoint_rcnn_inference(tensor_mode, patched_module, use_heatmap_max_keypoint, check=True): with mock.patch( "{}.keypoint_rcnn_inference".format(patched_module),
side_effect=Caffe2KeypointRCNNInference(use_heatmap_max_keypoint),
7
2023-12-05 02:51:53+00:00
16k
u2seg/U2Seg
detectron2/data/build.py
[ { "identifier": "configurable", "path": "detectron2/config/config.py", "snippet": "def configurable(init_func=None, *, from_config=None):\n \"\"\"\n Decorate a function or a class's __init__ method so that it can be called\n with a :class:`CfgNode` object using a :func:`from_config` function th...
import itertools import logging import numpy as np import operator import pickle import torch import torch.utils.data as torchdata from collections import OrderedDict, defaultdict from typing import Any, Callable, Dict, List, Optional, Union from tabulate import tabulate from termcolor import colored from detectron2.config import configurable from detectron2.structures import BoxMode from detectron2.utils.comm import get_world_size from detectron2.utils.env import seed_all_rng from detectron2.utils.file_io import PathManager from detectron2.utils.logger import _log_api_usage, log_first_n from .catalog import DatasetCatalog, MetadataCatalog from .common import AspectRatioGroupedDataset, DatasetFromList, MapDataset, ToIterableDataset from .dataset_mapper import DatasetMapper from .detection_utils import check_metadata_consistency from .samplers import ( InferenceSampler, RandomSubsetTrainingSampler, RepeatFactorTrainingSampler, TrainingSampler, )
11,729
Args: dataset_dicts (list[dict]): annotations in Detectron2 Dataset format. proposal_file (str): file path of pre-computed proposals, in pkl format. Returns: list[dict]: the same format as dataset_dicts, but added proposal field. """ logger = logging.getLogger(__name__) logger.info("Loading proposals from: {}".format(proposal_file)) with PathManager.open(proposal_file, "rb") as f: proposals = pickle.load(f, encoding="latin1") # Rename the key names in D1 proposal files rename_keys = {"indexes": "ids", "scores": "objectness_logits"} for key in rename_keys: if key in proposals: proposals[rename_keys[key]] = proposals.pop(key) # Fetch the indexes of all proposals that are in the dataset # Convert image_id to str since they could be int. img_ids = set({str(record["image_id"]) for record in dataset_dicts}) id_to_index = {str(id): i for i, id in enumerate(proposals["ids"]) if str(id) in img_ids} # Assuming default bbox_mode of precomputed proposals are 'XYXY_ABS' bbox_mode = BoxMode(proposals["bbox_mode"]) if "bbox_mode" in proposals else BoxMode.XYXY_ABS for record in dataset_dicts: # Get the index of the proposal i = id_to_index[str(record["image_id"])] boxes = proposals["boxes"][i] objectness_logits = proposals["objectness_logits"][i] # Sort the proposals in descending order of the scores inds = objectness_logits.argsort()[::-1] record["proposal_boxes"] = boxes[inds] record["proposal_objectness_logits"] = objectness_logits[inds] record["proposal_bbox_mode"] = bbox_mode return dataset_dicts def print_instances_class_histogram(dataset_dicts, class_names): """ Args: dataset_dicts (list[dict]): list of dataset dicts. class_names (list[str]): list of class names (zero-indexed). """ num_classes = len(class_names) hist_bins = np.arange(num_classes + 1) histogram = np.zeros((num_classes,), dtype=int) for entry in dataset_dicts: annos = entry["annotations"] classes = np.asarray( [x["category_id"] for x in annos if not x.get("iscrowd", 0)], dtype=int ) if len(classes): assert classes.min() >= 0, f"Got an invalid category_id={classes.min()}" assert ( classes.max() < num_classes ), f"Got an invalid category_id={classes.max()} for a dataset of {num_classes} classes" histogram += np.histogram(classes, bins=hist_bins)[0] N_COLS = min(6, len(class_names) * 2) def short_name(x): # make long class names shorter. useful for lvis if len(x) > 13: return x[:11] + ".." return x data = list( itertools.chain(*[[short_name(class_names[i]), int(v)] for i, v in enumerate(histogram)]) ) total_num_instances = sum(data[1::2]) data.extend([None] * (N_COLS - (len(data) % N_COLS))) if num_classes > 1: data.extend(["total", total_num_instances]) data = itertools.zip_longest(*[data[i::N_COLS] for i in range(N_COLS)]) table = tabulate( data, headers=["category", "#instances"] * (N_COLS // 2), tablefmt="pipe", numalign="left", stralign="center", ) log_first_n( logging.INFO, "Distribution of instances among all {} categories:\n".format(num_classes) + colored(table, "cyan"), key="message", ) def get_detection_dataset_dicts( names, filter_empty=True, min_keypoints=0, proposal_files=None, check_consistency=True, ): """ Load and prepare dataset dicts for instance detection/segmentation and semantic segmentation. Args: names (str or list[str]): a dataset name or a list of dataset names filter_empty (bool): whether to filter out images without instance annotations min_keypoints (int): filter out images with fewer keypoints than `min_keypoints`. Set to 0 to do nothing. 
proposal_files (list[str]): if given, a list of object proposal files that match each dataset in `names`. check_consistency (bool): whether to check if datasets have consistent metadata. Returns: list[dict]: a list of dicts following the standard dataset dict format. """ if isinstance(names, str): names = [names] assert len(names), names
# Copyright (c) Facebook, Inc. and its affiliates. """ This file contains the default logic to build a dataloader for training or testing. """ __all__ = [ "build_batch_data_loader", "build_detection_train_loader", "build_detection_test_loader", "get_detection_dataset_dicts", "load_proposals_into_dataset", "print_instances_class_histogram", ] def filter_images_with_only_crowd_annotations(dataset_dicts): """ Filter out images with none annotations or only crowd annotations (i.e., images without non-crowd annotations). A common training-time preprocessing on COCO dataset. Args: dataset_dicts (list[dict]): annotations in Detectron2 Dataset format. Returns: list[dict]: the same format, but filtered. """ num_before = len(dataset_dicts) def valid(anns): for ann in anns: if ann.get("iscrowd", 0) == 0: return True return False dataset_dicts = [x for x in dataset_dicts if valid(x["annotations"])] num_after = len(dataset_dicts) logger = logging.getLogger(__name__) logger.info( "Removed {} images with no usable annotations. {} images left.".format( num_before - num_after, num_after ) ) return dataset_dicts def filter_images_with_few_keypoints(dataset_dicts, min_keypoints_per_image): """ Filter out images with too few number of keypoints. Args: dataset_dicts (list[dict]): annotations in Detectron2 Dataset format. Returns: list[dict]: the same format as dataset_dicts, but filtered. """ num_before = len(dataset_dicts) def visible_keypoints_in_image(dic): # Each keypoints field has the format [x1, y1, v1, ...], where v is visibility annotations = dic["annotations"] return sum( (np.array(ann["keypoints"][2::3]) > 0).sum() for ann in annotations if "keypoints" in ann ) dataset_dicts = [ x for x in dataset_dicts if visible_keypoints_in_image(x) >= min_keypoints_per_image ] num_after = len(dataset_dicts) logger = logging.getLogger(__name__) logger.info( "Removed {} images with fewer than {} keypoints.".format( num_before - num_after, min_keypoints_per_image ) ) return dataset_dicts def load_proposals_into_dataset(dataset_dicts, proposal_file): """ Load precomputed object proposals into the dataset. The proposal file should be a pickled dict with the following keys: - "ids": list[int] or list[str], the image ids - "boxes": list[np.ndarray], each is an Nx4 array of boxes corresponding to the image id - "objectness_logits": list[np.ndarray], each is an N sized array of objectness scores corresponding to the boxes. - "bbox_mode": the BoxMode of the boxes array. Defaults to ``BoxMode.XYXY_ABS``. Args: dataset_dicts (list[dict]): annotations in Detectron2 Dataset format. proposal_file (str): file path of pre-computed proposals, in pkl format. Returns: list[dict]: the same format as dataset_dicts, but added proposal field. """ logger = logging.getLogger(__name__) logger.info("Loading proposals from: {}".format(proposal_file)) with PathManager.open(proposal_file, "rb") as f: proposals = pickle.load(f, encoding="latin1") # Rename the key names in D1 proposal files rename_keys = {"indexes": "ids", "scores": "objectness_logits"} for key in rename_keys: if key in proposals: proposals[rename_keys[key]] = proposals.pop(key) # Fetch the indexes of all proposals that are in the dataset # Convert image_id to str since they could be int. 
img_ids = set({str(record["image_id"]) for record in dataset_dicts}) id_to_index = {str(id): i for i, id in enumerate(proposals["ids"]) if str(id) in img_ids} # Assuming default bbox_mode of precomputed proposals are 'XYXY_ABS' bbox_mode = BoxMode(proposals["bbox_mode"]) if "bbox_mode" in proposals else BoxMode.XYXY_ABS for record in dataset_dicts: # Get the index of the proposal i = id_to_index[str(record["image_id"])] boxes = proposals["boxes"][i] objectness_logits = proposals["objectness_logits"][i] # Sort the proposals in descending order of the scores inds = objectness_logits.argsort()[::-1] record["proposal_boxes"] = boxes[inds] record["proposal_objectness_logits"] = objectness_logits[inds] record["proposal_bbox_mode"] = bbox_mode return dataset_dicts def print_instances_class_histogram(dataset_dicts, class_names): """ Args: dataset_dicts (list[dict]): list of dataset dicts. class_names (list[str]): list of class names (zero-indexed). """ num_classes = len(class_names) hist_bins = np.arange(num_classes + 1) histogram = np.zeros((num_classes,), dtype=int) for entry in dataset_dicts: annos = entry["annotations"] classes = np.asarray( [x["category_id"] for x in annos if not x.get("iscrowd", 0)], dtype=int ) if len(classes): assert classes.min() >= 0, f"Got an invalid category_id={classes.min()}" assert ( classes.max() < num_classes ), f"Got an invalid category_id={classes.max()} for a dataset of {num_classes} classes" histogram += np.histogram(classes, bins=hist_bins)[0] N_COLS = min(6, len(class_names) * 2) def short_name(x): # make long class names shorter. useful for lvis if len(x) > 13: return x[:11] + ".." return x data = list( itertools.chain(*[[short_name(class_names[i]), int(v)] for i, v in enumerate(histogram)]) ) total_num_instances = sum(data[1::2]) data.extend([None] * (N_COLS - (len(data) % N_COLS))) if num_classes > 1: data.extend(["total", total_num_instances]) data = itertools.zip_longest(*[data[i::N_COLS] for i in range(N_COLS)]) table = tabulate( data, headers=["category", "#instances"] * (N_COLS // 2), tablefmt="pipe", numalign="left", stralign="center", ) log_first_n( logging.INFO, "Distribution of instances among all {} categories:\n".format(num_classes) + colored(table, "cyan"), key="message", ) def get_detection_dataset_dicts( names, filter_empty=True, min_keypoints=0, proposal_files=None, check_consistency=True, ): """ Load and prepare dataset dicts for instance detection/segmentation and semantic segmentation. Args: names (str or list[str]): a dataset name or a list of dataset names filter_empty (bool): whether to filter out images without instance annotations min_keypoints (int): filter out images with fewer keypoints than `min_keypoints`. Set to 0 to do nothing. proposal_files (list[str]): if given, a list of object proposal files that match each dataset in `names`. check_consistency (bool): whether to check if datasets have consistent metadata. Returns: list[dict]: a list of dicts following the standard dataset dict format. """ if isinstance(names, str): names = [names] assert len(names), names
available_datasets = DatasetCatalog.keys()
7
2023-12-05 01:13:31+00:00
16k
upfusion3d/upfusion
control_net/ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "control_net/ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white...
import torch import torch.nn as nn import numpy as np import pytorch_lightning as pl import itertools from torch.optim.lr_scheduler import LambdaLR from einops import rearrange, repeat from contextlib import contextmanager, nullcontext from functools import partial from tqdm import tqdm from torchvision.utils import make_grid from pytorch_lightning.utilities.distributed import rank_zero_only from omegaconf import ListConfig from control_net.ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config from control_net.ldm.modules.ema import LitEma from control_net.ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution from control_net.ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL from control_net.ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like from control_net.ldm.models.diffusion.ddim import DDIMSampler
11,165
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., make_it_fit=False, ucg_training=None, reset_ema=False, reset_num_ema_updates=False, ): super().__init__() assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., make_it_fit=False, ucg_training=None, reset_ema=False, reset_num_ema_updates=False, ): super().__init__() assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit
if reset_ema: assert exists(ckpt_path)
1
2023-12-12 00:49:11+00:00
16k
modelscope/normal-depth-diffusion
scripts/t2i.py
[ { "identifier": "DDIMSampler", "path": "ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n\n def __init__(self, model, schedule='linear', **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedu...
import argparse import glob import os import pdb import sys import time import cv2 import numpy as np import torch from contextlib import contextmanager, nullcontext from itertools import islice from diffusers.pipelines.stable_diffusion.safety_checker import \ StableDiffusionSafetyChecker from einops import rearrange, repeat from imwatermark import WatermarkEncoder from ldm.models.diffusion.ddim import DDIMSampler from ldm.models.diffusion.dpm_solver import DPMSolverSampler from ldm.models.diffusion.plms import PLMSSampler from ldm.util import instantiate_from_config from model_zoo import build_model from omegaconf import OmegaConf from PIL import Image from pytorch_lightning import seed_everything from torch import autocast from torchvision.utils import make_grid from tqdm import tqdm, trange from transformers import AutoFeatureExtractor from utils.color_transfer import (map_2_16bit, map_16bit_2_8, split_rgbd, split_rgbd_only_tensor, split_rgbd_tensor)
10,863
default=42, help='the seed (for reproducible sampling)', ) parser.add_argument( '--precision', type=str, help='evaluate at this precision', choices=['full', 'autocast'], default='autocast') opt = parser.parse_args() seed_everything(opt.seed) ckpt_name = os.path.splitext(os.path.basename(opt.ckpt))[0] outdir = os.path.join(opt.save_dir, ckpt_name) os.makedirs(outdir, exist_ok=True) outpath = outdir # config = OmegaConf.load(f"{opt.config}") # model = load_model_from_config(config, f"{opt.ckpt}") model = build_model('nd', opt.ckpt, strict=False) device = torch.device( 'cuda') if torch.cuda.is_available() else torch.device('cpu') model = model.to(device) if opt.dpm_solver: sampler = DPMSolverSampler(model) elif opt.plms: sampler = PLMSSampler(model) else: sampler = DDIMSampler(model) print( 'Creating invisible watermark encoder (see https://github.com/ShieldMnt/invisible-watermark)...' ) wm = 'StableDiffusionV1' wm_encoder = WatermarkEncoder() wm_encoder.set_watermark('bytes', wm.encode('utf-8')) batch_size = opt.n_samples n_rows = opt.n_rows if opt.n_rows > 0 else batch_size if not opt.from_file: prompt = opt.prompt assert prompt is not None data = [batch_size * [prompt]] else: print(f'reading prompts from {opt.from_file}') with open(opt.from_file, 'r') as f: data = f.read().splitlines() data = list(chunk(data, batch_size)) if opt.prompt is None: prompts = [ 'a close up of a sheet of pizza on a table.', 'A picture of some lemons on a table.', 'A little girl with a pink bow in her hair eating broccoli.', 'A highly detailed stone bust of Theodoros Kolokotronis', ] else: prompts = [opt.prompt] sub = '' for prompt_id, prompt in enumerate(prompts): if prompt[-1] == '.': prompt = prompt[:-1] data = [batch_size * [prompt + sub]] save_path = os.path.join(outpath, 'normal-depth') os.makedirs(save_path, exist_ok=True) base_count = prompt_id grid_count = prompt_id start_code = None if opt.fixed_code: start_code = torch.randn( [opt.n_samples, opt.C, opt.H // opt.f, opt.W // opt.f], device=device) precision_scope = autocast if opt.precision == 'autocast' else nullcontext with torch.no_grad(): with precision_scope('cuda'): tic = time.time() all_samples = list() for n in trange(opt.n_iter, desc='Sampling'): for prompts in tqdm(data, desc='data'): uc = None if opt.scale != 1.0: # uc = model.get_learned_conditioning(batch_size * [""]) uc = model.get_learned_conditioning( batch_size * [NEGATIVE_PROMPTS]) if isinstance(prompts, tuple): prompts = list(prompts) c = model.get_learned_conditioning(prompts) shape = [opt.C, opt.H // opt.f, opt.W // opt.f] samples_ddim, _ = sampler.sample( S=50, conditioning=c, batch_size=opt.n_samples, shape=shape, verbose=False, unconditional_guidance_scale=opt.scale, unconditional_conditioning=uc, eta=opt.ddim_eta, x_T=start_code) ''' rgbd= (samples_ddim[0]).permute(1,2,0) rgb = rgbd[...,:3] depth = rgbd[...,3] cv2.imwrite('rgb.png',((rgb.clamp(-1,1).detach().cpu().numpy()+1)[...,::-1] /2 * 255).astype(np.uint8)) cv2.imwrite('depth.png', ((depth.clamp(-1,1).detach().cpu().numpy()+1) /2 * 255).astype(np.uint8)) ''' x_samples_ddim = model.decode_first_stage(samples_ddim) x_samples_ddim = torch.clamp( (x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
sys.path.append('./') # load safety model ''' safety_model_id = "CompVis/stable-diffusion-safety-checker" safety_feature_extractor = AutoFeatureExtractor.from_pretrained(safety_model_id) safety_checker = StableDiffusionSafetyChecker.from_pretrained(safety_model_id) ''' NEGATIVE_PROMPTS = 'ugly, tiling, poorly drawn hands, poorly drawn feet, poorly drawn face, out of frame, extra limbs, disfigured, deformed, body out of frame, blurry, bad anatomy, blurred, watermark, grainy, signature, cut off, draft.' def chunk(it, size): it = iter(it) return iter(lambda: tuple(islice(it, size)), ()) def numpy_to_pil(images): """ Convert a numpy image or a batch of images to a PIL image. """ if images.ndim == 3: images = images[None, ...] images = (images * 255).round().astype('uint8') pil_images = [Image.fromarray(image) for image in images] return pil_images def load_model_from_config(config, ckpt, verbose=False): print(f'Loading model from {ckpt}') pl_sd = torch.load(ckpt, map_location='cpu') if 'global_step' in pl_sd: print(f"Global Step: {pl_sd['global_step']}") sd = pl_sd['state_dict'] model = instantiate_from_config(config.model) m, u = model.load_state_dict(sd, strict=False) if len(m) > 0 and verbose: print('missing keys:') print(m) if len(u) > 0 and verbose: print('unexpected keys:') print(u) model.cuda() model.eval() return model def put_watermark(img, wm_encoder=None): if wm_encoder is not None: img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR) img = wm_encoder.encode(img, 'dwtDct') img = Image.fromarray(img[:, :, ::-1]) return img def load_replacement(x): try: hwc = x.shape y = Image.open('assets/rick.jpeg').convert('RGB').resize( (hwc[1], hwc[0])) y = (np.array(y) / 255.0).astype(x.dtype) assert y.shape == x.shape return y except Exception: return x def check_safety(x_image): return x_image, False def main(): parser = argparse.ArgumentParser() parser.add_argument( '--prompt', type=str, nargs='?', default=None, help='the prompt to render') parser.add_argument( '--save_dir', type=str, nargs='?', help='dir to write results to', default='outputs/txt2img-samples') parser.add_argument( '--skip_grid', action='store_true', help= 'do not save a grid, only individual samples. Helpful when evaluating lots of samples', ) parser.add_argument( '--skip_save', action='store_true', help='do not save individual samples. For speed measurements.', ) parser.add_argument( '--ddim_steps', type=int, default=50, help='number of ddim sampling steps', ) parser.add_argument( '--plms', action='store_true', help='use plms sampling', ) parser.add_argument( '--dpm_solver', action='store_true', help='use dpm_solver sampling', ) parser.add_argument( '--laion400m', action='store_true', help='uses the LAION400M model', ) parser.add_argument( '--fixed_code', action='store_true', help='if enabled, uses the same starting code across samples ', ) parser.add_argument( '--ddim_eta', type=float, default=0.0, help='ddim eta (eta=0.0 corresponds to deterministic sampling', ) parser.add_argument( '--n_iter', type=int, default=1, help='sample this often', ) parser.add_argument( '--H', type=int, default=512, help='image height, in pixel space', ) parser.add_argument( '--W', type=int, default=512, help='image width, in pixel space', ) parser.add_argument( '--C', type=int, default=4, help='latent channels', ) parser.add_argument( '--f', type=int, default=8, help='downsampling factor', ) parser.add_argument( '--n_samples', type=int, default=1, help= 'how many samples to produce for each given prompt. A.k.a. 
batch size', ) parser.add_argument( '--n_rows', type=int, default=0, help='rows in the grid (default: n_samples)', ) parser.add_argument( '--scale', type=float, default=7.5, help= 'unconditional guidance scale: eps = eps(x, empty) + scale * (eps(x, cond) - eps(x, empty))', ) parser.add_argument( '--from-file', type=str, help='if specified, load prompts from this file', ) parser.add_argument( '--config', type=str, default='./configs/inference/nd/nd-1.5-inference.yaml', help='path to config which constructs model', ) parser.add_argument( '--ckpt', type=str, default='models/ldm/txt2depth/last.ckpt', help='path to checkpoint of model', ) parser.add_argument( '--seed', type=int, default=42, help='the seed (for reproducible sampling)', ) parser.add_argument( '--precision', type=str, help='evaluate at this precision', choices=['full', 'autocast'], default='autocast') opt = parser.parse_args() seed_everything(opt.seed) ckpt_name = os.path.splitext(os.path.basename(opt.ckpt))[0] outdir = os.path.join(opt.save_dir, ckpt_name) os.makedirs(outdir, exist_ok=True) outpath = outdir # config = OmegaConf.load(f"{opt.config}") # model = load_model_from_config(config, f"{opt.ckpt}") model = build_model('nd', opt.ckpt, strict=False) device = torch.device( 'cuda') if torch.cuda.is_available() else torch.device('cpu') model = model.to(device) if opt.dpm_solver: sampler = DPMSolverSampler(model) elif opt.plms: sampler = PLMSSampler(model) else: sampler = DDIMSampler(model) print( 'Creating invisible watermark encoder (see https://github.com/ShieldMnt/invisible-watermark)...' ) wm = 'StableDiffusionV1' wm_encoder = WatermarkEncoder() wm_encoder.set_watermark('bytes', wm.encode('utf-8')) batch_size = opt.n_samples n_rows = opt.n_rows if opt.n_rows > 0 else batch_size if not opt.from_file: prompt = opt.prompt assert prompt is not None data = [batch_size * [prompt]] else: print(f'reading prompts from {opt.from_file}') with open(opt.from_file, 'r') as f: data = f.read().splitlines() data = list(chunk(data, batch_size)) if opt.prompt is None: prompts = [ 'a close up of a sheet of pizza on a table.', 'A picture of some lemons on a table.', 'A little girl with a pink bow in her hair eating broccoli.', 'A highly detailed stone bust of Theodoros Kolokotronis', ] else: prompts = [opt.prompt] sub = '' for prompt_id, prompt in enumerate(prompts): if prompt[-1] == '.': prompt = prompt[:-1] data = [batch_size * [prompt + sub]] save_path = os.path.join(outpath, 'normal-depth') os.makedirs(save_path, exist_ok=True) base_count = prompt_id grid_count = prompt_id start_code = None if opt.fixed_code: start_code = torch.randn( [opt.n_samples, opt.C, opt.H // opt.f, opt.W // opt.f], device=device) precision_scope = autocast if opt.precision == 'autocast' else nullcontext with torch.no_grad(): with precision_scope('cuda'): tic = time.time() all_samples = list() for n in trange(opt.n_iter, desc='Sampling'): for prompts in tqdm(data, desc='data'): uc = None if opt.scale != 1.0: # uc = model.get_learned_conditioning(batch_size * [""]) uc = model.get_learned_conditioning( batch_size * [NEGATIVE_PROMPTS]) if isinstance(prompts, tuple): prompts = list(prompts) c = model.get_learned_conditioning(prompts) shape = [opt.C, opt.H // opt.f, opt.W // opt.f] samples_ddim, _ = sampler.sample( S=50, conditioning=c, batch_size=opt.n_samples, shape=shape, verbose=False, unconditional_guidance_scale=opt.scale, unconditional_conditioning=uc, eta=opt.ddim_eta, x_T=start_code) ''' rgbd= (samples_ddim[0]).permute(1,2,0) rgb = rgbd[...,:3] depth = rgbd[...,3] 
cv2.imwrite('rgb.png',((rgb.clamp(-1,1).detach().cpu().numpy()+1)[...,::-1] /2 * 255).astype(np.uint8)) cv2.imwrite('depth.png', ((depth.clamp(-1,1).detach().cpu().numpy()+1) /2 * 255).astype(np.uint8)) ''' x_samples_ddim = model.decode_first_stage(samples_ddim) x_samples_ddim = torch.clamp( (x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
x_samples_ddim = split_rgbd_only_tensor(x_samples_ddim)
8
2023-12-06 07:29:34+00:00
16k
nox-410/tvm.tl
python/tvm/topi/arm_cpu/conv2d_gemm.py
[ { "identifier": "get_const_tuple", "path": "python/tvm/topi/utils.py", "snippet": "def get_const_tuple(in_tuple):\n \"\"\"Verifies input tuple is IntImm or Var, returns tuple of int or Var.\n\n Parameters\n ----------\n in_tuple : tuple of Expr\n The input.\n\n Returns\n -------...
import tvm from tvm.target import Target from tvm import te from tvm.topi import nn from tvm.autotvm.task.space import AnnotateEntity, ReorderEntity, OtherOptionEntity from ..utils import get_const_tuple, get_const_int from ..nn.utils import get_pad_tuple from .tensor_intrin import ( gemm_4x4_int8_int8_int32, gemm_acc_4x4_int8_int8_int32, gemm_acc_nx16_int8_int8_int32, gemm_acc_2x2_int8_int8_int32, )
13,310
) zero = tvm.tir.const(0) else: # No need to pack/unpack, execute GEMM directly C = te.compute( (batches, M_padded, N_padded), lambda b, x, y: te.sum( A[b, x, k].astype("int32") * B_interleaved_t[ y // tile_rows_B, k // tile_cols_B, idxm(y, tile_rows_B), idxm(k, tile_cols_B) ].astype("int32"), axis=k, ), name="C", ) # We need to ensure that infer bound pass does not remove the padding # which is necessary for the tensorizations to work. So we need to # add a dummy reference to the padding area of the result zero = ( tvm.tir.const(1, C.dtype) * C[0, M_padded - 1, N_padded - 1] - tvm.tir.const(1, C.dtype) * C[0, M_padded - 1, N_padded - 1] ) # Reshape the result into a convolution output out_shape = (batches, OH, OW, OC) out = te.compute( out_shape, lambda b, x, y, z: (C(b, y + OW * x, z) + zero).astype(out_dtype), name="conv2d_gemm_output", ) return out def schedule_conv2d_gemm_interleaved(cfg, s, out, final_out): """Schedule the conv2d_gemm interleaved strategy""" C = out.op.input_tensors[0] C_interleaved = C.op.input_tensors[0] A_interleaved = C_interleaved.op.input_tensors[0] # Input transform A_interleaved_input = A_interleaved.op.input_tensors[0] if A_interleaved_input.op.name == "A_padded_K" or A_interleaved_input.op.name == "A_padded_M": s[A_interleaved_input].compute_at(s[A_interleaved], A_interleaved.op.axis[3]) s[A_interleaved_input].vectorize(A_interleaved_input.op.axis[2]) s[A_interleaved_input].compute_inline() data_im2col = A_interleaved_input.op.input_tensors[0] else: data_im2col = A_interleaved_input b, m, n = data_im2col.op.axis if data_im2col.op.name == "data_im2col": n_size = data_im2col.shape[2] if n_size % 16 == 0: split_factor = 16 else: split_factor = 8 n_outer, n_inner = s[data_im2col].split(n, split_factor) s[data_im2col].unroll(n_outer) s[data_im2col].vectorize(n_inner) b_m_fused = s[data_im2col].fuse(b, m) s[data_im2col].parallel(b_m_fused) else: s[data_im2col].compute_inline() # Computation(through tensorize) b, xo, yo, xi, yi = C_interleaved.op.axis[0:5] outer_gemm, inner_gemm = cfg["reorder_gemm"].apply(s, C_interleaved, [xo, yo]) b_outer_gemm_fused = s[C_interleaved].fuse(b, outer_gemm) s[C_interleaved].parallel(b_outer_gemm_fused) s[A_interleaved].compute_at(s[C_interleaved], b_outer_gemm_fused) _, _, _, outer_A_interleaved, inner_A_interleaved = A_interleaved.op.axis cfg["A_interleaved_unroll_vec"].apply( s, A_interleaved, [outer_A_interleaved, inner_A_interleaved] ) in_type = A_interleaved.dtype out_type = C.dtype k = C_interleaved.op.reduce_axis[0] _, M, N = C.shape if in_type in ["int8", "uint8"]: target = Target.current(allow_none=False) if target.features.has_matmul_i8: gemm_acc = gemm_acc_2x2_int8_int8_int32(in_type) xi_inner, yi_inner = C_interleaved.op.axis[-2:] k_outer, k_inner = s[C_interleaved].split(k, 8) s[C_interleaved].reorder( b_outer_gemm_fused, inner_gemm, k_outer, xi, yi, xi_inner, yi_inner, k_inner ) s[C_interleaved].tensorize(xi_inner, gemm_acc) s[C_interleaved].unroll(xi) s[C_interleaved].unroll(yi) elif target.features.has_dotprod: gemm_acc = gemm_acc_4x4_int8_int8_int32(in_type) xi_outer, yi_outer, xi_inner, yi_inner = s[C_interleaved].tile( xi, yi, x_factor=8, y_factor=4 ) k_outer, k_inner = s[C_interleaved].split(k, 4) xi_inner_outer, xi_inner_inner = s[C_interleaved].split(xi_inner, 4) s[C_interleaved].reorder( b_outer_gemm_fused, inner_gemm, xi_outer, yi_outer, k_outer, xi_inner_outer, xi_inner_inner, yi_inner, k_inner, ) s[C_interleaved].tensorize(xi_inner_inner, gemm_acc) s[C_interleaved].unroll(xi_inner_outer) elif 
target.features.has_asimd: s[C_interleaved].reorder(yi, xi) K = A_interleaved_input.shape[2] assert in_type in ["int8", "uint8"], "Only int8 and uint8 gemm are supported" unroll = cfg["gemm_quantized_unroll"].val
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=invalid-name, unused-variable, too-many-locals # pylint: disable=unused-argument, redefined-builtin """GEMM Convolution schedule on ARM""" def configure_knobs(cfg, M, K, target): """Configure auto-tuning knobs for the interleaved strategy""" x, y = cfg.axis(M // 4), cfg.axis(K // 16) cfg.define_reorder("reorder_gemm", [x, y], policy="candidate", candidate=[[x, y], [y, x]]) outer_loop, inner_loop = cfg.axis(4), cfg.axis(16) cfg.define_annotate( "A_interleaved_unroll_vec", [outer_loop, inner_loop], policy="try_unroll_vec" ) # Fallback configuration if cfg.is_fallback: cfg["reorder_gemm"] = ReorderEntity([0, 1]) cfg["A_interleaved_unroll_vec"] = AnnotateEntity(["unroll", "vec"]) if not target.features.has_dotprod: cfg.define_knob("gemm_quantized_unroll", [True, False]) if cfg.is_fallback: cfg["gemm_quantized_unroll"] = OtherOptionEntity(False) # Compute function def compute_conv2d_gemm_without_weight_transform( cfg, data, B_interleaved_t, strides, padding, dilation, out_dtype, kernel_size, output_channels, interleave_A, ): """Compute conv2d by transforming the input, executing GEMM and transforming the output back""" batches, IH, IW, IC = get_const_tuple(data.shape) KH, KW = get_const_tuple(kernel_size) OC = get_const_int(output_channels) kernel_area = KH * KW if isinstance(dilation, int): dilation_h = dilation_w = dilation else: dilation_h, dilation_w = get_const_tuple(dilation) dilated_kernel_h = (KH - 1) * dilation_h + 1 dilated_kernel_w = (KW - 1) * dilation_w + 1 pad_top, pad_left, pad_down, pad_right = get_pad_tuple( padding, (dilated_kernel_h, dilated_kernel_w) ) HSTR, WSTR = strides if isinstance(strides, (tuple, list)) else (strides, strides) OH = (IH + pad_top + pad_down - dilated_kernel_h) // HSTR + 1 OW = (IW + pad_left + pad_right - dilated_kernel_w) // WSTR + 1 if pad_top or pad_left: data_pad = nn.pad( data, [0, pad_top, pad_left, 0], [0, pad_down, pad_right, 0], name="data_pad" ) else: data_pad = data # Im2col M = OH * OW K = IC * kernel_area N = OC A_shape = (batches, M, K) if kernel_area == 1: A = tvm.topi.reshape(data_pad, A_shape) else: A = te.compute( A_shape, lambda n, x, y: data_pad[ n, HSTR * (x // OW) + dilation_h * ((y // IC) // KW), WSTR * (x % OW) + dilation_w * ((y // IC) % KW), y % IC, ], name="data_im2col", ) # Pad if necessary N_transformed = B_interleaved_t.shape[0] tile_rows_B = B_interleaved_t.shape[2] tile_cols_B = B_interleaved_t.shape[3] # Select the tiling strategy for A. # The tiling information is chosen to maximize register usage during # the tile computation. 
# # Please refer to: # - https://discuss.tvm.apache.org/t/rfc-improve-quantized-convolution-performance-for-armv8-architectures # pylint: disable=line-too-long # - https://discuss.tvm.apache.org/t/rfc-accelerate-quantized-convolution-through-dot-product # - https://discuss.tvm.apache.org/t/rfc-improve-quantized-convolution-through-mmla-instruction # - Conv2DGemmWeightTransformRel in src/relay/op/nn/convolution.h # In order to have more information # target = Target.current(allow_none=False) if target.features.has_matmul_i8: # If smmla/ummla is enabled, we are loading 8 rows from A. Each row # will contain 8 elements tile_rows_A = 8 tile_cols_A = 8 elif target.features.has_dotprod and interleave_A: # If dot product has been enabled, and we are interleaving A # tile size should be 8x4 tile_rows_A = 8 tile_cols_A = 4 else: # If either there is no dot product or if we are using a native strategy # tile size should be 4x16 tile_rows_A = 4 tile_cols_A = 16 pad_M = 0 pad_K = 0 if M % tile_rows_A != 0: pad_M = tile_rows_A - (M % tile_rows_A) if K % tile_cols_A != 0: pad_K = tile_cols_A - (K % tile_cols_A) M_padded = M + pad_M K_padded = K + pad_K N_padded = N_transformed * tile_rows_B pad_before = (0, 0, 0) pad_after = (0, pad_M, pad_K) if pad_K != 0: A = nn.pad(A, pad_before=pad_before, pad_after=pad_after, name="A_padded_K") elif pad_M != 0: A = nn.pad(A, pad_before=pad_before, pad_after=pad_after, name="A_padded_M") idxm = tvm.tir.indexmod k = te.reduce_axis((0, K_padded), "k") if interleave_A: # Configuration space configure_knobs(cfg, M_padded, K_padded, target) # Pack the input data A_interleaved = te.compute( (batches, M_padded // tile_rows_A, K_padded // tile_cols_A, tile_rows_A, tile_cols_A), lambda b, x, y, z, w: A[b, z + tile_rows_A * x, w + tile_cols_A * y], name="A_interleaved", ) target = Target.current(allow_none=False) if target.features.has_matmul_i8: # Execute GEMM. In the case of mmla, we need to enforce the tiling # from the compute. This is because mmla is doing a tiled computation # as well. So we have a big 8x12 tile, with small 2x2 sub-tiles # generated by mmla. 
In theory we could make the tile 2x2 and # fuse and split during scheduling, but this would not work # because of possible padding C_interleaved = te.compute( ( batches, M_padded // tile_rows_A, N_transformed, tile_rows_A // 2, tile_rows_B // 2, 2, 2, ), lambda b, x, y, w, z, s, t: te.sum( A_interleaved[b, x, k // tile_cols_A, 2 * w + s, idxm(k, tile_cols_A)].astype( "int32" ) * B_interleaved_t[y, k // tile_cols_B, 2 * z + t, idxm(k, tile_cols_B)].astype( "int32" ), axis=k, ), name="C_interleaved", ) # Ensure the padding needed for tensorize does not get removed during tir passes # by adding a dummy reference to the specific padded area of the result zero = ( tvm.tir.const(1, C_interleaved.dtype) * C_interleaved[ batches - 1, M // tile_rows_A, N_transformed - 1, idxm(M, tile_rows_A) // 2, tile_rows_B // 2 - 1, 1, 1, ] - tvm.tir.const(1, C_interleaved.dtype) * C_interleaved[ batches - 1, M // tile_rows_A, N_transformed - 1, idxm(M, tile_rows_A) // 2, tile_rows_B // 2 - 1, 1, 1, ] ) # Unpack the result C = te.compute( (batches, M, N), lambda b, x, y: ( C_interleaved[ b, x // tile_rows_A, y // tile_rows_B, idxm(x, tile_rows_A) // 2, idxm(y, tile_rows_B) // 2, idxm(idxm(x, tile_rows_A), 2), idxm(idxm(y, tile_rows_B), 2), ] + zero ).astype(out_dtype), name="C", ) else: # Execute GEMM C_interleaved = te.compute( (batches, M_padded // tile_rows_A, N_transformed, tile_rows_A, tile_rows_B), lambda b, x, y, w, z: te.sum( A_interleaved[b, x, k // tile_cols_A, w, idxm(k, tile_cols_A)].astype("int32") * B_interleaved_t[y, k // tile_cols_B, z, idxm(k, tile_cols_B)].astype("int32"), axis=k, ), name="C_interleaved", ) # Unpack the result C = te.compute( (batches, M, N), lambda b, x, y: C_interleaved[ b, x // tile_rows_A, y // tile_rows_B, idxm(x, tile_rows_A), idxm(y, tile_rows_B), ].astype(out_dtype), name="C", ) zero = tvm.tir.const(0) else: # No need to pack/unpack, execute GEMM directly C = te.compute( (batches, M_padded, N_padded), lambda b, x, y: te.sum( A[b, x, k].astype("int32") * B_interleaved_t[ y // tile_rows_B, k // tile_cols_B, idxm(y, tile_rows_B), idxm(k, tile_cols_B) ].astype("int32"), axis=k, ), name="C", ) # We need to ensure that infer bound pass does not remove the padding # which is necessary for the tensorizations to work. 
So we need to # add a dummy reference to the padding area of the result zero = ( tvm.tir.const(1, C.dtype) * C[0, M_padded - 1, N_padded - 1] - tvm.tir.const(1, C.dtype) * C[0, M_padded - 1, N_padded - 1] ) # Reshape the result into a convolution output out_shape = (batches, OH, OW, OC) out = te.compute( out_shape, lambda b, x, y, z: (C(b, y + OW * x, z) + zero).astype(out_dtype), name="conv2d_gemm_output", ) return out def schedule_conv2d_gemm_interleaved(cfg, s, out, final_out): """Schedule the conv2d_gemm interleaved strategy""" C = out.op.input_tensors[0] C_interleaved = C.op.input_tensors[0] A_interleaved = C_interleaved.op.input_tensors[0] # Input transform A_interleaved_input = A_interleaved.op.input_tensors[0] if A_interleaved_input.op.name == "A_padded_K" or A_interleaved_input.op.name == "A_padded_M": s[A_interleaved_input].compute_at(s[A_interleaved], A_interleaved.op.axis[3]) s[A_interleaved_input].vectorize(A_interleaved_input.op.axis[2]) s[A_interleaved_input].compute_inline() data_im2col = A_interleaved_input.op.input_tensors[0] else: data_im2col = A_interleaved_input b, m, n = data_im2col.op.axis if data_im2col.op.name == "data_im2col": n_size = data_im2col.shape[2] if n_size % 16 == 0: split_factor = 16 else: split_factor = 8 n_outer, n_inner = s[data_im2col].split(n, split_factor) s[data_im2col].unroll(n_outer) s[data_im2col].vectorize(n_inner) b_m_fused = s[data_im2col].fuse(b, m) s[data_im2col].parallel(b_m_fused) else: s[data_im2col].compute_inline() # Computation(through tensorize) b, xo, yo, xi, yi = C_interleaved.op.axis[0:5] outer_gemm, inner_gemm = cfg["reorder_gemm"].apply(s, C_interleaved, [xo, yo]) b_outer_gemm_fused = s[C_interleaved].fuse(b, outer_gemm) s[C_interleaved].parallel(b_outer_gemm_fused) s[A_interleaved].compute_at(s[C_interleaved], b_outer_gemm_fused) _, _, _, outer_A_interleaved, inner_A_interleaved = A_interleaved.op.axis cfg["A_interleaved_unroll_vec"].apply( s, A_interleaved, [outer_A_interleaved, inner_A_interleaved] ) in_type = A_interleaved.dtype out_type = C.dtype k = C_interleaved.op.reduce_axis[0] _, M, N = C.shape if in_type in ["int8", "uint8"]: target = Target.current(allow_none=False) if target.features.has_matmul_i8: gemm_acc = gemm_acc_2x2_int8_int8_int32(in_type) xi_inner, yi_inner = C_interleaved.op.axis[-2:] k_outer, k_inner = s[C_interleaved].split(k, 8) s[C_interleaved].reorder( b_outer_gemm_fused, inner_gemm, k_outer, xi, yi, xi_inner, yi_inner, k_inner ) s[C_interleaved].tensorize(xi_inner, gemm_acc) s[C_interleaved].unroll(xi) s[C_interleaved].unroll(yi) elif target.features.has_dotprod: gemm_acc = gemm_acc_4x4_int8_int8_int32(in_type) xi_outer, yi_outer, xi_inner, yi_inner = s[C_interleaved].tile( xi, yi, x_factor=8, y_factor=4 ) k_outer, k_inner = s[C_interleaved].split(k, 4) xi_inner_outer, xi_inner_inner = s[C_interleaved].split(xi_inner, 4) s[C_interleaved].reorder( b_outer_gemm_fused, inner_gemm, xi_outer, yi_outer, k_outer, xi_inner_outer, xi_inner_inner, yi_inner, k_inner, ) s[C_interleaved].tensorize(xi_inner_inner, gemm_acc) s[C_interleaved].unroll(xi_inner_outer) elif target.features.has_asimd: s[C_interleaved].reorder(yi, xi) K = A_interleaved_input.shape[2] assert in_type in ["int8", "uint8"], "Only int8 and uint8 gemm are supported" unroll = cfg["gemm_quantized_unroll"].val
gemm = gemm_4x4_int8_int8_int32(M, N, K, unroll, in_type)
3
2023-12-14 02:37:47+00:00
16k
yolain/ComfyUI-Easy-Use
py/easyNodes.py
[ { "identifier": "advanced_encode", "path": "py/adv_encode.py", "snippet": "def advanced_encode(clip, text, token_normalization, weight_interpretation, w_max=1.0, clip_balance=.5,\n apply_to_pooled=True):\n tokenized = clip.tokenize(text, return_word_ids=True)\n if isinstance(cli...
import sys import os import re import json import time import math import torch import psutil import random import datetime import comfy.sd import comfy.utils import numpy as np import folder_paths import comfy.samplers import comfy.controlnet import latent_preview import comfy.model_base import comfy.model_management from pathlib import Path from comfy.sd import CLIP, VAE from comfy.cli_args import args from urllib.request import urlopen from collections import defaultdict from PIL.PngImagePlugin import PngInfo from PIL import Image, ImageDraw, ImageFont from comfy.model_patcher import ModelPatcher from comfy_extras.chainner_models import model_loading from typing import Dict, List, Optional, Tuple, Union, Any from .adv_encode import advanced_encode, advanced_encode_XL from server import PromptServer from nodes import VAELoader, MAX_RESOLUTION, RepeatLatentBatch, NODE_CLASS_MAPPINGS as ALL_NODE_CLASS_MAPPINGS, ConditioningSetMask from comfy_extras.nodes_mask import LatentCompositeMasked from .config import BASE_RESOLUTIONS from .log import log_node_info, log_node_error, log_node_warn, log_node_success from .wildcards import process_with_loras, get_wildcard_list from comfy_extras.nodes_stable3d import camera_embeddings from .gradual_latent_hires_fix import sample_dpmpp_2s_ancestral, sample_dpmpp_2m_sde, sample_lcm, sample_euler_ancestral from .dynthres_core import DynThresh
10,886
self.image_list, self.max_width, self.max_height, self.latents_plot = self.sample_plot_image( plot_image_vars, latent_image, preview_latent, self.latents_plot, self.image_list, disable_noise, start_step, last_step, force_full_denoise, x_value, y_value) self.num += 1 else: # ttNl(f'{CC.GREY}X: {x_value_label}').t(f'Plot Values {self.num}/{self.total} ->').p() self.image_list, self.max_width, self.max_height, self.latents_plot = self.sample_plot_image( plot_image_vars, latent_image, preview_latent, self.latents_plot, self.image_list, disable_noise, start_step, last_step, force_full_denoise, x_value) self.num += 1 # Rearrange latent array to match preview image grid self.latents_plot = self.rearrange_tensors(self.latents_plot, self.num_cols, self.num_rows) # Concatenate the tensors along the first dimension (dim=0) self.latents_plot = torch.cat(self.latents_plot, dim=0) return self.latents_plot def plot_images_and_labels(self): # Calculate the background dimensions bg_width, bg_height, x_offset_initial, y_offset = self.calculate_background_dimensions() # Create the white background image background = Image.new('RGBA', (int(bg_width), int(bg_height)), color=(255, 255, 255, 255)) output_image = [] for row_index in range(self.num_rows): x_offset = x_offset_initial for col_index in range(self.num_cols): index = col_index * self.num_rows + row_index img = self.image_list[index] output_image.append(sampler.pil2tensor(img)) background.paste(img, (x_offset, y_offset)) # Handle X label if row_index == 0 and self.x_type != "None": label_bg = self.create_label(img, self.x_label[col_index], int(48 * img.width / 512)) label_y = (y_offset - label_bg.height) // 2 background.alpha_composite(label_bg, (x_offset, label_y)) # Handle Y label if col_index == 0 and self.y_type != "None": label_bg = self.create_label(img, self.y_label[row_index], int(48 * img.height / 512), False) label_bg = label_bg.rotate(90, expand=True) label_x = (x_offset - label_bg.width) // 2 label_y = y_offset + (img.height - label_bg.height) // 2 background.alpha_composite(label_bg, (label_x, label_y)) x_offset += img.width + self.grid_spacing y_offset += img.height + self.grid_spacing return (sampler.pil2tensor(background), output_image) easyCache = easyLoader() sampler = easySampler() def check_link_to_clip(node_id, clip_id, visited=None, node=None): """Check if a given node links directly or indirectly to a loader node.""" if visited is None: visited = set() if node_id in visited: return False visited.add(node_id) if "pipe" in node["inputs"]: link_ids = node["inputs"]["pipe"] for id in link_ids: if id != 0 and id == str(clip_id): return True return False def find_nearest_steps(clip_id, prompt): """Find the nearest KSampler or preSampling node that references the given id.""" for id in prompt: node = prompt[id] if "Sampler" in node["class_type"] or "sampler" in node["class_type"] or "Sampling" in node["class_type"]: # Check if this KSampler node directly or indirectly references the given CLIPTextEncode node if check_link_to_clip(id, clip_id, None, node): steps = node["inputs"]["steps"] if "steps" in node["inputs"] else 1 return steps return 1 def find_wildcards_seed(text, prompt): if "__" in text: for i in prompt: if "wildcards" in prompt[i]['class_type'] and text == prompt[i]['inputs']['text']: return prompt[i]['inputs']['seed_num'] if "seed_num" in prompt[i]['inputs'] else None else: return None class easySave: def __init__(self, my_unique_id=0, prompt=None, extra_pnginfo=None, number_padding=5, overwrite_existing=False, 
output_dir=folder_paths.get_temp_directory()): self.number_padding = int(number_padding) if number_padding not in [None, "None", 0] else None self.overwrite_existing = overwrite_existing self.my_unique_id = my_unique_id self.prompt = prompt self.extra_pnginfo = extra_pnginfo self.type = 'temp' self.output_dir = output_dir if self.output_dir != folder_paths.get_temp_directory(): self.output_dir = self.folder_parser(self.output_dir, self.prompt, self.my_unique_id) if not os.path.exists(self.output_dir): self._create_directory(self.output_dir) @staticmethod def _create_directory(folder: str): """Try to create the directory and log the status.""" log_node_warn("", f"Folder {folder} does not exist. Attempting to create...") if not os.path.exists(folder): try: os.makedirs(folder) log_node_success("",f"{folder} Created Successfully") except OSError:
# 加载器 class easyLoader: def __init__(self): self.loaded_objects = { "ckpt": defaultdict(tuple), # {ckpt_name: (model, ...)} "clip": defaultdict(tuple), "clip_vision": defaultdict(tuple), "bvae": defaultdict(tuple), "vae": defaultdict(object), "lora": defaultdict(dict), # {lora_name: {UID: (model_lora, clip_lora)}} } self.memory_threshold = self.determine_memory_threshold(0.7) def clean_values(self, values: str): original_values = values.split("; ") cleaned_values = [] for value in original_values: cleaned_value = value.strip(';').strip() if cleaned_value == "": continue try: cleaned_value = int(cleaned_value) except ValueError: try: cleaned_value = float(cleaned_value) except ValueError: pass cleaned_values.append(cleaned_value) return cleaned_values def clear_unused_objects(self, desired_names: set, object_type: str): keys = set(self.loaded_objects[object_type].keys()) for key in keys - desired_names: del self.loaded_objects[object_type][key] def get_input_value(self, entry, key): val = entry["inputs"][key] return val if isinstance(val, str) else val[0] def process_pipe_loader(self, entry, desired_ckpt_names, desired_vae_names, desired_lora_names, desired_lora_settings, num_loras=3, suffix=""): for idx in range(1, num_loras + 1): lora_name_key = f"{suffix}lora{idx}_name" desired_lora_names.add(self.get_input_value(entry, lora_name_key)) setting = f'{self.get_input_value(entry, lora_name_key)};{entry["inputs"][f"{suffix}lora{idx}_model_strength"]};{entry["inputs"][f"{suffix}lora{idx}_clip_strength"]}' desired_lora_settings.add(setting) desired_ckpt_names.add(self.get_input_value(entry, f"{suffix}ckpt_name")) desired_vae_names.add(self.get_input_value(entry, f"{suffix}vae_name")) def update_loaded_objects(self, prompt): desired_ckpt_names = set() desired_vae_names = set() desired_lora_names = set() desired_lora_settings = set() for entry in prompt.values(): class_type = entry["class_type"] if class_type == "easy a1111Loader" or class_type == "easy comfyLoader": lora_name = self.get_input_value(entry, "lora_name") desired_lora_names.add(lora_name) setting = f'{lora_name};{entry["inputs"]["lora_model_strength"]};{entry["inputs"]["lora_clip_strength"]}' desired_lora_settings.add(setting) desired_ckpt_names.add(self.get_input_value(entry, "ckpt_name")) desired_vae_names.add(self.get_input_value(entry, "vae_name")) elif class_type == "easy zero123Loader" or class_type == 'easy svdLoader': desired_ckpt_names.add(self.get_input_value(entry, "ckpt_name")) desired_vae_names.add(self.get_input_value(entry, "vae_name")) elif class_type == "easy XYInputs: ModelMergeBlocks": desired_ckpt_names.add(self.get_input_value(entry, "ckpt_name_1")) desired_ckpt_names.add(self.get_input_value(entry, "ckpt_name_2")) vae_use = self.get_input_value(entry, "vae_use") if vae_use != 'Use Model 1' and vae_use != 'Use Model 2': desired_vae_names.add(vae_use) object_types = ["ckpt", "clip", "bvae", "vae", "lora"] for object_type in object_types: desired_names = desired_ckpt_names if object_type in ["ckpt", "clip", "bvae"] else desired_vae_names if object_type == "vae" else desired_lora_names self.clear_unused_objects(desired_names, object_type) def add_to_cache(self, obj_type, key, value): """ Add an item to the cache with the current timestamp. """ timestamped_value = (value, time.time()) self.loaded_objects[obj_type][key] = timestamped_value def determine_memory_threshold(self, percentage=0.8): """ Determines the memory threshold as a percentage of the total available memory. 
Args: - percentage (float): The fraction of total memory to use as the threshold. Should be a value between 0 and 1. Default is 0.8 (80%). Returns: - memory_threshold (int): Memory threshold in bytes. """ total_memory = psutil.virtual_memory().total memory_threshold = total_memory * percentage return memory_threshold def get_memory_usage(self): """ Returns the memory usage of the current process in bytes. """ process = psutil.Process(os.getpid()) return process.memory_info().rss def eviction_based_on_memory(self): """ Evicts objects from cache based on memory usage and priority. """ current_memory = self.get_memory_usage() if current_memory < self.memory_threshold: return eviction_order = ["vae", "lora", "bvae", "clip", "ckpt"] for obj_type in eviction_order: if current_memory < self.memory_threshold: break # Sort items based on age (using the timestamp) items = list(self.loaded_objects[obj_type].items()) items.sort(key=lambda x: x[1][1]) # Sorting by timestamp for item in items: if current_memory < self.memory_threshold: break del self.loaded_objects[obj_type][item[0]] current_memory = self.get_memory_usage() def load_checkpoint(self, ckpt_name, config_name=None, load_vision=False): cache_name = ckpt_name if config_name not in [None, "Default"]: cache_name = ckpt_name + "_" + config_name if cache_name in self.loaded_objects["ckpt"]: cache_out = self.loaded_objects["clip_vision"][cache_name][0] if load_vision else self.loaded_objects["clip"][cache_name][0] return self.loaded_objects["ckpt"][cache_name][0], cache_out, self.loaded_objects["bvae"][cache_name][0] ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name) output_clip = False if load_vision else True output_clipvision = True if load_vision else False if config_name not in [None, "Default"]: config_path = folder_paths.get_full_path("configs", config_name) loaded_ckpt = comfy.sd.load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=output_clip, output_clipvision=output_clipvision, embedding_directory=folder_paths.get_folder_paths("embeddings")) else: loaded_ckpt = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=output_clip, output_clipvision=output_clipvision, embedding_directory=folder_paths.get_folder_paths("embeddings")) self.add_to_cache("ckpt", cache_name, loaded_ckpt[0]) self.add_to_cache("bvae", cache_name, loaded_ckpt[2]) if load_vision: out = loaded_ckpt[3] self.add_to_cache("clip_vision", cache_name, out) else: out = loaded_ckpt[1] self.add_to_cache("clip", cache_name, loaded_ckpt[1]) self.eviction_based_on_memory() return loaded_ckpt[0], out, loaded_ckpt[2] def load_vae(self, vae_name): if vae_name in self.loaded_objects["vae"]: return self.loaded_objects["vae"][vae_name][0] vae_path = folder_paths.get_full_path("vae", vae_name) sd = comfy.utils.load_torch_file(vae_path) loaded_vae = comfy.sd.VAE(sd=sd) self.add_to_cache("vae", vae_name, loaded_vae) self.eviction_based_on_memory() return loaded_vae def load_lora(self, lora_name, model, clip, strength_model, strength_clip): model_hash = str(model)[44:-1] clip_hash = str(clip)[25:-1] unique_id = f'{model_hash};{clip_hash};{lora_name};{strength_model};{strength_clip}' if unique_id in self.loaded_objects["lora"] and unique_id in self.loaded_objects["lora"][lora_name]: return self.loaded_objects["lora"][unique_id][0] lora_path = folder_paths.get_full_path("loras", lora_name) lora = comfy.utils.load_torch_file(lora_path, safe_load=True) model_lora, clip_lora = comfy.sd.load_lora_for_models(model, clip, lora, strength_model, 
strength_clip) self.add_to_cache("lora", unique_id, (model_lora, clip_lora)) self.eviction_based_on_memory() return model_lora, clip_lora # 采样器 class easySampler: def __init__(self): self.last_helds: dict[str, list] = { "results": [], "pipe_line": [], } @staticmethod def tensor2pil(image: torch.Tensor) -> Image.Image: """Convert a torch tensor to a PIL image.""" return Image.fromarray(np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8)) @staticmethod def pil2tensor(image: Image.Image) -> torch.Tensor: """Convert a PIL image to a torch tensor.""" return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0) @staticmethod def enforce_mul_of_64(d): d = int(d) if d <= 7: d = 8 leftover = d % 8 # 8 is the number of pixels per byte if leftover != 0: # if the number of pixels is not a multiple of 8 if (leftover < 4): # if the number of pixels is less than 4 d -= leftover # remove the leftover pixels else: # if the number of pixels is more than 4 d += 8 - leftover # add the leftover pixels return int(d) @staticmethod def safe_split(to_split: str, delimiter: str) -> List[str]: """Split the input string and return a list of non-empty parts.""" parts = to_split.split(delimiter) parts = [part for part in parts if part not in ('', ' ', ' ')] while len(parts) < 2: parts.append('None') return parts def common_ksampler(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False, preview_latent=True, disable_pbar=False): device = comfy.model_management.get_torch_device() latent_image = latent["samples"] if disable_noise: noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu") else: batch_inds = latent["batch_index"] if "batch_index" in latent else None noise = comfy.sample.prepare_noise(latent_image, seed, batch_inds) noise_mask = None if "noise_mask" in latent: noise_mask = latent["noise_mask"] preview_format = "JPEG" if preview_format not in ["JPEG", "PNG"]: preview_format = "JPEG" previewer = False if preview_latent: previewer = latent_preview.get_previewer(device, model.model.latent_format) pbar = comfy.utils.ProgressBar(steps) def callback(step, x0, x, total_steps): preview_bytes = None if previewer: preview_bytes = previewer.decode_latent_to_preview_image(preview_format, x0) pbar.update_absolute(step + 1, total_steps, preview_bytes) samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise, disable_noise=disable_noise, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, noise_mask=noise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed) out = latent.copy() out["samples"] = samples return out def custom_ksampler(self, model, seed, steps, cfg, _sampler, sigmas, positive, negative, latent, disable_noise=False, preview_latent=True, disable_pbar=False): device = comfy.model_management.get_torch_device() latent_image = latent["samples"] if disable_noise: noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu") else: batch_inds = latent["batch_index"] if "batch_index" in latent else None noise = comfy.sample.prepare_noise(latent_image, seed, batch_inds) noise_mask = None if "noise_mask" in latent: noise_mask = latent["noise_mask"] preview_format = "JPEG" if preview_format not in ["JPEG", "PNG"]: preview_format = "JPEG" previewer = False if 
preview_latent: previewer = latent_preview.get_previewer(device, model.model.latent_format) pbar = comfy.utils.ProgressBar(steps) def callback(step, x0, x, total_steps): preview_bytes = None if previewer: preview_bytes = previewer.decode_latent_to_preview_image(preview_format, x0) pbar.update_absolute(step + 1, total_steps, preview_bytes) samples = comfy.sample.sample_custom(model, noise, cfg, _sampler, sigmas, positive, negative, latent_image, noise_mask=noise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed) out = latent.copy() out["samples"] = samples return out def get_value_by_id(self, key: str, my_unique_id: Any) -> Optional[Any]: """Retrieve value by its associated ID.""" try: for value, id_ in self.last_helds[key]: if id_ == my_unique_id: return value except KeyError: return None def update_value_by_id(self, key: str, my_unique_id: Any, new_value: Any) -> Union[bool, None]: """Update the value associated with a given ID. Return True if updated, False if appended, None if key doesn't exist.""" try: for i, (value, id_) in enumerate(self.last_helds[key]): if id_ == my_unique_id: self.last_helds[key][i] = (new_value, id_) return True self.last_helds[key].append((new_value, my_unique_id)) return False except KeyError: return False def upscale(self, samples, upscale_method, scale_by, crop): s = samples.copy() width = self.enforce_mul_of_64(round(samples["samples"].shape[3] * scale_by)) height = self.enforce_mul_of_64(round(samples["samples"].shape[2] * scale_by)) if (width > MAX_RESOLUTION): width = MAX_RESOLUTION if (height > MAX_RESOLUTION): height = MAX_RESOLUTION s["samples"] = comfy.utils.common_upscale(samples["samples"], width, height, upscale_method, crop) return (s,) def handle_upscale(self, samples: dict, upscale_method: str, factor: float, crop: bool) -> dict: """Upscale the samples if the upscale_method is not set to 'None'.""" if upscale_method != "None": samples = self.upscale(samples, upscale_method, factor, crop)[0] return samples def init_state(self, my_unique_id: Any, key: str, default: Any) -> Any: """Initialize the state by either fetching the stored value or setting a default.""" value = self.get_value_by_id(key, my_unique_id) if value is not None: return value return default def get_output(self, pipe: dict,) -> Tuple: """Return a tuple of various elements fetched from the input pipe dictionary.""" return ( pipe, pipe.get("images"), pipe.get("model"), pipe.get("positive"), pipe.get("negative"), pipe.get("samples"), pipe.get("vae"), pipe.get("clip"), pipe.get("seed"), ) def get_output_sdxl(self, sdxl_pipe: dict) -> Tuple: """Return a tuple of various elements fetched from the input sdxl_pipe dictionary.""" return ( sdxl_pipe, sdxl_pipe.get("model"), sdxl_pipe.get("positive"), sdxl_pipe.get("negative"), sdxl_pipe.get("vae"), sdxl_pipe.get("refiner_model"), sdxl_pipe.get("refiner_positive"), sdxl_pipe.get("refiner_negative"), sdxl_pipe.get("refiner_vae"), sdxl_pipe.get("samples"), sdxl_pipe.get("clip"), sdxl_pipe.get("images"), sdxl_pipe.get("seed") ) # XY图表 class easyXYPlot: def __init__(self, xyPlotData, save_prefix, image_output, prompt, extra_pnginfo, my_unique_id): self.x_node_type, self.x_type = easySampler.safe_split(xyPlotData.get("x_axis"), ': ') self.y_node_type, self.y_type = easySampler.safe_split(xyPlotData.get("y_axis"), ': ') self.x_values = xyPlotData.get("x_vals") if self.x_type != "None" else [] self.y_values = xyPlotData.get("y_vals") if self.y_type != "None" else [] self.grid_spacing = xyPlotData.get("grid_spacing") self.latent_id = 0 
self.output_individuals = xyPlotData.get("output_individuals") self.x_label, self.y_label = [], [] self.max_width, self.max_height = 0, 0 self.latents_plot = [] self.image_list = [] self.num_cols = len(self.x_values) if len(self.x_values) > 0 else 1 self.num_rows = len(self.y_values) if len(self.y_values) > 0 else 1 self.total = self.num_cols * self.num_rows self.num = 0 self.save_prefix = save_prefix self.image_output = image_output self.prompt = prompt self.extra_pnginfo = extra_pnginfo self.my_unique_id = my_unique_id # Helper Functions @staticmethod def define_variable(plot_image_vars, value_type, value, index): plot_image_vars[value_type] = value if value_type in ["seed", "Seeds++ Batch"]: value_label = f"{value}" else: value_label = f"{value_type}: {value}" if "ControlNet" in value_type: if "," in value: line = value.split(',') value_label = f"{value_type}: {line[2]}" if value_type in ["ModelMergeBlocks"]: if ":" in value: line = value.split(':') value_label = f"{line[0]}" elif len(value) > 16: value_label = f"ModelMergeBlocks {index + 1}" else: value_label = f"MMB: {value}" if value_type in ["Positive Prompt S/R"]: value_label = f"pos prompt {index + 1}" if index>0 else f"pos prompt" if value_type in ["Negative Prompt S/R"]: value_label = f"neg prompt {index + 1}" if index>0 else f"neg prompt" if value_type in ["steps", "cfg", "denoise", "clip_skip", "lora_model_strength", "lora_clip_strength"]: value_label = f"{value_type}: {value}" if value_type == "positive": value_label = f"pos prompt {index + 1}" elif value_type == "negative": value_label = f"neg prompt {index + 1}" return plot_image_vars, value_label @staticmethod def get_font(font_size): return ImageFont.truetype(str(Path(os.path.join(Path(__file__).parent.parent, 'resources/OpenSans-Medium.ttf'))), font_size) @staticmethod def update_label(label, value, num_items): if len(label) < num_items: return [*label, value] return label @staticmethod def rearrange_tensors(latent, num_cols, num_rows): new_latent = [] for i in range(num_rows): for j in range(num_cols): index = j * num_rows + i new_latent.append(latent[index]) return new_latent def calculate_background_dimensions(self): border_size = int((self.max_width // 8) * 1.5) if self.y_type != "None" or self.x_type != "None" else 0 bg_width = self.num_cols * (self.max_width + self.grid_spacing) - self.grid_spacing + border_size * ( self.y_type != "None") bg_height = self.num_rows * (self.max_height + self.grid_spacing) - self.grid_spacing + border_size * ( self.x_type != "None") x_offset_initial = border_size if self.y_type != "None" else 0 y_offset = border_size if self.x_type != "None" else 0 return bg_width, bg_height, x_offset_initial, y_offset def adjust_font_size(self, text, initial_font_size, label_width): font = self.get_font(initial_font_size) text_width, _ = font.getsize(text) scaling_factor = 0.9 if text_width > (label_width * scaling_factor): return int(initial_font_size * (label_width / text_width) * scaling_factor) else: return initial_font_size def create_label(self, img, text, initial_font_size, is_x_label=True, max_font_size=70, min_font_size=10): label_width = img.width if is_x_label else img.height # Adjust font size font_size = self.adjust_font_size(text, initial_font_size, label_width) font_size = min(max_font_size, font_size) # Ensure font isn't too large font_size = max(min_font_size, font_size) # Ensure font isn't too small label_height = int(font_size * 1.5) if is_x_label else font_size label_bg = Image.new('RGBA', (label_width, label_height), color=(255, 
255, 255, 0)) d = ImageDraw.Draw(label_bg) font = self.get_font(font_size) # Check if text will fit, if not insert ellipsis and reduce text if d.textsize(text, font=font)[0] > label_width: while d.textsize(text + '...', font=font)[0] > label_width and len(text) > 0: text = text[:-1] text = text + '...' # Compute text width and height for multi-line text text_lines = text.split('\n') text_widths, text_heights = zip(*[d.textsize(line, font=font) for line in text_lines]) max_text_width = max(text_widths) total_text_height = sum(text_heights) # Compute position for each line of text lines_positions = [] current_y = 0 for line, line_width, line_height in zip(text_lines, text_widths, text_heights): text_x = (label_width - line_width) // 2 text_y = current_y + (label_height - total_text_height) // 2 current_y += line_height lines_positions.append((line, (text_x, text_y))) # Draw each line of text for line, (text_x, text_y) in lines_positions: d.text((text_x, text_y), line, fill='black', font=font) return label_bg def sample_plot_image(self, plot_image_vars, samples, preview_latent, latents_plot, image_list, disable_noise, start_step, last_step, force_full_denoise, x_value=None, y_value=None): model, clip, vae, positive, negative, seed, steps, cfg = None, None, None, None, None, None, None, None sampler_name, scheduler, denoise = None, None, None # 高级用法 if plot_image_vars["x_node_type"] == "advanced" or plot_image_vars["y_node_type"] == "advanced": if self.x_type == "Seeds++ Batch" or self.y_type == "Seeds++ Batch": seed = int(x_value) if self.x_type == "Seeds++ Batch" else int(y_value) if self.x_type == "Steps" or self.y_type == "Steps": steps = int(x_value) if self.x_type == "Steps" else int(y_value) if self.x_type == "StartStep" or self.y_type == "StartStep": start_step = int(x_value) if self.x_type == "StartStep" else int(y_value) if self.x_type == "EndStep" or self.y_type == "EndStep": last_step = int(x_value) if self.x_type == "EndStep" else int(y_value) if self.x_type == "CFG Scale" or self.y_type == "CFG Scale": cfg = float(x_value) if self.x_type == "CFG Scale" else float(y_value) if self.x_type == "Sampler" or self.y_type == "Sampler" or self.y_type == "Sampler & Scheduler": sampler_name = float(x_value) if self.x_type == "Sampler" or self.x_type == "Sampler & Scheduler" else float(y_value) if self.x_type == "Scheduler" or self.y_type == "Scheduler" or self.y_type == "Sampler & Scheduler": scheduler = float(x_value) if self.x_type == "Scheduler" or self.x_type == "Sampler & Scheduler" else float(y_value) if self.x_type == "Denoise" or self.y_type == "Denoise": denoise = float(x_value) if self.x_type == "Denoise" else float(y_value) # 模型叠加 if self.x_type == "ModelMergeBlocks" or self.y_type == "ModelMergeBlocks": ckpt_name_1, ckpt_name_2 = plot_image_vars['models'] model1, clip1, vae1 = easyCache.load_checkpoint(ckpt_name_1) model2, clip2, vae2 = easyCache.load_checkpoint(ckpt_name_2) xy_values = x_value if self.x_type == "ModelMergeBlocks" else y_value if ":" in xy_values: xy_line = xy_values.split(':') xy_values = xy_line[1] xy_arrs = xy_values.split(',') # ModelMergeBlocks if len(xy_arrs) == 3: input, middle, out = xy_arrs kwargs = { "input": input, "middle": middle, "out": out } elif len(xy_arrs) == 30: kwargs = {} kwargs["time_embed."] = xy_arrs[0] kwargs["label_emb."] = xy_arrs[1] for i in range(12): kwargs["input_blocks.{}.".format(i)] = xy_arrs[2+i] for i in range(3): kwargs["middle_block.{}.".format(i)] = xy_arrs[14+i] for i in range(12): kwargs["output_blocks.{}.".format(i)] = 
xy_arrs[17+i] kwargs["out."] = xy_arrs[29] else: raise Exception("ModelMergeBlocks weight length error") default_ratio = next(iter(kwargs.values())) m = model1.clone() kp = model2.get_key_patches("diffusion_model.") for k in kp: ratio = float(default_ratio) k_unet = k[len("diffusion_model."):] last_arg_size = 0 for arg in kwargs: if k_unet.startswith(arg) and last_arg_size < len(arg): ratio = float(kwargs[arg]) last_arg_size = len(arg) m.add_patches({k: kp[k]}, 1.0 - ratio, ratio) vae_use = plot_image_vars['vae_use'] clip = clip2 if vae_use == 'Use Model 2' else clip1 if vae_use == 'Use Model 2': vae = vae2 elif vae_use == 'Use Model 1': vae = vae1 else: (vae,) = VAELoader().load_vae(vae_use) model = m # 如果存在lora_stack叠加lora optional_lora_stack = plot_image_vars['lora_stack'] if optional_lora_stack is not None and optional_lora_stack != []: for lora in optional_lora_stack: lora_name = lora["lora_name"] model = model if model is not None else lora["model"] clip = clip if clip is not None else lora["clip"] lora_model_strength = lora["lora_model_strength"] lora_clip_strength = lora["lora_clip_strength"] if "lbw" in lora: lbw = lora["lbw"] lbw_a = lora["lbw_a"] lbw_b = lora["lbw_b"] cls = ALL_NODE_CLASS_MAPPINGS['LoraLoaderBlockWeight //Inspire'] model, clip, _ = cls().doit(model, clip, lora_name, lora_model_strength, lora_clip_strength, False, 0, lbw_a, lbw_b, "", lbw) model, clip = easyCache.load_lora(lora_name, model, clip, lora_model_strength, lora_clip_strength) # 处理clip clip = clip.clone() if plot_image_vars['clip_skip'] != 0: clip.clip_layer(plot_image_vars['clip_skip']) # 提示词 if "Positive" in self.x_type or "Positive" in self.y_type: if self.x_type == 'Positive Prompt S/R' or self.y_type == 'Positive Prompt S/R': positive = x_value if self.x_type == "Positive Prompt S/R" else y_value if plot_image_vars['a1111_prompt_style']: if "smZ CLIPTextEncode" in ALL_NODE_CLASS_MAPPINGS: cls = ALL_NODE_CLASS_MAPPINGS['smZ CLIPTextEncode'] steps = plot_image_vars['steps'] clip = clip if clip is not None else plot_image_vars["clip"] positive, = cls().encode(clip, positive, "A1111", True, True, False, False, 6, 1024, 1024, 0, 0, 1024, 1024, '', '', steps) else: raise Exception( f"[ERROR] To use clip text encode same as webui, you need to install 'smzNodes'") else: clip = clip if clip is not None else plot_image_vars["clip"] positive, positive_pooled = advanced_encode(clip, positive, plot_image_vars['positive_token_normalization'], plot_image_vars[ 'positive_weight_interpretation'], w_max=1.0, apply_to_pooled="enable") positive = [[positive, {"pooled_output": positive_pooled}]] if "Negative" in self.x_type or "Negative" in self.y_type: if self.x_type == 'Negative Prompt S/R' or self.y_type == 'Negative Prompt S/R': negative = x_value if self.x_type == "Negative Prompt S/R" else y_value if plot_image_vars['a1111_prompt_style']: if "smZ CLIPTextEncode" in ALL_NODE_CLASS_MAPPINGS: cls = ALL_NODE_CLASS_MAPPINGS['smZ CLIPTextEncode'] steps = plot_image_vars['steps'] clip = clip if clip is not None else plot_image_vars["clip"] negative, = cls().encode(clip, negative, "A1111", True, True, False, False, 6, 1024, 1024, 0, 0, 1024, 1024, '', '', steps) else: raise Exception( f"[ERROR] To use clip text encode same as webui, you need to install 'smzNodes'") else: clip = clip if clip is not None else plot_image_vars["clip"] negative, negative_pooled = advanced_encode(clip, negative, plot_image_vars['negative_token_normalization'], plot_image_vars[ 'negative_weight_interpretation'], w_max=1.0, 
apply_to_pooled="enable") negative = [[negative, {"pooled_output": negative_pooled}]] # ControlNet if "ControlNet" in self.x_type or "ControlNet" in self.y_type: _pipe = { "model": model if model is not None else plot_image_vars["model"], "positive": positive if positive is not None else plot_image_vars["positive_cond"], "negative": negative if negative is not None else plot_image_vars["negative_cond"], "vae": vae if vae is not None else plot_image_vars['vae'], "clip": clip if clip is not None else plot_image_vars['clip'], "samples": None, "images": None, "loader_settings": {} } cnet = plot_image_vars["cnet"] if "cnet" in plot_image_vars else None if cnet: strength, start_percent, end_percent = x_value.split(',') if "ControlNet" in self.x_type else y_value.split(',') strength = float(strength) start_percent = float(start_percent) end_percent = float(end_percent) for index, item in enumerate(cnet): control_net_names = item[0] image = item[1] for idx, control_net_name in enumerate(control_net_names): # print(control_net_name) _pipe, = controlnetAdvanced().controlnetApply(_pipe, image, control_net_name, None, strength, start_percent, end_percent) positive = _pipe['positive'] negative = _pipe['negative'] del _pipe # 简单用法 if plot_image_vars["x_node_type"] == "loader" or plot_image_vars["y_node_type"] == "loader": model, clip, vae = easyCache.load_checkpoint(plot_image_vars['ckpt_name']) if plot_image_vars['lora_name'] != "None": model, clip = easyCache.load_lora(plot_image_vars['lora_name'], model, clip, plot_image_vars['lora_model_strength'], plot_image_vars['lora_clip_strength']) # Check for custom VAE if plot_image_vars['vae_name'] not in ["Baked-VAE", "Baked VAE"]: vae = easyCache.load_vae(plot_image_vars['vae_name']) # CLIP skip if not clip: raise Exception("No CLIP found") clip = clip.clone() clip.clip_layer(plot_image_vars['clip_skip']) if plot_image_vars['a1111_prompt_style']: if "smZ CLIPTextEncode" in ALL_NODE_CLASS_MAPPINGS: cls = ALL_NODE_CLASS_MAPPINGS['smZ CLIPTextEncode'] steps = plot_image_vars['steps'] positive, = cls().encode(clip, plot_image_vars['positive'], "A1111", True, True, False, False, 6, 1024, 1024, 0, 0, 1024, 1024, '', '', steps) negative, = cls().encode(clip, plot_image_vars['negative'], "A1111", True, True, False, False, 6, 1024, 1024, 0, 0, 1024, 1024, '', '', steps) else: raise Exception(f"[ERROR] To use clip text encode same as webui, you need to install 'smzNodes'") else: positive, positive_pooled = advanced_encode(clip, plot_image_vars['positive'], plot_image_vars['positive_token_normalization'], plot_image_vars['positive_weight_interpretation'], w_max=1.0, apply_to_pooled="enable") positive = [[positive, {"pooled_output": positive_pooled}]] negative, negative_pooled = advanced_encode(clip, plot_image_vars['negative'], plot_image_vars['negative_token_normalization'], plot_image_vars['negative_weight_interpretation'], w_max=1.0, apply_to_pooled="enable") negative = [[negative, {"pooled_output": negative_pooled}]] model = model if model is not None else plot_image_vars["model"] clip = clip if clip is not None else plot_image_vars["clip"] vae = vae if vae is not None else plot_image_vars["vae"] positive = positive if positive is not None else plot_image_vars["positive_cond"] negative = negative if negative is not None else plot_image_vars["negative_cond"] seed = seed if seed is not None else plot_image_vars["seed"] steps = steps if steps is not None else plot_image_vars["steps"] cfg = cfg if cfg is not None else plot_image_vars["cfg"] sampler_name = 
sampler_name if sampler_name is not None else plot_image_vars["sampler_name"] scheduler = scheduler if scheduler is not None else plot_image_vars["scheduler"] denoise = denoise if denoise is not None else plot_image_vars["denoise"] # Sample samples = sampler.common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, samples, denoise=denoise, disable_noise=disable_noise, preview_latent=preview_latent, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise) # Decode images and store latent = samples["samples"] # Add the latent tensor to the tensors list latents_plot.append(latent) # Decode the image image = vae.decode(latent).cpu() if self.output_individuals in [True, "True"]: easy_save = easySave(self.my_unique_id, self.prompt, self.extra_pnginfo) easy_save.images(image, self.save_prefix, self.image_output, group_id=self.num) # Convert the image from tensor to PIL Image and add it to the list pil_image = easySampler.tensor2pil(image) image_list.append(pil_image) # Update max dimensions self.max_width = max(self.max_width, pil_image.width) self.max_height = max(self.max_height, pil_image.height) # Return the touched variables return image_list, self.max_width, self.max_height, latents_plot # Process Functions def validate_xy_plot(self): if self.x_type == 'None' and self.y_type == 'None': log_node_warn(f'easyKsampler[{self.my_unique_id}]','No Valid Plot Types - Reverting to default sampling...') return False else: return True def get_latent(self, samples): # Extract the 'samples' tensor from the dictionary latent_image_tensor = samples["samples"] # Split the tensor into individual image tensors image_tensors = torch.split(latent_image_tensor, 1, dim=0) # Create a list of dictionaries containing the individual image tensors latent_list = [{'samples': image} for image in image_tensors] # Set latent only to the first latent of batch if self.latent_id >= len(latent_list): log_node_warn(f'easy kSampler[{self.my_unique_id}]',f'The selected latent_id ({self.latent_id}) is out of range.') log_node_warn(f'easy kSampler[{self.my_unique_id}]', f'Automatically setting the latent_id to the last image in the list (index: {len(latent_list) - 1}).') self.latent_id = len(latent_list) - 1 return latent_list[self.latent_id] def get_labels_and_sample(self, plot_image_vars, latent_image, preview_latent, start_step, last_step, force_full_denoise, disable_noise): for x_index, x_value in enumerate(self.x_values): plot_image_vars, x_value_label = self.define_variable(plot_image_vars, self.x_type, x_value, x_index) self.x_label = self.update_label(self.x_label, x_value_label, len(self.x_values)) if self.y_type != 'None': for y_index, y_value in enumerate(self.y_values): plot_image_vars, y_value_label = self.define_variable(plot_image_vars, self.y_type, y_value, y_index) self.y_label = self.update_label(self.y_label, y_value_label, len(self.y_values)) # ttNl(f'{CC.GREY}X: {x_value_label}, Y: {y_value_label}').t( # f'Plot Values {self.num}/{self.total} ->').p() self.image_list, self.max_width, self.max_height, self.latents_plot = self.sample_plot_image( plot_image_vars, latent_image, preview_latent, self.latents_plot, self.image_list, disable_noise, start_step, last_step, force_full_denoise, x_value, y_value) self.num += 1 else: # ttNl(f'{CC.GREY}X: {x_value_label}').t(f'Plot Values {self.num}/{self.total} ->').p() self.image_list, self.max_width, self.max_height, self.latents_plot = self.sample_plot_image( plot_image_vars, latent_image, preview_latent, 
self.latents_plot, self.image_list, disable_noise, start_step, last_step, force_full_denoise, x_value) self.num += 1 # Rearrange latent array to match preview image grid self.latents_plot = self.rearrange_tensors(self.latents_plot, self.num_cols, self.num_rows) # Concatenate the tensors along the first dimension (dim=0) self.latents_plot = torch.cat(self.latents_plot, dim=0) return self.latents_plot def plot_images_and_labels(self): # Calculate the background dimensions bg_width, bg_height, x_offset_initial, y_offset = self.calculate_background_dimensions() # Create the white background image background = Image.new('RGBA', (int(bg_width), int(bg_height)), color=(255, 255, 255, 255)) output_image = [] for row_index in range(self.num_rows): x_offset = x_offset_initial for col_index in range(self.num_cols): index = col_index * self.num_rows + row_index img = self.image_list[index] output_image.append(sampler.pil2tensor(img)) background.paste(img, (x_offset, y_offset)) # Handle X label if row_index == 0 and self.x_type != "None": label_bg = self.create_label(img, self.x_label[col_index], int(48 * img.width / 512)) label_y = (y_offset - label_bg.height) // 2 background.alpha_composite(label_bg, (x_offset, label_y)) # Handle Y label if col_index == 0 and self.y_type != "None": label_bg = self.create_label(img, self.y_label[row_index], int(48 * img.height / 512), False) label_bg = label_bg.rotate(90, expand=True) label_x = (x_offset - label_bg.width) // 2 label_y = y_offset + (img.height - label_bg.height) // 2 background.alpha_composite(label_bg, (label_x, label_y)) x_offset += img.width + self.grid_spacing y_offset += img.height + self.grid_spacing return (sampler.pil2tensor(background), output_image) easyCache = easyLoader() sampler = easySampler() def check_link_to_clip(node_id, clip_id, visited=None, node=None): """Check if a given node links directly or indirectly to a loader node.""" if visited is None: visited = set() if node_id in visited: return False visited.add(node_id) if "pipe" in node["inputs"]: link_ids = node["inputs"]["pipe"] for id in link_ids: if id != 0 and id == str(clip_id): return True return False def find_nearest_steps(clip_id, prompt): """Find the nearest KSampler or preSampling node that references the given id.""" for id in prompt: node = prompt[id] if "Sampler" in node["class_type"] or "sampler" in node["class_type"] or "Sampling" in node["class_type"]: # Check if this KSampler node directly or indirectly references the given CLIPTextEncode node if check_link_to_clip(id, clip_id, None, node): steps = node["inputs"]["steps"] if "steps" in node["inputs"] else 1 return steps return 1 def find_wildcards_seed(text, prompt): if "__" in text: for i in prompt: if "wildcards" in prompt[i]['class_type'] and text == prompt[i]['inputs']['text']: return prompt[i]['inputs']['seed_num'] if "seed_num" in prompt[i]['inputs'] else None else: return None class easySave: def __init__(self, my_unique_id=0, prompt=None, extra_pnginfo=None, number_padding=5, overwrite_existing=False, output_dir=folder_paths.get_temp_directory()): self.number_padding = int(number_padding) if number_padding not in [None, "None", 0] else None self.overwrite_existing = overwrite_existing self.my_unique_id = my_unique_id self.prompt = prompt self.extra_pnginfo = extra_pnginfo self.type = 'temp' self.output_dir = output_dir if self.output_dir != folder_paths.get_temp_directory(): self.output_dir = self.folder_parser(self.output_dir, self.prompt, self.my_unique_id) if not os.path.exists(self.output_dir): 
self._create_directory(self.output_dir) @staticmethod def _create_directory(folder: str): """Try to create the directory and log the status.""" log_node_warn("", f"Folder {folder} does not exist. Attempting to create...") if not os.path.exists(folder): try: os.makedirs(folder) log_node_success("",f"{folder} Created Successfully") except OSError:
log_node_error(f"Failed to create folder {folder}")
4
2023-12-10 07:02:36+00:00
16k
AIFSH/NativeDancer
nativedancer/third_part/detectron2/data/build.py
[ { "identifier": "configurable", "path": "nativedancer/third_part/detectron2/config/config.py", "snippet": "def configurable(init_func=None, *, from_config=None):\n \"\"\"\n Decorate a function or a class's __init__ method so that it can be called\n with a :class:`CfgNode` object using a :func:`...
import itertools import logging import numpy as np import operator import pickle import torch import torch.utils.data as torchdata from collections import OrderedDict, defaultdict from typing import Any, Callable, Dict, List, Optional, Union from tabulate import tabulate from termcolor import colored from ..config import configurable from ..structures import BoxMode from ..utils.comm import get_world_size from ..utils.env import seed_all_rng from ..utils.file_io import PathManager from ..utils.logger import _log_api_usage, log_first_n from .catalog import DatasetCatalog, MetadataCatalog from .common import AspectRatioGroupedDataset, DatasetFromList, MapDataset, ToIterableDataset from .dataset_mapper import DatasetMapper from .detection_utils import check_metadata_consistency from .samplers import ( InferenceSampler, RandomSubsetTrainingSampler, RepeatFactorTrainingSampler, TrainingSampler, )
12,525
table = tabulate( data, headers=["category", "#instances"] * (N_COLS // 2), tablefmt="pipe", numalign="left", stralign="center", ) log_first_n( logging.INFO, "Distribution of instances among all {} categories:\n".format(num_classes) + colored(table, "cyan"), key="message", ) def get_detection_dataset_dicts( names, filter_empty=True, min_keypoints=0, proposal_files=None, check_consistency=True, ): """ Load and prepare dataset dicts for instance detection/segmentation and semantic segmentation. Args: names (str or list[str]): a dataset name or a list of dataset names filter_empty (bool): whether to filter out images without instance annotations min_keypoints (int): filter out images with fewer keypoints than `min_keypoints`. Set to 0 to do nothing. proposal_files (list[str]): if given, a list of object proposal files that match each dataset in `names`. check_consistency (bool): whether to check if datasets have consistent metadata. Returns: list[dict]: a list of dicts following the standard dataset dict format. """ if isinstance(names, str): names = [names] assert len(names), names available_datasets = DatasetCatalog.keys() names_set = set(names) if not names_set.issubset(available_datasets): logger = logging.getLogger(__name__) logger.warning( "The following dataset names are not registered in the DatasetCatalog: " f"{names_set - available_datasets}. " f"Available datasets are {available_datasets}" ) dataset_dicts = [DatasetCatalog.get(dataset_name) for dataset_name in names] if isinstance(dataset_dicts[0], torchdata.Dataset): if len(dataset_dicts) > 1: # ConcatDataset does not work for iterable style dataset. # We could support concat for iterable as well, but it's often # not a good idea to concat iterables anyway. return torchdata.ConcatDataset(dataset_dicts) return dataset_dicts[0] for dataset_name, dicts in zip(names, dataset_dicts): assert len(dicts), "Dataset '{}' is empty!".format(dataset_name) if proposal_files is not None: assert len(names) == len(proposal_files) # load precomputed proposals from proposal files dataset_dicts = [ load_proposals_into_dataset(dataset_i_dicts, proposal_file) for dataset_i_dicts, proposal_file in zip(dataset_dicts, proposal_files) ] dataset_dicts = list(itertools.chain.from_iterable(dataset_dicts)) has_instances = "annotations" in dataset_dicts[0] if filter_empty and has_instances: dataset_dicts = filter_images_with_only_crowd_annotations(dataset_dicts) if min_keypoints > 0 and has_instances: dataset_dicts = filter_images_with_few_keypoints(dataset_dicts, min_keypoints) if check_consistency and has_instances: try: class_names = MetadataCatalog.get(names[0]).thing_classes check_metadata_consistency("thing_classes", names) print_instances_class_histogram(dataset_dicts, class_names) except AttributeError: # class names are not available for this dataset pass assert len(dataset_dicts), "No valid data found in {}.".format(",".join(names)) return dataset_dicts def build_batch_data_loader( dataset, sampler, total_batch_size, *, aspect_ratio_grouping=False, num_workers=0, collate_fn=None, drop_last: bool = True, **kwargs, ): """ Build a batched dataloader. The main differences from `torch.utils.data.DataLoader` are: 1. support aspect ratio grouping options 2. use no "batch collation", because this is common for detection training Args: dataset (torch.utils.data.Dataset): a pytorch map-style or iterable dataset. sampler (torch.utils.data.sampler.Sampler or None): a sampler that produces indices. Must be provided iff. ``dataset`` is a map-style dataset. 
total_batch_size, aspect_ratio_grouping, num_workers, collate_fn: see :func:`build_detection_train_loader`. drop_last (bool): if ``True``, the dataloader will drop incomplete batches. Returns: iterable[list]. Length of each list is the batch size of the current GPU. Each element in the list comes from the dataset. """
# Copyright (c) Facebook, Inc. and its affiliates. """ This file contains the default logic to build a dataloader for training or testing. """ __all__ = [ "build_batch_data_loader", "build_detection_train_loader", "build_detection_test_loader", "get_detection_dataset_dicts", "load_proposals_into_dataset", "print_instances_class_histogram", ] def filter_images_with_only_crowd_annotations(dataset_dicts): """ Filter out images with none annotations or only crowd annotations (i.e., images without non-crowd annotations). A common training-time preprocessing on COCO dataset. Args: dataset_dicts (list[dict]): annotations in Detectron2 Dataset format. Returns: list[dict]: the same format, but filtered. """ num_before = len(dataset_dicts) def valid(anns): for ann in anns: if ann.get("iscrowd", 0) == 0: return True return False dataset_dicts = [x for x in dataset_dicts if valid(x["annotations"])] num_after = len(dataset_dicts) logger = logging.getLogger(__name__) logger.info( "Removed {} images with no usable annotations. {} images left.".format( num_before - num_after, num_after ) ) return dataset_dicts def filter_images_with_few_keypoints(dataset_dicts, min_keypoints_per_image): """ Filter out images with too few number of keypoints. Args: dataset_dicts (list[dict]): annotations in Detectron2 Dataset format. Returns: list[dict]: the same format as dataset_dicts, but filtered. """ num_before = len(dataset_dicts) def visible_keypoints_in_image(dic): # Each keypoints field has the format [x1, y1, v1, ...], where v is visibility annotations = dic["annotations"] return sum( (np.array(ann["keypoints"][2::3]) > 0).sum() for ann in annotations if "keypoints" in ann ) dataset_dicts = [ x for x in dataset_dicts if visible_keypoints_in_image(x) >= min_keypoints_per_image ] num_after = len(dataset_dicts) logger = logging.getLogger(__name__) logger.info( "Removed {} images with fewer than {} keypoints.".format( num_before - num_after, min_keypoints_per_image ) ) return dataset_dicts def load_proposals_into_dataset(dataset_dicts, proposal_file): """ Load precomputed object proposals into the dataset. The proposal file should be a pickled dict with the following keys: - "ids": list[int] or list[str], the image ids - "boxes": list[np.ndarray], each is an Nx4 array of boxes corresponding to the image id - "objectness_logits": list[np.ndarray], each is an N sized array of objectness scores corresponding to the boxes. - "bbox_mode": the BoxMode of the boxes array. Defaults to ``BoxMode.XYXY_ABS``. Args: dataset_dicts (list[dict]): annotations in Detectron2 Dataset format. proposal_file (str): file path of pre-computed proposals, in pkl format. Returns: list[dict]: the same format as dataset_dicts, but added proposal field. """ logger = logging.getLogger(__name__) logger.info("Loading proposals from: {}".format(proposal_file)) with PathManager.open(proposal_file, "rb") as f: proposals = pickle.load(f, encoding="latin1") # Rename the key names in D1 proposal files rename_keys = {"indexes": "ids", "scores": "objectness_logits"} for key in rename_keys: if key in proposals: proposals[rename_keys[key]] = proposals.pop(key) # Fetch the indexes of all proposals that are in the dataset # Convert image_id to str since they could be int. 
img_ids = set({str(record["image_id"]) for record in dataset_dicts}) id_to_index = {str(id): i for i, id in enumerate(proposals["ids"]) if str(id) in img_ids} # Assuming default bbox_mode of precomputed proposals are 'XYXY_ABS' bbox_mode = BoxMode(proposals["bbox_mode"]) if "bbox_mode" in proposals else BoxMode.XYXY_ABS for record in dataset_dicts: # Get the index of the proposal i = id_to_index[str(record["image_id"])] boxes = proposals["boxes"][i] objectness_logits = proposals["objectness_logits"][i] # Sort the proposals in descending order of the scores inds = objectness_logits.argsort()[::-1] record["proposal_boxes"] = boxes[inds] record["proposal_objectness_logits"] = objectness_logits[inds] record["proposal_bbox_mode"] = bbox_mode return dataset_dicts def print_instances_class_histogram(dataset_dicts, class_names): """ Args: dataset_dicts (list[dict]): list of dataset dicts. class_names (list[str]): list of class names (zero-indexed). """ num_classes = len(class_names) hist_bins = np.arange(num_classes + 1) histogram = np.zeros((num_classes,), dtype=int) for entry in dataset_dicts: annos = entry["annotations"] classes = np.asarray( [x["category_id"] for x in annos if not x.get("iscrowd", 0)], dtype=int ) if len(classes): assert classes.min() >= 0, f"Got an invalid category_id={classes.min()}" assert ( classes.max() < num_classes ), f"Got an invalid category_id={classes.max()} for a dataset of {num_classes} classes" histogram += np.histogram(classes, bins=hist_bins)[0] N_COLS = min(6, len(class_names) * 2) def short_name(x): # make long class names shorter. useful for lvis if len(x) > 13: return x[:11] + ".." return x data = list( itertools.chain(*[[short_name(class_names[i]), int(v)] for i, v in enumerate(histogram)]) ) total_num_instances = sum(data[1::2]) data.extend([None] * (N_COLS - (len(data) % N_COLS))) if num_classes > 1: data.extend(["total", total_num_instances]) data = itertools.zip_longest(*[data[i::N_COLS] for i in range(N_COLS)]) table = tabulate( data, headers=["category", "#instances"] * (N_COLS // 2), tablefmt="pipe", numalign="left", stralign="center", ) log_first_n( logging.INFO, "Distribution of instances among all {} categories:\n".format(num_classes) + colored(table, "cyan"), key="message", ) def get_detection_dataset_dicts( names, filter_empty=True, min_keypoints=0, proposal_files=None, check_consistency=True, ): """ Load and prepare dataset dicts for instance detection/segmentation and semantic segmentation. Args: names (str or list[str]): a dataset name or a list of dataset names filter_empty (bool): whether to filter out images without instance annotations min_keypoints (int): filter out images with fewer keypoints than `min_keypoints`. Set to 0 to do nothing. proposal_files (list[str]): if given, a list of object proposal files that match each dataset in `names`. check_consistency (bool): whether to check if datasets have consistent metadata. Returns: list[dict]: a list of dicts following the standard dataset dict format. """ if isinstance(names, str): names = [names] assert len(names), names available_datasets = DatasetCatalog.keys() names_set = set(names) if not names_set.issubset(available_datasets): logger = logging.getLogger(__name__) logger.warning( "The following dataset names are not registered in the DatasetCatalog: " f"{names_set - available_datasets}. 
" f"Available datasets are {available_datasets}" ) dataset_dicts = [DatasetCatalog.get(dataset_name) for dataset_name in names] if isinstance(dataset_dicts[0], torchdata.Dataset): if len(dataset_dicts) > 1: # ConcatDataset does not work for iterable style dataset. # We could support concat for iterable as well, but it's often # not a good idea to concat iterables anyway. return torchdata.ConcatDataset(dataset_dicts) return dataset_dicts[0] for dataset_name, dicts in zip(names, dataset_dicts): assert len(dicts), "Dataset '{}' is empty!".format(dataset_name) if proposal_files is not None: assert len(names) == len(proposal_files) # load precomputed proposals from proposal files dataset_dicts = [ load_proposals_into_dataset(dataset_i_dicts, proposal_file) for dataset_i_dicts, proposal_file in zip(dataset_dicts, proposal_files) ] dataset_dicts = list(itertools.chain.from_iterable(dataset_dicts)) has_instances = "annotations" in dataset_dicts[0] if filter_empty and has_instances: dataset_dicts = filter_images_with_only_crowd_annotations(dataset_dicts) if min_keypoints > 0 and has_instances: dataset_dicts = filter_images_with_few_keypoints(dataset_dicts, min_keypoints) if check_consistency and has_instances: try: class_names = MetadataCatalog.get(names[0]).thing_classes check_metadata_consistency("thing_classes", names) print_instances_class_histogram(dataset_dicts, class_names) except AttributeError: # class names are not available for this dataset pass assert len(dataset_dicts), "No valid data found in {}.".format(",".join(names)) return dataset_dicts def build_batch_data_loader( dataset, sampler, total_batch_size, *, aspect_ratio_grouping=False, num_workers=0, collate_fn=None, drop_last: bool = True, **kwargs, ): """ Build a batched dataloader. The main differences from `torch.utils.data.DataLoader` are: 1. support aspect ratio grouping options 2. use no "batch collation", because this is common for detection training Args: dataset (torch.utils.data.Dataset): a pytorch map-style or iterable dataset. sampler (torch.utils.data.sampler.Sampler or None): a sampler that produces indices. Must be provided iff. ``dataset`` is a map-style dataset. total_batch_size, aspect_ratio_grouping, num_workers, collate_fn: see :func:`build_detection_train_loader`. drop_last (bool): if ``True``, the dataloader will drop incomplete batches. Returns: iterable[list]. Length of each list is the batch size of the current GPU. Each element in the list comes from the dataset. """
world_size = get_world_size()
2
2023-12-10 20:14:00+00:00
16k
ethanweber/nerfiller
nerfiller/scripts/inpaint_nerfstudio_dataset.py
[ { "identifier": "RGBInpainter", "path": "nerfiller/inpaint/rgb_inpainter.py", "snippet": "class RGBInpainter:\n \"\"\"\n Module for inpainting with the stable diffusion inpainting pipeline.\n \"\"\"\n\n def __init__(\n self,\n half_precision_weights: bool = True,\n lora_...
import json import shutil import mediapy import torch import tyro import math from pathlib import Path from nerfiller.inpaint.rgb_inpainter import RGBInpainter from nerfiller.inpaint.lama_inpainter import LaMaInpainter from nerfiller.nerf.dataset_utils import parse_nerfstudio_frame from nerfiller.utils.image_utils import get_inpainted_image_row from nerfiller.utils.camera_utils import rescale_intrinsics from nerfiller.configs.inpaint import InpaintConfig, AnnotatedBaseConfigUnion from datetime import datetime from nerfiller.utils.diff_utils import register_extended_attention from nerfiller.utils.mask_utils import downscale_mask
11,842
# Setup the modules for guidance. # multiview_metric = ReprojectMetric(lossfeatmult=1.0) # feature_extractor = SuperPointExtractor(device=config.device) # TODO: make sure feature_extractor is half precision with half_precision_weights # Copy the original dataset besides the images, which we will inpaint. output_folder = ( Path(str(config.nerfstudio_dataset) + "-" + "inpaint") / str(config.method_name) / datetime.now().strftime("%Y-%m-%d_%H%M%S") ) output_folder.mkdir(parents=True) shutil.copytree(config.nerfstudio_dataset / "images", output_folder / "original_images") shutil.copytree(config.nerfstudio_dataset / "masks", output_folder / "original_masks") shutil.copytree(config.nerfstudio_dataset / "depth", output_folder / "depth") shutil.copytree(config.nerfstudio_dataset / "masks", output_folder / "masks") shutil.copy(config.nerfstudio_dataset / "transforms.json", output_folder / "transforms.json") (output_folder / "images").mkdir(parents=True) (output_folder / "inpaint").mkdir(parents=True) f = open(config.nerfstudio_dataset / "transforms.json") transforms = json.load(f) f.close() num_images = len(transforms["frames"]) if config.randomize_image_order: indices = torch.randperm(num_images) else: indices = torch.arange(num_images) padded_num_images = config.chunk_size * math.ceil(num_images / config.chunk_size) if num_images != padded_num_images: indices = torch.cat([indices, indices[: padded_num_images - num_images]]) for i in range(0, padded_num_images - config.chunk_size + 1, config.new_size): images = [] masks = [] depths = [] Ks = [] c2ws = [] for j in range(i, i + config.chunk_size): if i == 0 or j >= (i + config.chunk_size - config.new_size): # new stuff to inpaint image, depth, mask, c2w, K = parse_nerfstudio_frame( transforms, config.nerfstudio_dataset, indices[j], depth_max=config.depth_max, device=config.device, ) else: # old stuff already inpainted image, depth, mask, c2w, K = parse_nerfstudio_frame( transforms, output_folder, indices[j], depth_max=config.depth_max, device=config.device, ) images.append(image) masks.append(mask) depths.append(depth) Ks.append(K) c2ws.append(c2w) images = torch.cat(images) masks = torch.cat(masks) depths = torch.cat(depths) Ks = torch.cat(Ks) c2ws = torch.cat(c2ws) # generator = [ # torch.Generator(device=config.device).manual_seed(int(indices[j])) for j in range(i, i + config.chunk_size) # ] generator = None image = torch.nn.functional.interpolate(images, scale_factor=config.scale_factor, mode="bilinear") depth = torch.nn.functional.interpolate(depths, scale_factor=config.scale_factor, mode="bilinear") mask = downscale_mask( masks, scale_factor=config.scale_factor, dilate_iters=config.dilate_iters, dilate_kernel_size=config.dilate_kernel_size, ) if config.method_name == "individual-lama": imagein = rgb_inpainter.get_image(image=image, mask=mask) else: enable_gradient = config.num_guidance_steps > 0 and len(config.guidance_steps) > 0 with torch.enable_grad() if enable_gradient else torch.no_grad(): imagein = rgb_inpainter.get_image( text_embeddings=text_embeddings, image=image, mask=mask, depth=depth, denoise_in_grid=config.denoise_in_grid, multidiffusion_steps=config.multidiffusion_steps, randomize_latents=config.randomize_latents, text_guidance_scale=config.text_guidance_scale, image_guidance_scale=config.image_guidance_scale, num_inference_steps=config.num_inference_steps, num_guidance_steps=config.num_guidance_steps, classifier_guidance_scale=config.classifier_guidance_scale, guidance_steps=config.guidance_steps, 
multiview_guidance_scale=config.multiview_guidance_scale, K=rescale_intrinsics(Ks, config.scale_factor, config.scale_factor), c2w=c2ws, output_folder=output_folder / "inpaint" / f"{i:06d}-{i+config.chunk_size:06d}" if config.save_intermediates else None, show_multiview=False, generator=generator, use_decoder_approximation=config.use_decoder_approximation, ) # TODO: use an upscaler here imagein = torch.nn.functional.interpolate(imagein, scale_factor=1 / config.scale_factor, mode="bilinear") imagein = torch.where(masks == 1, imagein, images)
def main( config: InpaintConfig, ): """ Inpaint a Nerfstudio dataset where the masks == 0. """ if config.method_name == "individual-lama": rgb_inpainter = LaMaInpainter(device=config.device, model_path=Path("data/models/big-lama")) else: # Load the inpainting module. rgb_inpainter = RGBInpainter( half_precision_weights=config.half_precision_weights, lora_model_path=config.lora_model_path, device=config.device, vae_device=config.vae_device, ) if config.text_guidance_scale != 0.0: assert config.prompt != "", "You need to set an actual prompt to use this method." # Process the text prompts. text_embeddings = rgb_inpainter.compute_text_embeddings(config.prompt, config.negative_prompt) if config.use_expanded_attention: register_extended_attention(rgb_inpainter.unet) # Setup the modules for guidance. # multiview_metric = ReprojectMetric(lossfeatmult=1.0) # feature_extractor = SuperPointExtractor(device=config.device) # TODO: make sure feature_extractor is half precision with half_precision_weights # Copy the original dataset besides the images, which we will inpaint. output_folder = ( Path(str(config.nerfstudio_dataset) + "-" + "inpaint") / str(config.method_name) / datetime.now().strftime("%Y-%m-%d_%H%M%S") ) output_folder.mkdir(parents=True) shutil.copytree(config.nerfstudio_dataset / "images", output_folder / "original_images") shutil.copytree(config.nerfstudio_dataset / "masks", output_folder / "original_masks") shutil.copytree(config.nerfstudio_dataset / "depth", output_folder / "depth") shutil.copytree(config.nerfstudio_dataset / "masks", output_folder / "masks") shutil.copy(config.nerfstudio_dataset / "transforms.json", output_folder / "transforms.json") (output_folder / "images").mkdir(parents=True) (output_folder / "inpaint").mkdir(parents=True) f = open(config.nerfstudio_dataset / "transforms.json") transforms = json.load(f) f.close() num_images = len(transforms["frames"]) if config.randomize_image_order: indices = torch.randperm(num_images) else: indices = torch.arange(num_images) padded_num_images = config.chunk_size * math.ceil(num_images / config.chunk_size) if num_images != padded_num_images: indices = torch.cat([indices, indices[: padded_num_images - num_images]]) for i in range(0, padded_num_images - config.chunk_size + 1, config.new_size): images = [] masks = [] depths = [] Ks = [] c2ws = [] for j in range(i, i + config.chunk_size): if i == 0 or j >= (i + config.chunk_size - config.new_size): # new stuff to inpaint image, depth, mask, c2w, K = parse_nerfstudio_frame( transforms, config.nerfstudio_dataset, indices[j], depth_max=config.depth_max, device=config.device, ) else: # old stuff already inpainted image, depth, mask, c2w, K = parse_nerfstudio_frame( transforms, output_folder, indices[j], depth_max=config.depth_max, device=config.device, ) images.append(image) masks.append(mask) depths.append(depth) Ks.append(K) c2ws.append(c2w) images = torch.cat(images) masks = torch.cat(masks) depths = torch.cat(depths) Ks = torch.cat(Ks) c2ws = torch.cat(c2ws) # generator = [ # torch.Generator(device=config.device).manual_seed(int(indices[j])) for j in range(i, i + config.chunk_size) # ] generator = None image = torch.nn.functional.interpolate(images, scale_factor=config.scale_factor, mode="bilinear") depth = torch.nn.functional.interpolate(depths, scale_factor=config.scale_factor, mode="bilinear") mask = downscale_mask( masks, scale_factor=config.scale_factor, dilate_iters=config.dilate_iters, dilate_kernel_size=config.dilate_kernel_size, ) if config.method_name == "individual-lama": 
imagein = rgb_inpainter.get_image(image=image, mask=mask) else: enable_gradient = config.num_guidance_steps > 0 and len(config.guidance_steps) > 0 with torch.enable_grad() if enable_gradient else torch.no_grad(): imagein = rgb_inpainter.get_image( text_embeddings=text_embeddings, image=image, mask=mask, depth=depth, denoise_in_grid=config.denoise_in_grid, multidiffusion_steps=config.multidiffusion_steps, randomize_latents=config.randomize_latents, text_guidance_scale=config.text_guidance_scale, image_guidance_scale=config.image_guidance_scale, num_inference_steps=config.num_inference_steps, num_guidance_steps=config.num_guidance_steps, classifier_guidance_scale=config.classifier_guidance_scale, guidance_steps=config.guidance_steps, multiview_guidance_scale=config.multiview_guidance_scale, K=rescale_intrinsics(Ks, config.scale_factor, config.scale_factor), c2w=c2ws, output_folder=output_folder / "inpaint" / f"{i:06d}-{i+config.chunk_size:06d}" if config.save_intermediates else None, show_multiview=False, generator=generator, use_decoder_approximation=config.use_decoder_approximation, ) # TODO: use an upscaler here imagein = torch.nn.functional.interpolate(imagein, scale_factor=1 / config.scale_factor, mode="bilinear") imagein = torch.where(masks == 1, imagein, images)
imageinrow = get_inpainted_image_row(image=images, mask=masks, inpainted_image=imagein, show_original=True)
3
2023-12-07 19:12:08+00:00
16k
nnanhuang/Customize-it-3D
ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n ...
import torch import torch.nn as nn import numpy as np import pytorch_lightning as pl import itertools from torch.optim.lr_scheduler import LambdaLR from einops import rearrange, repeat from contextlib import contextmanager, nullcontext from functools import partial from tqdm import tqdm from torchvision.utils import make_grid from pytorch_lightning.utilities.rank_zero import rank_zero_only from omegaconf import ListConfig from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config from ldm.modules.ema import LitEma from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like from ldm.models.diffusion.ddim import DDIMSampler from ldm.modules.attention import CrossAttention
12,358
force_c_encode=True, return_original_cond=True, bs=N) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption", "txt"]: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2]//25) log["conditioning"] = xc elif self.cond_stage_key == 'class_label': xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2]//25) log['conditioning'] = xc elif isimage(xc): log["conditioning"] = xc if ismap(xc): log["original_conditioning"] = self.to_rgb(xc) if plot_diffusion_rows: # get diffusion row diffusion_row = list() z_start = z[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(z_start) z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) diffusion_row.append(self.decode_first_stage(z_noisy)) diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w') diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w') diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) log["diffusion_row"] = diffusion_grid if sample: # get denoise row with ema_scope("Sampling"): samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, ddim_steps=ddim_steps,eta=ddim_eta) # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) x_samples = self.decode_first_stage(samples) log["samples"] = x_samples if plot_denoise_rows: denoise_grid = self._get_denoise_row_from_list(z_denoise_row) log["denoise_row"] = denoise_grid if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance( self.first_stage_model, IdentityFirstStage): # also display when quantizing x0 while sampling with ema_scope("Plotting Quantized Denoised"): samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, ddim_steps=ddim_steps,eta=ddim_eta, quantize_denoised=True) # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True, # quantize_denoised=True) x_samples = self.decode_first_stage(samples.to(self.device)) log["samples_x0_quantized"] = x_samples if unconditional_guidance_scale > 1.0: uc = self.get_unconditional_conditioning(N, unconditional_guidance_label, image_size=x.shape[-1]) # uc = torch.zeros_like(c) with ema_scope("Sampling with classifier-free guidance"): samples_cfg, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, ddim_steps=ddim_steps, eta=ddim_eta, unconditional_guidance_scale=unconditional_guidance_scale, unconditional_conditioning=uc, ) x_samples_cfg = self.decode_first_stage(samples_cfg) log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg if inpaint: # make a simple center square b, h, w = z.shape[0], z.shape[2], z.shape[3] mask = torch.ones(N, h, w).to(self.device) # zeros will be filled in mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0. mask = mask[:, None, ...] 
with ema_scope("Plotting Inpaint"): samples, _ = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, eta=ddim_eta, ddim_steps=ddim_steps, x0=z[:N], mask=mask) x_samples = self.decode_first_stage(samples.to(self.device)) log["samples_inpainting"] = x_samples log["mask"] = mask # outpaint mask = 1. - mask with ema_scope("Plotting Outpaint"): samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,eta=ddim_eta, ddim_steps=ddim_steps, x0=z[:N], mask=mask) x_samples = self.decode_first_stage(samples.to(self.device)) log["samples_outpainting"] = x_samples if plot_progressive_rows: with ema_scope("Plotting Progressives"): img, progressives = self.progressive_denoising(c, shape=(self.channels, self.image_size, self.image_size), batch_size=N) prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation") log["progressive_row"] = prog_row if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = [] if self.unet_trainable == "attn": print("Training only unet attention layers") for n, m in self.model.named_modules():
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., make_it_fit=False, ucg_training=None, ): super().__init__() assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. 
- betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) else: raise NotImplementedError("mu not supported") # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) if self.make_it_fit: n_params = len([name for name, _ in itertools.chain(self.named_parameters(), self.named_buffers())]) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape)==len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. 
""" mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) def get_loss(self, pred, target, mean=True): if self.loss_type == 'l1': loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == 'l2': if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction='none') else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif 
self.parameterization == "x0": target = x_start else: raise NotImplementedError(f"Paramterization {self.parameterization} not yet supported") loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = 'train' if self.training else 'val' loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f'{log_prefix}/loss': loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: x = x[..., None] x = rearrange(x, 'b h w c -> b c h w') x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def training_step(self, batch, batch_idx): for k in self.ucg_training: p = self.ucg_training[k]["p"] val = self.ucg_training[k]["val"] if val is None: val = "" for i in range(len(batch[k])): if self.ucg_prng.choice(2, p=[1-p, p]): batch[k][i] = val loss, loss_dict = self.shared_step(batch) self.log_dict(loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True) self.log("global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False) if self.use_scheduler: lr = self.optimizers().param_groups[0]['lr'] self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) log["samples"] = samples log["denoise_row"] = 
self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__(self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, unet_trainable=True, *args, **kwargs): self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs['timesteps'] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = 'concat' if concat_mode else 'crossattn' if cond_stage_config == '__is_unconditional__': conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.unet_trainable = unet_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer('scale_factor', torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward # construct linear projection layer for concatenating image CLIP embedding and RT self.cc_projection = nn.Linear(772, 768) nn.init.eye_(list(self.cc_projection.parameters())[0][:768, :768]) nn.init.zeros_(list(self.cc_projection.parameters())[1]) self.cc_projection.requires_grad_(True) self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True def make_cond_schedule(self, ): self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long) ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() self.cond_ids[:self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt: assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously' # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer('scale_factor', 1. 
/ z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None # self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != '__is_first_stage__' assert config != '__is_unconditional__' model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append(self.decode_first_stage(zd.to(self.device), force_not_quantize=force_no_decoder_quantization)) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior else: raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented") return self.scale_factor * z def get_learned_conditioning(self, c): if self.cond_stage_forward is None: if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode): c = self.cond_stage_model.encode(c) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() else: c = self.cond_stage_model(c) else: assert hasattr(self.cond_stage_model, self.cond_stage_forward) c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) return c def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height :param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0] return edge_dist def get_weighting(self, h, w, Ly, Lx, 
device): weighting = self.delta_border(h, w) weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"], self.split_input_params["clip_max_weight"], ) weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params["tie_braker"]: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip(L_weighting, self.split_input_params["clip_min_tie_weight"], self.split_input_params["clip_max_tie_weight"]) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf)) fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2) weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)) elif df > 1 and uf == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df), dilation=1, padding=0, stride=(stride[0] // df, stride[1] // df)) fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2) weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)) else: raise NotImplementedError return fold, unfold, normalization, weighting @torch.no_grad() def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None, uncond=0.05): x = super().get_input(batch, k) T = batch['T'].to(memory_format=torch.contiguous_format).float() if bs is not None: x = x[:bs] T = T[:bs].to(self.device) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() cond_key = cond_key or self.cond_stage_key xc = super().get_input(batch, cond_key).to(self.device) if bs is not None: xc = xc[:bs] cond = {} # To support classifier-free guidance, randomly drop out only text conditioning 5%, only image conditioning 5%, and both 5%. 
random = torch.rand(x.size(0), device=x.device) prompt_mask = rearrange(random < 2 * uncond, "n -> n 1 1") input_mask = 1 - rearrange((random >= uncond).float() * (random < 3 * uncond).float(), "n -> n 1 1 1") null_prompt = self.get_learned_conditioning([""]) # z.shape: [8, 4, 64, 64]; c.shape: [8, 1, 768] # print('=========== xc shape ===========', xc.shape) with torch.enable_grad(): clip_emb = self.get_learned_conditioning(xc).detach() null_prompt = self.get_learned_conditioning([""]).detach() cond["c_crossattn"] = [self.cc_projection(torch.cat([torch.where(prompt_mask, null_prompt, clip_emb), T[:, None, :]], dim=-1))] cond["c_concat"] = [input_mask * self.encode_first_stage((xc.to(self.device))).mode().detach()] out = [z, cond] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_original_cond: out.append(xc) return out # @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) uf = self.split_input_params["vqf"] bs, nc, h, w = z.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) # 2. apply model loop over last dim if isinstance(self.first_stage_model, VQModelInterface): output_list = [self.first_stage_model.decode(z[:, :, :, :, i], force_not_quantize=predict_cids or force_not_quantize) for i in range(z.shape[-1])] else: output_list = [self.first_stage_model.decode(z[:, :, :, :, i]) for i in range(z.shape[-1])] o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) o = o * weighting # Reverse 1. reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization # norm is shape (1, 1, h, w) return decoded else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) # @torch.no_grad() # wasted two hours to find this bug... why no grad here! def encode_first_stage(self, x): if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. 
(64, 64) df = self.split_input_params["vqf"] self.split_input_params['original_image_size'] = x.shape[-2:] bs, nc, h, w = x.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold(x, ks, stride, df=df) z = unfold(x) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) output_list = [self.first_stage_model.encode(z[:, :, :, :, i]) for i in range(z.shape[-1])] o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization return decoded else: return self.first_stage_model.encode(x) else: return self.first_stage_model.encode(x) def shared_step(self, batch, **kwargs): x, c = self.get_input(batch, self.first_stage_key) loss = self(x, c) return loss def forward(self, x, c, *args, **kwargs): t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() if self.model.conditioning_key is not None: assert c is not None # if self.cond_stage_trainable: # c = self.get_learned_conditioning(c) if self.shorten_cond_schedule: # TODO: drop this option tc = self.cond_ids[t].to(self.device) c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) return self.p_losses(x, c, t, *args, **kwargs) def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset def rescale_bbox(bbox): x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2]) y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3]) w = min(bbox[2] / crop_coordinates[2], 1 - x0) h = min(bbox[3] / crop_coordinates[3], 1 - y0) return x0, y0, w, h return [rescale_bbox(b) for b in bboxes] def apply_model(self, x_noisy, t, cond, return_ids=False): if isinstance(cond, dict): # hybrid case, cond is exptected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn' cond = {key: cond} if hasattr(self, "split_input_params"): assert len(cond) == 1 # todo can only deal with one conditioning atm assert not return_ids ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. 
(64, 64) h, w = x_noisy.shape[-2:] fold, unfold, normalization, weighting = self.get_fold_unfold(x_noisy, ks, stride) z = unfold(x_noisy) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])] if self.cond_stage_key in ["image", "LR_image", "segmentation", 'bbox_img'] and self.model.conditioning_key: # todo check for completeness c_key = next(iter(cond.keys())) # get key c = next(iter(cond.values())) # get value assert (len(c) == 1) # todo extend to list with more than one elem c = c[0] # get element c = unfold(c) c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1])) # (bn, nc, ks[0], ks[1], L ) cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])] elif self.cond_stage_key == 'coordinates_bbox': assert 'original_image_size' in self.split_input_params, 'BoudingBoxRescaling is missing original_image_size' # assuming padding of unfold is always 0 and its dilation is always 1 n_patches_per_row = int((w - ks[0]) / stride[0] + 1) full_img_h, full_img_w = self.split_input_params['original_image_size'] # as we are operating on latents, we need the factor from the original image size to the # spatial latent size to properly rescale the crops for regenerating the bbox annotations num_downs = self.first_stage_model.encoder.num_resolutions - 1 rescale_latent = 2 ** (num_downs) # get top left postions of patches as conforming for the bbbox tokenizer, therefore we # need to rescale the tl patch coordinates to be in between (0,1) tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w, rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h) for patch_nr in range(z.shape[-1])] # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w) patch_limits = [(x_tl, y_tl, rescale_latent * ks[0] / full_img_w, rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates] # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates] # tokenize crop coordinates for the bounding boxes of the respective patches patch_limits_tknzd = [torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to(self.device) for bbox in patch_limits] # list of length l with tensors of shape (1, 2) # cut tknzd crop position from conditioning assert isinstance(cond, dict), 'cond must be dict to be fed into model' cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device) adapted_cond = torch.stack([torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd]) adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n') adapted_cond = self.get_learned_conditioning(adapted_cond) adapted_cond = rearrange(adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1]) cond_list = [{'c_crossattn': [e]} for e in adapted_cond] else: cond_list = [cond for i in range(z.shape[-1])] # Todo make this more efficient # apply model by loop over crops output_list = [self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1])] assert not isinstance(output_list[0], tuple) # todo cant deal with multiple model outputs check this never happens o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together x_recon = fold(o) / normalization else: x_recon = self.model(x_noisy, t, **cond) if isinstance(x_recon, tuple) and not 
return_ids: return x_recon[0] else: return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. """ batch_size = x_start.shape[0] t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0) return mean_flat(kl_prior) / np.log(2.0) def p_losses(self, x_start, cond, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_output = self.apply_model(x_noisy, t, cond) loss_dict = {} prefix = 'train' if self.training else 'val' if self.parameterization == "x0": target = x_start elif self.parameterization == "eps": target = noise else: raise NotImplementedError() loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3]) loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()}) logvar_t = self.logvar[t].to(self.device) loss = loss_simple / torch.exp(logvar_t) + logvar_t # loss = loss_simple / torch.exp(self.logvar) + self.logvar if self.learn_logvar: loss_dict.update({f'{prefix}/loss_gamma': loss.mean()}) loss_dict.update({'logvar': self.logvar.data.mean()}) loss = self.l_simple_weight * loss.mean() loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3)) loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() loss_dict.update({f'{prefix}/loss_vlb': loss_vlb}) loss += (self.original_elbo_weight * loss_vlb) loss_dict.update({f'{prefix}/loss': loss}) return loss, loss_dict def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False, return_x0=False, score_corrector=None, corrector_kwargs=None): t_in = t model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) if score_corrector is not None: assert self.parameterization == "eps" model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs) if return_codebook_ids: model_out, logits = model_out if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out else: raise NotImplementedError() if clip_denoised: x_recon.clamp_(-1., 1.) 
if quantize_denoised: x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) if return_codebook_ids: return model_mean, posterior_variance, posterior_log_variance, logits elif return_x0: return model_mean, posterior_variance, posterior_log_variance, x_recon else: return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False, return_codebook_ids=False, quantize_denoised=False, return_x0=False, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None): b, *_, device = *x.shape, x.device outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised, return_codebook_ids=return_codebook_ids, quantize_denoised=quantize_denoised, return_x0=return_x0, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if return_codebook_ids: raise DeprecationWarning("Support dropped.") model_mean, _, model_log_variance, logits = outputs elif return_x0: model_mean, _, model_log_variance, x0 = outputs else: model_mean, _, model_log_variance = outputs noise = noise_like(x.shape, device, repeat_noise) * temperature if noise_dropout > 0.: noise = torch.nn.functional.dropout(noise, p=noise_dropout) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) if return_codebook_ids: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1) if return_x0: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0 else: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False, img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t timesteps = self.num_timesteps if batch_size is not None: b = batch_size if batch_size is not None else shape[0] shape = [batch_size] + list(shape) else: b = batch_size = shape[0] if x_T is None: img = torch.randn(shape, device=self.device) else: img = x_T intermediates = [] if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation', total=timesteps) if verbose else reversed( range(0, timesteps)) if type(temperature) == float: temperature = [temperature] * timesteps for i in iterator: ts = torch.full((b,), i, device=self.device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img, x0_partial = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised, return_x0=True, temperature=temperature[i], noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if mask is not None: assert x0 is not None img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. 
- mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(x0_partial) if callback: callback(i) if img_callback: img_callback(img, i) return img, intermediates @torch.no_grad() def p_sample_loop(self, cond, shape, return_intermediates=False, x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t device = self.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T intermediates = [img] if timesteps is None: timesteps = self.num_timesteps if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( range(0, timesteps)) if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None,**kwargs): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0) @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): if ddim: ddim_sampler = DDIMSampler(self) shape = (self.channels, self.image_size, self.image_size) samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size, shape, cond, verbose=False, **kwargs) else: samples, intermediates = self.sample(cond=cond, batch_size=batch_size, return_intermediates=True, **kwargs) return samples, intermediates @torch.no_grad() def get_unconditional_conditioning(self, batch_size, null_label=None, image_size=512): if null_label is not None: xc = null_label if isinstance(xc, ListConfig): xc = list(xc) if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: if hasattr(xc, "to"): xc = xc.to(self.device) c = self.get_learned_conditioning(xc) else: # todo: get null label from cond_stage_model raise NotImplementedError() c = repeat(c, '1 ... 
-> b ...', b=batch_size).to(self.device) cond = {} cond["c_crossattn"] = [c] cond["c_concat"] = [torch.zeros([batch_size, 4, image_size // 8, image_size // 8]).to(self.device)] return cond @torch.no_grad() def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None, use_ema_scope=True, **kwargs): ema_scope = self.ema_scope if use_ema_scope else nullcontext use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption", "txt"]: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2]//25) log["conditioning"] = xc elif self.cond_stage_key == 'class_label': xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2]//25) log['conditioning'] = xc elif isimage(xc): log["conditioning"] = xc if ismap(xc): log["original_conditioning"] = self.to_rgb(xc) if plot_diffusion_rows: # get diffusion row diffusion_row = list() z_start = z[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(z_start) z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) diffusion_row.append(self.decode_first_stage(z_noisy)) diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w') diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w') diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) log["diffusion_row"] = diffusion_grid if sample: # get denoise row with ema_scope("Sampling"): samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, ddim_steps=ddim_steps,eta=ddim_eta) # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) x_samples = self.decode_first_stage(samples) log["samples"] = x_samples if plot_denoise_rows: denoise_grid = self._get_denoise_row_from_list(z_denoise_row) log["denoise_row"] = denoise_grid if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance( self.first_stage_model, IdentityFirstStage): # also display when quantizing x0 while sampling with ema_scope("Plotting Quantized Denoised"): samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, ddim_steps=ddim_steps,eta=ddim_eta, quantize_denoised=True) # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True, # quantize_denoised=True) x_samples = self.decode_first_stage(samples.to(self.device)) log["samples_x0_quantized"] = x_samples if unconditional_guidance_scale > 1.0: uc = self.get_unconditional_conditioning(N, unconditional_guidance_label, image_size=x.shape[-1]) # uc = torch.zeros_like(c) with ema_scope("Sampling with classifier-free guidance"): samples_cfg, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, ddim_steps=ddim_steps, 
eta=ddim_eta, unconditional_guidance_scale=unconditional_guidance_scale, unconditional_conditioning=uc, ) x_samples_cfg = self.decode_first_stage(samples_cfg) log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg if inpaint: # make a simple center square b, h, w = z.shape[0], z.shape[2], z.shape[3] mask = torch.ones(N, h, w).to(self.device) # zeros will be filled in mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0. mask = mask[:, None, ...] with ema_scope("Plotting Inpaint"): samples, _ = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, eta=ddim_eta, ddim_steps=ddim_steps, x0=z[:N], mask=mask) x_samples = self.decode_first_stage(samples.to(self.device)) log["samples_inpainting"] = x_samples log["mask"] = mask # outpaint mask = 1. - mask with ema_scope("Plotting Outpaint"): samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,eta=ddim_eta, ddim_steps=ddim_steps, x0=z[:N], mask=mask) x_samples = self.decode_first_stage(samples.to(self.device)) log["samples_outpainting"] = x_samples if plot_progressive_rows: with ema_scope("Plotting Progressives"): img, progressives = self.progressive_denoising(c, shape=(self.channels, self.image_size, self.image_size), batch_size=N) prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation") log["progressive_row"] = prog_row if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = [] if self.unet_trainable == "attn": print("Training only unet attention layers") for n, m in self.model.named_modules():
if isinstance(m, CrossAttention) and n.endswith('attn2'):
18
2023-12-14 11:03:35+00:00
16k
mkang315/ASF-YOLO
segment/predict.py
[ { "identifier": "DetectMultiBackend", "path": "models/common.py", "snippet": "class DetectMultiBackend(nn.Module):\n # YOLOv5 MultiBackend class for python inference on various backends\n def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, data=None, fp16=False, fuse=Tr...
import argparse
import os
import platform
import sys
import time
import torch
from pathlib import Path

from models.common import DetectMultiBackend
from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams
from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2, increment_path, non_max_suppression, print_args, scale_boxes, scale_segments, strip_optimizer, xyxy2xywh)
from utils.plots import Annotator, colors, save_one_box
from utils.segment.general import masks2segments, process_mask, process_mask_native
from utils.torch_utils import select_device, smart_inference_mode
14,309
# Run inference model.warmup(imgsz=(1 if pt else bs, 3, *imgsz)) # warmup seen, windows, dt = 0, [], (Profile(), Profile(), Profile()) for path, im, im0s, vid_cap, s in dataset: with dt[0]: im = torch.from_numpy(im).to(model.device) im = im.half() if model.fp16 else im.float() # uint8 to fp16/32 im /= 255 # 0 - 255 to 0.0 - 1.0 if len(im.shape) == 3: im = im[None] # expand for batch dim # Inference with dt[1]: act = time.time() visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False pred, proto = model(im, augment=augment, visualize=visualize)[:2] print('time.time():',time.time()-act) # NMS with dt[2]: pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det, nm=32) # Second-stage classifier (optional) # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s) # Process predictions for i, det in enumerate(pred): # per image seen += 1 if webcam: # batch_size >= 1 p, im0, frame = path[i], im0s[i].copy(), dataset.count s += f'{i}: ' else: p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0) p = Path(p) # to Path save_path = str(save_dir / p.name) # im.jpg txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt s += '%gx%g ' % im.shape[2:] # print string imc = im0.copy() if save_crop else im0 # for save_crop annotator = Annotator(im0, line_width=line_thickness, example=str(names)) if len(det): if retina_masks: # scale bbox first the crop masks det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() # rescale boxes to im0 size masks = process_mask_native(proto[i], det[:, 6:], det[:, :4], im0.shape[:2]) # HWC else: masks = process_mask(proto[i], det[:, 6:], det[:, :4], im.shape[2:], upsample=True) # HWC det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() # rescale boxes to im0 size # Segments if save_txt: segments = reversed(masks2segments(masks)) segments = [ scale_segments(im0.shape if retina_masks else im.shape[2:], x, im0.shape, normalize=True) for x in segments] # Print results for c in det[:, 5].unique(): n = (det[:, 5] == c).sum() # detections per class s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string # Mask plotting plot_img = torch.as_tensor(im0, dtype=torch.float16).to(device).permute(2, 0, 1).flip(0).contiguous() / 255. 
\ if retina_masks else im[i] annotator.masks(masks, colors=[colors(x, True) for x in det[:, 5]], im_gpu=plot_img) # Write results for j, (*xyxy, conf, cls) in enumerate(reversed(det[:, :6])): if save_txt: # Write to file segj = segments[j].reshape(-1) # (n,2) to (n*2) line = (cls, *segj, conf) if save_conf else (cls, *segj) # label format with open(f'{txt_path}.txt', 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') if save_img or save_crop or view_img: # Add bbox to image c = int(cls) # integer class label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') annotator.box_label(xyxy, label, color=colors(c, True)) # annotator.draw.polygon(segments[j], outline=colors(c, True), width=3) if save_crop: save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) # Stream results im0 = annotator.result() if view_img: if platform.system() == 'Linux' and p not in windows: windows.append(p) cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO) # allow window resize (Linux) cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0]) cv2.imshow(str(p), im0) if cv2.waitKey(1) == ord('q'): # 1 millisecond exit() # Save results (image with detections) if save_img: if dataset.mode == 'image': cv2.imwrite(save_path, im0) else: # 'video' or 'stream' if vid_path[i] != save_path: # new video vid_path[i] = save_path if isinstance(vid_writer[i], cv2.VideoWriter): vid_writer[i].release() # release previous video writer if vid_cap: # video fps = vid_cap.get(cv2.CAP_PROP_FPS) w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) else: # stream fps, w, h = 30, im0.shape[1], im0.shape[0] save_path = str(Path(save_path).with_suffix('.mp4')) # force *.mp4 suffix on results videos vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) vid_writer[i].write(im0) # Print time (inference-only) LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms") # Print results t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) if save_txt or save_img: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ Run YOLOv5 segmentation inference on images, videos, directories, streams, etc. Usage - sources: $ python segment/predict.py --weights yolov5s-seg.pt --source 0 # webcam img.jpg # image vid.mp4 # video screen # screenshot path/ # directory 'path/*.jpg' # glob 'https://youtu.be/Zgi9g1ksQHc' # YouTube 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream Usage - formats: $ python segment/predict.py --weights yolov5s-seg.pt # PyTorch yolov5s-seg.torchscript # TorchScript yolov5s-seg.onnx # ONNX Runtime or OpenCV DNN with --dnn yolov5s-seg_openvino_model # OpenVINO yolov5s-seg.engine # TensorRT yolov5s-seg.mlmodel # CoreML (macOS-only) yolov5s-seg_saved_model # TensorFlow SavedModel yolov5s-seg.pb # TensorFlow GraphDef yolov5s-seg.tflite # TensorFlow Lite yolov5s-seg_edgetpu.tflite # TensorFlow Edge TPU yolov5s-seg_paddle_model # PaddlePaddle """ FILE = Path(__file__).resolve() ROOT = FILE.parents[1] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative @smart_inference_mode() def run( weights=ROOT / 'yolov5s-seg.pt', # model.pt path(s) source=ROOT / 'data/images', # file/dir/URL/glob/screen/0(webcam) data=ROOT / 'data/coco128.yaml', # dataset.yaml path imgsz=(640, 640), # inference size (height, width) conf_thres=0.25, # confidence threshold iou_thres=0.45, # NMS IOU threshold max_det=1000, # maximum detections per image device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu view_img=False, # show results save_txt=False, # save results to *.txt save_conf=False, # save confidences in --save-txt labels save_crop=False, # save cropped prediction boxes nosave=False, # do not save images/videos classes=None, # filter by class: --class 0, or --class 0 2 3 agnostic_nms=False, # class-agnostic NMS augment=False, # augmented inference visualize=False, # visualize features update=False, # update all models project=ROOT / 'runs/predict-seg', # save results to project/name name='exp', # save results to project/name exist_ok=False, # existing project/name ok, do not increment line_thickness=3, # bounding box thickness (pixels) hide_labels=False, # hide labels hide_conf=False, # hide confidences half=False, # use FP16 half-precision inference dnn=False, # use OpenCV DNN for ONNX inference vid_stride=1, # video frame-rate stride retina_masks=False, ): source = str(source) save_img = not nosave and not source.endswith('.txt') # save inference images is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file) screenshot = source.lower().startswith('screen') if is_url and is_file: source = check_file(source) # download # Directories save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model device = select_device(device) model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) stride, names, pt = model.stride, model.names, model.pt imgsz = check_img_size(imgsz, s=stride) # check image size # Dataloader bs = 1 # batch_size if webcam: view_img = check_imshow(warn=True) dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) bs = len(dataset) elif screenshot: dataset = LoadScreenshots(source, img_size=imgsz, 
stride=stride, auto=pt) else: dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) vid_path, vid_writer = [None] * bs, [None] * bs # Run inference model.warmup(imgsz=(1 if pt else bs, 3, *imgsz)) # warmup seen, windows, dt = 0, [], (Profile(), Profile(), Profile()) for path, im, im0s, vid_cap, s in dataset: with dt[0]: im = torch.from_numpy(im).to(model.device) im = im.half() if model.fp16 else im.float() # uint8 to fp16/32 im /= 255 # 0 - 255 to 0.0 - 1.0 if len(im.shape) == 3: im = im[None] # expand for batch dim # Inference with dt[1]: act = time.time() visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False pred, proto = model(im, augment=augment, visualize=visualize)[:2] print('time.time():',time.time()-act) # NMS with dt[2]: pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det, nm=32) # Second-stage classifier (optional) # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s) # Process predictions for i, det in enumerate(pred): # per image seen += 1 if webcam: # batch_size >= 1 p, im0, frame = path[i], im0s[i].copy(), dataset.count s += f'{i}: ' else: p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0) p = Path(p) # to Path save_path = str(save_dir / p.name) # im.jpg txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt s += '%gx%g ' % im.shape[2:] # print string imc = im0.copy() if save_crop else im0 # for save_crop annotator = Annotator(im0, line_width=line_thickness, example=str(names)) if len(det): if retina_masks: # scale bbox first the crop masks det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() # rescale boxes to im0 size masks = process_mask_native(proto[i], det[:, 6:], det[:, :4], im0.shape[:2]) # HWC else: masks = process_mask(proto[i], det[:, 6:], det[:, :4], im.shape[2:], upsample=True) # HWC det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() # rescale boxes to im0 size # Segments if save_txt: segments = reversed(masks2segments(masks)) segments = [ scale_segments(im0.shape if retina_masks else im.shape[2:], x, im0.shape, normalize=True) for x in segments] # Print results for c in det[:, 5].unique(): n = (det[:, 5] == c).sum() # detections per class s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string # Mask plotting plot_img = torch.as_tensor(im0, dtype=torch.float16).to(device).permute(2, 0, 1).flip(0).contiguous() / 255. 
\ if retina_masks else im[i] annotator.masks(masks, colors=[colors(x, True) for x in det[:, 5]], im_gpu=plot_img) # Write results for j, (*xyxy, conf, cls) in enumerate(reversed(det[:, :6])): if save_txt: # Write to file segj = segments[j].reshape(-1) # (n,2) to (n*2) line = (cls, *segj, conf) if save_conf else (cls, *segj) # label format with open(f'{txt_path}.txt', 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') if save_img or save_crop or view_img: # Add bbox to image c = int(cls) # integer class label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') annotator.box_label(xyxy, label, color=colors(c, True)) # annotator.draw.polygon(segments[j], outline=colors(c, True), width=3) if save_crop: save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) # Stream results im0 = annotator.result() if view_img: if platform.system() == 'Linux' and p not in windows: windows.append(p) cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO) # allow window resize (Linux) cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0]) cv2.imshow(str(p), im0) if cv2.waitKey(1) == ord('q'): # 1 millisecond exit() # Save results (image with detections) if save_img: if dataset.mode == 'image': cv2.imwrite(save_path, im0) else: # 'video' or 'stream' if vid_path[i] != save_path: # new video vid_path[i] = save_path if isinstance(vid_writer[i], cv2.VideoWriter): vid_writer[i].release() # release previous video writer if vid_cap: # video fps = vid_cap.get(cv2.CAP_PROP_FPS) w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) else: # stream fps, w, h = 30, im0.shape[1], im0.shape[0] save_path = str(Path(save_path).with_suffix('.mp4')) # force *.mp4 suffix on results videos vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) vid_writer[i].write(im0) # Print time (inference-only) LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms") # Print results t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) if save_txt or save_img: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
6
2023-12-10 14:18:29+00:00
16k
youngskkim/CRN
models/camera_radar_net_det.py
[ { "identifier": "BaseBEVDepth", "path": "models/base_bev_depth.py", "snippet": "class BaseBEVDepth(nn.Module):\n \"\"\"Source code of `BEVDepth`, `https://arxiv.org/abs/2112.11790`.\n\n Args:\n backbone_conf (dict): Config of backbone.\n head_conf (dict): Config of head.\n \"\"\"\...
import mmcv

from models.base_bev_depth import BaseBEVDepth
from layers.backbones.rvt_lss_fpn import RVTLSSFPN
from layers.backbones.pts_backbone import PtsBackbone
from layers.fuser.multimodal_feature_aggregation import MFAFuser
from layers.heads.bev_depth_head_det import BEVDepthHead
11,825
logger = mmcv.utils.get_logger('mmdet')
logger.setLevel('WARNING')

__all__ = ['CameraRadarNetDet']


class CameraRadarNetDet(BaseBEVDepth):
    """Source code of `CRN`, `https://arxiv.org/abs/2304.00670`.

    Args:
        backbone_img_conf (dict): Config of image backbone.
        backbone_pts_conf (dict): Config of point backbone.
        fuser_conf (dict): Config of BEV feature fuser.
        head_conf (dict): Config of head.
    """

    def __init__(self,
                 backbone_img_conf,
                 backbone_pts_conf,
                 fuser_conf,
                 head_conf):
        super(BaseBEVDepth, self).__init__()
        self.backbone_img = RVTLSSFPN(**backbone_img_conf)
        self.backbone_pts = PtsBackbone(**backbone_pts_conf)
logger = mmcv.utils.get_logger('mmdet')
logger.setLevel('WARNING')

__all__ = ['CameraRadarNetDet']


class CameraRadarNetDet(BaseBEVDepth):
    """Source code of `CRN`, `https://arxiv.org/abs/2304.00670`.

    Args:
        backbone_img_conf (dict): Config of image backbone.
        backbone_pts_conf (dict): Config of point backbone.
        fuser_conf (dict): Config of BEV feature fuser.
        head_conf (dict): Config of head.
    """

    def __init__(self,
                 backbone_img_conf,
                 backbone_pts_conf,
                 fuser_conf,
                 head_conf):
        super(BaseBEVDepth, self).__init__()
        self.backbone_img = RVTLSSFPN(**backbone_img_conf)
        self.backbone_pts = PtsBackbone(**backbone_pts_conf)
self.fuser = MFAFuser(**fuser_conf)
3
2023-12-06 14:57:49+00:00
16k
LIU-Yuxin/SyncMVD
src/pipeline.py
[ { "identifier": "UVProjection", "path": "src/renderer/project.py", "snippet": "class UVProjection():\n\tdef __init__(self, texture_size=96, render_size=64, sampling_mode=\"nearest\", channels=3, device=None):\n\t\tself.channels = channels\n\t\tself.device = device or torch.device(\"cpu\")\n\t\tself.ligh...
import os
import numpy as np
import math
import random
import torch
import select
import sys
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from PIL import Image
from IPython.display import display
from torch import functional as F
from torch import nn
from torchvision.transforms import Compose, Resize, GaussianBlur, InterpolationMode
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
from diffusers import DDPMScheduler, DDIMScheduler, UniPCMultistepScheduler
from diffusers.models import AutoencoderKL, ControlNetModel, UNet2DConditionModel
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import (
    BaseOutput,
    randn_tensor,
    numpy_to_pil,
    pt_to_pil,
    # make_image_grid,
    is_accelerate_available,
    is_accelerate_version,
    is_compiled_module,
    logging,
    randn_tensor,
    replace_example_docstring
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.models.attention_processor import Attention, AttentionProcessor
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from .renderer.project import UVProjection as UVP
from .syncmvd.attention import SamplewiseAttnProcessor2_0, replace_attention_processors
from .syncmvd.prompt import *
from .syncmvd.step import step_tex
from .utils import *
12,093
for prompt_tag, prompt_embeds in prompt_embeds_groups.items(): if prompt_tag == "positive" or not guess_mode: # controlnet(s) inference control_model_input = latent_model_input controlnet_prompt_embeds = prompt_embeds if isinstance(controlnet_keep[i], list): cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])] else: controlnet_cond_scale = controlnet_conditioning_scale if isinstance(controlnet_cond_scale, list): controlnet_cond_scale = controlnet_cond_scale[0] cond_scale = controlnet_cond_scale * controlnet_keep[i] # Split into micro-batches according to group meta info # Ignore this feature for now down_block_res_samples_list = [] mid_block_res_sample_list = [] model_input_batches = [torch.index_select(control_model_input, dim=0, index=torch.tensor(meta[0], device=self._execution_device)) for meta in self.group_metas] prompt_embeds_batches = [torch.index_select(controlnet_prompt_embeds, dim=0, index=torch.tensor(meta[0], device=self._execution_device)) for meta in self.group_metas] conditioning_images_batches = [torch.index_select(conditioning_images, dim=0, index=torch.tensor(meta[0], device=self._execution_device)) for meta in self.group_metas] for model_input_batch ,prompt_embeds_batch, conditioning_images_batch \ in zip (model_input_batches, prompt_embeds_batches, conditioning_images_batches): down_block_res_samples, mid_block_res_sample = self.controlnet( model_input_batch, t, encoder_hidden_states=prompt_embeds_batch, controlnet_cond=conditioning_images_batch, conditioning_scale=cond_scale, guess_mode=guess_mode, return_dict=False, ) down_block_res_samples_list.append(down_block_res_samples) mid_block_res_sample_list.append(mid_block_res_sample) ''' For the ith element of down_block_res_samples, concat the ith element of all mini-batch result ''' model_input_batches = prompt_embeds_batches = conditioning_images_batches = None if guess_mode: for dbres in down_block_res_samples_list: dbres_sizes = [] for res in dbres: dbres_sizes.append(res.shape) dbres_sizes_list.append(dbres_sizes) for mbres in mid_block_res_sample_list: mbres_size_list.append(mbres.shape) else: # Infered ControlNet only for the conditional batch. # To apply the output of ControlNet to both the unconditional and conditional batches, # add 0 to the unconditional batch to keep it unchanged. # We copy the tensor shapes from a conditional batch down_block_res_samples_list = [] mid_block_res_sample_list = [] for dbres_sizes in dbres_sizes_list: down_block_res_samples_list.append([torch.zeros(shape, device=self._execution_device, dtype=latents.dtype) for shape in dbres_sizes]) for mbres in mbres_size_list: mid_block_res_sample_list.append(torch.zeros(mbres, device=self._execution_device, dtype=latents.dtype)) dbres_sizes_list = [] mbres_size_list = [] ''' predict the noise residual, split into mini-batches Downblock res samples has n samples, we split each sample into m batches and re group them into m lists of n mini batch samples. 
''' noise_pred_list = [] model_input_batches = [torch.index_select(latent_model_input, dim=0, index=torch.tensor(meta[0], device=self._execution_device)) for meta in self.group_metas] prompt_embeds_batches = [torch.index_select(prompt_embeds, dim=0, index=torch.tensor(meta[0], device=self._execution_device)) for meta in self.group_metas] for model_input_batch, prompt_embeds_batch, down_block_res_samples_batch, mid_block_res_sample_batch, meta \ in zip(model_input_batches, prompt_embeds_batches, down_block_res_samples_list, mid_block_res_sample_list, self.group_metas): if t > num_timesteps * (1- ref_attention_end): replace_attention_processors(self.unet, SamplewiseAttnProcessor2_0, attention_mask=meta[2], ref_attention_mask=meta[3], ref_weight=1) else: replace_attention_processors(self.unet, SamplewiseAttnProcessor2_0, attention_mask=meta[2], ref_attention_mask=meta[3], ref_weight=0) noise_pred = self.unet( model_input_batch, t, encoder_hidden_states=prompt_embeds_batch, cross_attention_kwargs=cross_attention_kwargs, down_block_additional_residuals=down_block_res_samples_batch, mid_block_additional_residual=mid_block_res_sample_batch, return_dict=False, )[0] noise_pred_list.append(noise_pred) noise_pred_list = [torch.index_select(noise_pred, dim=0, index=torch.tensor(meta[1], device=self._execution_device)) for noise_pred, meta in zip(noise_pred_list, self.group_metas)] noise_pred = torch.cat(noise_pred_list, dim=0) down_block_res_samples_list = None mid_block_res_sample_list = None noise_pred_list = None model_input_batches = prompt_embeds_batches = down_block_res_samples_batches = mid_block_res_sample_batches = None result_groups[prompt_tag] = noise_pred positive_noise_pred = result_groups["positive"] # perform guidance if do_classifier_free_guidance: noise_pred = result_groups["negative"] + guidance_scale * (positive_noise_pred - result_groups["negative"]) if do_classifier_free_guidance and guidance_rescale > 0.0: # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale) self.uvp.to(self._execution_device) # compute the previous noisy sample x_t -> x_t-1 # Multi-View step or individual step if t > (1-multiview_diffusion_end)*num_timesteps:
if torch.cuda.is_available(): device = torch.device("cuda:0") torch.cuda.set_device(device) else: device = torch.device("cpu") # Background colors color_constants = {"black": [-1, -1, -1], "white": [1, 1, 1], "maroon": [0, -1, -1], "red": [1, -1, -1], "olive": [0, 0, -1], "yellow": [1, 1, -1], "green": [-1, 0, -1], "lime": [-1 ,1, -1], "teal": [-1, 0, 0], "aqua": [-1, 1, 1], "navy": [-1, -1, 0], "blue": [-1, -1, 1], "purple": [0, -1 , 0], "fuchsia": [1, -1, 1]} color_names = list(color_constants.keys()) # Used to generate depth or normal conditioning images @torch.no_grad() def get_conditioning_images(uvp, output_size, render_size=512, blur_filter=5, cond_type="normal"): verts, normals, depths, cos_maps, texels, fragments = uvp.render_geometry(image_size=render_size) masks = normals[...,3][:,None,...] masks = Resize((output_size//8,)*2, antialias=True)(masks) normals_transforms = Compose([ Resize((output_size,)*2, interpolation=InterpolationMode.BILINEAR, antialias=True), GaussianBlur(blur_filter, blur_filter//3+1)] ) if cond_type == "normal": view_normals = uvp.decode_view_normal(normals).permute(0,3,1,2) *2 - 1 conditional_images = normals_transforms(view_normals) # Some problem here, depth controlnet don't work when depth is normalized # But it do generate using the unnormalized form as below elif cond_type == "depth": view_depths = uvp.decode_normalized_depth(depths).permute(0,3,1,2) conditional_images = normals_transforms(view_depths) return conditional_images, masks # Revert time 0 background to time t to composite with time t foreground @torch.no_grad() def composite_rendered_view(scheduler, backgrounds, foregrounds, masks, t): composited_images = [] for i, (background, foreground, mask) in enumerate(zip(backgrounds, foregrounds, masks)): if t > 0: alphas_cumprod = scheduler.alphas_cumprod[t] noise = torch.normal(0, 1, background.shape, device=background.device) background = (1-alphas_cumprod) * noise + alphas_cumprod * background composited = foreground * mask + background * (1-mask) composited_images.append(composited) composited_tensor = torch.stack(composited_images) return composited_tensor # Split into micro-batches to use less memory in each unet prediction # But need more investigation on reducing memory usage # Assume it has no possitive effect and use a large "max_batch_size" to skip splitting def split_groups(attention_mask, max_batch_size, ref_view=[]): group_sets = [] group = set() ref_group = set() idx = 0 while idx < len(attention_mask): new_group = group | set([idx]) new_ref_group = (ref_group | set(attention_mask[idx] + ref_view)) - new_group if len(new_group) + len(new_ref_group) <= max_batch_size: group = new_group ref_group = new_ref_group idx += 1 else: assert len(group) != 0, "Cannot fit into a group" group_sets.append((group, ref_group)) group = set() ref_group = set() if len(group)>0: group_sets.append((group, ref_group)) group_metas = [] for group, ref_group in group_sets: in_mask = sorted(list(group | ref_group)) out_mask = [] group_attention_masks = [] for idx in in_mask: if idx in group: out_mask.append(in_mask.index(idx)) group_attention_masks.append([in_mask.index(idxx) for idxx in attention_mask[idx] if idxx in in_mask]) ref_attention_mask = [in_mask.index(idx) for idx in ref_view] group_metas.append([in_mask, out_mask, group_attention_masks, ref_attention_mask]) return group_metas ''' MultiView-Diffusion Stable-Diffusion Pipeline Modified from a Diffusers StableDiffusionControlNetPipeline Just mimic the pipeline structure but did not follow any API 
convention ''' class StableSyncMVDPipeline(StableDiffusionControlNetPipeline): def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel]], scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = False, ): super().__init__( vae, text_encoder, tokenizer, unet, controlnet, scheduler, safety_checker, feature_extractor, requires_safety_checker ) self.scheduler = DDPMScheduler.from_config(self.scheduler.config) self.model_cpu_offload_seq = "vae->text_encoder->unet->vae" self.enable_model_cpu_offload() self.enable_vae_slicing() self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) def initialize_pipeline( self, mesh_path=None, mesh_transform=None, mesh_autouv=None, camera_azims=None, camera_centers=None, top_cameras=True, ref_views=[], latent_size=None, render_rgb_size=None, texture_size=None, texture_rgb_size=None, max_batch_size=24, logging_config=None, ): # Make output dir output_dir = logging_config["output_dir"] self.result_dir = f"{output_dir}/results" self.intermediate_dir = f"{output_dir}/intermediate" dirs = [output_dir, self.result_dir, self.intermediate_dir] for dir_ in dirs: if not os.path.isdir(dir_): os.mkdir(dir_) # Define the cameras for rendering self.camera_poses = [] self.attention_mask=[] self.centers = camera_centers cam_count = len(camera_azims) front_view_diff = 360 back_view_diff = 360 front_view_idx = 0 back_view_idx = 0 for i, azim in enumerate(camera_azims): if azim < 0: azim += 360 self.camera_poses.append((0, azim)) self.attention_mask.append([(cam_count+i-1)%cam_count, i, (i+1)%cam_count]) if abs(azim) < front_view_diff: front_view_idx = i front_view_diff = abs(azim) if abs(azim - 180) < back_view_diff: back_view_idx = i back_view_diff = abs(azim - 180) # Add two additional cameras for painting the top surfaces if top_cameras: self.camera_poses.append((30, 0)) self.camera_poses.append((30, 180)) self.attention_mask.append([front_view_idx, cam_count]) self.attention_mask.append([back_view_idx, cam_count+1]) # Reference view for attention (all views attend the the views in this list) # A forward view will be used if not specified if len(ref_views) == 0: ref_views = [front_view_idx] # Calculate in-group attention mask self.group_metas = split_groups(self.attention_mask, max_batch_size, ref_views) # Set up pytorch3D for projection between screen space and UV space # uvp is for latent and uvp_rgb for rgb color self.uvp = UVP(texture_size=texture_size, render_size=latent_size, sampling_mode="nearest", channels=4, device=self._execution_device) if mesh_path.lower().endswith(".obj"): self.uvp.load_mesh(mesh_path, scale_factor=mesh_transform["scale"] or 1, autouv=mesh_autouv) elif mesh_path.lower().endswith(".glb"): self.uvp.load_glb_mesh(mesh_path, scale_factor=mesh_transform["scale"] or 1, autouv=mesh_autouv) else: assert False, "The mesh file format is not supported. Use .obj or .glb." 
self.uvp.set_cameras_and_render_settings(self.camera_poses, centers=camera_centers, camera_distance=4.0) self.uvp_rgb = UVP(texture_size=texture_rgb_size, render_size=render_rgb_size, sampling_mode="nearest", channels=3, device=self._execution_device) self.uvp_rgb.mesh = self.uvp.mesh.clone() self.uvp_rgb.set_cameras_and_render_settings(self.camera_poses, centers=camera_centers, camera_distance=4.0) _,_,_,cos_maps,_, _ = self.uvp_rgb.render_geometry() self.uvp_rgb.calculate_cos_angle_weights(cos_maps, fill=False) # Save some VRAM del _, cos_maps self.uvp.to("cpu") self.uvp_rgb.to("cpu") color_images = torch.FloatTensor([color_constants[name] for name in color_names]).reshape(-1,3,1,1).to(dtype=self.text_encoder.dtype, device=self._execution_device) color_images = torch.ones( (1,1,latent_size*8, latent_size*8), device=self._execution_device, dtype=self.text_encoder.dtype ) * color_images color_images *= ((0.5*color_images)+0.5) color_latents = encode_latents(self.vae, color_images) self.color_latents = {color[0]:color[1] for color in zip(color_names, [latent for latent in color_latents])} self.vae = self.vae.to("cpu") print("Done Initialization") ''' Modified from a StableDiffusion ControlNet pipeline Multi ControlNet not supported yet ''' @torch.no_grad() def __call__( self, prompt: str = None, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: str = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, return_dict: bool = False, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, max_batch_size=6, cross_attention_kwargs: Optional[Dict[str, Any]] = None, controlnet_guess_mode: bool = False, controlnet_conditioning_scale: Union[float, List[float]] = 0.7, controlnet_conditioning_end_scale: Union[float, List[float]] = 0.9, control_guidance_start: Union[float, List[float]] = 0.0, control_guidance_end: Union[float, List[float]] = 0.99, guidance_rescale: float = 0.0, mesh_path: str = None, mesh_transform: dict = None, mesh_autouv = False, camera_azims=None, camera_centers=None, top_cameras=True, texture_size = 1536, render_rgb_size=1024, texture_rgb_size = 1024, multiview_diffusion_end=0.8, shuffle_background_change=0.4, shuffle_background_end=0.99, #0.4 use_directional_prompt=True, ref_attention_end=0.2, logging_config=None, cond_type="depth", ): # Setup pipeline settings self.initialize_pipeline( mesh_path=mesh_path, mesh_transform=mesh_transform, mesh_autouv=mesh_autouv, camera_azims=camera_azims, camera_centers=camera_centers, top_cameras=top_cameras, ref_views=[], latent_size=height//8, render_rgb_size=render_rgb_size, texture_size=texture_size, texture_rgb_size=texture_rgb_size, max_batch_size=max_batch_size, logging_config=logging_config ) num_timesteps = self.scheduler.config.num_train_timesteps initial_controlnet_conditioning_scale = controlnet_conditioning_scale log_interval = logging_config.get("log_interval", 10) view_fast_preview = logging_config.get("view_fast_preview", True) tex_fast_preview = logging_config.get("tex_fast_preview", True) controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet # align format for control guidance if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): control_guidance_start = len(control_guidance_end) * [control_guidance_start] elif not 
isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): control_guidance_end = len(control_guidance_start) * [control_guidance_end] elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list): # mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1 mult = 1 control_guidance_start, control_guidance_end = mult * [control_guidance_start], mult * [ control_guidance_end ] # 0. Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, torch.zeros((1,3,height,width), device=self._execution_device), callback_steps, negative_prompt, None, None, controlnet_conditioning_scale, control_guidance_start, control_guidance_end, ) # 2. Define call parameters if prompt is not None and isinstance(prompt, list): assert len(prompt) == 1 and len(negative_prompt) == 1, "Only implemented for 1 (negative) prompt" assert num_images_per_prompt == 1, "Only implemented for 1 image per-prompt" batch_size = len(self.uvp.cameras) device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float): # controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets) global_pool_conditions = ( controlnet.config.global_pool_conditions if isinstance(controlnet, ControlNetModel) else controlnet.nets[0].config.global_pool_conditions ) guess_mode = controlnet_guess_mode or global_pool_conditions # 3. Encode input prompt prompt, negative_prompt = prepare_directional_prompt(prompt, negative_prompt) text_encoder_lora_scale = ( cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None ) prompt_embeds = self._encode_prompt( prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=None, negative_prompt_embeds=None, lora_scale=text_encoder_lora_scale, ) negative_prompt_embeds, prompt_embeds = torch.chunk(prompt_embeds, 2) prompt_embed_dict = dict(zip(direction_names, [emb for emb in prompt_embeds])) negative_prompt_embed_dict = dict(zip(direction_names, [emb for emb in negative_prompt_embeds])) # (4. Prepare image) This pipeline use internal conditional images from Pytorch3D self.uvp.to(self._execution_device) conditioning_images, masks = get_conditioning_images(self.uvp, height, cond_type=cond_type) conditioning_images = conditioning_images.type(prompt_embeds.dtype) cond = (conditioning_images/2+0.5).permute(0,2,3,1).cpu().numpy() cond = np.concatenate([img for img in cond], axis=1) numpy_to_pil(cond)[0].save(f"{self.intermediate_dir}/cond.jpg") # 5. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # 6. 
Prepare latent variables num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents( batch_size, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, None, ) latent_tex = self.uvp.set_noise_texture() noise_views = self.uvp.render_textured_views() foregrounds = [view[:-1] for view in noise_views] masks = [view[-1:] for view in noise_views] composited_tensor = composite_rendered_view(self.scheduler, latents, foregrounds, masks, timesteps[0]+1) latents = composited_tensor.type(latents.dtype) self.uvp.to("cpu") # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 7.1 Create tensor stating which controlnets to keep controlnet_keep = [] for i in range(len(timesteps)): keeps = [ 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) for s, e in zip(control_guidance_start, control_guidance_end) ] controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps) # 8. Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order intermediate_results = [] background_colors = [random.choice(list(color_constants.keys())) for i in range(len(self.camera_poses))] dbres_sizes_list = [] mbres_size_list = [] with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # mix prompt embeds according to azim angle positive_prompt_embeds = [azim_prompt(prompt_embed_dict, pose) for pose in self.camera_poses] positive_prompt_embeds = torch.stack(positive_prompt_embeds, axis=0) negative_prompt_embeds = [azim_neg_prompt(negative_prompt_embed_dict, pose) for pose in self.camera_poses] negative_prompt_embeds = torch.stack(negative_prompt_embeds, axis=0) # expand the latents if we are doing classifier free guidance latent_model_input = self.scheduler.scale_model_input(latents, t) ''' Use groups to manage prompt and results Make sure negative and positive prompt does not perform attention together ''' prompt_embeds_groups = {"positive": positive_prompt_embeds} result_groups = {} if do_classifier_free_guidance: prompt_embeds_groups["negative"] = negative_prompt_embeds for prompt_tag, prompt_embeds in prompt_embeds_groups.items(): if prompt_tag == "positive" or not guess_mode: # controlnet(s) inference control_model_input = latent_model_input controlnet_prompt_embeds = prompt_embeds if isinstance(controlnet_keep[i], list): cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])] else: controlnet_cond_scale = controlnet_conditioning_scale if isinstance(controlnet_cond_scale, list): controlnet_cond_scale = controlnet_cond_scale[0] cond_scale = controlnet_cond_scale * controlnet_keep[i] # Split into micro-batches according to group meta info # Ignore this feature for now down_block_res_samples_list = [] mid_block_res_sample_list = [] model_input_batches = [torch.index_select(control_model_input, dim=0, index=torch.tensor(meta[0], device=self._execution_device)) for meta in self.group_metas] prompt_embeds_batches = [torch.index_select(controlnet_prompt_embeds, dim=0, index=torch.tensor(meta[0], device=self._execution_device)) for meta in self.group_metas] conditioning_images_batches = [torch.index_select(conditioning_images, dim=0, index=torch.tensor(meta[0], device=self._execution_device)) for meta in self.group_metas] for model_input_batch ,prompt_embeds_batch, conditioning_images_batch \ in zip (model_input_batches, 
prompt_embeds_batches, conditioning_images_batches): down_block_res_samples, mid_block_res_sample = self.controlnet( model_input_batch, t, encoder_hidden_states=prompt_embeds_batch, controlnet_cond=conditioning_images_batch, conditioning_scale=cond_scale, guess_mode=guess_mode, return_dict=False, ) down_block_res_samples_list.append(down_block_res_samples) mid_block_res_sample_list.append(mid_block_res_sample) ''' For the ith element of down_block_res_samples, concat the ith element of all mini-batch result ''' model_input_batches = prompt_embeds_batches = conditioning_images_batches = None if guess_mode: for dbres in down_block_res_samples_list: dbres_sizes = [] for res in dbres: dbres_sizes.append(res.shape) dbres_sizes_list.append(dbres_sizes) for mbres in mid_block_res_sample_list: mbres_size_list.append(mbres.shape) else: # Infered ControlNet only for the conditional batch. # To apply the output of ControlNet to both the unconditional and conditional batches, # add 0 to the unconditional batch to keep it unchanged. # We copy the tensor shapes from a conditional batch down_block_res_samples_list = [] mid_block_res_sample_list = [] for dbres_sizes in dbres_sizes_list: down_block_res_samples_list.append([torch.zeros(shape, device=self._execution_device, dtype=latents.dtype) for shape in dbres_sizes]) for mbres in mbres_size_list: mid_block_res_sample_list.append(torch.zeros(mbres, device=self._execution_device, dtype=latents.dtype)) dbres_sizes_list = [] mbres_size_list = [] ''' predict the noise residual, split into mini-batches Downblock res samples has n samples, we split each sample into m batches and re group them into m lists of n mini batch samples. ''' noise_pred_list = [] model_input_batches = [torch.index_select(latent_model_input, dim=0, index=torch.tensor(meta[0], device=self._execution_device)) for meta in self.group_metas] prompt_embeds_batches = [torch.index_select(prompt_embeds, dim=0, index=torch.tensor(meta[0], device=self._execution_device)) for meta in self.group_metas] for model_input_batch, prompt_embeds_batch, down_block_res_samples_batch, mid_block_res_sample_batch, meta \ in zip(model_input_batches, prompt_embeds_batches, down_block_res_samples_list, mid_block_res_sample_list, self.group_metas): if t > num_timesteps * (1- ref_attention_end): replace_attention_processors(self.unet, SamplewiseAttnProcessor2_0, attention_mask=meta[2], ref_attention_mask=meta[3], ref_weight=1) else: replace_attention_processors(self.unet, SamplewiseAttnProcessor2_0, attention_mask=meta[2], ref_attention_mask=meta[3], ref_weight=0) noise_pred = self.unet( model_input_batch, t, encoder_hidden_states=prompt_embeds_batch, cross_attention_kwargs=cross_attention_kwargs, down_block_additional_residuals=down_block_res_samples_batch, mid_block_additional_residual=mid_block_res_sample_batch, return_dict=False, )[0] noise_pred_list.append(noise_pred) noise_pred_list = [torch.index_select(noise_pred, dim=0, index=torch.tensor(meta[1], device=self._execution_device)) for noise_pred, meta in zip(noise_pred_list, self.group_metas)] noise_pred = torch.cat(noise_pred_list, dim=0) down_block_res_samples_list = None mid_block_res_sample_list = None noise_pred_list = None model_input_batches = prompt_embeds_batches = down_block_res_samples_batches = mid_block_res_sample_batches = None result_groups[prompt_tag] = noise_pred positive_noise_pred = result_groups["positive"] # perform guidance if do_classifier_free_guidance: noise_pred = result_groups["negative"] + guidance_scale * (positive_noise_pred - 
result_groups["negative"]) if do_classifier_free_guidance and guidance_rescale > 0.0: # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale) self.uvp.to(self._execution_device) # compute the previous noisy sample x_t -> x_t-1 # Multi-View step or individual step if t > (1-multiview_diffusion_end)*num_timesteps:
step_results = step_tex(
3
2023-12-09 03:27:58+00:00
16k
SqueezeBits/owlite
owlite/backend/onnx/export.py
[ { "identifier": "OwLiteStatus", "path": "owlite/enums/owlite_status.py", "snippet": "class OwLiteStatus(Enum):\n \"\"\"The enum for specifying model status about compression with `GraphModule.meta`\n\n Attributes\n NOT_COMPRESSED: The model is symbolic traced, but before inserting quantizer...
import io
import os
import tempfile

import onnx
import onnxsim
import onnxsim.model_checking
import torch
import torch.onnx.errors
from collections.abc import Collection, Mapping, Sequence
from typing import Any, Optional, Union
from onnx import ModelProto
from onnx.shape_inference import infer_shapes, infer_shapes_path
from onnxsim.onnxsim_cpp2py_export import simplify_path
from torch.fx.graph_module import GraphModule

from ...enums import OwLiteStatus
from ...logger import log
from ...nn import FakeQuantizer
from ..fx.transforms import clip_narrow_range_weights, fold_zp_to_bias, fuse_bn
from ..utils import (
    get_most_common_device,
    get_most_common_floating_point_type,
    map_signature,
    move_tensors_to,
    nodestr,
)
from .dynamize import DynamicDimensions, dynamize
from .model_checking import compare
from .transforms import apply_onnx_transforms
12,508
Defaults to `torch._C._onnx.OperatorExportTypes.ONNX`. * `OperatorExportTypes.ONNX`: Export all ops as regular ONNX ops (in the default opset domain). * `OperatorExportTypes.ONNX_FALLTHROUGH`: Try to convert all ops to standard ONNX ops in the default opset domain. If unable to do so (e.g. because support has not been added to convert a particular torch op to ONNX), fall back to exporting the op into a custom opset domain without conversion. Applies to `custom ops <https://pytorch.org/tutorials/advanced/torch_script_custom_ops.html>`_ as well as ATen ops. For the exported model to be usable, the runtime must support these non-standard ops. * `OperatorExportTypes.ONNX_ATEN`: All ATen ops (in the TorchScript namespace "aten") are exported as ATen ops (in opset domain "org.pytorch.aten"). `ATen <https://pytorch.org/cppdocs/#aten>`_ is PyTorch's built-in tensor library, so this instructs the runtime to use PyTorch's implementation of these ops. .. warning:: Models exported this way are probably runnable only by Caffe2. This may be useful if the numeric differences in implementations of operators are causing large differences in behavior between PyTorch and Caffe2 (which is more common on untrained models). * `OperatorExportTypes.ONNX_ATEN_FALLBACK`: Try to export each ATen op (in the TorchScript namespace "aten") as a regular ONNX op. If we are unable to do so (e.g. because support has not been added to convert a particular torch op to ONNX), fall back to exporting an ATen op. See documentation on OperatorExportTypes.ONNX_ATEN for context. For example:: graph(%0 : Float): %3 : int = prim::Constant[value=0]() # conversion unsupported %4 : Float = aten::triu(%0, %3) # conversion supported %5 : Float = aten::mul(%4, %0) return (%5) Assuming `aten::triu` is not supported in ONNX, this will be exported as:: graph(%0 : Float): %1 : Long() = onnx::Constant[value={0}]() # not converted %2 : Float = aten::ATen[operator="triu"](%0, %1) # converted %3 : Float = onnx::Mul(%2, %0) return (%3) If PyTorch was built with Caffe2 (i.e. with `BUILD_CAFFE2=1`), then Caffe2-specific behavior will be enabled, including special support for ops are produced by the modules described in `Quantization <https://pytorch.org/docs/stable/quantization.html>`_. .. warning:: Models exported this way are probably runnable only by Caffe2. opset_version (int, optional): The version of the default (ai.onnx) opset <https://github.com/onnx/onnx/blob/master/docs/Operators.md> to target. Must be >= 7 and <= 18. Defaults to 17. do_constant_folding (bool, optional): Apply the constant-folding optimization. Constant-folding will replace some of the ops that have all constant inputs with pre-computed constant nodes. Defaults to True. keep_initializers_as_inputs (Optional[bool], optional): If True, all the initializers (typically corresponding to parameters) in the exported graph will also be added as inputs to the graph. If False, then initializers are not added as inputs to the graph, and only the non-parameter inputs are added as inputs. This may allow for better optimizations (e.g. constant folding) by backends/runtimes. Defaults to None. custom_opsets (Optional[Mapping[str, int]], optional): A dict with schema: * KEY (str): opset domain name * VALUE (int): opset version If a custom opset is referenced by ``model`` but not mentioned in this dictionary, the opset version is set to 1. Only custom opset domain name and version should be indicated through this argument. Defaults to None. 
export_modules_as_functions (Union[bool, Collection[type[torch.nn.Module]]], optional): Flag to enable exporting all ``nn.Module`` forward calls as local functions in ONNX. Or a set to indicate the particular types of modules to export as local functions in ONNX. This feature requires ``opset_version`` >= 15, otherwise the export will fail. This is because ``opset_version`` < 15 implies IR version < 8, which means no local function support. Module variables will be exported as function attributes. There are two categories of function attributes. Defaults to False. use_fast_export (bool, optional): If True, export process will be done in memory. If `module` with total parameter size larger than 2GB, this flag will be automatically set to `False`. If False, temporary export process will be done using temporary files. Defaults to True. apply_transforms (bool, optional): If True, ONNX transforms defined by SqueezeBits.inc will be applied for model optimization. If False, ONNX transformations will be skipped. However, turning this flag to `False` is experimental and might yield unexpected behavior. Defaults to True. simplify (bool, optional): If True, onnx-simplifier will be run. If False, onnx-simplifier will be skipped. Defaults to True. check_n (int, optional): Only available when `simplify=True`. The number of times to run check for the simplified ONNX proto after onnx-simplifier. Defaults to 1. skip_fuse_bn (bool, optional): Only available when `simplify=True`. Whether to skip batchnorm-fusion. Defaults to False. skipped_optimizers (Optional[list[str]], optional): Only available when `simplify=True`. The list of onnx-simplifier passes to skip. Defaults to None. See https://github.com/onnx/optimizer/tree/master/onnxoptimizer/passes for available passes. dynamic_dimensions (Optional[DynamicDimensions], optional): Dynamic dimensions setting configured by `configure_dynamic_dimensions`. Defaults to None. Raises: TypeError: If `f` is not a string. ValueError: If the quantizer has invalid condition. `torch.onnx.errors.CheckerError`: If the ONNX checker detects an invalid ONNX graph. `torch.onnx.errors.UnsupportedOperatorError`: If the ONNX graph cannot be exported because it uses an operator that is not supported by the exporter. `torch.onnx.errors.OnnxExporterError`: Other errors that can occur during export. All errors are subclasses of :class:`errors.OnnxExporterError`. """ if not isinstance(f, str): raise TypeError("owlite.onnx.export requires the argument `f` to be a string.") if isinstance(module, GraphModule): if module.meta["owlite_status"] == OwLiteStatus.COMPRESSED: log.warning( "This module has not yet been calibrated. " "The onnx that comes out of this module may have unexpected results in accuracy and latency." ) clip_narrow_range_weights(module) # Batch Norm Fusing
"""An alternative for torch.onnx.export with extra optimizations""" # pylint: disable=protected-access # pylint: disable=no-name-in-module # Large models (e.g. SwinTransformer) requires # more than 50 (default) onnxsim iterations os.environ["ONNXSIM_FIXED_POINT_ITERS"] = "100" # pylint: disable=too-many-arguments, too-many-locals, invalid-name def export( module: torch.nn.Module, args: Union[tuple[Any, ...], torch.Tensor], f: str, export_params: bool = True, verbose: bool = False, training: torch._C._onnx.TrainingMode = torch._C._onnx.TrainingMode.EVAL, input_names: Optional[Sequence[str]] = None, output_names: Optional[Sequence[str]] = None, operator_export_type: torch._C._onnx.OperatorExportTypes = torch._C._onnx.OperatorExportTypes.ONNX, opset_version: int = 17, do_constant_folding: bool = True, keep_initializers_as_inputs: Optional[bool] = None, custom_opsets: Optional[Mapping[str, int]] = None, export_modules_as_functions: Union[bool, Collection[type[torch.nn.Module]]] = False, use_fast_export: bool = True, apply_transforms: bool = True, simplify: bool = True, check_n: int = 1, skip_fuse_bn: bool = False, skipped_optimizers: Optional[list[str]] = None, dynamic_dimensions: Optional[DynamicDimensions] = None, ) -> None: r"""Exports a model into ONNX format. Args: module (torch.nn.Module): The model to be exported. args (Union[tuple[Any, ...], torch.Tensor]): Argument of a `module`. args can be structured either as: 1. ONLY A TUPLE OF ARGUMENTS:: args = (x, y, z) The tuple should contain model inputs such that `module(*args)` is a valid invocation of the model. Any non-Tensor arguments will be hard-coded into the exported model; any Tensor arguments will become inputs of the exported model, in the order they occur in the tuple. 2. A TENSOR:: args = torch.Tensor([1]) This is equivalent to a 1-ary tuple of that Tensor. 3. A TUPLE OF ARGUMENTS ENDING WITH A DICTIONARY OF NAMED ARGUMENTS:: args = ( x, { "y": input_y, "z": input_z } ) All but the last element of the tuple will be passed as non-keyword arguments, and named arguments will be set from the last element. If a named argument is not present in the dictionary, it is assigned the default value, or None if a default value is not provided. .. note:: If a dictionary is the last element of the args tuple, it will be interpreted as containing named arguments. In order to pass a dict as the last non-keyword arg, provide an empty dict as the last element of the args tuple. For example, instead of:: export( module, ( x, # WRONG: will be interpreted as named arguments {y: z} ), "test.onnx.pb" ) Write:: export( module, ( x, {y: z}, {} ), "test.onnx.pb" ) f (str): A string containing a file name. A binary protocol buffer will be written to this file. export_params (bool, optional): If True, all parameters will be exported. Set this to False if you want to export an untrained model. In this case, the exported model will first take all of its parameters as arguments, with the ordering as specified by `module.state_dict().values()`. Defaults to True. verbose (bool, optional): If True, prints a description of the model being exported to stdout. In addition, the final ONNX graph will include the field `doc_string` from the exported model which mentions the source code locations for `module`. If True, ONNX exporter logging will be turned on. Defaults to False. training (torch._C._onnx.TrainingMode, optional): Defaults to torch._C._onnx.TrainingMode.EVAL. * `TrainingMode.EVAL`: export the model in inference mode. 
* `TrainingMode.PRESERVE`: export the model in inference mode if model.training is False and in training mode if model.training is True. * `TrainingMode.TRAINING`: export the model in training mode. Disables optimizations which might interfere with training. input_names (Optional[Sequence[str]], optional): Names to assign to the input nodes of the graph, in order. Names of `module.forward` arguments will be used when None is given. Defaults to None. output_names (Optional[Sequence[str]], optional): Names to assign to the output nodes of the graph, in order. Defaults to None. operator_export_type (torch._C._onnx.OperatorExportTypes, optional): Defaults to `torch._C._onnx.OperatorExportTypes.ONNX`. * `OperatorExportTypes.ONNX`: Export all ops as regular ONNX ops (in the default opset domain). * `OperatorExportTypes.ONNX_FALLTHROUGH`: Try to convert all ops to standard ONNX ops in the default opset domain. If unable to do so (e.g. because support has not been added to convert a particular torch op to ONNX), fall back to exporting the op into a custom opset domain without conversion. Applies to `custom ops <https://pytorch.org/tutorials/advanced/torch_script_custom_ops.html>`_ as well as ATen ops. For the exported model to be usable, the runtime must support these non-standard ops. * `OperatorExportTypes.ONNX_ATEN`: All ATen ops (in the TorchScript namespace "aten") are exported as ATen ops (in opset domain "org.pytorch.aten"). `ATen <https://pytorch.org/cppdocs/#aten>`_ is PyTorch's built-in tensor library, so this instructs the runtime to use PyTorch's implementation of these ops. .. warning:: Models exported this way are probably runnable only by Caffe2. This may be useful if the numeric differences in implementations of operators are causing large differences in behavior between PyTorch and Caffe2 (which is more common on untrained models). * `OperatorExportTypes.ONNX_ATEN_FALLBACK`: Try to export each ATen op (in the TorchScript namespace "aten") as a regular ONNX op. If we are unable to do so (e.g. because support has not been added to convert a particular torch op to ONNX), fall back to exporting an ATen op. See documentation on OperatorExportTypes.ONNX_ATEN for context. For example:: graph(%0 : Float): %3 : int = prim::Constant[value=0]() # conversion unsupported %4 : Float = aten::triu(%0, %3) # conversion supported %5 : Float = aten::mul(%4, %0) return (%5) Assuming `aten::triu` is not supported in ONNX, this will be exported as:: graph(%0 : Float): %1 : Long() = onnx::Constant[value={0}]() # not converted %2 : Float = aten::ATen[operator="triu"](%0, %1) # converted %3 : Float = onnx::Mul(%2, %0) return (%3) If PyTorch was built with Caffe2 (i.e. with `BUILD_CAFFE2=1`), then Caffe2-specific behavior will be enabled, including special support for ops are produced by the modules described in `Quantization <https://pytorch.org/docs/stable/quantization.html>`_. .. warning:: Models exported this way are probably runnable only by Caffe2. opset_version (int, optional): The version of the default (ai.onnx) opset <https://github.com/onnx/onnx/blob/master/docs/Operators.md> to target. Must be >= 7 and <= 18. Defaults to 17. do_constant_folding (bool, optional): Apply the constant-folding optimization. Constant-folding will replace some of the ops that have all constant inputs with pre-computed constant nodes. Defaults to True. 
keep_initializers_as_inputs (Optional[bool], optional): If True, all the initializers (typically corresponding to parameters) in the exported graph will also be added as inputs to the graph. If False, then initializers are not added as inputs to the graph, and only the non-parameter inputs are added as inputs. This may allow for better optimizations (e.g. constant folding) by backends/runtimes. Defaults to None. custom_opsets (Optional[Mapping[str, int]], optional): A dict with schema: * KEY (str): opset domain name * VALUE (int): opset version If a custom opset is referenced by ``model`` but not mentioned in this dictionary, the opset version is set to 1. Only custom opset domain name and version should be indicated through this argument. Defaults to None. export_modules_as_functions (Union[bool, Collection[type[torch.nn.Module]]], optional): Flag to enable exporting all ``nn.Module`` forward calls as local functions in ONNX. Or a set to indicate the particular types of modules to export as local functions in ONNX. This feature requires ``opset_version`` >= 15, otherwise the export will fail. This is because ``opset_version`` < 15 implies IR version < 8, which means no local function support. Module variables will be exported as function attributes. There are two categories of function attributes. Defaults to False. use_fast_export (bool, optional): If True, export process will be done in memory. If `module` with total parameter size larger than 2GB, this flag will be automatically set to `False`. If False, temporary export process will be done using temporary files. Defaults to True. apply_transforms (bool, optional): If True, ONNX transforms defined by SqueezeBits.inc will be applied for model optimization. If False, ONNX transformations will be skipped. However, turning this flag to `False` is experimental and might yield unexpected behavior. Defaults to True. simplify (bool, optional): If True, onnx-simplifier will be run. If False, onnx-simplifier will be skipped. Defaults to True. check_n (int, optional): Only available when `simplify=True`. The number of times to run check for the simplified ONNX proto after onnx-simplifier. Defaults to 1. skip_fuse_bn (bool, optional): Only available when `simplify=True`. Whether to skip batchnorm-fusion. Defaults to False. skipped_optimizers (Optional[list[str]], optional): Only available when `simplify=True`. The list of onnx-simplifier passes to skip. Defaults to None. See https://github.com/onnx/optimizer/tree/master/onnxoptimizer/passes for available passes. dynamic_dimensions (Optional[DynamicDimensions], optional): Dynamic dimensions setting configured by `configure_dynamic_dimensions`. Defaults to None. Raises: TypeError: If `f` is not a string. ValueError: If the quantizer has invalid condition. `torch.onnx.errors.CheckerError`: If the ONNX checker detects an invalid ONNX graph. `torch.onnx.errors.UnsupportedOperatorError`: If the ONNX graph cannot be exported because it uses an operator that is not supported by the exporter. `torch.onnx.errors.OnnxExporterError`: Other errors that can occur during export. All errors are subclasses of :class:`errors.OnnxExporterError`. """ if not isinstance(f, str): raise TypeError("owlite.onnx.export requires the argument `f` to be a string.") if isinstance(module, GraphModule): if module.meta["owlite_status"] == OwLiteStatus.COMPRESSED: log.warning( "This module has not yet been calibrated. " "The onnx that comes out of this module may have unexpected results in accuracy and latency." 
) clip_narrow_range_weights(module) # Batch Norm Fusing
fuse_bn(module)
5
2023-12-08 06:41:50+00:00
16k
qitan/devops-backend-lite
apps/ucenter/views.py
[ { "identifier": "FEISHU_SYNC_USER_JOB_CACHE_KEY", "path": "common/variables.py", "snippet": "FEISHU_SYNC_USER_JOB_CACHE_KEY = 'celery_job:feishu_user_sync'" }, { "identifier": "Menu", "path": "dbapp/models.py", "snippet": "" }, { "identifier": "CustomModelViewSet", "path": "c...
import hashlib import django_filters import datetime import time import shortuuid import json import logging from django.core.cache import cache from rest_framework import viewsets, status from rest_framework.views import APIView from rest_framework.response import Response from rest_framework.decorators import action from rest_framework import pagination from rest_framework_simplejwt.views import TokenObtainPairView, TokenRefreshView from rest_framework_simplejwt.exceptions import TokenError, InvalidToken from rest_framework_simplejwt.authentication import JWTAuthentication from rest_framework_simplejwt.tokens import RefreshToken, Token, OutstandingToken from rest_framework.filters import SearchFilter, OrderingFilter from django_q.tasks import async_task, result from django.contrib.auth.models import update_last_login from django.db.models import Q from django.contrib.auth import logout from common.variables import FEISHU_SYNC_USER_JOB_CACHE_KEY from dbapp.models import Menu, Permission, Role, Organization, UserProfile, AuditLog, SystemConfig, DataDict from ucenter.serializers import MenuSerializers, MenuListSerializers, PermissionListSerializers, PermissionSerializers, \ RoleListSerializers, \ RoleSerializers, OrganizationSerializers, \ UserProfileListSerializers, UserProfileSerializers, UserProfileDetailSerializers, AuditLogSerializers, \ AuditLogActivitySerializers, SystemConfigSerializers, \ SystemConfigListSerializers, DataDictSerializers from common.extends.viewsets import CustomModelViewSet, CustomModelParentViewSet from common.extends.permissions import RbacPermission from common.extends.JwtAuth import CustomInvalidToken, TokenObtainPairSerializer, TokenRefreshSerializer from common.extends.handler import log_audit from common.extends.filters import AuditLogFilter, CustomSearchFilter from common.utils.JenkinsAPI import GlueJenkins from common.get_ip import user_ip from common.ext_fun import ThirdPartyUser, set_redis_data, get_redis_data, timeline_generate, time_period, \ node_filter from qtasks.tasks import test_notify from django.conf import settings from django.contrib.auth import login, REDIRECT_FIELD_NAME from django.views.decorators.csrf import csrf_exempt, csrf_protect from django.views.decorators.cache import never_cache
10,818
def perform_update(self, serializer): serializer.save() cache.delete(f"datadict:{serializer.data['key']}:0") cache.delete(f"datadict:{serializer.data['key']}:1") @action(methods=['GET'], url_path='user', detail=False) def get_user(self, request): """ 获取用户列表 ### 传递参数 force: 0|1 force为1时强制刷新 """ _force = request.query_params.get('force', None) position = request.query_params.get('position', None) _key = str( f'project:users:{self.request.user.id}-{self.request.query_params}') try: data = cache.get(_key) except BaseException as e: cache.delete(_key) data = None if not data or _force: if position: users = UserProfile.objects.exclude( username='thirdparty').filter(position=position) else: users = UserProfile.objects.exclude(username='thirdparty') data = [{'id': i.id, 'first_name': i.first_name, 'username': i.username, 'name': i.name, 'title': i.title, 'position': i.position} for i in users] cache.set(_key, data, timeout=60 * 60 * 24) return Response({'code': 20000, 'data': data}) @action(methods=['GET'], url_path='extra', detail=False) def get_by_key(self, request): """ 通过指定key名获取 参数: key """ key_name = request.query_params.get('key', None) instance = self.queryset.get(key=key_name) serializer = self.get_serializer(instance) data = {'data': serializer.data, 'code': 20000, 'status': 'success'} return Response(data) class AuditLogViewSet(CustomModelViewSet): """ 审计日志视图 ### 审计日志权限 {'get': ('audit_list', '查看审计日志')} """ perms_map = ( {'*': ('admin', '管理员')}, {'get': ('audit_list', '查看审计日志')} ) queryset = AuditLog.objects.all() serializer_class = AuditLogSerializers filter_backends = (django_filters.rest_framework.DjangoFilterBackend, CustomSearchFilter, OrderingFilter) filter_class = AuditLogFilter filter_fields = ('user', 'type', 'action', 'action_ip', 'operator') search_fields = ('user', 'type', 'action', 'action_ip', 'content') def create(self, request, *args, **kwargs): pass def update(self, request, *args, **kwargs): pass def destroy(self, request, *args, **kwargs): pass class MenuViewSet(CustomModelParentViewSet): """ 菜单视图 ### 菜单权限 {'*': ('menu_all', '菜单管理')}, {'get': ('menu_list', '查看菜单')}, {'post': ('menu_create', '创建菜单')}, {'put': ('menu_edit', '编辑菜单')}, {'patch': ('menu_edit', '编辑菜单')}, {'delete': ('menu_delete', '删除菜单')} """ perms_map = ( {'*': ('admin', '管理员')}, {'*': ('menu_all', '菜单管理')}, {'get': ('menu_list', '查看菜单')}, {'post': ('menu_create', '创建菜单')}, {'put': ('menu_edit', '编辑菜单')}, {'patch': ('menu_edit', '编辑菜单')}, {'delete': ('menu_delete', '删除菜单')} ) queryset = Menu.objects.all() serializer_class = MenuSerializers def get_serializer_class(self): if self.action in ['list', 'retrieve']: return MenuListSerializers return MenuSerializers class PermissionViewSet(CustomModelParentViewSet): """ 权限视图 ### 查看权限列表的权限 {'*': ('perm_all', '权限管理')}, {'get': ('perm_list', '查看权限')}, """ perms_map = ( {'*': ('admin', '管理员')}, {'*': ('perm_all', '权限管理')}, {'get': ('perm_list', '查看权限')} )
#!/usr/bin/env python # -*- coding: utf-8 -*- """ @Author : Charles Lai @Contact : qqing_lai@hotmail.com @Time : 2020/9/15 下午4:08 @FileName: views.py @Blog :https://imaojia.com """ logger = logging.getLogger('drf') DEFAULT_SESSION_TIMEOUT = None class DataDictViewSet(CustomModelParentViewSet): """ 数据字典视图 ### 数据字典权限 {'*': ('data_all', '数据字典管理')}, {'get': ('data_list', '查看数据字典')}, {'post': ('data_create', '创建数据字典')}, {'put': ('data_edit', '编辑数据字典')}, {'patch': ('data_edit', '编辑数据字典')}, {'delete': ('data_delete', '删除数据字典')} """ perms_map = ( {'*': ('admin', '管理员')}, {'*': ('data_all', '数据字典管理')}, {'get': ('data_list', '查看数据字典')}, {'post': ('data_create', '创建数据字典')}, {'put': ('data_edit', '编辑数据字典')}, {'patch': ('data_edit', '编辑数据字典')}, {'delete': ('data_delete', '删除数据字典')} ) queryset = DataDict.objects.all() serializer_class = DataDictSerializers filter_backends = ( django_filters.rest_framework.DjangoFilterBackend, SearchFilter, OrderingFilter) filter_fields = ('key', 'value') search_fields = ('key', 'value') def perform_update(self, serializer): serializer.save() cache.delete(f"datadict:{serializer.data['key']}:0") cache.delete(f"datadict:{serializer.data['key']}:1") @action(methods=['GET'], url_path='user', detail=False) def get_user(self, request): """ 获取用户列表 ### 传递参数 force: 0|1 force为1时强制刷新 """ _force = request.query_params.get('force', None) position = request.query_params.get('position', None) _key = str( f'project:users:{self.request.user.id}-{self.request.query_params}') try: data = cache.get(_key) except BaseException as e: cache.delete(_key) data = None if not data or _force: if position: users = UserProfile.objects.exclude( username='thirdparty').filter(position=position) else: users = UserProfile.objects.exclude(username='thirdparty') data = [{'id': i.id, 'first_name': i.first_name, 'username': i.username, 'name': i.name, 'title': i.title, 'position': i.position} for i in users] cache.set(_key, data, timeout=60 * 60 * 24) return Response({'code': 20000, 'data': data}) @action(methods=['GET'], url_path='extra', detail=False) def get_by_key(self, request): """ 通过指定key名获取 参数: key """ key_name = request.query_params.get('key', None) instance = self.queryset.get(key=key_name) serializer = self.get_serializer(instance) data = {'data': serializer.data, 'code': 20000, 'status': 'success'} return Response(data) class AuditLogViewSet(CustomModelViewSet): """ 审计日志视图 ### 审计日志权限 {'get': ('audit_list', '查看审计日志')} """ perms_map = ( {'*': ('admin', '管理员')}, {'get': ('audit_list', '查看审计日志')} ) queryset = AuditLog.objects.all() serializer_class = AuditLogSerializers filter_backends = (django_filters.rest_framework.DjangoFilterBackend, CustomSearchFilter, OrderingFilter) filter_class = AuditLogFilter filter_fields = ('user', 'type', 'action', 'action_ip', 'operator') search_fields = ('user', 'type', 'action', 'action_ip', 'content') def create(self, request, *args, **kwargs): pass def update(self, request, *args, **kwargs): pass def destroy(self, request, *args, **kwargs): pass class MenuViewSet(CustomModelParentViewSet): """ 菜单视图 ### 菜单权限 {'*': ('menu_all', '菜单管理')}, {'get': ('menu_list', '查看菜单')}, {'post': ('menu_create', '创建菜单')}, {'put': ('menu_edit', '编辑菜单')}, {'patch': ('menu_edit', '编辑菜单')}, {'delete': ('menu_delete', '删除菜单')} """ perms_map = ( {'*': ('admin', '管理员')}, {'*': ('menu_all', '菜单管理')}, {'get': ('menu_list', '查看菜单')}, {'post': ('menu_create', '创建菜单')}, {'put': ('menu_edit', '编辑菜单')}, {'patch': ('menu_edit', '编辑菜单')}, {'delete': ('menu_delete', '删除菜单')} ) queryset = 
Menu.objects.all() serializer_class = MenuSerializers def get_serializer_class(self): if self.action in ['list', 'retrieve']: return MenuListSerializers return MenuSerializers class PermissionViewSet(CustomModelParentViewSet): """ 权限视图 ### 查看权限列表的权限 {'*': ('perm_all', '权限管理')}, {'get': ('perm_list', '查看权限')}, """ perms_map = ( {'*': ('admin', '管理员')}, {'*': ('perm_all', '权限管理')}, {'get': ('perm_list', '查看权限')} )
queryset = Permission.objects.all()
1
2023-12-13 03:09:32+00:00
16k
liujin112/PortraitDiffusion
app.py
[ { "identifier": "AttentionBase", "path": "utils/masactrl_utils.py", "snippet": "class AttentionBase:\n def __init__(self):\n self.cur_step = 0\n self.num_att_layers = -1\n self.cur_att_layer = 0\n\n def after_step(self):\n pass\n\n def __call__(self, q, k, v, sim, at...
import os import torch import random import numpy as np import gradio as gr import torch.nn.functional as F from glob import glob from datetime import datetime from diffusers import StableDiffusionPipeline from diffusers import DDIMScheduler, LCMScheduler from PIL import Image,ImageDraw from utils.masactrl_utils import (AttentionBase, regiter_attention_editor_diffusers) from utils.free_lunch_utils import register_upblock2d,register_crossattn_upblock2d,register_free_upblock2d, register_free_crossattn_upblock2d from utils.style_attn_control import MaskPromptedStyleAttentionControl from utils.pipeline import MasaCtrlPipeline from torchvision.utils import save_image from segment_anything import sam_model_registry, SamPredictor
12,178
return None else: base_model = self.personalized_model_list[base_model_dropdown] mid_model = StableDiffusionPipeline.from_single_file(base_model) self.pipeline.vae = mid_model.vae self.pipeline.unet = mid_model.unet self.pipeline.text_encoder = mid_model.text_encoder self.pipeline.to(self.device) self.personal_model_loaded = base_model_dropdown.split('.')[0] print(f'load {base_model_dropdown} model success!') return gr.Dropdown() def update_lora_model(self, lora_model_dropdown,lora_alpha_slider): if self.pipeline is None: gr.Info(f"Please select a pretrained model path.") return None else: if lora_model_dropdown == "none": self.pipeline.unfuse_lora() self.pipeline.unload_lora_weights() self.lora_loaded = None print("Restore lora.") else: lora_model_path = self.lora_model_list[lora_model_dropdown] self.pipeline.load_lora_weights(lora_model_path) self.pipeline.fuse_lora(lora_alpha_slider) self.lora_loaded = lora_model_dropdown.split('.')[0] print(f'load {lora_model_dropdown} LoRA Model Success!') return gr.Dropdown() def load_lcm_lora(self, lora_alpha_slider=1.0): # set scheduler self.pipeline = MasaCtrlPipeline.from_pretrained(self.stable_diffusion_list[0]).to(self.device) self.pipeline.scheduler = LCMScheduler.from_config(self.pipeline.scheduler.config) # load LCM-LoRA self.pipeline.load_lora_weights("latent-consistency/lcm-lora-sdv1-5") self.pipeline.fuse_lora(lora_alpha_slider) self.lcm_lora_loaded = True print(f'load LCM-LoRA model success!') def generate(self, source, style, source_mask, style_mask, start_step, start_layer, Style_attn_step, Method, Style_Guidance, ddim_steps, scale, seed, de_bug, target_prompt, negative_prompt_textbox, inter_latents, freeu, b1, b2, s1, s2, width_slider,height_slider, ): os.makedirs(self.savedir, exist_ok=True) os.makedirs(self.savedir_sample, exist_ok=True) os.makedirs(self.savedir_mask, exist_ok=True) model = self.pipeline if seed != -1 and seed != "": torch.manual_seed(int(seed)) else: torch.seed() seed = torch.initial_seed() sample_count = len(os.listdir(self.savedir_sample)) os.makedirs(os.path.join(self.savedir_mask, f"results_{sample_count}"), exist_ok=True) # ref_prompt = [source_prompt, target_prompt] # prompts = ref_prompt+[''] ref_prompt = [target_prompt, target_prompt] prompts = ref_prompt+[target_prompt] source_image,style_image,source_mask,style_mask = load_mask_images(source,style,source_mask,style_mask,self.device,width_slider,height_slider,out_dir=os.path.join(self.savedir_mask, f"results_{sample_count}")) # global START_CODE, LATENTS_LIST with torch.no_grad(): #import pdb;pdb.set_trace() #prev_source if self.start_code is None and self.latents_list is None: content_style = torch.cat([style_image, source_image], dim=0) editor = AttentionBase() regiter_attention_editor_diffusers(model, editor) st_code, latents_list = model.invert(content_style, ref_prompt, guidance_scale=scale, num_inference_steps=ddim_steps, return_intermediates=True) start_code = torch.cat([st_code, st_code[1:]], dim=0) self.start_code = start_code self.latents_list = latents_list else: start_code = self.start_code latents_list = self.latents_list print('------------------------------------------ Use previous latents ------------------------------------------ ') #["Without mask", "Only masked region", "Seperate Background Foreground"] if Method == "Without mask": style_mask = None source_mask = None only_masked_region = False elif Method == "Only masked region": assert style_mask is not None and source_mask is not None only_masked_region = True else: assert style_mask 
is not None and source_mask is not None only_masked_region = False controller = MaskPromptedStyleAttentionControl(start_step, start_layer, style_attn_step=Style_attn_step, style_guidance=Style_Guidance, style_mask=style_mask, source_mask=source_mask, only_masked_region=only_masked_region, guidance=scale, de_bug=de_bug, ) if freeu: # model.enable_freeu(s1=0.9, s2=0.2, b1=1.2, b2=1.4) print(f'++++++++++++++++++ Run with FreeU {b1}_{b2}_{s1}_{s2} ++++++++++++++++') if Method != "Without mask": register_free_upblock2d(model, b1=b1, b2=b2, s1=s1, s2=s1,source_mask=source_mask)
css = """ .toolbutton { margin-buttom: 0em 0em 0em 0em; max-width: 2.5em; min-width: 2.5em !important; height: 2.5em; } """ class GlobalText: def __init__(self): # config dirs self.basedir = os.getcwd() self.stable_diffusion_dir = os.path.join(self.basedir, "models", "StableDiffusion") self.personalized_model_dir = './models/Stable-diffusion' self.lora_model_dir = './models/Lora' self.savedir = os.path.join(self.basedir, "samples", datetime.now().strftime("Gradio-%Y-%m-%dT%H-%M-%S")) self.savedir_sample = os.path.join(self.savedir, "sample") self.savedir_mask = os.path.join(self.savedir, "mask") self.stable_diffusion_list = ["runwayml/stable-diffusion-v1-5", "latent-consistency/lcm-lora-sdv1-5"] self.personalized_model_list = [] self.lora_model_list = [] # config models self.tokenizer = None self.text_encoder = None self.vae = None self.unet = None self.pipeline = None self.lora_loaded = None self.lcm_lora_loaded = False self.personal_model_loaded = None self.sam_predictor = None self.lora_model_state_dict = {} self.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") # self.refresh_stable_diffusion() self.refresh_personalized_model() self.reset_start_code() def load_base_pipeline(self, model_path): print(f'loading {model_path} model') scheduler = DDIMScheduler.from_pretrained(model_path,subfolder="scheduler") self.pipeline = MasaCtrlPipeline.from_pretrained(model_path, scheduler=scheduler).to(self.device) def refresh_stable_diffusion(self): self.load_base_pipeline(self.stable_diffusion_list[0]) self.lora_loaded = None self.personal_model_loaded = None self.lcm_lora_loaded = False return self.stable_diffusion_list[0] def refresh_personalized_model(self): personalized_model_list = glob(os.path.join(self.personalized_model_dir, "**/*.safetensors"), recursive=True) self.personalized_model_list = {os.path.basename(file): file for file in personalized_model_list} lora_model_list = glob(os.path.join(self.lora_model_dir, "**/*.safetensors"), recursive=True) self.lora_model_list = {os.path.basename(file): file for file in lora_model_list} def update_stable_diffusion(self, stable_diffusion_dropdown): if stable_diffusion_dropdown == 'latent-consistency/lcm-lora-sdv1-5': self.load_lcm_lora() else: self.load_base_pipeline(stable_diffusion_dropdown) self.lora_loaded = None self.personal_model_loaded = None return gr.Dropdown() def update_base_model(self, base_model_dropdown): if self.pipeline is None: gr.Info(f"Please select a pretrained model path.") return None else: base_model = self.personalized_model_list[base_model_dropdown] mid_model = StableDiffusionPipeline.from_single_file(base_model) self.pipeline.vae = mid_model.vae self.pipeline.unet = mid_model.unet self.pipeline.text_encoder = mid_model.text_encoder self.pipeline.to(self.device) self.personal_model_loaded = base_model_dropdown.split('.')[0] print(f'load {base_model_dropdown} model success!') return gr.Dropdown() def update_lora_model(self, lora_model_dropdown,lora_alpha_slider): if self.pipeline is None: gr.Info(f"Please select a pretrained model path.") return None else: if lora_model_dropdown == "none": self.pipeline.unfuse_lora() self.pipeline.unload_lora_weights() self.lora_loaded = None print("Restore lora.") else: lora_model_path = self.lora_model_list[lora_model_dropdown] self.pipeline.load_lora_weights(lora_model_path) self.pipeline.fuse_lora(lora_alpha_slider) self.lora_loaded = lora_model_dropdown.split('.')[0] print(f'load {lora_model_dropdown} LoRA Model Success!') return gr.Dropdown() def 
load_lcm_lora(self, lora_alpha_slider=1.0): # set scheduler self.pipeline = MasaCtrlPipeline.from_pretrained(self.stable_diffusion_list[0]).to(self.device) self.pipeline.scheduler = LCMScheduler.from_config(self.pipeline.scheduler.config) # load LCM-LoRA self.pipeline.load_lora_weights("latent-consistency/lcm-lora-sdv1-5") self.pipeline.fuse_lora(lora_alpha_slider) self.lcm_lora_loaded = True print(f'load LCM-LoRA model success!') def generate(self, source, style, source_mask, style_mask, start_step, start_layer, Style_attn_step, Method, Style_Guidance, ddim_steps, scale, seed, de_bug, target_prompt, negative_prompt_textbox, inter_latents, freeu, b1, b2, s1, s2, width_slider,height_slider, ): os.makedirs(self.savedir, exist_ok=True) os.makedirs(self.savedir_sample, exist_ok=True) os.makedirs(self.savedir_mask, exist_ok=True) model = self.pipeline if seed != -1 and seed != "": torch.manual_seed(int(seed)) else: torch.seed() seed = torch.initial_seed() sample_count = len(os.listdir(self.savedir_sample)) os.makedirs(os.path.join(self.savedir_mask, f"results_{sample_count}"), exist_ok=True) # ref_prompt = [source_prompt, target_prompt] # prompts = ref_prompt+[''] ref_prompt = [target_prompt, target_prompt] prompts = ref_prompt+[target_prompt] source_image,style_image,source_mask,style_mask = load_mask_images(source,style,source_mask,style_mask,self.device,width_slider,height_slider,out_dir=os.path.join(self.savedir_mask, f"results_{sample_count}")) # global START_CODE, LATENTS_LIST with torch.no_grad(): #import pdb;pdb.set_trace() #prev_source if self.start_code is None and self.latents_list is None: content_style = torch.cat([style_image, source_image], dim=0) editor = AttentionBase() regiter_attention_editor_diffusers(model, editor) st_code, latents_list = model.invert(content_style, ref_prompt, guidance_scale=scale, num_inference_steps=ddim_steps, return_intermediates=True) start_code = torch.cat([st_code, st_code[1:]], dim=0) self.start_code = start_code self.latents_list = latents_list else: start_code = self.start_code latents_list = self.latents_list print('------------------------------------------ Use previous latents ------------------------------------------ ') #["Without mask", "Only masked region", "Seperate Background Foreground"] if Method == "Without mask": style_mask = None source_mask = None only_masked_region = False elif Method == "Only masked region": assert style_mask is not None and source_mask is not None only_masked_region = True else: assert style_mask is not None and source_mask is not None only_masked_region = False controller = MaskPromptedStyleAttentionControl(start_step, start_layer, style_attn_step=Style_attn_step, style_guidance=Style_Guidance, style_mask=style_mask, source_mask=source_mask, only_masked_region=only_masked_region, guidance=scale, de_bug=de_bug, ) if freeu: # model.enable_freeu(s1=0.9, s2=0.2, b1=1.2, b2=1.4) print(f'++++++++++++++++++ Run with FreeU {b1}_{b2}_{s1}_{s2} ++++++++++++++++') if Method != "Without mask": register_free_upblock2d(model, b1=b1, b2=b2, s1=s1, s2=s1,source_mask=source_mask)
register_free_crossattn_upblock2d(model, b1=b1, b2=b2, s1=s1, s2=s1,source_mask=source_mask)
5
2023-12-06 01:18:39+00:00
16k
AsuradaYuci/TF-CLIP
datasets/make_dataloader_clipreid.py
[ { "identifier": "VideoDataset", "path": "datasets/video_loader_xh.py", "snippet": "class VideoDataset(Dataset):\n \"\"\"Video Person ReID Dataset.\n Note batch data has shape (batch, seq_len, channel, height, width).\n \"\"\"\n sample_methods = ['evenly', 'random', 'dense']\n\n def __init...
import torch import utils.spatial_transforms as ST import utils.temporal_transforms as TT import utils.transforms as T import utils.seqtransforms as SeqT from torch.utils.data import DataLoader from datasets.video_loader_xh import VideoDataset from datasets.samplers import RandomIdentitySampler, RandomIdentitySamplerForSeq, RandomIdentitySamplerWYQ from datasets.seqpreprocessor import SeqTrainPreprocessor, SeqTestPreprocessor from datasets.set.mars import Mars from datasets.set.ilidsvidsequence import iLIDSVIDSEQUENCE from datasets.set.lsvid import LSVID
11,830
# from torchvision.transforms import InterpolationMode # import torchvision.transforms as T __factory = { 'mars': Mars, 'ilidsvidsequence': iLIDSVIDSEQUENCE,
# from torchvision.transforms import InterpolationMode # import torchvision.transforms as T __factory = { 'mars': Mars, 'ilidsvidsequence': iLIDSVIDSEQUENCE,
'lsvid': LSVID
8
2023-12-11 04:03:46+00:00
16k
MarilynKeller/aitviewer-skel
aitviewer/renderables/rigid_bodies.py
[ { "identifier": "Arrows", "path": "aitviewer/renderables/arrows.py", "snippet": "class Arrows(Node):\n \"\"\"\n Draw an arrow.\n \"\"\"\n\n def __init__(\n self,\n origins,\n tips,\n r_base=0.01,\n r_head=0.02,\n p=0.1,\n color=(0.0, 0.0, 0.5,...
import numpy as np from aitviewer.renderables.arrows import Arrows from aitviewer.renderables.spheres import Spheres from aitviewer.scene.node import Node from aitviewer.utils.utils import ( compute_union_of_bounds, compute_union_of_current_bounds, )
11,651
# Copyright (C) 2023 ETH Zurich, Manuel Kaufmann, Velko Vechev, Dario Mylonopoulos class RigidBodies(Node): """ A sequence of N 3D positions and orientations in space. """ def __init__( self, rb_pos, rb_ori, radius=0.02, length=0.2, radius_cylinder=None, color=(0.0, 1.0, 0.5, 1.0), icon="\u0086", **kwargs, ): """ Initializer. :param rb_pos: A np array of shape (F, N, 3) containing N rigid-body centers over F time steps. :param rb_ori: A np array of shape (F, N, 3, 3) containing N rigid-body orientations over F time steps. :param radius: Radius of the sphere at the origin of the rigid body. :param length: Length of arrows representing the orientation of the rigid body. :param radius_cylinder: Radius of the cylinder representing the orientation, default is length / 50 :param color: Color of the rigid body centers (4-tuple). """ self._rb_pos = rb_pos[np.newaxis] if rb_pos.ndim == 2 else rb_pos self._rb_ori = rb_ori[np.newaxis] if rb_ori.ndim == 3 else rb_ori super(RigidBodies, self).__init__(n_frames=self.rb_pos.shape[0], color=color, icon=icon, **kwargs) self.radius = radius self.length = length self.spheres = Spheres(rb_pos, radius=radius, color=color, is_selectable=False) self._add_node(self.spheres, show_in_hierarchy=False) self.coords = [] r_base = radius_cylinder or length / 50 r_head = r_base * 2 c = [0.0, 0.0, 0.0, 1.0] for i in range(3): line = self.rb_ori[..., :, i] line = line / np.linalg.norm(line, axis=-1, keepdims=True) * length color = c.copy() color[i] = 1.0 axs = Arrows( self.rb_pos, self.rb_pos + line, r_base=r_base, r_head=r_head, color=tuple(color), is_selectable=False, ) self._add_node(axs, show_in_hierarchy=False) self.coords.append(axs) @Node.color.setter def color(self, color): self.material.color = color self.spheres.color = color @property def current_rb_pos(self): idx = self.current_frame_id if self.rb_pos.shape[0] > 1 else 0 return self.rb_pos[idx] @current_rb_pos.setter def current_rb_pos(self, pos): idx = self.current_frame_id if self.rb_pos.shape[0] > 1 else 0 self.rb_pos[idx] = pos @property def current_rb_ori(self): idx = self.current_frame_id if self.rb_ori.shape[0] > 1 else 0 return self.rb_ori[idx] @current_rb_ori.setter def current_rb_ori(self, ori): idx = self.current_frame_id if self.rb_ori.shape[0] > 1 else 0 self.rb_ori[idx] = ori @property def rb_pos(self): return self._rb_pos @rb_pos.setter def rb_pos(self, rb_pos): self._rb_pos = rb_pos if len(rb_pos.shape) == 3 else rb_pos[np.newaxis] self.n_frames = self._rb_pos.shape[0] @property def rb_ori(self): return self._rb_ori @rb_ori.setter def rb_ori(self, rb_ori): self._rb_ori = rb_ori if len(rb_ori.shape) == 4 else rb_ori[np.newaxis] self.n_frames = self._rb_ori.shape[0] @property def bounds(self):
# Copyright (C) 2023 ETH Zurich, Manuel Kaufmann, Velko Vechev, Dario Mylonopoulos class RigidBodies(Node): """ A sequence of N 3D positions and orientations in space. """ def __init__( self, rb_pos, rb_ori, radius=0.02, length=0.2, radius_cylinder=None, color=(0.0, 1.0, 0.5, 1.0), icon="\u0086", **kwargs, ): """ Initializer. :param rb_pos: A np array of shape (F, N, 3) containing N rigid-body centers over F time steps. :param rb_ori: A np array of shape (F, N, 3, 3) containing N rigid-body orientations over F time steps. :param radius: Radius of the sphere at the origin of the rigid body. :param length: Length of arrows representing the orientation of the rigid body. :param radius_cylinder: Radius of the cylinder representing the orientation, default is length / 50 :param color: Color of the rigid body centers (4-tuple). """ self._rb_pos = rb_pos[np.newaxis] if rb_pos.ndim == 2 else rb_pos self._rb_ori = rb_ori[np.newaxis] if rb_ori.ndim == 3 else rb_ori super(RigidBodies, self).__init__(n_frames=self.rb_pos.shape[0], color=color, icon=icon, **kwargs) self.radius = radius self.length = length self.spheres = Spheres(rb_pos, radius=radius, color=color, is_selectable=False) self._add_node(self.spheres, show_in_hierarchy=False) self.coords = [] r_base = radius_cylinder or length / 50 r_head = r_base * 2 c = [0.0, 0.0, 0.0, 1.0] for i in range(3): line = self.rb_ori[..., :, i] line = line / np.linalg.norm(line, axis=-1, keepdims=True) * length color = c.copy() color[i] = 1.0 axs = Arrows( self.rb_pos, self.rb_pos + line, r_base=r_base, r_head=r_head, color=tuple(color), is_selectable=False, ) self._add_node(axs, show_in_hierarchy=False) self.coords.append(axs) @Node.color.setter def color(self, color): self.material.color = color self.spheres.color = color @property def current_rb_pos(self): idx = self.current_frame_id if self.rb_pos.shape[0] > 1 else 0 return self.rb_pos[idx] @current_rb_pos.setter def current_rb_pos(self, pos): idx = self.current_frame_id if self.rb_pos.shape[0] > 1 else 0 self.rb_pos[idx] = pos @property def current_rb_ori(self): idx = self.current_frame_id if self.rb_ori.shape[0] > 1 else 0 return self.rb_ori[idx] @current_rb_ori.setter def current_rb_ori(self, ori): idx = self.current_frame_id if self.rb_ori.shape[0] > 1 else 0 self.rb_ori[idx] = ori @property def rb_pos(self): return self._rb_pos @rb_pos.setter def rb_pos(self, rb_pos): self._rb_pos = rb_pos if len(rb_pos.shape) == 3 else rb_pos[np.newaxis] self.n_frames = self._rb_pos.shape[0] @property def rb_ori(self): return self._rb_ori @rb_ori.setter def rb_ori(self, rb_ori): self._rb_ori = rb_ori if len(rb_ori.shape) == 4 else rb_ori[np.newaxis] self.n_frames = self._rb_ori.shape[0] @property def bounds(self):
return compute_union_of_bounds(self.coords)
3
2023-12-07 16:13:50+00:00
16k
nexB/dejacode
dejacode/urls.py
[ { "identifier": "ComponentViewSet", "path": "component_catalog/api.py", "snippet": "class ComponentViewSet(CreateRetrieveUpdateListViewSet):\n queryset = Component.objects.all()\n serializer_class = ComponentSerializer\n filterset_class = ComponentFilterSet\n lookup_field = \"uuid\"\n sea...
from django.conf import settings from django.conf.urls import include from django.contrib import admin from django.contrib.auth import views as auth_views from django.contrib.auth.decorators import login_required from django.template.loader import render_to_string from django.urls import path from django.views.defaults import page_not_found from django.views.generic import RedirectView from django.views.generic import TemplateView from notifications.views import mark_all_as_read from rest_framework.documentation import include_docs_urls from rest_framework.routers import DefaultRouter from component_catalog.api import ComponentViewSet from component_catalog.api import PackageViewSet from component_catalog.api import SubcomponentViewSet from component_catalog.views import send_scan_notification from dje import two_factor from dje.admin import dejacode_site from dje.api import ExternalReferenceViewSet from dje.forms import DejaCodeAuthenticationForm from dje.registration import DejaCodeActivationView from dje.registration import DejaCodeRegistrationForm from dje.views import AccountProfileView from dje.views import AllNotificationsList from dje.views import DataspaceAwareAutocompleteLookup from dje.views import DataspaceAwareRelatedLookup from dje.views import GlobalSearchListView from dje.views import IntegrationsStatusView from dje.views import UnreadNotificationsList from dje.views import home_view from dje.views import index_dispatch from dje.views import urn_resolve_view from license_library.api import LicenseAnnotationViewSet from license_library.api import LicenseViewSet from organization.api import OwnerViewSet from policy.api import UsagePolicyViewSet from product_portfolio.api import CodebaseResourceViewSet from product_portfolio.api import ProductComponentViewSet from product_portfolio.api import ProductPackageViewSet from product_portfolio.api import ProductViewSet from reporting.api import ReportViewSet from workflow.api import RequestTemplateViewSet from workflow.api import RequestViewSet from django_registration.backends.activation.views import RegistrationView
11,751
# # Copyright (c) nexB Inc. and others. All rights reserved. # DejaCode is a trademark of nexB Inc. # SPDX-License-Identifier: AGPL-3.0-only # See https://github.com/nexB/dejacode for support or download. # See https://aboutcode.org for more information about AboutCode FOSS projects. # # Replace the default admin site with the DejaCode one. admin.site = dejacode_site # Restframework API api_router = DefaultRouter() api_router.register("owners", OwnerViewSet) api_router.register("licenses", LicenseViewSet) api_router.register("license_annotations", LicenseAnnotationViewSet) api_router.register("components", ComponentViewSet) api_router.register("subcomponents", SubcomponentViewSet) api_router.register("packages", PackageViewSet) api_router.register("products", ProductViewSet) api_router.register("product_components", ProductComponentViewSet) api_router.register("product_packages", ProductPackageViewSet) api_router.register("codebase_resources", CodebaseResourceViewSet) api_router.register("request_templates", RequestTemplateViewSet) api_router.register("requests", RequestViewSet)
# # Copyright (c) nexB Inc. and others. All rights reserved. # DejaCode is a trademark of nexB Inc. # SPDX-License-Identifier: AGPL-3.0-only # See https://github.com/nexB/dejacode for support or download. # See https://aboutcode.org for more information about AboutCode FOSS projects. # # Replace the default admin site with the DejaCode one. admin.site = dejacode_site # Restframework API api_router = DefaultRouter() api_router.register("owners", OwnerViewSet) api_router.register("licenses", LicenseViewSet) api_router.register("license_annotations", LicenseAnnotationViewSet) api_router.register("components", ComponentViewSet) api_router.register("subcomponents", SubcomponentViewSet) api_router.register("packages", PackageViewSet) api_router.register("products", ProductViewSet) api_router.register("product_components", ProductComponentViewSet) api_router.register("product_packages", ProductPackageViewSet) api_router.register("codebase_resources", CodebaseResourceViewSet) api_router.register("request_templates", RequestTemplateViewSet) api_router.register("requests", RequestViewSet)
api_router.register("reports", ReportViewSet)
28
2023-12-07 16:57:42+00:00
16k
wusize/CLIM
src/open_clip/factory.py
[ { "identifier": "OPENAI_DATASET_MEAN", "path": "src/open_clip/constants.py", "snippet": "OPENAI_DATASET_MEAN = (0.48145466, 0.4578275, 0.40821073)" }, { "identifier": "OPENAI_DATASET_STD", "path": "src/open_clip/constants.py", "snippet": "OPENAI_DATASET_STD = (0.26862954, 0.26130258, 0.2...
import json import logging import os import pathlib import re import torch from copy import deepcopy from pathlib import Path from typing import Any, Dict, Optional, Tuple, Union from .constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD from .model import CLIP, CustomTextCLIP, convert_weights_to_lp, convert_to_custom_text_state_dict,\ resize_pos_embed, get_cast_dtype from .coca_model import CoCa from .loss import ClipLoss, DistillClipLoss, CoCaLoss from .openai import load_openai_model from .pretrained import is_pretrained_cfg, get_pretrained_cfg, \ download_pretrained, list_pretrained_tags_by_model, download_pretrained_from_hf from .transform import image_transform, AugmentationCfg, det_image_transform from .tokenizer import HFTokenizer, tokenize from open_clip import eva_clip from open_clip import eva_clip
13,347
def load_checkpoint(model, checkpoint_path, strict=True): state_dict = load_state_dict(checkpoint_path) # detect old format and make compatible with new format if 'positional_embedding' in state_dict and not hasattr(model, 'positional_embedding'): state_dict = convert_to_custom_text_state_dict(state_dict) resize_pos_embed(state_dict, model) incompatible_keys = model.load_state_dict(state_dict, strict=strict) return incompatible_keys def create_model( model_name: str, pretrained: Optional[str] = None, precision: str = 'fp32', device: Union[str, torch.device] = 'cpu', jit: bool = False, force_quick_gelu: bool = False, force_custom_text: bool = False, force_patch_dropout: Optional[float] = None, force_image_size: Optional[Union[int, Tuple[int, int]]] = None, pretrained_image: bool = False, pretrained_hf: bool = True, cache_dir: Optional[str] = None, output_dict: Optional[bool] = None, require_pretrained: bool = False, ): has_hf_hub_prefix = model_name.startswith(HF_HUB_PREFIX) if has_hf_hub_prefix: model_id = model_name[len(HF_HUB_PREFIX):] checkpoint_path = download_pretrained_from_hf(model_id, cache_dir=cache_dir) config_path = download_pretrained_from_hf(model_id, filename='open_clip_config.json', cache_dir=cache_dir) with open(config_path, 'r', encoding='utf-8') as f: config = json.load(f) pretrained_cfg = config['preprocess_cfg'] model_cfg = config['model_cfg'] else: model_name = model_name.replace('/', '-') # for callers using old naming with / in ViT names checkpoint_path = None pretrained_cfg = {} model_cfg = None if isinstance(device, str): device = torch.device(device) if pretrained == 'eva': return eva_clip.create_model(model_name=model_name, pretrained=cache_dir, force_custom_clip=True, precision=precision, device=device,) if pretrained and pretrained.lower() == 'openai': logging.info(f'Loading pretrained {model_name} from OpenAI.') model = load_openai_model( model_name, precision=precision, device=device, jit=jit, cache_dir=cache_dir, ) # to always output dict even if it is clip if output_dict and hasattr(model, "output_dict"): model.output_dict = True else: model_cfg = model_cfg or get_model_config(model_name) if model_cfg is not None: logging.info(f'Loaded {model_name} model config.') else: logging.error(f'Model config for {model_name} not found; available models {list_models()}.') raise RuntimeError(f'Model config for {model_name} not found.') if force_quick_gelu: # override for use of QuickGELU on non-OpenAI transformer models model_cfg["quick_gelu"] = True if force_patch_dropout is not None: # override the default patch dropout value model_cfg["vision_cfg"]["patch_dropout"] = force_patch_dropout if force_image_size is not None: # override model config's image size model_cfg["vision_cfg"]["image_size"] = force_image_size if pretrained_image: if 'timm_model_name' in model_cfg.get('vision_cfg', {}): # pretrained weight loading for timm models set via vision_cfg model_cfg['vision_cfg']['timm_model_pretrained'] = True else: assert False, 'pretrained image towers currently only supported for timm models' cast_dtype = get_cast_dtype(precision) is_hf_model = 'hf_model_name' in model_cfg.get('text_cfg', {}) custom_text = model_cfg.pop('custom_text', False) or force_custom_text or is_hf_model if custom_text: if is_hf_model: model_cfg['text_cfg']['hf_model_pretrained'] = pretrained_hf if "coca" in model_name: model = CoCa(**model_cfg, cast_dtype=cast_dtype) else: model = CustomTextCLIP(**model_cfg, cast_dtype=cast_dtype) else: model = CLIP(**model_cfg, cast_dtype=cast_dtype) 
pretrained_loaded = False if pretrained: checkpoint_path = '' pretrained_cfg = get_pretrained_cfg(model_name, pretrained) if pretrained_cfg: checkpoint_path = download_pretrained(pretrained_cfg, cache_dir=cache_dir) elif os.path.exists(pretrained): checkpoint_path = pretrained if checkpoint_path: print(f'Loading pretrained {model_name} weights ({pretrained}).', flush=True) logging.info(f'Loading pretrained {model_name} weights ({pretrained}).') load_checkpoint(model, checkpoint_path) else: error_str = ( f'Pretrained weights ({pretrained}) not found for model {model_name}.'
HF_HUB_PREFIX = 'hf-hub:' _MODEL_CONFIG_PATHS = [Path(__file__).parent / f"model_configs/"] _MODEL_CONFIGS = {} # directory (model_name: config) of model architecture configs def _natural_key(string_): return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())] def _rescan_model_configs(): global _MODEL_CONFIGS config_ext = ('.json',) config_files = [] for config_path in _MODEL_CONFIG_PATHS: if config_path.is_file() and config_path.suffix in config_ext: config_files.append(config_path) elif config_path.is_dir(): for ext in config_ext: config_files.extend(config_path.glob(f'*{ext}')) for cf in config_files: with open(cf, 'r') as f: model_cfg = json.load(f) if all(a in model_cfg for a in ('embed_dim', 'vision_cfg', 'text_cfg')): _MODEL_CONFIGS[cf.stem] = model_cfg _MODEL_CONFIGS = {k: v for k, v in sorted(_MODEL_CONFIGS.items(), key=lambda x: _natural_key(x[0]))} _rescan_model_configs() # initial populate of model config registry def list_models(): """ enumerate available model architectures based on config files """ return list(_MODEL_CONFIGS.keys()) def add_model_config(path): """ add model config path or file and update registry """ if not isinstance(path, Path): path = Path(path) _MODEL_CONFIG_PATHS.append(path) _rescan_model_configs() def get_model_config(model_name): if model_name in _MODEL_CONFIGS: return deepcopy(_MODEL_CONFIGS[model_name]) else: return None def get_tokenizer(model_name): if 'EVA' in model_name: return eva_clip.get_tokenizer(model_name) if model_name.startswith(HF_HUB_PREFIX): tokenizer = HFTokenizer(model_name[len(HF_HUB_PREFIX):]) else: config = get_model_config(model_name) tokenizer = HFTokenizer( config['text_cfg']['hf_tokenizer_name']) if 'hf_tokenizer_name' in config['text_cfg'] else tokenize return tokenizer def load_state_dict(checkpoint_path: str, map_location='cpu'): checkpoint = torch.load(checkpoint_path, map_location=map_location) if isinstance(checkpoint, dict) and 'state_dict' in checkpoint: state_dict = checkpoint['state_dict'] else: state_dict = checkpoint if next(iter(state_dict.items()))[0].startswith('module'): state_dict = {k[7:]: v for k, v in state_dict.items()} return state_dict def load_checkpoint(model, checkpoint_path, strict=True): state_dict = load_state_dict(checkpoint_path) # detect old format and make compatible with new format if 'positional_embedding' in state_dict and not hasattr(model, 'positional_embedding'): state_dict = convert_to_custom_text_state_dict(state_dict) resize_pos_embed(state_dict, model) incompatible_keys = model.load_state_dict(state_dict, strict=strict) return incompatible_keys def create_model( model_name: str, pretrained: Optional[str] = None, precision: str = 'fp32', device: Union[str, torch.device] = 'cpu', jit: bool = False, force_quick_gelu: bool = False, force_custom_text: bool = False, force_patch_dropout: Optional[float] = None, force_image_size: Optional[Union[int, Tuple[int, int]]] = None, pretrained_image: bool = False, pretrained_hf: bool = True, cache_dir: Optional[str] = None, output_dict: Optional[bool] = None, require_pretrained: bool = False, ): has_hf_hub_prefix = model_name.startswith(HF_HUB_PREFIX) if has_hf_hub_prefix: model_id = model_name[len(HF_HUB_PREFIX):] checkpoint_path = download_pretrained_from_hf(model_id, cache_dir=cache_dir) config_path = download_pretrained_from_hf(model_id, filename='open_clip_config.json', cache_dir=cache_dir) with open(config_path, 'r', encoding='utf-8') as f: config = json.load(f) pretrained_cfg = config['preprocess_cfg'] model_cfg = 
config['model_cfg'] else: model_name = model_name.replace('/', '-') # for callers using old naming with / in ViT names checkpoint_path = None pretrained_cfg = {} model_cfg = None if isinstance(device, str): device = torch.device(device) if pretrained == 'eva': return eva_clip.create_model(model_name=model_name, pretrained=cache_dir, force_custom_clip=True, precision=precision, device=device,) if pretrained and pretrained.lower() == 'openai': logging.info(f'Loading pretrained {model_name} from OpenAI.') model = load_openai_model( model_name, precision=precision, device=device, jit=jit, cache_dir=cache_dir, ) # to always output dict even if it is clip if output_dict and hasattr(model, "output_dict"): model.output_dict = True else: model_cfg = model_cfg or get_model_config(model_name) if model_cfg is not None: logging.info(f'Loaded {model_name} model config.') else: logging.error(f'Model config for {model_name} not found; available models {list_models()}.') raise RuntimeError(f'Model config for {model_name} not found.') if force_quick_gelu: # override for use of QuickGELU on non-OpenAI transformer models model_cfg["quick_gelu"] = True if force_patch_dropout is not None: # override the default patch dropout value model_cfg["vision_cfg"]["patch_dropout"] = force_patch_dropout if force_image_size is not None: # override model config's image size model_cfg["vision_cfg"]["image_size"] = force_image_size if pretrained_image: if 'timm_model_name' in model_cfg.get('vision_cfg', {}): # pretrained weight loading for timm models set via vision_cfg model_cfg['vision_cfg']['timm_model_pretrained'] = True else: assert False, 'pretrained image towers currently only supported for timm models' cast_dtype = get_cast_dtype(precision) is_hf_model = 'hf_model_name' in model_cfg.get('text_cfg', {}) custom_text = model_cfg.pop('custom_text', False) or force_custom_text or is_hf_model if custom_text: if is_hf_model: model_cfg['text_cfg']['hf_model_pretrained'] = pretrained_hf if "coca" in model_name: model = CoCa(**model_cfg, cast_dtype=cast_dtype) else: model = CustomTextCLIP(**model_cfg, cast_dtype=cast_dtype) else: model = CLIP(**model_cfg, cast_dtype=cast_dtype) pretrained_loaded = False if pretrained: checkpoint_path = '' pretrained_cfg = get_pretrained_cfg(model_name, pretrained) if pretrained_cfg: checkpoint_path = download_pretrained(pretrained_cfg, cache_dir=cache_dir) elif os.path.exists(pretrained): checkpoint_path = pretrained if checkpoint_path: print(f'Loading pretrained {model_name} weights ({pretrained}).', flush=True) logging.info(f'Loading pretrained {model_name} weights ({pretrained}).') load_checkpoint(model, checkpoint_path) else: error_str = ( f'Pretrained weights ({pretrained}) not found for model {model_name}.'
f'Available pretrained tags ({list_pretrained_tags_by_model(model_name)}.')
16
2023-12-09 05:43:08+00:00
16k
moonshot-admin/moonshot
third-party/tqdm-4.66.1/tqdm/auto.py
[ { "identifier": "TqdmExperimentalWarning", "path": "third-party/tqdm-4.66.1/tqdm/std.py", "snippet": "class TqdmExperimentalWarning(TqdmWarning, FutureWarning):\n \"\"\"beta feature, unstable API and behaviour\"\"\"\n pass" }, { "identifier": "tqdm", "path": "third-party/tqdm-4.66.1/tq...
import warnings from .std import TqdmExperimentalWarning from .autonotebook import tqdm as notebook_tqdm from .asyncio import tqdm as asyncio_tqdm from .std import tqdm as std_tqdm
12,677
""" Enables multiple commonly used features. Method resolution order: - `tqdm.autonotebook` without import warnings - `tqdm.asyncio` - `tqdm.std` base class Usage: >>> from tqdm.auto import trange, tqdm >>> for i in trange(10): ... ... """ with warnings.catch_warnings():
""" Enables multiple commonly used features. Method resolution order: - `tqdm.autonotebook` without import warnings - `tqdm.asyncio` - `tqdm.std` base class Usage: >>> from tqdm.auto import trange, tqdm >>> for i in trange(10): ... ... """ with warnings.catch_warnings():
warnings.simplefilter("ignore", category=TqdmExperimentalWarning)
0
2023-12-14 07:43:03+00:00
16k
LkPrtctrd/BSL-V53
Heart/Logic/LogicLaserMessageFactory.py
[ { "identifier": "ClientHelloMessage", "path": "Heart/Packets/Client/Authentification/ClientHelloMessage.py", "snippet": "class ClientHelloMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields)...
from Heart.Packets.Client.Authentification.ClientHelloMessage import ClientHelloMessage from Heart.Packets.Client.Authentification.LoginMessage import LoginMessage from Heart.Packets.Client.Battle.AskForBattleEndMessage import AskForBattleEndMessage from Heart.Packets.Client.Home.ChangeAvatarNameMessage import ChangeAvatarNameMessage from Heart.Packets.Client.Home.EndClientTurnMessage import EndClientTurnMessage from Heart.Packets.Client.Home.GoHomeFromOfflinePractiseMessage import GoHomeFromOfflinePractiseMessage from Heart.Packets.Client.Home.GoHomeMessage import GoHomeMessage from Heart.Packets.Client.Home.GetPlayerProfileMessage import GetPlayerProfileMessage from Heart.Packets.Client.Home.AskForAllianceDataMessage import AskForAllianceDataMessage from Heart.Packets.Client.Socket.KeepAliveMessage import KeepAliveMessage from Heart.Packets.Server.Authentification.LoginFailedMessage import LoginFailedMessage from Heart.Packets.Server.Authentification.LoginOkMessage import LoginOkMessage from Heart.Packets.Server.Authentification.OutOfSyncMessage import OutOfSyncMessage from Heart.Packets.Server.Authentification.ServerHelloMessage import ServerHelloMessage from Heart.Packets.Server.Battle.BattleEndMessage import BattleEndMessage from Heart.Packets.Server.Home.AvailableServerCommandMessage import AvailableServerCommandMessage from Heart.Packets.Server.Home.LobbyInfoMessage import LobbyInfoMessage from Heart.Packets.Server.Home.OwnHomeDataMessage import OwnHomeDataMessage from Heart.Packets.Server.Socket.KeepAliveServerMessage import KeepAliveServerMessage from Heart.Packets.Server.Home.PlayerProfileMessage import PlayerProfileMessage from Heart.Packets.Server.Home.MyAllianceMessage import MyAllianceMessage from Heart.Packets.Server.Home.AllianceDataMessage import AllianceDataMessage
13,938
class LogicLaserMessageFactory: messagesList = { 10055: 'AskPlayerJWTokenMessage', 10099: 'ClientCryptoErrorMessage',
class LogicLaserMessageFactory: messagesList = { 10055: 'AskPlayerJWTokenMessage', 10099: 'ClientCryptoErrorMessage',
10100: ClientHelloMessage,
0
2023-12-14 18:57:56+00:00
16k
pan-x-c/EE-LLM
megatron/core/models/gpt/gpt_layer_specs.py
[ { "identifier": "get_bias_dropout_add", "path": "megatron/core/fusions/fused_bias_dropout.py", "snippet": "def get_bias_dropout_add(training, fused):\n if fused:\n # jit scripting for a nn.module (with dropout) is not\n # triggering the fusion kernel. For now, we use two\n # diff...
from megatron.core.fusions.fused_bias_dropout import get_bias_dropout_add from megatron.core.fusions.fused_layer_norm import FusedLayerNorm from megatron.core.tensor_parallel.layers import ColumnParallelLinear, RowParallelLinear from megatron.core.transformer.attention import SelfAttention, SelfAttentionSubmodules from megatron.core.transformer.custom_layers.transformer_engine import ( TEDotProductAttention, TELayerNormColumnParallelLinear, TERowParallelLinear, ) from megatron.core.transformer.dot_product_attention import DotProductAttention from megatron.core.transformer.enums import AttnMaskType from megatron.core.transformer.mlp import MLP, MLPSubmodules from megatron.core.transformer.spec_utils import ModuleSpec from megatron.core.transformer.switch_mlp import SwitchMLP from megatron.core.transformer.transformer_layer import TransformerLayer, TransformerLayerSubmodules
12,884
# Use this spec to use lower level Transformer Engine modules (required for fp8 training) gpt_layer_with_transformer_engine_spec = ModuleSpec( module=TransformerLayer, submodules=TransformerLayerSubmodules( self_attention=ModuleSpec( module=SelfAttention,
# Use this spec to use lower level Transformer Engine modules (required for fp8 training) gpt_layer_with_transformer_engine_spec = ModuleSpec( module=TransformerLayer, submodules=TransformerLayerSubmodules( self_attention=ModuleSpec( module=SelfAttention,
params={"attn_mask_type": AttnMaskType.causal},
10
2023-12-07 08:29:38+00:00
16k
tommy-xq/SA2VP
vit_train_sa2vp.py
[ { "identifier": "create_optimizer", "path": "optim_factory.py", "snippet": "def create_optimizer(args, model, get_num_layer=None, get_layer_scale=None, filter_bias_and_bn=True, skip_list=None):\n opt_lower = args.opt.lower()\n weight_decay = args.weight_decay\n if weight_decay and filter_bias_a...
import argparse import datetime import numpy as np import time import torch import torch.nn as nn import torch.backends.cudnn as cudnn import json import os import utils import random import deepspeed from pathlib import Path from time import sleep from timm.data.mixup import Mixup from timm.models import create_model from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy from timm.utils import ModelEma from optim_factory import create_optimizer, get_parameter_groups, LayerDecayValueAssigner from datasets import build_dataset from datasets import build_beit_pretraining_dataset, build_beit_pretraining_dataset_val from engine_for_train import train_one_epoch, evaluate # engine for sa2vp from utils import NativeScalerWithGradNormCount as NativeScaler from scipy import interpolate from timm.models.layers import trunc_normal_ from functools import partial from vpt_main.src.models.vit_backbones.vit_tinypara import VisionTransformer # choose model to train from deepspeed import DeepSpeedConfig
11,119
dataset_train, sampler=sampler_train, batch_size=args.batch_size, num_workers=args.num_workers, pin_memory=args.pin_mem, drop_last=True, ) if dataset_val is not None: data_loader_val = torch.utils.data.DataLoader( dataset_val, sampler=sampler_val, batch_size=int(4*args.batch_size), num_workers=args.num_workers, pin_memory=args.pin_mem, drop_last=False ) else: data_loader_val = None mixup_fn = None mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None if mixup_active: print("Mixup is activated!") mixup_fn = Mixup( mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax, prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode, label_smoothing=args.smoothing, num_classes=args.nb_classes) model = Dual_model(args) n_parameters = sum(p.numel() for p in model.vit_base.parameters() if p.requires_grad) frozen_parameters = sum(p.numel() for p in model.vit_base.parameters() if not p.requires_grad) total_parameters = sum(p.numel() for p in model.vit_base.parameters()) print('------------------------------') # print to show parameters are frozen or learnable for name, param in model.named_parameters(): print(name, param.requires_grad) print('------------------------------') model.to(device) model_ema = None if args.model_ema: # Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper model_ema = ModelEma( model, decay=args.model_ema_decay, device='cpu' if args.model_ema_force_cpu else '', resume='') print("Using EMA with decay = %.8f" % args.model_ema_decay) model_without_ddp = model # print("Model = %s" % str(model_without_ddp)) total_batch_size = args.batch_size * args.update_freq * utils.get_world_size() num_training_steps_per_epoch = len(dataset_train) // total_batch_size print("LR = %.8f" % args.lr) print("Batch size = %d" % total_batch_size) print("Update frequent = %d" % args.update_freq) print("Number of training examples = %d" % len(dataset_train)) print("Number of training training per epoch = %d" % num_training_steps_per_epoch) assigner = None if assigner is not None: print("Assigned values = %s" % str(assigner.values)) # skip_weight_decay_list = model.no_weight_decay() skip_weight_decay_list = None if args.enable_deepspeed: loss_scaler = None optimizer_params = get_parameter_groups( model, args.weight_decay, skip_weight_decay_list, assigner.get_layer_id if assigner is not None else None, assigner.get_scale if assigner is not None else None) model, optimizer, _, _ = ds_init( args=args, model=model, model_parameters=optimizer_params, dist_init_required=not args.distributed, ) print("model.gradient_accumulation_steps() = %d" % model.gradient_accumulation_steps()) assert model.gradient_accumulation_steps() == args.update_freq else: if args.distributed: model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=True) model_without_ddp = model.module optimizer = create_optimizer( args, model_without_ddp, skip_list=skip_weight_decay_list, get_num_layer=assigner.get_layer_id if assigner is not None else None, get_layer_scale=assigner.get_scale if assigner is not None else None) loss_scaler = NativeScaler() print("Use step level LR scheduler!") lr_schedule_values = utils.cosine_scheduler( args.lr, args.min_lr, args.epochs, num_training_steps_per_epoch, warmup_epochs=args.warmup_epochs, warmup_steps=args.warmup_steps, ) if args.weight_decay_end is None: args.weight_decay_end = args.weight_decay wd_schedule_values = utils.cosine_scheduler( 
args.weight_decay, args.weight_decay_end, args.epochs, num_training_steps_per_epoch) print("Max WD = %.7f, Min WD = %.7f" % (max(wd_schedule_values), min(wd_schedule_values))) if mixup_fn is not None: # smoothing is handled with mixup label transform criterion = SoftTargetCrossEntropy() elif args.smoothing > 0.: criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing) else: criterion = torch.nn.CrossEntropyLoss() print("criterion = %s" % str(criterion)) utils.auto_load_model( args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler, model_ema=model_ema) if args.eval:
# -------------------------------------------------------- # SA2VP: Spatially Aligned-and-Adapted Visual Prompt code # reference: # BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254) # Github source: https://github.com/microsoft/unilm/tree/master/beit # Based on timm # https://github.com/rwightman/pytorch-image-models/tree/master/timm # --------------------------------------------------------' #os.environ['CUDA_DEVICE_ORDER']='PCI_BUS_ID' # can be removed and set in bash #os.environ['CUDA_VISIBLE_DEVICES']='0' # can be removed and set in bash class Dual_model(nn.Module): def __init__(self, args): super(Dual_model, self).__init__() self.vit_base = VisionTransformer("sup_vitb16_imagenet21k", 224, num_classes=-1, vis=False) # choose ViT as backbone self.vit_base.load_from(np.load(os.path.join("./backbone_ckpt", "imagenet21k_ViT-B_16.npz"))) # where to save pre-trained model ./backbone_ckpt # show parameters is frozen or learnable for k, p in self.vit_base.named_parameters(): name_list = k.split('.') # print(name_list) if name_list[2] == 'ppt' or name_list[2] == 'cross_conv' or name_list[2] == 'deep_ppt' or name_list[2] == 'encoder_norm_2' or name_list[2] == 'deep_proj' or name_list[2] == 'ppt_proj': p.requires_grad = True elif name_list[2] == 'cross_attn': if name_list[4] == 'attn' or name_list[4] == 'attention_norm': p.requires_grad = False else: p.requires_grad = True elif name_list[2] == 'layer': if name_list[4] == 'down_proj' or name_list[4] == 'up_proj' or name_list[4] == 'before_norm': p.requires_grad = True else: p.requires_grad = False else: p.requires_grad = False self.class_head = nn.Linear(768, args.nb_classes, bias=True) trunc_normal_(self.class_head.weight, std=0.02) def forward(self, x): x, p = self.vit_base(x) # B*768 return self.class_head(x), self.class_head(p) def get_args(): parser = argparse.ArgumentParser('SA2VP script for image classification', add_help=False) parser.add_argument('--batch_size', default=64, type=int) parser.add_argument('--epochs', default=30, type=int) parser.add_argument('--update_freq', default=1, type=int) parser.add_argument('--save_ckpt_freq', default=20, type=int) # Model parameters parser.add_argument('--model', default='beit_base_patch16_224', type=str, metavar='MODEL', help='Name of model to train') parser.add_argument('--rel_pos_bias', action='store_true') parser.add_argument('--disable_rel_pos_bias', action='store_false', dest='rel_pos_bias') parser.set_defaults(rel_pos_bias=False) parser.add_argument('--abs_pos_emb', action='store_true') parser.set_defaults(abs_pos_emb=True) parser.add_argument('--layer_scale_init_value', default=0.1, type=float, help="0.1 for base, 1e-5 for large. 
set 0 to disable layer scale") parser.add_argument('--input_size', default=224, type=int, help='images input size') parser.add_argument('--second_input_size', default=112, type=int, help='images input size for discrete vae') parser.add_argument('--drop', type=float, default=0.0, metavar='PCT', help='Dropout rate (default: 0.)') parser.add_argument('--attn_drop_rate', type=float, default=0.0, metavar='PCT', help='Attention dropout rate (default: 0.)') parser.add_argument('--drop_path', type=float, default=0.1, metavar='PCT', help='Drop path rate (default: 0.1)') parser.add_argument('--disable_eval_during_finetuning', action='store_true', default=False) parser.add_argument('--model_ema', action='store_true', default=False) parser.add_argument('--model_ema_decay', type=float, default=0.9999, help='') parser.add_argument('--model_ema_force_cpu', action='store_true', default=False, help='') # Optimizer parameters parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER', help='Optimizer (default: "adamw"') parser.add_argument('--opt_eps', default=1e-8, type=float, metavar='EPSILON', help='Optimizer Epsilon (default: 1e-8)') parser.add_argument('--opt_betas', default=None, type=float, nargs='+', metavar='BETA', help='Optimizer Betas (default: None, use opt default)') parser.add_argument('--clip_grad', type=float, default=None, metavar='NORM', help='Clip gradient norm (default: None, no clipping)') parser.add_argument('--momentum', type=float, default=0.9, metavar='M', help='SGD momentum (default: 0.9)') parser.add_argument('--weight_decay', type=float, default=0.05, help='weight decay (default: 0.05)') parser.add_argument('--weight_decay_end', type=float, default=None, help="""Final value of the weight decay. We use a cosine schedule for WD and using a larger decay by the end of training improves performance for ViTs.""") parser.add_argument('--lr', type=float, default=5e-4, metavar='LR', help='learning rate (default: 5e-4)') parser.add_argument('--layer_decay', type=float, default=0.9) parser.add_argument('--warmup_lr', type=float, default=1e-6, metavar='LR', help='warmup learning rate (default: 1e-6)') parser.add_argument('--min_lr', type=float, default=1e-6, metavar='LR', help='lower lr bound for cyclic schedulers that hit 0 (1e-5)') parser.add_argument('--warmup_epochs', type=int, default=5, metavar='N', help='epochs to warmup LR, if scheduler supports') parser.add_argument('--warmup_steps', type=int, default=-1, metavar='N', help='num of steps to warmup LR, will overload warmup_epochs if set > 0') # Augmentation parameters parser.add_argument('--color_jitter', type=float, default=0.4, metavar='PCT', help='Color jitter factor (default: 0.4)') parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME', help='Use AutoAugment policy. "v0" or "original". 
" + "(default: rand-m9-mstd0.5-inc1)'), parser.add_argument('--smoothing', type=float, default=0, help='Label smoothing (default: 0)') parser.add_argument('--train_interpolation', type=str, default='bicubic', help='Training interpolation (random, bilinear, bicubic default: "bicubic")') parser.add_argument('--second_interpolation', type=str, default='lanczos', help='Interpolation for discrete vae (random, bilinear, bicubic default: "lanczos")') # Evaluation parameters parser.add_argument('--crop_pct', type=float, default=None) # * Random Erase params parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT', help='Random erase prob (default: 0.25)') parser.add_argument('--remode', type=str, default='pixel', help='Random erase mode (default: "pixel")') parser.add_argument('--recount', type=int, default=1, help='Random erase count (default: 1)') parser.add_argument('--resplit', action='store_true', default=False, help='Do not random erase first (clean) augmentation split') # * Mixup params parser.add_argument('--mixup', type=float, default=0, help='mixup alpha, mixup enabled if > 0.') parser.add_argument('--cutmix', type=float, default=0, help='cutmix alpha, cutmix enabled if > 0.') parser.add_argument('--cutmix_minmax', type=float, nargs='+', default=None, help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)') parser.add_argument('--mixup_prob', type=float, default=1.0, help='Probability of performing mixup or cutmix when either/both is enabled') parser.add_argument('--mixup_switch_prob', type=float, default=0.5, help='Probability of switching to cutmix when both mixup and cutmix enabled') parser.add_argument('--mixup_mode', type=str, default='batch', help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"') # * Finetuning params parser.add_argument('--finetune', default='', help='finetune from checkpoint') parser.add_argument('--model_key', default='model|module', type=str) parser.add_argument('--model_prefix', default='', type=str) parser.add_argument('--init_scale', default=0.001, type=float) parser.add_argument('--use_mean_pooling', action='store_true') parser.set_defaults(use_mean_pooling=True) parser.add_argument('--use_cls', action='store_false', dest='use_mean_pooling') parser.add_argument('--disable_weight_decay_on_rel_pos_bias', action='store_true', default=False) # Dataset parameters parser.add_argument('--data_path', default='/datasets01/imagenet_full_size/061417/', type=str, help='dataset path') parser.add_argument('--my_mode', default='train_val', type=str, help='my mode to train or test') parser.add_argument('--eval_data_path', default=None, type=str, help='dataset path for evaluation') parser.add_argument('--nb_classes', default=0, type=int, help='number of the classification types') parser.add_argument('--imagenet_default_mean_and_std', default=False, action='store_true') parser.add_argument('--data_set', default='CUB', choices=['CIFAR', 'IMNET', 'image_folder', 'CUB', 'DOG', 'FLOWER', 'CAR', 'BIRD', 'CAL101', 'DMLAB','EUROSAT','PATCH_CAMELYON','Resisc45','Retinopathy','CLEVR_COUNT','CIFAR100','FOOD101','SVHN','DTD','FLOWER_S','PET','SVHN_S','SUN','CLEVR_DISTANCE','KITTI_DISTANCE','DS_LOC','DS_ORI','SN_AZI','SN_ELE', 'DTD_DAM', 'GTSRB_DAM', 'FOOD_DAM', 'CIFAR10_DAM', 'CIFAR100_DAM', 'SVHN_DAM'], type=str, help='ImageNet dataset path') parser.add_argument('--output_dir', default='', help='path where to save, empty for no saving') parser.add_argument('--log_dir', default=None, help='path where to tensorboard log') 
parser.add_argument('--device', default='cuda', help='device to use for training / testing') parser.add_argument('--seed', default=0, type=int) parser.add_argument('--resume', default='', help='resume from checkpoint') parser.add_argument('--auto_resume', action='store_true') parser.add_argument('--no_auto_resume', action='store_false', dest='auto_resume') parser.set_defaults(auto_resume=True) parser.add_argument('--save_ckpt', action='store_true') parser.add_argument('--no_save_ckpt', action='store_false', dest='save_ckpt') parser.set_defaults(save_ckpt=True) parser.add_argument('--start_epoch', default=0, type=int, metavar='N', help='start epoch') parser.add_argument('--eval', action='store_true', help='Perform evaluation only') parser.add_argument('--dist_eval', action='store_true', default=False, help='Enabling distributed evaluation') parser.add_argument('--num_workers', default=10, type=int) parser.add_argument('--pin_mem', action='store_true', help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.') parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem') parser.set_defaults(pin_mem=True) # distributed training parameters parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes') parser.add_argument('--local_rank', default=-1, type=int) parser.add_argument('--dist_on_itp', action='store_true') parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training') parser.add_argument('--enable_deepspeed', action='store_true', default=False) known_args, _ = parser.parse_known_args() if known_args.enable_deepspeed: try: parser = deepspeed.add_config_arguments(parser) ds_init = deepspeed.initialize except: print("Please 'pip install deepspeed==0.4.0'") exit(0) else: ds_init = None return parser.parse_args(), ds_init def main(args, ds_init): utils.init_distributed_mode(args) if ds_init is not None: utils.create_ds_config(args) print(args) device = torch.device(args.device) seed = 42 torch.manual_seed(seed) np.random.seed(seed) cudnn.benchmark = True dataset_train, args.nb_classes = build_dataset(is_train=True, args=args) if args.disable_eval_during_finetuning: dataset_val = None else: dataset_val, _ = build_dataset(is_train=False, args=args) print("Calculation of training examples = %d" % len(dataset_train)) print("Calculation of other examples = %d" % len(dataset_val)) if True: # args.distributed: num_tasks = utils.get_world_size() global_rank = utils.get_rank() sampler_train = torch.utils.data.DistributedSampler( dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True ) print("Sampler_train = %s" % str(sampler_train)) if args.dist_eval: if len(dataset_val) % num_tasks != 0: print('Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. 
' 'This will slightly alter validation results as extra duplicate entries are added to achieve ' 'equal num of samples per-process.') sampler_val = torch.utils.data.DistributedSampler( dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=False) else: sampler_val = torch.utils.data.SequentialSampler(dataset_val) else: sampler_train = torch.utils.data.RandomSampler(dataset_train) sampler_val = torch.utils.data.SequentialSampler(dataset_val) if global_rank == 0 and args.log_dir is not None: os.makedirs(args.log_dir, exist_ok=True) log_writer = utils.TensorboardLogger(log_dir=args.log_dir) else: log_writer = None data_loader_train = torch.utils.data.DataLoader( dataset_train, sampler=sampler_train, batch_size=args.batch_size, num_workers=args.num_workers, pin_memory=args.pin_mem, drop_last=True, ) if dataset_val is not None: data_loader_val = torch.utils.data.DataLoader( dataset_val, sampler=sampler_val, batch_size=int(4*args.batch_size), num_workers=args.num_workers, pin_memory=args.pin_mem, drop_last=False ) else: data_loader_val = None mixup_fn = None mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None if mixup_active: print("Mixup is activated!") mixup_fn = Mixup( mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax, prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode, label_smoothing=args.smoothing, num_classes=args.nb_classes) model = Dual_model(args) n_parameters = sum(p.numel() for p in model.vit_base.parameters() if p.requires_grad) frozen_parameters = sum(p.numel() for p in model.vit_base.parameters() if not p.requires_grad) total_parameters = sum(p.numel() for p in model.vit_base.parameters()) print('------------------------------') # print to show parameters are frozen or learnable for name, param in model.named_parameters(): print(name, param.requires_grad) print('------------------------------') model.to(device) model_ema = None if args.model_ema: # Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper model_ema = ModelEma( model, decay=args.model_ema_decay, device='cpu' if args.model_ema_force_cpu else '', resume='') print("Using EMA with decay = %.8f" % args.model_ema_decay) model_without_ddp = model # print("Model = %s" % str(model_without_ddp)) total_batch_size = args.batch_size * args.update_freq * utils.get_world_size() num_training_steps_per_epoch = len(dataset_train) // total_batch_size print("LR = %.8f" % args.lr) print("Batch size = %d" % total_batch_size) print("Update frequent = %d" % args.update_freq) print("Number of training examples = %d" % len(dataset_train)) print("Number of training training per epoch = %d" % num_training_steps_per_epoch) assigner = None if assigner is not None: print("Assigned values = %s" % str(assigner.values)) # skip_weight_decay_list = model.no_weight_decay() skip_weight_decay_list = None if args.enable_deepspeed: loss_scaler = None optimizer_params = get_parameter_groups( model, args.weight_decay, skip_weight_decay_list, assigner.get_layer_id if assigner is not None else None, assigner.get_scale if assigner is not None else None) model, optimizer, _, _ = ds_init( args=args, model=model, model_parameters=optimizer_params, dist_init_required=not args.distributed, ) print("model.gradient_accumulation_steps() = %d" % model.gradient_accumulation_steps()) assert model.gradient_accumulation_steps() == args.update_freq else: if args.distributed: model = torch.nn.parallel.DistributedDataParallel(model, 
device_ids=[args.gpu], find_unused_parameters=True) model_without_ddp = model.module optimizer = create_optimizer( args, model_without_ddp, skip_list=skip_weight_decay_list, get_num_layer=assigner.get_layer_id if assigner is not None else None, get_layer_scale=assigner.get_scale if assigner is not None else None) loss_scaler = NativeScaler() print("Use step level LR scheduler!") lr_schedule_values = utils.cosine_scheduler( args.lr, args.min_lr, args.epochs, num_training_steps_per_epoch, warmup_epochs=args.warmup_epochs, warmup_steps=args.warmup_steps, ) if args.weight_decay_end is None: args.weight_decay_end = args.weight_decay wd_schedule_values = utils.cosine_scheduler( args.weight_decay, args.weight_decay_end, args.epochs, num_training_steps_per_epoch) print("Max WD = %.7f, Min WD = %.7f" % (max(wd_schedule_values), min(wd_schedule_values))) if mixup_fn is not None: # smoothing is handled with mixup label transform criterion = SoftTargetCrossEntropy() elif args.smoothing > 0.: criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing) else: criterion = torch.nn.CrossEntropyLoss() print("criterion = %s" % str(criterion)) utils.auto_load_model( args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler, model_ema=model_ema) if args.eval:
test_stats = evaluate(data_loader_val, model, device)
7
2023-12-12 13:19:17+00:00
16k
lumina-test/lumina
lumina/e2e_test/test_gbn.py
[ { "identifier": "get_qp_info_list", "path": "lumina/analyzer/main.py", "snippet": "def get_qp_info_list(switch_msg_snapshot):\n \"\"\" Get the list of QP info from the switch message snapshot\n\n Args:\n switch_msg_snapshot (str): The path to the switch message snapshot\n\n Returns:\n ...
import argparse, os, math, glob, logging, time import lumina.analyzer.checker.integrity_check as integrity_check import lumina.analyzer.checker.host_check as host_check import lumina.analyzer.checker.gbn_check as gbn_check import lumina.analyzer.checker.read_gbn_check as read_gbn_check import lumina.orchestrator.host as host import lumina.orchestrator.switch as switch from lumina.analyzer.main import get_qp_info_list from lumina.orchestrator.main import Orchestrator from lumina.analyzer.counter.switch_counter import SwitchCounter from lumina.analyzer.counter.host_counter import MLNXHostCounter, IntelHostCounter from lumina.analyzer.pcap_processor.pcap_process import get_packet_list from lumina.analyzer.measurer.latency_measure import LatencyMeasure from lumina.utils.config_loggers import config_stream_handler, config_file_handler from lumina.analyzer.packet_parser.roce_packet import TRIGGER_OOS, TRIGGER_TIMEOUT
13,821
RESULT_FILENAME = "result.log" ## Max # of retries for each experiment iteration MAX_NB_EXP_RETRIES = 3 def setup_root_logger(orchestrator): """ Setup the root logger for the test Args: orchestrator (Orchestrator object): Orchestrator object that contains all the configurations Returns: N/A """ root_logger = logging.getLogger() root_logger.handlers.clear() config_stream_handler(root_logger) config_file_handler(logger=root_logger, log_file=os.path.join(orchestrator.result_path, LOG_FILENAME), no_format=False) def run_traffic(orchestrator): """ Run the traffic and collect the results Args: orchestrator (Orchestrator object): Orchestrator object that contains all the configurations Returns: bool: True if the experiment is successful, False otherwise """ orchestrator.rm_old_files() if orchestrator.sync_and_compile() == False: logging.error("Failed to sync and compile the code") sys.exit(-1) logging.info("Sync and compile completed") if orchestrator.generate_switch_config_file() == False: logging.error("Failed to generate switch configuration file") sys.exit(-1) num_repeats = orchestrator.get_num_repeats() for i in range(num_repeats): logging.info("=" * 100) nb_retry = 0 iter_result = False while nb_retry < MAX_NB_EXP_RETRIES: if orchestrator.run_experiment() == False: logging.error("Iteration %d: Failed to complete experiment" % i) logging.error("Iteration %d: Rerun experiment (retry: %d)" % i, nb_retry) nb_retry += 1 orchestrator.clean_up() time.sleep(5) continue logging.info("Iteration %d: Completed experiment" % i) try: orchestrator.clean_up() orchestrator.fetch_results(i) logging.info("Iteration %d: Fetch experiment results" % i) orchestrator.merge_traces(i) logging.info("Iteration %d: Merge the pcap files" % i) except: logging.error("Iteration %d: Result collection failed" % (i)) logging.error("Iteration %d: Rerun experiment (retry: %d)" % (i, nb_retry)) nb_retry += 1 time.sleep(5) continue if orchestrator.check_integrity(i) == False: logging.error("Iteration %d: Integrity check failed" % (i)) logging.error("Iteration %d: Rerun experiment (retry: %d)" % (i, nb_retry)) nb_retry += 1 time.sleep(5) continue iter_result = True break if iter_result is False: logging.error("Iteration %d: Still failed after %d retries" % (i, nb_retry)) return False return True def analyze_retrans_latency(pkt, latency_measurement, is_read, logger): """ Analyze the retransmission latency breakdown for an undelivered packet Args: pkt (Packet object): The undelivered packet latency_measurement (LatencyMeasure object): A LatencyMeasure object that can compute latency breakdown is_read (bool): If we use RDMA READ in this experiment logger (logging.Logger): A logger object Returns: N/A """ # All the undelivered packets should be retransmitted in our test cases if latency_measurement.get_retransmit_pkt(pkt) == None: logger.error("\t\t No retransmit packet found for this packet") logger.error("\t\t It is possible that this undelivered packet is a redundant transmission") return retrans_latency = latency_measurement.get_retransmit_latency(pkt) if is_read == True: # For RDMA READ, we should always find a NACK READ request that triggers retransmission nack = latency_measurement.get_nack(pkt) if nack is not None: trigger = nack.get_trigger() if trigger == TRIGGER_OOS: next_delivered_pkt_delay = latency_measurement.get_qp_next_delivered_pkt_latency(pkt) nack_gen_latency = latency_measurement.get_nack_gen_latency(pkt) nack_resp_latency = latency_measurement.get_nack_resp_latency(pkt) logger.info("\t\t Out of sequence (OOS) 
triggered retransmission") logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) logger.info('\t\t Next delivered packet delay: %fus' % (next_delivered_pkt_delay * 1e6)) logger.info("\t\t NACK READ request generation latency: %fus" % (nack_gen_latency * 1e6)) logger.info('\t\t NACK READ request response latency: %fus' % (nack_resp_latency * 1e6))
## All logs will be logged into file LOG_FILENAME LOG_FILENAME = "test_gbn.log" ## Results (checkers and measurements) will also be dumped into file RESULT_FILENAME RESULT_FILENAME = "result.log" ## Max # of retries for each experiment iteration MAX_NB_EXP_RETRIES = 3 def setup_root_logger(orchestrator): """ Setup the root logger for the test Args: orchestrator (Orchestrator object): Orchestrator object that contains all the configurations Returns: N/A """ root_logger = logging.getLogger() root_logger.handlers.clear() config_stream_handler(root_logger) config_file_handler(logger=root_logger, log_file=os.path.join(orchestrator.result_path, LOG_FILENAME), no_format=False) def run_traffic(orchestrator): """ Run the traffic and collect the results Args: orchestrator (Orchestrator object): Orchestrator object that contains all the configurations Returns: bool: True if the experiment is successful, False otherwise """ orchestrator.rm_old_files() if orchestrator.sync_and_compile() == False: logging.error("Failed to sync and compile the code") sys.exit(-1) logging.info("Sync and compile completed") if orchestrator.generate_switch_config_file() == False: logging.error("Failed to generate switch configuration file") sys.exit(-1) num_repeats = orchestrator.get_num_repeats() for i in range(num_repeats): logging.info("=" * 100) nb_retry = 0 iter_result = False while nb_retry < MAX_NB_EXP_RETRIES: if orchestrator.run_experiment() == False: logging.error("Iteration %d: Failed to complete experiment" % i) logging.error("Iteration %d: Rerun experiment (retry: %d)" % i, nb_retry) nb_retry += 1 orchestrator.clean_up() time.sleep(5) continue logging.info("Iteration %d: Completed experiment" % i) try: orchestrator.clean_up() orchestrator.fetch_results(i) logging.info("Iteration %d: Fetch experiment results" % i) orchestrator.merge_traces(i) logging.info("Iteration %d: Merge the pcap files" % i) except: logging.error("Iteration %d: Result collection failed" % (i)) logging.error("Iteration %d: Rerun experiment (retry: %d)" % (i, nb_retry)) nb_retry += 1 time.sleep(5) continue if orchestrator.check_integrity(i) == False: logging.error("Iteration %d: Integrity check failed" % (i)) logging.error("Iteration %d: Rerun experiment (retry: %d)" % (i, nb_retry)) nb_retry += 1 time.sleep(5) continue iter_result = True break if iter_result is False: logging.error("Iteration %d: Still failed after %d retries" % (i, nb_retry)) return False return True def analyze_retrans_latency(pkt, latency_measurement, is_read, logger): """ Analyze the retransmission latency breakdown for an undelivered packet Args: pkt (Packet object): The undelivered packet latency_measurement (LatencyMeasure object): A LatencyMeasure object that can compute latency breakdown is_read (bool): If we use RDMA READ in this experiment logger (logging.Logger): A logger object Returns: N/A """ # All the undelivered packets should be retransmitted in our test cases if latency_measurement.get_retransmit_pkt(pkt) == None: logger.error("\t\t No retransmit packet found for this packet") logger.error("\t\t It is possible that this undelivered packet is a redundant transmission") return retrans_latency = latency_measurement.get_retransmit_latency(pkt) if is_read == True: # For RDMA READ, we should always find a NACK READ request that triggers retransmission nack = latency_measurement.get_nack(pkt) if nack is not None: trigger = nack.get_trigger() if trigger == TRIGGER_OOS: next_delivered_pkt_delay = latency_measurement.get_qp_next_delivered_pkt_latency(pkt) 
nack_gen_latency = latency_measurement.get_nack_gen_latency(pkt) nack_resp_latency = latency_measurement.get_nack_resp_latency(pkt) logger.info("\t\t Out of sequence (OOS) triggered retransmission") logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) logger.info('\t\t Next delivered packet delay: %fus' % (next_delivered_pkt_delay * 1e6)) logger.info("\t\t NACK READ request generation latency: %fus" % (nack_gen_latency * 1e6)) logger.info('\t\t NACK READ request response latency: %fus' % (nack_resp_latency * 1e6))
elif trigger == TRIGGER_TIMEOUT:
10
2023-12-09 08:21:14+00:00
16k
boweniac/autogan
autogan/agents/tool_agent_search.py
[ { "identifier": "CodeExecution", "path": "autogan/tools/code_execution_tool.py", "snippet": "class CodeExecution:\n def __init__(self, work_dir: Optional[str] = None):\n \"\"\"A class for code execution\n 用于代码执行的类\n\n Supports python, bash, shell, powershell code\n 支持 pyth...
import re from collections import defaultdict from typing import Optional, Dict from autogan.tools.code_execution_tool import CodeExecution from autogan.tools.wolfram_alpha_tool import WolframAlphaAPIWrapper from autogan.oai.count_tokens_utils import count_text_tokens from autogan.agents.universal_agent import UniversalAgent from autogan.utils.compressed_text_utils import compressed_text_universal from autogan.tools.web_search_tool import WebSearch
11,974
class ToolAgentSearch(UniversalAgent): def __init__( self, search_config: Dict, agent_config: Optional[Dict] = None, retry_times: Optional[int] = 10, name: Optional[str] = "WebSearchExp", duty: Optional[str] = 'Not only can I search for information on the internet, ' 'but I can also answer questions using the Wolfram engine.', work_flow: Optional[str] = """I hope you are an internet search expert. When you receive a search request, you have the following two tools to choose from: 1. web: You can search for information on the internet. When using it, please enclose the search keywords in your output with the ```web\n ``` symbol, for example: ```web Your search keywords ``` 2. wolfram: You can use the Wolfram engine to help you calculate or query data related to Mathematics, finance, unit conversion, data analysis, science, geography, history, culture, movies, music, etc. When using it, please enclose the English question that Wolfram can understand in your output with the ```wolfram\n ``` symbol, for example: ```wolfram one wolfram query ``` Note: When you decide to use a tool, please do not @ anyone.""", # duty: Optional[str] = '我不但可以从网络上搜索资料,还可以通过 wolfram 引擎来回答问题。', # work_flow: Optional[str] = """我希望你是一个网络搜索专家,当你收到搜索请求时,你有一下两种工具可供选择: # # 1. web: 可以在网络上查找资料。使用时请在你的输出内容中,将搜索关键词用```web\n ``` 符号封装,例如: # ```web # Your search keywords # ``` # # 2.wolfram: 可以使用wolfram引擎,帮你计算或查询数学、金融、单位转换、数据分析、科学、地理、历史、文化、电影、音乐等相关数据。使用时请在你的输出内容中,将 wolfram 可以理解的英文问题用```wolfram\n ``` 符号封装,例如: # ```wolfram # one wolfram query # ``` # # 注意:当你决定使用工具时,请不要@任何人""", ): """WebSearchExpert 1.Receive the user's question and convert it into search keywords. 2.Call the Google Search API to obtain a result and extract the webpage content. 3.If no content related to the user's question is extracted, call the Google Search API again to obtain the next result. 4.Repeat operations 2 and 3 until reaching retry_times. Within the same task session domain, if the search keywords are the same, the offset of the search results will accumulate and move backwards. :param agent_config: The agent configuration includes: agent 配置包括: - main_model: The LLM configuration of the agent's main body. agent 主体的 LLM 配置。 - summary_model: The LLM configuration used for compressing context and generating text summaries. 用于压缩上下文以及生成文本摘要的 LLM 配置。 - request_interval_time: The interval time of LLM requests. LLM 请求间隔时间。 - request_timeout:The timeout of LLM requests. LLM 请求超时时间。 - max_retries: The maximum number of retries for LLM requests. LLM 请求最大重试次数。 :param search_config: JSON format of email_config {"cx": "", "key": ""} :param retry_times: Represent the maximum number of attempts for each search, the default is 10. :param name: The agent name should be unique in the organizational structure. :param duty: Used to explain one's job responsibilities to other agents. :param work_flow: Defines the workflow of the agent. 
定义 agent 的工作流程。 """ super().__init__( name, agent_config=agent_config, duty=duty, work_flow=work_flow, use_tool="join" ) self._web_search = WebSearch(search_config["google_search"]) if "google_search" in search_config else None self._wolfram_alpha = WolframAlphaAPIWrapper( search_config["wolfram_alpha"]) if "wolfram_alpha" in search_config else None self._conversation_search_index = defaultdict(int) self._retry_times = retry_times def tool_function(self, task_id: str, param: Optional[str] = None, tokens: Optional[int] = None) -> tuple[str, int]: lang, code = CodeExecution.extract_code(param) if lang == "web" and code: if self._web_search: return self._web_function(task_id, code) else: return "Please add the Google Custom Search JSON API configuration.", 0 elif lang == "wolfram" and code: if self._wolfram_alpha: return self._wolfram_alpha_function(code) else: return "Please add the WolframAlphaAPI configuration.", 0 else: return """Please make a choice between web and wolfram, and use the ``` symbol for encapsulation, for example: ```wolfram one wolfram query ```""", 18 def _web_function(self, task_id: str, param: str) -> tuple[str, int]: loop = self._retry_times for i in range(loop): # Accumulate the search offset of the same task and the same keyword. self._conversation_search_index[task_id] += 1 start = self._conversation_search_index[task_id] # Get webpage content. detail = self._web_search.get_search_detail(param, start, self.name, "search", self.response_func) if detail: # Extract content related to the user's question from the webpage content.
class ToolAgentSearch(UniversalAgent): def __init__( self, search_config: Dict, agent_config: Optional[Dict] = None, retry_times: Optional[int] = 10, name: Optional[str] = "WebSearchExp", duty: Optional[str] = 'Not only can I search for information on the internet, ' 'but I can also answer questions using the Wolfram engine.', work_flow: Optional[str] = """I hope you are an internet search expert. When you receive a search request, you have the following two tools to choose from: 1. web: You can search for information on the internet. When using it, please enclose the search keywords in your output with the ```web\n ``` symbol, for example: ```web Your search keywords ``` 2. wolfram: You can use the Wolfram engine to help you calculate or query data related to Mathematics, finance, unit conversion, data analysis, science, geography, history, culture, movies, music, etc. When using it, please enclose the English question that Wolfram can understand in your output with the ```wolfram\n ``` symbol, for example: ```wolfram one wolfram query ``` Note: When you decide to use a tool, please do not @ anyone.""", # duty: Optional[str] = '我不但可以从网络上搜索资料,还可以通过 wolfram 引擎来回答问题。', # work_flow: Optional[str] = """我希望你是一个网络搜索专家,当你收到搜索请求时,你有一下两种工具可供选择: # # 1. web: 可以在网络上查找资料。使用时请在你的输出内容中,将搜索关键词用```web\n ``` 符号封装,例如: # ```web # Your search keywords # ``` # # 2.wolfram: 可以使用wolfram引擎,帮你计算或查询数学、金融、单位转换、数据分析、科学、地理、历史、文化、电影、音乐等相关数据。使用时请在你的输出内容中,将 wolfram 可以理解的英文问题用```wolfram\n ``` 符号封装,例如: # ```wolfram # one wolfram query # ``` # # 注意:当你决定使用工具时,请不要@任何人""", ): """WebSearchExpert 1.Receive the user's question and convert it into search keywords. 2.Call the Google Search API to obtain a result and extract the webpage content. 3.If no content related to the user's question is extracted, call the Google Search API again to obtain the next result. 4.Repeat operations 2 and 3 until reaching retry_times. Within the same task session domain, if the search keywords are the same, the offset of the search results will accumulate and move backwards. :param agent_config: The agent configuration includes: agent 配置包括: - main_model: The LLM configuration of the agent's main body. agent 主体的 LLM 配置。 - summary_model: The LLM configuration used for compressing context and generating text summaries. 用于压缩上下文以及生成文本摘要的 LLM 配置。 - request_interval_time: The interval time of LLM requests. LLM 请求间隔时间。 - request_timeout:The timeout of LLM requests. LLM 请求超时时间。 - max_retries: The maximum number of retries for LLM requests. LLM 请求最大重试次数。 :param search_config: JSON format of email_config {"cx": "", "key": ""} :param retry_times: Represent the maximum number of attempts for each search, the default is 10. :param name: The agent name should be unique in the organizational structure. :param duty: Used to explain one's job responsibilities to other agents. :param work_flow: Defines the workflow of the agent. 
定义 agent 的工作流程。 """ super().__init__( name, agent_config=agent_config, duty=duty, work_flow=work_flow, use_tool="join" ) self._web_search = WebSearch(search_config["google_search"]) if "google_search" in search_config else None self._wolfram_alpha = WolframAlphaAPIWrapper( search_config["wolfram_alpha"]) if "wolfram_alpha" in search_config else None self._conversation_search_index = defaultdict(int) self._retry_times = retry_times def tool_function(self, task_id: str, param: Optional[str] = None, tokens: Optional[int] = None) -> tuple[str, int]: lang, code = CodeExecution.extract_code(param) if lang == "web" and code: if self._web_search: return self._web_function(task_id, code) else: return "Please add the Google Custom Search JSON API configuration.", 0 elif lang == "wolfram" and code: if self._wolfram_alpha: return self._wolfram_alpha_function(code) else: return "Please add the WolframAlphaAPI configuration.", 0 else: return """Please make a choice between web and wolfram, and use the ``` symbol for encapsulation, for example: ```wolfram one wolfram query ```""", 18 def _web_function(self, task_id: str, param: str) -> tuple[str, int]: loop = self._retry_times for i in range(loop): # Accumulate the search offset of the same task and the same keyword. self._conversation_search_index[task_id] += 1 start = self._conversation_search_index[task_id] # Get webpage content. detail = self._web_search.get_search_detail(param, start, self.name, "search", self.response_func) if detail: # Extract content related to the user's question from the webpage content.
compressed_text, total_tokens = compressed_text_universal(
4
2023-12-06 03:24:34+00:00
16k
TACJu/Compositor
Compositor_Mask2Former/mask2former_video/data_video/ytvis_eval.py
[ { "identifier": "YTVOS", "path": "Compositor_Mask2Former/mask2former_video/data_video/datasets/ytvis_api/ytvos.py", "snippet": "class YTVOS:\n def __init__(self, annotation_file=None):\n \"\"\"\n Constructor of Microsoft COCO helper class for reading and visualizing annotations.\n ...
import contextlib import copy import io import itertools import json import logging import numpy as np import os import pycocotools.mask as mask_util import torch import detectron2.utils.comm as comm from collections import OrderedDict from .datasets.ytvis_api.ytvos import YTVOS from .datasets.ytvis_api.ytvoseval import YTVOSeval from tabulate import tabulate from detectron2.config import CfgNode from detectron2.data import MetadataCatalog from detectron2.evaluation import DatasetEvaluator from detectron2.utils.file_io import PathManager from detectron2.utils.logger import create_small_table
12,570
Derive the desired score numbers from summarized COCOeval. Args: coco_eval (None or COCOEval): None represents no predictions from model. iou_type (str): class_names (None or list[str]): if provided, will use it to predict per-category AP. Returns: a dict of {metric name: score} """ metrics = ["AP", "AP50", "AP75", "APs", "APm", "APl", "AR1", "AR10"] if coco_eval is None: self._logger.warn("No predictions from the model!") return {metric: float("nan") for metric in metrics} # the standard metrics results = { metric: float(coco_eval.stats[idx] * 100 if coco_eval.stats[idx] >= 0 else "nan") for idx, metric in enumerate(metrics) } self._logger.info( "Evaluation results for {}: \n".format("segm") + create_small_table(results) ) if not np.isfinite(sum(results.values())): self._logger.info("Some metrics cannot be computed and is shown as NaN.") if class_names is None or len(class_names) <= 1: return results # Compute per-category AP # from https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L222-L252 # noqa precisions = coco_eval.eval["precision"] # precision has dims (iou, recall, cls, area range, max dets) assert len(class_names) == precisions.shape[2] results_per_category = [] for idx, name in enumerate(class_names): # area range index 0: all area ranges # max dets index -1: typically 100 per image precision = precisions[:, :, idx, 0, -1] precision = precision[precision > -1] ap = np.mean(precision) if precision.size else float("nan") results_per_category.append(("{}".format(name), float(ap * 100))) # tabulate it N_COLS = min(6, len(results_per_category) * 2) results_flatten = list(itertools.chain(*results_per_category)) results_2d = itertools.zip_longest(*[results_flatten[i::N_COLS] for i in range(N_COLS)]) table = tabulate( results_2d, tablefmt="pipe", floatfmt=".3f", headers=["category", "AP"] * (N_COLS // 2), numalign="left", ) self._logger.info("Per-category {} AP: \n".format("segm") + table) results.update({"AP-" + name: ap for name, ap in results_per_category}) return results def instances_to_coco_json_video(inputs, outputs): """ Dump an "Instances" object to a COCO-format json that's used for evaluation. Args: instances (Instances): video_id (int): the image id Returns: list[dict]: list of json annotations in COCO format. """ assert len(inputs) == 1, "More than one inputs are loaded for inference!" video_id = inputs[0]["video_id"] video_length = inputs[0]["length"] scores = outputs["pred_scores"] labels = outputs["pred_labels"] masks = outputs["pred_masks"] ytvis_results = [] for instance_id, (s, l, m) in enumerate(zip(scores, labels, masks)): segms = [ mask_util.encode(np.array(_mask[:, :, None], order="F", dtype="uint8"))[0] for _mask in m ] for rle in segms: rle["counts"] = rle["counts"].decode("utf-8") res = { "video_id": video_id, "score": s, "category_id": l, "segmentations": segms, } ytvis_results.append(res) return ytvis_results def _evaluate_predictions_on_coco( coco_gt, coco_results, img_ids=None, ): """ Evaluate the coco results using COCOEval API. """ assert len(coco_results) > 0 coco_results = copy.deepcopy(coco_results) # When evaluating mask AP, if the results contain bbox, cocoapi will # use the box area as the area of the instance, instead of the mask area. # This leads to a different definition of small/medium/large. # We remove the bbox field to let mask AP use mask area. for c in coco_results: c.pop("bbox", None) coco_dt = coco_gt.loadRes(coco_results)
# Copyright (c) Facebook, Inc. and its affiliates. # Modified by Bowen Cheng from https://github.com/sukjunhwang/IFC class YTVISEvaluator(DatasetEvaluator): """ Evaluate AR for object proposals, AP for instance detection/segmentation, AP for keypoint detection outputs using COCO's metrics. See http://cocodataset.org/#detection-eval and http://cocodataset.org/#keypoints-eval to understand its metrics. In addition to COCO, this evaluator is able to support any bounding box detection, instance segmentation, or keypoint detection dataset. """ def __init__( self, dataset_name, tasks=None, distributed=True, output_dir=None, *, use_fast_impl=True, ): """ Args: dataset_name (str): name of the dataset to be evaluated. It must have either the following corresponding metadata: "json_file": the path to the COCO format annotation Or it must be in detectron2's standard dataset format so it can be converted to COCO format automatically. tasks (tuple[str]): tasks that can be evaluated under the given configuration. A task is one of "bbox", "segm", "keypoints". By default, will infer this automatically from predictions. distributed (True): if True, will collect results from all ranks and run evaluation in the main process. Otherwise, will only evaluate the results in the current process. output_dir (str): optional, an output directory to dump all results predicted on the dataset. The dump contains two files: 1. "instances_predictions.pth" a file in torch serialization format that contains all the raw original predictions. 2. "coco_instances_results.json" a json file in COCO's result format. use_fast_impl (bool): use a fast but **unofficial** implementation to compute AP. Although the results should be very close to the official implementation in COCO API, it is still recommended to compute results with the official API for use in papers. The faster implementation also uses more RAM. """ self._logger = logging.getLogger(__name__) self._distributed = distributed self._output_dir = output_dir self._use_fast_impl = use_fast_impl if tasks is not None and isinstance(tasks, CfgNode): self._logger.warning( "COCO Evaluator instantiated using config, this is deprecated behavior." " Please pass in explicit arguments instead." ) self._tasks = None # Infering it from predictions should be better else: self._tasks = tasks self._cpu_device = torch.device("cpu") self._metadata = MetadataCatalog.get(dataset_name) json_file = PathManager.get_local_path(self._metadata.json_file) with contextlib.redirect_stdout(io.StringIO()): self._ytvis_api = YTVOS(json_file) # Test set json files do not contain annotations (evaluation must be # performed using the COCO evaluation server). self._do_evaluation = "annotations" in self._ytvis_api.dataset def reset(self): self._predictions = [] def process(self, inputs, outputs): """ Args: inputs: the inputs to a COCO model (e.g., GeneralizedRCNN). It is a list of dict. Each dict corresponds to an image and contains keys like "height", "width", "file_name", "image_id". outputs: the outputs of a COCO model. It is a list of dicts with key "instances" that contains :class:`Instances`. """ prediction = instances_to_coco_json_video(inputs, outputs) self._predictions.extend(prediction) def evaluate(self): """ Args: img_ids: a list of image IDs to evaluate on. 
Default to None for the whole dataset """ if self._distributed: comm.synchronize() predictions = comm.gather(self._predictions, dst=0) predictions = list(itertools.chain(*predictions)) if not comm.is_main_process(): return {} else: predictions = self._predictions if len(predictions) == 0: self._logger.warning("[COCOEvaluator] Did not receive valid predictions.") return {} if self._output_dir: PathManager.mkdirs(self._output_dir) file_path = os.path.join(self._output_dir, "instances_predictions.pth") with PathManager.open(file_path, "wb") as f: torch.save(predictions, f) self._results = OrderedDict() self._eval_predictions(predictions) # Copy so the caller can do whatever with results return copy.deepcopy(self._results) def _eval_predictions(self, predictions): """ Evaluate predictions. Fill self._results with the metrics of the tasks. """ self._logger.info("Preparing results for YTVIS format ...") # unmap the category ids for COCO if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"): dataset_id_to_contiguous_id = self._metadata.thing_dataset_id_to_contiguous_id all_contiguous_ids = list(dataset_id_to_contiguous_id.values()) num_classes = len(all_contiguous_ids) assert min(all_contiguous_ids) == 0 and max(all_contiguous_ids) == num_classes - 1 reverse_id_mapping = {v: k for k, v in dataset_id_to_contiguous_id.items()} for result in predictions: category_id = result["category_id"] assert category_id < num_classes, ( f"A prediction has class={category_id}, " f"but the dataset only has {num_classes} classes and " f"predicted class id should be in [0, {num_classes - 1}]." ) result["category_id"] = reverse_id_mapping[category_id] if self._output_dir: file_path = os.path.join(self._output_dir, "results.json") self._logger.info("Saving results to {}".format(file_path)) with PathManager.open(file_path, "w") as f: f.write(json.dumps(predictions)) f.flush() if not self._do_evaluation: self._logger.info("Annotations are not available for evaluation.") return coco_eval = ( _evaluate_predictions_on_coco( self._ytvis_api, predictions, ) if len(predictions) > 0 else None # cocoapi does not handle empty results very well ) res = self._derive_coco_results( coco_eval, class_names=self._metadata.get("thing_classes") ) self._results["segm"] = res def _derive_coco_results(self, coco_eval, class_names=None): """ Derive the desired score numbers from summarized COCOeval. Args: coco_eval (None or COCOEval): None represents no predictions from model. iou_type (str): class_names (None or list[str]): if provided, will use it to predict per-category AP. 
Returns: a dict of {metric name: score} """ metrics = ["AP", "AP50", "AP75", "APs", "APm", "APl", "AR1", "AR10"] if coco_eval is None: self._logger.warn("No predictions from the model!") return {metric: float("nan") for metric in metrics} # the standard metrics results = { metric: float(coco_eval.stats[idx] * 100 if coco_eval.stats[idx] >= 0 else "nan") for idx, metric in enumerate(metrics) } self._logger.info( "Evaluation results for {}: \n".format("segm") + create_small_table(results) ) if not np.isfinite(sum(results.values())): self._logger.info("Some metrics cannot be computed and is shown as NaN.") if class_names is None or len(class_names) <= 1: return results # Compute per-category AP # from https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L222-L252 # noqa precisions = coco_eval.eval["precision"] # precision has dims (iou, recall, cls, area range, max dets) assert len(class_names) == precisions.shape[2] results_per_category = [] for idx, name in enumerate(class_names): # area range index 0: all area ranges # max dets index -1: typically 100 per image precision = precisions[:, :, idx, 0, -1] precision = precision[precision > -1] ap = np.mean(precision) if precision.size else float("nan") results_per_category.append(("{}".format(name), float(ap * 100))) # tabulate it N_COLS = min(6, len(results_per_category) * 2) results_flatten = list(itertools.chain(*results_per_category)) results_2d = itertools.zip_longest(*[results_flatten[i::N_COLS] for i in range(N_COLS)]) table = tabulate( results_2d, tablefmt="pipe", floatfmt=".3f", headers=["category", "AP"] * (N_COLS // 2), numalign="left", ) self._logger.info("Per-category {} AP: \n".format("segm") + table) results.update({"AP-" + name: ap for name, ap in results_per_category}) return results def instances_to_coco_json_video(inputs, outputs): """ Dump an "Instances" object to a COCO-format json that's used for evaluation. Args: instances (Instances): video_id (int): the image id Returns: list[dict]: list of json annotations in COCO format. """ assert len(inputs) == 1, "More than one inputs are loaded for inference!" video_id = inputs[0]["video_id"] video_length = inputs[0]["length"] scores = outputs["pred_scores"] labels = outputs["pred_labels"] masks = outputs["pred_masks"] ytvis_results = [] for instance_id, (s, l, m) in enumerate(zip(scores, labels, masks)): segms = [ mask_util.encode(np.array(_mask[:, :, None], order="F", dtype="uint8"))[0] for _mask in m ] for rle in segms: rle["counts"] = rle["counts"].decode("utf-8") res = { "video_id": video_id, "score": s, "category_id": l, "segmentations": segms, } ytvis_results.append(res) return ytvis_results def _evaluate_predictions_on_coco( coco_gt, coco_results, img_ids=None, ): """ Evaluate the coco results using COCOEval API. """ assert len(coco_results) > 0 coco_results = copy.deepcopy(coco_results) # When evaluating mask AP, if the results contain bbox, cocoapi will # use the box area as the area of the instance, instead of the mask area. # This leads to a different definition of small/medium/large. # We remove the bbox field to let mask AP use mask area. for c in coco_results: c.pop("bbox", None) coco_dt = coco_gt.loadRes(coco_results)
coco_eval = YTVOSeval(coco_gt, coco_dt)
1
2023-12-12 11:49:28+00:00
16k
ebb-earl-co/tidal-wave
tidal_wave/playlist.py
[ { "identifier": "AudioFormat", "path": "tidal_wave/media.py", "snippet": "class AudioFormat(str, Enum):\n sony_360_reality_audio = \"360\"\n dolby_atmos = \"Atmos\"\n hi_res = \"HiRes\"\n mqa = \"MQA\"\n lossless = \"Lossless\"\n high = \"High\"\n low = \"Low\"" }, { "identi...
from dataclasses import dataclass from pathlib import Path from types import SimpleNamespace from typing import Dict, List, Optional, Set, Tuple, Union from requests import HTTPError, Session from .media import AudioFormat from .models import ( PlaylistsEndpointResponseJSON, TracksEndpointResponseJSON, VideosEndpointResponseJSON, ) from .requesting import request_playlists from .track import Track from .utils import download_cover_image, temporary_file, TIDAL_API_URL from .video import Video import json import logging import math import shutil import sys import ffmpeg import mutagen
11,213
else: subdirs.add(tv.album_dir) subdirs.add(tv.album_dir.parent) files[i - 1] = {i: None} continue _path: Optional[Path] = Path(tv.outfile) if tv is not None else None # if the item never got turned into a track or video if _path is None: files[i - 1] = {i: None} continue # if the track or video didn't download if _path.exists(): if _path.stat().st_size == 0: files[i - 1] = {i: None} continue else: files[i - 1] = {i: None} continue # otherwise, move files and clean up if isinstance(tv, Track): new_path: Path = self.playlist_dir / f"{i:03d} - {tv.trackname}" new_path.write_bytes(_path.read_bytes()) _path.unlink() files[i - 1] = {i: str(new_path.absolute())} elif isinstance(tv, Video): new_path: Path = self.playlist_dir / f"{i:03d} - {_path.name}" new_path.write_bytes(_path.read_bytes()) _path.unlink() files[i - 1] = {i: str(new_path.absolute())} else: self.files: List[Dict[int, Optional[str]]] = files # Find all subdirectories written to subdirs: Set[Path] = set() for tv in self.tracks_videos: if isinstance(tv, Track): try: getattr(tv, "album_dir") except AttributeError: pass else: subdirs.add(tv.album_dir) subdirs.add(tv.album_dir.parent) elif isinstance(tv, Video): subdirs.add(tv.artist_dir) # Copy all artist images, artist bio JSON files out # of subdirs artist_images: Set[Path] = set() for subdir in subdirs: for p in subdir.glob("*.jpg"): if p.name == "cover.jpg": continue artist_images.add(p) else: for artist_image_path in artist_images: if artist_image_path.exists(): shutil.copyfile( artist_image_path.absolute(), self.playlist_dir / artist_image_path.name, ) artist_bios: Set[Path] = set() for subdir in subdirs: for p in subdir.glob("*bio.json"): artist_bios.add(p) else: for artist_bio_path in artist_bios: if artist_bio_path.exists(): shutil.copyfile( artist_bio_path.absolute(), self.playlist_dir / artist_bio_path.name, ) # Remove all subdirs for subdir in subdirs: if subdir.exists(): shutil.rmtree(subdir) else: return self.playlist_dir def craft_m3u8_text(self): """This method creates a file called playlist.m3u8 in self.playlist_dir that is a standard M3U. Needs to be called after self.flatten_playlist_dir in order to be able to access self.files N.b. the already-written file is temporarily copied to a .mp4 version in a temporary directory because .m4a files cannot be read with mutagen.""" m3u_text: str = f"#EXTM3U\n#EXTENC:UTF-8\n#EXTIMG:{str(self.cover_path.absolute())}\n#PLAYLIST:{self.name}\n" logger.info( f"Creating .m3u8 playlist file for Playlist with ID '{self.playlist_id}'" ) for d in self.files: file: str = next(iter(d.values())) if file is None: continue elif file.endswith(".flac"): m = mutagen.File(file) artist: str = m.get("artist", [""])[0] title: str = m.get("title", [""])[0] extinf: str = ( f"#EXTINF:{math.ceil(m.info.length)}," f"{artist} - {title}\n{file}\n" ) m3u_text += extinf elif file.endswith(".mka"): m = mutagen.File(file) artist: str = m.get("ARTI", [""])[0] title: str = m.get("TITL", [""])[0] extinf: str = ( f"#EXTINF:{math.ceil(m.info.length)}," f"{artist} - {title}\n{file}\n" ) m3u_text += extinf elif file.endswith(".m4a"): # Mutagen cannot read .m4a files, so make a copy with all # of the metadata tags as a .mp4 in a temporary directory
logger = logging.getLogger("__name__") @dataclass class Playlist: playlist_id: str # UUID4 def __post_init__(self): self.playlist_dir: Optional[Path] = None self.playlist_cover_saved: bool = False def get_metadata(self, session: Session): """Request from TIDAL API /playlists endpoint""" self.metadata: Optional[PlaylistsEndpointResponseJSON] = request_playlists( session=session, identifier=self.playlist_id ) if self.metadata is None: return self.name = ( self.metadata.title.replace("/", "_") .replace("|", "_") .replace(":", " -") .replace('"', "") .replace("..", "") ) def set_items(self, session: Session): """Uses data from TIDAL API /playlists/items endpoint to populate self.items""" playlist_items: Optional[PlaylistsItemsResponseJSON] = get_playlist( session=session, playlist_id=self.playlist_id ) if playlist_items is None: self.items = tuple() else: self.items: Tuple[Optional[PlaylistItem]] = tuple(playlist_items.items) def set_dir(self, out_dir: Path): """Populates self.playlist_dir based on self.name, self.playlist_id""" playlist_substring: str = f"{self.name} [{self.playlist_id}]" self.playlist_dir: Path = out_dir / "Playlists" / playlist_substring self.playlist_dir.mkdir(parents=True, exist_ok=True) def save_cover_image(self, session: Session, out_dir: Path): """Requests self.metadata.image and attempts to write it to disk""" if self.playlist_dir is None: self.set_dir(out_dir=out_dir) self.cover_path: Path = self.playlist_dir / "cover.jpg" if not self.cover_path.exists(): download_cover_image( session=session, cover_uuid=self.metadata.square_image, output_dir=self.playlist_dir, dimension=1080, ) else: self.playlist_cover_saved = True def save_description(self): """Requests self.metadata.description and attempts to write it to disk""" description_path: Path = self.playlist_dir / "PlaylistDescription.txt" if self.metadata.description is not None and len(self.metadata.description) > 0: if not description_path.exists(): description_path.write_text(f"{self.metadata.description}\n") def get_items(self, session: Session, audio_format: AudioFormat): """Using either Track.get() or Video.get(), attempt to request the data for each track or video in self.items""" if len(self.items) == 0: return tracks_videos: list = [None] * len(self.items) for i, item in enumerate(self.items): if item is None: tracks_videos[i] = None continue elif isinstance(item, TracksEndpointResponseJSON): track: Track = Track(track_id=item.id) track.get( session=session, audio_format=audio_format, out_dir=self.playlist_dir, metadata=item, ) tracks_videos[i] = track elif isinstance(item, VideosEndpointResponseJSON): video: Video = Video(video_id=item.id) video.get( session=session, out_dir=self.playlist_dir, metadata=item, ) tracks_videos[i] = video else: tracks_videos[i] = None continue else: self.tracks_videos: Tuple[ Tuple[int, Optional[Union[Track, Video]]] ] = tuple(tracks_videos) return tracks_videos def flatten_playlist_dir(self): """When self.get_items() is called, the tracks and/or videos in self.items are downloaded using their self-contained .get() logic; this means that they will be downloaded to albums. 
This function "flattens" self.playlist_dir, meaning that it moves all downloaded audio and video files to self.playlist_dir, and removes the various subdirectories created""" files: List[Dict[int, Optional[str]]] = [None] * len(self.tracks_videos) if len(self.tracks_videos) == 0: return subdirs: Set[Path] = set() for i, tv in enumerate(self.tracks_videos, 1): if getattr(tv, "outfile") is None: try: getattr(tv, "album_dir") except AttributeError: pass else: subdirs.add(tv.album_dir) subdirs.add(tv.album_dir.parent) files[i - 1] = {i: None} continue _path: Optional[Path] = Path(tv.outfile) if tv is not None else None # if the item never got turned into a track or video if _path is None: files[i - 1] = {i: None} continue # if the track or video didn't download if _path.exists(): if _path.stat().st_size == 0: files[i - 1] = {i: None} continue else: files[i - 1] = {i: None} continue # otherwise, move files and clean up if isinstance(tv, Track): new_path: Path = self.playlist_dir / f"{i:03d} - {tv.trackname}" new_path.write_bytes(_path.read_bytes()) _path.unlink() files[i - 1] = {i: str(new_path.absolute())} elif isinstance(tv, Video): new_path: Path = self.playlist_dir / f"{i:03d} - {_path.name}" new_path.write_bytes(_path.read_bytes()) _path.unlink() files[i - 1] = {i: str(new_path.absolute())} else: self.files: List[Dict[int, Optional[str]]] = files # Find all subdirectories written to subdirs: Set[Path] = set() for tv in self.tracks_videos: if isinstance(tv, Track): try: getattr(tv, "album_dir") except AttributeError: pass else: subdirs.add(tv.album_dir) subdirs.add(tv.album_dir.parent) elif isinstance(tv, Video): subdirs.add(tv.artist_dir) # Copy all artist images, artist bio JSON files out # of subdirs artist_images: Set[Path] = set() for subdir in subdirs: for p in subdir.glob("*.jpg"): if p.name == "cover.jpg": continue artist_images.add(p) else: for artist_image_path in artist_images: if artist_image_path.exists(): shutil.copyfile( artist_image_path.absolute(), self.playlist_dir / artist_image_path.name, ) artist_bios: Set[Path] = set() for subdir in subdirs: for p in subdir.glob("*bio.json"): artist_bios.add(p) else: for artist_bio_path in artist_bios: if artist_bio_path.exists(): shutil.copyfile( artist_bio_path.absolute(), self.playlist_dir / artist_bio_path.name, ) # Remove all subdirs for subdir in subdirs: if subdir.exists(): shutil.rmtree(subdir) else: return self.playlist_dir def craft_m3u8_text(self): """This method creates a file called playlist.m3u8 in self.playlist_dir that is a standard M3U. Needs to be called after self.flatten_playlist_dir in order to be able to access self.files N.b. 
the already-written file is temporarily copied to a .mp4 version in a temporary directory because .m4a files cannot be read with mutagen.""" m3u_text: str = f"#EXTM3U\n#EXTENC:UTF-8\n#EXTIMG:{str(self.cover_path.absolute())}\n#PLAYLIST:{self.name}\n" logger.info( f"Creating .m3u8 playlist file for Playlist with ID '{self.playlist_id}'" ) for d in self.files: file: str = next(iter(d.values())) if file is None: continue elif file.endswith(".flac"): m = mutagen.File(file) artist: str = m.get("artist", [""])[0] title: str = m.get("title", [""])[0] extinf: str = ( f"#EXTINF:{math.ceil(m.info.length)}," f"{artist} - {title}\n{file}\n" ) m3u_text += extinf elif file.endswith(".mka"): m = mutagen.File(file) artist: str = m.get("ARTI", [""])[0] title: str = m.get("TITL", [""])[0] extinf: str = ( f"#EXTINF:{math.ceil(m.info.length)}," f"{artist} - {title}\n{file}\n" ) m3u_text += extinf elif file.endswith(".m4a"): # Mutagen cannot read .m4a files, so make a copy with all # of the metadata tags as a .mp4 in a temporary directory
with temporary_file(suffix=".mp4") as tf:
7
2023-12-12 21:50:25+00:00
16k
Deltares/imod-python
imod/tests/fixtures/mf6_flow_with_transport_fixture.py
[ { "identifier": "InitialConditions", "path": "imod/mf6/ic.py", "snippet": "class InitialConditions(Package):\n \"\"\"\n Initial Conditions (IC) Package information is read from the file that is\n specified by \"IC6\" as the file type. Only one IC Package can be specified\n for a GWF model.\n...
import numpy as np import pytest import xarray as xr from imod.mf6 import ( GroundwaterFlowModel, InitialConditions, NodePropertyFlow, OutputControl, River, SpecificStorage, )
12,564
def conductance_fc(): globaltimes = np.array( [ "2000-01-01", "2000-01-02", "2000-01-03", ], dtype="datetime64[ns]", ) idomain = get_data_array(grid_dimensions(), globaltimes) # Constant head conductance = xr.full_like(idomain, np.nan) return conductance @pytest.fixture(scope="session") def elevation_fc(): idomain = get_data_array(grid_dimensions(), globaltimes) elevation = xr.full_like(idomain, np.nan) elevation[:, 0, 7, 7:9] = 1.0 return elevation @pytest.fixture(scope="session") def rate_fc(): idomain = get_data_array(grid_dimensions(), globaltimes) rate = xr.full_like(idomain, np.nan) rate[:, 0, 7, 7:9] = 0.001 return rate @pytest.fixture(scope="session") def proportion_rate_fc(): idomain = get_data_array(grid_dimensions(), globaltimes) proportion_rate = xr.full_like(idomain, np.nan) proportion_rate[:, 0, 7, 7:9] = 0.3 return proportion_rate @pytest.fixture(scope="session") def proportion_depth_fc(): idomain = get_data_array(grid_dimensions(), globaltimes) proportion_depth = xr.full_like(idomain, np.nan) proportion_depth[:, 0, 7, 7:9] = 0.4 return proportion_depth @pytest.fixture(scope="session") def porosity_fc(): idomain = get_data_array(grid_dimensions(), globaltimes) porosity_fc = xr.full_like(idomain, np.nan).isel(time=0) return porosity_fc @pytest.fixture(scope="session") def decay_fc(): idomain = get_data_array(grid_dimensions(), globaltimes) decay_fc = xr.full_like(idomain, np.nan).isel(time=0) return decay_fc @pytest.fixture(scope="session") def decay_sorbed_fc(): idomain = get_data_array(grid_dimensions(), globaltimes) decay_sorbed_fc = xr.full_like(idomain, np.nan).isel(time=0) return decay_sorbed_fc @pytest.fixture(scope="session") def bulk_density_fc(): idomain = get_data_array(grid_dimensions(), globaltimes) bulk_density_fc = xr.full_like(idomain, np.nan).isel(time=0) return bulk_density_fc @pytest.fixture(scope="session") def distcoef_fc(): idomain = get_data_array(grid_dimensions(), globaltimes) distcoef_fc = xr.full_like(idomain, np.nan).isel(time=0) return distcoef_fc @pytest.fixture(scope="session") def sp2_fc(): idomain = get_data_array(grid_dimensions(), globaltimes) sp2_fc = xr.full_like(idomain, np.nan).isel(time=0) return sp2_fc @pytest.fixture(scope="function") @pytest.mark.usefixtures("concentration_fc") def flow_model_with_concentration(concentration_fc): idomain = get_data_array(grid_dimensions(), globaltimes) cellType = xr.full_like(idomain.isel(time=0), 1, dtype=np.int32) k = xr.full_like(idomain.isel(time=0), 10.0) k33 = xr.full_like(idomain.isel(time=0), 10.0) # River riv_dict = dict( stage=idomain.sel(layer=1), conductance=idomain.sel(layer=1), bottom_elevation=idomain.sel(layer=1) - 1.0, concentration=concentration_fc.sel(layer=1), ) gwf_model = GroundwaterFlowModel()
globaltimes = np.array( [ "2000-01-01", "2000-01-02", "2000-01-03", ], dtype="datetime64[ns]", ) class grid_dimensions: nlay = 3 nrow = 15 ncol = 15 dx = 5000 dy = -5000 xmin = 0 ymin = 0 def get_data_array(dimensions, globaltimes): ntimes = len(globaltimes) shape = (ntimes, dimensions.nlay, dimensions.nrow, dimensions.ncol) dims = ("time", "layer", "y", "x") layer = np.array([1, 2, 3]) xmax = dimensions.dx * dimensions.ncol ymax = abs(dimensions.dy) * dimensions.nrow y = np.arange(ymax, dimensions.ymin, dimensions.dy) + 0.5 * dimensions.dy x = np.arange(dimensions.xmin, xmax, dimensions.dx) + 0.5 * dimensions.dx coords = {"time": globaltimes, "layer": layer, "y": y, "x": x} # Discretization data return xr.DataArray( np.ones(shape), coords=coords, dims=dims, ) @pytest.fixture(scope="session") def head_fc(): idomain = get_data_array(grid_dimensions(), globaltimes) # Constant head head = xr.full_like(idomain, np.nan) return head @pytest.fixture(scope="session") def concentration_fc(): idomain = get_data_array(grid_dimensions(), globaltimes) idomain = idomain.expand_dims(species=["salinity", "temperature"]) concentration = xr.full_like(idomain, np.nan) return concentration @pytest.fixture(scope="session") def conductance_fc(): globaltimes = np.array( [ "2000-01-01", "2000-01-02", "2000-01-03", ], dtype="datetime64[ns]", ) idomain = get_data_array(grid_dimensions(), globaltimes) # Constant head conductance = xr.full_like(idomain, np.nan) return conductance @pytest.fixture(scope="session") def elevation_fc(): idomain = get_data_array(grid_dimensions(), globaltimes) elevation = xr.full_like(idomain, np.nan) elevation[:, 0, 7, 7:9] = 1.0 return elevation @pytest.fixture(scope="session") def rate_fc(): idomain = get_data_array(grid_dimensions(), globaltimes) rate = xr.full_like(idomain, np.nan) rate[:, 0, 7, 7:9] = 0.001 return rate @pytest.fixture(scope="session") def proportion_rate_fc(): idomain = get_data_array(grid_dimensions(), globaltimes) proportion_rate = xr.full_like(idomain, np.nan) proportion_rate[:, 0, 7, 7:9] = 0.3 return proportion_rate @pytest.fixture(scope="session") def proportion_depth_fc(): idomain = get_data_array(grid_dimensions(), globaltimes) proportion_depth = xr.full_like(idomain, np.nan) proportion_depth[:, 0, 7, 7:9] = 0.4 return proportion_depth @pytest.fixture(scope="session") def porosity_fc(): idomain = get_data_array(grid_dimensions(), globaltimes) porosity_fc = xr.full_like(idomain, np.nan).isel(time=0) return porosity_fc @pytest.fixture(scope="session") def decay_fc(): idomain = get_data_array(grid_dimensions(), globaltimes) decay_fc = xr.full_like(idomain, np.nan).isel(time=0) return decay_fc @pytest.fixture(scope="session") def decay_sorbed_fc(): idomain = get_data_array(grid_dimensions(), globaltimes) decay_sorbed_fc = xr.full_like(idomain, np.nan).isel(time=0) return decay_sorbed_fc @pytest.fixture(scope="session") def bulk_density_fc(): idomain = get_data_array(grid_dimensions(), globaltimes) bulk_density_fc = xr.full_like(idomain, np.nan).isel(time=0) return bulk_density_fc @pytest.fixture(scope="session") def distcoef_fc(): idomain = get_data_array(grid_dimensions(), globaltimes) distcoef_fc = xr.full_like(idomain, np.nan).isel(time=0) return distcoef_fc @pytest.fixture(scope="session") def sp2_fc(): idomain = get_data_array(grid_dimensions(), globaltimes) sp2_fc = xr.full_like(idomain, np.nan).isel(time=0) return sp2_fc @pytest.fixture(scope="function") @pytest.mark.usefixtures("concentration_fc") def flow_model_with_concentration(concentration_fc): 
idomain = get_data_array(grid_dimensions(), globaltimes) cellType = xr.full_like(idomain.isel(time=0), 1, dtype=np.int32) k = xr.full_like(idomain.isel(time=0), 10.0) k33 = xr.full_like(idomain.isel(time=0), 10.0) # River riv_dict = dict( stage=idomain.sel(layer=1), conductance=idomain.sel(layer=1), bottom_elevation=idomain.sel(layer=1) - 1.0, concentration=concentration_fc.sel(layer=1), ) gwf_model = GroundwaterFlowModel()
gwf_model["npf"] = NodePropertyFlow(
2
2023-12-08 13:57:59+00:00
16k
camenduru/MotionDirector-hf
MotionDirector_train.py
[ { "identifier": "UNet3DConditionModel", "path": "models/unet_3d_condition.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin):\n r\"\"\"\n UNet3DConditionModel is a conditional 2D UNet model that takes in a noisy sample, conditional state, and a timestep\n and returns sample sh...
import argparse import datetime import logging import inspect import math import os import random import gc import copy import torch import torch.nn.functional as F import torch.utils.checkpoint import diffusers import transformers import imageio import numpy as np import itertools import bitsandbytes as bnb from typing import Dict, Optional, Tuple from omegaconf import OmegaConf from torchvision import transforms from tqdm.auto import tqdm from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from models.unet_3d_condition import UNet3DConditionModel from diffusers.models import AutoencoderKL from diffusers import DDIMScheduler, TextToVideoSDPipeline from diffusers.optimization import get_scheduler from diffusers.utils.import_utils import is_xformers_available from diffusers.models.attention_processor import AttnProcessor2_0, Attention from diffusers.models.attention import BasicTransformerBlock from transformers import CLIPTextModel, CLIPTokenizer from transformers.models.clip.modeling_clip import CLIPEncoder from utils.dataset import VideoJsonDataset, SingleVideoDataset, \ ImageDataset, VideoFolderDataset, CachedDataset from einops import rearrange, repeat from utils.lora_handler import LoraHandler from utils.lora import extract_lora_child_module from utils.ddim_utils import ddim_inversion from xformers.ops import MemoryEfficientAttentionFlashAttentionOp
11,373
already_printed_trainables = False logger = get_logger(__name__, log_level="INFO") def create_logging(logging, logger, accelerator): logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) def accelerate_set_verbose(accelerator): if accelerator.is_local_main_process: transformers.utils.logging.set_verbosity_warning() diffusers.utils.logging.set_verbosity_info() else: transformers.utils.logging.set_verbosity_error() diffusers.utils.logging.set_verbosity_error() def get_train_dataset(dataset_types, train_data, tokenizer): train_datasets = [] # Loop through all available datasets, get the name, then add to list of data to process.
already_printed_trainables = False logger = get_logger(__name__, log_level="INFO") def create_logging(logging, logger, accelerator): logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) def accelerate_set_verbose(accelerator): if accelerator.is_local_main_process: transformers.utils.logging.set_verbosity_warning() diffusers.utils.logging.set_verbosity_info() else: transformers.utils.logging.set_verbosity_error() diffusers.utils.logging.set_verbosity_error() def get_train_dataset(dataset_types, train_data, tokenizer): train_datasets = [] # Loop through all available datasets, get the name, then add to list of data to process.
for DataSet in [VideoJsonDataset, SingleVideoDataset, ImageDataset, VideoFolderDataset]:
2
2023-12-11 04:51:39+00:00
16k
ZS-YANG/FemtoDet-v3
mmdet/models/dense_heads/atss_vlfusion_head.py
[ { "identifier": "MODELS", "path": "mmdet/registry.py", "snippet": "MODELS = Registry('model', parent=MMENGINE_MODELS, locations=['mmdet.models'])" }, { "identifier": "cat_boxes", "path": "mmdet/structures/bbox/transforms.py", "snippet": "def cat_boxes(data_list: List[Union[Tensor, BaseBo...
import copy import math import torch import torch.nn as nn import torch.nn.functional as F from typing import Callable, List, Optional, Sequence, Tuple, Union from mmcv.cnn import Scale from mmcv.ops.modulated_deform_conv import ModulatedDeformConv2d from mmengine.config import ConfigDict from mmengine.model import BaseModel from mmengine.structures import InstanceData from torch import Tensor from transformers import BertConfig from mmdet.registry import MODELS from mmdet.structures.bbox import cat_boxes from mmdet.utils import InstanceList, OptInstanceList, reduce_mean from ..utils import (BertEncoderLayer, VLFuse, filter_scores_and_topk, permute_and_flatten, select_single_mlvl, unpack_gt_instances) from ..utils.vlfuse_helper import MAX_CLAMP_VALUE from .atss_head import ATSSHead
10,959
self.dot_product_projection_text = nn.Linear( self.lang_dim, self.num_base_priors * self.feat_channels, bias=True) self.log_scale = nn.Parameter(torch.Tensor([0.0]), requires_grad=True) self.bias_lang = nn.Parameter( torch.zeros(self.lang_dim), requires_grad=True) self.bias0 = nn.Parameter( torch.Tensor([bias_value]), requires_grad=True) self.scales = nn.ModuleList([Scale(1.0) for _ in range(5)]) def forward(self, visual_feats: Tuple[Tensor], language_feats: dict) -> Tuple: feat_inputs = {'visual': visual_feats, 'lang': language_feats} dyhead_tower = self.dyhead_tower(feat_inputs) if self.early_fuse: embedding = dyhead_tower['lang']['hidden'] else: embedding = language_feats['embedded'] embedding = F.normalize(embedding, p=2, dim=-1) dot_product_proj_tokens = self.dot_product_projection_text(embedding / 2.0) dot_product_proj_tokens_bias = torch.matmul( embedding, self.bias_lang) + self.bias0 bbox_preds = [] centerness = [] cls_logits = [] for i, feature in enumerate(visual_feats): visual = dyhead_tower['visual'][i] B, C, H, W = visual.shape bbox_pred = self.scales[i](self.bbox_pred(visual)) bbox_preds.append(bbox_pred) centerness.append(self.centerness(visual)) dot_product_proj_queries = permute_and_flatten( visual, B, self.num_base_priors, C, H, W) bias = dot_product_proj_tokens_bias.unsqueeze(1).repeat( 1, self.num_base_priors, 1) dot_product_logit = ( torch.matmul(dot_product_proj_queries, dot_product_proj_tokens.transpose(-1, -2)) / self.log_scale.exp()) + bias dot_product_logit = torch.clamp( dot_product_logit, max=MAX_CLAMP_VALUE) dot_product_logit = torch.clamp( dot_product_logit, min=-MAX_CLAMP_VALUE) cls_logits.append(dot_product_logit) return bbox_preds, centerness, cls_logits @MODELS.register_module() class ATSSVLFusionHead(ATSSHead): """ATSS head with visual-language fusion module. Args: early_fuse (bool): Whether to fuse visual and language features Defaults to False. use_checkpoint (bool): Whether to use checkpoint. Defaults to False. num_dyhead_blocks (int): Number of dynamic head blocks. Defaults to 6. lang_model_name (str): Name of the language model. Defaults to 'bert-base-uncased'. 
""" def __init__(self, *args, early_fuse: bool = False, use_checkpoint: bool = False, num_dyhead_blocks: int = 6, lang_model_name: str = 'bert-base-uncased', init_cfg=None, **kwargs): super().__init__(*args, **kwargs, init_cfg=init_cfg) self.head = VLFusionModule( in_channels=self.in_channels, feat_channels=self.feat_channels, num_base_priors=self.num_base_priors, early_fuse=early_fuse, use_checkpoint=use_checkpoint, num_dyhead_blocks=num_dyhead_blocks, lang_model_name=lang_model_name) self.text_masks = None def _init_layers(self) -> None: """No need to initialize the ATSS head layer.""" pass def forward(self, visual_feats: Tuple[Tensor], language_feats: dict) -> Tuple[Tensor]: """Forward function.""" bbox_preds, centerness, cls_logits = self.head(visual_feats, language_feats) return cls_logits, bbox_preds, centerness def loss(self, visual_feats: Tuple[Tensor], language_feats: dict, batch_data_samples): outputs = unpack_gt_instances(batch_data_samples) (batch_gt_instances, batch_gt_instances_ignore, batch_img_metas) = outputs outs = self(visual_feats, language_feats) self.text_masks = language_feats['masks'] loss_inputs = outs + (batch_gt_instances, batch_img_metas, batch_gt_instances_ignore) losses = self.loss_by_feat(*loss_inputs) return losses def loss_by_feat( self, cls_scores: List[Tensor], bbox_preds: List[Tensor], centernesses: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict],
# Copyright (c) OpenMMLab. All rights reserved. try: except ImportError: BertConfig = None def convert_grounding_to_cls_scores(logits: Tensor, positive_maps: List[dict]) -> Tensor: """Convert logits to class scores.""" assert len(positive_maps) == logits.shape[0] # batch size scores = torch.zeros(logits.shape[0], logits.shape[1], len(positive_maps[0])).to(logits.device) if positive_maps is not None: if all(x == positive_maps[0] for x in positive_maps): # only need to compute once positive_map = positive_maps[0] for label_j in positive_map: scores[:, :, label_j - 1] = logits[:, :, torch.LongTensor(positive_map[label_j] )].mean(-1) else: for i, positive_map in enumerate(positive_maps): for label_j in positive_map: scores[i, :, label_j - 1] = logits[ i, :, torch.LongTensor(positive_map[label_j])].mean(-1) return scores class Conv3x3Norm(nn.Module): """Conv3x3 and norm.""" def __init__(self, in_channels: int, out_channels: int, stride: int, groups: int = 1, use_dcn: bool = False, norm_type: Optional[Union[Sequence, str]] = None): super().__init__() if use_dcn: self.conv = ModulatedDeformConv2d( in_channels, out_channels, kernel_size=3, stride=stride, padding=1, groups=groups) else: self.conv = nn.Conv2d( in_channels, out_channels, kernel_size=3, stride=stride, padding=1, groups=groups) if isinstance(norm_type, Sequence): assert len(norm_type) == 2 assert norm_type[0] == 'gn' gn_group = norm_type[1] norm_type = norm_type[0] if norm_type == 'bn': bn_op = nn.BatchNorm2d(out_channels) elif norm_type == 'gn': bn_op = nn.GroupNorm( num_groups=gn_group, num_channels=out_channels) if norm_type is not None: self.bn = bn_op else: self.bn = None def forward(self, x, **kwargs): x = self.conv(x, **kwargs) if self.bn: x = self.bn(x) return x class DyReLU(nn.Module): """Dynamic ReLU.""" def __init__(self, in_channels: int, out_channels: int, expand_ratio: int = 4): super().__init__() self.avg_pool = nn.AdaptiveAvgPool2d(1) self.expand_ratio = expand_ratio self.out_channels = out_channels self.fc = nn.Sequential( nn.Linear(in_channels, in_channels // expand_ratio), nn.ReLU(inplace=True), nn.Linear(in_channels // expand_ratio, out_channels * self.expand_ratio), nn.Hardsigmoid(inplace=True)) def forward(self, x) -> Tensor: x_out = x b, c, h, w = x.size() x = self.avg_pool(x).view(b, c) x = self.fc(x).view(b, -1, 1, 1) a1, b1, a2, b2 = torch.split(x, self.out_channels, dim=1) a1 = (a1 - 0.5) * 2 + 1.0 a2 = (a2 - 0.5) * 2 b1 = b1 - 0.5 b2 = b2 - 0.5 out = torch.max(x_out * a1 + b1, x_out * a2 + b2) return out class DyConv(nn.Module): """Dynamic Convolution.""" def __init__(self, conv_func: Callable, in_channels: int, out_channels: int, use_dyfuse: bool = True, use_dyrelu: bool = False, use_dcn: bool = False): super().__init__() self.dyconvs = nn.ModuleList() self.dyconvs.append(conv_func(in_channels, out_channels, 1)) self.dyconvs.append(conv_func(in_channels, out_channels, 1)) self.dyconvs.append(conv_func(in_channels, out_channels, 2)) if use_dyfuse: self.attnconv = nn.Sequential( nn.AdaptiveAvgPool2d(1), nn.Conv2d(in_channels, 1, kernel_size=1), nn.ReLU(inplace=True)) self.h_sigmoid = nn.Hardsigmoid(inplace=True) else: self.attnconv = None if use_dyrelu: self.relu = DyReLU(in_channels, out_channels) else: self.relu = nn.ReLU() if use_dcn: self.offset = nn.Conv2d( in_channels, 27, kernel_size=3, stride=1, padding=1) else: self.offset = None self.init_weights() def init_weights(self): for m in self.dyconvs.modules(): if isinstance(m, nn.Conv2d): nn.init.normal_(m.weight.data, 0, 0.01) if m.bias is not None: 
m.bias.data.zero_() if self.attnconv is not None: for m in self.attnconv.modules(): if isinstance(m, nn.Conv2d): nn.init.normal_(m.weight.data, 0, 0.01) if m.bias is not None: m.bias.data.zero_() def forward(self, inputs: dict) -> dict: visual_feats = inputs['visual'] out_vis_feats = [] for level, feature in enumerate(visual_feats): offset_conv_args = {} if self.offset is not None: offset_mask = self.offset(feature) offset = offset_mask[:, :18, :, :] mask = offset_mask[:, 18:, :, :].sigmoid() offset_conv_args = dict(offset=offset, mask=mask) temp_feats = [self.dyconvs[1](feature, **offset_conv_args)] if level > 0: temp_feats.append(self.dyconvs[2](visual_feats[level - 1], **offset_conv_args)) if level < len(visual_feats) - 1: temp_feats.append( F.upsample_bilinear( self.dyconvs[0](visual_feats[level + 1], **offset_conv_args), size=[feature.size(2), feature.size(3)])) mean_feats = torch.mean( torch.stack(temp_feats), dim=0, keepdim=False) if self.attnconv is not None: attn_feat = [] res_feat = [] for feat in temp_feats: res_feat.append(feat) attn_feat.append(self.attnconv(feat)) res_feat = torch.stack(res_feat) spa_pyr_attn = self.h_sigmoid(torch.stack(attn_feat)) mean_feats = torch.mean( res_feat * spa_pyr_attn, dim=0, keepdim=False) out_vis_feats.append(mean_feats) out_vis_feats = [self.relu(item) for item in out_vis_feats] features_dict = {'visual': out_vis_feats, 'lang': inputs['lang']} return features_dict class VLFusionModule(BaseModel): """Visual-lang Fusion Module.""" def __init__(self, in_channels: int, feat_channels: int, num_base_priors: int, early_fuse: bool = False, num_dyhead_blocks: int = 6, lang_model_name: str = 'bert-base-uncased', use_dyrelu: bool = True, use_dyfuse: bool = True, use_dcn: bool = True, use_checkpoint: bool = False, **kwargs) -> None: super().__init__(**kwargs) if BertConfig is None: raise RuntimeError( 'transformers is not installed, please install it by: ' 'pip install transformers.') self.in_channels = in_channels self.feat_channels = feat_channels self.num_base_priors = num_base_priors self.early_fuse = early_fuse self.num_dyhead_blocks = num_dyhead_blocks self.use_dyrelu = use_dyrelu self.use_dyfuse = use_dyfuse self.use_dcn = use_dcn self.use_checkpoint = use_checkpoint self.lang_cfg = BertConfig.from_pretrained(lang_model_name) self.lang_dim = self.lang_cfg.hidden_size self._init_layers() def _init_layers(self) -> None: """Initialize layers of the model.""" bias_value = -math.log((1 - 0.01) / 0.01) dyhead_tower = [] for i in range(self.num_dyhead_blocks): if self.early_fuse: # cross-modality fusion dyhead_tower.append(VLFuse(use_checkpoint=self.use_checkpoint)) # lang branch dyhead_tower.append( BertEncoderLayer( self.lang_cfg, clamp_min_for_underflow=True, clamp_max_for_overflow=True)) # vision branch dyhead_tower.append( DyConv( lambda i, o, s: Conv3x3Norm( i, o, s, use_dcn=self.use_dcn, norm_type=['gn', 16]), self.in_channels if i == 0 else self.feat_channels, self.feat_channels, use_dyrelu=(self.use_dyrelu and self.in_channels == self.feat_channels) if i == 0 else self.use_dyrelu, use_dyfuse=(self.use_dyfuse and self.in_channels == self.feat_channels) if i == 0 else self.use_dyfuse, use_dcn=(self.use_dcn and self.in_channels == self.feat_channels) if i == 0 else self.use_dcn, )) self.add_module('dyhead_tower', nn.Sequential(*dyhead_tower)) self.bbox_pred = nn.Conv2d( self.feat_channels, self.num_base_priors * 4, kernel_size=1) self.centerness = nn.Conv2d( self.feat_channels, self.num_base_priors * 1, kernel_size=1) 
self.dot_product_projection_text = nn.Linear( self.lang_dim, self.num_base_priors * self.feat_channels, bias=True) self.log_scale = nn.Parameter(torch.Tensor([0.0]), requires_grad=True) self.bias_lang = nn.Parameter( torch.zeros(self.lang_dim), requires_grad=True) self.bias0 = nn.Parameter( torch.Tensor([bias_value]), requires_grad=True) self.scales = nn.ModuleList([Scale(1.0) for _ in range(5)]) def forward(self, visual_feats: Tuple[Tensor], language_feats: dict) -> Tuple: feat_inputs = {'visual': visual_feats, 'lang': language_feats} dyhead_tower = self.dyhead_tower(feat_inputs) if self.early_fuse: embedding = dyhead_tower['lang']['hidden'] else: embedding = language_feats['embedded'] embedding = F.normalize(embedding, p=2, dim=-1) dot_product_proj_tokens = self.dot_product_projection_text(embedding / 2.0) dot_product_proj_tokens_bias = torch.matmul( embedding, self.bias_lang) + self.bias0 bbox_preds = [] centerness = [] cls_logits = [] for i, feature in enumerate(visual_feats): visual = dyhead_tower['visual'][i] B, C, H, W = visual.shape bbox_pred = self.scales[i](self.bbox_pred(visual)) bbox_preds.append(bbox_pred) centerness.append(self.centerness(visual)) dot_product_proj_queries = permute_and_flatten( visual, B, self.num_base_priors, C, H, W) bias = dot_product_proj_tokens_bias.unsqueeze(1).repeat( 1, self.num_base_priors, 1) dot_product_logit = ( torch.matmul(dot_product_proj_queries, dot_product_proj_tokens.transpose(-1, -2)) / self.log_scale.exp()) + bias dot_product_logit = torch.clamp( dot_product_logit, max=MAX_CLAMP_VALUE) dot_product_logit = torch.clamp( dot_product_logit, min=-MAX_CLAMP_VALUE) cls_logits.append(dot_product_logit) return bbox_preds, centerness, cls_logits @MODELS.register_module() class ATSSVLFusionHead(ATSSHead): """ATSS head with visual-language fusion module. Args: early_fuse (bool): Whether to fuse visual and language features Defaults to False. use_checkpoint (bool): Whether to use checkpoint. Defaults to False. num_dyhead_blocks (int): Number of dynamic head blocks. Defaults to 6. lang_model_name (str): Name of the language model. Defaults to 'bert-base-uncased'. 
""" def __init__(self, *args, early_fuse: bool = False, use_checkpoint: bool = False, num_dyhead_blocks: int = 6, lang_model_name: str = 'bert-base-uncased', init_cfg=None, **kwargs): super().__init__(*args, **kwargs, init_cfg=init_cfg) self.head = VLFusionModule( in_channels=self.in_channels, feat_channels=self.feat_channels, num_base_priors=self.num_base_priors, early_fuse=early_fuse, use_checkpoint=use_checkpoint, num_dyhead_blocks=num_dyhead_blocks, lang_model_name=lang_model_name) self.text_masks = None def _init_layers(self) -> None: """No need to initialize the ATSS head layer.""" pass def forward(self, visual_feats: Tuple[Tensor], language_feats: dict) -> Tuple[Tensor]: """Forward function.""" bbox_preds, centerness, cls_logits = self.head(visual_feats, language_feats) return cls_logits, bbox_preds, centerness def loss(self, visual_feats: Tuple[Tensor], language_feats: dict, batch_data_samples): outputs = unpack_gt_instances(batch_data_samples) (batch_gt_instances, batch_gt_instances_ignore, batch_img_metas) = outputs outs = self(visual_feats, language_feats) self.text_masks = language_feats['masks'] loss_inputs = outs + (batch_gt_instances, batch_img_metas, batch_gt_instances_ignore) losses = self.loss_by_feat(*loss_inputs) return losses def loss_by_feat( self, cls_scores: List[Tensor], bbox_preds: List[Tensor], centernesses: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict],
batch_gt_instances_ignore: OptInstanceList = None) -> dict:
3
2023-12-11 15:23:03+00:00
16k
merlresearch/PixPNet
pixpnet/protonets/lit_model.py
[ { "identifier": "get_metadata", "path": "pixpnet/data.py", "snippet": "def get_metadata(config):\n dataset = config.dataset.name.upper().replace(\"-\", \"\")\n metadata = DatasetMeta(\n output_size=DATA_NUM_OUTPUTS[dataset],\n input_channels=DATA_CHANNELS[dataset],\n input_siz...
import argparse import torch from typing import Tuple from torch import nn from torchmetrics import Accuracy from pytorch_lightning.loops import FitLoop from pytorch_lightning.loops.fit_loop import _FitLoop as FitLoop from pytorch_lightning import LightningModule, Trainer from pytorch_lightning.loggers import TensorBoardLogger from pixpnet.data import get_metadata from pixpnet.lightning.lightning_data import LitData from pixpnet.lightning.lit_module import BaseLitModel from pixpnet.optim import get_optimizer_cls, get_scheduler from pixpnet.protonets.loss import ClusterLoss, L1ReadoutLoss, SeparationLoss from pixpnet.protonets.models.protonet import ProtoNet, protonet from pixpnet.protonets.push import push_prototypes from pixpnet.utils import get_logger, intersect_func_and_kwargs
10,856
# Copyright (c) 2022-2023 Mitsubishi Electric Research Laboratories (MERL) # # SPDX-License-Identifier: AGPL-3.0-or-later try: except ImportError: logger = get_logger(__name__) def params_with_grad(parameters): return filter(lambda p: p.requires_grad, parameters) def make_optimizers_proto( model: ProtoNet, config: argparse.Namespace, ) -> Tuple[torch.optim.Optimizer, ...]: """"""
# Copyright (c) 2022-2023 Mitsubishi Electric Research Laboratories (MERL) # # SPDX-License-Identifier: AGPL-3.0-or-later try: except ImportError: logger = get_logger(__name__) def params_with_grad(parameters): return filter(lambda p: p.requires_grad, parameters) def make_optimizers_proto( model: ProtoNet, config: argparse.Namespace, ) -> Tuple[torch.optim.Optimizer, ...]: """"""
optimizer_cls, hparams = get_optimizer_cls(config, ignore={"fine_tune_lr", "readout_lr"})
3
2023-12-06 23:49:31+00:00
16k
open-mmlab/PIA
animatediff/pipelines/i2v_pipeline.py
[ { "identifier": "InflatedConv3d", "path": "animatediff/models/resnet.py", "snippet": "class InflatedConv3d(nn.Conv2d):\n def forward(self, x):\n video_length = x.shape[2]\n\n x = rearrange(x, \"b c f h w -> (b f) c h w\")\n x = super().forward(x)\n x = rearrange(x, \"(b f)...
import inspect import os.path as osp import numpy as np import torch from dataclasses import dataclass from typing import Callable, List, Optional, Union from diffusers.configuration_utils import FrozenDict from diffusers.loaders import IPAdapterMixin, TextualInversionLoaderMixin from diffusers.models import AutoencoderKL from diffusers.pipelines import DiffusionPipeline from diffusers.schedulers import (DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler) from diffusers.utils import (BaseOutput, deprecate, is_accelerate_available, logging) from diffusers.utils.import_utils import is_xformers_available from einops import rearrange from omegaconf import OmegaConf from packaging import version from safetensors import safe_open from tqdm import tqdm from transformers import (CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection) from animatediff.models.resnet import InflatedConv3d from animatediff.models.unet import UNet3DConditionModel from animatediff.utils.convert_from_ckpt import (convert_ldm_clip_checkpoint, convert_ldm_unet_checkpoint, convert_ldm_vae_checkpoint) from animatediff.utils.convert_lora_safetensor_to_diffusers import \ convert_lora_model_level from animatediff.utils.util import prepare_mask_coef_by_statistics from accelerate import cpu_offload
11,990
# Adapted from https://github.com/showlab/Tune-A-Video/blob/main/tuneavideo/pipelines/pipeline_tuneavideo.py logger = logging.get_logger(__name__) # pylint: disable=invalid-name DEFAULT_N_PROMPT = ('wrong white balance, dark, sketches,worst quality,' 'low quality, deformed, distorted, disfigured, bad eyes, ' 'wrong lips,weird mouth, bad teeth, mutated hands and fingers, ' 'bad anatomy,wrong anatomy, amputation, extra limb, ' 'missing limb, floating,limbs, disconnected limbs, mutation, ' 'ugly, disgusting, bad_pictures, negative_hand-neg') @dataclass class AnimationPipelineOutput(BaseOutput): videos: Union[torch.Tensor, np.ndarray] class I2VPipeline(DiffusionPipeline, IPAdapterMixin, TextualInversionLoaderMixin): _optional_components = [] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer,
# Adapted from https://github.com/showlab/Tune-A-Video/blob/main/tuneavideo/pipelines/pipeline_tuneavideo.py logger = logging.get_logger(__name__) # pylint: disable=invalid-name DEFAULT_N_PROMPT = ('wrong white balance, dark, sketches,worst quality,' 'low quality, deformed, distorted, disfigured, bad eyes, ' 'wrong lips,weird mouth, bad teeth, mutated hands and fingers, ' 'bad anatomy,wrong anatomy, amputation, extra limb, ' 'missing limb, floating,limbs, disconnected limbs, mutation, ' 'ugly, disgusting, bad_pictures, negative_hand-neg') @dataclass class AnimationPipelineOutput(BaseOutput): videos: Union[torch.Tensor, np.ndarray] class I2VPipeline(DiffusionPipeline, IPAdapterMixin, TextualInversionLoaderMixin): _optional_components = [] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer,
unet: UNet3DConditionModel,
1
2023-12-21 03:29:34+00:00
16k
xinghaochen/TinySAM
tinysam/hierarchical_mask_generator.py
[ { "identifier": "Sam", "path": "tinysam/modeling/sam.py", "snippet": "class Sam(nn.Module):\n mask_threshold: float = 0.0\n image_format: str = \"RGB\"\n\n def __init__(\n self,\n image_encoder: Union[ImageEncoderViT, TinyViT],\n prompt_encoder: PromptEncoder,\n mask...
import numpy as np import torch import cv2 # type: ignore # noqa: F401 from torchvision.ops.boxes import batched_nms, box_area # type: ignore from typing import Any, Dict, List, Optional, Tuple from .modeling import Sam from .predictor import SamPredictor from .utils.amg import ( MaskData, area_from_rle, batch_iterator, batched_mask_to_box, box_xyxy_to_xywh, build_all_layer_point_grids, calculate_stability_score, coco_encode_rle, generate_crop_boxes, is_box_near_crop_edge, mask_to_rle_pytorch, remove_small_regions, rle_to_mask, uncrop_boxes_xyxy, uncrop_masks, uncrop_points, ) from pycocotools import mask as mask_utils # type: ignore # noqa: F401
10,985
for sx in [-1, 0, 1]: if or_results[int(point_coords[0] + wstride * 2), int(point_coords[1] + hstride * sx)]: continue new_points.append([(point_coords[0] + wstride * 2) / iw, (point_coords[1] + hstride * sx) / ih]) if point_coords[1] + hstride * 2 < ih: for sy in [-1, 0, 1]: if or_results[int(point_coords[0] + wstride * sy), int(point_coords[1] + hstride * 2)]: continue new_points.append([(point_coords[0] + wstride * sy) / iw, (point_coords[1] + hstride * 2) / ih]) if point_coords[0] + wstride * 2 < iw and point_coords[1] + hstride * 2 < ih: if or_results[int(point_coords[0] + wstride * 2), int(point_coords[1] + hstride * 2)]: continue new_points.append([(point_coords[0] + wstride * 2) / iw, (point_coords[1] + hstride * 2) / ih]) self.set_point_grids([np.array(new_points)]) new_masks = self.generate(image, False) new_masks.cat(ori_masks) new_masks = self.post_process(image, new_masks) return new_masks @torch.no_grad() def generate(self, image: np.ndarray, need_high: bool) -> MaskData: orig_size = image.shape[:2] # Get points for this crop points_scale = np.array(orig_size)[None, ::-1] points_for_image = self.point_grids[0] * points_scale # Generate masks for this crop in batches data = MaskData() for (points,) in batch_iterator(self.points_per_batch, points_for_image): orig_h, orig_w = orig_size # Run model on this batch transformed_points = self.predictor.transform.apply_coords(points, orig_size) in_points = torch.as_tensor(transformed_points, device=self.predictor.device) in_labels = torch.ones(in_points.shape[0], dtype=torch.int, device=in_points.device) masks, iou_preds, _ = self.predictor.predict_torch( in_points[:, None, :], in_labels[:, None], return_logits=True, ) # Serialize predictions and store in MaskData batch_data = MaskData( masks=masks.flatten(0, 1), iou_preds=iou_preds.flatten(0, 1), points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)), ) del masks if self.pred_iou_thresh > 0.0: keep_mask = batch_data["iou_preds"] > self.pred_iou_thresh batch_data.filter(keep_mask) # Calculate stability score batch_data["stability_score"] = calculate_stability_score( batch_data["masks"], self.predictor.model.mask_threshold, self.stability_score_offset ) if self.stability_score_thresh > 0.0: keep_mask = batch_data["stability_score"] >= self.stability_score_thresh batch_data.filter(keep_mask) if need_high: batch_data["high_masks"] = batch_data["masks"] > self.high_score_thresh batch_data["masks"] = batch_data["masks"] > self.predictor.model.mask_threshold batch_data["boxes"] = batched_mask_to_box(batch_data["masks"]) keep_mask = ~is_box_near_crop_edge(batch_data["boxes"], [0, 0, orig_w, orig_h], [0, 0, orig_w, orig_h]) if not torch.all(keep_mask): batch_data.filter(keep_mask) # Compress to RLE batch_data["rles"] = mask_to_rle_pytorch(batch_data["masks"]) data.cat(batch_data) del batch_data if need_high: high_masks = data["high_masks"] or_results = torch.zeros([high_masks.shape[1], high_masks.shape[2]]).to(high_masks.device) for mask in high_masks: or_results = torch.logical_or(or_results, mask) del data["high_masks"] or_results = or_results.permute(1, 0) del data['masks'] return data, or_results else: del data['masks'] return data @torch.no_grad() def reset_image(self): self.predictor.reset_image() @torch.no_grad() def post_process(self, image: np.ndarray, data: MaskData) -> List[Dict[str, Any]]: orig_size = image.shape[:2] orig_h, orig_w = orig_size keep_by_nms = batched_nms( data["boxes"].float(), data["iou_preds"], torch.zeros_like(data["boxes"][:, 0]), # categories 
iou_threshold=self.box_nms_thresh, ) data.filter(keep_by_nms) # Filter small disconnected regions and holes in masks if self.min_mask_region_area > 0: data = self.postprocess_small_regions( data, self.min_mask_region_area, max(self.box_nms_thresh, self.crop_nms_thresh), ) # Encode masks if self.output_mode == "coco_rle": data["segmentations"] = [coco_encode_rle(rle) for rle in data["rles"]] elif self.output_mode == "binary_mask":
# Copyright 2023 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ class SamHierarchicalMaskGenerator: def __init__( self, model: Sam, points_per_side: Optional[int] = 32, points_per_batch: int = 64, pred_iou_thresh: float = 0.88, high_score_thresh: float = 8.5, stability_score_thresh: float = 0.95, stability_score_offset: float = 1.0, box_nms_thresh: float = 0.7, crop_n_layers: int = 0, crop_nms_thresh: float = 0.7, crop_overlap_ratio: float = 512 / 1500, crop_n_points_downscale_factor: int = 1, point_grids: Optional[List[np.ndarray]] = None, min_mask_region_area: int = 0, output_mode: str = "binary_mask", ) -> None: """ Using a SAM model, generates masks for the entire image. Generates a grid of point prompts over the image, then filters low quality and duplicate masks. The default settings are chosen for SAM with a ViT-H backbone. Arguments: model (Sam): The SAM model to use for mask prediction. points_per_side (int or None): The number of points to be sampled along one side of the image. The total number of points is points_per_side**2. If None, 'point_grids' must provide explicit point sampling. points_per_batch (int): Sets the number of points run simultaneously by the model. Higher numbers may be faster but use more GPU memory. pred_iou_thresh (float): A filtering threshold in [0,1], using the model's predicted mask quality. high_score_thresh (float): A filtering threshold in [-inf,inf], to find out the unmasked area for the next generation. stability_score_thresh (float): A filtering threshold in [0,1], using the stability of the mask under changes to the cutoff used to binarize the model's mask predictions. stability_score_offset (float): The amount to shift the cutoff when calculated the stability score. box_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks. crop_n_layers (int): If >0, mask prediction will be run again on crops of the image. Sets the number of layers to run, where each layer has 2**i_layer number of image crops. crop_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks between different crops. crop_overlap_ratio (float): Sets the degree to which crops overlap. In the first crop layer, crops will overlap by this fraction of the image length. Later layers with more crops scale down this overlap. crop_n_points_downscale_factor (int): The number of points-per-side sampled in layer n is scaled down by crop_n_points_downscale_factor**n. point_grids (list(np.ndarray) or None): A list over explicit grids of points used for sampling, normalized to [0,1]. The nth grid in the list is used in the nth crop layer. Exclusive with points_per_side. min_mask_region_area (int): If >0, postprocessing will be applied to remove disconnected regions and holes in masks with area smaller than min_mask_region_area. Requires opencv. output_mode (str): The form masks are returned in. 
Can be 'binary_mask', 'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools. For large resolutions, 'binary_mask' may consume large amounts of memory. """ assert (points_per_side is None) != ( point_grids is None ), "Exactly one of points_per_side or point_grid must be provided." if points_per_side is not None: self.point_grids = build_all_layer_point_grids( points_per_side, crop_n_layers, crop_n_points_downscale_factor, ) elif point_grids is not None: self.point_grids = point_grids else: raise ValueError("Can't have both points_per_side and point_grid be None.") assert output_mode in [ "binary_mask", "uncompressed_rle", "coco_rle", ], f"Unknown output_mode {output_mode}." if output_mode == "coco_rle": if min_mask_region_area > 0: self.predictor = SamPredictor(model) self.points_per_side = points_per_side self.points_per_batch = points_per_batch self.pred_iou_thresh = pred_iou_thresh self.high_score_thresh = high_score_thresh self.stability_score_thresh = stability_score_thresh self.stability_score_offset = stability_score_offset self.box_nms_thresh = box_nms_thresh self.crop_n_layers = crop_n_layers self.crop_nms_thresh = crop_nms_thresh self.crop_overlap_ratio = crop_overlap_ratio self.crop_n_points_downscale_factor = crop_n_points_downscale_factor self.min_mask_region_area = min_mask_region_area self.output_mode = output_mode def set_point_grids(self, point_grids): self.point_grids = point_grids def set_points_per_side(self, points_per_side): self.point_grids = build_all_layer_point_grids( points_per_side, 0, 1, ) @torch.no_grad() def set_image(self, image: np.ndarray) -> MaskData: # Crop the image and calculate embeddings self.predictor.set_image(image) @torch.no_grad() def hierarchical_generate(self, image: np.ndarray) -> List[Dict[str, Any]]: self.set_image(image) self.set_points_per_side(self.points_per_side // 4) ori_masks, or_results = self.generate(image, True) ih, iw, _ = image.shape hstride = ih // self.points_per_side wstride = iw // self.points_per_side new_points = [] pass_counter = 0 full_point_grids = np.array(self.point_grids) for mask in range(full_point_grids.shape[1]): point_coords = [full_point_grids[0, mask, 0] * iw, full_point_grids[0, mask, 1] * ih] for sy in [-1, 0, 1]: for sx in [-1, 0, 1]: if (sy == 0 and sx == 0) or or_results[int(point_coords[0] + wstride * sy), int(point_coords[1] + hstride * sx)]: continue new_points.append([(point_coords[0] + wstride * sy) / iw, (point_coords[1] + hstride * sx) / ih]) if point_coords[0] + wstride * 2 < iw: for sx in [-1, 0, 1]: if or_results[int(point_coords[0] + wstride * 2), int(point_coords[1] + hstride * sx)]: continue new_points.append([(point_coords[0] + wstride * 2) / iw, (point_coords[1] + hstride * sx) / ih]) if point_coords[1] + hstride * 2 < ih: for sy in [-1, 0, 1]: if or_results[int(point_coords[0] + wstride * sy), int(point_coords[1] + hstride * 2)]: continue new_points.append([(point_coords[0] + wstride * sy) / iw, (point_coords[1] + hstride * 2) / ih]) if point_coords[0] + wstride * 2 < iw and point_coords[1] + hstride * 2 < ih: if or_results[int(point_coords[0] + wstride * 2), int(point_coords[1] + hstride * 2)]: continue new_points.append([(point_coords[0] + wstride * 2) / iw, (point_coords[1] + hstride * 2) / ih]) self.set_point_grids([np.array(new_points)]) new_masks = self.generate(image, False) new_masks.cat(ori_masks) new_masks = self.post_process(image, new_masks) return new_masks @torch.no_grad() def generate(self, image: np.ndarray, need_high: bool) -> MaskData: orig_size = 
image.shape[:2] # Get points for this crop points_scale = np.array(orig_size)[None, ::-1] points_for_image = self.point_grids[0] * points_scale # Generate masks for this crop in batches data = MaskData() for (points,) in batch_iterator(self.points_per_batch, points_for_image): orig_h, orig_w = orig_size # Run model on this batch transformed_points = self.predictor.transform.apply_coords(points, orig_size) in_points = torch.as_tensor(transformed_points, device=self.predictor.device) in_labels = torch.ones(in_points.shape[0], dtype=torch.int, device=in_points.device) masks, iou_preds, _ = self.predictor.predict_torch( in_points[:, None, :], in_labels[:, None], return_logits=True, ) # Serialize predictions and store in MaskData batch_data = MaskData( masks=masks.flatten(0, 1), iou_preds=iou_preds.flatten(0, 1), points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)), ) del masks if self.pred_iou_thresh > 0.0: keep_mask = batch_data["iou_preds"] > self.pred_iou_thresh batch_data.filter(keep_mask) # Calculate stability score batch_data["stability_score"] = calculate_stability_score( batch_data["masks"], self.predictor.model.mask_threshold, self.stability_score_offset ) if self.stability_score_thresh > 0.0: keep_mask = batch_data["stability_score"] >= self.stability_score_thresh batch_data.filter(keep_mask) if need_high: batch_data["high_masks"] = batch_data["masks"] > self.high_score_thresh batch_data["masks"] = batch_data["masks"] > self.predictor.model.mask_threshold batch_data["boxes"] = batched_mask_to_box(batch_data["masks"]) keep_mask = ~is_box_near_crop_edge(batch_data["boxes"], [0, 0, orig_w, orig_h], [0, 0, orig_w, orig_h]) if not torch.all(keep_mask): batch_data.filter(keep_mask) # Compress to RLE batch_data["rles"] = mask_to_rle_pytorch(batch_data["masks"]) data.cat(batch_data) del batch_data if need_high: high_masks = data["high_masks"] or_results = torch.zeros([high_masks.shape[1], high_masks.shape[2]]).to(high_masks.device) for mask in high_masks: or_results = torch.logical_or(or_results, mask) del data["high_masks"] or_results = or_results.permute(1, 0) del data['masks'] return data, or_results else: del data['masks'] return data @torch.no_grad() def reset_image(self): self.predictor.reset_image() @torch.no_grad() def post_process(self, image: np.ndarray, data: MaskData) -> List[Dict[str, Any]]: orig_size = image.shape[:2] orig_h, orig_w = orig_size keep_by_nms = batched_nms( data["boxes"].float(), data["iou_preds"], torch.zeros_like(data["boxes"][:, 0]), # categories iou_threshold=self.box_nms_thresh, ) data.filter(keep_by_nms) # Filter small disconnected regions and holes in masks if self.min_mask_region_area > 0: data = self.postprocess_small_regions( data, self.min_mask_region_area, max(self.box_nms_thresh, self.crop_nms_thresh), ) # Encode masks if self.output_mode == "coco_rle": data["segmentations"] = [coco_encode_rle(rle) for rle in data["rles"]] elif self.output_mode == "binary_mask":
data["segmentations"] = [rle_to_mask(rle) for rle in data["rles"]]
14
2023-12-19 11:25:54+00:00
16k
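The record above centers on SamHierarchicalMaskGenerator.hierarchical_generate, which runs a coarse point grid (points_per_side // 4) first and then re-prompts only where the high-score masks left gaps. A minimal usage sketch, not taken from the record: the checkpoint filename and the module holding SamHierarchicalMaskGenerator are placeholders, while sam_model_registry is the standard segment-anything loader.

import cv2
from segment_anything import sam_model_registry                       # upstream SAM builder
from hierarchical_mask_generator import SamHierarchicalMaskGenerator  # placeholder module path

sam = sam_model_registry["vit_h"](checkpoint="sam_vit_h.pth")          # placeholder checkpoint file
generator = SamHierarchicalMaskGenerator(
    sam,
    points_per_side=32,              # the coarse pass internally uses 32 // 4 points per side
    pred_iou_thresh=0.88,
    stability_score_thresh=0.95,
)

image = cv2.cvtColor(cv2.imread("example.jpg"), cv2.COLOR_BGR2RGB)
masks = generator.hierarchical_generate(image)   # annotated as List[Dict[str, Any]]
generator.reset_image()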
DeepWok/mase
machop/chop/passes/graph/transforms/verilog/emit_tb.py
[ { "identifier": "emit_data_in_tb_sv", "path": "machop/chop/passes/graph/transforms/verilog/emit_tb_data_in.py", "snippet": "def emit_data_in_tb_sv(data_width, load_path, out_file):\n buff = f\"\"\"\n`timescale 1 ns / 1 ps\n\nmodule AESL_autofifo_data_in_V (\n clk,\n reset,\n if_empty_n,\n ...
import math, time, os, logging, torch, glob, shutil from chop.passes.graph.utils import vf, v2p, init_project from chop.passes.graph.transforms.quantize.quantizers import integer_quantizer_for_hw from .emit_tb_data_in import emit_data_in_tb_sv, emit_data_in_tb_dat from .emit_tb_data_out import emit_data_out_tb_sv, emit_data_out_tb_dat from .emit_tb_testbench import emit_top_tb from pathlib import Path
13,095
w_in_param = graph.nodes_in[0].meta["mase"].parameters["common"]["args"] in_width = w_in_param["data_in_0"]["precision"][0] in_size = v_in_param["DATA_IN_0_TENSOR_SIZE_DIM_0"] data_width = in_width * in_size # TODO : need to check addr_width = 1 depth = 1 load_path = os.path.join(tv_dir, f"sw_data_in.dat") out_file = os.path.join(v_dir, f"top_data_in_fifo.sv") emit_data_in_tb_sv(data_width, load_path, out_file) v_out_param = ( graph.nodes_out[0].meta["mase"].parameters["hardware"]["verilog_param"] ) w_out_param = graph.nodes_in[0].meta["mase"].parameters["common"]["results"] out_width = w_out_param["data_out_0"]["precision"][0] out_size = v_out_param["DATA_OUT_0_TENSOR_SIZE_0_DIM_0"] data_width = out_width * out_size # TODO : need to check addr_width = 1 depth = 1 load_path = os.path.join(tv_dir, f"sw_data_out.dat") store_path = os.path.join(tv_dir, f"hw_data_out.dat") out_file = os.path.join(v_dir, f"top_data_out_fifo.sv") emit_data_out_tb_sv(data_width, load_path, store_path, out_file) out_file = os.path.join(v_dir, f"top_tb.sv") # in_trans_num = v_in_param["DATA_IN_0_DEPTH"] * trans_num in_trans_num = trans_num out_trans_num = trans_num emit_top_tb( tv_dir, "top", out_file, in_width, in_size, out_width, out_size, in_trans_num, out_trans_num, ) out_file = os.path.join(v_dir, f"fifo_para.v") with open(out_file, "w", encoding="utf-8") as outf: outf.write("// initial a empty file") # Copy testbench components dut_dir = os.path.join(project_dir, "hardware", "rtl") for svfile in glob.glob(os.path.join(dut_dir, "*.sv")): shutil.copy(svfile, v_dir) def emit_tb_dat(graph, trans_num=1, project_dir="top", test_inputs=None): """ Emit the test vectors in dat files for simulation """ sim_dir = os.path.join(project_dir, "hardware", "sim") tv_dir = os.path.join(sim_dir, "tv") if not os.path.exists(tv_dir): os.mkdir(tv_dir) in_type = ( graph.nodes_in[0].meta["mase"].parameters["common"]["args"]["data_in_0"]["type"] ) in_width = ( graph.nodes_in[0] .meta["mase"] .parameters["common"]["args"]["data_in_0"]["precision"][0] ) in_frac_width = ( graph.nodes_in[0] .meta["mase"] .parameters["common"]["args"]["data_in_0"]["precision"][1] ) sw_data_out = [graph.model(trans) for trans in test_inputs] out_type = ( graph.nodes_out[0] .meta["mase"] .parameters["common"]["results"]["data_out_0"]["type"] ) prec = ( graph.nodes_out[0] .meta["mase"] .parameters["common"]["results"]["data_out_0"]["precision"] ) out_width = ( graph.nodes_out[0] .meta["mase"] .parameters["common"]["results"]["data_out_0"]["precision"][0] ) if len(prec) > 1: out_frac_width = ( graph.nodes_out[0] .meta["mase"] .parameters["common"]["results"]["data_out_0"]["precision"][1] ) else: out_frac_width = 0 # TODO: Make out_type as input to support casting to any type hw_data_out = [ integer_quantizer_for_hw(trans, width=out_width, frac_width=out_frac_width) .squeeze(0) .to(torch.int) for trans in sw_data_out ] # TODO: for now for i, trans in enumerate(test_inputs): test_inputs[i] = torch.flatten(trans).tolist() for i, trans in enumerate(hw_data_out): hw_data_out[i] = torch.flatten(trans).tolist() load_path = os.path.join(tv_dir, "sw_data_in.dat")
logger = logging.getLogger(__name__) def emit_tb_verilog(graph, trans_num=1, project_dir="top"): sim_dir = os.path.join(project_dir, "hardware", "sim") tv_dir = os.path.join(sim_dir, "tv") if not os.path.exists(tv_dir): os.mkdir(tv_dir) v_dir = os.path.join(sim_dir, "verilog") if not os.path.exists(v_dir): os.mkdir(v_dir) # TODO : need to emit all the inputs v_in_param = graph.nodes_in[0].meta["mase"].parameters["hardware"]["verilog_param"] w_in_param = graph.nodes_in[0].meta["mase"].parameters["common"]["args"] in_width = w_in_param["data_in_0"]["precision"][0] in_size = v_in_param["DATA_IN_0_TENSOR_SIZE_DIM_0"] data_width = in_width * in_size # TODO : need to check addr_width = 1 depth = 1 load_path = os.path.join(tv_dir, f"sw_data_in.dat") out_file = os.path.join(v_dir, f"top_data_in_fifo.sv") emit_data_in_tb_sv(data_width, load_path, out_file) v_out_param = ( graph.nodes_out[0].meta["mase"].parameters["hardware"]["verilog_param"] ) w_out_param = graph.nodes_in[0].meta["mase"].parameters["common"]["results"] out_width = w_out_param["data_out_0"]["precision"][0] out_size = v_out_param["DATA_OUT_0_TENSOR_SIZE_0_DIM_0"] data_width = out_width * out_size # TODO : need to check addr_width = 1 depth = 1 load_path = os.path.join(tv_dir, f"sw_data_out.dat") store_path = os.path.join(tv_dir, f"hw_data_out.dat") out_file = os.path.join(v_dir, f"top_data_out_fifo.sv") emit_data_out_tb_sv(data_width, load_path, store_path, out_file) out_file = os.path.join(v_dir, f"top_tb.sv") # in_trans_num = v_in_param["DATA_IN_0_DEPTH"] * trans_num in_trans_num = trans_num out_trans_num = trans_num emit_top_tb( tv_dir, "top", out_file, in_width, in_size, out_width, out_size, in_trans_num, out_trans_num, ) out_file = os.path.join(v_dir, f"fifo_para.v") with open(out_file, "w", encoding="utf-8") as outf: outf.write("// initial a empty file") # Copy testbench components dut_dir = os.path.join(project_dir, "hardware", "rtl") for svfile in glob.glob(os.path.join(dut_dir, "*.sv")): shutil.copy(svfile, v_dir) def emit_tb_dat(graph, trans_num=1, project_dir="top", test_inputs=None): """ Emit the test vectors in dat files for simulation """ sim_dir = os.path.join(project_dir, "hardware", "sim") tv_dir = os.path.join(sim_dir, "tv") if not os.path.exists(tv_dir): os.mkdir(tv_dir) in_type = ( graph.nodes_in[0].meta["mase"].parameters["common"]["args"]["data_in_0"]["type"] ) in_width = ( graph.nodes_in[0] .meta["mase"] .parameters["common"]["args"]["data_in_0"]["precision"][0] ) in_frac_width = ( graph.nodes_in[0] .meta["mase"] .parameters["common"]["args"]["data_in_0"]["precision"][1] ) sw_data_out = [graph.model(trans) for trans in test_inputs] out_type = ( graph.nodes_out[0] .meta["mase"] .parameters["common"]["results"]["data_out_0"]["type"] ) prec = ( graph.nodes_out[0] .meta["mase"] .parameters["common"]["results"]["data_out_0"]["precision"] ) out_width = ( graph.nodes_out[0] .meta["mase"] .parameters["common"]["results"]["data_out_0"]["precision"][0] ) if len(prec) > 1: out_frac_width = ( graph.nodes_out[0] .meta["mase"] .parameters["common"]["results"]["data_out_0"]["precision"][1] ) else: out_frac_width = 0 # TODO: Make out_type as input to support casting to any type hw_data_out = [ integer_quantizer_for_hw(trans, width=out_width, frac_width=out_frac_width) .squeeze(0) .to(torch.int) for trans in sw_data_out ] # TODO: for now for i, trans in enumerate(test_inputs): test_inputs[i] = torch.flatten(trans).tolist() for i, trans in enumerate(hw_data_out): hw_data_out[i] = torch.flatten(trans).tolist() load_path = 
os.path.join(tv_dir, "sw_data_in.dat")
emit_data_in_tb_dat(graph.nodes_in[0], test_inputs, load_path)
1
2023-12-18 12:50:53+00:00
16k
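One detail worth illustrating from the record above: emit_tb_dat quantizes the model's float outputs with integer_quantizer_for_hw before flattening them into .dat test vectors. The following is a stand-alone sketch of that kind of signed fixed-point conversion, written here for illustration and not the chop implementation.

import torch

def to_fixed_point(x: torch.Tensor, width: int, frac_width: int) -> torch.Tensor:
    # Scale by 2**frac_width, round to the nearest integer, then saturate
    # to the signed range representable in `width` bits.
    scaled = torch.round(x * (1 << frac_width))
    lo, hi = -(1 << (width - 1)), (1 << (width - 1)) - 1
    return scaled.clamp(lo, hi).to(torch.int)

x = torch.tensor([0.75, -1.5, 3.999])
print(to_fixed_point(x, width=8, frac_width=4))   # tensor([ 12, -24,  64], dtype=torch.int32)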
OPPOMKLab/u-LLaVA
models/GroundingDINO/groundingdino/models/GroundingDINO/groundingdino.py
[ { "identifier": "box_ops", "path": "models/GroundingDINO/groundingdino/util/box_ops.py", "snippet": "def box_cxcywh_to_xyxy(x):\ndef box_xyxy_to_cxcywh(x):\ndef box_iou(boxes1, boxes2):\ndef generalized_box_iou(boxes1, boxes2):\ndef box_iou_pairwise(boxes1, boxes2):\ndef generalized_box_iou_pairwise(box...
import copy import torch import torch.nn.functional as F from typing import List from torch import nn from torchvision.ops.boxes import nms from transformers import AutoTokenizer, BertModel, BertTokenizer, RobertaModel, RobertaTokenizerFast from models.GroundingDINO.groundingdino.util import box_ops, get_tokenlizer from models.GroundingDINO.groundingdino.util.misc import ( NestedTensor, accuracy, get_world_size, interpolate, inverse_sigmoid, is_dist_avail_and_initialized, nested_tensor_from_tensor_list, ) from models.GroundingDINO.groundingdino.util.utils import get_phrases_from_posmap from models.GroundingDINO.groundingdino.util.visualizer import COCOVisualizer from models.GroundingDINO.groundingdino.util.vl_utils import create_positive_map_from_span from ..registry import MODULE_BUILD_FUNCS from .backbone import build_backbone from .bertwarper import ( BertModelWarper, generate_masks_with_special_tokens, generate_masks_with_special_tokens_and_transfer_map, ) from .transformer import build_transformer from .utils import MLP, ContrastiveEmbed, sigmoid_focal_loss
11,415
self.nheads = nheads self.max_text_len = 256 self.sub_sentence_present = sub_sentence_present # setting query dim self.query_dim = query_dim assert query_dim == 4 # for dn training self.num_patterns = num_patterns self.dn_number = dn_number self.dn_box_noise_scale = dn_box_noise_scale self.dn_label_noise_ratio = dn_label_noise_ratio self.dn_labelbook_size = dn_labelbook_size # bert self.tokenizer = get_tokenlizer.get_tokenlizer(text_encoder_type) self.bert = get_tokenlizer.get_pretrained_language_model(text_encoder_type) self.bert.pooler.dense.weight.requires_grad_(False) self.bert.pooler.dense.bias.requires_grad_(False) self.bert = BertModelWarper(bert_model=self.bert) self.feat_map = nn.Linear(self.bert.config.hidden_size, self.hidden_dim, bias=True) nn.init.constant_(self.feat_map.bias.data, 0) nn.init.xavier_uniform_(self.feat_map.weight.data) # freeze # special tokens self.specical_tokens = self.tokenizer.convert_tokens_to_ids(["[CLS]", "[SEP]", ".", "?"]) # prepare input projection layers if num_feature_levels > 1: num_backbone_outs = len(backbone.num_channels) input_proj_list = [] for _ in range(num_backbone_outs): in_channels = backbone.num_channels[_] input_proj_list.append( nn.Sequential( nn.Conv2d(in_channels, hidden_dim, kernel_size=1), nn.GroupNorm(32, hidden_dim), ) ) for _ in range(num_feature_levels - num_backbone_outs): input_proj_list.append( nn.Sequential( nn.Conv2d(in_channels, hidden_dim, kernel_size=3, stride=2, padding=1), nn.GroupNorm(32, hidden_dim), ) ) in_channels = hidden_dim self.input_proj = nn.ModuleList(input_proj_list) else: assert two_stage_type == "no", "two_stage_type should be no if num_feature_levels=1 !!!" self.input_proj = nn.ModuleList( [ nn.Sequential( nn.Conv2d(backbone.num_channels[-1], hidden_dim, kernel_size=1), nn.GroupNorm(32, hidden_dim), ) ] ) self.backbone = backbone self.aux_loss = aux_loss self.box_pred_damping = box_pred_damping = None self.iter_update = iter_update assert iter_update, "Why not iter_update?" 
# prepare pred layers self.dec_pred_bbox_embed_share = dec_pred_bbox_embed_share # prepare class & box embed _class_embed = ContrastiveEmbed() _bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3) nn.init.constant_(_bbox_embed.layers[-1].weight.data, 0) nn.init.constant_(_bbox_embed.layers[-1].bias.data, 0) if dec_pred_bbox_embed_share: box_embed_layerlist = [_bbox_embed for i in range(transformer.num_decoder_layers)] else: box_embed_layerlist = [ copy.deepcopy(_bbox_embed) for i in range(transformer.num_decoder_layers) ] class_embed_layerlist = [_class_embed for i in range(transformer.num_decoder_layers)] self.bbox_embed = nn.ModuleList(box_embed_layerlist) self.class_embed = nn.ModuleList(class_embed_layerlist) self.transformer.decoder.bbox_embed = self.bbox_embed self.transformer.decoder.class_embed = self.class_embed # two stage self.two_stage_type = two_stage_type assert two_stage_type in ["no", "standard"], "unknown param {} of two_stage_type".format( two_stage_type ) if two_stage_type != "no": if two_stage_bbox_embed_share: assert dec_pred_bbox_embed_share self.transformer.enc_out_bbox_embed = _bbox_embed else: self.transformer.enc_out_bbox_embed = copy.deepcopy(_bbox_embed) if two_stage_class_embed_share: assert dec_pred_bbox_embed_share self.transformer.enc_out_class_embed = _class_embed else: self.transformer.enc_out_class_embed = copy.deepcopy(_class_embed) self.refpoint_embed = None self._reset_parameters() def _reset_parameters(self): # init input_proj for proj in self.input_proj: nn.init.xavier_uniform_(proj[0].weight, gain=1) nn.init.constant_(proj[0].bias, 0) def set_image_tensor(self, samples: NestedTensor): if isinstance(samples, (list, torch.Tensor)):
# ------------------------------------------------------------------------ # Grounding DINO # url: https://github.com/IDEA-Research/GroundingDINO # Copyright (c) 2023 IDEA. All Rights Reserved. # Licensed under the Apache License, Version 2.0 [see LICENSE for details] # ------------------------------------------------------------------------ # Conditional DETR model and criterion classes. # Copyright (c) 2021 Microsoft. All Rights Reserved. # Licensed under the Apache License, Version 2.0 [see LICENSE for details] # ------------------------------------------------------------------------ # Modified from DETR (https://github.com/facebookresearch/detr) # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. # ------------------------------------------------------------------------ # Modified from Deformable DETR (https://github.com/fundamentalvision/Deformable-DETR) # Copyright (c) 2020 SenseTime. All Rights Reserved. # ------------------------------------------------------------------------ class GroundingDINO(nn.Module): """This is the Cross-Attention Detector module that performs object detection""" def __init__( self, backbone, transformer, num_queries, aux_loss=False, iter_update=False, query_dim=2, num_feature_levels=1, nheads=8, # two stage two_stage_type="no", # ['no', 'standard'] dec_pred_bbox_embed_share=True, two_stage_class_embed_share=True, two_stage_bbox_embed_share=True, num_patterns=0, dn_number=100, dn_box_noise_scale=0.4, dn_label_noise_ratio=0.5, dn_labelbook_size=100, text_encoder_type="bert-base-uncased", sub_sentence_present=True, max_text_len=256, ): """Initializes the model. Parameters: backbone: torch module of the backbone to be used. See backbone.py transformer: torch module of the transformer architecture. See transformer.py num_queries: number of object queries, ie detection slot. This is the maximal number of objects Conditional DETR can detect in a single image. For COCO, we recommend 100 queries. aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used. 
""" super().__init__() self.num_queries = num_queries self.transformer = transformer self.hidden_dim = hidden_dim = transformer.d_model self.num_feature_levels = num_feature_levels self.nheads = nheads self.max_text_len = 256 self.sub_sentence_present = sub_sentence_present # setting query dim self.query_dim = query_dim assert query_dim == 4 # for dn training self.num_patterns = num_patterns self.dn_number = dn_number self.dn_box_noise_scale = dn_box_noise_scale self.dn_label_noise_ratio = dn_label_noise_ratio self.dn_labelbook_size = dn_labelbook_size # bert self.tokenizer = get_tokenlizer.get_tokenlizer(text_encoder_type) self.bert = get_tokenlizer.get_pretrained_language_model(text_encoder_type) self.bert.pooler.dense.weight.requires_grad_(False) self.bert.pooler.dense.bias.requires_grad_(False) self.bert = BertModelWarper(bert_model=self.bert) self.feat_map = nn.Linear(self.bert.config.hidden_size, self.hidden_dim, bias=True) nn.init.constant_(self.feat_map.bias.data, 0) nn.init.xavier_uniform_(self.feat_map.weight.data) # freeze # special tokens self.specical_tokens = self.tokenizer.convert_tokens_to_ids(["[CLS]", "[SEP]", ".", "?"]) # prepare input projection layers if num_feature_levels > 1: num_backbone_outs = len(backbone.num_channels) input_proj_list = [] for _ in range(num_backbone_outs): in_channels = backbone.num_channels[_] input_proj_list.append( nn.Sequential( nn.Conv2d(in_channels, hidden_dim, kernel_size=1), nn.GroupNorm(32, hidden_dim), ) ) for _ in range(num_feature_levels - num_backbone_outs): input_proj_list.append( nn.Sequential( nn.Conv2d(in_channels, hidden_dim, kernel_size=3, stride=2, padding=1), nn.GroupNorm(32, hidden_dim), ) ) in_channels = hidden_dim self.input_proj = nn.ModuleList(input_proj_list) else: assert two_stage_type == "no", "two_stage_type should be no if num_feature_levels=1 !!!" self.input_proj = nn.ModuleList( [ nn.Sequential( nn.Conv2d(backbone.num_channels[-1], hidden_dim, kernel_size=1), nn.GroupNorm(32, hidden_dim), ) ] ) self.backbone = backbone self.aux_loss = aux_loss self.box_pred_damping = box_pred_damping = None self.iter_update = iter_update assert iter_update, "Why not iter_update?" 
# prepare pred layers self.dec_pred_bbox_embed_share = dec_pred_bbox_embed_share # prepare class & box embed _class_embed = ContrastiveEmbed() _bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3) nn.init.constant_(_bbox_embed.layers[-1].weight.data, 0) nn.init.constant_(_bbox_embed.layers[-1].bias.data, 0) if dec_pred_bbox_embed_share: box_embed_layerlist = [_bbox_embed for i in range(transformer.num_decoder_layers)] else: box_embed_layerlist = [ copy.deepcopy(_bbox_embed) for i in range(transformer.num_decoder_layers) ] class_embed_layerlist = [_class_embed for i in range(transformer.num_decoder_layers)] self.bbox_embed = nn.ModuleList(box_embed_layerlist) self.class_embed = nn.ModuleList(class_embed_layerlist) self.transformer.decoder.bbox_embed = self.bbox_embed self.transformer.decoder.class_embed = self.class_embed # two stage self.two_stage_type = two_stage_type assert two_stage_type in ["no", "standard"], "unknown param {} of two_stage_type".format( two_stage_type ) if two_stage_type != "no": if two_stage_bbox_embed_share: assert dec_pred_bbox_embed_share self.transformer.enc_out_bbox_embed = _bbox_embed else: self.transformer.enc_out_bbox_embed = copy.deepcopy(_bbox_embed) if two_stage_class_embed_share: assert dec_pred_bbox_embed_share self.transformer.enc_out_class_embed = _class_embed else: self.transformer.enc_out_class_embed = copy.deepcopy(_class_embed) self.refpoint_embed = None self._reset_parameters() def _reset_parameters(self): # init input_proj for proj in self.input_proj: nn.init.xavier_uniform_(proj[0].weight, gain=1) nn.init.constant_(proj[0].bias, 0) def set_image_tensor(self, samples: NestedTensor): if isinstance(samples, (list, torch.Tensor)):
samples = nested_tensor_from_tensor_list(samples)
8
2023-12-21 08:10:23+00:00
16k
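The GroundingDINO record above regresses boxes in normalized (cx, cy, w, h) form and relies on the box_ops helpers listed in its context to obtain corner coordinates. A small re-implementation of that conversion, included only as an illustration:

import torch

def box_cxcywh_to_xyxy(boxes: torch.Tensor) -> torch.Tensor:
    # boxes: (..., 4) as (cx, cy, w, h) -> (x0, y0, x1, y1)
    cx, cy, w, h = boxes.unbind(-1)
    return torch.stack([cx - 0.5 * w, cy - 0.5 * h, cx + 0.5 * w, cy + 0.5 * h], dim=-1)

pred = torch.tensor([[0.5, 0.5, 0.2, 0.4]])                            # one normalized prediction
xyxy = box_cxcywh_to_xyxy(pred) * torch.tensor([640, 480, 640, 480])   # scale to a 640x480 image
print(xyxy)   # tensor([[256., 144., 384., 336.]])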
chinhsuanwu/ifusion
ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n ...
import torch import torch.nn as nn import numpy as np import pytorch_lightning as pl import itertools from torch.optim.lr_scheduler import LambdaLR from einops import rearrange, repeat from contextlib import contextmanager, nullcontext from functools import partial from tqdm import tqdm from torchvision.utils import make_grid from pytorch_lightning.utilities import rank_zero_only from omegaconf import ListConfig from ldm.util import ( log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config, ) from ldm.modules.ema import LitEma from ldm.modules.distributions.distributions import ( normal_kl, DiagonalGaussianDistribution, ) from ldm.models.autoencoder import ( VQModelInterface, IdentityFirstStage, AutoencoderKL, ) from ldm.modules.diffusionmodules.util import ( make_beta_schedule, extract_into_tensor, noise_like, ) from ldm.models.diffusion.ddim import DDIMSampler from ldm.modules.attention import CrossAttention
12,198
opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__( self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image_cond", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, unet_trainable=True, *args, **kwargs, ): self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs["timesteps"] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = "concat" if concat_mode else "crossattn" if cond_stage_config == "__is_unconditional__": conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.unet_trainable = unet_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer("scale_factor", torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward # construct linear projection layer for concatenating image CLIP embedding and RT self.cc_projection = nn.Linear(772, 768) nn.init.eye_(list(self.cc_projection.parameters())[0][:768, :768]) nn.init.zeros_(list(self.cc_projection.parameters())[1]) self.cc_projection.requires_grad_(True) self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True def make_cond_schedule( self, ): self.cond_ids = torch.full( size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long, ) ids = torch.round( torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond) ).long() self.cond_ids[: self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if ( self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt ): assert ( self.scale_factor == 1.0 ), "rather not use custom rescaling and std-rescaling simultaneously" # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer("scale_factor", 1.0 / z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule( self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, ): super().register_schedule( given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s ) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config):
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {"concat": "c_concat", "crossattn": "c_crossattn", "adm": "y"} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__( self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image_target", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0.0, v_posterior=0.0, # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1.0, conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0.0, make_it_fit=False, ucg_training=None, ): super().__init__() assert parameterization in [ "eps", "x0", ], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print( f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode" ) self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? 
self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if ckpt_path is not None: self.init_from_ckpt( ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet ) self.register_schedule( given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s, ) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule( self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, ): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule( beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s, ) alphas = 1.0 - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1]) (timesteps,) = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert ( alphas_cumprod.shape[0] == self.num_timesteps ), "alphas have to be defined for each timestep" to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer("betas", to_torch(betas)) self.register_buffer("alphas_cumprod", to_torch(alphas_cumprod)) self.register_buffer("alphas_cumprod_prev", to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer("sqrt_alphas_cumprod", to_torch(np.sqrt(alphas_cumprod))) self.register_buffer( "sqrt_one_minus_alphas_cumprod", to_torch(np.sqrt(1.0 - alphas_cumprod)) ) self.register_buffer( "log_one_minus_alphas_cumprod", to_torch(np.log(1.0 - alphas_cumprod)) ) self.register_buffer( "sqrt_recip_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod)) ) self.register_buffer( "sqrt_recipm1_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod - 1)) ) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * ( 1.0 - alphas_cumprod_prev ) / (1.0 - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. 
- alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer("posterior_variance", to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer( "posterior_log_variance_clipped", to_torch(np.log(np.maximum(posterior_variance, 1e-20))), ) self.register_buffer( "posterior_mean_coef1", to_torch(betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod)), ) self.register_buffer( "posterior_mean_coef2", to_torch( (1.0 - alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - alphas_cumprod) ), ) if self.parameterization == "eps": lvlb_weights = self.betas**2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod) ) elif self.parameterization == "x0": lvlb_weights = ( 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2.0 * 1 - torch.Tensor(alphas_cumprod)) ) else: raise NotImplementedError("mu not supported") # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] self.register_buffer("lvlb_weights", lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) if self.make_it_fit: n_params = len( [ name for name, _ in itertools.chain( self.named_parameters(), self.named_buffers() ) ] ) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params, ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[ i % old_shape[0], j % old_shape[1] ] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = ( self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(sd, strict=False) ) print( f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys" ) if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. 
:return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor( self.log_one_minus_alphas_cumprod, t, x_start.shape ) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor( self.posterior_log_variance_clipped, t, x_t.shape ) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1.0, 1.0) model_mean, posterior_variance, posterior_log_variance = self.q_posterior( x_start=x_recon, x_t=x, t=t ) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance( x=x, t=t, clip_denoised=clip_denoised ) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm( reversed(range(0, self.num_timesteps)), desc="Sampling t", total=self.num_timesteps, ): img = self.p_sample( img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised, ) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop( (batch_size, channels, image_size, image_size), return_intermediates=return_intermediates, ) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise ) def get_loss(self, pred, target, mean=True): if self.loss_type == "l1": loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == "l2": if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction="none") elif self.loss_type == "smooth_l1": if mean: loss = torch.nn.functional.smooth_l1_loss(target, pred) else: loss = torch.nn.functional.smooth_l1_loss( target, pred, reduction="none" ) else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def 
p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start else: raise NotImplementedError( f"Paramterization {self.parameterization} not yet supported" ) loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = "train" if self.training else "val" loss_dict.update({f"{log_prefix}/loss_simple": loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f"{log_prefix}/loss_vlb": loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f"{log_prefix}/loss": loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint( 0, self.num_timesteps, (x.shape[0],), device=self.device ).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: x = x[..., None] x = rearrange(x, "b h w c -> b c h w") x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def training_step(self, batch, batch_idx): for k in self.ucg_training: p = self.ucg_training[k]["p"] val = self.ucg_training[k]["val"] if val is None: val = "" for i in range(len(batch[k])): if self.ucg_prng.choice(2, p=[1 - p, p]): batch[k][i] = val loss, loss_dict = self.shared_step(batch) self.log_dict( loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True ) self.log( "global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False, ) if self.use_scheduler: lr = self.optimizers().param_groups[0]["lr"] self.log( "lr_abs", lr, prog_bar=True, logger=True, on_step=True, on_epoch=False ) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + "_ema": loss_dict_ema[key] for key in loss_dict_ema} self.log_dict( loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True ) self.log_dict( loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True ) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, "n b c h w -> b n c h w") denoise_grid = rearrange(denoise_grid, "b n c h w -> (b n) c h w") denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), "1 -> b", b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) 
diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = self.sample( batch_size=N, return_intermediates=True ) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__( self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image_cond", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, unet_trainable=True, *args, **kwargs, ): self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs["timesteps"] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = "concat" if concat_mode else "crossattn" if cond_stage_config == "__is_unconditional__": conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.unet_trainable = unet_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer("scale_factor", torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward # construct linear projection layer for concatenating image CLIP embedding and RT self.cc_projection = nn.Linear(772, 768) nn.init.eye_(list(self.cc_projection.parameters())[0][:768, :768]) nn.init.zeros_(list(self.cc_projection.parameters())[1]) self.cc_projection.requires_grad_(True) self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True def make_cond_schedule( self, ): self.cond_ids = torch.full( size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long, ) ids = torch.round( torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond) ).long() self.cond_ids[: self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if ( self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt ): assert ( self.scale_factor == 1.0 ), "rather not use custom rescaling and std-rescaling simultaneously" # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer("scale_factor", 1.0 / z.flatten().std()) print(f"setting 
self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule( self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, ): super().register_schedule( given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s ) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config):
model = instantiate_from_config(config)
7
2023-12-17 12:45:38+00:00
16k
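The DDPM record above precomputes sqrt_alphas_cumprod buffers so that q_sample can draw x_t in closed form, using the identity x_t = sqrt(a_bar_t) * x_0 + sqrt(1 - a_bar_t) * eps. A self-contained sketch of that step; a plain linear beta schedule stands in here for the repo's make_beta_schedule.

import torch

def q_sample(x0, t, alphas_cumprod, noise=None):
    # Forward diffusion q(x_t | x_0): scale the clean sample and add scaled noise.
    noise = torch.randn_like(x0) if noise is None else noise
    a_bar = alphas_cumprod[t].view(-1, *([1] * (x0.dim() - 1)))   # one scalar per batch item
    return a_bar.sqrt() * x0 + (1.0 - a_bar).sqrt() * noise

betas = torch.linspace(1e-4, 2e-2, 1000)            # stand-in schedule
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
x0 = torch.randn(2, 3, 8, 8)
t = torch.tensor([10, 500])
print(q_sample(x0, t, alphas_cumprod).shape)        # torch.Size([2, 3, 8, 8])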
wangzhecheng/SkyScript
customized_train_and_test.py
[ { "identifier": "create_model_and_transforms", "path": "src/open_clip/factory.py", "snippet": "def create_model_and_transforms(\n model_name: str,\n pretrained: Optional[str] = None,\n precision: str = 'fp32',\n device: Union[str, torch.device] = 'cpu',\n jit: bool = F...
import glob import json import logging import os import re import subprocess import sys import random import numpy as np import torch import wandb import torch.utils.tensorboard as tensorboard import horovod.torch as hvd from datetime import datetime from torch import optim from torch.cuda.amp import GradScaler from torchvision import transforms from src.open_clip.factory import create_model_and_transforms, get_tokenizer, create_loss from src.open_clip.model import trace_model from src.training.data import get_data from src.training.distributed import is_master, init_distributed_device, broadcast_object from src.training.logger import setup_logging from src.training.scheduler import cosine_lr, const_lr, const_lr_cooldown from src.training.train import train_one_epoch, evaluate from src.training.file_utils import pt_load, check_exists, start_sync_process, remote_sync from src.training.main import natural_key, get_latest_checkpoint, copy_codebase from test_zero_shot_classification import * from params import parse_args
11,284
""" Adapted from https://github.com/mlfoundations/open_clip. Copyright (c) 2012-2021 Gabriel Ilharco, Mitchell Wortsman, Nicholas Carlini, Rohan Taori, Achal Dave, Vaishaal Shankar, John Miller, Hongseok Namkoong, Hannaneh Hajishirzi, Ali Farhadi, Ludwig Schmidt """ try: except ImportError: wandb = None try: except ImportError: tensorboard = None try: except ImportError: hvd = None # from src.open_clip import create_model_and_transforms, trace_model, get_tokenizer, create_loss LATEST_CHECKPOINT_NAME = "epoch_latest.pt" def RandomRotationNew(image): angle = random.choice([0, 90, 180, 270]) image = transforms.functional.rotate(image, angle) return image def zero_shot_eval_during_training(model, test_dataloaders, epoch, args, tb_writer=None): logging.info('Starting zero-shot evaluation.') zero_shot_metrics = {} for dataset_name in test_dataloaders: logging.info(f'Evaluating zero-shot classification for dataset {dataset_name}') results = test_zero_shot_classification(model, test_dataloaders[dataset_name]['dataloader'], test_dataloaders[dataset_name]['labels'], test_dataloaders[dataset_name]['is_binary'], args, dataset_name=dataset_name, debugging=args.debugging) for k, v in results.items(): if type(v) in [float, int, np.float16, np.float32, np.float64, np.int8, np.int16, np.int32, np.int64]: zero_shot_metrics[k] = v logging.info( f"Zero-Shot Eval Epoch: {epoch} " + "\t".join([f"{k}: {round(v, 4):.4f}" for k, v in zero_shot_metrics.items()]) ) if args.save_logs: for name, val in zero_shot_metrics.items(): if tb_writer is not None: tb_writer.add_scalar(f"val/{name}", val, epoch) with open(os.path.join(args.checkpoint_path, "results.jsonl"), "a+") as f: f.write(json.dumps(zero_shot_metrics)) f.write("\n") # if args.wandb: # assert wandb is not None, 'Please install wandb.' # for name, val in zero_shot_metrics.items(): # wandb.log({f"val/{name}": val, 'epoch': epoch}) logging.info('Finished zero-shot evaluation.') return zero_shot_metrics def train_and_test(args): args = parse_args(args) if torch.cuda.is_available(): # This enables tf32 on Ampere GPUs which is only 8% slower than # float16 and almost as accurate as float32 # This was a default in pytorch until 1.12 torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.benchmark = True torch.backends.cudnn.deterministic = False # fully initialize distributed device environment device = init_distributed_device(args) # get the name of the experiments if args.name is None: # sanitize model name for filesystem / uri use, easier if we don't use / in name as a rule? model_name_safe = args.model.replace('/', '-') date_str = datetime.now().strftime("%Y_%m_%d-%H_%M_%S") if args.distributed: # sync date_str from master to all ranks date_str = broadcast_object(args, date_str) args.name = '-'.join([ date_str, f"model_{model_name_safe}", f"lr_{args.lr}", f"b_{args.batch_size}", f"j_{args.workers}", f"p_{args.precision}", ]) resume_latest = args.resume == 'latest' log_base_path = os.path.join(args.logs, args.name) args.log_path = None if is_master(args, local=args.log_local): os.makedirs(log_base_path, exist_ok=True) log_filename = f'out-{args.rank}' if args.log_local else 'out.log' args.log_path = os.path.join(log_base_path, log_filename) if os.path.exists(args.log_path) and not resume_latest: print( "Error. Experiment already exists. Use --name {} to specify a new experiment." ) return -1 # Setup text logger args.log_level = logging.DEBUG if args.debug else logging.INFO
""" Adapted from https://github.com/mlfoundations/open_clip. Copyright (c) 2012-2021 Gabriel Ilharco, Mitchell Wortsman, Nicholas Carlini, Rohan Taori, Achal Dave, Vaishaal Shankar, John Miller, Hongseok Namkoong, Hannaneh Hajishirzi, Ali Farhadi, Ludwig Schmidt """ try: except ImportError: wandb = None try: except ImportError: tensorboard = None try: except ImportError: hvd = None # from src.open_clip import create_model_and_transforms, trace_model, get_tokenizer, create_loss LATEST_CHECKPOINT_NAME = "epoch_latest.pt" def RandomRotationNew(image): angle = random.choice([0, 90, 180, 270]) image = transforms.functional.rotate(image, angle) return image def zero_shot_eval_during_training(model, test_dataloaders, epoch, args, tb_writer=None): logging.info('Starting zero-shot evaluation.') zero_shot_metrics = {} for dataset_name in test_dataloaders: logging.info(f'Evaluating zero-shot classification for dataset {dataset_name}') results = test_zero_shot_classification(model, test_dataloaders[dataset_name]['dataloader'], test_dataloaders[dataset_name]['labels'], test_dataloaders[dataset_name]['is_binary'], args, dataset_name=dataset_name, debugging=args.debugging) for k, v in results.items(): if type(v) in [float, int, np.float16, np.float32, np.float64, np.int8, np.int16, np.int32, np.int64]: zero_shot_metrics[k] = v logging.info( f"Zero-Shot Eval Epoch: {epoch} " + "\t".join([f"{k}: {round(v, 4):.4f}" for k, v in zero_shot_metrics.items()]) ) if args.save_logs: for name, val in zero_shot_metrics.items(): if tb_writer is not None: tb_writer.add_scalar(f"val/{name}", val, epoch) with open(os.path.join(args.checkpoint_path, "results.jsonl"), "a+") as f: f.write(json.dumps(zero_shot_metrics)) f.write("\n") # if args.wandb: # assert wandb is not None, 'Please install wandb.' # for name, val in zero_shot_metrics.items(): # wandb.log({f"val/{name}": val, 'epoch': epoch}) logging.info('Finished zero-shot evaluation.') return zero_shot_metrics def train_and_test(args): args = parse_args(args) if torch.cuda.is_available(): # This enables tf32 on Ampere GPUs which is only 8% slower than # float16 and almost as accurate as float32 # This was a default in pytorch until 1.12 torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.benchmark = True torch.backends.cudnn.deterministic = False # fully initialize distributed device environment device = init_distributed_device(args) # get the name of the experiments if args.name is None: # sanitize model name for filesystem / uri use, easier if we don't use / in name as a rule? model_name_safe = args.model.replace('/', '-') date_str = datetime.now().strftime("%Y_%m_%d-%H_%M_%S") if args.distributed: # sync date_str from master to all ranks date_str = broadcast_object(args, date_str) args.name = '-'.join([ date_str, f"model_{model_name_safe}", f"lr_{args.lr}", f"b_{args.batch_size}", f"j_{args.workers}", f"p_{args.precision}", ]) resume_latest = args.resume == 'latest' log_base_path = os.path.join(args.logs, args.name) args.log_path = None if is_master(args, local=args.log_local): os.makedirs(log_base_path, exist_ok=True) log_filename = f'out-{args.rank}' if args.log_local else 'out.log' args.log_path = os.path.join(log_base_path, log_filename) if os.path.exists(args.log_path) and not resume_latest: print( "Error. Experiment already exists. Use --name {} to specify a new experiment." ) return -1 # Setup text logger args.log_level = logging.DEBUG if args.debug else logging.INFO
setup_logging(args.log_path, args.log_level)
8
2023-12-19 11:50:56+00:00
16k
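The SkyScript record above defines RandomRotationNew, which rotates each training image by a random multiple of 90 degrees. A short sketch of how such a function can slot into a torchvision preprocessing pipeline; the pipeline itself is an illustration, not the repo's.

import random
from PIL import Image
from torchvision import transforms

def random_rotation_90(image):
    # Same idea as RandomRotationNew: pick one of the four axis-aligned orientations.
    angle = random.choice([0, 90, 180, 270])
    return transforms.functional.rotate(image, angle)

preprocess = transforms.Compose([
    transforms.Lambda(random_rotation_90),   # orientation augmentation for aerial imagery
    transforms.ToTensor(),
])

img = Image.new("RGB", (224, 224))
print(preprocess(img).shape)   # torch.Size([3, 224, 224])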
penghao-wu/vstar
VisualSearch/utils/dataset.py
[ { "identifier": "conversation", "path": "VisualSearch/model/llava/conversation.py", "snippet": "class SeparatorStyle(Enum):\nclass Conversation:\n SINGLE = auto()\n TWO = auto()\n MPT = auto()\n PLAIN = auto()\n LLAMA_2 = auto()\n W, H = image.size\n ...
import glob import os import random import cv2 import numpy as np import torch import torch.nn.functional as F from PIL import Image from pycocotools import mask from transformers import CLIPImageProcessor from transformers import OwlViTProcessor from VisualSearch.model.llava import conversation as conversation_lib from VisualSearch.model.llava.constants import (DEFAULT_IMAGE_TOKEN, IGNORE_INDEX, IMAGE_TOKEN_INDEX) from VisualSearch.model.llava.mm_utils import tokenizer_image_token from VisualSearch.utils.data_processing import get_mask_from_json from VisualSearch.utils.refer import REFER from VisualSearch.utils.refer_seg_dataset import ReferSegDataset from VisualSearch.utils.general_segdet_dataset import SegDetDataset from VisualSearch.utils.mixed_grounding_dataset import MixedGroundingDataset from VisualSearch.utils.vqa_dataset import VQADataset from VisualSearch.utils.utils import (DEFAULT_IM_END_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IMAGE_TOKEN) from VisualSearch.utils.utils import box_xyxy_to_cxcywh, expand2square
14,360
cv2.setNumThreads(1) def collate_fn( batch, tokenizer=None, conv_type="llava_v1", use_mm_start_end=True, local_rank=-1 ): image_path_list = [] images_list = [] images_clip_list = [] conversation_list = [] masks_list = [] label_list = [] bboxes_labels_list = [] bboxes_valid_list = [] masks_valid_list = [] resize_list = [] questions_list = [] sampled_classes_list = [] offset_list = [0] cnt = 0 inferences = [] for ( image_path, images, images_clip, conversations, masks, label, bboxes_labels, bboxes_valid, masks_valid, resize, questions, sampled_classes, inference, ) in batch: image_path_list.append(image_path) images_list.append(images) images_clip_list.append(images_clip) conversation_list.extend(conversations) label_list.append(label) masks_list.append(masks.float()) bboxes_labels_list.extend(bboxes_labels) bboxes_valid_list.extend(bboxes_valid) masks_valid_list.append(torch.tensor(masks_valid)) resize_list.append(resize) questions_list.append(questions) sampled_classes_list.append(sampled_classes) cnt += len(conversations) offset_list.append(cnt) inferences.append(inference) if use_mm_start_end: # replace <image> token for i in range(len(conversation_list)): replace_token = DEFAULT_IMAGE_TOKEN replace_token = ( DEFAULT_IM_START_TOKEN + replace_token + DEFAULT_IM_END_TOKEN ) conversation_list[i] = conversation_list[i].replace( DEFAULT_IMAGE_TOKEN, replace_token ) input_ids = [
cv2.setNumThreads(1) def collate_fn( batch, tokenizer=None, conv_type="llava_v1", use_mm_start_end=True, local_rank=-1 ): image_path_list = [] images_list = [] images_clip_list = [] conversation_list = [] masks_list = [] label_list = [] bboxes_labels_list = [] bboxes_valid_list = [] masks_valid_list = [] resize_list = [] questions_list = [] sampled_classes_list = [] offset_list = [0] cnt = 0 inferences = [] for ( image_path, images, images_clip, conversations, masks, label, bboxes_labels, bboxes_valid, masks_valid, resize, questions, sampled_classes, inference, ) in batch: image_path_list.append(image_path) images_list.append(images) images_clip_list.append(images_clip) conversation_list.extend(conversations) label_list.append(label) masks_list.append(masks.float()) bboxes_labels_list.extend(bboxes_labels) bboxes_valid_list.extend(bboxes_valid) masks_valid_list.append(torch.tensor(masks_valid)) resize_list.append(resize) questions_list.append(questions) sampled_classes_list.append(sampled_classes) cnt += len(conversations) offset_list.append(cnt) inferences.append(inference) if use_mm_start_end: # replace <image> token for i in range(len(conversation_list)): replace_token = DEFAULT_IMAGE_TOKEN replace_token = ( DEFAULT_IM_START_TOKEN + replace_token + DEFAULT_IM_END_TOKEN ) conversation_list[i] = conversation_list[i].replace( DEFAULT_IMAGE_TOKEN, replace_token ) input_ids = [
tokenizer_image_token(prompt, tokenizer, return_tensors="pt")
4
2023-12-15 14:58:24+00:00
16k
foocker/Bert-VITS2-Faster
train_ms.py
[ { "identifier": "config", "path": "config.py", "snippet": "class Resample_config:\nclass Preprocess_text_config:\nclass Bert_gen_config:\nclass Emo_gen_config:\nclass Train_ms_config:\nclass Webui_config:\nclass Server_config:\nclass Translate_config:\nclass Config:\n def __init__(self, in_dir: str, ...
import platform import os import torch import torch.distributed as dist import logging import argparse import commons import utils from torch.nn import functional as F from torch.utils.data import DataLoader from torch.utils.tensorboard import SummaryWriter from torch.nn.parallel import DistributedDataParallel as DDP from torch.cuda.amp import autocast, GradScaler from tqdm import tqdm from config import config from data_utils import ( TextAudioSpeakerLoader, TextAudioSpeakerCollate, DistributedBucketSampler, ) from models import ( SynthesizerTrn, MultiPeriodDiscriminator, DurationDiscriminator, ) from losses import generator_loss, discriminator_loss, feature_loss, kl_loss from mel_processing import mel_spectrogram_torch, spec_to_mel_torch from text.symbols import symbols
13,258
if rank == 0: train_and_evaluate( rank, local_rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, eval_loader], logger, [writer, writer_eval], ) else: train_and_evaluate( rank, local_rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, None], None, None, ) scheduler_g.step() scheduler_d.step() if net_dur_disc is not None: scheduler_dur_disc.step() def train_and_evaluate( rank, local_rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers, ): net_g, net_d, net_dur_disc = nets optim_g, optim_d, optim_dur_disc = optims scheduler_g, scheduler_d, scheduler_dur_disc = schedulers train_loader, eval_loader = loaders if writers is not None: writer, writer_eval = writers train_loader.batch_sampler.set_epoch(epoch) global global_step net_g.train() net_d.train() if net_dur_disc is not None: net_dur_disc.train() for batch_idx, ( x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert, ja_bert, en_bert, ) in tqdm(enumerate(train_loader)): if net_g.module.use_noise_scaled_mas: current_mas_noise_scale = ( net_g.module.mas_noise_scale_initial - net_g.module.noise_scale_delta * global_step ) net_g.module.current_mas_noise_scale = max(current_mas_noise_scale, 0.0) x, x_lengths = x.cuda(local_rank, non_blocking=True), x_lengths.cuda( local_rank, non_blocking=True ) spec, spec_lengths = spec.cuda( local_rank, non_blocking=True ), spec_lengths.cuda(local_rank, non_blocking=True) y, y_lengths = y.cuda(local_rank, non_blocking=True), y_lengths.cuda( local_rank, non_blocking=True ) speakers = speakers.cuda(local_rank, non_blocking=True) tone = tone.cuda(local_rank, non_blocking=True) language = language.cuda(local_rank, non_blocking=True) bert = bert.cuda(local_rank, non_blocking=True) ja_bert = ja_bert.cuda(local_rank, non_blocking=True) en_bert = en_bert.cuda(local_rank, non_blocking=True) with autocast(enabled=hps.train.fp16_run): ( y_hat, l_length, attn, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q), (hidden_x, logw, logw_), ) = net_g( x, x_lengths, spec, spec_lengths, speakers, tone, language, bert, ja_bert, en_bert, )
# flake8: noqa: E402 logging.getLogger("numba").setLevel(logging.WARNING) torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.allow_tf32 = ( True # If encontered training problem,please try to disable TF32. ) torch.set_float32_matmul_precision("medium") torch.backends.cudnn.benchmark = True torch.backends.cuda.sdp_kernel("flash") torch.backends.cuda.enable_flash_sdp(True) torch.backends.cuda.enable_mem_efficient_sdp( True ) # Not available if torch version is lower than 2.0 torch.backends.cuda.enable_math_sdp(True) global_step = 0 def run(): # 环境变量解析 envs = config.train_ms_config.env for env_name, env_value in envs.items(): if env_name not in os.environ.keys(): print("加载config中的配置{}".format(str(env_value))) os.environ[env_name] = str(env_value) print( "加载环境变量 \nMASTER_ADDR: {},\nMASTER_PORT: {},\nWORLD_SIZE: {},\nRANK: {},\nLOCAL_RANK: {}".format( os.environ["MASTER_ADDR"], os.environ["MASTER_PORT"], os.environ["WORLD_SIZE"], os.environ["RANK"], os.environ["LOCAL_RANK"], ) ) # 多卡训练设置 backend = "nccl" if platform.system() == "Windows": backend = "gloo" dist.init_process_group( backend=backend, init_method="env://", # If Windows,switch to gloo backend. ) # Use torchrun instead of mp.spawn rank = dist.get_rank() local_rank = int(os.environ["LOCAL_RANK"]) n_gpus = dist.get_world_size() # 命令行/config.yml配置解析 # hps = utils.get_hparams() parser = argparse.ArgumentParser() # 非必要不建议使用命令行配置,请使用config.yml文件 parser.add_argument( "-c", "--config", type=str, default=config.train_ms_config.config_path, help="JSON file for configuration", ) parser.add_argument( "-m", "--model", type=str, help="数据集文件夹路径,请注意,数据不再默认放在/logs文件夹下。如果需要用命令行配置,请声明相对于根目录的路径", default=config.dataset_path, ) args = parser.parse_args() model_dir = os.path.join(args.model, config.train_ms_config.model) if not os.path.exists(model_dir): os.makedirs(model_dir) hps = utils.get_hparams_from_file(args.config) hps.model_dir = model_dir # 比较路径是否相同 if os.path.realpath(args.config) != os.path.realpath( config.train_ms_config.config_path ): with open(args.config, "r", encoding="utf-8") as f: data = f.read() with open(config.train_ms_config.config_path, "w", encoding="utf-8") as f: f.write(data) torch.manual_seed(hps.train.seed) torch.cuda.set_device(local_rank) global global_step if rank == 0: logger = utils.get_logger(hps.model_dir) logger.info(hps) utils.check_git_hash(hps.model_dir) writer = SummaryWriter(log_dir=hps.model_dir) writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval")) train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data) train_sampler = DistributedBucketSampler( train_dataset, hps.train.batch_size, [32, 300, 400, 500, 600, 700, 800, 900, 1000], num_replicas=n_gpus, rank=rank, shuffle=True, ) collate_fn = TextAudioSpeakerCollate() train_loader = DataLoader( train_dataset, num_workers=16, shuffle=False, pin_memory=True, collate_fn=collate_fn, batch_sampler=train_sampler, persistent_workers=True, prefetch_factor=4, ) # DataLoader config could be adjusted. 
if rank == 0: eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data) eval_loader = DataLoader( eval_dataset, num_workers=0, shuffle=False, batch_size=1, pin_memory=True, drop_last=False, collate_fn=collate_fn, ) if ( "use_noise_scaled_mas" in hps.model.keys() and hps.model.use_noise_scaled_mas is True ): print("Using noise scaled MAS for VITS2") mas_noise_scale_initial = 0.01 noise_scale_delta = 2e-6 else: print("Using normal MAS for VITS1") mas_noise_scale_initial = 0.0 noise_scale_delta = 0.0 if ( "use_duration_discriminator" in hps.model.keys() and hps.model.use_duration_discriminator is True ): print("Using duration discriminator for VITS2") net_dur_disc = DurationDiscriminator( hps.model.hidden_channels, hps.model.hidden_channels, 3, 0.1, gin_channels=hps.model.gin_channels if hps.data.n_speakers != 0 else 0, ).cuda(local_rank) if ( "use_spk_conditioned_encoder" in hps.model.keys() and hps.model.use_spk_conditioned_encoder is True ): if hps.data.n_speakers == 0: raise ValueError( "n_speakers must be > 0 when using spk conditioned encoder to train multi-speaker model" ) else: print("Using normal encoder for VITS1") net_g = SynthesizerTrn( len(symbols), hps.data.filter_length // 2 + 1, hps.train.segment_size // hps.data.hop_length, n_speakers=hps.data.n_speakers, mas_noise_scale_initial=mas_noise_scale_initial, noise_scale_delta=noise_scale_delta, **hps.model, ).cuda(local_rank) net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(local_rank) optim_g = torch.optim.AdamW( filter(lambda p: p.requires_grad, net_g.parameters()), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps, ) optim_d = torch.optim.AdamW( net_d.parameters(), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps, ) if net_dur_disc is not None: optim_dur_disc = torch.optim.AdamW( net_dur_disc.parameters(), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps, ) else: optim_dur_disc = None net_g = DDP(net_g, device_ids=[local_rank]) net_d = DDP(net_d, device_ids=[local_rank]) dur_resume_lr = None if net_dur_disc is not None: net_dur_disc = DDP( net_dur_disc, device_ids=[local_rank], find_unused_parameters=True ) # 下载底模 if config.train_ms_config.base["use_base_model"]: utils.download_checkpoint( hps.model_dir, config.train_ms_config.base, token=config.openi_token, mirror=config.mirror, ) try: if net_dur_disc is not None: _, _, dur_resume_lr, epoch_str = utils.load_checkpoint( utils.latest_checkpoint_path(hps.model_dir, "DUR_*.pth"), net_dur_disc, optim_dur_disc, skip_optimizer=hps.train.skip_optimizer if "skip_optimizer" in hps.train else True, ) _, optim_g, g_resume_lr, epoch_str = utils.load_checkpoint( utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g, skip_optimizer=hps.train.skip_optimizer if "skip_optimizer" in hps.train else True, ) _, optim_d, d_resume_lr, epoch_str = utils.load_checkpoint( utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, optim_d, skip_optimizer=hps.train.skip_optimizer if "skip_optimizer" in hps.train else True, ) if not optim_g.param_groups[0].get("initial_lr"): optim_g.param_groups[0]["initial_lr"] = g_resume_lr if not optim_d.param_groups[0].get("initial_lr"): optim_d.param_groups[0]["initial_lr"] = d_resume_lr if not optim_dur_disc.param_groups[0].get("initial_lr"): optim_dur_disc.param_groups[0]["initial_lr"] = dur_resume_lr epoch_str = max(epoch_str, 1) global_step = (epoch_str - 1) * len(train_loader) print( f"******************检测到模型存在,epoch为 {epoch_str},gloabl step为 
{global_step}*********************" ) except Exception as e: print(e) epoch_str = 1 global_step = 0 scheduler_g = torch.optim.lr_scheduler.ExponentialLR( optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 ) scheduler_d = torch.optim.lr_scheduler.ExponentialLR( optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 ) if net_dur_disc is not None: if not optim_dur_disc.param_groups[0].get("initial_lr"): optim_dur_disc.param_groups[0]["initial_lr"] = dur_resume_lr scheduler_dur_disc = torch.optim.lr_scheduler.ExponentialLR( optim_dur_disc, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 ) else: scheduler_dur_disc = None scaler = GradScaler(enabled=hps.train.fp16_run) for epoch in range(epoch_str, hps.train.epochs + 1): if rank == 0: train_and_evaluate( rank, local_rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, eval_loader], logger, [writer, writer_eval], ) else: train_and_evaluate( rank, local_rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, None], None, None, ) scheduler_g.step() scheduler_d.step() if net_dur_disc is not None: scheduler_dur_disc.step() def train_and_evaluate( rank, local_rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers, ): net_g, net_d, net_dur_disc = nets optim_g, optim_d, optim_dur_disc = optims scheduler_g, scheduler_d, scheduler_dur_disc = schedulers train_loader, eval_loader = loaders if writers is not None: writer, writer_eval = writers train_loader.batch_sampler.set_epoch(epoch) global global_step net_g.train() net_d.train() if net_dur_disc is not None: net_dur_disc.train() for batch_idx, ( x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert, ja_bert, en_bert, ) in tqdm(enumerate(train_loader)): if net_g.module.use_noise_scaled_mas: current_mas_noise_scale = ( net_g.module.mas_noise_scale_initial - net_g.module.noise_scale_delta * global_step ) net_g.module.current_mas_noise_scale = max(current_mas_noise_scale, 0.0) x, x_lengths = x.cuda(local_rank, non_blocking=True), x_lengths.cuda( local_rank, non_blocking=True ) spec, spec_lengths = spec.cuda( local_rank, non_blocking=True ), spec_lengths.cuda(local_rank, non_blocking=True) y, y_lengths = y.cuda(local_rank, non_blocking=True), y_lengths.cuda( local_rank, non_blocking=True ) speakers = speakers.cuda(local_rank, non_blocking=True) tone = tone.cuda(local_rank, non_blocking=True) language = language.cuda(local_rank, non_blocking=True) bert = bert.cuda(local_rank, non_blocking=True) ja_bert = ja_bert.cuda(local_rank, non_blocking=True) en_bert = en_bert.cuda(local_rank, non_blocking=True) with autocast(enabled=hps.train.fp16_run): ( y_hat, l_length, attn, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q), (hidden_x, logw, logw_), ) = net_g( x, x_lengths, spec, spec_lengths, speakers, tone, language, bert, ja_bert, en_bert, )
mel = spec_to_mel_torch(
12
2023-12-18 09:53:41+00:00
16k
sinoyou/nelf-pro
nerfstudio/models/nelfpro.py
[ { "identifier": "RayBundle", "path": "nerfstudio/cameras/rays.py", "snippet": "class RayBundle(TensorDataclass):\n \"\"\"A bundle of ray parameters.\"\"\"\n\n # TODO(ethan): make sure the sizes with ... are correct\n origins: TensorType[..., 3]\n \"\"\"Ray origins (XYZ)\"\"\"\n directions...
from dataclasses import dataclass, field from typing import Dict, List, Tuple, Type from torch.nn import Parameter from torchmetrics import PeakSignalNoiseRatio from torchmetrics.functional import structural_similarity_index_measure from torchmetrics.image.lpip import LearnedPerceptualImagePatchSimilarity from typing_extensions import Literal from nerfstudio.cameras.rays import RayBundle from nerfstudio.engine.callbacks import ( TrainingCallback, TrainingCallbackAttributes, TrainingCallbackLocation, ) from nerfstudio.fields.nelfpro_field import NeLFProField from nerfstudio.fields.density_fields import NeLFDensityField from nerfstudio.model_components.losses import ( MSELoss, distortion_loss, interlevel_loss, field_tv_loss, ) from nerfstudio.model_components.ray_samplers import ProposalNetworkSampler from nerfstudio.model_components.renderers import ( AccumulationRenderer, DepthRenderer, RGBRenderer, ) from nerfstudio.model_components.scene_colliders import NearFarCollider, EarthCollider from nerfstudio.models.base_model import Model, ModelConfig from nerfstudio.utils import colormaps from nerfstudio.utils.colors import get_color import numpy as np import torch
13,802
callbacks = [] def reinitialize_optimizer(params_name, training_callback_attributes, step): '''reinitialize optimizer and scheduler after upsampling. ''' optimizers_config = training_callback_attributes.optimizers.config data = training_callback_attributes.pipeline.get_param_groups()[params_name] lr_init = optimizers_config[params_name]["optimizer"].lr opt_state_param_groups = training_callback_attributes.optimizers.optimizers[params_name].state_dict()['param_groups'] training_callback_attributes.optimizers.optimizers[params_name] = optimizers_config[params_name]["optimizer"].setup(params=data) # note: we load_state_dict() for loading param_groups's _lr_initial, which is used for scheduler's last_epoch. training_callback_attributes.optimizers.optimizers[params_name].load_state_dict({ 'state': training_callback_attributes.optimizers.optimizers[params_name].state_dict()['state'], 'param_groups': opt_state_param_groups }) if optimizers_config[params_name]["scheduler"]: # save current state dict training_callback_attributes.optimizers.schedulers[params_name] = optimizers_config[params_name]["scheduler"].setup( optimizer=training_callback_attributes.optimizers.optimizers[params_name], lr_init=lr_init, last_epoch=step, ) # upsampling core factor if self.config.resolution_core_angular < self.config.resolution_max_core_angular or self.config.resolution_core_radial < self.config.resolution_max_core_radial: def upsample_core(self, training_callback_attributes: TrainingCallbackAttributes, step: int): angular_res = self.core_angular_upsamling_step.pop(0) radial_res = self.core_radial_upsamling_step.pop(0) self.field.upsample_core((angular_res, radial_res)) reinitialize_optimizer('field_core_angular', training_callback_attributes, step) reinitialize_optimizer('field_core_radial', training_callback_attributes, step) callbacks.append( TrainingCallback( where_to_run=[TrainingCallbackLocation.AFTER_TRAIN_ITERATION], iters=self.config.iters_core_upsampling, func=upsample_core, args=[self, training_callback_attributes] ) ) # update distortion loss multiplier if self.config.distortion_loss_mult_factor_max != 1.0: def update_distortion_loss_mult_factor(self, step: int): self.current_distort_loss_mult_factor = self.distortion_loss_mult_factor_step.pop(0) * self.config.distortion_loss_mult callbacks.append( TrainingCallback( where_to_run=[TrainingCallbackLocation.AFTER_TRAIN_ITERATION], iters=self.config.distortion_loss_mult_factor_iters, func=update_distortion_loss_mult_factor, args=[self] ) ) # Proposal Network Annealing and Status Update if self.config.use_proposal_weight_anneal: N = self.config.proposal_weights_anneal_max_num_iters def set_anneal(step): train_frac = np.clip(step / N, 0, 1) bias = lambda x, b: (b * x) / ((b - 1) * x + 1) anneal = bias(train_frac, self.config.proposal_weights_anneal_slope) self.proposal_sampler.set_anneal(anneal) callbacks.append( TrainingCallback( where_to_run=[TrainingCallbackLocation.BEFORE_TRAIN_ITERATION], update_every_num_iters=1, func=set_anneal, ) ) callbacks.append( TrainingCallback( where_to_run=[TrainingCallbackLocation.AFTER_TRAIN_ITERATION], update_every_num_iters=1, func=self.proposal_sampler.step_cb, ) ) return callbacks def get_outputs(self, ray_bundle: RayBundle): ray_samples, weights_list, ray_samples_list = self.proposal_sampler(ray_bundle, density_fns=self.density_fns) field_outputs = self.field(ray_samples) weights = ray_samples.get_weights(field_outputs['density']) weights_list.append(weights) ray_samples_list.append(ray_samples) rgb = 
self.renderer_rgb(rgb=field_outputs['rgb'], weights=weights) depth = self.renderer_depth(weights=weights, ray_samples=ray_samples) accumulation = self.renderer_accumulation(weights=weights) outputs = { "rgb": rgb, "accumulation": accumulation, "depth": depth, } if self.training: outputs["tensors"] = { 'basis': field_outputs['basis'], 'core_angular': field_outputs['core_angular'], } # These use a lot of GPU memory, so we avoid storing them for eval. if self.training: outputs["weights_list"] = weights_list outputs["ray_samples_list"] = ray_samples_list for i in range(self.config.num_proposal_iterations): outputs[f"prop_depth_{i}"] = self.renderer_depth(weights=weights_list[i], ray_samples=ray_samples_list[i]) return outputs def get_metrics_dict(self, outputs, batch): metrics_dict = {} image = batch["image"].to(self.device) metrics_dict["psnr"] = self.psnr(outputs["rgb"], image) if self.training:
from __future__ import annotations @dataclass class NeLFProModelConfig(ModelConfig): _target: Type = field(default_factory=lambda: NeLFProModel) # basis factor configs num_basis: int = 64 """Number of basis factors.""" near_basis: int = 16 """Number of near basis factors for each ray. """ dim_basis: int = 2 """Feature dimension of basis factor. """ resolution_basis: int = 256 """Tensor resolution of basis factor. """ freq_theta: float = 1 """Frequency of multiplicative warping for theta. """ freq_phi: float = 1 """Frequency of multiplicative warping for phi.""" # core factor configs num_core: int = 3 """Number of core factors.""" near_core: int = 3 """Number of near core factors for each ray.""" dim_core: int = 32 """Feature dimension of core factor.""" resolution_core_angular: int = 64 """Initial tensor resolution of angular core factor. (a.k.a theta and phi)""" resolution_max_core_angular: int = 128 """Max tensor resolution of angular core factor. (a.k.a theta and phi)""" resolution_core_radial: int = 64 """Initial tensor resolution of radial core factor. (a.k.a depth direction)""" resolution_max_core_radial: int = 1024 """Max tensor resolution of radial core factor. (a.k.a depth direction)""" iters_core_upsampling: Tuple[int, ...] = (2000, 3000, 4000, 5500, 7000) """Iterations for upsampling core factor. """ # apperance embedding settings use_appearance_embedding: bool = False """Whether to use appearance embedding. """ # sampler config near_plane: float = 0.05 """Near plane for initial ray sampler. """ far_plane: float = 1000.0 """Far plane for initial ray sampler.""" use_earth_collider: bool = False """Whether to use earth model collider, must pass scene-specific scale. (e.g. for bungeenerf dataset)""" earth_collider_scale: float = 1.0 """Scale of the earth model collider. """ use_single_jitter: bool = False """Whether to use single jitter for initial ray sampler.""" init_sampler: Literal['uniform', 'sqrt', 'log', 'uniformdisp'] = 'uniformdisp' """Initial ray sampler function type. """ # proposal network config num_proposal_iterations: int = 2 """Number of proposal network iterations.""" num_proposal_samples_per_ray_1: int = 256 """Number of proposal samples per ray for the #1 proposal network iteration.""" num_proposal_samples_per_ray_2: int = 96 """Number of proposal samples per ray for the #2 proposal network iteration.""" num_nerf_samples_per_ray: int = 48 """Number of nerf samples per ray for the main field. 
""" proposal_update_every: int = 5 """Update proposal network every # iterations.""" proposal_warmup: int = 5000 """Warmup proposal network by linear learning rate for the first # iterations.""" proposal_net_args_list: List[Dict] = field( default_factory=lambda: [ {"angular_resolution": 64, "radial_resolution": 128, "feat_dim": 8, }, {"angular_resolution": 128, "radial_resolution": 256, "feat_dim": 8, }, ] ) """List of proposal network arguments for each proposal networks""" proposal_weights_anneal_slope: float = 10.0 """Slope of the annealing function for the proposal weights.""" proposal_weights_anneal_max_num_iters: int = 100 """Max num iterations for the annealing function.""" use_proposal_weight_anneal: bool = True """Whether to use proposal weight annealing.""" # rendering and training config background_color: Literal["random", "last_sample", "white", "black"] = "random" """Background color for rendering when accumulation doesn't reach 1.""" interlevel_loss_mult: float = 1.0 """Interlevel loss multiplier.""" distortion_loss_mult: float = 0.002 """Initial distortion loss multiplier.""" distortion_loss_mult_factor_max: int = 1 """Max multiplication factor for distortion loss multiplier.""" distortion_loss_mult_factor_iters: Tuple[int, ...] = (500, 1000, 2000, 4000) """Iterations for upsampling distortion loss multiplier.""""" basis_tv_loss_mult: float = 0.0 """Tv loss multiplier for basis factor.""" core_tv_loss_mult: float = 0.0 """Tv loss multiplier for core factor.""" def get_upsample_steps(res_base, res_max, num_iters): x = ( np.round( np.exp( np.linspace( np.log(res_base), np.log(res_max), num_iters + 1, ) ) ).astype("int").tolist()[1:] ) return x class NeLFProModel(Model): config: NeLFProModelConfig def populate_modules(self): """Set the fields and modules.""" super().populate_modules() # Resolution and Loss Multiplier Upsampling Config if self.config.resolution_core_angular < self.config.resolution_max_core_angular or self.config.resolution_core_radial < self.config.resolution_max_core_radial: self.core_angular_upsamling_step = get_upsample_steps(self.config.resolution_core_angular, self.config.resolution_max_core_angular, len(self.config.iters_core_upsampling)) self.core_radial_upsamling_step = get_upsample_steps(self.config.resolution_core_radial, self.config.resolution_max_core_radial, len(self.config.iters_core_upsampling)) if self.config.distortion_loss_mult_factor_max != 1.0: self.distortion_loss_mult_factor_step = get_upsample_steps(1.0, self.config.distortion_loss_mult_factor_max, len(self.config.distortion_loss_mult_factor_iters)) self.current_distort_loss_mult_factor = self.config.distortion_loss_mult * 1.0 else: self.current_distort_loss_mult_factor = self.config.distortion_loss_mult * 1.0 # Main Field self.field = NeLFProField( num_images=self.num_train_data, num_basis = self.config.num_basis, near_basis = self.config.near_basis, dim_basis = self.config.dim_basis, resolution_basis = self.config.resolution_basis, num_core = self.config.num_core, near_core = self.config.near_core, dim_core = self.config.dim_core, resolution_core_angular = self.config.resolution_core_angular, resolution_core_radial = self.config.resolution_core_radial, freq_theta = self.config.freq_theta, freq_phi = self.config.freq_phi, use_appearance_embedding=self.config.use_appearance_embedding, ) # Proposal Networks self.proposal_networks = torch.nn.ModuleList() assert len(self.config.proposal_net_args_list) == self.config.num_proposal_iterations, 'proposal_net_args_list should have the same length 
as num_proposal_iterations' for i in range(self.config.num_proposal_iterations): prop_net_args = self.config.proposal_net_args_list[i] network = NeLFDensityField(num_core=self.config.num_core, near_core=self.config.near_core, **prop_net_args) self.proposal_networks.append(network) self.density_fns = [network.density_fn for network in self.proposal_networks] # Proposal Sampler update_schedule = lambda step: np.clip( np.interp(step, [0, self.config.proposal_warmup], [0, self.config.proposal_update_every]), 1, self.config.proposal_update_every, ) self.proposal_sampler = ProposalNetworkSampler( init_sampler=self.config.init_sampler, num_nerf_samples_per_ray=self.config.num_nerf_samples_per_ray, num_proposal_samples_per_ray=[self.config.num_proposal_samples_per_ray_1, self.config.num_proposal_samples_per_ray_2], num_proposal_network_iterations=self.config.num_proposal_iterations, single_jitter=self.config.use_single_jitter, update_sched=update_schedule, ) # Collider if self.config.use_earth_collider: self.collider = EarthCollider(scene_scaling_factor=self.config.earth_collider_scale) else: self.collider = NearFarCollider(near_plane=self.config.near_plane, far_plane=self.config.far_plane) # Renders background_color = ( get_color(self.config.background_color) if self.config.background_color in set(["white", "black"]) else self.config.background_color ) self.renderer_rgb = RGBRenderer(background_color=background_color) self.renderer_accumulation = AccumulationRenderer() self.renderer_depth = DepthRenderer() # losses self.rgb_loss = MSELoss() # metrics self.psnr = PeakSignalNoiseRatio(data_range=1.0) self.ssim = structural_similarity_index_measure self.lpips = LearnedPerceptualImagePatchSimilarity(normalize=True, net_type='vgg') # adjust step-dependent components self.adjust_step_dependent_components() def n_parameters(self): """Return the number of parameters in the model.""" return sum(p.numel() for p in self.field.parameters()) + sum(p.numel() for p in self.proposal_networks.parameters()) def adjust_step_dependent_components(self) -> None: """Call the model's customized load function. Args: checkpoint_dir: directory of checkpoint """ load_step = self.load_step if not load_step: return assert load_step >= 0, f"load_step must be non-negative, got {load_step}" # distortion factor if self.config.distortion_loss_mult_factor_max != 1.0: i = 0 while len(self.distortion_loss_mult_factor_step) > 0 and load_step >= self.config.distortion_loss_mult_factor_iters[i]: i += 1 self.current_distort_loss_mult_factor = self.distortion_loss_mult_factor_step.pop(0) * self.config.distortion_loss_mult assert len(self.config.distortion_loss_mult_factor_iters) - i == len(self.distortion_loss_mult_factor_step), 'distortion_loss_mult_factor_step should have the same length as distortion_loss_mult_factor_iters' # core upsampling if self.config.resolution_core_angular < self.config.resolution_max_core_angular or self.config.resolution_core_radial < self.config.resolution_max_core_radial: i = 0 while len(self.core_radial_upsamling_step) > 0 and load_step >= self.config.iters_core_upsampling[i]: i += 1 angular_res = self.core_angular_upsamling_step.pop(0) radial_res = self.core_radial_upsamling_step.pop(0) self.field.upsample_core((angular_res, radial_res)) assert len(self.config.iters_core_upsampling) - i == len(self.core_radial_upsamling_step), 'core_radial_upsamling_step should have the same length as iters_core_upsampling' # proposal network annealing: ignore as its update frequency is very high. 
def get_param_groups(self) -> Dict[str, List[Parameter]]: param_groups = {} # seperate the parameters of the field into two groups: fields (e.g. MLP) and fields_coef (e.g. camera fields) field_basis = self.field.get_basis_fields() field_core_angular = self.field.get_core_fields(name='angular') field_core_radial = self.field.get_core_fields(name='radial') param_groups["field_mlp"] = list() param_groups["field_basis"] = list() param_groups["field_core_angular"] = list() param_groups["field_core_radial"] = list() param_groups["proposal_networks"] = list() for field_params in self.field.parameters(): if field_params in field_basis: param_groups["field_basis"].append(field_params) elif field_params in field_core_angular: param_groups["field_core_angular"].append(field_params) elif field_params in field_core_radial: param_groups["field_core_radial"].append(field_params) else: param_groups["field_mlp"].append(field_params) for proposal_parameter in self.proposal_networks.parameters(): param_groups["proposal_networks"].append(proposal_parameter) return param_groups def get_training_callbacks( self, training_callback_attributes: TrainingCallbackAttributes ) -> List[TrainingCallback]: callbacks = [] def reinitialize_optimizer(params_name, training_callback_attributes, step): '''reinitialize optimizer and scheduler after upsampling. ''' optimizers_config = training_callback_attributes.optimizers.config data = training_callback_attributes.pipeline.get_param_groups()[params_name] lr_init = optimizers_config[params_name]["optimizer"].lr opt_state_param_groups = training_callback_attributes.optimizers.optimizers[params_name].state_dict()['param_groups'] training_callback_attributes.optimizers.optimizers[params_name] = optimizers_config[params_name]["optimizer"].setup(params=data) # note: we load_state_dict() for loading param_groups's _lr_initial, which is used for scheduler's last_epoch. 
training_callback_attributes.optimizers.optimizers[params_name].load_state_dict({ 'state': training_callback_attributes.optimizers.optimizers[params_name].state_dict()['state'], 'param_groups': opt_state_param_groups }) if optimizers_config[params_name]["scheduler"]: # save current state dict training_callback_attributes.optimizers.schedulers[params_name] = optimizers_config[params_name]["scheduler"].setup( optimizer=training_callback_attributes.optimizers.optimizers[params_name], lr_init=lr_init, last_epoch=step, ) # upsampling core factor if self.config.resolution_core_angular < self.config.resolution_max_core_angular or self.config.resolution_core_radial < self.config.resolution_max_core_radial: def upsample_core(self, training_callback_attributes: TrainingCallbackAttributes, step: int): angular_res = self.core_angular_upsamling_step.pop(0) radial_res = self.core_radial_upsamling_step.pop(0) self.field.upsample_core((angular_res, radial_res)) reinitialize_optimizer('field_core_angular', training_callback_attributes, step) reinitialize_optimizer('field_core_radial', training_callback_attributes, step) callbacks.append( TrainingCallback( where_to_run=[TrainingCallbackLocation.AFTER_TRAIN_ITERATION], iters=self.config.iters_core_upsampling, func=upsample_core, args=[self, training_callback_attributes] ) ) # update distortion loss multiplier if self.config.distortion_loss_mult_factor_max != 1.0: def update_distortion_loss_mult_factor(self, step: int): self.current_distort_loss_mult_factor = self.distortion_loss_mult_factor_step.pop(0) * self.config.distortion_loss_mult callbacks.append( TrainingCallback( where_to_run=[TrainingCallbackLocation.AFTER_TRAIN_ITERATION], iters=self.config.distortion_loss_mult_factor_iters, func=update_distortion_loss_mult_factor, args=[self] ) ) # Proposal Network Annealing and Status Update if self.config.use_proposal_weight_anneal: N = self.config.proposal_weights_anneal_max_num_iters def set_anneal(step): train_frac = np.clip(step / N, 0, 1) bias = lambda x, b: (b * x) / ((b - 1) * x + 1) anneal = bias(train_frac, self.config.proposal_weights_anneal_slope) self.proposal_sampler.set_anneal(anneal) callbacks.append( TrainingCallback( where_to_run=[TrainingCallbackLocation.BEFORE_TRAIN_ITERATION], update_every_num_iters=1, func=set_anneal, ) ) callbacks.append( TrainingCallback( where_to_run=[TrainingCallbackLocation.AFTER_TRAIN_ITERATION], update_every_num_iters=1, func=self.proposal_sampler.step_cb, ) ) return callbacks def get_outputs(self, ray_bundle: RayBundle): ray_samples, weights_list, ray_samples_list = self.proposal_sampler(ray_bundle, density_fns=self.density_fns) field_outputs = self.field(ray_samples) weights = ray_samples.get_weights(field_outputs['density']) weights_list.append(weights) ray_samples_list.append(ray_samples) rgb = self.renderer_rgb(rgb=field_outputs['rgb'], weights=weights) depth = self.renderer_depth(weights=weights, ray_samples=ray_samples) accumulation = self.renderer_accumulation(weights=weights) outputs = { "rgb": rgb, "accumulation": accumulation, "depth": depth, } if self.training: outputs["tensors"] = { 'basis': field_outputs['basis'], 'core_angular': field_outputs['core_angular'], } # These use a lot of GPU memory, so we avoid storing them for eval. 
if self.training: outputs["weights_list"] = weights_list outputs["ray_samples_list"] = ray_samples_list for i in range(self.config.num_proposal_iterations): outputs[f"prop_depth_{i}"] = self.renderer_depth(weights=weights_list[i], ray_samples=ray_samples_list[i]) return outputs def get_metrics_dict(self, outputs, batch): metrics_dict = {} image = batch["image"].to(self.device) metrics_dict["psnr"] = self.psnr(outputs["rgb"], image) if self.training:
metrics_dict["distortion"] = distortion_loss(outputs["weights_list"], outputs["ray_samples_list"])
6
2023-12-15 20:07:22+00:00
16k
amazon-science/c2f-seg
src/video_model.py
[ { "identifier": "VQModel", "path": "taming_src/taming_models.py", "snippet": "class VQModel(nn.Module):\n def __init__(self, config):\n super(VQModel, self).__init__()\n self.config = config\n self.iteration = 0\n self.name = config.model_type\n self.m_path = os.pat...
import os import math import random import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import torch.distributed as dist from torchvision import transforms from taming_src.taming_models import VQModel from src.video_component import MaskedTransformer, Resnet_Encoder, Refine_Module from src.loss import VGG19, PerceptualLoss from utils.pytorch_optimization import AdamW, get_linear_schedule_with_warmup from utils.utils import torch_show_all_params, torch_init_model from utils.utils import Config from utils.evaluation import video_iou from utils.loss import CrossEntropyLoss from tqdm import tqdm
11,267
v, ix = torch.topk(logits, k) out = logits.clone() out[out < v[..., [-1]]] = -float('Inf') return out @torch.no_grad() def batch_predict_maskgit(self, meta, iter, mode, temperature=1.0, T=3, start_iter=0): ''' :param x:[B,3,H,W] image :param c:[b,X,H,W] condition :param mask: [1,1,H,W] mask ''' self.sample_iter += 1 img_feat = self.img_encoder(meta['img_crop'].squeeze().permute((0,3,1,2)).to(torch.float32)) _, src_indices = self.encode_to_z(meta['vm_crop']) # _, tgt_indices = self.encode_to_z(meta['fm_crop']) bhwc = (_.shape[0], _.shape[2], _.shape[3], _.shape[1]) masked_indices = self.mask_token_idx * torch.ones_like(src_indices, device=src_indices.device) # [B, L] unknown_number_in_the_beginning = torch.sum(masked_indices == self.mask_token_idx, dim=-1) # [B] gamma = self.gamma_func("cosine") cur_ids = masked_indices # [B, L] seq_out = [] mask_out = [] for t in range(start_iter, T): logits = self.transformer(img_feat[-1], src_indices, cur_ids, mask=None) # [B, L, N] logits = logits[..., :-1] logits = self.top_k_logits(logits, k=3) probs = F.softmax(logits, dim=-1) # convert logits into probs [B, 256, vocab_size+1] sampled_ids = torch.distributions.categorical.Categorical(probs=probs).sample() # [B, L] unknown_map = (cur_ids == self.mask_token_idx) # which tokens need to be sampled -> bool [B, 256] sampled_ids = torch.where(unknown_map, sampled_ids, cur_ids) # replace all -1 with their samples and leave the others untouched [B, 256] seq_out.append(sampled_ids) mask_out.append(1. * unknown_map) ratio = 1. * (t + 1) / T # just a percentage e.g. 1 / 12 mask_ratio = gamma(ratio) selected_probs = probs.gather(dim=-1, index=sampled_ids.unsqueeze(-1)).squeeze(-1) selected_probs = torch.where(unknown_map, selected_probs, torch.Tensor([np.inf]).to(logits.device)) # ignore tokens which are already sampled [B, 256] mask_len = torch.unsqueeze(torch.floor(unknown_number_in_the_beginning * mask_ratio), 1) # floor(256 * 0.99) = 254 --> [254, 254, 254, 254, ....] (B x 1) mask_len = torch.maximum(torch.ones_like(mask_len), torch.minimum(torch.sum(unknown_map, dim=-1, keepdim=True) - 1, mask_len)) # Adds noise for randomness masking = self.mask_by_random_topk(mask_len, selected_probs, temperature=self.choice_temperature * (1. - ratio)) # Masks tokens with lower confidence. 
cur_ids = torch.where(masking, self.mask_token_idx, sampled_ids) # [B, L] seq_ids = torch.stack(seq_out, dim=1) # [B, T, L] quant_z = self.g_model.quantize.get_codebook_entry(seq_ids[:,-1,:].reshape(-1), shape=bhwc) pred_fm_crop = self.g_model.decode(quant_z) pred_fm_crop = pred_fm_crop.mean(dim=1, keepdim=True) pred_fm_crop_old = torch.clamp(pred_fm_crop, min=0, max=1) pred_vm_crop, pred_fm_crop = self.refine_module(img_feat, pred_fm_crop_old) pred_vm_crop = F.interpolate(pred_vm_crop, size=(256, 256), mode="nearest") pred_vm_crop = torch.sigmoid(pred_vm_crop) loss_vm = self.refine_criterion(pred_vm_crop, meta['vm_crop'].transpose(1,0)) # pred_vm_crop = (pred_vm_crop>=0.5).to(torch.float32) pred_fm_crop = F.interpolate(pred_fm_crop, size=(256, 256), mode="nearest") pred_fm_crop = torch.sigmoid(pred_fm_crop) loss_fm = self.refine_criterion(pred_fm_crop, meta['fm_crop'].transpose(1,0)) # pred_fm_crop = (pred_fm_crop>=0.5).to(torch.float32) pred_fm_crop_old = self.align_raw_size(pred_fm_crop_old, meta['obj_position'], meta["vm_pad"], meta) # pred_vm = self.align_raw_size(pred_vm_crop, meta['obj_position'], meta["vm_pad"], meta) pred_fm = self.align_raw_size(pred_fm_crop, meta['obj_position'], meta["vm_pad"], meta) pred_fm = pred_fm + pred_fm_crop_old loss_eval = self.loss_and_evaluation(pred_fm, meta) loss_eval["loss_fm"] = loss_fm loss_eval["loss_vm"] = loss_vm return loss_eval def create_inputs_tokens_normal(self, num, device): self.num_latent_size = self.config['resolution'] // self.config['patch_size'] blank_tokens = torch.ones((num, self.num_latent_size ** 2), device=device) masked_tokens = self.mask_token_idx * blank_tokens return masked_tokens.to(torch.int64) def gamma_func(self, mode="cosine"): if mode == "linear": return lambda r: 1 - r elif mode == "cosine": return lambda r: np.cos(r * np.pi / 2) elif mode == "square": return lambda r: 1 - r ** 2 elif mode == "cubic": return lambda r: 1 - r ** 3 elif mode == "log": return lambda r, total_unknown: - np.log2(r) / np.log2(total_unknown) else: raise NotImplementedError def mask_by_random_topk(self, mask_len, probs, temperature=1.0): confidence = torch.log(probs) + temperature * torch.distributions.gumbel.Gumbel(0, 1).sample(probs.shape).to(probs.device) sorted_confidence, _ = torch.sort(confidence, dim=-1) # from small to large # Obtains cut off threshold given the mask lengths. # cut_off = torch.take_along_dim(sorted_confidence, mask_len.to(torch.long), dim=-1) cut_off = sorted_confidence.gather(dim=-1, index=mask_len.to(torch.long)) # Masks tokens with lower confidence. masking = (confidence < cut_off) return masking def load(self, is_test=False, prefix=None): if prefix is not None: transformer_path = self.transformer_path + prefix + '.pth' else: transformer_path = self.transformer_path + '_last.pth' if self.config.restore or is_test: # transformer_path = '/home/ubuntu/AmodalVQGAN_pre/check_points/fish_amodal_transformer_gjx/GETransformer_210000.pth' if os.path.exists(transformer_path): print('Rank {} is loading {} Transformer...'.format(self.rank, transformer_path)) data = torch.load(transformer_path, map_location="cpu")
class C2F_Seg(nn.Module): def __init__(self, config, g_path, mode, logger=None, save_eval_dict={}): super(C2F_Seg, self).__init__() self.config = config self.iteration = 0 self.sample_iter = 0 self.name = config.model_type # load g model for mask self.g_config = Config(os.path.join(g_path, 'vqgan_{}.yml'.format(config.dataset))) self.g_path = os.path.join(g_path, self.g_config.model_type) self.root_path = config.path self.transformer_path = os.path.join(config.path, self.name) # self.refine_path = os.path.join(config.path, "Refine") self.trans_size = config.trans_size self.mode = mode self.save_eval_dict = save_eval_dict self.eps = 1e-6 self.train_sample_iters = config.train_sample_iters self.g_model = VQModel(self.g_config).to(config.device) self.img_encoder = Resnet_Encoder().to(config.device) self.refine_module = Refine_Module().to(config.device) self.transformer = MaskedTransformer(config).to(config.device) self.g_model.eval() self.refine_criterion = nn.BCELoss() self.criterion = CrossEntropyLoss(num_classes=config.vocab_size+1, device=config.device) if config.train_with_dec: if not config.gumbel_softmax: self.temperature = nn.Parameter(torch.tensor([config.tp], dtype=torch.float32), requires_grad=True).to(config.device) if config.use_vgg: vgg = VGG19(pretrained=True, vgg_norm=config.vgg_norm).to(config.device) vgg.eval() reduction = 'mean' if config.balanced_loss is False else 'none' self.perceptual_loss = PerceptualLoss(vgg, weights=config.vgg_weights, reduction=reduction).to(config.device) else: self.perceptual_loss = None if config.init_gpt_with_vqvae: self.transformer.z_emb.weight = self.g_model.quantize.embedding.weight if logger is not None: logger.info('Gen Parameters:{}'.format(torch_show_all_params(self.g_model))) logger.info('Transformer Parameters:{}'.format(torch_show_all_params(self.transformer))) else: print('Gen Parameters:{}'.format(torch_show_all_params(self.g_model))) print('Transformer Parameters:{}'.format(torch_show_all_params(self.transformer))) # loss no_decay = ['bias', 'ln1.bias', 'ln1.weight', 'ln2.bias', 'ln2.weight'] ignored_param = ['z_emb.weight', 'c_emb.weight'] param_optimizer = self.transformer.named_parameters() param_optimizer_encoder = self.img_encoder.named_parameters() param_optimizer_refine= self.refine_module.named_parameters() optimizer_parameters = [ {'params': [p for n, p in param_optimizer if not any([nd in n for nd in no_decay])], 'weight_decay': config.weight_decay}, {'params': [p for n, p in param_optimizer if any([nd in n for nd in no_decay])], 'weight_decay': 0.0}, {'params': [p for n, p in param_optimizer_encoder], 'weight_decay': config.weight_decay}, {'params': [p for n, p in param_optimizer_refine], 'weight_decay': config.weight_decay}, ] self.opt = AdamW(params=optimizer_parameters, lr=float(config.lr), betas=(config.beta1, config.beta2)) self.sche = get_linear_schedule_with_warmup(self.opt, num_warmup_steps=config.warmup_iters, num_training_steps=config.max_iters) self.rank = dist.get_rank() self.gamma = self.gamma_func(mode=config.gamma_mode) self.mask_token_idx = config.vocab_size self.choice_temperature = 4.5 self.Image_W = config.Image_W self.Image_H = config.Image_H self.patch_W = config.patch_W self.patch_H = config.patch_H @torch.no_grad() def encode_to_z(self, x, mask=None): if len(x.size())==5: x = x[0] x = x.permute((1,0,2,3)) quant_z, _, info = self.g_model.encode(x.float(), mask) # [B,D,H,W] indices = info[2].view(quant_z.shape[0], -1) # [B, L] return quant_z, indices def data_augmentation(self, mask): w = 
random.randint(5, 11) h = random.randint(5, 11) rdv = random.random() n_repeat = random.randint(1, 3) max_pool = nn.MaxPool2d(kernel_size=(w, h), stride=1, padding=(w//2, h//2)) if rdv < 0.3: for i in range(n_repeat): mask = max_pool(mask) elif rdv >=0.3 and rdv < 0.6: for i in range(n_repeat): mask = -max_pool(-mask) else: mask = mask return mask def get_attn_map(self, feature, guidance): guidance = F.interpolate(guidance, scale_factor=(1/16)) b,c,h,w = guidance.shape q = torch.flatten(guidance, start_dim=2) v = torch.flatten(feature, start_dim=2) k = v * q k = k.sum(dim=-1, keepdim=True) / (q.sum(dim=-1, keepdim=True) + 1e-6) attn = (k.transpose(-2, -1) @ v) / 1 attn = F.softmax(attn, dim=-1) attn = attn.reshape(b, c, h, w) return attn def get_losses(self, meta): self.iteration += 1 z_loss = 0 img_feat = self.img_encoder(meta['img_crop'].squeeze().permute((0,3,1,2)).to(torch.float32)) _, src_indices = self.encode_to_z(meta['vm_crop']) _, tgt_indices = self.encode_to_z(meta['fm_crop']) bhwc = (_.shape[0], _.shape[2], _.shape[3], _.shape[1]) r = np.maximum(self.gamma(np.random.uniform()), self.config.min_mask_rate) r = math.floor(r * tgt_indices.shape[1]) sample = torch.rand(tgt_indices.shape, device=tgt_indices.device).topk(r, dim=1).indices random_mask = torch.zeros(tgt_indices.shape, dtype=torch.bool, device=tgt_indices.device) random_mask.scatter_(dim=1, index=sample, value=True) # [B, L] # concat mask mask = random_mask masked_indices = self.mask_token_idx * torch.ones_like(tgt_indices, device=tgt_indices.device) # [B, L] z_indices = (~mask) * tgt_indices + mask * masked_indices # [B, L] logits_z = self.transformer(img_feat[-1], src_indices, z_indices, mask=None) target = tgt_indices z_loss = self.criterion(logits_z.view(-1, logits_z.size(-1)), target.view(-1)) with torch.no_grad(): logits_z = logits_z[..., :-1] logits_z = self.top_k_logits(logits_z, k=5) probs = F.softmax(logits_z, dim=-1) seq_ids = torch.distributions.categorical.Categorical(probs=probs).sample() # [B, L] quant_z = self.g_model.quantize.get_codebook_entry(seq_ids.reshape(-1), shape=bhwc) pred_fm_crop = self.g_model.decode(quant_z) pred_fm_crop = pred_fm_crop.mean(dim=1, keepdim=True) pred_fm_crop = torch.clamp(pred_fm_crop, min=0, max=1) pred_vm_crop, pred_fm_crop = self.refine_module(img_feat, pred_fm_crop.detach()) pred_vm_crop = F.interpolate(pred_vm_crop, size=(256, 256), mode="nearest") pred_vm_crop = torch.sigmoid(pred_vm_crop) loss_vm = self.refine_criterion(pred_vm_crop, meta['vm_crop_gt'].transpose(1,0)) pred_fm_crop = F.interpolate(pred_fm_crop, size=(256, 256), mode="nearest") pred_fm_crop = torch.sigmoid(pred_fm_crop) loss_fm = self.refine_criterion(pred_fm_crop, meta['fm_crop'].transpose(1,0)) logs = [ ("z_loss", z_loss.item()), ("loss_vm", loss_vm.item()), ("loss_fm", loss_fm.item()), ] return z_loss, loss_vm+loss_fm, logs def align_raw_size(self, full_mask, obj_position, vm_pad, meta): vm_np_crop = meta["vm_no_crop"].squeeze() H, W = vm_np_crop.shape[-2], vm_np_crop.shape[-1] bz, seq_len = full_mask.shape[:2] new_full_mask = torch.zeros((bz, seq_len, H, W)).to(torch.float32).cuda() if len(vm_pad.shape)==3: vm_pad = vm_pad[0] obj_position = obj_position[0] for b in range(bz): paddings = vm_pad[b] position = obj_position[b] new_fm = full_mask[ b, :, :-int(paddings[0]) if int(paddings[0]) !=0 else None, :-int(paddings[1]) if int(paddings[1]) !=0 else None ] vx_min = int(position[0]) vx_max = min(H, int(position[1])+1) vy_min = int(position[2]) vy_max = min(W, int(position[3])+1) resize = 
transforms.Resize([vx_max-vx_min, vy_max-vy_min]) try: new_fm = resize(new_fm) new_full_mask[b, :, vx_min:vx_max, vy_min:vy_max] = new_fm[0] except: new_fm = new_fm return new_full_mask def loss_and_evaluation(self, pred_fm, meta): loss_eval = {} pred_fm = pred_fm.squeeze() loss_mask = meta["loss_mask"].squeeze().to(pred_fm.device) counts = meta["counts"].reshape(-1).to(pred_fm.device) fm_no_crop = meta["fm_no_crop"].squeeze() vm_no_crop = meta["vm_no_crop"].squeeze() pred_fm = (pred_fm > 0.5).to(torch.int64) full_iou = video_iou(pred_fm, fm_no_crop) occ_iou = video_iou(pred_fm-vm_no_crop, fm_no_crop-vm_no_crop) m_full_iou = (counts * full_iou).sum() / counts.sum() m_occ_iou = (counts * occ_iou).sum() / counts.sum() loss_eval["iou"] = m_full_iou loss_eval["invisible_iou_"] = m_occ_iou loss_eval["iou_count"] = torch.Tensor([1]).cuda() loss_eval["occ_count"] = torch.Tensor([1]).cuda() # post-process pred_fm = pred_fm * (1 - loss_mask) + vm_no_crop * loss_mask pred_fm = (pred_fm > 0.5).to(torch.int64) full_iou = video_iou(pred_fm, fm_no_crop) occ_iou = video_iou(pred_fm-vm_no_crop, fm_no_crop-vm_no_crop) m_full_iou = (counts * full_iou).sum() / counts.sum() m_occ_iou = (counts * occ_iou).sum() / counts.sum() loss_eval["iou_post"] = m_full_iou loss_eval["invisible_iou_post"] = m_occ_iou return loss_eval def backward(self, loss=None): self.opt.zero_grad() loss.backward() self.opt.step() self.sche.step() def top_k_logits(self, logits, k): v, ix = torch.topk(logits, k) out = logits.clone() out[out < v[..., [-1]]] = -float('Inf') return out @torch.no_grad() def batch_predict_maskgit(self, meta, iter, mode, temperature=1.0, T=3, start_iter=0): ''' :param x:[B,3,H,W] image :param c:[b,X,H,W] condition :param mask: [1,1,H,W] mask ''' self.sample_iter += 1 img_feat = self.img_encoder(meta['img_crop'].squeeze().permute((0,3,1,2)).to(torch.float32)) _, src_indices = self.encode_to_z(meta['vm_crop']) # _, tgt_indices = self.encode_to_z(meta['fm_crop']) bhwc = (_.shape[0], _.shape[2], _.shape[3], _.shape[1]) masked_indices = self.mask_token_idx * torch.ones_like(src_indices, device=src_indices.device) # [B, L] unknown_number_in_the_beginning = torch.sum(masked_indices == self.mask_token_idx, dim=-1) # [B] gamma = self.gamma_func("cosine") cur_ids = masked_indices # [B, L] seq_out = [] mask_out = [] for t in range(start_iter, T): logits = self.transformer(img_feat[-1], src_indices, cur_ids, mask=None) # [B, L, N] logits = logits[..., :-1] logits = self.top_k_logits(logits, k=3) probs = F.softmax(logits, dim=-1) # convert logits into probs [B, 256, vocab_size+1] sampled_ids = torch.distributions.categorical.Categorical(probs=probs).sample() # [B, L] unknown_map = (cur_ids == self.mask_token_idx) # which tokens need to be sampled -> bool [B, 256] sampled_ids = torch.where(unknown_map, sampled_ids, cur_ids) # replace all -1 with their samples and leave the others untouched [B, 256] seq_out.append(sampled_ids) mask_out.append(1. * unknown_map) ratio = 1. * (t + 1) / T # just a percentage e.g. 1 / 12 mask_ratio = gamma(ratio) selected_probs = probs.gather(dim=-1, index=sampled_ids.unsqueeze(-1)).squeeze(-1) selected_probs = torch.where(unknown_map, selected_probs, torch.Tensor([np.inf]).to(logits.device)) # ignore tokens which are already sampled [B, 256] mask_len = torch.unsqueeze(torch.floor(unknown_number_in_the_beginning * mask_ratio), 1) # floor(256 * 0.99) = 254 --> [254, 254, 254, 254, ....] 
(B x 1) mask_len = torch.maximum(torch.ones_like(mask_len), torch.minimum(torch.sum(unknown_map, dim=-1, keepdim=True) - 1, mask_len)) # Adds noise for randomness masking = self.mask_by_random_topk(mask_len, selected_probs, temperature=self.choice_temperature * (1. - ratio)) # Masks tokens with lower confidence. cur_ids = torch.where(masking, self.mask_token_idx, sampled_ids) # [B, L] seq_ids = torch.stack(seq_out, dim=1) # [B, T, L] quant_z = self.g_model.quantize.get_codebook_entry(seq_ids[:,-1,:].reshape(-1), shape=bhwc) pred_fm_crop = self.g_model.decode(quant_z) pred_fm_crop = pred_fm_crop.mean(dim=1, keepdim=True) pred_fm_crop_old = torch.clamp(pred_fm_crop, min=0, max=1) pred_vm_crop, pred_fm_crop = self.refine_module(img_feat, pred_fm_crop_old) pred_vm_crop = F.interpolate(pred_vm_crop, size=(256, 256), mode="nearest") pred_vm_crop = torch.sigmoid(pred_vm_crop) loss_vm = self.refine_criterion(pred_vm_crop, meta['vm_crop'].transpose(1,0)) # pred_vm_crop = (pred_vm_crop>=0.5).to(torch.float32) pred_fm_crop = F.interpolate(pred_fm_crop, size=(256, 256), mode="nearest") pred_fm_crop = torch.sigmoid(pred_fm_crop) loss_fm = self.refine_criterion(pred_fm_crop, meta['fm_crop'].transpose(1,0)) # pred_fm_crop = (pred_fm_crop>=0.5).to(torch.float32) pred_fm_crop_old = self.align_raw_size(pred_fm_crop_old, meta['obj_position'], meta["vm_pad"], meta) # pred_vm = self.align_raw_size(pred_vm_crop, meta['obj_position'], meta["vm_pad"], meta) pred_fm = self.align_raw_size(pred_fm_crop, meta['obj_position'], meta["vm_pad"], meta) pred_fm = pred_fm + pred_fm_crop_old loss_eval = self.loss_and_evaluation(pred_fm, meta) loss_eval["loss_fm"] = loss_fm loss_eval["loss_vm"] = loss_vm return loss_eval def create_inputs_tokens_normal(self, num, device): self.num_latent_size = self.config['resolution'] // self.config['patch_size'] blank_tokens = torch.ones((num, self.num_latent_size ** 2), device=device) masked_tokens = self.mask_token_idx * blank_tokens return masked_tokens.to(torch.int64) def gamma_func(self, mode="cosine"): if mode == "linear": return lambda r: 1 - r elif mode == "cosine": return lambda r: np.cos(r * np.pi / 2) elif mode == "square": return lambda r: 1 - r ** 2 elif mode == "cubic": return lambda r: 1 - r ** 3 elif mode == "log": return lambda r, total_unknown: - np.log2(r) / np.log2(total_unknown) else: raise NotImplementedError def mask_by_random_topk(self, mask_len, probs, temperature=1.0): confidence = torch.log(probs) + temperature * torch.distributions.gumbel.Gumbel(0, 1).sample(probs.shape).to(probs.device) sorted_confidence, _ = torch.sort(confidence, dim=-1) # from small to large # Obtains cut off threshold given the mask lengths. # cut_off = torch.take_along_dim(sorted_confidence, mask_len.to(torch.long), dim=-1) cut_off = sorted_confidence.gather(dim=-1, index=mask_len.to(torch.long)) # Masks tokens with lower confidence. masking = (confidence < cut_off) return masking def load(self, is_test=False, prefix=None): if prefix is not None: transformer_path = self.transformer_path + prefix + '.pth' else: transformer_path = self.transformer_path + '_last.pth' if self.config.restore or is_test: # transformer_path = '/home/ubuntu/AmodalVQGAN_pre/check_points/fish_amodal_transformer_gjx/GETransformer_210000.pth' if os.path.exists(transformer_path): print('Rank {} is loading {} Transformer...'.format(self.rank, transformer_path)) data = torch.load(transformer_path, map_location="cpu")
torch_init_model(self.transformer, transformer_path, 'model')
9
2023-12-21 04:25:47+00:00
16k
lipku/metahuman-stream
main.py
[ { "identifier": "NeRFDataset", "path": "nerf_triplane/provider.py", "snippet": "class NeRFDataset:\n def __init__(self, opt, device, type='train', downscale=1):\n super().__init__()\n \n self.opt = opt\n self.device = device\n self.type = type # train, val, test\n ...
import torch import argparse from nerf_triplane.provider import NeRFDataset from nerf_triplane.utils import * from nerf_triplane.network import NeRFNetwork from nerf_triplane.gui import NeRFGUI
11,006
parser.add_argument('--dt_gamma', type=float, default=1/256, help="dt_gamma (>=0) for adaptive ray marching. set to 0 to disable, >0 to accelerate rendering (but usually with worse quality)") parser.add_argument('--min_near', type=float, default=0.05, help="minimum near distance for camera") parser.add_argument('--density_thresh', type=float, default=10, help="threshold for density grid to be occupied (sigma)") parser.add_argument('--density_thresh_torso', type=float, default=0.01, help="threshold for density grid to be occupied (alpha)") parser.add_argument('--patch_size', type=int, default=1, help="[experimental] render patches in training, so as to apply LPIPS loss. 1 means disabled, use [64, 32, 16] to enable") parser.add_argument('--init_lips', action='store_true', help="init lips region") parser.add_argument('--finetune_lips', action='store_true', help="use LPIPS and landmarks to fine tune lips region") parser.add_argument('--smooth_lips', action='store_true', help="smooth the enc_a in a exponential decay way...") parser.add_argument('--torso', action='store_true', help="fix head and train torso") parser.add_argument('--head_ckpt', type=str, default='', help="head model") ### GUI options parser.add_argument('--gui', action='store_true', help="start a GUI") parser.add_argument('--W', type=int, default=450, help="GUI width") parser.add_argument('--H', type=int, default=450, help="GUI height") parser.add_argument('--radius', type=float, default=3.35, help="default GUI camera radius from center") parser.add_argument('--fovy', type=float, default=21.24, help="default GUI camera fovy") parser.add_argument('--max_spp', type=int, default=1, help="GUI rendering max sample per pixel") ### else parser.add_argument('--att', type=int, default=2, help="audio attention mode (0 = turn off, 1 = left-direction, 2 = bi-direction)") parser.add_argument('--aud', type=str, default='', help="audio source (empty will load the default, else should be a path to a npy file)") parser.add_argument('--emb', action='store_true', help="use audio class + embedding instead of logits") parser.add_argument('--ind_dim', type=int, default=4, help="individual code dim, 0 to turn off") parser.add_argument('--ind_num', type=int, default=10000, help="number of individual codes, should be larger than training dataset size") parser.add_argument('--ind_dim_torso', type=int, default=8, help="individual code dim, 0 to turn off") parser.add_argument('--amb_dim', type=int, default=2, help="ambient dimension") parser.add_argument('--part', action='store_true', help="use partial training data (1/10)") parser.add_argument('--part2', action='store_true', help="use partial training data (first 15s)") parser.add_argument('--train_camera', action='store_true', help="optimize camera pose") parser.add_argument('--smooth_path', action='store_true', help="brute-force smooth camera pose trajectory with a window size") parser.add_argument('--smooth_path_window', type=int, default=7, help="smoothing window size") # asr parser.add_argument('--asr', action='store_true', help="load asr for real-time app") parser.add_argument('--asr_wav', type=str, default='', help="load the wav and use as input") parser.add_argument('--asr_play', action='store_true', help="play out the audio") parser.add_argument('--asr_model', type=str, default='deepspeech') # parser.add_argument('--asr_model', type=str, default='cpierse/wav2vec2-large-xlsr-53-esperanto') # parser.add_argument('--asr_model', type=str, default='facebook/wav2vec2-large-960h-lv60-self') 
parser.add_argument('--asr_save_feats', action='store_true') # audio FPS parser.add_argument('--fps', type=int, default=50) # sliding window left-middle-right length (unit: 20ms) parser.add_argument('-l', type=int, default=10) parser.add_argument('-m', type=int, default=50) parser.add_argument('-r', type=int, default=10) opt = parser.parse_args() if opt.O: opt.fp16 = True opt.exp_eye = True if opt.test and False: opt.smooth_path = True opt.smooth_eye = True opt.smooth_lips = True opt.cuda_ray = True # assert opt.cuda_ray, "Only support CUDA ray mode." if opt.patch_size > 1: # assert opt.patch_size > 16, "patch_size should > 16 to run LPIPS loss." assert opt.num_rays % (opt.patch_size ** 2) == 0, "patch_size ** 2 should be dividable by num_rays." # if opt.finetune_lips: # # do not update density grid in finetune stage # opt.update_extra_interval = 1e9 print(opt) seed_everything(opt.seed) device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') model = NeRFNetwork(opt) # manually load state dict for head if opt.torso and opt.head_ckpt != '': model_dict = torch.load(opt.head_ckpt, map_location='cpu')['model'] missing_keys, unexpected_keys = model.load_state_dict(model_dict, strict=False) if len(missing_keys) > 0: print(f"[WARN] missing keys: {missing_keys}") if len(unexpected_keys) > 0: print(f"[WARN] unexpected keys: {unexpected_keys}") # freeze these keys for k, v in model.named_parameters(): if k in model_dict: # print(f'[INFO] freeze {k}, {v.shape}') v.requires_grad = False # print(model) criterion = torch.nn.MSELoss(reduction='none') if opt.test: if opt.gui: metrics = [] # use no metric in GUI for faster initialization... else: # metrics = [PSNRMeter(), LPIPSMeter(device=device)] metrics = [PSNRMeter(), LPIPSMeter(device=device), LMDMeter(backend='fan')] trainer = Trainer('ngp', opt, model, device=device, workspace=opt.workspace, criterion=criterion, fp16=opt.fp16, metrics=metrics, use_checkpoint=opt.ckpt) if opt.test_train:
# torch.autograd.set_detect_anomaly(True) # Close tf32 features. Fix low numerical accuracy on rtx30xx gpu. try: torch.backends.cuda.matmul.allow_tf32 = False torch.backends.cudnn.allow_tf32 = False except AttributeError as e: print('Info. This pytorch version is not support with tf32.') if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('path', type=str) parser.add_argument('-O', action='store_true', help="equals --fp16 --cuda_ray --exp_eye") parser.add_argument('--test', action='store_true', help="test mode (load model and test dataset)") parser.add_argument('--test_train', action='store_true', help="test mode (load model and train dataset)") parser.add_argument('--data_range', type=int, nargs='*', default=[0, -1], help="data range to use") parser.add_argument('--workspace', type=str, default='workspace') parser.add_argument('--seed', type=int, default=0) ### training options parser.add_argument('--iters', type=int, default=200000, help="training iters") parser.add_argument('--lr', type=float, default=1e-2, help="initial learning rate") parser.add_argument('--lr_net', type=float, default=1e-3, help="initial learning rate") parser.add_argument('--ckpt', type=str, default='latest') parser.add_argument('--num_rays', type=int, default=4096 * 16, help="num rays sampled per image for each training step") parser.add_argument('--cuda_ray', action='store_true', help="use CUDA raymarching instead of pytorch") parser.add_argument('--max_steps', type=int, default=16, help="max num steps sampled per ray (only valid when using --cuda_ray)") parser.add_argument('--num_steps', type=int, default=16, help="num steps sampled per ray (only valid when NOT using --cuda_ray)") parser.add_argument('--upsample_steps', type=int, default=0, help="num steps up-sampled per ray (only valid when NOT using --cuda_ray)") parser.add_argument('--update_extra_interval', type=int, default=16, help="iter interval to update extra status (only valid when using --cuda_ray)") parser.add_argument('--max_ray_batch', type=int, default=4096, help="batch size of rays at inference to avoid OOM (only valid when NOT using --cuda_ray)") ### loss set parser.add_argument('--warmup_step', type=int, default=10000, help="warm up steps") parser.add_argument('--amb_aud_loss', type=int, default=1, help="use ambient aud loss") parser.add_argument('--amb_eye_loss', type=int, default=1, help="use ambient eye loss") parser.add_argument('--unc_loss', type=int, default=1, help="use uncertainty loss") parser.add_argument('--lambda_amb', type=float, default=1e-4, help="lambda for ambient loss") ### network backbone options parser.add_argument('--fp16', action='store_true', help="use amp mixed precision training") parser.add_argument('--bg_img', type=str, default='', help="background image") parser.add_argument('--fbg', action='store_true', help="frame-wise bg") parser.add_argument('--exp_eye', action='store_true', help="explicitly control the eyes") parser.add_argument('--fix_eye', type=float, default=-1, help="fixed eye area, negative to disable, set to 0-0.3 for a reasonable eye") parser.add_argument('--smooth_eye', action='store_true', help="smooth the eye area sequence") parser.add_argument('--torso_shrink', type=float, default=0.8, help="shrink bg coords to allow more flexibility in deform") ### dataset options parser.add_argument('--color_space', type=str, default='srgb', help="Color space, supports (linear, srgb)") parser.add_argument('--preload', type=int, default=0, help="0 means load data from disk on-the-fly, 1 
means preload to CPU, 2 means GPU.") # (the default value is for the fox dataset) parser.add_argument('--bound', type=float, default=1, help="assume the scene is bounded in box[-bound, bound]^3, if > 1, will invoke adaptive ray marching.") parser.add_argument('--scale', type=float, default=4, help="scale camera location into box[-bound, bound]^3") parser.add_argument('--offset', type=float, nargs='*', default=[0, 0, 0], help="offset of camera location") parser.add_argument('--dt_gamma', type=float, default=1/256, help="dt_gamma (>=0) for adaptive ray marching. set to 0 to disable, >0 to accelerate rendering (but usually with worse quality)") parser.add_argument('--min_near', type=float, default=0.05, help="minimum near distance for camera") parser.add_argument('--density_thresh', type=float, default=10, help="threshold for density grid to be occupied (sigma)") parser.add_argument('--density_thresh_torso', type=float, default=0.01, help="threshold for density grid to be occupied (alpha)") parser.add_argument('--patch_size', type=int, default=1, help="[experimental] render patches in training, so as to apply LPIPS loss. 1 means disabled, use [64, 32, 16] to enable") parser.add_argument('--init_lips', action='store_true', help="init lips region") parser.add_argument('--finetune_lips', action='store_true', help="use LPIPS and landmarks to fine tune lips region") parser.add_argument('--smooth_lips', action='store_true', help="smooth the enc_a in a exponential decay way...") parser.add_argument('--torso', action='store_true', help="fix head and train torso") parser.add_argument('--head_ckpt', type=str, default='', help="head model") ### GUI options parser.add_argument('--gui', action='store_true', help="start a GUI") parser.add_argument('--W', type=int, default=450, help="GUI width") parser.add_argument('--H', type=int, default=450, help="GUI height") parser.add_argument('--radius', type=float, default=3.35, help="default GUI camera radius from center") parser.add_argument('--fovy', type=float, default=21.24, help="default GUI camera fovy") parser.add_argument('--max_spp', type=int, default=1, help="GUI rendering max sample per pixel") ### else parser.add_argument('--att', type=int, default=2, help="audio attention mode (0 = turn off, 1 = left-direction, 2 = bi-direction)") parser.add_argument('--aud', type=str, default='', help="audio source (empty will load the default, else should be a path to a npy file)") parser.add_argument('--emb', action='store_true', help="use audio class + embedding instead of logits") parser.add_argument('--ind_dim', type=int, default=4, help="individual code dim, 0 to turn off") parser.add_argument('--ind_num', type=int, default=10000, help="number of individual codes, should be larger than training dataset size") parser.add_argument('--ind_dim_torso', type=int, default=8, help="individual code dim, 0 to turn off") parser.add_argument('--amb_dim', type=int, default=2, help="ambient dimension") parser.add_argument('--part', action='store_true', help="use partial training data (1/10)") parser.add_argument('--part2', action='store_true', help="use partial training data (first 15s)") parser.add_argument('--train_camera', action='store_true', help="optimize camera pose") parser.add_argument('--smooth_path', action='store_true', help="brute-force smooth camera pose trajectory with a window size") parser.add_argument('--smooth_path_window', type=int, default=7, help="smoothing window size") # asr parser.add_argument('--asr', action='store_true', help="load asr for real-time 
app") parser.add_argument('--asr_wav', type=str, default='', help="load the wav and use as input") parser.add_argument('--asr_play', action='store_true', help="play out the audio") parser.add_argument('--asr_model', type=str, default='deepspeech') # parser.add_argument('--asr_model', type=str, default='cpierse/wav2vec2-large-xlsr-53-esperanto') # parser.add_argument('--asr_model', type=str, default='facebook/wav2vec2-large-960h-lv60-self') parser.add_argument('--asr_save_feats', action='store_true') # audio FPS parser.add_argument('--fps', type=int, default=50) # sliding window left-middle-right length (unit: 20ms) parser.add_argument('-l', type=int, default=10) parser.add_argument('-m', type=int, default=50) parser.add_argument('-r', type=int, default=10) opt = parser.parse_args() if opt.O: opt.fp16 = True opt.exp_eye = True if opt.test and False: opt.smooth_path = True opt.smooth_eye = True opt.smooth_lips = True opt.cuda_ray = True # assert opt.cuda_ray, "Only support CUDA ray mode." if opt.patch_size > 1: # assert opt.patch_size > 16, "patch_size should > 16 to run LPIPS loss." assert opt.num_rays % (opt.patch_size ** 2) == 0, "patch_size ** 2 should be dividable by num_rays." # if opt.finetune_lips: # # do not update density grid in finetune stage # opt.update_extra_interval = 1e9 print(opt) seed_everything(opt.seed) device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') model = NeRFNetwork(opt) # manually load state dict for head if opt.torso and opt.head_ckpt != '': model_dict = torch.load(opt.head_ckpt, map_location='cpu')['model'] missing_keys, unexpected_keys = model.load_state_dict(model_dict, strict=False) if len(missing_keys) > 0: print(f"[WARN] missing keys: {missing_keys}") if len(unexpected_keys) > 0: print(f"[WARN] unexpected keys: {unexpected_keys}") # freeze these keys for k, v in model.named_parameters(): if k in model_dict: # print(f'[INFO] freeze {k}, {v.shape}') v.requires_grad = False # print(model) criterion = torch.nn.MSELoss(reduction='none') if opt.test: if opt.gui: metrics = [] # use no metric in GUI for faster initialization... else: # metrics = [PSNRMeter(), LPIPSMeter(device=device)] metrics = [PSNRMeter(), LPIPSMeter(device=device), LMDMeter(backend='fan')] trainer = Trainer('ngp', opt, model, device=device, workspace=opt.workspace, criterion=criterion, fp16=opt.fp16, metrics=metrics, use_checkpoint=opt.ckpt) if opt.test_train:
test_set = NeRFDataset(opt, device=device, type='train')
0
2023-12-19 01:32:46+00:00
16k
MingtaoGuo/AnimateAnyone_unofficial
ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n ...
import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
import itertools
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager, nullcontext
from functools import partial
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities.distributed import rank_zero_only
from omegaconf import ListConfig
from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from ldm.modules.ema import LitEma
from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL
from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from ldm.models.diffusion.ddim import DDIMSampler
11,828
self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. * 1 - torch.Tensor(alphas_cumprod)) elif self.parameterization == "v": lvlb_weights = torch.ones_like(self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))) else: raise NotImplementedError("mu not supported") lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] if self.make_it_fit: n_params = len([name for name, _ in itertools.chain(self.named_parameters(), self.named_buffers())]) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): 
new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys:\n {missing}") if len(unexpected) > 0: print(f"\nUnexpected Keys:\n {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. """
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., make_it_fit=False, ucg_training=None, reset_ema=False, reset_num_ema_updates=False, ): super().__init__() assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if reset_ema: assert exists(ckpt_path) if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) if reset_ema: assert self.use_ema print(f"Resetting ema to pure model weights. 
This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) else: self.register_buffer('logvar', logvar) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) elif self.parameterization == "v": lvlb_weights = torch.ones_like(self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))) else: raise NotImplementedError("mu not supported") lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] if self.make_it_fit: n_params = len([name for name, _ in itertools.chain(self.named_parameters(), self.named_buffers())]) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys:\n {missing}") if len(unexpected) > 0: print(f"\nUnexpected Keys:\n {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. """
mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start)
14
2023-12-16 03:31:33+00:00
16k
yasserben/CLOUDS
train_net.py
[ { "identifier": "add_maskformer2_config", "path": "clouds/config.py", "snippet": "def add_maskformer2_config(cfg):\n \"\"\"\n Add config for MASK_FORMER.\n \"\"\"\n # NOTE: configs from original maskformer\n # data config\n # select the dataset mapper\n cfg.INPUT.DATASET_MAPPER_NAME...
from shapely.errors import ShapelyDeprecationWarning
from collections import OrderedDict
from typing import Any, Dict, List, Set
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import (
    MetadataCatalog,
    build_detection_train_loader,
    build_detection_test_loader,
)
from detectron2.engine import (
    DefaultTrainer,
    default_argument_parser,
    default_setup,
    launch,
)
from detectron2.modeling import build_model
from detectron2.evaluation import (
    CityscapesInstanceEvaluator,
    CityscapesSemSegEvaluator,
    COCOEvaluator,
    COCOPanopticEvaluator,
    DatasetEvaluators,
    LVISEvaluator,
    SemSegEvaluator,
    verify_results,
    inference_on_dataset,
    print_csv_format,
    DatasetEvaluator,
)
from detectron2.projects.deeplab import add_deeplab_config, build_lr_scheduler
from detectron2.solver.build import maybe_add_gradient_clipping
from detectron2.utils.logger import setup_logger
from detectron2.engine import hooks
from fvcore.nn.precise_bn import get_bn_modules
from clouds import (
    CityscapesSemSegEvaluator,
    ClassicalSemSegEvaluator,
    MapperTrain,
    MapperTest,
    add_maskformer2_config,
    add_clouds_config,
    add_wandb_config,
    add_prerocessing_training_set_config,
    PersoEvalHook,
    add_repeat_factors,
)
from clouds.utils import setup_wandb, WandbWriter
import warnings
import copy
import itertools
import logging
import os
import ast
import torch
import detectron2.utils.comm as comm
12,937
""" Copyright 2023 Telecom Paris, Yasser BENIGMIM. All rights reserved. Licensed under the Apache License, Version 2.0 Reference: https://github.com/facebookresearch/Mask2Former/blob/main/train_net.py CLOUDS Training Script. This script is a simplified version of the training script in detectron2/tools. """ try: # ignore ShapelyDeprecationWarning from fvcore warnings.filterwarnings("ignore", category=ShapelyDeprecationWarning) except: pass class Trainer(DefaultTrainer): """ Extension of the Trainer class adapted to CLOUDS. """ def build_writers(self): writers = super().build_writers() # use wandb writer instead. writers[-1] = WandbWriter() return writers @classmethod def build_model(cls, cfg): """ Returns: torch.nn.Module: It now calls :func:`detectron2.modeling.build_model`. Overwrite it if you'd like a different model. """ model = build_model(cfg) # logger = logging.getLogger(__name__) # logger.info("Model:\n{}".format(model)) return model # @classmethod # def build_model(cls, cfg): # """ # Returns: # torch.nn.Module: # # It now calls :func:`detectron2.modeling.build_model`. # Overwrite it if you'd like a different model. # """ # model = build_model(cfg) # # logger = logging.getLogger(__name__) # # logger.info("Model:\n{}".format(model)) # return model @classmethod def build_evaluator(cls, cfg, dataset_name, output_folder=None): """ Create evaluator(s) for a given dataset. This uses the special metadata "evaluator_type" associated with each builtin dataset. For your own dataset, you can simply create an evaluator manually in your script and do not have to worry about the hacky if-else logic here. """ if output_folder is None: output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") else: output_folder = os.path.join(cfg.OUTPUT_DIR, output_folder, "inference") evaluator_list = [] evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type # semantic segmentation if ( evaluator_type == "bdd_sem_seg" or evaluator_type == "mapillary_sem_seg" or evaluator_type == "acdc_sem_seg" ): evaluator_list.append( ClassicalSemSegEvaluator( dataset_name, distributed=True, output_dir=output_folder, save_pl=cfg.MODEL.SAVE_PSEUDO_LABELS, ) ) # Cityscapes if evaluator_type == "cityscapes_sem_seg": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." # return CityscapesSemSegEvaluator(dataset_name) if cfg.MODEL.SAVE_PSEUDO_LABELS: return CityscapesSemSegEvaluator( dataset_name, save_pl=True, output_dir=output_folder ) else: return CityscapesSemSegEvaluator(dataset_name) if len(evaluator_list) == 0: raise NotImplementedError( "no Evaluator for the dataset {} with the type {}".format( dataset_name, evaluator_type ) ) elif len(evaluator_list) == 1: return evaluator_list[0] return DatasetEvaluators(evaluator_list) @classmethod def build_train_loader(cls, cfg): # Semantic segmentation dataset mapper
""" Copyright 2023 Telecom Paris, Yasser BENIGMIM. All rights reserved. Licensed under the Apache License, Version 2.0 Reference: https://github.com/facebookresearch/Mask2Former/blob/main/train_net.py CLOUDS Training Script. This script is a simplified version of the training script in detectron2/tools. """ try: # ignore ShapelyDeprecationWarning from fvcore warnings.filterwarnings("ignore", category=ShapelyDeprecationWarning) except: pass class Trainer(DefaultTrainer): """ Extension of the Trainer class adapted to CLOUDS. """ def build_writers(self): writers = super().build_writers() # use wandb writer instead. writers[-1] = WandbWriter() return writers @classmethod def build_model(cls, cfg): """ Returns: torch.nn.Module: It now calls :func:`detectron2.modeling.build_model`. Overwrite it if you'd like a different model. """ model = build_model(cfg) # logger = logging.getLogger(__name__) # logger.info("Model:\n{}".format(model)) return model # @classmethod # def build_model(cls, cfg): # """ # Returns: # torch.nn.Module: # # It now calls :func:`detectron2.modeling.build_model`. # Overwrite it if you'd like a different model. # """ # model = build_model(cfg) # # logger = logging.getLogger(__name__) # # logger.info("Model:\n{}".format(model)) # return model @classmethod def build_evaluator(cls, cfg, dataset_name, output_folder=None): """ Create evaluator(s) for a given dataset. This uses the special metadata "evaluator_type" associated with each builtin dataset. For your own dataset, you can simply create an evaluator manually in your script and do not have to worry about the hacky if-else logic here. """ if output_folder is None: output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") else: output_folder = os.path.join(cfg.OUTPUT_DIR, output_folder, "inference") evaluator_list = [] evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type # semantic segmentation if ( evaluator_type == "bdd_sem_seg" or evaluator_type == "mapillary_sem_seg" or evaluator_type == "acdc_sem_seg" ): evaluator_list.append( ClassicalSemSegEvaluator( dataset_name, distributed=True, output_dir=output_folder, save_pl=cfg.MODEL.SAVE_PSEUDO_LABELS, ) ) # Cityscapes if evaluator_type == "cityscapes_sem_seg": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." # return CityscapesSemSegEvaluator(dataset_name) if cfg.MODEL.SAVE_PSEUDO_LABELS: return CityscapesSemSegEvaluator( dataset_name, save_pl=True, output_dir=output_folder ) else: return CityscapesSemSegEvaluator(dataset_name) if len(evaluator_list) == 0: raise NotImplementedError( "no Evaluator for the dataset {} with the type {}".format( dataset_name, evaluator_type ) ) elif len(evaluator_list) == 1: return evaluator_list[0] return DatasetEvaluators(evaluator_list) @classmethod def build_train_loader(cls, cfg): # Semantic segmentation dataset mapper
mapper = MapperTrain(cfg, True)
5
2023-12-15 15:40:58+00:00
16k
modelscope/scepter
scepter/studio/preprocess/preprocess.py
[ { "identifier": "Config", "path": "scepter/modules/utils/config.py", "snippet": "class Config(object):\n def __init__(self,\n cfg_dict={},\n load=True,\n cfg_file=None,\n logger=None,\n parser_ins=None):\n '''\n ...
import os.path
import gradio as gr
from scepter.modules.utils.config import Config
from scepter.modules.utils.file_system import FS
from scepter.studio.preprocess.caption_editor_ui.create_dataset_ui import \
    CreateDatasetUI
from scepter.studio.preprocess.caption_editor_ui.dataset_gallery_ui import \
    DatasetGalleryUI
from scepter.studio.preprocess.caption_editor_ui.export_dataset_ui import \
    ExportDatasetUI
from scepter.studio.utils.env import init_env
13,149
# -*- coding: utf-8 -*-
class PreprocessUI():
    def __init__(self, cfg_general_file, is_debug=False, language='en', root_work_dir='./'):
        cfg_general = Config(cfg_file=cfg_general_file)
        cfg_general.WORK_DIR = os.path.join(root_work_dir, cfg_general.WORK_DIR)
# -*- coding: utf-8 -*-
class PreprocessUI():
    def __init__(self, cfg_general_file, is_debug=False, language='en', root_work_dir='./'):
        cfg_general = Config(cfg_file=cfg_general_file)
        cfg_general.WORK_DIR = os.path.join(root_work_dir, cfg_general.WORK_DIR)
if not FS.exists(cfg_general.WORK_DIR):
1
2023-12-21 02:01:48+00:00
16k
RomGai/BrainVis
dc_ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "dc_ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n ...
import os
import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
import torch.nn.functional as F
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager
from functools import partial
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities.rank_zero import rank_zero_only
from dc_ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from dc_ldm.modules.ema import LitEma
from dc_ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
from dc_ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL
from dc_ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from dc_ldm.models.diffusion.ddim import DDIMSampler
from dc_ldm.models.diffusion.plms import PLMSSampler
from PIL import Image
from eval_metrics import get_similarity_metric
from dc_ldm.modules.encoders.modules import FrozenImageEmbedder
14,227
b = batch_size if batch_size is not None else shape[0] shape = [batch_size] + list(shape) else: b = batch_size = shape[0] if x_T is None: img = torch.randn(shape, device=self.device) else: img = x_T intermediates = [] if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation', total=timesteps) if verbose else reversed( range(0, timesteps)) if type(temperature) == float: temperature = [temperature] * timesteps for i in iterator: ts = torch.full((b,), i, device=self.device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img, x0_partial = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised, return_x0=True, temperature=temperature[i], noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if mask is not None: assert x0 is not None img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(x0_partial) if callback: callback(i) if img_callback: img_callback(img, i) return img, intermediates @torch.no_grad() def p_sample_loop(self, cond, shape, return_intermediates=False, x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t device = self.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T intermediates = [img] if timesteps is None: timesteps = self.num_timesteps if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( range(0, timesteps)) if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. 
- mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None,**kwargs): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0) @torch.no_grad() def sample_log(self,cond,batch_size,ddim, ddim_steps,**kwargs): if ddim:
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., ddim_steps=300 ): super().__init__() assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? 
self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) self.validation_count = 0 self.ddim_steps = ddim_steps self.return_cond = False self.output_path = None self.main_config = None self.best_val = 0.0 self.run_full_validation_threshold = 0.0 self.eval_avg = True def re_init_ema(self): if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. 
- alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. * 1 - torch.Tensor(alphas_cumprod)) else: raise NotImplementedError("mu not supported") # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) 
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) def get_loss(self, pred, target, mean=True): if self.loss_type == 'l1': loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == 'l2': if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction='none') else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start else: raise NotImplementedError(f"Paramterization {self.parameterization} not yet supported") loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = 'train' if self.training else 'val' loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f'{log_prefix}/loss': loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: x = x[..., None] x = rearrange(x, 'b h w c -> b c h w') x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def 
training_step(self, batch, batch_idx): self.train() self.cond_stage_model.train() ###到底是在哪里训练的 loss, loss_dict = self.shared_step(batch) self.log_dict(loss_dict, prog_bar=True, logger=True, on_step=False, on_epoch=True) if self.use_scheduler: lr = self.optimizers().param_groups[0]['lr'] self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=False, on_epoch=True) return loss @torch.no_grad() def generate(self, data, num_samples, ddim_steps=300, HW=None, limit=None, state=None): # fmri_embedding: n, seq_len, embed_dim all_samples = [] if HW is None: shape = (self.p_channels, self.p_image_size, self.p_image_size) else: num_resolutions = len(self.ch_mult) shape = (self.p_channels, HW[0] // 2**(num_resolutions-1), HW[1] // 2**(num_resolutions-1)) model = self sampler = PLMSSampler(model) # sampler = DDIMSampler(model) model.eval() if torch.cuda.is_available(): state = torch.cuda.get_rng_state() if state is None else state torch.cuda.set_rng_state(state) else: state = torch.get_rng_state() if state is None else state torch.set_rng_state(state) # rng = torch.Generator(device=self.device).manual_seed(2022).set_state(state) # state = torch.cuda.get_rng_state() with model.ema_scope(): for count, item in enumerate(zip(data['eeg'], data['image'])): if limit is not None: if count >= limit: break latent = item[0] # fmri embedding gt_image = rearrange(item[1], 'h w c -> 1 c h w') # h w c print(f"rendering {num_samples} examples in {ddim_steps} steps.") # c = model.get_learned_conditioning(repeat(latent, 'h w -> c h w', c=num_samples).to(self.device)) c, re_latent = model.get_learned_conditioning(repeat(latent, 'h w -> c h w', c=num_samples).to(self.device)) samples_ddim, _ = sampler.sample(S=ddim_steps, conditioning=c, batch_size=num_samples, shape=shape, verbose=False, generator=None) x_samples_ddim = model.decode_first_stage(samples_ddim) x_samples_ddim = torch.clamp((x_samples_ddim+1.0)/2.0,min=0.0, max=1.0) gt_image = torch.clamp((gt_image+1.0)/2.0,min=0.0, max=1.0) all_samples.append(torch.cat([gt_image.detach().cpu(), x_samples_ddim.detach().cpu()], dim=0)) # put groundtruth at first # display as grid grid = torch.stack(all_samples, 0) grid = rearrange(grid, 'n b c h w -> (n b) c h w') grid = make_grid(grid, nrow=num_samples+1) # to image grid = 255. * rearrange(grid, 'c h w -> h w c').cpu().numpy() return grid, (255. * torch.stack(all_samples, 0).cpu().numpy()).astype(np.uint8), state def save_images(self, all_samples, suffix=0): # print('output_path') # print(self.output_path) if self.output_path is not None: os.makedirs(os.path.join(self.output_path, 'val', f'{self.validation_count}_{suffix}'), exist_ok=True) for sp_idx, imgs in enumerate(all_samples): # for copy_idx, img in enumerate(imgs[1:]): for copy_idx, img in enumerate(imgs): img = rearrange(img, 'c h w -> h w c') Image.fromarray(img).save(os.path.join(self.output_path, 'val', f'{self.validation_count}_{suffix}', f'test{sp_idx}-{copy_idx}.png')) def full_validation(self, batch, state=None): print('###### run full validation! 
######\n') grid, all_samples, state = self.generate(batch, ddim_steps=self.ddim_steps, num_samples=5, limit=None, state=state) metric, metric_list = self.get_eval_metric(all_samples) self.save_images(all_samples, suffix='%.4f'%metric[-1]) metric_dict = {f'val/{k}_full':v for k, v in zip(metric_list, metric)} # self.logger.log_metrics(metric_dict) grid_imgs = Image.fromarray(grid.astype(np.uint8)) # self.logger.log_image(key=f'samples_test_full', images=[grid_imgs]) if metric[-1] > self.best_val: self.best_val = metric[-1] torch.save( { 'model_state_dict': self.state_dict(), 'config': self.main_config, 'state': state }, os.path.join(self.output_path, 'checkpoint_best.pth') ) @torch.no_grad() def validation_step(self, batch, batch_idx): if batch_idx != 0: return if self.validation_count % 5 == 0 and self.trainer.current_epoch != 0: self.full_validation(batch) else: # pass grid, all_samples, state = self.generate(batch, ddim_steps=self.ddim_steps, num_samples=3, limit=5) metric, metric_list = self.get_eval_metric(all_samples, avg=self.eval_avg) grid_imgs = Image.fromarray(grid.astype(np.uint8)) # self.logger.log_image(key=f'samples_test', images=[grid_imgs]) metric_dict = {f'val/{k}':v for k, v in zip(metric_list, metric)} # self.logger.log_metrics(metric_dict) if metric[-1] > self.run_full_validation_threshold: self.full_validation(batch, state=state) self.validation_count += 1 def get_eval_metric(self, samples, avg=True): metric_list = ['mse', 'pcc', 'ssim', 'psm'] res_list = [] gt_images = [img[0] for img in samples] gt_images = rearrange(np.stack(gt_images), 'n c h w -> n h w c') samples_to_run = np.arange(1, len(samples[0])) if avg else [1] for m in metric_list: res_part = [] for s in samples_to_run: pred_images = [img[s] for img in samples] pred_images = rearrange(np.stack(pred_images), 'n c h w -> n h w c') res = get_similarity_metric(pred_images, gt_images, method='pair-wise', metric_name=m) res_part.append(np.mean(res)) res_list.append(np.mean(res_part)) res_part = [] for s in samples_to_run: pred_images = [img[s] for img in samples] pred_images = rearrange(np.stack(pred_images), 'n c h w -> n h w c') res = get_similarity_metric(pred_images, gt_images, 'class', None, n_way=50, num_trials=50, top_k=1, device='cuda') res_part.append(np.mean(res)) res_list.append(np.mean(res_part)) res_list.append(np.max(res_part)) metric_list.append('top-1-class') metric_list.append('top-1-class (max)') return res_list, metric_list def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with 
self.ema_scope("Plotting"): samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__(self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=True, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, *args, **kwargs): self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs['timesteps'] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = 'concat' if concat_mode else 'crossattn' if cond_stage_config == '__is_unconditional__': conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer('scale_factor', torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True self.train_cond_stage_only = False self.clip_tune = True if self.clip_tune: self.image_embedder = FrozenImageEmbedder() self.cls_tune = False def make_cond_schedule(self, ): self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long) ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() self.cond_ids[:self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt: assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously' # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer('scale_factor', 1. 
/ z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() def freeze_diffusion_model(self): for param in self.model.parameters(): param.requires_grad = False def unfreeze_diffusion_model(self): for param in self.model.parameters(): param.requires_grad = True def freeze_cond_stage(self): for param in self.cond_stage_model.parameters(): param.requires_grad = False def unfreeze_cond_stage(self): for param in self.cond_stage_model.parameters(): param.requires_grad = True def freeze_first_stage(self): self.first_stage_model.trainable = False for param in self.first_stage_model.parameters(): param.requires_grad = False def unfreeze_first_stage(self): self.first_stage_model.trainable = True for param in self.first_stage_model.parameters(): param.requires_grad = True def freeze_whole_model(self): self.first_stage_model.trainable = False for param in self.parameters(): param.requires_grad = False def unfreeze_whole_model(self): self.first_stage_model.trainable = True for param in self.parameters(): param.requires_grad = True def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None # self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() # self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != '__is_first_stage__' assert config != '__is_unconditional__' model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append(self.decode_first_stage(zd.to(self.device), force_not_quantize=force_no_decoder_quantization)) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior else: raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented") return self.scale_factor * z def get_learned_conditioning(self, c): # self.cond_stage_model.eval() if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode): c, re_latent = self.cond_stage_model.encode(c) # c = self.cond_stage_model.encode(c) else: c, re_latent = self.cond_stage_model(c) # c = 
self.cond_stage_model(c) # return c return c, re_latent def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height :param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0] return edge_dist def get_weighting(self, h, w, Ly, Lx, device): weighting = self.delta_border(h, w) weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"], self.split_input_params["clip_max_weight"], ) weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params["tie_braker"]: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip(L_weighting, self.split_input_params["clip_min_tie_weight"], self.split_input_params["clip_max_tie_weight"]) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf)) fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2) weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)) elif df > 1 and uf == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df), dilation=1, padding=0, stride=(stride[0] // df, stride[1] // df)) fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2) weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)) else: raise NotImplementedError return fold, unfold, normalization, weighting 
@torch.no_grad() def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None): x = super().get_input(batch, k) if bs is not None: x = x[:bs] x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) # print('encoder_posterior.shape') # print(encoder_posterior.shape) z = self.get_first_stage_encoding(encoder_posterior).detach() # print('z.shape') # print(z.shape) # print(cond_key) # print(self.cond_stage_key) # print(cond_key) if self.model.conditioning_key is not None: if cond_key is None: cond_key = self.cond_stage_key if cond_key != self.first_stage_key: if cond_key in ['caption', 'coordinates_bbox','fmri', 'eeg']: xc = batch[cond_key] elif cond_key == 'class_label': xc = batch else: xc = super().get_input(batch, cond_key).to(self.device) else: xc = x # print('get input') # print(not self.cond_stage_trainable) # print(force_c_encode) if not self.cond_stage_trainable or force_c_encode : # print('get learned condition') if isinstance(xc, dict) or isinstance(xc, list): # import pudb; pudb.set_trace() c, re_latent = self.get_learned_conditioning(xc) # c = self.get_learned_conditioning(xc) else: c, re_latent = self.get_learned_conditioning(xc.to(self.device)) # c = self.get_learned_conditioning(xc.to(self.device)) else: c = xc if bs is not None: c = c[:bs] if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) ckey = __conditioning_keys__[self.model.conditioning_key] c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y} else: c = None xc = None if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) c = {'pos_x': pos_x, 'pos_y': pos_y} out = [z, c , batch['label'], batch['image_raw']] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_original_cond: out.append(xc) return out @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) uf = self.split_input_params["vqf"] bs, nc, h, w = z.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) # 2. apply model loop over last dim if isinstance(self.first_stage_model, VQModelInterface): output_list = [self.first_stage_model.decode(z[:, :, :, :, i], force_not_quantize=predict_cids or force_not_quantize) for i in range(z.shape[-1])] else: output_list = [self.first_stage_model.decode(z[:, :, :, :, i]) for i in range(z.shape[-1])] o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) o = o * weighting # Reverse 1. 
reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization # norm is shape (1, 1, h, w) return decoded else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) # same as above but without decorator def differentiable_decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) uf = self.split_input_params["vqf"] bs, nc, h, w = z.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) # 2. apply model loop over last dim if isinstance(self.first_stage_model, VQModelInterface): output_list = [self.first_stage_model.decode(z[:, :, :, :, i], force_not_quantize=predict_cids or force_not_quantize) for i in range(z.shape[-1])] else: output_list = [self.first_stage_model.decode(z[:, :, :, :, i]) for i in range(z.shape[-1])] o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) o = o * weighting # Reverse 1. 
reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization # norm is shape (1, 1, h, w) return decoded else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) @torch.no_grad() def encode_first_stage(self, x): return self.first_stage_model.encode(x) def shared_step(self, batch, **kwargs): self.freeze_first_stage() # print('share step\'s get input') x, c, label, image_raw = self.get_input(batch, self.first_stage_key) # print('get input shape') # print('x.shape') # print(x.shape) # print('c.shape') # print(c.shape) if self.return_cond: loss, cc = self(x, c, label, image_raw) return loss, cc else: loss = self(x, c, label, image_raw) return loss def forward(self, x, c, label, image_raw, *args, **kwargs): # print(self.num_timesteps) t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() # print('t.shape') # print(t.shape) if self.model.conditioning_key is not None: assert c is not None imgs = c if self.cond_stage_trainable: # c = self.get_learned_conditioning(c) c, re_latent = self.get_learned_conditioning(c) # print('c.shape') # print(c.shape) prefix = 'train' if self.training else 'val' loss, loss_dict = self.p_losses(x, c, t, *args, **kwargs) # pre_cls = self.cond_stage_model.get_cls(re_latent) # rencon = self.cond_stage_model.recon(re_latent) if self.clip_tune: image_embeds = self.image_embedder(image_raw) loss_clip = self.cond_stage_model.get_clip_loss(re_latent, image_embeds) # loss_recon = self.recon_loss(imgs, rencon) # loss_cls = self.cls_loss(label, pre_cls) loss += loss_clip # loss += loss_cls # loss_recon + #(self.original_elbo_weight * loss_vlb) # loss_dict.update({f'{prefix}/loss_recon': loss_recon}) # loss_dict.update({f'{prefix}/loss_cls': loss_cls}) loss_dict.update({f'{prefix}/loss_clip': loss_clip}) if self.cls_tune: pre_cls = self.cond_stage_model.get_cls(re_latent) loss_cls = self.cls_loss(label, pre_cls) # image_embeds = self.image_embedder(image_raw) # loss_clip = self.cond_stage_model.get_clip_loss(re_latent, image_embeds) # loss_recon = self.recon_loss(imgs, rencon) # loss_cls = self.cls_loss(label, pre_cls) loss += loss_cls # loss += loss_cls # loss_recon + #(self.original_elbo_weight * loss_vlb) # loss_dict.update({f'{prefix}/loss_recon': loss_recon}) # loss_dict.update({f'{prefix}/loss_cls': loss_cls}) loss_dict.update({f'{prefix}/loss_cls': loss_cls}) # if self.return_cond: # return self.p_losses(x, c, t, *args, **kwargs), c # return self.p_losses(x, c, t, *args, **kwargs) if self.return_cond: return loss, loss_dict, c return loss, loss_dict # def recon_loss(self, ) def recon_loss(self, imgs, pred): """ imgs: [N, 1, num_voxels] pred: [N, L, p] mask: [N, L], 0 is keep, 1 is remove, """ # target = self.patchify(imgs) loss = (pred - imgs) ** 2 loss = loss.mean() # loss = loss.mean(dim=-1) # [N, L], mean loss per patch # loss = (loss * mask).sum() / mask.sum() if mask.sum() != 0 else (loss * mask).sum() # mean loss on removed patches return loss def cls_loss(self, label, pred): return torch.nn.CrossEntropyLoss()(pred, label) def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: 
move to dataset def rescale_bbox(bbox): x0 = torch.clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2]) y0 = torch.clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3]) w = min(bbox[2] / crop_coordinates[2], 1 - x0) h = min(bbox[3] / crop_coordinates[3], 1 - y0) return x0, y0, w, h return [rescale_bbox(b) for b in bboxes] def apply_model(self, x_noisy, t, cond, return_ids=False): if isinstance(cond, dict): # hybrid case, cond is exptected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn' cond = {key: cond} x_recon = self.model(x_noisy, t, **cond) # print('x_recon') # if isinstance(x_recon, tuple): # print('is tuple') # # print(len(x_recon)) # # print(x_recon[0].shape) # else: # print(x_recon.shape) if isinstance(x_recon, tuple) and not return_ids: return x_recon[0] else: return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. """ batch_size = x_start.shape[0] t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0) return mean_flat(kl_prior) / np.log(2.0) def p_losses(self, x_start, cond, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) # print('p_losses') # print('noise.shape') # print(noise.shape) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) # print('x_noisy[0].shape') # print(x_noisy[0].shape) model_output = self.apply_model(x_noisy, t, cond) loss_dict = {} prefix = 'train' if self.training else 'val' if self.parameterization == "x0": target = x_start elif self.parameterization == "eps": target = noise else: raise NotImplementedError() loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3]) loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()}) logvar_t = self.logvar[t].to(self.device) loss = loss_simple / torch.exp(logvar_t) + logvar_t # loss = loss_simple / torch.exp(self.logvar) + self.logvar if self.learn_logvar: loss_dict.update({f'{prefix}/loss_gamma': loss.mean()}) loss_dict.update({'logvar': self.logvar.data.mean()}) loss = self.l_simple_weight * loss.mean() loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3)) loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() loss_dict.update({f'{prefix}/loss_vlb': loss_vlb}) loss += (self.original_elbo_weight * loss_vlb) loss_dict.update({f'{prefix}/loss': loss}) return loss, loss_dict def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False, return_x0=False, score_corrector=None, corrector_kwargs=None): t_in = t model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) if score_corrector is not None: assert self.parameterization == "eps" model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs) if return_codebook_ids: model_out, logits = model_out if self.parameterization == "eps": x_recon = 
self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out else: raise NotImplementedError() if clip_denoised: x_recon.clamp_(-1., 1.) if quantize_denoised: x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) if return_codebook_ids: return model_mean, posterior_variance, posterior_log_variance, logits elif return_x0: return model_mean, posterior_variance, posterior_log_variance, x_recon else: return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False, return_codebook_ids=False, quantize_denoised=False, return_x0=False, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None): b, *_, device = *x.shape, x.device outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised, return_codebook_ids=return_codebook_ids, quantize_denoised=quantize_denoised, return_x0=return_x0, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if return_x0: model_mean, _, model_log_variance, x0 = outputs else: model_mean, _, model_log_variance = outputs noise = noise_like(x.shape, device, repeat_noise) * temperature if noise_dropout > 0.: noise = torch.nn.functional.dropout(noise, p=noise_dropout) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) if return_x0: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0 else: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False, img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t timesteps = self.num_timesteps if batch_size is not None: b = batch_size if batch_size is not None else shape[0] shape = [batch_size] + list(shape) else: b = batch_size = shape[0] if x_T is None: img = torch.randn(shape, device=self.device) else: img = x_T intermediates = [] if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation', total=timesteps) if verbose else reversed( range(0, timesteps)) if type(temperature) == float: temperature = [temperature] * timesteps for i in iterator: ts = torch.full((b,), i, device=self.device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img, x0_partial = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised, return_x0=True, temperature=temperature[i], noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if mask is not None: assert x0 is not None img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. 
- mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(x0_partial) if callback: callback(i) if img_callback: img_callback(img, i) return img, intermediates @torch.no_grad() def p_sample_loop(self, cond, shape, return_intermediates=False, x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t device = self.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T intermediates = [img] if timesteps is None: timesteps = self.num_timesteps if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( range(0, timesteps)) if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None,**kwargs): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0) @torch.no_grad() def sample_log(self,cond,batch_size,ddim, ddim_steps,**kwargs): if ddim:
ddim_sampler = DDIMSampler(self)
17
2023-12-16 12:52:14+00:00
16k
tonnetonne814/PL-Bert-VITS2
train_ms.py
[ { "identifier": "DistributedBucketSampler", "path": "data_utils.py", "snippet": "class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler):\n \"\"\"\n Maintain similar input lengths in a batch.\n Length groups are specified by boundaries.\n Ex) boundaries = [b1, b2, b3]...
import argparse import itertools import json import math import os import logging import torch import torch.distributed as dist import torch.multiprocessing as mp import tqdm import commons import models import utils from torch import nn, optim from torch.cuda.amp import GradScaler, autocast from torch.nn import functional as F from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.data import DataLoader from torch.utils.tensorboard import SummaryWriter from data_utils import (DistributedBucketSampler, TextAudioSpeakerCollate, TextAudioSpeakerLoader) from losses import discriminator_loss, feature_loss, generator_loss, kl_loss from mel_processing import mel_spectrogram_torch, spec_to_mel_torch from models import (AVAILABLE_DURATION_DISCRIMINATOR_TYPES, AVAILABLE_FLOW_TYPES, DurationDiscriminatorV1, DurationDiscriminatorV2, MultiPeriodDiscriminator, SynthesizerTrn) from PL_BERT_ja.text.symbols import symbols
11,091
train_sampler = DistributedBucketSampler( train_dataset, hps.train.batch_size, [32, 300, 500, 700, 900, 1100, 1300, 1500, 3000], num_replicas=n_gpus, rank=rank, shuffle=True, ) collate_fn = TextAudioSpeakerCollate() train_loader = DataLoader( train_dataset, num_workers=8, shuffle=False, pin_memory=True, collate_fn=collate_fn, batch_sampler=train_sampler, ) if rank == 0: eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data) eval_loader = DataLoader( eval_dataset, num_workers=8, shuffle=False, batch_size=hps.train.batch_size, pin_memory=True, drop_last=False, collate_fn=collate_fn, ) # some of these flags are not being used in the code and directly set in hps json file. # they are kept here for reference and prototyping. if ( "use_transformer_flows" in hps.model.keys() and hps.model.use_transformer_flows == True ): use_transformer_flows = True transformer_flow_type = hps.model.transformer_flow_type print(f"Using transformer flows {transformer_flow_type} for VITS2") assert ( transformer_flow_type in AVAILABLE_FLOW_TYPES ), f"transformer_flow_type must be one of {AVAILABLE_FLOW_TYPES}" else: print("Using normal flows for VITS1") use_transformer_flows = False if ( "use_spk_conditioned_encoder" in hps.model.keys() and hps.model.use_spk_conditioned_encoder == True ): if hps.data.n_speakers == 0: raise ValueError( "n_speakers must be > 0 when using spk conditioned encoder to train multi-speaker model" ) use_spk_conditioned_encoder = True else: print("Using normal encoder for VITS1") use_spk_conditioned_encoder = False if ( "use_noise_scaled_mas" in hps.model.keys() and hps.model.use_noise_scaled_mas == True ): print("Using noise scaled MAS for VITS2") use_noise_scaled_mas = True mas_noise_scale_initial = 0.01 noise_scale_delta = 2e-6 else: print("Using normal MAS for VITS1") use_noise_scaled_mas = False mas_noise_scale_initial = 0.0 noise_scale_delta = 0.0 if ( "use_duration_discriminator" in hps.model.keys() and hps.model.use_duration_discriminator == True ): # print("Using duration discriminator for VITS2") use_duration_discriminator = True # comment - choihkk # add duration discriminator type here # I think it would be a good idea to come up with a method to input this part accurately, like a hydra duration_discriminator_type = getattr( hps.model, "duration_discriminator_type", "dur_disc_1" ) print(f"Using duration_discriminator {duration_discriminator_type} for VITS2") assert ( duration_discriminator_type in AVAILABLE_DURATION_DISCRIMINATOR_TYPES ), f"duration_discriminator_type must be one of {AVAILABLE_DURATION_DISCRIMINATOR_TYPES}" # duration_discriminator_type = AVAILABLE_DURATION_DISCRIMINATOR_TYPES # ここ修正 if duration_discriminator_type == "dur_disc_1": net_dur_disc = DurationDiscriminatorV1( hps.model.hidden_channels, hps.model.hidden_channels, 3, 0.1, gin_channels=hps.model.gin_channels if hps.data.n_speakers != 0 else 0, ).cuda(rank) elif duration_discriminator_type == "dur_disc_2": net_dur_disc = DurationDiscriminatorV2( hps.model.hidden_channels, hps.model.hidden_channels, 3, 0.1, gin_channels=hps.model.gin_channels if hps.data.n_speakers != 0 else 0, ).cuda(rank) else: print("NOT using any duration discriminator like VITS1") net_dur_disc = None use_duration_discriminator = False net_g = SynthesizerTrn( len(symbols)+1, posterior_channels, hps.train.segment_size // hps.data.hop_length, n_speakers=hps.data.n_speakers, mas_noise_scale_initial=mas_noise_scale_initial, noise_scale_delta=noise_scale_delta, **hps.model, ).cuda(rank)
numba_logger = logging.getLogger('numba') numba_logger.setLevel(logging.WARNING) # from tensorboardX import SummaryWriter torch.backends.cudnn.benchmark = True global_step = 0 def main(): """Assume Single Node Multi GPUs Training Only""" assert torch.cuda.is_available(), "CPU training is not allowed." n_gpus = torch.cuda.device_count() os.environ["MASTER_ADDR"] = "localhost" os.environ["MASTER_PORT"] = "6060" hps = utils.get_hparams() mp.spawn( run, nprocs=n_gpus, args=( n_gpus, hps, ), ) def run(rank, n_gpus, hps): net_dur_disc = None global global_step if rank == 0: logger = utils.get_logger(hps.model_dir) logger.info(hps) utils.check_git_hash(hps.model_dir) writer = SummaryWriter(log_dir=hps.model_dir) writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval")) dist.init_process_group( backend="nccl", init_method="env://", world_size=n_gpus, rank=rank ) torch.manual_seed(hps.train.seed) torch.cuda.set_device(rank) if ( "use_mel_posterior_encoder" in hps.model.keys() and hps.model.use_mel_posterior_encoder == True ): print("Using mel posterior encoder for VITS2") posterior_channels = 128 # vits2 hps.data.use_mel_posterior_encoder = True else: print("Using lin posterior encoder for VITS1") posterior_channels = hps.data.filter_length // 2 + 1 hps.data.use_mel_posterior_encoder = False train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data) train_sampler = DistributedBucketSampler( train_dataset, hps.train.batch_size, [32, 300, 500, 700, 900, 1100, 1300, 1500, 3000], num_replicas=n_gpus, rank=rank, shuffle=True, ) collate_fn = TextAudioSpeakerCollate() train_loader = DataLoader( train_dataset, num_workers=8, shuffle=False, pin_memory=True, collate_fn=collate_fn, batch_sampler=train_sampler, ) if rank == 0: eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data) eval_loader = DataLoader( eval_dataset, num_workers=8, shuffle=False, batch_size=hps.train.batch_size, pin_memory=True, drop_last=False, collate_fn=collate_fn, ) # some of these flags are not being used in the code and directly set in hps json file. # they are kept here for reference and prototyping. 
if ( "use_transformer_flows" in hps.model.keys() and hps.model.use_transformer_flows == True ): use_transformer_flows = True transformer_flow_type = hps.model.transformer_flow_type print(f"Using transformer flows {transformer_flow_type} for VITS2") assert ( transformer_flow_type in AVAILABLE_FLOW_TYPES ), f"transformer_flow_type must be one of {AVAILABLE_FLOW_TYPES}" else: print("Using normal flows for VITS1") use_transformer_flows = False if ( "use_spk_conditioned_encoder" in hps.model.keys() and hps.model.use_spk_conditioned_encoder == True ): if hps.data.n_speakers == 0: raise ValueError( "n_speakers must be > 0 when using spk conditioned encoder to train multi-speaker model" ) use_spk_conditioned_encoder = True else: print("Using normal encoder for VITS1") use_spk_conditioned_encoder = False if ( "use_noise_scaled_mas" in hps.model.keys() and hps.model.use_noise_scaled_mas == True ): print("Using noise scaled MAS for VITS2") use_noise_scaled_mas = True mas_noise_scale_initial = 0.01 noise_scale_delta = 2e-6 else: print("Using normal MAS for VITS1") use_noise_scaled_mas = False mas_noise_scale_initial = 0.0 noise_scale_delta = 0.0 if ( "use_duration_discriminator" in hps.model.keys() and hps.model.use_duration_discriminator == True ): # print("Using duration discriminator for VITS2") use_duration_discriminator = True # comment - choihkk # add duration discriminator type here # I think it would be a good idea to come up with a method to input this part accurately, like a hydra duration_discriminator_type = getattr( hps.model, "duration_discriminator_type", "dur_disc_1" ) print(f"Using duration_discriminator {duration_discriminator_type} for VITS2") assert ( duration_discriminator_type in AVAILABLE_DURATION_DISCRIMINATOR_TYPES ), f"duration_discriminator_type must be one of {AVAILABLE_DURATION_DISCRIMINATOR_TYPES}" # duration_discriminator_type = AVAILABLE_DURATION_DISCRIMINATOR_TYPES # ここ修正 if duration_discriminator_type == "dur_disc_1": net_dur_disc = DurationDiscriminatorV1( hps.model.hidden_channels, hps.model.hidden_channels, 3, 0.1, gin_channels=hps.model.gin_channels if hps.data.n_speakers != 0 else 0, ).cuda(rank) elif duration_discriminator_type == "dur_disc_2": net_dur_disc = DurationDiscriminatorV2( hps.model.hidden_channels, hps.model.hidden_channels, 3, 0.1, gin_channels=hps.model.gin_channels if hps.data.n_speakers != 0 else 0, ).cuda(rank) else: print("NOT using any duration discriminator like VITS1") net_dur_disc = None use_duration_discriminator = False net_g = SynthesizerTrn( len(symbols)+1, posterior_channels, hps.train.segment_size // hps.data.hop_length, n_speakers=hps.data.n_speakers, mas_noise_scale_initial=mas_noise_scale_initial, noise_scale_delta=noise_scale_delta, **hps.model, ).cuda(rank)
net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank)
13
2023-12-16 05:34:02+00:00
16k
Ruiyuan-Zhang/CCS
multi_part_assembly/utils/wx_transformer_utilities/multihead_attention.py
[ { "identifier": "FairseqDropout", "path": "multi_part_assembly/utils/wx_transformer_utilities/fairseq_dropout.py", "snippet": "class FairseqDropout(nn.Module):\n\n def __init__(self, p, module_name=None):\n super().__init__()\n self.p = p\n self.module_name = module_name\n ...
import math import time import numpy as np import torch import torch.nn.functional as F import multi_part_assembly.utils.wx_transformer_utilities.fairseq_utils as utils from typing import Dict, Optional, Tuple from torch import Tensor, nn from torch.nn import Parameter from .fairseq_dropout import FairseqDropout from .attention_rim import MultiHeadAttention as MHAMemory from .quant_noise import quant_noise from .group_linear_layer import GroupLinearLayer from .relational_memory_volatile import RelationalMemory from .relational_memory_regressive import RelationalMemory as RelationalMemoryRegressive
14,080
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. #import models.fairseq_util #from fairseq.incremental_decoding_utils import with_incremental_state #from .relational_memory_lstm import RelationalMemory # 为什么作者没有从这两个类别中引入relmem? #from fairseq.modules.shared_group_linear_layer import SharedGroupLinearLayer as GroupLinearLayer class MultiheadAttention(nn.Module): """Multi-headed attention. See "Attention Is All You Need" for more details. """ def __init__( self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False, self_attention=False, encoder_decoder_attention=False, q_noise=0.0, qn_block_size=8, nblocks=1, top_k_ratio=None, use_value_competition=True, shared_memory_attention = False, use_topk = False, topk = 3, num_steps = 5, mem_slots = 4, null_attention = False, regressive = False ): super().__init__() self.embed_dim = embed_dim self.kdim = kdim if kdim is not None else embed_dim self.vdim = vdim if vdim is not None else embed_dim self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim self.num_heads = num_heads self.dropout_module = FairseqDropout( dropout, module_name=self.__class__.__name__ ) self.head_dim = embed_dim // num_heads self.shared_memory_attention = shared_memory_attention print('total heads', self.num_heads) print('head dim', self.head_dim) self.use_topk = use_topk self.topk = topk print('use topk?' + str(self.use_topk)) print('topk:'+str(self.topk)) assert ( self.head_dim * num_heads == self.embed_dim ), "embed_dim must be divisible by num_heads" self.scaling = self.head_dim ** -0.5 self.self_attention = self_attention self.encoder_decoder_attention = encoder_decoder_attention assert not self.self_attention or self.qkv_same_dim, ( "Self-attention requires query, key and " "value to be of the same size" ) if not self.shared_memory_attention: # 这里的共享memory_attention是什么内容呢?表示的是不在不同的layer之间共享memory吗?
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. #import models.fairseq_util #from fairseq.incremental_decoding_utils import with_incremental_state #from .relational_memory_lstm import RelationalMemory # 为什么作者没有从这两个类别中引入relmem? #from fairseq.modules.shared_group_linear_layer import SharedGroupLinearLayer as GroupLinearLayer class MultiheadAttention(nn.Module): """Multi-headed attention. See "Attention Is All You Need" for more details. """ def __init__( self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False, self_attention=False, encoder_decoder_attention=False, q_noise=0.0, qn_block_size=8, nblocks=1, top_k_ratio=None, use_value_competition=True, shared_memory_attention = False, use_topk = False, topk = 3, num_steps = 5, mem_slots = 4, null_attention = False, regressive = False ): super().__init__() self.embed_dim = embed_dim self.kdim = kdim if kdim is not None else embed_dim self.vdim = vdim if vdim is not None else embed_dim self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim self.num_heads = num_heads self.dropout_module = FairseqDropout( dropout, module_name=self.__class__.__name__ ) self.head_dim = embed_dim // num_heads self.shared_memory_attention = shared_memory_attention print('total heads', self.num_heads) print('head dim', self.head_dim) self.use_topk = use_topk self.topk = topk print('use topk?' + str(self.use_topk)) print('topk:'+str(self.topk)) assert ( self.head_dim * num_heads == self.embed_dim ), "embed_dim must be divisible by num_heads" self.scaling = self.head_dim ** -0.5 self.self_attention = self_attention self.encoder_decoder_attention = encoder_decoder_attention assert not self.self_attention or self.qkv_same_dim, ( "Self-attention requires query, key and " "value to be of the same size" ) if not self.shared_memory_attention: # 这里的共享memory_attention是什么内容呢?表示的是不在不同的layer之间共享memory吗?
self.k_proj = quant_noise(GroupLinearLayer(self.kdim//nblocks, embed_dim//nblocks, nblocks, bias=bias), q_noise, qn_block_size)
3
2023-12-15 13:13:01+00:00
16k
camenduru/FreeInit-hf
app.py
[ { "identifier": "UNet3DConditionModel", "path": "animatediff/models/unet.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,\n in...
import os import torch import random import gradio as gr from glob import glob from omegaconf import OmegaConf from safetensors import safe_open from diffusers import AutoencoderKL from diffusers import EulerDiscreteScheduler, DDIMScheduler from diffusers.utils.import_utils import is_xformers_available from transformers import CLIPTextModel, CLIPTokenizer from animatediff.models.unet import UNet3DConditionModel from animatediff.pipelines.pipeline_animation import AnimationFreeInitPipeline from animatediff.utils.util import save_videos_grid from animatediff.utils.convert_from_ckpt import convert_ldm_unet_checkpoint, convert_ldm_clip_checkpoint, convert_ldm_vae_checkpoint from diffusers.training_utils import set_seed from animatediff.utils.freeinit_utils import get_freq_filter from collections import namedtuple
13,894
["use_fp16"] ], # 1-ToonYou [ "toonyou_beta3.safetensors", "mm_sd_v14.ckpt", "(best quality, masterpiece), 1girl, looking at viewer, blurry background, upper body, contemporary, dress", "(worst quality, low quality)", 512, 512, "478028150728261", "butterworth", 0.25, 0.25, 3, ["use_fp16"] ], # 2-Lyriel [ "lyriel_v16.safetensors", "mm_sd_v14.ckpt", "hypercars cyberpunk moving, muted colors, swirling color smokes, legend, cityscape, space", "3d, cartoon, anime, sketches, worst quality, low quality, nsfw, logo", 512, 512, "1566149281915957", "butterworth", 0.25, 0.25, 3, ["use_fp16"] ], # 3-RCNZ [ "rcnzCartoon3d_v10.safetensors", "mm_sd_v14.ckpt", "A cute raccoon playing guitar in a boat on the ocean", "worst quality, low quality, nsfw, logo", 512, 512, "1566149281915957", "butterworth", 0.25, 0.25, 3, ["use_fp16"] ], # 4-MajicMix [ "majicmixRealistic_v5Preview.safetensors", "mm_sd_v14.ckpt", "1girl, reading book", "(ng_deepnegative_v1_75t:1.2), (badhandv4:1), (worst quality:2), (low quality:2), (normal quality:2), lowres, bad anatomy, bad hands, watermark, moles", 512, 512, "2005563494988190", "butterworth", 0.25, 0.25, 3, ["use_fp16"] ], # # 5-RealisticVision # [ # "realisticVisionV51_v20Novae.safetensors", # "mm_sd_v14.ckpt", # "A panda standing on a surfboard in the ocean in sunset.", # "worst quality, low quality, nsfw, logo", # 512, 512, "2005563494988190", # "butterworth", 0.25, 0.25, 3, # ["use_fp16"] # ] ] # clean unrelated ckpts # ckpts = [ # "realisticVisionV40_v20Novae.safetensors", # "majicmixRealistic_v5Preview.safetensors", # "rcnzCartoon3d_v10.safetensors", # "lyriel_v16.safetensors", # "toonyou_beta3.safetensors" # ] # for path in glob(os.path.join("models", "DreamBooth_LoRA", "*.safetensors")): # for ckpt in ckpts: # if path.endswith(ckpt): break # else: # print(f"### Cleaning {path} ...") # os.system(f"rm -rf {path}") # os.system(f"rm -rf {os.path.join('models', 'DreamBooth_LoRA', '*.safetensors')}") # os.system(f"bash download_bashscripts/1-ToonYou.sh") # os.system(f"bash download_bashscripts/2-Lyriel.sh") # os.system(f"bash download_bashscripts/3-RcnzCartoon.sh") # os.system(f"bash download_bashscripts/4-MajicMix.sh") # os.system(f"bash download_bashscripts/5-RealisticVision.sh") # # clean Gradio cache # print(f"### Cleaning cached examples ...") # os.system(f"rm -rf gradio_cached_examples/") class AnimateController: def __init__(self): # config dirs self.basedir = os.getcwd() self.stable_diffusion_dir = os.path.join(self.basedir, "models", "StableDiffusion") self.motion_module_dir = os.path.join(self.basedir, "models", "Motion_Module") self.personalized_model_dir = os.path.join(self.basedir, "models", "DreamBooth_LoRA") self.savedir = os.path.join(self.basedir, "samples") os.makedirs(self.savedir, exist_ok=True) self.base_model_list = [] self.motion_module_list = [] self.filter_type_list = [ "butterworth", "gaussian", "box", "ideal" ] self.selected_base_model = None self.selected_motion_module = None self.selected_filter_type = None self.set_width = None self.set_height = None self.set_d_s = None self.set_d_t = None self.refresh_motion_module() self.refresh_personalized_model() # config models self.inference_config = OmegaConf.load(inference_config_path) self.tokenizer = CLIPTokenizer.from_pretrained(pretrained_model_path, subfolder="tokenizer") self.text_encoder = CLIPTextModel.from_pretrained(pretrained_model_path, subfolder="text_encoder").cuda() self.vae = AutoencoderKL.from_pretrained(pretrained_model_path, subfolder="vae").cuda()
pretrained_model_path = "models/StableDiffusion/stable-diffusion-v1-5" inference_config_path = "configs/inference/inference-v1.yaml" css = """ .toolbutton { margin-buttom: 0em 0em 0em 0em; max-width: 2.5em; min-width: 2.5em !important; height: 2.5em; } """ examples = [ # 0-RealisticVision [ "realisticVisionV51_v20Novae.safetensors", "mm_sd_v14.ckpt", "A panda standing on a surfboard in the ocean under moonlight.", "worst quality, low quality, nsfw, logo", 512, 512, "2005563494988190", "butterworth", 0.25, 0.25, 3, ["use_fp16"] ], # 1-ToonYou [ "toonyou_beta3.safetensors", "mm_sd_v14.ckpt", "(best quality, masterpiece), 1girl, looking at viewer, blurry background, upper body, contemporary, dress", "(worst quality, low quality)", 512, 512, "478028150728261", "butterworth", 0.25, 0.25, 3, ["use_fp16"] ], # 2-Lyriel [ "lyriel_v16.safetensors", "mm_sd_v14.ckpt", "hypercars cyberpunk moving, muted colors, swirling color smokes, legend, cityscape, space", "3d, cartoon, anime, sketches, worst quality, low quality, nsfw, logo", 512, 512, "1566149281915957", "butterworth", 0.25, 0.25, 3, ["use_fp16"] ], # 3-RCNZ [ "rcnzCartoon3d_v10.safetensors", "mm_sd_v14.ckpt", "A cute raccoon playing guitar in a boat on the ocean", "worst quality, low quality, nsfw, logo", 512, 512, "1566149281915957", "butterworth", 0.25, 0.25, 3, ["use_fp16"] ], # 4-MajicMix [ "majicmixRealistic_v5Preview.safetensors", "mm_sd_v14.ckpt", "1girl, reading book", "(ng_deepnegative_v1_75t:1.2), (badhandv4:1), (worst quality:2), (low quality:2), (normal quality:2), lowres, bad anatomy, bad hands, watermark, moles", 512, 512, "2005563494988190", "butterworth", 0.25, 0.25, 3, ["use_fp16"] ], # # 5-RealisticVision # [ # "realisticVisionV51_v20Novae.safetensors", # "mm_sd_v14.ckpt", # "A panda standing on a surfboard in the ocean in sunset.", # "worst quality, low quality, nsfw, logo", # 512, 512, "2005563494988190", # "butterworth", 0.25, 0.25, 3, # ["use_fp16"] # ] ] # clean unrelated ckpts # ckpts = [ # "realisticVisionV40_v20Novae.safetensors", # "majicmixRealistic_v5Preview.safetensors", # "rcnzCartoon3d_v10.safetensors", # "lyriel_v16.safetensors", # "toonyou_beta3.safetensors" # ] # for path in glob(os.path.join("models", "DreamBooth_LoRA", "*.safetensors")): # for ckpt in ckpts: # if path.endswith(ckpt): break # else: # print(f"### Cleaning {path} ...") # os.system(f"rm -rf {path}") # os.system(f"rm -rf {os.path.join('models', 'DreamBooth_LoRA', '*.safetensors')}") # os.system(f"bash download_bashscripts/1-ToonYou.sh") # os.system(f"bash download_bashscripts/2-Lyriel.sh") # os.system(f"bash download_bashscripts/3-RcnzCartoon.sh") # os.system(f"bash download_bashscripts/4-MajicMix.sh") # os.system(f"bash download_bashscripts/5-RealisticVision.sh") # # clean Gradio cache # print(f"### Cleaning cached examples ...") # os.system(f"rm -rf gradio_cached_examples/") class AnimateController: def __init__(self): # config dirs self.basedir = os.getcwd() self.stable_diffusion_dir = os.path.join(self.basedir, "models", "StableDiffusion") self.motion_module_dir = os.path.join(self.basedir, "models", "Motion_Module") self.personalized_model_dir = os.path.join(self.basedir, "models", "DreamBooth_LoRA") self.savedir = os.path.join(self.basedir, "samples") os.makedirs(self.savedir, exist_ok=True) self.base_model_list = [] self.motion_module_list = [] self.filter_type_list = [ "butterworth", "gaussian", "box", "ideal" ] self.selected_base_model = None self.selected_motion_module = None self.selected_filter_type = None self.set_width = None 
self.set_height = None self.set_d_s = None self.set_d_t = None self.refresh_motion_module() self.refresh_personalized_model() # config models self.inference_config = OmegaConf.load(inference_config_path) self.tokenizer = CLIPTokenizer.from_pretrained(pretrained_model_path, subfolder="tokenizer") self.text_encoder = CLIPTextModel.from_pretrained(pretrained_model_path, subfolder="text_encoder").cuda() self.vae = AutoencoderKL.from_pretrained(pretrained_model_path, subfolder="vae").cuda()
self.unet = UNet3DConditionModel.from_pretrained_2d(pretrained_model_path, subfolder="unet", unet_additional_kwargs=OmegaConf.to_container(self.inference_config.unet_additional_kwargs)).cuda()
0
2023-12-19 21:06:32+00:00
16k
exislow/tidal-dl-ng
tidal_dl_ng/gui.py
[ { "identifier": "get_format_template", "path": "tidal_dl_ng/helper/path.py", "snippet": "def get_format_template(\n media: Track | Album | Playlist | UserPlaylist | Video | Mix | MediaType, settings\n) -> str | bool:\n result = False\n\n if isinstance(media, Track) or media == MediaType.TRACK:\...
import math import sys import qdarktheme import coloredlogs.converter from collections.abc import Callable from tidal_dl_ng.helper.path import get_format_template from PySide6 import QtCore, QtGui, QtWidgets from rich.progress import Progress from tidalapi import Album, Mix, Playlist, Quality, Track, UserPlaylist, Video from tidalapi.session import SearchTypes from tidal_dl_ng.config import Settings, Tidal from tidal_dl_ng.constants import QualityVideo, TidalLists from tidal_dl_ng.download import Download from tidal_dl_ng.logger import XStream, logger_gui from tidal_dl_ng.model.gui_data import ProgressBars, ResultSearch from tidal_dl_ng.ui.main import Ui_MainWindow from tidal_dl_ng.ui.spinner import QtWaitingSpinner from tidal_dl_ng.worker import Worker
14,197
if isinstance(l_media, list): result = result + self.search_result_to_model(l_media) return result def search_result_to_model(self, items: [*SearchTypes]) -> [ResultSearch]: result = [] for idx, item in enumerate(items): if isinstance(item, Track): result_item: ResultSearch = ResultSearch( position=idx, artist=", ".join(artist.name for artist in item.artists), title=item.name, album=item.album.name, duration_sec=item.duration, obj=item, ) result.append(result_item) elif isinstance(item, Video): result_item: ResultSearch = ResultSearch( position=idx, artist=", ".join(artist.name for artist in item.artists), title=item.name, album=item.album.name if item.album else "", duration_sec=item.duration, obj=item, ) result.append(result_item) elif isinstance(item, Playlist): result_item: ResultSearch = ResultSearch( position=idx, artist=", ".join(artist.name for artist in item.promoted_artists) if item.promoted_artists else "", title=item.name, album="", duration_sec=item.duration, obj=item, ) result.append(result_item) elif isinstance(item, Album): result_item: ResultSearch = ResultSearch( position=idx, artist=", ".join(artist.name for artist in item.artists), title="", album=item.name, duration_sec=item.duration, obj=item, ) result.append(result_item) return result def _init_signals(self): self.b_download.clicked.connect(lambda: self.thread_it(self.on_download_results)) self.l_search.returnPressed.connect( lambda: self.search_populate_results(self.l_search.text(), self.cb_search_type.currentData()) ) self.b_search.clicked.connect( lambda: self.search_populate_results(self.l_search.text(), self.cb_search_type.currentData()) ) self.cb_quality_audio.currentIndexChanged.connect(self.quality_set_audio) self.cb_quality_video.currentIndexChanged.connect(self.quality_set_video) self.tr_lists_user.itemClicked.connect(self.on_list_items_show) self.spinner_start[QtWidgets.QWidget].connect(self.on_spinner_start) self.spinner_stop.connect(self.on_spinner_stop) self.s_item_advance.connect(self.progress_item) self.s_item_name.connect(self.progress_item_name) self.s_list_advance.connect(self.progress_list) self.s_pb_reset.connect(self.progress_reset) self.s_populate_tree_lists.connect(self.on_populate_tree_lists) def progress_list(self, value: float): self.pb_list.setValue(int(math.ceil(value))) def progress_item(self, value: float): self.pb_item.setValue(int(math.ceil(value))) def progress_item_name(self, value: str): self.pb_item.setFormat(f"%p% {value}") def progress_list_name(self, value: str): self.pb_list.setFormat(f"%p% {value}") def quality_set_audio(self, index): self.settings.data.quality_audio = Quality(self.cb_quality_audio.itemData(index).value) self.settings.save() if self.tidal: self.tidal.settings_apply() def quality_set_video(self, index): self.settings.data.quality_video = QualityVideo(self.cb_quality_video.itemData(index).value) self.settings.save() if self.tidal: self.tidal.settings_apply() def on_list_items_show(self, item: QtWidgets.QTreeWidgetItem): media_list: Album | Playlist = item.data(3, QtCore.Qt.ItemDataRole.UserRole) # Only if clicked item is not a top level item. 
if media_list: self.list_items_show(media_list) def list_items_show(self, media_list: Album | Playlist | None = None, point: QtCore.QPoint | None = None): if point: item = self.tr_lists_user.itemAt(point) media_list = item.data(3, QtCore.Qt.ItemDataRole.UserRole) media_items = media_list.items() result = self.search_result_to_model(media_items) self.populate_tree_results(result) def thread_it(self, fn: Callable, *args, **kwargs): # Any other args, kwargs are passed to the run function
try: except ImportError as e: print(e) print("Qt dependencies missing. Cannot start GUI. Please execute: 'pip install pyside6 pyqtdarktheme'") sys.exit(1) # TODO: Make more use of Exceptions # TODO: Add File -> Version class MainWindow(QtWidgets.QMainWindow, Ui_MainWindow): settings: Settings = None tidal: Tidal = None dl: Download = None threadpool: QtCore.QThreadPool = None spinner: QtWaitingSpinner = None spinner_start: QtCore.Signal = QtCore.Signal(QtWidgets.QWidget) spinner_stop: QtCore.Signal = QtCore.Signal() pb_item: QtWidgets.QProgressBar = None s_item_advance: QtCore.Signal = QtCore.Signal(float) s_item_name: QtCore.Signal = QtCore.Signal(str) pb_list: QtWidgets.QProgressBar = None s_list_advance: QtCore.Signal = QtCore.Signal(float) s_pb_reset: QtCore.Signal = QtCore.Signal() s_populate_tree_lists: QtCore.Signal = QtCore.Signal(list) def __init__(self, tidal: Tidal | None = None): super().__init__() self.setupUi(self) # self.setGeometry(50, 50, 500, 300) self.setWindowTitle("TIDAL Downloader Next Gen!") # TODO: Fix icons (make them visible). # my_pixmap = QtGui.QPixmap("tidal_dl_ng/ui/icon.png") my_icon = QtGui.QIcon("tidal_dl_ng/ui/icon.png") self.setWindowIcon(my_icon) tray = QtWidgets.QSystemTrayIcon() tray.setIcon(my_icon) tray.setVisible(True) # Logging redirect. XStream.stdout().messageWritten.connect(self._log_output) # XStream.stderr().messageWritten.connect(self._log_output) self.settings = Settings() self.threadpool = QtCore.QThreadPool() # TODO: Show GUI, create a progress bar showing the TIDAL querying progress. self._init_tree_results(self.tr_results) self._init_tree_lists(self.tr_lists_user) self._init_progressbar() self._populate_quality(self.cb_quality_audio, Quality) self._populate_quality(self.cb_quality_video, QualityVideo) self._populate_search_types(self.cb_search_type, SearchTypes) self.apply_settings(self.settings) self._init_signals() self.init_tidal(tidal) logger_gui.debug("Multithreading with maximum %d threads" % self.threadpool.maxThreadCount()) logger_gui.debug("All setup.") def init_tidal(self, tidal: Tidal = None): result: bool = False if tidal: self.tidal = tidal result = True else: self.tidal = Tidal(self.settings) while True: result = self.tidal.login(logger_gui.info) if result: break if result: self.dl = Download(self.tidal.session, self.tidal.settings.data.skip_existing) self.thread_it(self.tidal_user_lists) def _init_progressbar(self): self.pb_list = QtWidgets.QProgressBar() self.pb_item = QtWidgets.QProgressBar() pbs = [self.pb_list, self.pb_item] for pb in pbs: pb.setRange(0, 100) # self.pb_progress.setVisible() self.statusbar.addPermanentWidget(pb) def progress_reset(self): self.pb_list.setValue(0) self.pb_item.setValue(0) def _log_output(self, text): display_msg = coloredlogs.converter.convert(text) cursor: QtGui.QTextCursor = self.te_debug.textCursor() cursor.movePosition(QtGui.QTextCursor.End) cursor.insertHtml(display_msg) self.te_debug.setTextCursor(cursor) self.te_debug.ensureCursorVisible() def _populate_quality(self, ui_target: QtWidgets.QComboBox, options: type[Quality | QualityVideo]): for item in options: ui_target.addItem(item.name, item) def _populate_search_types(self, ui_target: QtWidgets.QComboBox, options: SearchTypes): for item in options: if item and item.__name__ != "Artist": ui_target.addItem(item.__name__, item) self.cb_search_type.setCurrentIndex(1) def _init_tree_results(self, tree: QtWidgets.QTableWidget): tree.setColumnHidden(5, True) tree.sortByColumn(0, QtCore.Qt.SortOrder.AscendingOrder) # TODO: Refactor to own 
TIDAL file or so. def tidal_user_lists(self): # Start loading spinner self.spinner_start.emit(self.tr_lists_user) user_playlists: [Playlist | UserPlaylist] = self.tidal.session.user.playlist_and_favorite_playlists() user_mixes: [Mix] = self.tidal.session.mixes().categories[0].items user_all: [Playlist | UserPlaylist | Mix] = user_playlists + user_mixes self.s_populate_tree_lists.emit(user_all) def on_populate_tree_lists(self, user_lists: [Playlist | UserPlaylist | Mix]): self.tr_results.clear() twi_playlists: QtWidgets.QTreeWidgetItem = self.tr_lists_user.findItems( TidalLists.PLAYLISTS.value, QtCore.Qt.MatchExactly, 0 )[0] twi_mixes: QtWidgets.QTreeWidgetItem = self.tr_lists_user.findItems( TidalLists.FAVORITES.value, QtCore.Qt.MatchExactly, 0 )[0] twi_favorites: QtWidgets.QTreeWidgetItem = self.tr_lists_user.findItems( TidalLists.MIXES.value, QtCore.Qt.MatchExactly, 0 )[0] for item in user_lists: if isinstance(item, UserPlaylist): twi_child = QtWidgets.QTreeWidgetItem(twi_playlists) name: str = item.name info: str = f"({item.num_tracks + item.num_videos} Tracks)" elif isinstance(item, Playlist): twi_child = QtWidgets.QTreeWidgetItem(twi_mixes) name: str = item.name info: str = f"({item.num_tracks + item.num_videos} Tracks) {item.description}" elif isinstance(item, Mix): twi_child = QtWidgets.QTreeWidgetItem(twi_favorites) name: str = item.title info: str = item.sub_title twi_child.setText(0, name) twi_child.setText(1, info) twi_child.setData(3, QtCore.Qt.ItemDataRole.UserRole, item) # Stop load spinner self.spinner_stop.emit() def _init_tree_lists(self, tree: QtWidgets.QTreeWidget): # Adjust Tree. tree.setColumnWidth(0, 200) tree.setColumnWidth(1, 300) tree.setColumnHidden(2, True) tree.expandAll() # Connect the contextmenu tree.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) tree.customContextMenuRequested.connect(self.menu_context_tree_lists) def apply_settings(self, settings: Settings): l_cb = [ {"element": self.cb_quality_audio, "setting": settings.data.quality_audio.name, "default_id": 1}, {"element": self.cb_quality_video, "setting": settings.data.quality_video.name, "default_id": 0}, ] for item in l_cb: idx = item["element"].findText(item["setting"]) if idx > -1: item["element"].setCurrentIndex(idx) else: item["element"].setCurrentIndex(item["default_id"]) def on_spinner_start(self, parent: QtWidgets.QWidget): self.spinner = QtWaitingSpinner(parent, True, True) self.spinner.setColor(QtGui.QColor(255, 255, 255)) self.spinner.start() def on_spinner_stop(self): self.spinner.stop() self.spinner = None def menu_context_tree_lists(self, point): # Infos about the node selected. index = self.tr_lists_user.indexAt(point) # Do not open menu if something went wrong or a parent node is clicked. if not index.isValid() or not index.parent().data(): return # We build the menu. menu = QtWidgets.QMenu() menu.addAction("Download Playlist", lambda: self.thread_download_list_media(point)) menu.exec(self.tr_lists_user.mapToGlobal(point)) def thread_download_list_media(self, point): self.thread_it(self.on_download_list_media, point) self.thread_it(self.list_items_show, point=point) def on_download_list_media(self, point: QtCore.QPoint): item = self.tr_lists_user.itemAt(point) media = item.data(3, QtCore.Qt.ItemDataRole.UserRole) # TODO: Implement disable download button etc. 
self.download(media, self.dl) def search_populate_results(self, query: str, type_media: SearchTypes): self.tr_results.clear() results: [ResultSearch] = self.search(query, [type_media]) self.populate_tree_results(results) def populate_tree_results(self, results: [ResultSearch]): self.tr_results.clear() for item in results: # Format seconds to mm:ss. m, s = divmod(item.duration_sec, 60) duration: str = f"{m:02d}:{s:02d}" # Since sorting happens only by string, we need to pad the index and add 1 (to avoid start at 0) index: str = f"{item.position + 1:03}" child = QtWidgets.QTreeWidgetItem() child.setText(0, index) child.setText(1, item.artist) child.setText(2, item.title) child.setText(3, item.album) child.setText(4, duration) child.setData(5, QtCore.Qt.ItemDataRole.UserRole, item.obj) self.tr_results.addTopLevelItem(child) def search(self, query: str, types_media: SearchTypes) -> [ResultSearch]: result_search: [dict[str, SearchTypes]] = self.tidal.session.search(query, models=types_media, limit=999) result: [ResultSearch] = [] for _media_type, l_media in result_search.items(): if isinstance(l_media, list): result = result + self.search_result_to_model(l_media) return result def search_result_to_model(self, items: [*SearchTypes]) -> [ResultSearch]: result = [] for idx, item in enumerate(items): if isinstance(item, Track): result_item: ResultSearch = ResultSearch( position=idx, artist=", ".join(artist.name for artist in item.artists), title=item.name, album=item.album.name, duration_sec=item.duration, obj=item, ) result.append(result_item) elif isinstance(item, Video): result_item: ResultSearch = ResultSearch( position=idx, artist=", ".join(artist.name for artist in item.artists), title=item.name, album=item.album.name if item.album else "", duration_sec=item.duration, obj=item, ) result.append(result_item) elif isinstance(item, Playlist): result_item: ResultSearch = ResultSearch( position=idx, artist=", ".join(artist.name for artist in item.promoted_artists) if item.promoted_artists else "", title=item.name, album="", duration_sec=item.duration, obj=item, ) result.append(result_item) elif isinstance(item, Album): result_item: ResultSearch = ResultSearch( position=idx, artist=", ".join(artist.name for artist in item.artists), title="", album=item.name, duration_sec=item.duration, obj=item, ) result.append(result_item) return result def _init_signals(self): self.b_download.clicked.connect(lambda: self.thread_it(self.on_download_results)) self.l_search.returnPressed.connect( lambda: self.search_populate_results(self.l_search.text(), self.cb_search_type.currentData()) ) self.b_search.clicked.connect( lambda: self.search_populate_results(self.l_search.text(), self.cb_search_type.currentData()) ) self.cb_quality_audio.currentIndexChanged.connect(self.quality_set_audio) self.cb_quality_video.currentIndexChanged.connect(self.quality_set_video) self.tr_lists_user.itemClicked.connect(self.on_list_items_show) self.spinner_start[QtWidgets.QWidget].connect(self.on_spinner_start) self.spinner_stop.connect(self.on_spinner_stop) self.s_item_advance.connect(self.progress_item) self.s_item_name.connect(self.progress_item_name) self.s_list_advance.connect(self.progress_list) self.s_pb_reset.connect(self.progress_reset) self.s_populate_tree_lists.connect(self.on_populate_tree_lists) def progress_list(self, value: float): self.pb_list.setValue(int(math.ceil(value))) def progress_item(self, value: float): self.pb_item.setValue(int(math.ceil(value))) def progress_item_name(self, value: str): 
self.pb_item.setFormat(f"%p% {value}") def progress_list_name(self, value: str): self.pb_list.setFormat(f"%p% {value}") def quality_set_audio(self, index): self.settings.data.quality_audio = Quality(self.cb_quality_audio.itemData(index).value) self.settings.save() if self.tidal: self.tidal.settings_apply() def quality_set_video(self, index): self.settings.data.quality_video = QualityVideo(self.cb_quality_video.itemData(index).value) self.settings.save() if self.tidal: self.tidal.settings_apply() def on_list_items_show(self, item: QtWidgets.QTreeWidgetItem): media_list: Album | Playlist = item.data(3, QtCore.Qt.ItemDataRole.UserRole) # Only if clicked item is not a top level item. if media_list: self.list_items_show(media_list) def list_items_show(self, media_list: Album | Playlist | None = None, point: QtCore.QPoint | None = None): if point: item = self.tr_lists_user.itemAt(point) media_list = item.data(3, QtCore.Qt.ItemDataRole.UserRole) media_items = media_list.items() result = self.search_result_to_model(media_items) self.populate_tree_results(result) def thread_it(self, fn: Callable, *args, **kwargs): # Any other args, kwargs are passed to the run function
worker = Worker(fn, *args, **kwargs)
11
2023-12-19 23:05:47+00:00
16k
zyrant/SPGroup3D
tests/test_data/test_datasets/test_scannet_dataset.py
[ { "identifier": "ScanNetDataset", "path": "mmdet3d/datasets/scannet_dataset.py", "snippet": "class ScanNetDataset(Custom3DDataset):\n r\"\"\"ScanNet Dataset for Detection Task.\n\n This class serves as the API for experiments on the ScanNet Dataset.\n\n Please refer to the `github repo <https:/...
import copy
import numpy as np
import pytest
import torch
import tempfile
import tempfile
import mmcv
import tempfile
import tempfile
import mmcv
import mmcv
from mmdet3d.datasets import (ScanNetDataset, ScanNetInstanceSegDataset, ScanNetSegDataset, ScanNetInstanceSegV2Dataset)
from mmdet3d.core.bbox.structures import DepthInstance3DBoxes
from os import path as osp
from mmdet3d.core.bbox import DepthInstance3DBoxes
from os import path as osp
from os import path as osp
11,054
3, 19, 12, 8, 0, 11, 0, 0, 1, 2, 13, 17, 1, 1, 1, 6, 2, 13, 19, 4, 17, 0, 14, 1, 7, 2, 1, 7, 2, 0, 5, 17, 5, 0, 0, 3, 6, 5, 11, 1, 13, 13, 2, 3, 1, 0, 13, 19, 1, 14, 5, 3, 1, 13, 1, 2, 3, 2, 1 ]).long()) results = [result] scannet_dataset.show(results, temp_dir, show=False) pts_file_path = osp.join(temp_dir, 'scene0000_00', 'scene0000_00_points.obj') gt_file_path = osp.join(temp_dir, 'scene0000_00', 'scene0000_00_gt.obj') pred_file_path = osp.join(temp_dir, 'scene0000_00', 'scene0000_00_pred.obj') mmcv.check_file_exist(pts_file_path) mmcv.check_file_exist(gt_file_path) mmcv.check_file_exist(pred_file_path) tmp_dir.cleanup() # test show with pipeline tmp_dir = tempfile.TemporaryDirectory() temp_dir = tmp_dir.name class_names = ('wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub', 'otherfurniture') eval_pipeline = [ dict( type='LoadPointsFromFile', coord_type='DEPTH', shift_height=False, use_color=True, load_dim=6, use_dim=[0, 1, 2, 3, 4, 5]), dict( type='LoadAnnotations3D', with_bbox_3d=False, with_label_3d=False, with_mask_3d=False, with_seg_3d=True), dict( type='PointSegClassMapping', valid_cat_ids=(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39), max_cat_id=40), dict(type='DefaultFormatBundle3D', class_names=class_names), dict(type='Collect3D', keys=['points', 'pts_semantic_mask']) ] scannet_dataset.show(results, temp_dir, show=False, pipeline=eval_pipeline) pts_file_path = osp.join(temp_dir, 'scene0000_00', 'scene0000_00_points.obj') gt_file_path = osp.join(temp_dir, 'scene0000_00', 'scene0000_00_gt.obj') pred_file_path = osp.join(temp_dir, 'scene0000_00', 'scene0000_00_pred.obj') mmcv.check_file_exist(pts_file_path) mmcv.check_file_exist(gt_file_path) mmcv.check_file_exist(pred_file_path) tmp_dir.cleanup() def test_seg_format_results(): root_path = './tests/data/scannet' ann_file = './tests/data/scannet/scannet_infos.pkl' scannet_dataset = ScanNetSegDataset( data_root=root_path, ann_file=ann_file, test_mode=True) results = [] pred_sem_mask = dict( semantic_mask=torch.tensor([ 13, 5, 1, 2, 6, 2, 13, 1, 14, 2, 0, 0, 5, 5, 3, 0, 1, 14, 0, 0, 0, 18, 6, 15, 13, 0, 2, 4, 0, 3, 16, 6, 13, 5, 13, 0, 0, 0, 0, 1, 7, 3, 19, 12, 8, 0, 11, 0, 0, 1, 2, 13, 17, 1, 1, 1, 6, 2, 13, 19, 4, 17, 0, 14, 1, 7, 2, 1, 7, 2, 0, 5, 17, 5, 0, 0, 3, 6, 5, 11, 1, 13, 13, 2, 3, 1, 0, 13, 19, 1, 14, 5, 3, 1, 13, 1, 2, 3, 2, 1 ]).long()) results.append(pred_sem_mask) result_files, tmp_dir = scannet_dataset.format_results(results) expected_label = np.array([ 16, 6, 2, 3, 7, 3, 16, 2, 24, 3, 1, 1, 6, 6, 4, 1, 2, 24, 1, 1, 1, 36, 7, 28, 16, 1, 3, 5, 1, 4, 33, 7, 16, 6, 16, 1, 1, 1, 1, 2, 8, 4, 39, 14, 9, 1, 12, 1, 1, 2, 3, 16, 34, 2, 2, 2, 7, 3, 16, 39, 5, 34, 1, 24, 2, 8, 3, 2, 8, 3, 1, 6, 34, 6, 1, 1, 4, 7, 6, 12, 2, 16, 16, 3, 4, 2, 1, 16, 39, 2, 24, 6, 4, 2, 16, 2, 3, 4, 3, 2 ]) expected_txt_path = osp.join(tmp_dir.name, 'results', 'scene0000_00.txt') assert np.all(result_files[0]['seg_mask'] == expected_label) mmcv.check_file_exist(expected_txt_path) def test_instance_seg_getitem(): np.random.seed(0) root_path = './tests/data/scannet/' ann_file = './tests/data/scannet/scannet_infos.pkl' class_names = ('cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub', 'garbagebin') train_pipeline = [ dict( type='LoadPointsFromFile', 
coord_type='DEPTH', shift_height=False, use_color=True, load_dim=6, use_dim=[0, 1, 2, 3, 4, 5]), dict( type='LoadAnnotations3D', with_bbox_3d=False, with_label_3d=False, with_mask_3d=True, with_seg_3d=True), dict( type='PointSegClassMapping', valid_cat_ids=(3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39), max_cat_id=40), dict(type='NormalizePointsColor', color_mean=None), dict(type='DefaultFormatBundle3D', class_names=class_names), dict( type='Collect3D', keys=['points', 'pts_semantic_mask', 'pts_instance_mask']) ]
# Copyright (c) OpenMMLab. All rights reserved. def test_getitem(): np.random.seed(0) root_path = './tests/data/scannet/' ann_file = './tests/data/scannet/scannet_infos.pkl' class_names = ('cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub', 'garbagebin') pipelines = [ dict( type='LoadPointsFromFile', coord_type='DEPTH', shift_height=True, load_dim=6, use_dim=[0, 1, 2]), dict( type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True, with_mask_3d=True, with_seg_3d=True), dict(type='GlobalAlignment', rotation_axis=2), dict( type='PointSegClassMapping', valid_cat_ids=(3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39)), dict(type='PointSample', num_points=5), dict( type='RandomFlip3D', sync_2d=False, flip_ratio_bev_horizontal=1.0, flip_ratio_bev_vertical=1.0), dict( type='GlobalRotScaleTrans', rot_range=[-0.087266, 0.087266], scale_ratio_range=[1.0, 1.0], shift_height=True), dict(type='DefaultFormatBundle3D', class_names=class_names), dict( type='Collect3D', keys=[ 'points', 'gt_bboxes_3d', 'gt_labels_3d', 'pts_semantic_mask', 'pts_instance_mask' ], meta_keys=['file_name', 'sample_idx', 'pcd_rotation']), ] scannet_dataset = ScanNetDataset(root_path, ann_file, pipelines) data = scannet_dataset[0] points = data['points']._data gt_bboxes_3d = data['gt_bboxes_3d']._data gt_labels = data['gt_labels_3d']._data pts_semantic_mask = data['pts_semantic_mask']._data pts_instance_mask = data['pts_instance_mask']._data file_name = data['img_metas']._data['file_name'] pcd_rotation = data['img_metas']._data['pcd_rotation'] sample_idx = data['img_metas']._data['sample_idx'] expected_rotation = np.array([[0.99654, 0.08311407, 0.], [-0.08311407, 0.99654, 0.], [0., 0., 1.]]) assert file_name == './tests/data/scannet/points/scene0000_00.bin' assert np.allclose(pcd_rotation, expected_rotation, 1e-3) assert sample_idx == 'scene0000_00' expected_points = torch.tensor( [[1.8339e+00, 2.1093e+00, 2.2900e+00, 2.3895e+00], [3.6079e+00, 1.4592e-01, 2.0687e+00, 2.1682e+00], [4.1886e+00, 5.0614e+00, -1.0841e-01, -8.8736e-03], [6.8790e+00, 1.5086e+00, -9.3154e-02, 6.3816e-03], [4.8253e+00, 2.6668e-01, 1.4917e+00, 1.5912e+00]]) expected_gt_bboxes_3d = torch.tensor( [[-1.1835, -3.6317, 1.5704, 1.7577, 0.3761, 0.5724, 0.0000], [-3.1832, 3.2269, 1.1911, 0.6727, 0.2251, 0.6715, 0.0000], [-0.9598, -2.2864, 0.0093, 0.7506, 2.5709, 1.2145, 0.0000], [-2.6988, -2.7354, 0.8288, 0.7680, 1.8877, 0.2870, 0.0000], [3.2989, 0.2885, -0.0090, 0.7600, 3.8814, 2.1603, 0.0000]]) expected_gt_labels = np.array([ 6, 6, 4, 9, 11, 11, 10, 0, 15, 17, 17, 17, 3, 12, 4, 4, 14, 1, 0, 0, 0, 0, 0, 0, 5, 5, 5 ]) expected_pts_semantic_mask = np.array([0, 18, 18, 18, 18]) expected_pts_instance_mask = np.array([44, 22, 10, 10, 57]) original_classes = scannet_dataset.CLASSES assert scannet_dataset.CLASSES == class_names assert torch.allclose(points, expected_points, 1e-2) assert gt_bboxes_3d.tensor[:5].shape == (5, 7) assert torch.allclose(gt_bboxes_3d.tensor[:5], expected_gt_bboxes_3d, 1e-2) assert np.all(gt_labels.numpy() == expected_gt_labels) assert np.all(pts_semantic_mask.numpy() == expected_pts_semantic_mask) assert np.all(pts_instance_mask.numpy() == expected_pts_instance_mask) assert original_classes == class_names scannet_dataset = ScanNetDataset( root_path, ann_file, pipeline=None, classes=['cabinet', 'bed']) assert scannet_dataset.CLASSES != original_classes assert scannet_dataset.CLASSES == ['cabinet', 
'bed'] scannet_dataset = ScanNetDataset( root_path, ann_file, pipeline=None, classes=('cabinet', 'bed')) assert scannet_dataset.CLASSES != original_classes assert scannet_dataset.CLASSES == ('cabinet', 'bed') # Test load classes from file with tempfile.TemporaryDirectory() as tmpdir: path = tmpdir + 'classes.txt' with open(path, 'w') as f: f.write('cabinet\nbed\n') scannet_dataset = ScanNetDataset( root_path, ann_file, pipeline=None, classes=path) assert scannet_dataset.CLASSES != original_classes assert scannet_dataset.CLASSES == ['cabinet', 'bed'] def test_evaluate(): if not torch.cuda.is_available(): pytest.skip() root_path = './tests/data/scannet' ann_file = './tests/data/scannet/scannet_infos.pkl' scannet_dataset = ScanNetDataset(root_path, ann_file) results = [] pred_boxes = dict() pred_boxes['boxes_3d'] = DepthInstance3DBoxes( torch.tensor([[ 1.4813e+00, 3.5207e+00, 1.5704e+00, 1.7445e+00, 2.3196e-01, 5.7235e-01, 0.0000e+00 ], [ 2.9040e+00, -3.4803e+00, 1.1911e+00, 6.6078e-01, 1.7072e-01, 6.7154e-01, 0.0000e+00 ], [ 1.1466e+00, 2.1987e+00, 9.2576e-03, 5.4184e-01, 2.5346e+00, 1.2145e+00, 0.0000e+00 ], [ 2.9168e+00, 2.5016e+00, 8.2875e-01, 6.1697e-01, 1.8428e+00, 2.8697e-01, 0.0000e+00 ], [ -3.3114e+00, -1.3351e-02, -8.9524e-03, 4.4082e-01, 3.8582e+00, 2.1603e+00, 0.0000e+00 ], [ -2.0135e+00, -3.4857e+00, 9.3848e-01, 1.9911e+00, 2.1603e-01, 1.2767e+00, 0.0000e+00 ], [ -2.1945e+00, -3.1402e+00, -3.8165e-02, 1.4801e+00, 6.8676e-01, 1.0586e+00, 0.0000e+00 ], [ -2.7553e+00, 2.4055e+00, -2.9972e-02, 1.4764e+00, 1.4927e+00, 2.3380e+00, 0.0000e+00 ]])) pred_boxes['labels_3d'] = torch.tensor([6, 6, 4, 9, 11, 11]) pred_boxes['scores_3d'] = torch.tensor([0.5, 1.0, 1.0, 1.0, 1.0, 0.5]) results.append(pred_boxes) metric = [0.25, 0.5] ret_dict = scannet_dataset.evaluate(results, metric) assert abs(ret_dict['table_AP_0.25'] - 0.3333) < 0.01 assert abs(ret_dict['window_AP_0.25'] - 1.0) < 0.01 assert abs(ret_dict['counter_AP_0.25'] - 1.0) < 0.01 assert abs(ret_dict['curtain_AP_0.25'] - 1.0) < 0.01 # test evaluate with pipeline class_names = ('cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub', 'garbagebin') eval_pipeline = [ dict( type='LoadPointsFromFile', coord_type='DEPTH', shift_height=False, load_dim=6, use_dim=[0, 1, 2]), dict(type='GlobalAlignment', rotation_axis=2), dict( type='DefaultFormatBundle3D', class_names=class_names, with_label=False), dict(type='Collect3D', keys=['points']) ] ret_dict = scannet_dataset.evaluate( results, metric, pipeline=eval_pipeline) assert abs(ret_dict['table_AP_0.25'] - 0.3333) < 0.01 assert abs(ret_dict['window_AP_0.25'] - 1.0) < 0.01 assert abs(ret_dict['counter_AP_0.25'] - 1.0) < 0.01 assert abs(ret_dict['curtain_AP_0.25'] - 1.0) < 0.01 def test_show(): tmp_dir = tempfile.TemporaryDirectory() temp_dir = tmp_dir.name root_path = './tests/data/scannet' ann_file = './tests/data/scannet/scannet_infos.pkl' scannet_dataset = ScanNetDataset(root_path, ann_file) boxes_3d = DepthInstance3DBoxes( torch.tensor([[ -2.4053e+00, 9.2295e-01, 8.0661e-02, 2.4054e+00, 2.1468e+00, 8.5990e-01, 0.0000e+00 ], [ -1.9341e+00, -2.0741e+00, 3.0698e-03, 3.2206e-01, 2.5322e-01, 3.5144e-01, 0.0000e+00 ], [ -3.6908e+00, 8.0684e-03, 2.6201e-01, 4.1515e-01, 7.6489e-01, 5.3585e-01, 0.0000e+00 ], [ 2.6332e+00, 8.5143e-01, -4.9964e-03, 3.0367e-01, 1.3448e+00, 1.8329e+00, 0.0000e+00 ], [ 2.0221e-02, 2.6153e+00, 1.5109e-02, 7.3335e-01, 1.0429e+00, 1.0251e+00, 0.0000e+00 
]])) scores_3d = torch.tensor( [1.2058e-04, 2.3012e-03, 6.2324e-06, 6.6139e-06, 6.7965e-05]) labels_3d = torch.tensor([0, 0, 0, 0, 0]) result = dict(boxes_3d=boxes_3d, scores_3d=scores_3d, labels_3d=labels_3d) results = [result] scannet_dataset.show(results, temp_dir, show=False) pts_file_path = osp.join(temp_dir, 'scene0000_00', 'scene0000_00_points.obj') gt_file_path = osp.join(temp_dir, 'scene0000_00', 'scene0000_00_gt.obj') pred_file_path = osp.join(temp_dir, 'scene0000_00', 'scene0000_00_pred.obj') mmcv.check_file_exist(pts_file_path) mmcv.check_file_exist(gt_file_path) mmcv.check_file_exist(pred_file_path) tmp_dir.cleanup() # show function with pipeline class_names = ('cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub', 'garbagebin') eval_pipeline = [ dict( type='LoadPointsFromFile', coord_type='DEPTH', shift_height=False, load_dim=6, use_dim=[0, 1, 2]), dict(type='GlobalAlignment', rotation_axis=2), dict( type='DefaultFormatBundle3D', class_names=class_names, with_label=False), dict(type='Collect3D', keys=['points']) ] tmp_dir = tempfile.TemporaryDirectory() temp_dir = tmp_dir.name scannet_dataset.show(results, temp_dir, show=False, pipeline=eval_pipeline) pts_file_path = osp.join(temp_dir, 'scene0000_00', 'scene0000_00_points.obj') gt_file_path = osp.join(temp_dir, 'scene0000_00', 'scene0000_00_gt.obj') pred_file_path = osp.join(temp_dir, 'scene0000_00', 'scene0000_00_pred.obj') mmcv.check_file_exist(pts_file_path) mmcv.check_file_exist(gt_file_path) mmcv.check_file_exist(pred_file_path) tmp_dir.cleanup() def test_seg_getitem(): np.random.seed(0) root_path = './tests/data/scannet/' ann_file = './tests/data/scannet/scannet_infos.pkl' class_names = ('wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub', 'otherfurniture') palette = [ [174, 199, 232], [152, 223, 138], [31, 119, 180], [255, 187, 120], [188, 189, 34], [140, 86, 75], [255, 152, 150], [214, 39, 40], [197, 176, 213], [148, 103, 189], [196, 156, 148], [23, 190, 207], [247, 182, 210], [219, 219, 141], [255, 127, 14], [158, 218, 229], [44, 160, 44], [112, 128, 144], [227, 119, 194], [82, 84, 163], ] scene_idxs = [0 for _ in range(20)] # test network inputs are (xyz, rgb, normalized_xyz) pipelines = [ dict( type='LoadPointsFromFile', coord_type='DEPTH', shift_height=False, use_color=True, load_dim=6, use_dim=[0, 1, 2, 3, 4, 5]), dict( type='LoadAnnotations3D', with_bbox_3d=False, with_label_3d=False, with_mask_3d=False, with_seg_3d=True), dict( type='PointSegClassMapping', valid_cat_ids=(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39), max_cat_id=40), dict( type='IndoorPatchPointSample', num_points=5, block_size=1.5, ignore_index=len(class_names), use_normalized_coord=True, enlarge_size=0.2, min_unique_num=None), dict(type='NormalizePointsColor', color_mean=None), dict(type='DefaultFormatBundle3D', class_names=class_names), dict( type='Collect3D', keys=['points', 'pts_semantic_mask'], meta_keys=['file_name', 'sample_idx']) ] scannet_dataset = ScanNetSegDataset( data_root=root_path, ann_file=ann_file, pipeline=pipelines, classes=None, palette=None, modality=None, test_mode=False, ignore_index=None, scene_idxs=scene_idxs) data = scannet_dataset[0] points = data['points']._data pts_semantic_mask = data['pts_semantic_mask']._data file_name = 
data['img_metas']._data['file_name'] sample_idx = data['img_metas']._data['sample_idx'] assert file_name == './tests/data/scannet/points/scene0000_00.bin' assert sample_idx == 'scene0000_00' expected_points = torch.tensor([[ 0.0000, 0.0000, 1.2427, 0.6118, 0.5529, 0.4471, -0.6462, -1.0046, 0.4280 ], [ 0.1553, -0.0074, 1.6077, 0.5882, 0.6157, 0.5569, -0.6001, -1.0068, 0.5537 ], [ 0.1518, 0.6016, 0.6548, 0.1490, 0.1059, 0.0431, -0.6012, -0.8309, 0.2255 ], [ -0.7494, 0.1033, 0.6756, 0.5216, 0.4353, 0.3333, -0.8687, -0.9748, 0.2327 ], [ -0.6836, -0.0203, 0.5884, 0.5765, 0.5020, 0.4510, -0.8491, -1.0105, 0.2027 ]]) expected_pts_semantic_mask = np.array([13, 13, 12, 2, 0]) original_classes = scannet_dataset.CLASSES original_palette = scannet_dataset.PALETTE assert scannet_dataset.CLASSES == class_names assert scannet_dataset.ignore_index == 20 assert torch.allclose(points, expected_points, 1e-2) assert np.all(pts_semantic_mask.numpy() == expected_pts_semantic_mask) assert original_classes == class_names assert original_palette == palette assert scannet_dataset.scene_idxs.dtype == np.int32 assert np.all(scannet_dataset.scene_idxs == np.array(scene_idxs)) # test network inputs are (xyz, rgb) np.random.seed(0) new_pipelines = copy.deepcopy(pipelines) new_pipelines[3] = dict( type='IndoorPatchPointSample', num_points=5, block_size=1.5, ignore_index=len(class_names), use_normalized_coord=False, enlarge_size=0.2, min_unique_num=None) scannet_dataset = ScanNetSegDataset( data_root=root_path, ann_file=ann_file, pipeline=new_pipelines, scene_idxs=scene_idxs) data = scannet_dataset[0] points = data['points']._data assert torch.allclose(points, expected_points[:, :6], 1e-2) # test network inputs are (xyz, normalized_xyz) np.random.seed(0) new_pipelines = copy.deepcopy(pipelines) new_pipelines[0] = dict( type='LoadPointsFromFile', coord_type='DEPTH', shift_height=False, use_color=False, load_dim=6, use_dim=[0, 1, 2]) new_pipelines.remove(new_pipelines[4]) scannet_dataset = ScanNetSegDataset( data_root=root_path, ann_file=ann_file, pipeline=new_pipelines, scene_idxs=scene_idxs) data = scannet_dataset[0] points = data['points']._data assert torch.allclose(points, expected_points[:, [0, 1, 2, 6, 7, 8]], 1e-2) # test network inputs are (xyz,) np.random.seed(0) new_pipelines = copy.deepcopy(pipelines) new_pipelines[0] = dict( type='LoadPointsFromFile', coord_type='DEPTH', shift_height=False, use_color=False, load_dim=6, use_dim=[0, 1, 2]) new_pipelines[3] = dict( type='IndoorPatchPointSample', num_points=5, block_size=1.5, ignore_index=len(class_names), use_normalized_coord=False, enlarge_size=0.2, min_unique_num=None) new_pipelines.remove(new_pipelines[4]) scannet_dataset = ScanNetSegDataset( data_root=root_path, ann_file=ann_file, pipeline=new_pipelines, scene_idxs=scene_idxs) data = scannet_dataset[0] points = data['points']._data assert torch.allclose(points, expected_points[:, :3], 1e-2) # test dataset with selected classes scannet_dataset = ScanNetSegDataset( data_root=root_path, ann_file=ann_file, pipeline=None, classes=['cabinet', 'chair'], scene_idxs=scene_idxs) label_map = {i: 20 for i in range(41)} label_map.update({3: 0, 5: 1}) assert scannet_dataset.CLASSES != original_classes assert scannet_dataset.CLASSES == ['cabinet', 'chair'] assert scannet_dataset.PALETTE == [palette[2], palette[4]] assert scannet_dataset.VALID_CLASS_IDS == [3, 5] assert scannet_dataset.label_map == label_map assert scannet_dataset.label2cat == {0: 'cabinet', 1: 'chair'} # test load classes from file with 
tempfile.TemporaryDirectory() as tmpdir: path = tmpdir + 'classes.txt' with open(path, 'w') as f: f.write('cabinet\nchair\n') scannet_dataset = ScanNetSegDataset( data_root=root_path, ann_file=ann_file, pipeline=None, classes=path, scene_idxs=scene_idxs) assert scannet_dataset.CLASSES != original_classes assert scannet_dataset.CLASSES == ['cabinet', 'chair'] assert scannet_dataset.PALETTE == [palette[2], palette[4]] assert scannet_dataset.VALID_CLASS_IDS == [3, 5] assert scannet_dataset.label_map == label_map assert scannet_dataset.label2cat == {0: 'cabinet', 1: 'chair'} # test scene_idxs in dataset # we should input scene_idxs in train mode with pytest.raises(NotImplementedError): scannet_dataset = ScanNetSegDataset( data_root=root_path, ann_file=ann_file, pipeline=None, scene_idxs=None) # test mode scannet_dataset = ScanNetSegDataset( data_root=root_path, ann_file=ann_file, pipeline=None, test_mode=True, scene_idxs=scene_idxs) assert np.all(scannet_dataset.scene_idxs == np.array([0])) def test_seg_evaluate(): if not torch.cuda.is_available(): pytest.skip() root_path = './tests/data/scannet' ann_file = './tests/data/scannet/scannet_infos.pkl' scannet_dataset = ScanNetSegDataset( data_root=root_path, ann_file=ann_file, test_mode=True) results = [] pred_sem_mask = dict( semantic_mask=torch.tensor([ 13, 5, 1, 2, 6, 2, 13, 1, 14, 2, 0, 0, 5, 5, 3, 0, 1, 14, 0, 0, 0, 18, 6, 15, 13, 0, 2, 4, 0, 3, 16, 6, 13, 5, 13, 0, 0, 0, 0, 1, 7, 3, 19, 12, 8, 0, 11, 0, 0, 1, 2, 13, 17, 1, 1, 1, 6, 2, 13, 19, 4, 17, 0, 14, 1, 7, 2, 1, 7, 2, 0, 5, 17, 5, 0, 0, 3, 6, 5, 11, 1, 13, 13, 2, 3, 1, 0, 13, 19, 1, 14, 5, 3, 1, 13, 1, 2, 3, 2, 1 ]).long()) results.append(pred_sem_mask) class_names = ('wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub', 'otherfurniture') eval_pipeline = [ dict( type='LoadPointsFromFile', coord_type='DEPTH', shift_height=False, use_color=True, load_dim=6, use_dim=[0, 1, 2, 3, 4, 5]), dict( type='LoadAnnotations3D', with_bbox_3d=False, with_label_3d=False, with_mask_3d=False, with_seg_3d=True), dict( type='PointSegClassMapping', valid_cat_ids=(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39), max_cat_id=40), dict(type='DefaultFormatBundle3D', class_names=class_names), dict(type='Collect3D', keys=['points', 'pts_semantic_mask']) ] ret_dict = scannet_dataset.evaluate(results, pipeline=eval_pipeline) assert abs(ret_dict['miou'] - 0.5308) < 0.01 assert abs(ret_dict['acc'] - 0.8219) < 0.01 assert abs(ret_dict['acc_cls'] - 0.7649) < 0.01 def test_seg_show(): tmp_dir = tempfile.TemporaryDirectory() temp_dir = tmp_dir.name root_path = './tests/data/scannet' ann_file = './tests/data/scannet/scannet_infos.pkl' scannet_dataset = ScanNetSegDataset( data_root=root_path, ann_file=ann_file, scene_idxs=[0]) result = dict( semantic_mask=torch.tensor([ 13, 5, 1, 2, 6, 2, 13, 1, 14, 2, 0, 0, 5, 5, 3, 0, 1, 14, 0, 0, 0, 18, 6, 15, 13, 0, 2, 4, 0, 3, 16, 6, 13, 5, 13, 0, 0, 0, 0, 1, 7, 3, 19, 12, 8, 0, 11, 0, 0, 1, 2, 13, 17, 1, 1, 1, 6, 2, 13, 19, 4, 17, 0, 14, 1, 7, 2, 1, 7, 2, 0, 5, 17, 5, 0, 0, 3, 6, 5, 11, 1, 13, 13, 2, 3, 1, 0, 13, 19, 1, 14, 5, 3, 1, 13, 1, 2, 3, 2, 1 ]).long()) results = [result] scannet_dataset.show(results, temp_dir, show=False) pts_file_path = osp.join(temp_dir, 'scene0000_00', 'scene0000_00_points.obj') gt_file_path = osp.join(temp_dir, 'scene0000_00', 'scene0000_00_gt.obj') pred_file_path = osp.join(temp_dir, 
'scene0000_00', 'scene0000_00_pred.obj') mmcv.check_file_exist(pts_file_path) mmcv.check_file_exist(gt_file_path) mmcv.check_file_exist(pred_file_path) tmp_dir.cleanup() # test show with pipeline tmp_dir = tempfile.TemporaryDirectory() temp_dir = tmp_dir.name class_names = ('wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub', 'otherfurniture') eval_pipeline = [ dict( type='LoadPointsFromFile', coord_type='DEPTH', shift_height=False, use_color=True, load_dim=6, use_dim=[0, 1, 2, 3, 4, 5]), dict( type='LoadAnnotations3D', with_bbox_3d=False, with_label_3d=False, with_mask_3d=False, with_seg_3d=True), dict( type='PointSegClassMapping', valid_cat_ids=(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39), max_cat_id=40), dict(type='DefaultFormatBundle3D', class_names=class_names), dict(type='Collect3D', keys=['points', 'pts_semantic_mask']) ] scannet_dataset.show(results, temp_dir, show=False, pipeline=eval_pipeline) pts_file_path = osp.join(temp_dir, 'scene0000_00', 'scene0000_00_points.obj') gt_file_path = osp.join(temp_dir, 'scene0000_00', 'scene0000_00_gt.obj') pred_file_path = osp.join(temp_dir, 'scene0000_00', 'scene0000_00_pred.obj') mmcv.check_file_exist(pts_file_path) mmcv.check_file_exist(gt_file_path) mmcv.check_file_exist(pred_file_path) tmp_dir.cleanup() def test_seg_format_results(): root_path = './tests/data/scannet' ann_file = './tests/data/scannet/scannet_infos.pkl' scannet_dataset = ScanNetSegDataset( data_root=root_path, ann_file=ann_file, test_mode=True) results = [] pred_sem_mask = dict( semantic_mask=torch.tensor([ 13, 5, 1, 2, 6, 2, 13, 1, 14, 2, 0, 0, 5, 5, 3, 0, 1, 14, 0, 0, 0, 18, 6, 15, 13, 0, 2, 4, 0, 3, 16, 6, 13, 5, 13, 0, 0, 0, 0, 1, 7, 3, 19, 12, 8, 0, 11, 0, 0, 1, 2, 13, 17, 1, 1, 1, 6, 2, 13, 19, 4, 17, 0, 14, 1, 7, 2, 1, 7, 2, 0, 5, 17, 5, 0, 0, 3, 6, 5, 11, 1, 13, 13, 2, 3, 1, 0, 13, 19, 1, 14, 5, 3, 1, 13, 1, 2, 3, 2, 1 ]).long()) results.append(pred_sem_mask) result_files, tmp_dir = scannet_dataset.format_results(results) expected_label = np.array([ 16, 6, 2, 3, 7, 3, 16, 2, 24, 3, 1, 1, 6, 6, 4, 1, 2, 24, 1, 1, 1, 36, 7, 28, 16, 1, 3, 5, 1, 4, 33, 7, 16, 6, 16, 1, 1, 1, 1, 2, 8, 4, 39, 14, 9, 1, 12, 1, 1, 2, 3, 16, 34, 2, 2, 2, 7, 3, 16, 39, 5, 34, 1, 24, 2, 8, 3, 2, 8, 3, 1, 6, 34, 6, 1, 1, 4, 7, 6, 12, 2, 16, 16, 3, 4, 2, 1, 16, 39, 2, 24, 6, 4, 2, 16, 2, 3, 4, 3, 2 ]) expected_txt_path = osp.join(tmp_dir.name, 'results', 'scene0000_00.txt') assert np.all(result_files[0]['seg_mask'] == expected_label) mmcv.check_file_exist(expected_txt_path) def test_instance_seg_getitem(): np.random.seed(0) root_path = './tests/data/scannet/' ann_file = './tests/data/scannet/scannet_infos.pkl' class_names = ('cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub', 'garbagebin') train_pipeline = [ dict( type='LoadPointsFromFile', coord_type='DEPTH', shift_height=False, use_color=True, load_dim=6, use_dim=[0, 1, 2, 3, 4, 5]), dict( type='LoadAnnotations3D', with_bbox_3d=False, with_label_3d=False, with_mask_3d=True, with_seg_3d=True), dict( type='PointSegClassMapping', valid_cat_ids=(3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39), max_cat_id=40), dict(type='NormalizePointsColor', color_mean=None), dict(type='DefaultFormatBundle3D', class_names=class_names), dict( 
type='Collect3D', keys=['points', 'pts_semantic_mask', 'pts_instance_mask']) ]
scannet_dataset = ScanNetInstanceSegDataset(
1
2023-12-21 12:50:35+00:00
16k
v3ucn/Bert-vits2-V2.2
train_ms.py
[ { "identifier": "config", "path": "config.py", "snippet": "class Resample_config:\nclass Preprocess_text_config:\nclass Bert_gen_config:\nclass Emo_gen_config:\nclass Train_ms_config:\nclass Webui_config:\nclass Server_config:\nclass Translate_config:\nclass Config:\n def __init__(self, in_dir: str, ...
import platform
import os
import torch
import torch.distributed as dist
import logging
import argparse
import datetime
import gc
import commons
import utils
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.cuda.amp import autocast, GradScaler
from tqdm import tqdm
from config import config
from data_utils import (
    TextAudioSpeakerLoader,
    TextAudioSpeakerCollate,
    DistributedBucketSampler,
)
from models import (
    SynthesizerTrn,
    MultiPeriodDiscriminator,
    DurationDiscriminator,
)
from losses import generator_loss, discriminator_loss, feature_loss, kl_loss
from mel_processing import mel_spectrogram_torch, spec_to_mel_torch
from text.symbols import symbols
11,642
def train_and_evaluate( rank, local_rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers, ): net_g, net_d, net_dur_disc = nets optim_g, optim_d, optim_dur_disc = optims scheduler_g, scheduler_d, scheduler_dur_disc = schedulers train_loader, eval_loader = loaders if writers is not None: writer, writer_eval = writers train_loader.batch_sampler.set_epoch(epoch) global global_step net_g.train() net_d.train() if net_dur_disc is not None: net_dur_disc.train() for batch_idx, ( x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert, ja_bert, en_bert, emo, ) in enumerate(tqdm(train_loader)): if net_g.module.use_noise_scaled_mas: current_mas_noise_scale = ( net_g.module.mas_noise_scale_initial - net_g.module.noise_scale_delta * global_step ) net_g.module.current_mas_noise_scale = max(current_mas_noise_scale, 0.0) x, x_lengths = x.cuda(local_rank, non_blocking=True), x_lengths.cuda( local_rank, non_blocking=True ) spec, spec_lengths = spec.cuda( local_rank, non_blocking=True ), spec_lengths.cuda(local_rank, non_blocking=True) y, y_lengths = y.cuda(local_rank, non_blocking=True), y_lengths.cuda( local_rank, non_blocking=True ) speakers = speakers.cuda(local_rank, non_blocking=True) tone = tone.cuda(local_rank, non_blocking=True) language = language.cuda(local_rank, non_blocking=True) bert = bert.cuda(local_rank, non_blocking=True) ja_bert = ja_bert.cuda(local_rank, non_blocking=True) en_bert = en_bert.cuda(local_rank, non_blocking=True) emo = emo.cuda(local_rank, non_blocking=True) with autocast(enabled=hps.train.fp16_run): ( y_hat, l_length, attn, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q), (hidden_x, logw, logw_), g, loss_commit, ) = net_g( x, x_lengths, spec, spec_lengths, speakers, tone, language, bert, ja_bert, en_bert, emo, ) mel = spec_to_mel_torch( spec, hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.mel_fmin, hps.data.mel_fmax, ) y_mel = commons.slice_segments( mel, ids_slice, hps.train.segment_size // hps.data.hop_length ) y_hat_mel = mel_spectrogram_torch( y_hat.squeeze(1), hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.hop_length, hps.data.win_length, hps.data.mel_fmin, hps.data.mel_fmax, ) y = commons.slice_segments( y, ids_slice * hps.data.hop_length, hps.train.segment_size ) # slice # Discriminator y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach()) with autocast(enabled=False):
# flake8: noqa: E402 logging.getLogger("numba").setLevel(logging.WARNING) torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.allow_tf32 = ( True # If encontered training problem,please try to disable TF32. ) torch.set_float32_matmul_precision("medium") torch.backends.cuda.sdp_kernel("flash") torch.backends.cuda.enable_flash_sdp(True) torch.backends.cuda.enable_mem_efficient_sdp( True ) # Not available if torch version is lower than 2.0 torch.backends.cuda.enable_math_sdp(True) global_step = 0 def run(): # 环境变量解析 envs = config.train_ms_config.env for env_name, env_value in envs.items(): if env_name not in os.environ.keys(): print("加载config中的配置{}".format(str(env_value))) os.environ[env_name] = str(env_value) print( "加载环境变量 \nMASTER_ADDR: {},\nMASTER_PORT: {},\nWORLD_SIZE: {},\nRANK: {},\nLOCAL_RANK: {}".format( os.environ["MASTER_ADDR"], os.environ["MASTER_PORT"], os.environ["WORLD_SIZE"], os.environ["RANK"], os.environ["LOCAL_RANK"], ) ) backend = "nccl" if platform.system() == "Windows": backend = "gloo" # If Windows,switch to gloo backend. dist.init_process_group( backend=backend, init_method="env://", timeout=datetime.timedelta(seconds=300), ) # Use torchrun instead of mp.spawn rank = dist.get_rank() local_rank = int(os.environ["LOCAL_RANK"]) n_gpus = dist.get_world_size() # 命令行/config.yml配置解析 # hps = utils.get_hparams() parser = argparse.ArgumentParser() # 非必要不建议使用命令行配置,请使用config.yml文件 parser.add_argument( "-c", "--config", type=str, default=config.train_ms_config.config_path, help="JSON file for configuration", ) parser.add_argument( "-m", "--model", type=str, help="数据集文件夹路径,请注意,数据不再默认放在/logs文件夹下。如果需要用命令行配置,请声明相对于根目录的路径", default=config.dataset_path, ) args = parser.parse_args() model_dir = os.path.join(args.model, config.train_ms_config.model) if not os.path.exists(model_dir): os.makedirs(model_dir) hps = utils.get_hparams_from_file(args.config) hps.model_dir = model_dir # 比较路径是否相同 if os.path.realpath(args.config) != os.path.realpath( config.train_ms_config.config_path ): with open(args.config, "r", encoding="utf-8") as f: data = f.read() with open(config.train_ms_config.config_path, "w", encoding="utf-8") as f: f.write(data) torch.manual_seed(hps.train.seed) torch.cuda.set_device(local_rank) global global_step if rank == 0: logger = utils.get_logger(hps.model_dir) logger.info(hps) utils.check_git_hash(hps.model_dir) writer = SummaryWriter(log_dir=hps.model_dir) writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval")) train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data) train_sampler = DistributedBucketSampler( train_dataset, hps.train.batch_size, [32, 300, 400, 500, 600, 700, 800, 900, 1000], num_replicas=n_gpus, rank=rank, shuffle=True, ) collate_fn = TextAudioSpeakerCollate() train_loader = DataLoader( train_dataset, num_workers=min(config.train_ms_config.num_workers, os.cpu_count() - 1), shuffle=False, pin_memory=True, collate_fn=collate_fn, batch_sampler=train_sampler, persistent_workers=True, prefetch_factor=4, ) # DataLoader config could be adjusted. 
if rank == 0: eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data) eval_loader = DataLoader( eval_dataset, num_workers=0, shuffle=False, batch_size=1, pin_memory=True, drop_last=False, collate_fn=collate_fn, ) if ( "use_noise_scaled_mas" in hps.model.keys() and hps.model.use_noise_scaled_mas is True ): print("Using noise scaled MAS for VITS2") mas_noise_scale_initial = 0.01 noise_scale_delta = 2e-6 else: print("Using normal MAS for VITS1") mas_noise_scale_initial = 0.0 noise_scale_delta = 0.0 if ( "use_duration_discriminator" in hps.model.keys() and hps.model.use_duration_discriminator is True ): print("Using duration discriminator for VITS2") net_dur_disc = DurationDiscriminator( hps.model.hidden_channels, hps.model.hidden_channels, 3, 0.1, gin_channels=hps.model.gin_channels if hps.data.n_speakers != 0 else 0, ).cuda(local_rank) if ( "use_spk_conditioned_encoder" in hps.model.keys() and hps.model.use_spk_conditioned_encoder is True ): if hps.data.n_speakers == 0: raise ValueError( "n_speakers must be > 0 when using spk conditioned encoder to train multi-speaker model" ) else: print("Using normal encoder for VITS1") net_g = SynthesizerTrn( len(symbols), hps.data.filter_length // 2 + 1, hps.train.segment_size // hps.data.hop_length, n_speakers=hps.data.n_speakers, mas_noise_scale_initial=mas_noise_scale_initial, noise_scale_delta=noise_scale_delta, **hps.model, ).cuda(local_rank) if getattr(hps.train, "freeze_ZH_bert", False): print("Freezing ZH bert encoder !!!") for param in net_g.enc_p.bert_proj.parameters(): param.requires_grad = False if getattr(hps.train, "freeze_EN_bert", False): print("Freezing EN bert encoder !!!") for param in net_g.enc_p.en_bert_proj.parameters(): param.requires_grad = False if getattr(hps.train, "freeze_JP_bert", False): print("Freezing JP bert encoder !!!") for param in net_g.enc_p.ja_bert_proj.parameters(): param.requires_grad = False net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(local_rank) optim_g = torch.optim.AdamW( filter(lambda p: p.requires_grad, net_g.parameters()), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps, ) optim_d = torch.optim.AdamW( net_d.parameters(), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps, ) if net_dur_disc is not None: optim_dur_disc = torch.optim.AdamW( net_dur_disc.parameters(), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps, ) else: optim_dur_disc = None net_g = DDP(net_g, device_ids=[local_rank], bucket_cap_mb=512) net_d = DDP(net_d, device_ids=[local_rank], bucket_cap_mb=512) dur_resume_lr = None if net_dur_disc is not None: net_dur_disc = DDP( net_dur_disc, device_ids=[local_rank], find_unused_parameters=True, bucket_cap_mb=512, ) # 下载底模 if config.train_ms_config.base["use_base_model"]: utils.download_checkpoint( hps.model_dir, config.train_ms_config.base, token=config.openi_token, mirror=config.mirror, ) try: if net_dur_disc is not None: _, _, dur_resume_lr, epoch_str = utils.load_checkpoint( utils.latest_checkpoint_path(hps.model_dir, "DUR_*.pth"), net_dur_disc, optim_dur_disc, skip_optimizer=hps.train.skip_optimizer if "skip_optimizer" in hps.train else True, ) _, optim_g, g_resume_lr, epoch_str = utils.load_checkpoint( utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g, skip_optimizer=hps.train.skip_optimizer if "skip_optimizer" in hps.train else True, ) _, optim_d, d_resume_lr, epoch_str = utils.load_checkpoint( utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, optim_d, 
skip_optimizer=hps.train.skip_optimizer if "skip_optimizer" in hps.train else True, ) if not optim_g.param_groups[0].get("initial_lr"): optim_g.param_groups[0]["initial_lr"] = g_resume_lr if not optim_d.param_groups[0].get("initial_lr"): optim_d.param_groups[0]["initial_lr"] = d_resume_lr if not optim_dur_disc.param_groups[0].get("initial_lr"): optim_dur_disc.param_groups[0]["initial_lr"] = dur_resume_lr epoch_str = max(epoch_str, 1) # global_step = (epoch_str - 1) * len(train_loader) global_step = int( utils.get_steps(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth")) ) print( f"******************检测到模型存在,epoch为 {epoch_str},gloabl step为 {global_step}*********************" ) except Exception as e: print(e) epoch_str = 1 global_step = 0 scheduler_g = torch.optim.lr_scheduler.ExponentialLR( optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 ) scheduler_d = torch.optim.lr_scheduler.ExponentialLR( optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 ) if net_dur_disc is not None: if not optim_dur_disc.param_groups[0].get("initial_lr"): optim_dur_disc.param_groups[0]["initial_lr"] = dur_resume_lr scheduler_dur_disc = torch.optim.lr_scheduler.ExponentialLR( optim_dur_disc, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 ) else: scheduler_dur_disc = None scaler = GradScaler(enabled=hps.train.fp16_run) for epoch in range(epoch_str, hps.train.epochs + 1): if rank == 0: train_and_evaluate( rank, local_rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, eval_loader], logger, [writer, writer_eval], ) else: train_and_evaluate( rank, local_rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, None], None, None, ) scheduler_g.step() scheduler_d.step() if net_dur_disc is not None: scheduler_dur_disc.step() def train_and_evaluate( rank, local_rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers, ): net_g, net_d, net_dur_disc = nets optim_g, optim_d, optim_dur_disc = optims scheduler_g, scheduler_d, scheduler_dur_disc = schedulers train_loader, eval_loader = loaders if writers is not None: writer, writer_eval = writers train_loader.batch_sampler.set_epoch(epoch) global global_step net_g.train() net_d.train() if net_dur_disc is not None: net_dur_disc.train() for batch_idx, ( x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert, ja_bert, en_bert, emo, ) in enumerate(tqdm(train_loader)): if net_g.module.use_noise_scaled_mas: current_mas_noise_scale = ( net_g.module.mas_noise_scale_initial - net_g.module.noise_scale_delta * global_step ) net_g.module.current_mas_noise_scale = max(current_mas_noise_scale, 0.0) x, x_lengths = x.cuda(local_rank, non_blocking=True), x_lengths.cuda( local_rank, non_blocking=True ) spec, spec_lengths = spec.cuda( local_rank, non_blocking=True ), spec_lengths.cuda(local_rank, non_blocking=True) y, y_lengths = y.cuda(local_rank, non_blocking=True), y_lengths.cuda( local_rank, non_blocking=True ) speakers = speakers.cuda(local_rank, non_blocking=True) tone = tone.cuda(local_rank, non_blocking=True) language = language.cuda(local_rank, non_blocking=True) bert = bert.cuda(local_rank, non_blocking=True) ja_bert = ja_bert.cuda(local_rank, non_blocking=True) en_bert = en_bert.cuda(local_rank, non_blocking=True) emo = emo.cuda(local_rank, non_blocking=True) with autocast(enabled=hps.train.fp16_run): ( y_hat, l_length, 
attn, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q), (hidden_x, logw, logw_), g, loss_commit, ) = net_g( x, x_lengths, spec, spec_lengths, speakers, tone, language, bert, ja_bert, en_bert, emo, ) mel = spec_to_mel_torch( spec, hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.mel_fmin, hps.data.mel_fmax, ) y_mel = commons.slice_segments( mel, ids_slice, hps.train.segment_size // hps.data.hop_length ) y_hat_mel = mel_spectrogram_torch( y_hat.squeeze(1), hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.hop_length, hps.data.win_length, hps.data.mel_fmin, hps.data.mel_fmax, ) y = commons.slice_segments( y, ids_slice * hps.data.hop_length, hps.train.segment_size ) # slice # Discriminator y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach()) with autocast(enabled=False):
loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(
8
2023-12-18 04:54:46+00:00
16k
m-abr/FCPCodebase
world/Robot.py
[ { "identifier": "Math_Ops", "path": "math_ops/Math_Ops.py", "snippet": "class Math_Ops():\n '''\n This class provides general mathematical operations that are not directly available through numpy \n '''\n \n @staticmethod\n def deg_sph2cart(spherical_vec):\n ''' Converts SimSpark'...
from collections import deque
from math import atan, pi, sqrt, tan
from math_ops.Math_Ops import Math_Ops as M
from math_ops.Matrix_3x3 import Matrix_3x3
from math_ops.Matrix_4x4 import Matrix_4x4
from world.commons.Body_Part import Body_Part
from world.commons.Joint_Info import Joint_Info
import numpy as np
import xml.etree.ElementTree as xmlp
12,934
class Robot(): STEPTIME = 0.02 # Fixed step time VISUALSTEP = 0.04 # Fixed visual step time SQ_STEPTIME = STEPTIME * STEPTIME GRAVITY = np.array([0,0,-9.81]) IMU_DECAY = 0.996 #IMU's velocity decay #------------------ constants to force symmetry in joints/effectors MAP_PERCEPTOR_TO_INDEX = {"hj1":0, "hj2":1, "llj1":2, "rlj1":3, "llj2":4, "rlj2":5, "llj3":6, "rlj3":7, "llj4":8, "rlj4":9, "llj5":10,"rlj5":11, "llj6":12,"rlj6":13,"laj1":14,"raj1":15, "laj2":16,"raj2":17,"laj3":18,"raj3":19, "laj4":20,"raj4":21,"llj7":22,"rlj7":23 } # Fix symmetry issues 1a/4 (identification) FIX_PERCEPTOR_SET = {'rlj2','rlj6','raj2','laj3','laj4'} FIX_INDICES_LIST = [5,13,17,18,20] # Recommended height for unofficial beam (near ground) BEAM_HEIGHTS = [0.4, 0.43, 0.4, 0.46, 0.4] def __init__(self, unum:int, robot_type:int) -> None: robot_xml = "nao"+str(robot_type)+".xml" # Typical NAO file name self.type = robot_type self.beam_height = Robot.BEAM_HEIGHTS[robot_type] self.no_of_joints = 24 if robot_type == 4 else 22 #Fix symmetry issues 1b/4 (identification) self.FIX_EFFECTOR_MASK = np.ones(self.no_of_joints) self.FIX_EFFECTOR_MASK[Robot.FIX_INDICES_LIST] = -1 self.body_parts = dict() # keys='body part names' (given by the robot's XML), values='Body_Part objects' self.unum = unum # Robot's uniform number self.gyro = np.zeros(3) # Angular velocity along the three axes of freedom of the robot's torso (deg/s) self.acc = np.zeros(3) # Proper acceleration along the three axes of freedom of the robot's torso (m/s2) self.frp = dict() # foot "lf"/"rf", toe "lf1"/"rf1" resistance perceptor (relative [p]oint of origin + [f]orce vector) e.g. {"lf":(px,py,pz,fx,fy,fz)} self.feet_toes_last_touch = {"lf":0,"rf":0,"lf1":0,"rf1":0} # foot "lf"/"rf", toe "lf1"/"rf1" World.time_local_ms when foot/toe last touched any surface self.feet_toes_are_touching = {"lf":False,"rf":False,"lf1":False,"rf1":False} # foot "lf"/"rf", toe "lf1"/"rf1" True if touching in last received server message self.fwd_kinematics_list = None # List of body parts, ordered according to dependencies self.rel_cart_CoM_position = np.zeros(3) # Center of Mass position, relative to head, in cartesian coordinates (m) # Joint variables are optimized for performance / array operations self.joints_position = np.zeros(self.no_of_joints) # Joints' angular position (deg) self.joints_speed = np.zeros(self.no_of_joints) # Joints' angular speed (rad/s) self.joints_target_speed = np.zeros(self.no_of_joints) # Joints' target speed (rad/s) (max: 6.1395 rad/s, see rcssserver3d/data/rsg/agent/nao/hingejoint.rsg) self.joints_target_last_speed = np.zeros(self.no_of_joints) # Joints' last target speed (rad/s) (max: 6.1395 rad/s, see rcssserver3d/data/rsg/agent/nao/hingejoint.rsg) self.joints_info = [None] * self.no_of_joints # Joints' constant information (see class Joint_Info) self.joints_transform = [Matrix_4x4() for _ in range(self.no_of_joints)] # Joints' transformation matrix # Localization variables relative to head self.loc_head_to_field_transform = Matrix_4x4() # Transformation matrix from head to field self.loc_field_to_head_transform = Matrix_4x4() # Transformation matrix from field to head
class Robot(): STEPTIME = 0.02 # Fixed step time VISUALSTEP = 0.04 # Fixed visual step time SQ_STEPTIME = STEPTIME * STEPTIME GRAVITY = np.array([0,0,-9.81]) IMU_DECAY = 0.996 #IMU's velocity decay #------------------ constants to force symmetry in joints/effectors MAP_PERCEPTOR_TO_INDEX = {"hj1":0, "hj2":1, "llj1":2, "rlj1":3, "llj2":4, "rlj2":5, "llj3":6, "rlj3":7, "llj4":8, "rlj4":9, "llj5":10,"rlj5":11, "llj6":12,"rlj6":13,"laj1":14,"raj1":15, "laj2":16,"raj2":17,"laj3":18,"raj3":19, "laj4":20,"raj4":21,"llj7":22,"rlj7":23 } # Fix symmetry issues 1a/4 (identification) FIX_PERCEPTOR_SET = {'rlj2','rlj6','raj2','laj3','laj4'} FIX_INDICES_LIST = [5,13,17,18,20] # Recommended height for unofficial beam (near ground) BEAM_HEIGHTS = [0.4, 0.43, 0.4, 0.46, 0.4] def __init__(self, unum:int, robot_type:int) -> None: robot_xml = "nao"+str(robot_type)+".xml" # Typical NAO file name self.type = robot_type self.beam_height = Robot.BEAM_HEIGHTS[robot_type] self.no_of_joints = 24 if robot_type == 4 else 22 #Fix symmetry issues 1b/4 (identification) self.FIX_EFFECTOR_MASK = np.ones(self.no_of_joints) self.FIX_EFFECTOR_MASK[Robot.FIX_INDICES_LIST] = -1 self.body_parts = dict() # keys='body part names' (given by the robot's XML), values='Body_Part objects' self.unum = unum # Robot's uniform number self.gyro = np.zeros(3) # Angular velocity along the three axes of freedom of the robot's torso (deg/s) self.acc = np.zeros(3) # Proper acceleration along the three axes of freedom of the robot's torso (m/s2) self.frp = dict() # foot "lf"/"rf", toe "lf1"/"rf1" resistance perceptor (relative [p]oint of origin + [f]orce vector) e.g. {"lf":(px,py,pz,fx,fy,fz)} self.feet_toes_last_touch = {"lf":0,"rf":0,"lf1":0,"rf1":0} # foot "lf"/"rf", toe "lf1"/"rf1" World.time_local_ms when foot/toe last touched any surface self.feet_toes_are_touching = {"lf":False,"rf":False,"lf1":False,"rf1":False} # foot "lf"/"rf", toe "lf1"/"rf1" True if touching in last received server message self.fwd_kinematics_list = None # List of body parts, ordered according to dependencies self.rel_cart_CoM_position = np.zeros(3) # Center of Mass position, relative to head, in cartesian coordinates (m) # Joint variables are optimized for performance / array operations self.joints_position = np.zeros(self.no_of_joints) # Joints' angular position (deg) self.joints_speed = np.zeros(self.no_of_joints) # Joints' angular speed (rad/s) self.joints_target_speed = np.zeros(self.no_of_joints) # Joints' target speed (rad/s) (max: 6.1395 rad/s, see rcssserver3d/data/rsg/agent/nao/hingejoint.rsg) self.joints_target_last_speed = np.zeros(self.no_of_joints) # Joints' last target speed (rad/s) (max: 6.1395 rad/s, see rcssserver3d/data/rsg/agent/nao/hingejoint.rsg) self.joints_info = [None] * self.no_of_joints # Joints' constant information (see class Joint_Info) self.joints_transform = [Matrix_4x4() for _ in range(self.no_of_joints)] # Joints' transformation matrix # Localization variables relative to head self.loc_head_to_field_transform = Matrix_4x4() # Transformation matrix from head to field self.loc_field_to_head_transform = Matrix_4x4() # Transformation matrix from field to head
self.loc_rotation_head_to_field = Matrix_3x3() # Rotation matrix from head to field
1
2023-12-16 23:40:23+00:00
16k
Sam-Izdat/tinycio
src/tinycio/util/colorutil.py
[ { "identifier": "Float2", "path": "src/tinycio/numerics/vector.py", "snippet": "class Float2(np.ndarray):\n \"\"\"\n Float2 type using numpy.ndarray.\n \"\"\"\n def __new__(cls, *args):\n if len(args) == 1:\n if isinstance(args[0], list) or isinstance(args[0], tuple):\n ...
import typing import torch import numpy as np from typing import Union from ..numerics import Float2, Float3
11,196
def col_hsv_to_rgb(hsv:Union[Float3, Color]) -> Float3: """ Convert HSV color to RGB. :param hsv: HSV color :type hsv: Float3 | Color """ h, s, v = hsv.x, hsv.y, hsv.z i = np.floor(h * 6) f = h * 6 - i p = v * (1 - s) q = v * (1 - f * s) t = v * (1 - (1 - f) * s) r, g, b = [ (v, t, p), (q, v, p), (p, v, t), (p, q, v), (t, p, v), (v, p, q), ][int(i%6)] return Float3(r, g, b) def col_rgb_to_hsv(rgb:Union[Float3, Color]) -> Float3: """ Convert RGB color to HSV. :param rgb: RGB color :type rgb: Float3 | Color """ r, g, b = rgb.r, rgb.g, rgb.b high = np.max([r, g, b]) low = np.min([r, g, b]) h, s, v = high, high, high d = high - low s = 0 if high == 0 else d/high if high == low: h = 0.0 else: h = { r: (g - b) / d + (6 if g < b else 0), g: (b - r) / d + 2, b: (r - g) / d + 4, }[high] h /= 6 return Float3(h, s, v) # My fairly lazy OKHSV/OKHSL "port" of: # https://github.com/holbrookdev/ok-color-picker # MIT License # Copyright (c) 2022 Brian Holbrook # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. __ok_eps = 1e-7 def col_oklab_to_linear_srgb(lab:Union[Float3, Color]) -> Float3: """ Convert OKLAB color to linear sRGB. :param lab: OKLAB color :type lab: Float3 | Color """ L, a, b = lab.x, lab.y, lab.z l_ = L + 0.3963377774 * a + 0.2158037573 * b m_ = L - 0.1055613458 * a - 0.0638541728 * b s_ = L - 0.0894841775 * a - 1.291485548 * b l = l_ * l_ * l_ m = m_ * m_ * m_ s = s_ * s_ * s_ res = Float3( +4.0767416621 * l - 3.3077115913 * m + 0.2309699292 * s, -1.2684380046 * l + 2.6097574011 * m - 0.3413193965 * s, -0.0041960863 * l - 0.7034186147 * m + 1.707614701 * s ) return res def col_linear_srgb_to_oklab(srgb:Union[Float3, Color]) -> Float3: """ Convert linear sRGB color to OKLAB. :param srgb: linear sRGB color :type srgb: Float3 | Color """ r, g, b = srgb.r, srgb.g, srgb.b l = 0.4122214708 * r + 0.5363325363 * g + 0.0514459929 * b m = 0.2119034982 * r + 0.6806995451 * g + 0.1073969566 * b s = 0.0883024619 * r + 0.2817188376 * g + 0.6299787005 * b l_ = np.cbrt(l) m_ = np.cbrt(m) s_ = np.cbrt(s) res = Float3( 0.2104542553 * l_ + 0.793617785 * m_ - 0.0040720468 * s_, 1.9779984951 * l_ - 2.428592205 * m_ + 0.4505937099 * s_, 0.0259040371 * l_ + 0.7827717662 * m_ - 0.808675766 * s_ ) return res
from __future__ import annotations def srgb_luminance(im_srgb:Union[torch.Tensor, ColorImage]) -> torch.Tensor: """ Return relative luminance of linear sRGB image. :param im_srgb: [C=3, H, W] color image tensor in sRGB color space :type im_srgb: torch.Tensor | ColorImage :return: [C=1, H, W] image tensor """ lum_r, lum_g, lum_b = 0.2126, 0.7152, 0.0722 return lum_r * im_srgb[0:1,...] + lum_g * im_srgb[1:2,...] + lum_b * im_srgb[2:3,...] def apply_gamma(im:Union[torch.Tensor, ColorImage], gamma:float) -> torch.Tensor: """ Apply arbitrary gamma correction. :param im: Image tensor :type im: torch.Tensor | ColorImage :param gamma: Gamma correction (should be in the range [0.1, 10.0]) :return: Gamma-corrected image tensor """ if gamma == 1.: return im assert 0.1 <= gamma <= 10.0, "gamma value should be in range [0.1, 10.0]" im = torch.pow(im, gamma) def apply_hue_oklab(im_oklab:Union[torch.Tensor, ColorImage], hue_delta:float) -> torch.Tensor: """ Manually shift hue of an image by a -1 to +1 delta value. :param im_oklab: Image tensor in OKLAB color space :type im_oklab: torch.Tensor | ColorImage :param hue_delta: Hue shift value in the range [-1., 1.] :return: Image tensor in OKLAB color space with adjusted hue """ assert -1. <= hue_delta <= 1., "hue_delta value should be in range [-1., 1.]" L, a, b = im_oklab[0:1], im_oklab[1:2], im_oklab[2:3] hue_delta = ((hue_delta * 0.5) % 1.) * 2. * torch.pi # Calculate angle and magnitude in the a-b plane angle = torch.atan2(b, a) magnitude = torch.sqrt(a**2 + b**2) # Apply hue correction angle += hue_delta # Convert back to Cartesian coordinates a_corrected = magnitude * torch.cos(angle) b_corrected = magnitude * torch.sin(angle) corrected = torch.cat([L, a_corrected, b_corrected], dim=0) return corrected return im def col_hsv_to_rgb(hsv:Union[Float3, Color]) -> Float3: """ Convert HSV color to RGB. :param hsv: HSV color :type hsv: Float3 | Color """ h, s, v = hsv.x, hsv.y, hsv.z i = np.floor(h * 6) f = h * 6 - i p = v * (1 - s) q = v * (1 - f * s) t = v * (1 - (1 - f) * s) r, g, b = [ (v, t, p), (q, v, p), (p, v, t), (p, q, v), (t, p, v), (v, p, q), ][int(i%6)] return Float3(r, g, b) def col_rgb_to_hsv(rgb:Union[Float3, Color]) -> Float3: """ Convert RGB color to HSV. :param rgb: RGB color :type rgb: Float3 | Color """ r, g, b = rgb.r, rgb.g, rgb.b high = np.max([r, g, b]) low = np.min([r, g, b]) h, s, v = high, high, high d = high - low s = 0 if high == 0 else d/high if high == low: h = 0.0 else: h = { r: (g - b) / d + (6 if g < b else 0), g: (b - r) / d + 2, b: (r - g) / d + 4, }[high] h /= 6 return Float3(h, s, v) # My fairly lazy OKHSV/OKHSL "port" of: # https://github.com/holbrookdev/ok-color-picker # MIT License # Copyright (c) 2022 Brian Holbrook # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. __ok_eps = 1e-7 def col_oklab_to_linear_srgb(lab:Union[Float3, Color]) -> Float3: """ Convert OKLAB color to linear sRGB. :param lab: OKLAB color :type lab: Float3 | Color """ L, a, b = lab.x, lab.y, lab.z l_ = L + 0.3963377774 * a + 0.2158037573 * b m_ = L - 0.1055613458 * a - 0.0638541728 * b s_ = L - 0.0894841775 * a - 1.291485548 * b l = l_ * l_ * l_ m = m_ * m_ * m_ s = s_ * s_ * s_ res = Float3( +4.0767416621 * l - 3.3077115913 * m + 0.2309699292 * s, -1.2684380046 * l + 2.6097574011 * m - 0.3413193965 * s, -0.0041960863 * l - 0.7034186147 * m + 1.707614701 * s ) return res def col_linear_srgb_to_oklab(srgb:Union[Float3, Color]) -> Float3: """ Convert linear sRGB color to OKLAB. :param srgb: linear sRGB color :type srgb: Float3 | Color """ r, g, b = srgb.r, srgb.g, srgb.b l = 0.4122214708 * r + 0.5363325363 * g + 0.0514459929 * b m = 0.2119034982 * r + 0.6806995451 * g + 0.1073969566 * b s = 0.0883024619 * r + 0.2817188376 * g + 0.6299787005 * b l_ = np.cbrt(l) m_ = np.cbrt(m) s_ = np.cbrt(s) res = Float3( 0.2104542553 * l_ + 0.793617785 * m_ - 0.0040720468 * s_, 1.9779984951 * l_ - 2.428592205 * m_ + 0.4505937099 * s_, 0.0259040371 * l_ + 0.7827717662 * m_ - 0.808675766 * s_ ) return res
def __ok_compute_max_saturation(ab:Float2) -> float:
0
2023-12-15 15:39:08+00:00
16k
quocanh34/magic-animate-modified
magicanimate/pipelines/pipeline_animation.py
[ { "identifier": "UNet3DConditionModel", "path": "magicanimate/models/unet_controlnet.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,...
import inspect, math import numpy as np import torch import torch.distributed as dist from typing import Callable, List, Optional, Union from dataclasses import dataclass from PIL import Image from tqdm import tqdm from diffusers.utils import is_accelerate_available from packaging import version from transformers import CLIPTextModel, CLIPTokenizer from diffusers.configuration_utils import FrozenDict from diffusers.models import AutoencoderKL from diffusers.pipeline_utils import DiffusionPipeline from diffusers.schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from diffusers.utils import deprecate, logging, BaseOutput from einops import rearrange from magicanimate.models.unet_controlnet import UNet3DConditionModel from magicanimate.models.multicontrolnet import ControlNetProcessor #fix from magicanimate.models.mutual_self_attention import ReferenceAttentionControl from magicanimate.pipelines.context import ( get_context_scheduler, get_total_steps ) from magicanimate.utils.util import get_tensor_interpolation_method from accelerate import cpu_offload
13,110
# ************************************************************************* # This file may have been modified by Bytedance Inc. (“Bytedance Inc.'s Mo- # difications”). All Bytedance Inc.'s Modifications are Copyright (2023) B- # ytedance Inc.. # ************************************************************************* # Adapted from https://github.com/showlab/Tune-A-Video/blob/main/tuneavideo/pipelines/pipeline_tuneavideo.py # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TODO: 1. support multi-controlnet 2. [DONE] support DDIM inversion 3. support Prompt-to-prompt """ # from magicanimate.models.controlnet import ControlNetModel logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class AnimationPipelineOutput(BaseOutput): videos: Union[torch.Tensor, np.ndarray] class AnimationPipeline(DiffusionPipeline): _optional_components = [] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer,
# ************************************************************************* # This file may have been modified by Bytedance Inc. (“Bytedance Inc.'s Mo- # difications”). All Bytedance Inc.'s Modifications are Copyright (2023) B- # ytedance Inc.. # ************************************************************************* # Adapted from https://github.com/showlab/Tune-A-Video/blob/main/tuneavideo/pipelines/pipeline_tuneavideo.py # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TODO: 1. support multi-controlnet 2. [DONE] support DDIM inversion 3. support Prompt-to-prompt """ # from magicanimate.models.controlnet import ControlNetModel logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class AnimationPipelineOutput(BaseOutput): videos: Union[torch.Tensor, np.ndarray] class AnimationPipeline(DiffusionPipeline): _optional_components = [] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer,
unet: UNet3DConditionModel,
0
2023-12-15 01:22:37+00:00
16k
daihaojun554/biliscrapy
biliscrapy/network/bilibili_danmu.py
[ { "identifier": "bili_pb2", "path": "biliscrapy/network/protobuf/bili_pb2.py", "snippet": "DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\\n\\x08my.proto\\x12 bilibili.community.service.dm.v1\\\"d\\n\\x06\\x41vatar\\x12\\n\\n\\x02id\\x18\\x01 \\x01(\\t\\x12\\x0b\\n\\x03url\\x18\\x02 \\x01(...
import logging import os import json import sys import requests from datetime import datetime from .protobuf import bili_pb2 as Danmaku from .bilibili_utils import bili_utils
12,137
headers = { 'authority': 'message.bilibili.com', 'accept': 'application/json, text/plain, */*', 'accept-language': 'zh-CN,zh;q=0.9', 'cache-control': 'no-cache', 'origin': 'https://www.bilibili.com', 'pragma': 'no-cache', 'referer': 'https://www.bilibili.com/', 'sec-ch-ua': '"Chromium";v="118", "Google Chrome";v="118", "Not=A?Brand";v="99"', 'sec-ch-ua-mobile': '?0', 'sec-ch-ua-platform': '"Windows"', 'sec-fetch-dest': 'empty', 'sec-fetch-mode': 'cors', 'sec-fetch-site': 'same-site', 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36', } # import bili_pb2 as Danmaku class Danmu: def __init__(self):
headers = { 'authority': 'message.bilibili.com', 'accept': 'application/json, text/plain, */*', 'accept-language': 'zh-CN,zh;q=0.9', 'cache-control': 'no-cache', 'origin': 'https://www.bilibili.com', 'pragma': 'no-cache', 'referer': 'https://www.bilibili.com/', 'sec-ch-ua': '"Chromium";v="118", "Google Chrome";v="118", "Not=A?Brand";v="99"', 'sec-ch-ua-mobile': '?0', 'sec-ch-ua-platform': '"Windows"', 'sec-fetch-dest': 'empty', 'sec-fetch-mode': 'cors', 'sec-fetch-site': 'same-site', 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36', } # import bili_pb2 as Danmaku class Danmu: def __init__(self):
self.utils = bili_utils()
1
2023-12-14 10:14:24+00:00
16k