Dataset columns (name, type, observed range):

column              type                  range
repo_name           string                length 7–71
file_path           string                length 5–118
context             list                  –
import_statement    string                length 45–12.5k
token_num           int64                 641–99.4k
cropped_code        string                length 44–17k
all_code            string                length 43–754k
next_line           string                length 2–330
gold_snippet_index  int64                 0–68
created_at          string                length 25 (fixed)
level               string (categorical)  9 distinct values
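A minimal sketch of how records with this schema might be consumed. Only the column names come from the schema above; the JSON Lines storage format, the file name "records.jsonl", and the interpretation that `next_line` is the ground-truth continuation of `cropped_code` while `gold_snippet_index` indexes into the `context` list are assumptions inferred from the sample rows below, not a documented loader for this dataset.

```python
import json

def iter_records(path: str):
    """Yield one dict per line of a JSON Lines file (assumed storage format)."""
    with open(path, encoding="utf-8") as f:
        for line in f:
            if line.strip():
                yield json.loads(line)

# Hypothetical usage: "records.jsonl" is a placeholder path.
for record in iter_records("records.jsonl"):
    # Assumed semantics: `cropped_code` is the visible code prefix and
    # `next_line` the line a completion model should predict next.
    prompt = record["import_statement"] + "\n" + record["cropped_code"]
    target = record["next_line"]
    # Assumed semantics: `gold_snippet_index` selects the relevant entry
    # from the `context` list of {identifier, path, snippet} dicts.
    gold_context = record["context"][record["gold_snippet_index"]]
    print(record["repo_name"], record["file_path"], len(prompt), repr(target))
```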
aliyun/pai-python-sdk
pai/model.py
[ { "identifier": "git_utils", "path": "pai/common/git_utils.py", "snippet": "def git_clone_repo(git_config: Dict[str, str], source_dir: Optional[str] = None):\ndef _validate_git_config(git_config):\ndef _build_and_run_clone_command(git_config, dest_dir):\ndef _clone_command_for_codeup(git_config, dest_di...
import copy import distutils.dir_util import json import logging import os.path import posixpath import shlex import shutil import tempfile import textwrap import time import requests from typing import Any, Dict, Iterator, List, Optional, Tuple, Union from addict import Dict as AttrDict from oss2 import ObjectIterator from .common import git_utils from .common.consts import INSTANCE_TYPE_LOCAL_GPU, ModelFormat from .common.docker_utils import ContainerRun, run_container from .common.oss_utils import OssUriObj, download, is_oss_uri, upload from .common.utils import ( generate_repr, is_local_run_instance_type, random_str, to_plain_text, ) from .exception import DuplicatedMountException, MountPathIsOccupiedException from .image import ImageInfo from .predictor import AsyncPredictor, LocalPredictor, Predictor, ServiceType from .serializers import SerializerBase from .session import Session, get_default_session from .estimator import AlgorithmEstimator
17,210
if instance_count: inference_spec.add_option("metadata.instance", instance_count) if instance_type: inference_spec.add_option("cloud.computing.instance_type", instance_type) elif resource_config: inference_spec.add_option("metadata.cpu", resource_config.cpu) inference_spec.add_option("metadata.memory", resource_config.memory) if resource_config.gpu: inference_spec.add_option("metadata.gpu", resource_config.gpu) if resource_config.gpu_memory: inference_spec.add_option( "metadata.gpu_memory", resource_config.gpu_memory ) if resource_config.gpu: logger.warning( "Parameters 'gpu' is set, the 'gpu_memory' parameter " "does not take effect." ) if resource_id: inference_spec.add_option("metadata.resource", resource_id) if options: inference_spec.merge_options(options=options) return inference_spec.to_dict() def _deploy_local( self, instance_type: str, serializer: SerializerBase = None, wait: bool = True, ) -> LocalPredictor: """Deploy the model in local using docker.""" if not self.inference_spec.is_container_serving(): raise RuntimeError( "Currently, only model using the InferenceSpec that serving with" " container support local run." ) if len(self.inference_spec.containers) > 1: raise RuntimeError( "InferenceSpec that serving with multiple container " "does not support local run." ) # prepare model data to local work_dir = tempfile.mkdtemp() model_dir = os.path.join(work_dir, "model") self._download_model_data(target_dir=model_dir) volumes = { model_dir: { "bind": DefaultServiceConfig.model_path, "mode": "rw", } } # prepare used storage to local directory. if "storage" in self.inference_spec: # only OSS storage config support local run. if any(s for s in self.inference_spec.storage if "oss" not in s): raise ValueError( f"Local run only support InferenceSpec using OSS storage config: " f"{self.inference_spec.to_dict()}" ) for idx, storage in enumerate(self.inference_spec.storage): store_dir = os.path.join(work_dir, f"storage_{idx}") os.makedirs(store_dir, exist_ok=True) oss_uri = OssUriObj(storage.oss.path) download( oss_path=oss_uri.object_key, local_path=store_dir, bucket=self.session.get_oss_bucket(oss_uri.bucket_name), ) volumes[store_dir] = {"bind": storage.mount_path, "mode": "rw"} container_spec = self.inference_spec.containers[0].to_dict() env_vars = { item["name"]: item["value"] for item in container_spec.get("env", []) } # build local launch script requirements_list = container_spec.get("prepare", dict()).get( "pythonRequirements", [] ) requirements_path = container_spec.get("prepare", dict()).get( "pythonRequirementsPath", None ) # build command to install requirements if requirements_list: install_requirements = " ".join( [ shlex.quote(s) for s in ["python", "-m", "pip", "install"] + requirements_list ] ) elif requirements_path: install_requirements = " ".join( [ shlex.quote(s) for s in ["python", "-m", "pip", "install", "-r", requirements_path] ] ) else: install_requirements = "" user_scripts = container_spec.get("script", "") launch_script = textwrap.dedent( f"""\ set -e {install_requirements} {user_scripts} """ )
# Copyright 2023 Alibaba, Inc. or its affiliates. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. logger = logging.getLogger(__name__) # Reserved ports for internal use, do not use them for service _RESERVED_PORTS = [8080, 9090] class DefaultServiceConfig(object): """Default configuration used in creating prediction service.""" # Listen Port listen_port = 8000 # Default model path in container model_path = "/eas/workspace/model/" # Default user code path in container code_path = "/ml/usercode/" class ResourceConfig(object): """A class that represents the resource used by a PAI prediction service instance.""" def __init__(self, cpu: int, memory: int, gpu: int = None, gpu_memory: int = None): """ResourceConfig initializer. The public resource group does not support requesting GPU resources with `ResourceConfig`. Use the 'gpu' and 'gpu_memory' parameter only for services deployed to dedicated resource groups that provide GPU machine instances. Args: cpu (int): The number of CPUs that each instance requires. memory (int): The amount of memory that each instance requires, must be an integer, Unit: MB. gpu (int): The number of GPUs that each instance requires. gpu_memory (int): The amount of GPU memory that each instance requires. The value must be an integer, Unit: GB. PAI allows memory resources of a GPU to be allocated to multiple instances. If you want multiple instances to share the memory resources of a GPU, set the gpu parameter to 0. If you set the ``gpu`` parameter to 1, each instance occupies a GPU and the gpu_memory parameter does not take effect. .. note:: **Important** PAI does not enable the strict isolation of GPU memory. To prevent out of memory (OOM) errors, make sure that the GPU memory used by each instance does not exceed the requested amount. """ self.cpu = cpu self.memory = memory self.gpu = gpu self.gpu_memory = gpu_memory def __repr__(self): return ( f"ResourceConfig(cpu={self.cpu}, memory={self.memory}MB, gpu={self.gpu or 0}," f" gpu_memory={self.gpu_memory or 0}GB)" ) def __str__(self): return self.__repr__() def to_dict(self): """Transform the ResourceConfig instance to a dictionary. Returns: dict: """ res = { "cpu": self.cpu, "gpu": self.gpu, "gpu_memory": self.gpu_memory, "memory": self.memory, } return {k: v for k, v in res.items() if v is not None} class InferenceSpec(object): """A class used to describe how to create a prediction service. InferenceSpec is using to describe how the model is serving in PAI. To view the full supported parameters, please see the following hyperlink: `Parameters of model services <https://help.aliyun.com/document_detail/450525.htm>`_. Example of how to config a InferneceSpec:: >>> # build an inference_spec that using XGBoost processor. 
>>> infer_spec = InferenceSpec(processor="xgboost") >>> infer_spec.metadata.rpc.keepalive = 1000 >>> infer_spec.warm_up_data_path = "oss://bucket-name/path/to/warmup-data" >>> infer_spec.add_option("metadata.rpc.max_batch_size", 8) >>> print(infer_spec.processor) xgboost >>> print(infer_spec.metadata.rpc.keepalive) 1000 >>> print(infer_spec.metadata.rpc.max_batch_size) 8 >>> print(infer_spec.to_dict()) {'processor': 'xgboost', 'metadata': {'rpc': {'keepalive': 1000, 'max_batch_size': 8}}, 'warm_up_data_path': 'oss://bucket-name/path/to/warmup-data'} """ def __init__(self, *args, **kwargs): """InferenceSpec initializer. Args: **kwargs: Parameters of the inference spec. """ properties = kwargs.pop("__properties", []) cfg_dict = copy.deepcopy(kwargs) cfg_dict = {k: v for k, v in cfg_dict.items() if not k.startswith("_")} if args: if len(args) > 1: raise TypeError() cfg_dict.update(args[0]) super(InferenceSpec, self).__setattr__( "_cfg_dict", self._transform_value(cfg_dict) ) super(InferenceSpec, self).__setattr__("__properties", properties) def __repr__(self): return json.dumps(self.to_dict(), sort_keys=True, indent=4) def _transform_value(self, value): if isinstance(value, (List, Tuple)): return [self._transform_value(item) for item in value] elif isinstance(value, (Dict, AttrDict)): return AttrDict( {key: self._transform_value(value) for key, value in value.items()} ) return value def __missing__(self, name): return self._cfg_dict.__missing__(name) def __setitem__(self, name, value): return self._cfg_dict.__setitem__(name, self._transform_value(value)) def __setattr__(self, name, value): if name in getattr(self, "__properties"): super(InferenceSpec, self).__setattr__(name, self._transform_value(value)) else: self._cfg_dict.__setattr__(name, self._transform_value(value)) def __getattr__(self, item): if item.startswith("_"): return getattr(self, item) return self._cfg_dict.__getitem__(item) def __contains__(self, item): return item in self._cfg_dict def to_dict(self) -> Dict: """Return a dictionary that represent the InferenceSpec.""" return self._cfg_dict.to_dict() def add_option(self, name: str, value): """Add an option to the inference_spec instance. Args: name (str): Name of the option to set, represented as the JSON path of the parameter for the InferenceSpec. To view the full supported parameters, please see the following hyperlink: `Parameters of model services <https://help.aliyun.com/document_detail/450525.htm>`_. value: Value for the option. Examples: >>> infer_spec = InferenceSpec(processor="tensorflow_gpu_1.12") >>> infer_spec.add_option("metadata.rpc.keepalive", 10000) >>> infer_spec.metadata.rpc.keepalive 10000 >>> infer_spec.to_dict() {'processor': 'tensorflow_gpu_1.12', 'metadata': {'rpc': {'keepalive': 10000}}} """ src = self._transform_value(value) for k in reversed(name.split(".")): src = {k: src} self._cfg_dict.update(AttrDict(src)) def merge_options(self, options: Dict[str, Any]): """Merge options from a dictionary.""" for key, value in options.items(): self.add_option(key, value) @classmethod def from_dict(cls, config: Dict[str, Any]) -> "InferenceSpec": """Initialize a InferenceSpec from a dictionary. You can use this method to initialize a InferenceSpec instance from a dictionary. Returns: :class:`pai.model.InferenceSpec`: A InferenceSpec instance. 
""" config = config or dict() return cls(**config) def is_container_serving(self): return "containers" in self._cfg_dict @classmethod def _upload_source_dir(cls, source_dir, session): """Upload source files to OSS bucket.""" if not os.path.exists(source_dir): raise ValueError(f"Input source code path does not exist: {source_dir}.") if not os.path.isdir(source_dir): raise ValueError( f"Input source code path should be a directory: {source_dir}." ) target_dir = session.get_storage_path_by_category(category="inference_src") # upload local script data to the OSS bucket. uploaded_source_code = upload( source_dir, target_dir, session.oss_bucket, ) logger.debug("Uploaded source code to OSS: %s", uploaded_source_code) return uploaded_source_code def mount( self, source: str, mount_path: str, session: Session = None, ) -> Dict[str, Any]: """Mount a source storage to the running container. .. note:: If source is a local path, it will be uploaded to the OSS bucket and mounted. If source is a OSS path, it will be mounted directly. Args: source (str): The source storage to be attached, currently only support OSS path in OSS URI format and local path. mount_path (str): The mount path in the container. session (Session, optional): A PAI session instance used for communicating with PAI service. Returns: Dict[str, Any]: The storage config. Raises: DuplicateMountException: If the mount path is already used or source OSS path is mounted to the container. Examples:: # Mount a OSS storage path to the running container. >>> inference_spec.mount("oss://<YourOssBucket>/path/to/directory/model.json", ... "/ml/model/") # 'Mount' a local path to the running container. >>> inference_spec.mount("/path/to/your/data/", "/ml/model/") """ session = session or get_default_session() # TODO: supports more storages, such as NAS, PAI Dataset, PAI CodeSource, etc. if not isinstance(source, str): raise ValueError( "Parameter should be a string which represents an OSS storage path" " or a local file path." ) if "storage" in self._cfg_dict: configs = self._cfg_dict.get("storage", []) else: configs = [] uris = set() for conf in configs: # check if target mount path is already used. if conf.get("mount_path") == mount_path: raise MountPathIsOccupiedException( f"The mount path '{mount_path}' has already been used." ) mount_uri = conf.get("oss", {}).get("path") uris.add(mount_uri) if is_oss_uri(source): oss_uri_obj = OssUriObj(source) storage_config = { "mount_path": mount_path, "oss": {"path": oss_uri_obj.get_dir_uri()}, } elif os.path.exists(source): # if source is a local path, upload it to OSS bucket and use OSS URI # as storage source. oss_path = session.get_storage_path_by_category("model_data") oss_uri = upload( source_path=source, oss_path=oss_path, bucket=session.oss_bucket ) oss_uri_obj = OssUriObj(oss_uri) storage_config = { "mount_path": mount_path, "oss": {"path": oss_uri_obj.get_dir_uri()}, } else: raise ValueError( "Source path is not a valid OSS URI or a existing local path." ) # check if the source OSS Path is already mounted to the container. if oss_uri_obj.get_dir_uri() in uris: raise DuplicatedMountException( f"Source OSS path '{oss_uri_obj.get_dir_uri()}' is already " f"mounted to the container." 
) configs.append(storage_config) self.storage = configs return storage_config def container_serving_spec( command: str, image_uri: Union[str, ImageInfo], source_dir: Optional[str] = None, git_config: Optional[Dict[str, Any]] = None, port: Optional[int] = None, environment_variables: Optional[Dict[str, str]] = None, requirements: Optional[List[str]] = None, requirements_path: Optional[str] = None, health_check: Optional[Dict[str, Any]] = None, session: Optional[Session] = None, ) -> InferenceSpec: """A convenient function to create an InferenceSpec instance that serving the model with given container and script. Examples:: infer_spec: InferenceSpec = container_serving_spec( command="python run.py", source_dir="./model_server/", image_uri="<ServingImageUri>", ) m = Model( model_data="oss://<YourOssBucket>/path/to/your/model", inference_spec=infer_spec, ) m.deploy( instance_type="ecs.c6.xlarge" ) Args: command (str): The command used to launch the Model server. source_dir (str): A relative path or an absolute path to the source code directory used to load model and launch the HTTP server, it will be uploaded to the OSS bucket and mounted to the container. If there is a ``requirements.txt`` file under the directory, it will be installed before the prediction server started. If 'git_config' is provided, 'source_dir' should be a relative location to a directory in the Git repo. With the following GitHub repo directory structure: .. code:: |----- README.md |----- src |----- train.py |----- test.py if you need 'src' directory as the source code directory, you can assign source_dir='./src/'. git_config (Dict[str, str]): Git configuration used to clone the repo. Including ``repo``, ``branch``, ``commit``, ``username``, ``password`` and ``token``. The ``repo`` is required. All other fields are optional. ``repo`` specifies the Git repository. If you don't provide ``branch``, the default value 'master' is used. If you don't provide ``commit``, the latest commit in the specified branch is used. ``username``, ``password`` and ``token`` are for authentication purpose. For example, the following config: .. code:: python git_config = { 'repo': 'https://github.com/modelscope/modelscope.git', 'branch': 'master', 'commit': '9bfc4a9d83c4beaf8378d0a186261ffc1cd9f960' } results in cloning the repo specified in 'repo', then checking out the 'master' branch, and checking out the specified commit. image_uri (str): The Docker image used to run the prediction service. port (int): Expose port of the server in container, the prediction request will be forward to the port. The environment variable ``LISTENING_PORT`` in the container will be set to this value. Default to 8000. environment_variables (Dict[str, str], optional): Dictionary of environment variable key-value pairs to set on the running container. requirements (List[str], optional): A list of Python package dependency, it will be installed before the serving container run. requirements_path (str, optional): A absolute path to the requirements.txt in the container. health_check (Dict[str, Any], optional): The health check configuration. If it not set, A TCP readiness probe will be used to check the health of the HTTP server. session (Session, optional): A PAI session instance used for communicating with PAI service. Returns: :class:`pai.model.InferenceSpec`: An InferenceSpec instance. 
""" session = session or get_default_session() if git_config: updated_args = git_utils.git_clone_repo( git_config=git_config, source_dir=source_dir, ) source_dir = updated_args["source_dir"] if not port: port = DefaultServiceConfig.listen_port elif int(port) in _RESERVED_PORTS: raise ValueError( "Reserved port {} is not allowed to use as serving port.".format(port), ) if source_dir: if not os.path.exists(source_dir): raise ValueError("Source directory {} does not exist.".format(source_dir)) if not os.path.isdir(source_dir): raise ValueError( "Source directory {} is not a directory.".format(source_dir) ) code_mount_path = DefaultServiceConfig.code_path # build the command for serving container. command = textwrap.dedent( f"""\ # change working directory to code mount path. cd {code_mount_path} {command} """ ) if not requirements_path and os.path.exists( os.path.join(source_dir, "requirements.txt") ): requirements_path = posixpath.join(code_mount_path, "requirements.txt") else: code_mount_path = None requirements_path = None if isinstance(image_uri, ImageInfo): image_uri = image_uri.image_uri environment_variables = environment_variables or dict() container_spec = { "image": image_uri, "port": port, "script": command, "env": [ {"name": key, "value": str(value)} for key, value in environment_variables.items() ] if environment_variables else [], } if health_check: container_spec["health_check"] = health_check if requirements: container_spec["prepare"] = {"pythonRequirements": requirements} if requirements_path: logger.warning( "If the parameter 'requirements' is set, the requirements_path " "parameter will be ignored." ) elif requirements_path: container_spec["prepare"] = { "pythonRequirementsPath": requirements_path, } inference_spec = InferenceSpec(containers=[container_spec]) # mount the uploaded serving scripts to the serving container. if source_dir: inference_spec.mount( source_dir, code_mount_path, session=session, ) return inference_spec class _BuiltinProcessor(object): """Helper class uses for getting the builtin processor""" PMML = "pmml" XGBoost = "xgboost" SupportedFrameworkAcceleratorVersionConfig = { "tensorflow": { "cpu": [ "1.12", "1.14", "1.15", "2.3", ], "gpu": [ "1.12", "1.14", "1.15", ], }, "pytorch": { "cpu": [ "1.6", ], "gpu": [ "1.6", ], }, } # Hard code default processor for specific model format. 
ModelFormatDefaultProcessorMapping = { ModelFormat.PMML: "pmml", ModelFormat.SavedModel: "tensorflow_cpu_2.3", ModelFormat.TorchScript: "pytorch_cpu_1.6", ModelFormat.FrozenPb: "pytorch_cpu_1.6", ModelFormat.CaffePrototxt: "caffe_cpu", ModelFormat.ONNX: "onnx_cu100", } @classmethod def get_default_by_model_format(cls, model_format: str) -> str: """Get the default processor for a specific model format.""" if model_format in cls.ModelFormatDefaultProcessorMapping: return cls.ModelFormatDefaultProcessorMapping[model_format] @classmethod def from_framework_version( cls, framework_name, framework_version, accelerator=None ): accelerator = accelerator or "cpu" versions = cls.SupportedFrameworkAcceleratorVersionConfig.get( framework_name, dict() ).get(accelerator, []) if framework_version in versions: return "{}_{}_{}".format(framework_name, accelerator, framework_version) else: logger.warning( "Could not find the processor for the framework_version({} {}), use the" " latest processor".format(framework_name, framework_version) ) return "{}_{}_{}".format(framework_name, accelerator, versions[-1]) class ModelBase(object): """A class represent ModelBase.""" def __init__( self, model_data: str, inference_spec: Optional[InferenceSpec] = None, session: Session = None, ): self.model_data = model_data self.inference_spec = inference_spec self.session = session or get_default_session() def download(self, target_dir: str): """Download the model data from OSS to local directory. Args: target_dir (str): The target directory to download the model data. Returns: str: Local directory path stores the model data. """ if not self.model_data: raise ValueError("Could not find the model data for this model.") if not is_oss_uri(self.model_data): raise RuntimeError("Download method only support model data stored in OSS.") self._download_model_data(target_dir) return target_dir def _download_model_data(self, target_dir): if not self.model_data: return logger.info(f"Prepare model data to local directory: {target_dir}") if self.model_data.startswith("oss://"): oss_uri = OssUriObj(self.model_data) oss_bucket = self.session.get_oss_bucket(oss_uri.bucket_name) download( oss_path=oss_uri.object_key, local_path=target_dir, bucket=oss_bucket, un_tar=True, ) else: if not os.path.exists(self.model_data): raise ValueError(f"Model data path does not exist: {self.model_data}") os.makedirs(target_dir, exist_ok=True) if os.path.isfile(self.model_data): shutil.copy( self.model_data, os.path.join(target_dir, os.path.basename(self.model_data)), ) else: distutils.dir_util.copy_tree(self.model_data, target_dir) def _upload_model_data(self): """Upload the model artifact to OSS bucket if self.model_data is a local file path. """ if not self.model_data: return elif is_oss_uri(self.model_data): return self.model_data elif not os.path.exists(self.model_data): raise RuntimeError(f"Model data path does not exist: {self.model_data}") dest_oss_path = self.session.get_storage_path_by_category(category="model_data") upload_model_data = upload( source_path=self.model_data, oss_path=dest_oss_path, bucket=self.session.oss_bucket, ) return upload_model_data def list_model_files(self, uri_format: bool = False) -> Iterator[str]: """List model files under the model path. Args: uri_format (bool): If True, return the model file path in OSS URI format. Returns: Iterator[str]: Iterator of model files. 
""" if not self.model_data: raise ValueError("Model data path is not specified.") if not is_oss_uri(self.model_data): raise ValueError("Method only support model data stored in OSS.") oss_uri_obj = OssUriObj(self.model_data) bucket = self.session.get_oss_bucket( bucket_name=oss_uri_obj.bucket_name, ) def _get_relative_path(obj_key: str): # if the model_data is reference an object, return the object file # name. if oss_uri_obj.object_key == obj_key: return os.path.basename(obj_key) path = obj_key[len(oss_uri_obj.object_key) :] return path.lstrip("/") if path.startswith("/") else path obj_iter = ObjectIterator(bucket=bucket, prefix=oss_uri_obj.object_key) for obj_info in obj_iter: if uri_format: yield f"oss://{bucket.bucket_name}/{obj_info.key}" else: yield _get_relative_path(obj_info.key) def _get_inference_spec(self): return self.inference_spec def deploy( self, service_name: str, instance_count: Optional[int] = 1, instance_type: Optional[str] = None, resource_config: Optional[Union[Dict[str, int], ResourceConfig]] = None, resource_id: Optional[str] = None, options: Optional[Dict[str, Any]] = None, service_type: Optional[str] = None, wait: bool = True, serializer: Optional["SerializerBase"] = None, **kwargs, ): """Deploy a prediction service with the model.""" if is_local_run_instance_type(instance_type): return self._deploy_local( instance_type=instance_type, serializer=serializer, wait=wait, ) else: return self._deploy( service_name=service_name, instance_count=instance_count, instance_type=instance_type, resource_config=resource_config, resource_id=resource_id, service_type=service_type, options=options, wait=wait, serializer=serializer, ) def _generate_service_name(self): s = os.path.basename(self.model_data.rstrip("/")) + random_str(8) return to_plain_text(s) def _deploy( self, service_name: str = None, instance_count: int = 1, instance_type: str = None, resource_config: Union[Dict[str, int], ResourceConfig] = None, resource_id: str = None, service_type: str = None, options: Dict[str, Any] = None, wait: bool = True, serializer: "SerializerBase" = None, ): """Create a prediction service.""" if not service_name: service_name = self._generate_service_name() logger.info( "Service name is not specified, using a generated service" f" name to create the service: service_name={service_name}" ) config = self._build_service_config( service_name=service_name, instance_count=instance_count, instance_type=instance_type, service_type=service_type, resource_config=resource_config, resource_id=resource_id, options=options, ) service_name = self.session.service_api.create(config=config) self._wait_service_visible(service_name) if service_type == ServiceType.Async: predictor = AsyncPredictor( service_name=service_name, session=self.session, serializer=serializer, ) else: predictor = Predictor( service_name=service_name, session=self.session, serializer=serializer, ) print( "View the service detail by accessing the console URI: \n{}".format( predictor.console_uri ) ) if wait: predictor.wait_for_ready() return predictor def _wait_service_visible(self, service_name, attempts=3, interval=2): """Wait for the service to be visible in DescribeService API. 
hack: https://aone.alibaba-inc.com/v2/project/1134421/bug#viewIdentifier=5dfb195e2e2b84f6b2f24718&openWorkitemIdentifier=50192431 """ while attempts > 0: obj = self.session.service_api.get(service_name) if "ServiceUid" in obj: return attempts -= 1 time.sleep(interval) logger.warning("DescribeService API failed to get the Service object.") def _build_service_config( self, service_name: str = None, instance_count: int = None, instance_type: str = None, resource_config: Union[ResourceConfig, Dict[str, Any]] = None, resource_id: str = None, service_type: str = None, options: Dict[str, Any] = None, ) -> Dict[str, Any]: """Build a service config dictionary used to create a PAI EAS service.""" self.model_data = self._upload_model_data() resource_config = ( ResourceConfig(**resource_config) if resource_config and isinstance(resource_config, dict) else None ) if resource_config and instance_type: raise ValueError( f"Only one of 'instance_type' and 'resource_config' " f"is required, but both have been provided: instance_type" f"={instance_type}, resource_config=" f"{resource_config}." ) inference_spec = InferenceSpec( self._get_inference_spec().to_dict() if self.inference_spec else dict() ) if self.model_data: if not inference_spec.is_container_serving(): # if model_data is an OSS URI with endpoint, truncate the endpoint. oss_uri_obj = OssUriObj(self.model_data) model_path_uri = "oss://{bucket_name}/{key}".format( bucket_name=oss_uri_obj.bucket_name, key=oss_uri_obj.object_key, ) inference_spec.add_option("model_path", model_path_uri) else: try: inference_spec.mount( self.model_data, mount_path=DefaultServiceConfig.model_path, ) except DuplicatedMountException as e: # ignore duplicated mount logger.info("Model is already mounted the container: %s", e) if service_type: inference_spec.add_option("metadata.type", service_type) if inference_spec.is_container_serving(): inference_spec.add_option("metadata.rpc.proxy_path", "/") if service_name: inference_spec.add_option("name", service_name) if instance_count: inference_spec.add_option("metadata.instance", instance_count) if instance_type: inference_spec.add_option("cloud.computing.instance_type", instance_type) elif resource_config: inference_spec.add_option("metadata.cpu", resource_config.cpu) inference_spec.add_option("metadata.memory", resource_config.memory) if resource_config.gpu: inference_spec.add_option("metadata.gpu", resource_config.gpu) if resource_config.gpu_memory: inference_spec.add_option( "metadata.gpu_memory", resource_config.gpu_memory ) if resource_config.gpu: logger.warning( "Parameters 'gpu' is set, the 'gpu_memory' parameter " "does not take effect." ) if resource_id: inference_spec.add_option("metadata.resource", resource_id) if options: inference_spec.merge_options(options=options) return inference_spec.to_dict() def _deploy_local( self, instance_type: str, serializer: SerializerBase = None, wait: bool = True, ) -> LocalPredictor: """Deploy the model in local using docker.""" if not self.inference_spec.is_container_serving(): raise RuntimeError( "Currently, only model using the InferenceSpec that serving with" " container support local run." ) if len(self.inference_spec.containers) > 1: raise RuntimeError( "InferenceSpec that serving with multiple container " "does not support local run." 
) # prepare model data to local work_dir = tempfile.mkdtemp() model_dir = os.path.join(work_dir, "model") self._download_model_data(target_dir=model_dir) volumes = { model_dir: { "bind": DefaultServiceConfig.model_path, "mode": "rw", } } # prepare used storage to local directory. if "storage" in self.inference_spec: # only OSS storage config support local run. if any(s for s in self.inference_spec.storage if "oss" not in s): raise ValueError( f"Local run only support InferenceSpec using OSS storage config: " f"{self.inference_spec.to_dict()}" ) for idx, storage in enumerate(self.inference_spec.storage): store_dir = os.path.join(work_dir, f"storage_{idx}") os.makedirs(store_dir, exist_ok=True) oss_uri = OssUriObj(storage.oss.path) download( oss_path=oss_uri.object_key, local_path=store_dir, bucket=self.session.get_oss_bucket(oss_uri.bucket_name), ) volumes[store_dir] = {"bind": storage.mount_path, "mode": "rw"} container_spec = self.inference_spec.containers[0].to_dict() env_vars = { item["name"]: item["value"] for item in container_spec.get("env", []) } # build local launch script requirements_list = container_spec.get("prepare", dict()).get( "pythonRequirements", [] ) requirements_path = container_spec.get("prepare", dict()).get( "pythonRequirementsPath", None ) # build command to install requirements if requirements_list: install_requirements = " ".join( [ shlex.quote(s) for s in ["python", "-m", "pip", "install"] + requirements_list ] ) elif requirements_path: install_requirements = " ".join( [ shlex.quote(s) for s in ["python", "-m", "pip", "install", "-r", requirements_path] ] ) else: install_requirements = "" user_scripts = container_spec.get("script", "") launch_script = textwrap.dedent( f"""\ set -e {install_requirements} {user_scripts} """ )
gpu_count = -1 if instance_type == INSTANCE_TYPE_LOCAL_GPU else None
1
2023-12-01 01:40:12+00:00
24k
zerolink-io/zerolink-python
zerolink/req.py
[ { "identifier": "settings", "path": "zerolink/settings.py", "snippet": " CONFIG_FILE = os.path.join(os.environ[\"APPDATA\"], \"zerolink\", \"config\")\n CONFIG_FILE = os.path.join(os.environ[\"HOME\"], \".config\", \"zerolink\", \"config\")\n CONFIG_FILE = os.path.join(\n os.environ[\"HO...
from typing import Any, Optional, cast from zerolink import settings from zerolink.exc import APIError, AuthenticationError from zerolink_client import Client from zerolink_client.api.default import finetune, get_models_models_get from zerolink_client.api.entity import ( desc_entity_id, desc_entity_ontology, lookup_entity, lookup_relation, search_entity, ) from zerolink_client.api.extract import extract_text from zerolink_client.api.fact import ( create_userattribute, create_userentity, create_userrule, create_usertriple, ) from zerolink_client.api.kg import get_triple from zerolink_client.api.question import post_question from zerolink_client.api.session import ( create_session, get_session_entities, get_session_facts, get_user_session, ) from zerolink_client.api.user import create_user from zerolink_client.models import ( ChatSession, CreateAttribute, CreateEntity, CreateRule, CreateRuleResponse, CreateTriple, CreateTuneJobResponse, Entity, HTTPValidationError, Question, QuestionResponse, TextExtract, ) from zerolink_client.types import File, UNSET
16,972
def ask_question( session_id: Optional[int], body: str, assumps: Optional[dict[str, Any]] = None, **kwargs, ) -> QuestionResponse: """ Ask a question. """ check_api_key() rep = post_question.sync_detailed( client=client, session_id=(session_id or UNSET), body=Question(body=body, **(assumps or {})), **kwargs, ) if rep.status_code == 200: return cast(QuestionResponse, rep.parsed) else: err = rep.content.decode("utf-8") print(err) raise APIError(err) def get_entity_id(id: str, **kwargs) -> Entity: """ Get the description of an entity by eid. """ check_api_key() rep = desc_entity_id.sync_detailed( client=client, id=id, **kwargs, ) if rep.status_code == 200: return cast(Entity, rep.parsed) else: err = rep.content.decode("utf-8") print(err) raise APIError(err) def get_desc_entity(name: str, **kwargs): """ Get the description of an entity by eid. """ check_api_key() rep = lookup_entity.sync_detailed( client=client, name=name, **kwargs, ) if rep.status_code == 200: return rep.parsed else: err = rep.content.decode("utf-8") print(err) raise APIError(err) def get_search_entity(name: str, **kwargs): """ Search for an entity. """ check_api_key() rep = search_entity.sync_detailed( client=client, name=name, **kwargs, ) if rep.status_code == 200: return rep.parsed else: err = rep.content.decode("utf-8") print(err) raise APIError(err) def get_search_relation(name: str, **kwargs): """ Search for a relation. """ check_api_key() rep = lookup_relation.sync_detailed( client=client, name=name, **kwargs, ) if rep.status_code == 200: return rep.parsed else: err = rep.content.decode("utf-8") print(err) raise APIError(err) def get_ontology(name: str, **kwargs) -> dict[str, Any]: """ Get the ontology of an entity. """ check_api_key() rep = desc_entity_ontology.sync_detailed( client=client, id=name, **kwargs, ) if rep.status_code == 200: return cast(dict[str, Any], rep.parsed) else: err = rep.content.decode("utf-8") print(err) raise APIError(err) def get_triples(name: str, **kwargs): """ Get the triples of a session. """ check_api_key()
# ------------------------------------------------------------------------ # Endpoints # ------------------------------------------------------------------------ client = Client( base_url=settings.server_url, raise_on_unexpected_status=False, ) def check_api_key() -> None: """ Check if the API key is set. """ if settings.api_key is None: raise AuthenticationError() else: pass def get_user_id() -> str: """ Get the user ID from the server. Only used for Demo server. """ client._headers["Authorization"] = settings.api_key rep = create_user.sync(client=client) if rep is None: raise Exception("Failed to authenticate.") settings.api_key = rep.user_id if isinstance(rep, HTTPValidationError): raise APIError(str(rep)) return rep.user_id def post_session(user_id: str, **kwargs) -> Optional[ChatSession]: """ Create a new session. """ check_api_key() if user_id is None: user_id = settings.api_key rep = create_session.sync(client=client, user_id=user_id, **kwargs) if isinstance(rep, HTTPValidationError): raise APIError(str(rep)) return rep def get_session_name(user_id: str, session_name: str, **kwargs): """ Lookup a session by user and name. """ check_api_key() rep = get_user_session.sync_detailed(user_id, session_name, client=client, **kwargs) if rep.status_code == 200: return rep.parsed elif rep.status_code == 404: return None else: err = rep.content.decode("utf-8") print(err) raise APIError(err) def get_session_entities_list(session_id: int, **kwargs): """ Get the entities of a session. """ check_api_key() rep = get_session_entities.sync_detailed(session_id, client=client, **kwargs) if rep.status_code == 200: return rep.parsed else: err = rep.content.decode("utf-8") print(err) raise APIError(err) def get_session_facts_list(session_id: int, **kwargs): """ Get the facts of a session. """ check_api_key() rep = get_session_facts.sync_detailed(session_id, client=client, **kwargs) if rep.status_code == 200: return rep.parsed else: err = rep.content.decode("utf-8") print(err) raise APIError(err) def ask_question( session_id: Optional[int], body: str, assumps: Optional[dict[str, Any]] = None, **kwargs, ) -> QuestionResponse: """ Ask a question. """ check_api_key() rep = post_question.sync_detailed( client=client, session_id=(session_id or UNSET), body=Question(body=body, **(assumps or {})), **kwargs, ) if rep.status_code == 200: return cast(QuestionResponse, rep.parsed) else: err = rep.content.decode("utf-8") print(err) raise APIError(err) def get_entity_id(id: str, **kwargs) -> Entity: """ Get the description of an entity by eid. """ check_api_key() rep = desc_entity_id.sync_detailed( client=client, id=id, **kwargs, ) if rep.status_code == 200: return cast(Entity, rep.parsed) else: err = rep.content.decode("utf-8") print(err) raise APIError(err) def get_desc_entity(name: str, **kwargs): """ Get the description of an entity by eid. """ check_api_key() rep = lookup_entity.sync_detailed( client=client, name=name, **kwargs, ) if rep.status_code == 200: return rep.parsed else: err = rep.content.decode("utf-8") print(err) raise APIError(err) def get_search_entity(name: str, **kwargs): """ Search for an entity. """ check_api_key() rep = search_entity.sync_detailed( client=client, name=name, **kwargs, ) if rep.status_code == 200: return rep.parsed else: err = rep.content.decode("utf-8") print(err) raise APIError(err) def get_search_relation(name: str, **kwargs): """ Search for a relation. 
""" check_api_key() rep = lookup_relation.sync_detailed( client=client, name=name, **kwargs, ) if rep.status_code == 200: return rep.parsed else: err = rep.content.decode("utf-8") print(err) raise APIError(err) def get_ontology(name: str, **kwargs) -> dict[str, Any]: """ Get the ontology of an entity. """ check_api_key() rep = desc_entity_ontology.sync_detailed( client=client, id=name, **kwargs, ) if rep.status_code == 200: return cast(dict[str, Any], rep.parsed) else: err = rep.content.decode("utf-8") print(err) raise APIError(err) def get_triples(name: str, **kwargs): """ Get the triples of a session. """ check_api_key()
rep = get_triple.sync_detailed(
16
2023-12-03 07:50:04+00:00
24k
JunMa11/UHNSeg-Quiz
nnunetv2/training/nnUNetTrainer/variants/network_architecture/nnUNetTrainerNoDeepSupervision.py
[ { "identifier": "DC_and_BCE_loss", "path": "nnunetv2/training/loss/compound_losses.py", "snippet": "class DC_and_BCE_loss(nn.Module):\n def __init__(self, bce_kwargs, soft_dice_kwargs, weight_ce=1, weight_dice=1, use_ignore_label: bool = False,\n dice_class=MemoryEfficientSoftDiceLoss...
import torch from torch import autocast from nnunetv2.training.loss.compound_losses import DC_and_BCE_loss, DC_and_CE_loss from nnunetv2.training.loss.dice import get_tp_fp_fn_tn, MemoryEfficientSoftDiceLoss from nnunetv2.training.nnUNetTrainer.nnUNetTrainer import nnUNetTrainer from nnunetv2.utilities.helpers import dummy_context from nnunetv2.utilities.label_handling.label_handling import determine_num_input_channels from torch.nn.parallel import DistributedDataParallel as DDP
17,173
class nnUNetTrainerNoDeepSupervision(nnUNetTrainer): def _build_loss(self): if self.label_manager.has_regions: loss = DC_and_BCE_loss({}, {'batch_dice': self.configuration_manager.batch_dice, 'do_bg': True, 'smooth': 1e-5, 'ddp': self.is_ddp}, use_ignore_label=self.label_manager.ignore_label is not None,
class nnUNetTrainerNoDeepSupervision(nnUNetTrainer): def _build_loss(self): if self.label_manager.has_regions: loss = DC_and_BCE_loss({}, {'batch_dice': self.configuration_manager.batch_dice, 'do_bg': True, 'smooth': 1e-5, 'ddp': self.is_ddp}, use_ignore_label=self.label_manager.ignore_label is not None,
dice_class=MemoryEfficientSoftDiceLoss)
3
2023-12-04 19:43:14+00:00
24k
opisaac9001/TTS-With-ooba-and-voice
TTS/tts/models/tortoise.py
[ { "identifier": "TorchMelSpectrogram", "path": "TTS/tts/layers/tortoise/arch_utils.py", "snippet": "class TorchMelSpectrogram(nn.Module):\n def __init__(\n self,\n filter_length=1024,\n hop_length=256,\n win_length=1024,\n n_mel_channels=80,\n mel_fmin=0,\n ...
import os import random import torch import torch.nn.functional as F import torchaudio from contextlib import contextmanager from dataclasses import dataclass from time import time from coqpit import Coqpit from tqdm import tqdm from TTS.tts.layers.tortoise.arch_utils import TorchMelSpectrogram from TTS.tts.layers.tortoise.audio_utils import denormalize_tacotron_mel, load_voice, wav_to_univnet_mel from TTS.tts.layers.tortoise.autoregressive import UnifiedVoice from TTS.tts.layers.tortoise.classifier import AudioMiniEncoderWithClassifierHead from TTS.tts.layers.tortoise.clvp import CLVP from TTS.tts.layers.tortoise.diffusion import SpacedDiffusion, get_named_beta_schedule, space_timesteps from TTS.tts.layers.tortoise.diffusion_decoder import DiffusionTts from TTS.tts.layers.tortoise.random_latent_generator import RandomLatentConverter from TTS.tts.layers.tortoise.tokenizer import VoiceBpeTokenizer from TTS.tts.layers.tortoise.vocoder import VocConf, VocType from TTS.tts.layers.tortoise.wav2vec_alignment import Wav2VecAlignment from TTS.tts.models.base_tts import BaseTTS from math import ceil
19,806
self.ar_checkpoint = self.args.ar_checkpoint self.diff_checkpoint = self.args.diff_checkpoint # TODO: check if this is even needed self.models_dir = config.model_dir self.autoregressive_batch_size = ( pick_best_batch_size_for_gpu() if self.args.autoregressive_batch_size is None else self.args.autoregressive_batch_size ) self.enable_redaction = self.args.enable_redaction self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") if self.enable_redaction: self.aligner = Wav2VecAlignment() self.tokenizer = VoiceBpeTokenizer() self.autoregressive = UnifiedVoice( max_mel_tokens=self.args.ar_max_mel_tokens, max_text_tokens=self.args.ar_max_text_tokens, max_conditioning_inputs=self.args.ar_max_conditioning_inputs, layers=self.args.ar_layers, model_dim=self.args.ar_model_dim, heads=self.args.ar_heads, number_text_tokens=self.args.ar_number_text_tokens, start_text_token=self.args.ar_start_text_token, checkpointing=self.args.ar_checkpointing, train_solo_embeddings=self.args.ar_train_solo_embeddings, ).cpu() self.diffusion = DiffusionTts( model_channels=self.args.diff_model_channels, num_layers=self.args.diff_num_layers, in_channels=self.args.diff_in_channels, out_channels=self.args.diff_out_channels, in_latent_channels=self.args.diff_in_latent_channels, in_tokens=self.args.diff_in_tokens, dropout=self.args.diff_dropout, use_fp16=self.args.diff_use_fp16, num_heads=self.args.diff_num_heads, layer_drop=self.args.diff_layer_drop, unconditioned_percentage=self.args.diff_unconditioned_percentage, ).cpu() self.clvp = CLVP( dim_text=self.args.clvp_dim_text, dim_speech=self.args.clvp_dim_speech, dim_latent=self.args.clvp_dim_latent, num_text_tokens=self.args.clvp_num_text_tokens, text_enc_depth=self.args.clvp_text_enc_depth, text_seq_len=self.args.clvp_text_seq_len, text_heads=self.args.clvp_text_heads, num_speech_tokens=self.args.clvp_num_speech_tokens, speech_enc_depth=self.args.clvp_speech_enc_depth, speech_heads=self.args.clvp_speech_heads, speech_seq_len=self.args.clvp_speech_seq_len, use_xformers=self.args.clvp_use_xformers, ).cpu() self.vocoder = self.args.vocoder.value.constructor().cpu() # Random latent generators (RLGs) are loaded lazily. self.rlg_auto = None self.rlg_diffusion = None if self.args.high_vram: self.autoregressive = self.autoregressive.to(self.device) self.diffusion = self.diffusion.to(self.device) self.clvp = self.clvp.to(self.device) self.vocoder = self.vocoder.to(self.device) self.high_vram = self.args.high_vram @contextmanager def temporary_cuda(self, model): if self.high_vram: yield model else: m = model.to(self.device) yield m m = model.cpu() def get_conditioning_latents( self, voice_samples, return_mels=False, latent_averaging_mode=0, original_tortoise=False, ): """ Transforms one or more voice_samples into a tuple (autoregressive_conditioning_latent, diffusion_conditioning_latent). These are expressive learned latents that encode aspects of the provided clips like voice, intonation, and acoustic properties. :param voice_samples: List of arbitrary reference clips, which should be *pairs* of torch tensors containing arbitrary kHz waveform data. 
:param latent_averaging_mode: 0/1/2 for following modes: 0 - latents will be generated as in original tortoise, using ~4.27s from each voice sample, averaging latent across all samples 1 - latents will be generated using (almost) entire voice samples, averaged across all the ~4.27s chunks 2 - latents will be generated using (almost) entire voice samples, averaged per voice sample """ assert latent_averaging_mode in [ 0, 1, 2, ], "latent_averaging mode has to be one of (0, 1, 2)" with torch.no_grad(): voice_samples = [[v.to(self.device) for v in ls] for ls in voice_samples] auto_conds = [] for ls in voice_samples: auto_conds.append(format_conditioning(ls[0], device=self.device, mel_norm_file=self.mel_norm_path)) auto_conds = torch.stack(auto_conds, dim=1) with self.temporary_cuda(self.autoregressive) as ar: auto_latent = ar.get_conditioning(auto_conds) diffusion_conds = [] DURS_CONST = self.args.duration_const for ls in voice_samples: # The diffuser operates at a sample rate of 24000 (except for the latent inputs) sample = torchaudio.functional.resample(ls[0], 22050, 24000) if original_tortoise else ls[1] if latent_averaging_mode == 0: sample = pad_or_truncate(sample, DURS_CONST)
def pad_or_truncate(t, length): """ Utility function for forcing <t> to have the specified sequence length, whether by clipping it or padding it with 0s. """ tp = t[..., :length] if t.shape[-1] == length: tp = t elif t.shape[-1] < length: tp = F.pad(t, (0, length - t.shape[-1])) return tp def deterministic_state(seed=None): """ Sets the random seeds that tortoise uses to the current time() and returns that seed so results can be reproduced. """ seed = int(time()) if seed is None else seed torch.manual_seed(seed) random.seed(seed) # Can't currently set this because of CUBLAS. TODO: potentially enable it if necessary. # torch.use_deterministic_algorithms(True) return seed def load_discrete_vocoder_diffuser( trained_diffusion_steps=4000, desired_diffusion_steps=200, cond_free=True, cond_free_k=1, sampler="ddim", ): """ Helper function to load a GaussianDiffusion instance configured for use as a vocoder. """ return SpacedDiffusion( use_timesteps=space_timesteps(trained_diffusion_steps, [desired_diffusion_steps]), model_mean_type="epsilon", model_var_type="learned_range", loss_type="mse", betas=get_named_beta_schedule("linear", trained_diffusion_steps), conditioning_free=cond_free, conditioning_free_k=cond_free_k, sampler=sampler, ) def format_conditioning(clip, cond_length=132300, device="cuda", **kwargs): """ Converts the given conditioning signal to a MEL spectrogram and clips it as expected by the models. """ gap = clip.shape[-1] - cond_length if gap < 0: clip = F.pad(clip, pad=(0, abs(gap))) elif gap > 0: rand_start = random.randint(0, gap) clip = clip[:, rand_start : rand_start + cond_length] mel_clip = TorchMelSpectrogram(**kwargs)(clip.unsqueeze(0)).squeeze(0) return mel_clip.unsqueeze(0).to(device) def fix_autoregressive_output(codes, stop_token, complain=True): """ This function performs some padding on coded audio that fixes a mismatch issue between what the diffusion model was trained on and what the autoregressive code generator creates (which has no padding or end). This is highly specific to the DVAE being used, so this particular coding will not necessarily work if used with a different DVAE. This can be inferred by feeding a audio clip padded with lots of zeros on the end through the DVAE and copying out the last few codes. Failing to do this padding will produce speech with a harsh end that sounds like "BLAH" or similar. """ # Strip off the autoregressive stop token and add padding. stop_token_indices = (codes == stop_token).nonzero() if len(stop_token_indices) == 0: if complain: print( "No stop tokens found in one of the generated voice clips. This typically means the spoken audio is " "too long. In some cases, the output will still be good, though. Listen to it and if it is missing words, " "try breaking up your input text." ) return codes codes[stop_token_indices] = 83 stm = stop_token_indices.min().item() codes[stm:] = 83 if stm - 3 < codes.shape[0]: codes[-3] = 45 codes[-2] = 45 codes[-1] = 248 return codes def do_spectrogram_diffusion( diffusion_model, diffuser, latents, conditioning_latents, temperature=1, verbose=True, ): """ Uses the specified diffusion model to convert discrete codes into a spectrogram. """ with torch.no_grad(): output_seq_len = ( latents.shape[1] * 4 * 24000 // 22050 ) # This diffusion model converts from 22kHz spectrogram codes to a 24kHz spectrogram signal. 
output_shape = (latents.shape[0], 100, output_seq_len) precomputed_embeddings = diffusion_model.timestep_independent( latents, conditioning_latents, output_seq_len, False ) noise = torch.randn(output_shape, device=latents.device) * temperature mel = diffuser.sample_loop( diffusion_model, output_shape, noise=noise, model_kwargs={"precomputed_aligned_embeddings": precomputed_embeddings}, progress=verbose, ) return denormalize_tacotron_mel(mel)[:, :, :output_seq_len] def classify_audio_clip(clip, model_dir): """ Returns whether or not Tortoises' classifier thinks the given clip came from Tortoise. :param clip: torch tensor containing audio waveform data (get it from load_audio) :return: True if the clip was classified as coming from Tortoise and false if it was classified as real. """ classifier = AudioMiniEncoderWithClassifierHead( 2, spec_dim=1, embedding_dim=512, depth=5, downsample_factor=4, resnet_blocks=2, attn_blocks=4, num_attn_heads=4, base_channels=32, dropout=0, kernel_size=5, distribute_zero_label=False, ) classifier.load_state_dict(torch.load(os.path.join(model_dir, "classifier.pth"), map_location=torch.device("cpu"))) clip = clip.cpu().unsqueeze(0) results = F.softmax(classifier(clip), dim=-1) return results[0][0] def pick_best_batch_size_for_gpu(): """ Tries to pick a batch size that will fit in your GPU. These sizes aren't guaranteed to work, but they should give you a good shot. """ if torch.cuda.is_available(): _, available = torch.cuda.mem_get_info() availableGb = available / (1024**3) batch_size = 1 if availableGb > 14: batch_size = 16 elif availableGb > 10: batch_size = 8 elif availableGb > 7: batch_size = 4 return batch_size @dataclass class TortoiseAudioConfig(Coqpit): sample_rate: int = 22050 diffusion_sample_rate: int = 24000 output_sample_rate: int = 24000 @dataclass class TortoiseArgs(Coqpit): """A dataclass to represent Tortoise model arguments that define the model structure. Args: autoregressive_batch_size (int): The size of the auto-regressive batch. enable_redaction (bool, optional): Whether to enable redaction. Defaults to True. high_vram (bool, optional): Whether to use high VRAM. Defaults to False. kv_cache (bool, optional): Whether to use the kv_cache. Defaults to True. ar_checkpoint (str, optional): The checkpoint for the autoregressive model. Defaults to None. clvp_checkpoint (str, optional): The checkpoint for the ConditionalLatentVariablePerseq model. Defaults to None. diff_checkpoint (str, optional): The checkpoint for the DiffTTS model. Defaults to None. num_chars (int, optional): The maximum number of characters to generate. Defaults to 255. vocoder (VocType, optional): The vocoder to use for synthesis. Defaults to VocConf.Univnet. For UnifiedVoice model: ar_max_mel_tokens (int, optional): The maximum mel tokens for the autoregressive model. Defaults to 604. ar_max_text_tokens (int, optional): The maximum text tokens for the autoregressive model. Defaults to 402. ar_max_conditioning_inputs (int, optional): The maximum conditioning inputs for the autoregressive model. Defaults to 2. ar_layers (int, optional): The number of layers for the autoregressive model. Defaults to 30. ar_model_dim (int, optional): The model dimension for the autoregressive model. Defaults to 1024. ar_heads (int, optional): The number of heads for the autoregressive model. Defaults to 16. ar_number_text_tokens (int, optional): The number of text tokens for the autoregressive model. Defaults to 255. 
ar_start_text_token (int, optional): The start text token for the autoregressive model. Defaults to 255. ar_checkpointing (bool, optional): Whether to use checkpointing for the autoregressive model. Defaults to False. ar_train_solo_embeddings (bool, optional): Whether to train embeddings for the autoregressive model. Defaults to False. For DiffTTS model: diff_model_channels (int, optional): The number of channels for the DiffTTS model. Defaults to 1024. diff_num_layers (int, optional): The number of layers for the DiffTTS model. Defaults to 10. diff_in_channels (int, optional): The input channels for the DiffTTS model. Defaults to 100. diff_out_channels (int, optional): The output channels for the DiffTTS model. Defaults to 200. diff_in_latent_channels (int, optional): The input latent channels for the DiffTTS model. Defaults to 1024. diff_in_tokens (int, optional): The input tokens for the DiffTTS model. Defaults to 8193. diff_dropout (int, optional): The dropout percentage for the DiffTTS model. Defaults to 0. diff_use_fp16 (bool, optional): Whether to use fp16 for the DiffTTS model. Defaults to False. diff_num_heads (int, optional): The number of heads for the DiffTTS model. Defaults to 16. diff_layer_drop (int, optional): The layer dropout percentage for the DiffTTS model. Defaults to 0. diff_unconditioned_percentage (int, optional): The percentage of unconditioned inputs for the DiffTTS model. Defaults to 0. For ConditionalLatentVariablePerseq model: clvp_dim_text (int): The dimension of the text input for the CLVP module. Defaults to 768. clvp_dim_speech (int): The dimension of the speech input for the CLVP module. Defaults to 768. clvp_dim_latent (int): The dimension of the latent representation for the CLVP module. Defaults to 768. clvp_num_text_tokens (int): The number of text tokens used by the CLVP module. Defaults to 256. clvp_text_enc_depth (int): The depth of the text encoder in the CLVP module. Defaults to 20. clvp_text_seq_len (int): The maximum sequence length of the text input for the CLVP module. Defaults to 350. clvp_text_heads (int): The number of attention heads used by the text encoder in the CLVP module. Defaults to 12. clvp_num_speech_tokens (int): The number of speech tokens used by the CLVP module. Defaults to 8192. clvp_speech_enc_depth (int): The depth of the speech encoder in the CLVP module. Defaults to 20. clvp_speech_heads (int): The number of attention heads used by the speech encoder in the CLVP module. Defaults to 12. clvp_speech_seq_len (int): The maximum sequence length of the speech input for the CLVP module. Defaults to 430. clvp_use_xformers (bool): A flag indicating whether the model uses transformers in the CLVP module. Defaults to True. duration_const (int): A constant value used in the model. Defaults to 102400. 
""" autoregressive_batch_size: int = 1 enable_redaction: bool = False high_vram: bool = False kv_cache: bool = True ar_checkpoint: str = None clvp_checkpoint: str = None diff_checkpoint: str = None num_chars: int = 255 vocoder: VocType = VocConf.Univnet # UnifiedVoice params ar_max_mel_tokens: int = 604 ar_max_text_tokens: int = 402 ar_max_conditioning_inputs: int = 2 ar_layers: int = 30 ar_model_dim: int = 1024 ar_heads: int = 16 ar_number_text_tokens: int = 255 ar_start_text_token: int = 255 ar_checkpointing: bool = False ar_train_solo_embeddings: bool = False # DiffTTS params diff_model_channels: int = 1024 diff_num_layers: int = 10 diff_in_channels: int = 100 diff_out_channels: int = 200 diff_in_latent_channels: int = 1024 diff_in_tokens: int = 8193 diff_dropout: int = 0 diff_use_fp16: bool = False diff_num_heads: int = 16 diff_layer_drop: int = 0 diff_unconditioned_percentage: int = 0 # clvp params clvp_dim_text: int = 768 clvp_dim_speech: int = 768 clvp_dim_latent: int = 768 clvp_num_text_tokens: int = 256 clvp_text_enc_depth: int = 20 clvp_text_seq_len: int = 350 clvp_text_heads: int = 12 clvp_num_speech_tokens: int = 8192 clvp_speech_enc_depth: int = 20 clvp_speech_heads: int = 12 clvp_speech_seq_len: int = 430 clvp_use_xformers: bool = True # constants duration_const: int = 102400 class Tortoise(BaseTTS): """Tortoise model class. Currently only supports inference. Examples: >>> from TTS.tts.configs.tortoise_config import TortoiseConfig >>> from TTS.tts.models.tortoise import Tortoise >>> config = TortoiseConfig() >>> model = Tortoise.inif_from_config(config) >>> model.load_checkpoint(config, checkpoint_dir="paths/to/models_dir/", eval=True) """ def __init__(self, config: Coqpit): super().__init__(config, ap=None, tokenizer=None) self.mel_norm_path = None self.config = config self.ar_checkpoint = self.args.ar_checkpoint self.diff_checkpoint = self.args.diff_checkpoint # TODO: check if this is even needed self.models_dir = config.model_dir self.autoregressive_batch_size = ( pick_best_batch_size_for_gpu() if self.args.autoregressive_batch_size is None else self.args.autoregressive_batch_size ) self.enable_redaction = self.args.enable_redaction self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") if self.enable_redaction: self.aligner = Wav2VecAlignment() self.tokenizer = VoiceBpeTokenizer() self.autoregressive = UnifiedVoice( max_mel_tokens=self.args.ar_max_mel_tokens, max_text_tokens=self.args.ar_max_text_tokens, max_conditioning_inputs=self.args.ar_max_conditioning_inputs, layers=self.args.ar_layers, model_dim=self.args.ar_model_dim, heads=self.args.ar_heads, number_text_tokens=self.args.ar_number_text_tokens, start_text_token=self.args.ar_start_text_token, checkpointing=self.args.ar_checkpointing, train_solo_embeddings=self.args.ar_train_solo_embeddings, ).cpu() self.diffusion = DiffusionTts( model_channels=self.args.diff_model_channels, num_layers=self.args.diff_num_layers, in_channels=self.args.diff_in_channels, out_channels=self.args.diff_out_channels, in_latent_channels=self.args.diff_in_latent_channels, in_tokens=self.args.diff_in_tokens, dropout=self.args.diff_dropout, use_fp16=self.args.diff_use_fp16, num_heads=self.args.diff_num_heads, layer_drop=self.args.diff_layer_drop, unconditioned_percentage=self.args.diff_unconditioned_percentage, ).cpu() self.clvp = CLVP( dim_text=self.args.clvp_dim_text, dim_speech=self.args.clvp_dim_speech, dim_latent=self.args.clvp_dim_latent, num_text_tokens=self.args.clvp_num_text_tokens, 
text_enc_depth=self.args.clvp_text_enc_depth, text_seq_len=self.args.clvp_text_seq_len, text_heads=self.args.clvp_text_heads, num_speech_tokens=self.args.clvp_num_speech_tokens, speech_enc_depth=self.args.clvp_speech_enc_depth, speech_heads=self.args.clvp_speech_heads, speech_seq_len=self.args.clvp_speech_seq_len, use_xformers=self.args.clvp_use_xformers, ).cpu() self.vocoder = self.args.vocoder.value.constructor().cpu() # Random latent generators (RLGs) are loaded lazily. self.rlg_auto = None self.rlg_diffusion = None if self.args.high_vram: self.autoregressive = self.autoregressive.to(self.device) self.diffusion = self.diffusion.to(self.device) self.clvp = self.clvp.to(self.device) self.vocoder = self.vocoder.to(self.device) self.high_vram = self.args.high_vram @contextmanager def temporary_cuda(self, model): if self.high_vram: yield model else: m = model.to(self.device) yield m m = model.cpu() def get_conditioning_latents( self, voice_samples, return_mels=False, latent_averaging_mode=0, original_tortoise=False, ): """ Transforms one or more voice_samples into a tuple (autoregressive_conditioning_latent, diffusion_conditioning_latent). These are expressive learned latents that encode aspects of the provided clips like voice, intonation, and acoustic properties. :param voice_samples: List of arbitrary reference clips, which should be *pairs* of torch tensors containing arbitrary kHz waveform data. :param latent_averaging_mode: 0/1/2 for following modes: 0 - latents will be generated as in original tortoise, using ~4.27s from each voice sample, averaging latent across all samples 1 - latents will be generated using (almost) entire voice samples, averaged across all the ~4.27s chunks 2 - latents will be generated using (almost) entire voice samples, averaged per voice sample """ assert latent_averaging_mode in [ 0, 1, 2, ], "latent_averaging mode has to be one of (0, 1, 2)" with torch.no_grad(): voice_samples = [[v.to(self.device) for v in ls] for ls in voice_samples] auto_conds = [] for ls in voice_samples: auto_conds.append(format_conditioning(ls[0], device=self.device, mel_norm_file=self.mel_norm_path)) auto_conds = torch.stack(auto_conds, dim=1) with self.temporary_cuda(self.autoregressive) as ar: auto_latent = ar.get_conditioning(auto_conds) diffusion_conds = [] DURS_CONST = self.args.duration_const for ls in voice_samples: # The diffuser operates at a sample rate of 24000 (except for the latent inputs) sample = torchaudio.functional.resample(ls[0], 22050, 24000) if original_tortoise else ls[1] if latent_averaging_mode == 0: sample = pad_or_truncate(sample, DURS_CONST)
cond_mel = wav_to_univnet_mel(
3
2023-11-29 08:15:06+00:00
24k
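The get_conditioning_latents docstring in the Tortoise record above spells out three latent_averaging_mode strategies, but the code is cropped before the mode-1 and mode-2 branches. The following is a minimal sketch of what the three modes compute, not the TTS library's implementation: `encode` is an assumed stand-in for the diffusion conditioning encoder, `pad_or_truncate` mirrors the helper used in the record, and `average_conditioning` is a hypothetical name.

import torch

def pad_or_truncate(wav: torch.Tensor, length: int) -> torch.Tensor:
    # Right-pad with zeros, or truncate, the last dimension to `length`.
    if wav.shape[-1] >= length:
        return wav[..., :length]
    return torch.nn.functional.pad(wav, (0, length - wav.shape[-1]))

def average_conditioning(samples, encode, chunk_len, latent_averaging_mode=0):
    # samples: list of 1-D waveform tensors; encode: callable mapping a
    # fixed-length chunk to a latent vector (stand-in for the real encoder).
    assert latent_averaging_mode in (0, 1, 2)
    if latent_averaging_mode == 0:
        # Mode 0: one fixed-length chunk per sample, averaged across samples.
        latents = [encode(pad_or_truncate(s, chunk_len)) for s in samples]
        return torch.stack(latents).mean(0)
    pooled = []
    for s in samples:
        chunks = [s[..., i:i + chunk_len] for i in range(0, s.shape[-1], chunk_len)]
        enc = torch.stack([encode(pad_or_truncate(c, chunk_len)) for c in chunks])
        if latent_averaging_mode == 1:
            pooled.extend(enc)           # Mode 1: pool every chunk, then average.
        else:
            pooled.append(enc.mean(0))   # Mode 2: average per sample first.
    return torch.stack(pooled).mean(0)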
magic-research/magic-animate
magicanimate/pipelines/pipeline_animation.py
[ { "identifier": "UNet3DConditionModel", "path": "magicanimate/models/unet_controlnet.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,...
import inspect, math import numpy as np import torch import torch.distributed as dist from typing import Callable, List, Optional, Union from dataclasses import dataclass from PIL import Image from tqdm import tqdm from diffusers.utils import is_accelerate_available from packaging import version from transformers import CLIPTextModel, CLIPTokenizer from diffusers.configuration_utils import FrozenDict from diffusers.models import AutoencoderKL from diffusers.pipeline_utils import DiffusionPipeline from diffusers.schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from diffusers.utils import deprecate, logging, BaseOutput from einops import rearrange from magicanimate.models.unet_controlnet import UNet3DConditionModel from magicanimate.models.controlnet import ControlNetModel from magicanimate.models.mutual_self_attention import ReferenceAttentionControl from magicanimate.pipelines.context import ( get_context_scheduler, get_total_steps ) from magicanimate.utils.util import get_tensor_interpolation_method from accelerate import cpu_offload
19,344
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "tensor", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: Optional[int] = 1, controlnet_condition: list = None, controlnet_conditioning_scale: float = 1.0, context_frames: int = 16, context_stride: int = 1, context_overlap: int = 4, context_batch_size: int = 1, context_schedule: str = "uniform", init_latents: Optional[torch.FloatTensor] = None, num_actual_inference_steps: Optional[int] = None, appearance_encoder = None, reference_control_writer = None, reference_control_reader = None, source_image: str = None, decoder_consistency = None, **kwargs, ): """ New args: - controlnet_condition : condition map (e.g., depth, canny, keypoints) for controlnet - controlnet_conditioning_scale : conditioning scale for controlnet - init_latents : initial latents to begin with (used along with invert()) - num_actual_inference_steps : number of actual inference steps (while total steps is num_inference_steps) """ controlnet = self.controlnet # Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # Check inputs. Raise error if not correct self.check_inputs(prompt, height, width, callback_steps) # Define call parameters # batch_size = 1 if isinstance(prompt, str) else len(prompt) batch_size = 1 if latents is not None: batch_size = latents.shape[0] if isinstance(prompt, list): batch_size = len(prompt) device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
do_classifier_free_guidance = guidance_scale > 1.0 # Encode input prompt prompt = prompt if isinstance(prompt, list) else [prompt] * batch_size if negative_prompt is not None: negative_prompt = negative_prompt if isinstance(negative_prompt, list) else [negative_prompt] * batch_size text_embeddings = self._encode_prompt( prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt ) text_embeddings = torch.cat([text_embeddings] * context_batch_size) reference_control_writer = ReferenceAttentionControl(appearance_encoder, do_classifier_free_guidance=True, mode='write', batch_size=context_batch_size) reference_control_reader = ReferenceAttentionControl(self.unet, do_classifier_free_guidance=True, mode='read', batch_size=context_batch_size) is_dist_initialized = kwargs.get("dist", False) rank = kwargs.get("rank", 0) world_size = kwargs.get("world_size", 1) # Prepare video assert num_videos_per_prompt == 1 # FIXME: verify if num_videos_per_prompt > 1 works assert batch_size == 1 # FIXME: verify if batch_size > 1 works control = self.prepare_condition( condition=controlnet_condition, device=device, dtype=controlnet.dtype, num_videos_per_prompt=num_videos_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, ) controlnet_uncond_images, controlnet_cond_images = control.chunk(2) # Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # Prepare latent variables if init_latents is not None: latents = rearrange(init_latents, "(b f) c h w -> b c f h w", f=video_length) else: num_channels_latents = self.unet.in_channels latents = self.prepare_latents( batch_size * num_videos_per_prompt, num_channels_latents, video_length, height, width, text_embeddings.dtype, device, generator, latents, ) latents_dtype = latents.dtype # Prepare extra step kwargs. extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # Prepare text embeddings for controlnet controlnet_text_embeddings = text_embeddings.repeat_interleave(video_length, 0) _, controlnet_text_embeddings_c = controlnet_text_embeddings.chunk(2) controlnet_res_samples_cache_dict = {i:None for i in range(video_length)} # For img2img setting if num_actual_inference_steps is None: num_actual_inference_steps = num_inference_steps if isinstance(source_image, str): ref_image_latents = self.images2latents(np.array(Image.open(source_image).resize((width, height)))[None, :], latents_dtype).cuda() elif isinstance(source_image, np.ndarray): ref_image_latents = self.images2latents(source_image[None, :], latents_dtype).cuda()
# ************************************************************************* # This file may have been modified by Bytedance Inc. (“Bytedance Inc.'s Mo- # difications”). All Bytedance Inc.'s Modifications are Copyright (2023) B- # ytedance Inc.. # ************************************************************************* # Adapted from https://github.com/showlab/Tune-A-Video/blob/main/tuneavideo/pipelines/pipeline_tuneavideo.py # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TODO: 1. support multi-controlnet 2. [DONE] support DDIM inversion 3. support Prompt-to-prompt """ logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class AnimationPipelineOutput(BaseOutput): videos: Union[torch.Tensor, np.ndarray] class AnimationPipeline(DiffusionPipeline): _optional_components = [] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet3DConditionModel, controlnet: ControlNetModel, scheduler: Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ], ): super().__init__() if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["steps_offset"] = 1 scheduler._internal_dict = FrozenDict(new_config) if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." " `clip_sample` should be set to False in the configuration file. Please make sure to update the" " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" " future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" ) deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["clip_sample"] = False scheduler._internal_dict = FrozenDict(new_config) is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( version.parse(unet.config._diffusers_version).base_version ) < version.parse("0.9.0.dev0") is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = ( "The configuration file of the unet has set the default `sample_size` to smaller than" " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" " in the config might lead to incorrect results in future versions. If you have downloaded this" " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" " the `unet/config.json` file" ) deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config["sample_size"] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, controlnet=controlnet, scheduler=scheduler, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) def enable_vae_slicing(self): self.vae.enable_slicing() def disable_vae_slicing(self): self.vae.disable_slicing() def enable_sequential_cpu_offload(self, gpu_id=0): if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("Please install accelerate via `pip install accelerate`") device = torch.device(f"cuda:{gpu_id}") for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]: if cpu_offloaded_model is not None: cpu_offload(cpu_offloaded_model, device) @property def _execution_device(self): if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"): return self.device for module in self.unet.modules(): if ( hasattr(module, "_hf_hook") and hasattr(module._hf_hook, "execution_device") and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device) return self.device def _encode_prompt(self, prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt): batch_size = len(prompt) if isinstance(prompt, list) else 1 text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" 
{self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None text_embeddings = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, ) text_embeddings = text_embeddings[0] # duplicate text embeddings for each generation per prompt, using mps friendly method bs_embed, seq_len, _ = text_embeddings.shape text_embeddings = text_embeddings.repeat(1, num_videos_per_prompt, 1) text_embeddings = text_embeddings.view(bs_embed * num_videos_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt max_length = text_input_ids.shape[-1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None uncond_embeddings = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) uncond_embeddings = uncond_embeddings[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = uncond_embeddings.shape[1] uncond_embeddings = uncond_embeddings.repeat(1, num_videos_per_prompt, 1) uncond_embeddings = uncond_embeddings.view(batch_size * num_videos_per_prompt, seq_len, -1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) return text_embeddings def decode_latents(self, latents, rank, decoder_consistency=None): video_length = latents.shape[2] latents = 1 / 0.18215 * latents latents = rearrange(latents, "b c f h w -> (b f) c h w") # video = self.vae.decode(latents).sample video = [] for frame_idx in tqdm(range(latents.shape[0]), disable=(rank!=0)): if decoder_consistency is not None: video.append(decoder_consistency(latents[frame_idx:frame_idx+1])) else: video.append(self.vae.decode(latents[frame_idx:frame_idx+1]).sample) video = torch.cat(video) video = rearrange(video, "(b f) c h w -> b c f h w", f=video_length) video = (video / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16 video = video.cpu().float().numpy() return video def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, callback_steps): if not isinstance(prompt, str) and not isinstance(prompt, list): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) def prepare_latents(self, batch_size, num_channels_latents, video_length, height, width, dtype, device, generator, latents=None, clip_length=16): shape = (batch_size, num_channels_latents, clip_length, height // self.vae_scale_factor, width // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if latents is None: rand_device = "cpu" if device.type == "mps" else device if isinstance(generator, list): latents = [ torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype) for i in range(batch_size) ] latents = torch.cat(latents, dim=0).to(device) else: latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype).to(device) latents = latents.repeat(1, 1, video_length//clip_length, 1, 1) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents def prepare_condition(self, condition, num_videos_per_prompt, device, dtype, do_classifier_free_guidance): # prepare conditions for controlnet condition = torch.from_numpy(condition.copy()).to(device=device, dtype=dtype) / 255.0 condition = torch.stack([condition for _ in range(num_videos_per_prompt)], dim=0) condition = rearrange(condition, 'b f h w c -> (b f) c h w').clone() if do_classifier_free_guidance: condition = torch.cat([condition] * 2) return condition def next_step( self, model_output: torch.FloatTensor, timestep: int, x: torch.FloatTensor, eta=0., verbose=False ): """ Inverse sampling for DDIM Inversion """ if verbose: print("timestep: ", timestep) next_step = timestep timestep = min(timestep - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps, 999) alpha_prod_t = self.scheduler.alphas_cumprod[timestep] if timestep >= 0 else self.scheduler.final_alpha_cumprod alpha_prod_t_next = self.scheduler.alphas_cumprod[next_step] beta_prod_t = 1 - alpha_prod_t pred_x0 = (x - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5 pred_dir = (1 - alpha_prod_t_next)**0.5 * model_output x_next = alpha_prod_t_next**0.5 * pred_x0 + pred_dir return 
x_next, pred_x0 @torch.no_grad() def images2latents(self, images, dtype): """ Convert RGB image to VAE latents """ device = self._execution_device images = torch.from_numpy(images).float().to(dtype) / 127.5 - 1 images = rearrange(images, "f h w c -> f c h w").to(device) latents = [] for frame_idx in range(images.shape[0]): latents.append(self.vae.encode(images[frame_idx:frame_idx+1])['latent_dist'].mean * 0.18215) latents = torch.cat(latents) return latents @torch.no_grad() def invert( self, image: torch.Tensor, prompt, num_inference_steps=20, num_actual_inference_steps=10, eta=0.0, return_intermediates=False, **kwargs): """ Adapted from: https://github.com/Yujun-Shi/DragDiffusion/blob/main/drag_pipeline.py#L440 invert a real image into noise map with determinisc DDIM inversion """ device = self._execution_device batch_size = image.shape[0] if isinstance(prompt, list): if batch_size == 1: image = image.expand(len(prompt), -1, -1, -1) elif isinstance(prompt, str): if batch_size > 1: prompt = [prompt] * batch_size # text embeddings text_input = self.tokenizer( prompt, padding="max_length", max_length=77, return_tensors="pt" ) text_embeddings = self.text_encoder(text_input.input_ids.to(device))[0] print("input text embeddings :", text_embeddings.shape) # define initial latents latents = self.images2latents(image) print("latents shape: ", latents.shape) # interative sampling self.scheduler.set_timesteps(num_inference_steps) print("Valid timesteps: ", reversed(self.scheduler.timesteps)) latents_list = [latents] pred_x0_list = [latents] for i, t in enumerate(tqdm(reversed(self.scheduler.timesteps), desc="DDIM Inversion")): if num_actual_inference_steps is not None and i >= num_actual_inference_steps: continue model_inputs = latents # predict the noise # NOTE: the u-net here is UNet3D, therefore the model_inputs need to be of shape (b c f h w) model_inputs = rearrange(model_inputs, "f c h w -> 1 c f h w") noise_pred = self.unet(model_inputs, t, encoder_hidden_states=text_embeddings).sample noise_pred = rearrange(noise_pred, "b c f h w -> (b f) c h w") # compute the previous noise sample x_t-1 -> x_t latents, pred_x0 = self.next_step(noise_pred, t, latents) latents_list.append(latents) pred_x0_list.append(pred_x0) if return_intermediates: # return the intermediate laters during inversion return latents, latents_list return latents def interpolate_latents(self, latents: torch.Tensor, interpolation_factor:int, device ): if interpolation_factor < 2: return latents new_latents = torch.zeros( (latents.shape[0],latents.shape[1],((latents.shape[2]-1) * interpolation_factor)+1, latents.shape[3],latents.shape[4]), device=latents.device, dtype=latents.dtype, ) org_video_length = latents.shape[2] rate = [i/interpolation_factor for i in range(interpolation_factor)][1:] new_index = 0 v0 = None v1 = None for i0,i1 in zip( range( org_video_length ),range( org_video_length )[1:] ): v0 = latents[:,:,i0,:,:] v1 = latents[:,:,i1,:,:] new_latents[:,:,new_index,:,:] = v0 new_index += 1 for f in rate: v = get_tensor_interpolation_method()(v0.to(device=device),v1.to(device=device),f) new_latents[:,:,new_index,:,:] = v.to(latents.device) new_index += 1 new_latents[:,:,new_index,:,:] = v1 new_index += 1 return new_latents def select_controlnet_res_samples(self, controlnet_res_samples_cache_dict, context, do_classifier_free_guidance, b, f): _down_block_res_samples = [] _mid_block_res_sample = [] for i in np.concatenate(np.array(context)): _down_block_res_samples.append(controlnet_res_samples_cache_dict[i][0]) 
_mid_block_res_sample.append(controlnet_res_samples_cache_dict[i][1]) down_block_res_samples = [[] for _ in range(len(controlnet_res_samples_cache_dict[i][0]))] for res_t in _down_block_res_samples: for i, res in enumerate(res_t): down_block_res_samples[i].append(res) down_block_res_samples = [torch.cat(res) for res in down_block_res_samples] mid_block_res_sample = torch.cat(_mid_block_res_sample) # reshape controlnet output to match the unet3d inputs b = b // 2 if do_classifier_free_guidance else b _down_block_res_samples = [] for sample in down_block_res_samples: sample = rearrange(sample, '(b f) c h w -> b c f h w', b=b, f=f) if do_classifier_free_guidance: sample = sample.repeat(2, 1, 1, 1, 1) _down_block_res_samples.append(sample) down_block_res_samples = _down_block_res_samples mid_block_res_sample = rearrange(mid_block_res_sample, '(b f) c h w -> b c f h w', b=b, f=f) if do_classifier_free_guidance: mid_block_res_sample = mid_block_res_sample.repeat(2, 1, 1, 1, 1) return down_block_res_samples, mid_block_res_sample @torch.no_grad() def __call__( self, prompt: Union[str, List[str]], video_length: Optional[int], height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_videos_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "tensor", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: Optional[int] = 1, controlnet_condition: list = None, controlnet_conditioning_scale: float = 1.0, context_frames: int = 16, context_stride: int = 1, context_overlap: int = 4, context_batch_size: int = 1, context_schedule: str = "uniform", init_latents: Optional[torch.FloatTensor] = None, num_actual_inference_steps: Optional[int] = None, appearance_encoder = None, reference_control_writer = None, reference_control_reader = None, source_image: str = None, decoder_consistency = None, **kwargs, ): """ New args: - controlnet_condition : condition map (e.g., depth, canny, keypoints) for controlnet - controlnet_conditioning_scale : conditioning scale for controlnet - init_latents : initial latents to begin with (used along with invert()) - num_actual_inference_steps : number of actual inference steps (while total steps is num_inference_steps) """ controlnet = self.controlnet # Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # Check inputs. Raise error if not correct self.check_inputs(prompt, height, width, callback_steps) # Define call parameters # batch_size = 1 if isinstance(prompt, str) else len(prompt) batch_size = 1 if latents is not None: batch_size = latents.shape[0] if isinstance(prompt, list): batch_size = len(prompt) device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
do_classifier_free_guidance = guidance_scale > 1.0 # Encode input prompt prompt = prompt if isinstance(prompt, list) else [prompt] * batch_size if negative_prompt is not None: negative_prompt = negative_prompt if isinstance(negative_prompt, list) else [negative_prompt] * batch_size text_embeddings = self._encode_prompt( prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt ) text_embeddings = torch.cat([text_embeddings] * context_batch_size) reference_control_writer = ReferenceAttentionControl(appearance_encoder, do_classifier_free_guidance=True, mode='write', batch_size=context_batch_size) reference_control_reader = ReferenceAttentionControl(self.unet, do_classifier_free_guidance=True, mode='read', batch_size=context_batch_size) is_dist_initialized = kwargs.get("dist", False) rank = kwargs.get("rank", 0) world_size = kwargs.get("world_size", 1) # Prepare video assert num_videos_per_prompt == 1 # FIXME: verify if num_videos_per_prompt > 1 works assert batch_size == 1 # FIXME: verify if batch_size > 1 works control = self.prepare_condition( condition=controlnet_condition, device=device, dtype=controlnet.dtype, num_videos_per_prompt=num_videos_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, ) controlnet_uncond_images, controlnet_cond_images = control.chunk(2) # Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # Prepare latent variables if init_latents is not None: latents = rearrange(init_latents, "(b f) c h w -> b c f h w", f=video_length) else: num_channels_latents = self.unet.in_channels latents = self.prepare_latents( batch_size * num_videos_per_prompt, num_channels_latents, video_length, height, width, text_embeddings.dtype, device, generator, latents, ) latents_dtype = latents.dtype # Prepare extra step kwargs. extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # Prepare text embeddings for controlnet controlnet_text_embeddings = text_embeddings.repeat_interleave(video_length, 0) _, controlnet_text_embeddings_c = controlnet_text_embeddings.chunk(2) controlnet_res_samples_cache_dict = {i:None for i in range(video_length)} # For img2img setting if num_actual_inference_steps is None: num_actual_inference_steps = num_inference_steps if isinstance(source_image, str): ref_image_latents = self.images2latents(np.array(Image.open(source_image).resize((width, height)))[None, :], latents_dtype).cuda() elif isinstance(source_image, np.ndarray): ref_image_latents = self.images2latents(source_image[None, :], latents_dtype).cuda()
context_scheduler = get_context_scheduler(context_schedule)
3
2023-11-21 08:33:54+00:00
24k
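The magic-animate record's next_line calls get_context_scheduler(context_schedule), whose body is not included in the record. As a rough illustration of what a "uniform" schedule over context_frames/context_overlap plausibly produces, here is a self-contained sketch (uniform_context_schedule is a hypothetical name, and the wraparound behaviour is an assumption, not magicanimate's actual implementation):

def uniform_context_schedule(video_length: int,
                             context_frames: int = 16,
                             context_overlap: int = 4):
    # Cover `video_length` frames with overlapping windows of
    # `context_frames` frames; consecutive windows share `context_overlap`
    # frames, and indices wrap so every window is full-length.
    step = context_frames - context_overlap
    windows = []
    for start in range(0, max(video_length - context_overlap, 1), step):
        windows.append([(start + i) % video_length for i in range(context_frames)])
    return windows

# e.g. uniform_context_schedule(24) ->
#   [[0, 1, ..., 15], [12, 13, ..., 23, 0, 1, 2, 3]]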
alexzhou907/DreamPropeller
threestudio/systems/base.py
[ { "identifier": "Exporter", "path": "threestudio/models/exporters/base.py", "snippet": "class Exporter(BaseObject):\n @dataclass\n class Config(BaseObject.Config):\n save_video: bool = False\n\n cfg: Config\n\n def configure(\n self,\n geometry: BaseImplicitGeometry,\n ...
import os import pytorch_lightning as pl import torch.nn.functional as F import threestudio import torch import numpy as np import copy from dataclasses import dataclass, field from threestudio.models.exporters.base import Exporter, ExporterOutput from threestudio.systems.utils import parse_optimizer, parse_scheduler from threestudio.utils.base import ( Updateable, update_end_if_possible, update_if_possible, ) from threestudio.utils.config import parse_structured from threestudio.utils.misc import C, cleanup, get_device, load_module_weights from threestudio.utils.saving import SaverMixin from threestudio.utils.typing import * from threestudio.models.geometry.implicit_sdf import ImplicitSDF from threestudio.utils.config import load_config, parse_structured
15,408
# other.prompt_utils = self.prompt_utils # if hasattr(self, "guidance"): # other.guidance = self.guidance # other.geometry = type(self.geometry)(self.cfg.geometry) # other.background = type(self.background)( # self.cfg.background # ) with torch.no_grad(): for parameter_to, parameter_from in zip(other.geometry.parameters(), self.geometry.parameters()): parameter_to.data = parameter_from.data.clone() for parameter_to, parameter_from in zip(other.material.parameters(), self.material.parameters()): parameter_to.data = parameter_from.data.clone() for parameter_to, parameter_from in zip(other.background.parameters(), self.background.parameters()): parameter_to.data = parameter_from.data.clone() for parameter_to, parameter_from in zip(other.renderer.parameters(), self.renderer.parameters()): parameter_to.data = parameter_from.data.clone() if self.renderer.cfg.get('grid_prune', False): other.renderer.estimator.occs = self.renderer.estimator.occs.clone() other.renderer.estimator.binaries = self.renderer.estimator.binaries.clone() # if isinstance(self.geometry, ImplicitSDF): # other.geometry.finite_difference_normal_eps = self.geometry.finite_difference_normal_eps return other.to(device, non_blocking=True) def get_aux_params(self): aux = {} if self.renderer.cfg.get('grid_prune', False): aux['occs'] = self.renderer.estimator.occs # aux['binaries'] = self.renderer.estimator.binaries return aux def get_params(self,include_occs=True, include_binaries=False): # myparams = {**dict(self.geometry.named_parameters()), **dict(self.material.named_parameters()) , **dict(self.background.named_parameters())} # if hasattr(self.guidance, 'unet_lora'): # myparams.update(dict(self.guidance.unet_lora.named_parameters())) myparams = list(self.geometry.parameters())+list(self.material.parameters())+list(self.background.parameters())+list(self.renderer.parameters()) if self.renderer.cfg.get('grid_prune', False) and include_occs: myparams += [self.renderer.estimator.occs]#, self.renderer.estimator.binaries] if self.renderer.cfg.get('grid_prune', False) and include_binaries: myparams += [self.renderer.estimator.binaries]#, self.renderer.estimator.binaries] return myparams def set_renderer_binaries(self): if self.renderer.cfg.get('grid_prune', False): thre = torch.clamp(self.renderer.estimator.occs[self.renderer.estimator.occs >= 0].mean(), max=0.01) self.renderer.estimator.binaries = (self.renderer.estimator.occs > thre).view(self.renderer.estimator.binaries.shape) def set_aux_params(self, aux): if self.renderer.cfg.get('grid_prune', False): self.renderer.estimator.occs = aux['occs'].to(self.renderer.estimator.occs.device, non_blocking=True) # self.renderer.estimator.binaries = aux['binaries'].to(self.renderer.estimator.binaries.device, non_blocking=True) def set_params(self, params, clone=True): myparams = self.get_params(include_occs=True, include_binaries=True) # if isinstance(self.geometry, ImplicitSDF): # self.geometry.finite_difference_normal_eps = aux['finite_difference_normal_eps'] for p, q in zip(myparams, params): if clone: p.data = q.data.clone().to(p.device, non_blocking=True) else: p.data = q.to(p.device, non_blocking=True) # for p, q in zip(myparams[-2:], params[-2:]): # p.data = q.data.clone().to(p.device, non_blocking=True) # self.set_renderer_binaries() def set_grads_from_grads(self, grads): # myparams = {**dict(self.geometry.named_parameters()), **dict(self.material.named_parameters()) , **dict(self.background.named_parameters())} # if hasattr(self.guidance, 'unet_lora'): # 
myparams.update(dict(self.guidance.unet_lora.named_parameters())) myparams = self.get_params(include_occs=False) # assert len(myparams) == len(grads) for p, grad in zip(myparams, grads): if grad is not None: p.grad = grad.to(p.device, non_blocking=True) def compute_error_from_system(self, other): myparams = self.get_params(include_occs=False) otherparams = other.get_params(include_occs=False) with torch.no_grad(): error = 0.0 total_num = 0 for p, q in zip(myparams, otherparams): error += torch.linalg.norm(p-q).pow(2).item() total_num += np.prod(list(q.shape)) return error / total_num * 1e6 def on_fit_start(self) -> None: if self._save_dir is not None: threestudio.info(f"Validation results will be saved to {self._save_dir}") else: threestudio.warn( f"Saving directory not set for the system, visualization results will not be saved" ) def on_test_end(self) -> None: if self._save_dir is not None: threestudio.info(f"Test results saved to {self._save_dir}") def on_predict_start(self) -> None: self.exporter: Exporter = threestudio.find(self.cfg.exporter_type)( self.cfg.exporter, geometry=self.geometry, material=self.material, background=self.background, ) def predict_step(self, batch, batch_idx): if self.exporter.cfg.save_video: self.test_step(batch, batch_idx) def on_predict_epoch_end(self) -> None: if self.exporter.cfg.save_video: self.on_test_epoch_end()
class BaseSystem(pl.LightningModule, Updateable, SaverMixin): @dataclass class Config: loggers: dict = field(default_factory=dict) loss: dict = field(default_factory=dict) optimizer: dict = field(default_factory=dict) scheduler: Optional[dict] = None weights: Optional[str] = None weights_ignore_modules: Optional[List[str]] = None cleanup_after_validation_step: bool = False cleanup_after_test_step: bool = False cfg: Config def __init__(self, cfg, device=get_device(), resumed=False, configure=True) -> None: super().__init__() self.cfg = parse_structured(self.Config, cfg) self._save_dir: Optional[str] = None self._resumed: bool = resumed self._resumed_eval: bool = False self._resumed_eval_status: dict = {"global_step": 0, "current_epoch": 0} if "loggers" in cfg: self.create_loggers(cfg.loggers) if configure: self.base_configure(device) def base_configure(self, device): self.configure(device) if self.cfg.weights is not None: self.load_weights(self.cfg.weights, self.cfg.weights_ignore_modules) self.post_configure() def load_weights(self, weights: str, ignore_modules: Optional[List[str]] = None): state_dict, epoch, global_step = load_module_weights( weights, ignore_modules=ignore_modules, map_location="cpu" ) self.load_state_dict(state_dict, strict=False) # restore step-dependent states self.do_update_step(epoch, global_step, on_load_weights=True) def set_resume_status(self, current_epoch: int, global_step: int): # restore correct epoch and global step in eval self._resumed_eval = True self._resumed_eval_status["current_epoch"] = current_epoch self._resumed_eval_status["global_step"] = global_step @property def resumed(self): # whether from resumed checkpoint return self._resumed @property def true_global_step(self): if self._resumed_eval: return self._resumed_eval_status["global_step"] else: return self.global_step @property def true_current_epoch(self): if self._resumed_eval: return self._resumed_eval_status["current_epoch"] else: return self.current_epoch def configure(self) -> None: pass def post_configure(self) -> None: """ executed after weights are loaded """ pass def C(self, value: Any) -> float: return C(value, self.true_current_epoch, self.true_global_step) def configure_optimizers(self): optim = parse_optimizer(self.cfg.optimizer, self) ret = { "optimizer": optim, } if self.cfg.scheduler is not None: ret.update( { "lr_scheduler": parse_scheduler(self.cfg.scheduler, optim), } ) return ret def training_step(self, batch, batch_idx): raise NotImplementedError def validation_step(self, batch, batch_idx): raise NotImplementedError def on_train_batch_end(self, outputs, batch, batch_idx): self.dataset = self.trainer.train_dataloader.dataset update_end_if_possible( self.dataset, self.true_current_epoch, self.true_global_step ) self.do_update_step_end(self.true_current_epoch, self.true_global_step) def on_validation_batch_end(self, outputs, batch, batch_idx): self.dataset = self.trainer.val_dataloaders.dataset update_end_if_possible( self.dataset, self.true_current_epoch, self.true_global_step ) self.do_update_step_end(self.true_current_epoch, self.true_global_step) if self.cfg.cleanup_after_validation_step: # cleanup to save vram cleanup() def on_validation_epoch_end(self): raise NotImplementedError def test_step(self, batch, batch_idx): raise NotImplementedError def on_test_batch_end(self, outputs, batch, batch_idx): self.dataset = self.trainer.test_dataloaders.dataset update_end_if_possible( self.dataset, self.true_current_epoch, self.true_global_step ) 
self.do_update_step_end(self.true_current_epoch, self.true_global_step) if self.cfg.cleanup_after_test_step: # cleanup to save vram cleanup() def on_test_epoch_end(self): pass def predict_step(self, batch, batch_idx): raise NotImplementedError def on_predict_batch_end(self, outputs, batch, batch_idx): self.dataset = self.trainer.predict_dataloaders.dataset update_end_if_possible( self.dataset, self.true_current_epoch, self.true_global_step ) self.do_update_step_end(self.true_current_epoch, self.true_global_step) if self.cfg.cleanup_after_test_step: # cleanup to save vram cleanup() def on_predict_epoch_end(self): pass def preprocess_data(self, batch, stage): pass """ Implementing on_after_batch_transfer of DataModule does the same. But on_after_batch_transfer does not support DP. """ def on_train_batch_start(self, batch, batch_idx, unused=0): self.preprocess_data(batch, "train") self.dataset = self.trainer.train_dataloader.dataset update_if_possible(self.dataset, self.true_current_epoch, self.true_global_step) self.do_update_step(self.true_current_epoch, self.true_global_step) def on_validation_batch_start(self, batch, batch_idx, dataloader_idx=0): self.preprocess_data(batch, "validation") self.dataset = self.trainer.val_dataloaders.dataset update_if_possible(self.dataset, self.true_current_epoch, self.true_global_step) self.do_update_step(self.true_current_epoch, self.true_global_step) def on_test_batch_start(self, batch, batch_idx, dataloader_idx=0): self.preprocess_data(batch, "test") self.dataset = self.trainer.test_dataloaders.dataset update_if_possible(self.dataset, self.true_current_epoch, self.true_global_step) self.do_update_step(self.true_current_epoch, self.true_global_step) def on_predict_batch_start(self, batch, batch_idx, dataloader_idx=0): self.preprocess_data(batch, "predict") self.dataset = self.trainer.predict_dataloaders.dataset update_if_possible(self.dataset, self.true_current_epoch, self.true_global_step) self.do_update_step(self.true_current_epoch, self.true_global_step) def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False): pass def on_before_optimizer_step(self, optimizer): """ # some gradient-related debugging goes here, example: from lightning.pytorch.utilities import grad_norm norms = grad_norm(self.geometry, norm_type=2) print(norms) """ pass class BaseLift3DSystem(BaseSystem): @dataclass class Config(BaseSystem.Config): geometry_type: str = "" geometry: dict = field(default_factory=dict) geometry_convert_from: Optional[str] = None geometry_convert_inherit_texture: bool = False # used to override configurations of the previous geometry being converted from, # for example isosurface_threshold geometry_convert_override: dict = field(default_factory=dict) material_type: str = "" material: dict = field(default_factory=dict) background_type: str = "" background: dict = field(default_factory=dict) renderer_type: str = "" renderer: dict = field(default_factory=dict) guidance_type: str = "" guidance: dict = field(default_factory=dict) prompt_processor_type: str = "" prompt_processor: dict = field(default_factory=dict) # geometry export configurations, no need to specify in training exporter_type: str = "mesh-exporter" exporter: dict = field(default_factory=dict) cfg: Config def configure(self, device) -> None: if ( self.cfg.geometry_convert_from # from_coarse must be specified and not self.cfg.weights # not initialized from coarse when weights are specified and not self.resumed # not initialized from coarse when resumed from checkpoints ): 
threestudio.info("Initializing geometry from a given checkpoint ...") prev_cfg = load_config( os.path.join( os.path.dirname(self.cfg.geometry_convert_from), "../configs/parsed.yaml", ) ) # TODO: hard-coded relative path prev_system_cfg: BaseLift3DSystem.Config = parse_structured( self.Config, prev_cfg.system ) prev_geometry_cfg = prev_system_cfg.geometry prev_geometry_cfg.update(self.cfg.geometry_convert_override) prev_geometry = threestudio.find(prev_system_cfg.geometry_type)( prev_geometry_cfg, device=device ) state_dict, epoch, global_step = load_module_weights( self.cfg.geometry_convert_from, module_name="geometry", map_location="cpu", ) prev_geometry.load_state_dict(state_dict, strict=False) # restore step-dependent states prev_geometry.do_update_step(epoch, global_step, on_load_weights=True) # convert from coarse stage geometry prev_geometry = prev_geometry.to(device) self.geometry = threestudio.find(self.cfg.geometry_type).create_from( prev_geometry, self.cfg.geometry, copy_net=self.cfg.geometry_convert_inherit_texture, ) del prev_geometry cleanup() else: self.geometry = threestudio.find(self.cfg.geometry_type)(self.cfg.geometry, device=device) self.material = threestudio.find(self.cfg.material_type)(self.cfg.material, device=device) self.background = threestudio.find(self.cfg.background_type)( self.cfg.background, device=device ) self.renderer = threestudio.find(self.cfg.renderer_type)( self.cfg.renderer, geometry=self.geometry, material=self.material, background=self.background, device=device ) # def get_optimizer(self): # getattr(torch.optim, self.cfg.optimizer.name)(params, ** self.cfg.optimizer.args) def clone(self, device, other=None): if other is None: # make sure to clone after on_fit_start other = type(self)(self.cfg, device=device, configure=False) other.geometry = copy.deepcopy(self.geometry) other.material = copy.deepcopy(self.material) other.background = copy.deepcopy(self.background) other.renderer = threestudio.find(self.cfg.renderer_type)( self.cfg.renderer, geometry=other.geometry, material=other.material, background=other.background, device=device ) # if hasattr(self, "prompt_processor"): # other.prompt_processor = self.prompt_processor # if hasattr(self, "prompt_utils"): # other.prompt_utils = self.prompt_utils # if hasattr(self, "guidance"): # other.guidance = self.guidance # other.geometry = type(self.geometry)(self.cfg.geometry) # other.background = type(self.background)( # self.cfg.background # ) with torch.no_grad(): for parameter_to, parameter_from in zip(other.geometry.parameters(), self.geometry.parameters()): parameter_to.data = parameter_from.data.clone() for parameter_to, parameter_from in zip(other.material.parameters(), self.material.parameters()): parameter_to.data = parameter_from.data.clone() for parameter_to, parameter_from in zip(other.background.parameters(), self.background.parameters()): parameter_to.data = parameter_from.data.clone() for parameter_to, parameter_from in zip(other.renderer.parameters(), self.renderer.parameters()): parameter_to.data = parameter_from.data.clone() if self.renderer.cfg.get('grid_prune', False): other.renderer.estimator.occs = self.renderer.estimator.occs.clone() other.renderer.estimator.binaries = self.renderer.estimator.binaries.clone() # if isinstance(self.geometry, ImplicitSDF): # other.geometry.finite_difference_normal_eps = self.geometry.finite_difference_normal_eps return other.to(device, non_blocking=True) def get_aux_params(self): aux = {} if self.renderer.cfg.get('grid_prune', False): aux['occs'] = 
self.renderer.estimator.occs # aux['binaries'] = self.renderer.estimator.binaries return aux def get_params(self,include_occs=True, include_binaries=False): # myparams = {**dict(self.geometry.named_parameters()), **dict(self.material.named_parameters()) , **dict(self.background.named_parameters())} # if hasattr(self.guidance, 'unet_lora'): # myparams.update(dict(self.guidance.unet_lora.named_parameters())) myparams = list(self.geometry.parameters())+list(self.material.parameters())+list(self.background.parameters())+list(self.renderer.parameters()) if self.renderer.cfg.get('grid_prune', False) and include_occs: myparams += [self.renderer.estimator.occs]#, self.renderer.estimator.binaries] if self.renderer.cfg.get('grid_prune', False) and include_binaries: myparams += [self.renderer.estimator.binaries]#, self.renderer.estimator.binaries] return myparams def set_renderer_binaries(self): if self.renderer.cfg.get('grid_prune', False): thre = torch.clamp(self.renderer.estimator.occs[self.renderer.estimator.occs >= 0].mean(), max=0.01) self.renderer.estimator.binaries = (self.renderer.estimator.occs > thre).view(self.renderer.estimator.binaries.shape) def set_aux_params(self, aux): if self.renderer.cfg.get('grid_prune', False): self.renderer.estimator.occs = aux['occs'].to(self.renderer.estimator.occs.device, non_blocking=True) # self.renderer.estimator.binaries = aux['binaries'].to(self.renderer.estimator.binaries.device, non_blocking=True) def set_params(self, params, clone=True): myparams = self.get_params(include_occs=True, include_binaries=True) # if isinstance(self.geometry, ImplicitSDF): # self.geometry.finite_difference_normal_eps = aux['finite_difference_normal_eps'] for p, q in zip(myparams, params): if clone: p.data = q.data.clone().to(p.device, non_blocking=True) else: p.data = q.to(p.device, non_blocking=True) # for p, q in zip(myparams[-2:], params[-2:]): # p.data = q.data.clone().to(p.device, non_blocking=True) # self.set_renderer_binaries() def set_grads_from_grads(self, grads): # myparams = {**dict(self.geometry.named_parameters()), **dict(self.material.named_parameters()) , **dict(self.background.named_parameters())} # if hasattr(self.guidance, 'unet_lora'): # myparams.update(dict(self.guidance.unet_lora.named_parameters())) myparams = self.get_params(include_occs=False) # assert len(myparams) == len(grads) for p, grad in zip(myparams, grads): if grad is not None: p.grad = grad.to(p.device, non_blocking=True) def compute_error_from_system(self, other): myparams = self.get_params(include_occs=False) otherparams = other.get_params(include_occs=False) with torch.no_grad(): error = 0.0 total_num = 0 for p, q in zip(myparams, otherparams): error += torch.linalg.norm(p-q).pow(2).item() total_num += np.prod(list(q.shape)) return error / total_num * 1e6 def on_fit_start(self) -> None: if self._save_dir is not None: threestudio.info(f"Validation results will be saved to {self._save_dir}") else: threestudio.warn( f"Saving directory not set for the system, visualization results will not be saved" ) def on_test_end(self) -> None: if self._save_dir is not None: threestudio.info(f"Test results saved to {self._save_dir}") def on_predict_start(self) -> None: self.exporter: Exporter = threestudio.find(self.cfg.exporter_type)( self.cfg.exporter, geometry=self.geometry, material=self.material, background=self.background, ) def predict_step(self, batch, batch_idx): if self.exporter.cfg.save_video: self.test_step(batch, batch_idx) def on_predict_epoch_end(self) -> None: if 
self.exporter.cfg.save_video: self.on_test_epoch_end()
exporter_output: List[ExporterOutput] = self.exporter()
1
2023-11-27 23:39:49+00:00
24k
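compute_error_from_system in the DreamPropeller record above reduces to a mean squared distance between two systems' parameter lists, scaled by 1e6. For clarity, here is a standalone analogue for any pair of torch modules (param_error is a hypothetical name; the record applies the same arithmetic to its geometry/material/background/renderer parameters):

import torch

def param_error(module_a: torch.nn.Module, module_b: torch.nn.Module) -> float:
    # Sum of squared L2 distances over matching parameters, normalized by
    # the total parameter count and scaled by 1e6, mirroring
    # compute_error_from_system above.
    with torch.no_grad():
        sq_err, total = 0.0, 0
        for p, q in zip(module_a.parameters(), module_b.parameters()):
            sq_err += torch.linalg.norm(p - q).pow(2).item()
            total += q.numel()
    return sq_err / total * 1e6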
abdulhaim/LMRL-Gym
llm_rl_scripts/maze/ppo/train_ppo_online.py
[ { "identifier": "build_ppo_score_fn", "path": "LLM_RL/algorithms/ppo/score_fn.py", "snippet": "def build_ppo_score_fn(\n inference: PPOInference, \n tokenizer: PreTrainedTokenizer, \n max_length: int, \n bsize: int, \n):\n \n def score_fn(text_histories: List[TextHistory]) -> List[floa...
from typing import Optional, Dict, Any, Tuple from JaxSeq.bucket_manager import open_with_bucket as open from transformers import AutoTokenizer from JaxSeq.utils import convert_path, load_mesh, get_dtype, setup_experiment_save from JaxSeq.utils import BlockingStrategy, Padding, Truncation, get_weight_decay_mask, create_path, get_enabled_save_path from JaxSeq.models.gpt2.interface import GPT2Inference from JaxSeq.models.gpt2.load import load_train_state, ModelLoadMode from LLM_RL.algorithms.ppo.score_fn import build_ppo_score_fn from LLM_RL.algorithms.ppo.train import train_loop from LLM_RL.algorithms.ppo.base_interface import ppo_loss_fn, FixedKLController, AdaptiveKLController from transformers.generation import GenerationConfig from jaxtyping import PyTree from LLM_RL.environment import Text, TokenHistory, text_env_eval, TextTrajectory, TextTrajectoryChain from LLM_RL.algorithms.ppo.gpt2.interface import GPT2PPOPolicy, GPT2PPOInference, GPT2PPOTrain from LLM_RL.heads.linear_head import load_train_state_from_config as load_head_train_state_from_config from LLM_RL.heads.linear_head import LinearHeadConfig from JaxSeq.shard_model import shard_params_from_params from LLM_RL.algorithms.ppo.data import PPODataset from LLM_RL.utils import get_tensor_stats_np from functools import partial from JaxSeq.logs import label_logs, log, pull_logs from JaxSeq.utils import multihost_device_get from IPython import embed from llm_rl_scripts.maze.env.mazes import double_t_maze_optimal_directions, double_t_maze from JaxSeq.data import MaskDataset from JaxSeq.models.gpt2.interface import loss_fn_mask from llm_rl_scripts.maze.env.env import MazeEnv, describe_observation_give_position, maze_proposal_function from LLM_RL.algorithms.ppo.reranker_policy import ReRankerPolicy from JaxSeq.utils import block_sequences from llm_rl_scripts.maze.env.maze_utils import setup_maze_env, pick_start_position import tyro import jax import jax.numpy as jnp import os import optax import pickle as pkl import re import numpy as np import json
18,936
# in_tokens = list(map(lambda x: block_sequences([x.tokens], tokenizer.pad_token_id, dtype=np.int32, blocking_strategy=BlockingStrategy(Padding.RIGHT, Truncation.RIGHT, max_input_length))[0], text_histories)) # text_histories = list(map(lambda x: x.text_history, text_trajectories)) # no this doesn't work because we also need to pad the is_actions token_histories = list(map(lambda x: TokenHistory.from_text_history(x, tokenizer), text_histories)) in_tokens = list(map(lambda x: block_sequences([x.tokens], tokenizer.pad_token_id, dtype=np.int32, blocking_strategy=blocking_strategy)[0], token_histories)) is_actions = list(map(lambda x: block_sequences([x.is_action], 0.0, dtype=np.float32, blocking_strategy=blocking_strategy)[0], token_histories)) # tokens = list(map(lambda x: token_process(x.tokens), token_histories)) # is_actions = list(map(lambda x: x.is_action, token_histories)) bc_data = MaskDataset( in_tokens = jnp.array(in_tokens), in_training_mask = jnp.array(is_actions), ) else: bc_data = None mesh = load_mesh((data_mesh_shape, fsdp_mesh_shape, model_mesh_shape), ('dp', 'fsdp', 'mp')) is_main_process = jax.process_index() == 0 print(f"Mesh: {mesh}") print(f"Is main process: {is_main_process}") def policy_optim_getter(params: PyTree): mask = get_weight_decay_mask(( "".join([r"\['ln_[0-9]+'\]", re.escape("['bias']")]), "".join([r"\['ln_[0-9]+'\]", re.escape("['scale']")]), re.escape("['ln_f']['bias']"), re.escape("['ln_f']['scale']"), "bias", ))(params) return optax.MultiSteps( optax.adamw( learning_rate=lr, b1=0.9, b2=0.95, eps=1e-8, weight_decay=weight_decay, mask=mask, ), every_k_schedule=grad_accum_steps, ) model_dtype = get_dtype(use_fp16=use_fp16_activations) params_dtype = get_dtype(use_fp16=use_fp16_params) model_prng_key = jax.random.PRNGKey(2) policy_train_state, policy_model = load_train_state( model_load_mode=model_load_mode, model_load_path=convert_path(model_load_path) if model_load_mode != ModelLoadMode.HF else model_load_path, model_dtype=model_dtype, optim_getter=policy_optim_getter, tokenizer=tokenizer, mesh=mesh, prng_key=model_prng_key, force_pad_embeddings=force_pad_embeddings, params_dtype=params_dtype, ) policy_model.config.gradient_checkpointing = gradient_checkpointing policy_model.config.gradient_checkpointing_policy = gradient_checkpointing_policy with jax.default_device(jax.devices('cpu')[0]): initial_policy_params = jax.tree_util.tree_map( lambda x: multihost_device_get(x, mesh=mesh).copy(), policy_train_state.params, ) initial_policy_params = shard_params_from_params( model=policy_model, params=initial_policy_params, ) loop_state = dict() if should_restore_loop_state and (model_load_mode in {ModelLoadMode.TRAIN_STATE, ModelLoadMode.TRAIN_STATE_PARAMS, ModelLoadMode.PARAMS}): with open(os.path.join(convert_path(model_load_path), 'loop_state.pkl'), 'rb') as f: loop_state = pkl.load(f) policy_inference = GPT2Inference.load_inference( params=policy_train_state.params, model=policy_model, tokenizer=tokenizer, ) policy_prng = jax.random.PRNGKey(0) policy = GPT2PPOPolicy( inference=policy_inference, prng_key=policy_prng, generation_config=GenerationConfig( do_sample=policy_do_sample, num_beams=policy_num_beams, temperature=policy_temperature, top_p=policy_top_p, top_k=policy_top_k, eos_token_id=tokenizer.encode('\n')[0], pad_token_id=tokenizer.pad_token_id, max_new_tokens=max_output_length, ), blocking_strategy=BlockingStrategy( padding=Padding.LEFT, truncation=Truncation.LEFT, max_length=max_input_length, ), out_str_process=lambda x: x.removesuffix('\n')+'\n', ) 
def value_head_optim_getter(params: PyTree): mask = get_weight_decay_mask(("bias",))(params) return optax.MultiSteps( optax.adamw( learning_rate=lr, b1=0.9, b2=0.95, eps=1e-8, weight_decay=weight_decay, mask=mask, ), every_k_schedule=grad_accum_steps, ) head_prng_key = jax.random.PRNGKey(3) value_head_train_state, value_head = load_head_train_state_from_config(
# from LLM_RL.gpt2 import load_gpt2_from_pretrained def main( model_load_mode: ModelLoadMode, model_load_path: str, /, # Mark the end of positional arguments. bc_data_path: Optional[str]=None, train_bc_bsize: int=8, bc_loss_weight:int=0, model_str:str="gpt2", exp_name: Optional[str]=None, outputs_path: Optional[str]=None, maze_name: str="double_t_maze", data_mesh_shape: int=1, fsdp_mesh_shape: int=1, model_mesh_shape: int=-1, use_wandb: bool=False, wandb_project: Optional[str]=None, n_rounds: int=1, epochs: int=1, max_steps: Optional[int]=None, lr: float=1e-5, weight_decay: float=0.0, train_bsize: int=32, grad_accum_steps: int=1, rollout_bsize: int=32, n_rollouts: int=128, ppo_data_bsize: int=32, num_pos_per_setup: int=4, gradient_checkpointing: bool=False, gradient_checkpointing_policy: str='nothing_saveable', use_fp16_activations: bool=False, use_fp16_params: bool=False, max_input_length: int=64, max_output_length: int=32, log_every: int=256, eval_every_steps: Optional[int]=None, eval_every_epochs: Optional[int]=None, eval_every_rounds: Optional[int]=1, eval_at_beginning: bool=True, eval_at_end: bool=True, save_every_steps: Optional[int]=None, save_every_epochs: Optional[int]=None, save_every_rounds: Optional[int]=10, save_at_beginning: bool=False, save_at_end: bool=True, save_best: bool=True, max_checkpoints: Optional[int]=20, save_train_state: bool=True, save_ppo_dataset: bool=True, save_bf16: bool=True, policy_do_sample: bool=True, policy_num_beams: int=1, policy_temperature: Optional[float]=None, policy_top_p: Optional[float]=None, policy_top_k: Optional[int]=None, gamma: float=0.99, lam: float=0.95, use_advantage_whitening: bool=True, init_kl_coef: float=0.001, kl_target: Optional[float]=None, kl_horizon: Optional[int]=None, cliprange_value: float=0.2, cliprange: float=0.2, value_loss_coef: float=0.5, force_pad_embeddings: bool=False, should_restore_loop_state: bool=False, describe_function: str= "describe_observation", reranker_policy: bool=False, reward_function: str="standard_reward", ): input_args = locals().copy() print(input_args) use_adaptive_kl = (kl_target is not None and kl_horizon is not None) if not use_adaptive_kl: assert kl_target is None and kl_horizon is None tokenizer = AutoTokenizer.from_pretrained('gpt2') tokenizer.add_special_tokens({'pad_token': '<|pad|>'}) if bc_data_path is not None: with open(bc_data_path, 'rb') as f: text_histories = pkl.load(f) blocking_strategy = BlockingStrategy(Padding.RIGHT, Truncation.RIGHT, max_input_length+max_output_length) # in_tokens = list(map(lambda x: block_sequences([x.tokens], tokenizer.pad_token_id, dtype=np.int32, blocking_strategy=BlockingStrategy(Padding.RIGHT, Truncation.RIGHT, max_input_length))[0], text_histories)) # text_histories = list(map(lambda x: x.text_history, text_trajectories)) # no this doesn't work because we also need to pad the is_actions token_histories = list(map(lambda x: TokenHistory.from_text_history(x, tokenizer), text_histories)) in_tokens = list(map(lambda x: block_sequences([x.tokens], tokenizer.pad_token_id, dtype=np.int32, blocking_strategy=blocking_strategy)[0], token_histories)) is_actions = list(map(lambda x: block_sequences([x.is_action], 0.0, dtype=np.float32, blocking_strategy=blocking_strategy)[0], token_histories)) # tokens = list(map(lambda x: token_process(x.tokens), token_histories)) # is_actions = list(map(lambda x: x.is_action, token_histories)) bc_data = MaskDataset( in_tokens = jnp.array(in_tokens), in_training_mask = jnp.array(is_actions), ) else: bc_data = None mesh = 
load_mesh((data_mesh_shape, fsdp_mesh_shape, model_mesh_shape), ('dp', 'fsdp', 'mp')) is_main_process = jax.process_index() == 0 print(f"Mesh: {mesh}") print(f"Is main process: {is_main_process}") def policy_optim_getter(params: PyTree): mask = get_weight_decay_mask(( "".join([r"\['ln_[0-9]+'\]", re.escape("['bias']")]), "".join([r"\['ln_[0-9]+'\]", re.escape("['scale']")]), re.escape("['ln_f']['bias']"), re.escape("['ln_f']['scale']"), "bias", ))(params) return optax.MultiSteps( optax.adamw( learning_rate=lr, b1=0.9, b2=0.95, eps=1e-8, weight_decay=weight_decay, mask=mask, ), every_k_schedule=grad_accum_steps, ) model_dtype = get_dtype(use_fp16=use_fp16_activations) params_dtype = get_dtype(use_fp16=use_fp16_params) model_prng_key = jax.random.PRNGKey(2) policy_train_state, policy_model = load_train_state( model_load_mode=model_load_mode, model_load_path=convert_path(model_load_path) if model_load_mode != ModelLoadMode.HF else model_load_path, model_dtype=model_dtype, optim_getter=policy_optim_getter, tokenizer=tokenizer, mesh=mesh, prng_key=model_prng_key, force_pad_embeddings=force_pad_embeddings, params_dtype=params_dtype, ) policy_model.config.gradient_checkpointing = gradient_checkpointing policy_model.config.gradient_checkpointing_policy = gradient_checkpointing_policy with jax.default_device(jax.devices('cpu')[0]): initial_policy_params = jax.tree_util.tree_map( lambda x: multihost_device_get(x, mesh=mesh).copy(), policy_train_state.params, ) initial_policy_params = shard_params_from_params( model=policy_model, params=initial_policy_params, ) loop_state = dict() if should_restore_loop_state and (model_load_mode in {ModelLoadMode.TRAIN_STATE, ModelLoadMode.TRAIN_STATE_PARAMS, ModelLoadMode.PARAMS}): with open(os.path.join(convert_path(model_load_path), 'loop_state.pkl'), 'rb') as f: loop_state = pkl.load(f) policy_inference = GPT2Inference.load_inference( params=policy_train_state.params, model=policy_model, tokenizer=tokenizer, ) policy_prng = jax.random.PRNGKey(0) policy = GPT2PPOPolicy( inference=policy_inference, prng_key=policy_prng, generation_config=GenerationConfig( do_sample=policy_do_sample, num_beams=policy_num_beams, temperature=policy_temperature, top_p=policy_top_p, top_k=policy_top_k, eos_token_id=tokenizer.encode('\n')[0], pad_token_id=tokenizer.pad_token_id, max_new_tokens=max_output_length, ), blocking_strategy=BlockingStrategy( padding=Padding.LEFT, truncation=Truncation.LEFT, max_length=max_input_length, ), out_str_process=lambda x: x.removesuffix('\n')+'\n', ) def value_head_optim_getter(params: PyTree): mask = get_weight_decay_mask(("bias",))(params) return optax.MultiSteps( optax.adamw( learning_rate=lr, b1=0.9, b2=0.95, eps=1e-8, weight_decay=weight_decay, mask=mask, ), every_k_schedule=grad_accum_steps, ) head_prng_key = jax.random.PRNGKey(3) value_head_train_state, value_head = load_head_train_state_from_config(
model_config=LinearHeadConfig(
14
2023-11-21 00:16:42+00:00
24k
jzmzhong/Automatic-Prosody-Annotator-with-SSWP-CLAP
src/clap_module/conformer/encoder.py
[ { "identifier": "ConvolutionModule", "path": "src/clap_module/conformer/convolution.py", "snippet": "class ConvolutionModule(nn.Module):\r\n \"\"\"ConvolutionModule in Conformer model.\r\n\r\n Args:\r\n channels (int): The number of channels of conv layers.\r\n kernel_size (int): Ker...
import logging import torch import math from .convolution import ConvolutionModule from .encoder_layer import EncoderLayer from .modules import get_activation from .modules import VGG2L from .modules import ( LegacyRelPositionMultiHeadedAttention, MultiHeadedAttention, RelPositionMultiHeadedAttention, ) from .embedding import ( LegacyRelPositionalEncoding, PositionalEncoding, RelPositionalEncoding, ScaledPositionalEncoding, ) from .modules import LayerNorm from .multi_layer_conv import ( Conv1dLinear, MultiLayeredConv1d, ) from .modules import ( PositionwiseFeedForward, ) from .modules import repeat from .sub_sampling import Conv2dSubsampling from ..feature_fusion import AttentionPool1d, DAF, AFF, iAFF
14,943
ctc_softmax=None, conditioning_layer_dim=None, max_seq_len=100, enable_fusion=False, fusion_type="", ): """Construct an Encoder object.""" super(Encoder, self).__init__() self.max_seq_len = max_seq_len activation = get_activation(activation_type) if pos_enc_layer_type == "abs_pos": pos_enc_class = PositionalEncoding elif pos_enc_layer_type == "scaled_abs_pos": pos_enc_class = ScaledPositionalEncoding elif pos_enc_layer_type == "rel_pos": assert selfattention_layer_type == "rel_selfattn" pos_enc_class = RelPositionalEncoding elif pos_enc_layer_type == "legacy_rel_pos": assert selfattention_layer_type == "legacy_rel_selfattn" pos_enc_class = LegacyRelPositionalEncoding else: raise ValueError("unknown pos_enc_layer: " + pos_enc_layer_type) self.conv_subsampling_factor = 1 if input_layer == "linear": self.embed = torch.nn.Sequential( torch.nn.Linear(idim, attention_dim), torch.nn.LayerNorm(attention_dim), torch.nn.Dropout(dropout_rate), pos_enc_class(attention_dim, positional_dropout_rate), ) elif input_layer == "conv2d": self.embed = Conv2dSubsampling( idim, attention_dim, dropout_rate, pos_enc_class(attention_dim, positional_dropout_rate), ) self.conv_subsampling_factor = 4 elif input_layer == "vgg2l": self.embed = VGG2L(idim, attention_dim) self.conv_subsampling_factor = 4 elif input_layer == "embed": self.embed = torch.nn.Sequential( torch.nn.Embedding(idim, attention_dim, padding_idx=padding_idx), pos_enc_class(attention_dim, positional_dropout_rate), ) elif isinstance(input_layer, torch.nn.Module): self.embed = torch.nn.Sequential( input_layer, pos_enc_class(attention_dim, positional_dropout_rate), ) elif input_layer is None: self.embed = torch.nn.Sequential( pos_enc_class(attention_dim, positional_dropout_rate) ) else: raise ValueError("unknown input_layer: " + input_layer) self.normalize_before = normalize_before # self-attention module definition if selfattention_layer_type == "selfattn": logging.info("encoder self-attention layer type = self-attention") encoder_selfattn_layer = MultiHeadedAttention encoder_selfattn_layer_args = ( attention_heads, attention_dim, attention_dropout_rate, ) elif selfattention_layer_type == "legacy_rel_selfattn": assert pos_enc_layer_type == "legacy_rel_pos" encoder_selfattn_layer = LegacyRelPositionMultiHeadedAttention encoder_selfattn_layer_args = ( attention_heads, attention_dim, attention_dropout_rate, ) elif selfattention_layer_type == "rel_selfattn": logging.info("encoder self-attention layer type = relative self-attention") assert pos_enc_layer_type == "rel_pos" encoder_selfattn_layer = RelPositionMultiHeadedAttention encoder_selfattn_layer_args = ( attention_heads, attention_dim, attention_dropout_rate, zero_triu, ) else: raise ValueError("unknown encoder_attn_layer: " + selfattention_layer_type) # feed-forward module definition if ffn_layer_type == "linear": ffn_layer = PositionwiseFeedForward ffn_layer_args = ( attention_dim, linear_units, dropout_rate, activation, ) elif ffn_layer_type == "conv1d": ffn_layer = MultiLayeredConv1d ffn_layer_args = ( attention_dim, linear_units, ffn_conv_kernel_size, dropout_rate, ) elif ffn_layer_type == "conv1d-linear": ffn_layer = Conv1dLinear ffn_layer_args = ( attention_dim, linear_units, ffn_conv_kernel_size, dropout_rate, ) else: raise NotImplementedError("Support only linear or conv1d.") # convolution module definition
# Copyright 2020 Johns Hopkins University (Shinji Watanabe) # Northwestern Polytechnical University (Pengcheng Guo) # Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) """Encoder definition.""" class Encoder(torch.nn.Module): """Conformer encoder module. Args: idim (int): Input dimension. attention_dim (int): Dimension of attention. attention_heads (int): The number of heads of multi head attention. linear_units (int): The number of units of position-wise feed forward. num_blocks (int): The number of decoder blocks. dropout_rate (float): Dropout rate. positional_dropout_rate (float): Dropout rate after adding positional encoding. attention_dropout_rate (float): Dropout rate in attention. input_layer (Union[str, torch.nn.Module]): Input layer type. normalize_before (bool): Whether to use layer_norm before the first block. concat_after (bool): Whether to concat attention layer's input and output. if True, additional linear will be applied. i.e. x -> x + linear(concat(x, att(x))) if False, no additional linear will be applied. i.e. x -> x + att(x) positionwise_layer_type (str): "linear", "conv1d", or "conv1d-linear". positionwise_conv_kernel_size (int): Kernel size of positionwise conv1d layer. macaron_style (bool): Whether to use macaron style for positionwise layer. pos_enc_layer_type (str): Encoder positional encoding layer type. selfattention_layer_type (str): Encoder attention layer type. activation_type (str): Encoder activation function type. use_cnn_module (bool): Whether to use convolution module. zero_triu (bool): Whether to zero the upper triangular part of attention matrix. cnn_module_kernel (int): Kernerl size of convolution module. padding_idx (int): Padding idx for input_layer=embed. stochastic_depth_rate (float): Maximum probability to skip the encoder layer. intermediate_layers (Union[List[int], None]): indices of intermediate CTC layer. indices start from 1. if not None, intermediate outputs are returned (which changes return type signature.) 
""" def __init__( self, idim, attention_dim=256, attention_heads=4, linear_units=2048, num_blocks=6, dropout_rate=0.1, positional_dropout_rate=0.1, attention_dropout_rate=0.0, input_layer="conv2d", normalize_before=True, concat_after=False, ffn_layer_type="linear", ffn_conv_kernel_size=1, macaron_style=False, pos_enc_layer_type="abs_pos", selfattention_layer_type="selfattn", activation_type="relu", use_cnn_module=True, zero_triu=False, cnn_module_kernel=31, padding_idx=-1, stochastic_depth_rate=0.0, intermediate_layers=None, ctc_softmax=None, conditioning_layer_dim=None, max_seq_len=100, enable_fusion=False, fusion_type="", ): """Construct an Encoder object.""" super(Encoder, self).__init__() self.max_seq_len = max_seq_len activation = get_activation(activation_type) if pos_enc_layer_type == "abs_pos": pos_enc_class = PositionalEncoding elif pos_enc_layer_type == "scaled_abs_pos": pos_enc_class = ScaledPositionalEncoding elif pos_enc_layer_type == "rel_pos": assert selfattention_layer_type == "rel_selfattn" pos_enc_class = RelPositionalEncoding elif pos_enc_layer_type == "legacy_rel_pos": assert selfattention_layer_type == "legacy_rel_selfattn" pos_enc_class = LegacyRelPositionalEncoding else: raise ValueError("unknown pos_enc_layer: " + pos_enc_layer_type) self.conv_subsampling_factor = 1 if input_layer == "linear": self.embed = torch.nn.Sequential( torch.nn.Linear(idim, attention_dim), torch.nn.LayerNorm(attention_dim), torch.nn.Dropout(dropout_rate), pos_enc_class(attention_dim, positional_dropout_rate), ) elif input_layer == "conv2d": self.embed = Conv2dSubsampling( idim, attention_dim, dropout_rate, pos_enc_class(attention_dim, positional_dropout_rate), ) self.conv_subsampling_factor = 4 elif input_layer == "vgg2l": self.embed = VGG2L(idim, attention_dim) self.conv_subsampling_factor = 4 elif input_layer == "embed": self.embed = torch.nn.Sequential( torch.nn.Embedding(idim, attention_dim, padding_idx=padding_idx), pos_enc_class(attention_dim, positional_dropout_rate), ) elif isinstance(input_layer, torch.nn.Module): self.embed = torch.nn.Sequential( input_layer, pos_enc_class(attention_dim, positional_dropout_rate), ) elif input_layer is None: self.embed = torch.nn.Sequential( pos_enc_class(attention_dim, positional_dropout_rate) ) else: raise ValueError("unknown input_layer: " + input_layer) self.normalize_before = normalize_before # self-attention module definition if selfattention_layer_type == "selfattn": logging.info("encoder self-attention layer type = self-attention") encoder_selfattn_layer = MultiHeadedAttention encoder_selfattn_layer_args = ( attention_heads, attention_dim, attention_dropout_rate, ) elif selfattention_layer_type == "legacy_rel_selfattn": assert pos_enc_layer_type == "legacy_rel_pos" encoder_selfattn_layer = LegacyRelPositionMultiHeadedAttention encoder_selfattn_layer_args = ( attention_heads, attention_dim, attention_dropout_rate, ) elif selfattention_layer_type == "rel_selfattn": logging.info("encoder self-attention layer type = relative self-attention") assert pos_enc_layer_type == "rel_pos" encoder_selfattn_layer = RelPositionMultiHeadedAttention encoder_selfattn_layer_args = ( attention_heads, attention_dim, attention_dropout_rate, zero_triu, ) else: raise ValueError("unknown encoder_attn_layer: " + selfattention_layer_type) # feed-forward module definition if ffn_layer_type == "linear": ffn_layer = PositionwiseFeedForward ffn_layer_args = ( attention_dim, linear_units, dropout_rate, activation, ) elif ffn_layer_type == "conv1d": ffn_layer = 
MultiLayeredConv1d ffn_layer_args = ( attention_dim, linear_units, ffn_conv_kernel_size, dropout_rate, ) elif ffn_layer_type == "conv1d-linear": ffn_layer = Conv1dLinear ffn_layer_args = ( attention_dim, linear_units, ffn_conv_kernel_size, dropout_rate, ) else: raise NotImplementedError("Support only linear or conv1d.") # convolution module definition
convolution_layer = ConvolutionModule
0
2023-11-25 02:38:32+00:00
24k
Luo-Z13/pointobb
PointOBB/mmdet/models/roi_heads/PointOBB_head.py
[ { "identifier": "HEADS", "path": "PointOBB/mmdet/models/builder.py", "snippet": "HEADS = MODELS" }, { "identifier": "MODELS", "path": "PointOBB/mmdet/models/builder.py", "snippet": "MODELS = Registry('models', parent=MMCV_MODELS)" }, { "identifier": "build_head", "path": "Poi...
import math import torch import torch.nn.functional as F import torch.nn as nn import copy import numpy as np import cv2 from mmdet.core import bbox2result, bbox2roi, rbbox2roi, build_assigner, build_sampler, multi_apply from ..builder import HEADS, MODELS, build_head, build_roi_extractor, build_loss from .standard_roi_head import StandardRoIHead from .cascade_roi_head import CascadeRoIHead from mmdet.core.bbox.iou_calculators import bbox_overlaps from .test_mixins import BBoxTestMixin, MaskTestMixin from mmdet.core.bbox import bbox_xyxy_to_cxcywh from mmdet.core.bbox.transforms import rbbox2result from mmcv.cnn import Scale, ConvModule from mmcv.ops import box_iou_rotated from typing import Any, List, Sequence, Tuple, Union from torch import Tensor from mmdet.models.utils.base_bbox_coder import BaseBBoxCoder from ..detectors.utils import obb2xyxy, regularize_boxes, reduce_mean, obb2poly_np
16,228
"""Simplest base roi head including one bbox head and one mask head.""" def __init__(self, bbox_roi_extractor, num_stages, bbox_head, top_k=7, with_atten=None, conv_cfg=None, norm_cfg=None, scale_angle: bool = True, stacked_convs = 4, loss_symmetry_ss=dict( type='SmoothL1Loss', loss_weight=1.0, beta=0.1), angle_coder=dict( type='PSCCoder', angle_version='le90', dual_freq=False, num_step=3, thr_mod=0), angle_version = 'le90', use_angle_loss = True, add_angle_pred_begin = False, not_use_rot_mil = False, detach_angle_head = False, rotation_agnostic_classes = None, agnostic_resize_classes = None, cls_scores_weight = 1.0, ins_scores_weight = 1.0, **kwargs): super(PointOBBHead, self).__init__(bbox_roi_extractor=bbox_roi_extractor, bbox_head=bbox_head, **kwargs) self.threshold = 0.3 self.merge_mode = 'weighted_clsins' self.test_mean_iou = False # self.test_mean_iou = True self.sum_iou = 0 self.sum_num = 0 self.num_stages = num_stages self.topk1 = top_k # 7 self.topk2 = top_k # 7 self.featmap_strides = bbox_roi_extractor.featmap_strides self.with_atten = with_atten self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.in_channels=256 self.feat_channels=256 self.stacked_convs=stacked_convs self.is_scale_angle = scale_angle self.angle_coder = HEADS.build(angle_coder) self.loss_symmetry_ss = build_loss(loss_symmetry_ss) self.angle_version = angle_version self.rotation_agnostic_classes = rotation_agnostic_classes self.agnostic_resize_classes = agnostic_resize_classes self.add_angle_pred_begin = add_angle_pred_begin self.use_angle_loss = use_angle_loss self.not_use_rot_mil = not_use_rot_mil self.detach_angle_head = detach_angle_head self.cls_scores_weight = cls_scores_weight self.ins_scores_weight = ins_scores_weight self.num_classes = self.bbox_head.num_classes self._init_layers() def _init_layers(self): """Initialize layers of the head.""" self.relu = nn.ReLU(inplace=True) self.cls_convs = nn.ModuleList() for i in range(self.stacked_convs): chn = self.in_channels if i == 0 else self.feat_channels self.cls_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) self.conv_angle = nn.Conv2d( self.feat_channels, self.angle_coder.encode_size, 3, padding=1) if self.is_scale_angle: self.scale_angle = Scale(1.0) def angle_forward(self, feats: Tuple[Tensor]) -> Tuple[List[Tensor], List[Tensor]]: angle_results = [] for feat in feats: if self.detach_angle_head: feat_detach = feat.clone().detach() single_angle_pred = self.angle_forward_single(feat_detach) else: single_angle_pred = self.angle_forward_single(feat) angle_results.append(single_angle_pred) return tuple(angle_results) def angle_forward_single(self, x: Tensor): cls_feat = x for cls_layer in self.cls_convs: cls_feat = cls_layer(cls_feat) # cls_score = self.conv_cls(cls_feat) angle_pred = self.conv_angle(cls_feat) if self.is_scale_angle: angle_pred = self.scale_angle(angle_pred).float() return angle_pred def init_assigner_sampler(self): """Initialize assigner and sampler.""" self.bbox_assigner = None self.bbox_sampler = None if self.train_cfg: self.bbox_assigner = build_assigner(self.train_cfg.assigner) self.bbox_sampler = build_sampler( self.train_cfg.sampler, context=self) def init_bbox_head(self, bbox_roi_extractor, bbox_head): """Initialize ``bbox_head``"""
RangeType = Sequence[Tuple[int, int]] INF = 1e8 def meshgrid(x: Tensor, y: Tensor, row_major: bool = True) -> Tuple[Tensor, Tensor]: yy, xx = torch.meshgrid(y, x) if row_major: # warning .flatten() would cause error in ONNX exportingF # have to use reshape here return xx.reshape(-1), yy.reshape(-1) else: return yy.reshape(-1), xx.reshape(-1) def obb2cxcywh_le90(obboxes): """Convert oriented bounding boxes to horizontal bounding boxes. Args: obbs (torch.Tensor): [x_ctr,y_ctr,w,h,angle] Returns: hbbs (torch.Tensor): [x_lt,y_lt,x_rb,y_rb] """ center, w, h, theta = torch.split(obboxes, [2, 1, 1, 1], dim=-1) Cos, Sin = torch.cos(theta), torch.sin(theta) x_bias = torch.abs(w / 2 * Cos) + torch.abs(h / 2 * Sin) y_bias = torch.abs(w / 2 * Sin) + torch.abs(h / 2 * Cos) bias = torch.cat([x_bias, y_bias], dim=-1) wh = bias * 2 return torch.cat([center, wh, torch.zeros_like(theta)], dim=-1) @HEADS.register_module() class PSCCoder(BaseBBoxCoder): """Phase-Shifting Coder. `Phase-Shifting Coder (PSC) <https://arxiv.org/abs/2211.06368>`. Args: angle_version (str): Angle definition. Only 'le90' is supported at present. dual_freq (bool, optional): Use dual frequency. Default: True. num_step (int, optional): Number of phase steps. Default: 3. thr_mod (float): Threshold of modulation. Default: 0.47. """ def __init__(self, angle_version: str, dual_freq: bool = True, num_step: int = 3, thr_mod: float = 0.47): super().__init__() self.angle_version = angle_version assert angle_version in ['le90'] self.dual_freq = dual_freq self.num_step = num_step self.thr_mod = thr_mod if self.dual_freq: self.encode_size = 2 * self.num_step else: self.encode_size = self.num_step self.coef_sin = torch.tensor( tuple( torch.sin(torch.tensor(2 * k * math.pi / self.num_step)) for k in range(self.num_step))) self.coef_cos = torch.tensor( tuple( torch.cos(torch.tensor(2 * k * math.pi / self.num_step)) for k in range(self.num_step))) def encode(self, angle_targets: Tensor) -> Tensor: """Phase-Shifting Encoder. Args: angle_targets (Tensor): Angle offset for each scale level. Has shape (num_anchors * H * W, 1) Returns: list[Tensor]: The psc coded data (phase-shifting patterns) for each scale level. Has shape (num_anchors * H * W, encode_size) """ phase_targets = angle_targets * 2 phase_shift_targets = tuple( torch.cos(phase_targets + 2 * math.pi * x / self.num_step) for x in range(self.num_step)) # Dual-freq PSC for square-like problem if self.dual_freq: phase_targets = angle_targets * 4 phase_shift_targets += tuple( torch.cos(phase_targets + 2 * math.pi * x / self.num_step) for x in range(self.num_step)) return torch.cat(phase_shift_targets, axis=-1) def decode(self, angle_preds: Tensor, keepdim: bool = False) -> Tensor: """Phase-Shifting Decoder. Args: angle_preds (Tensor): The psc coded data (phase-shifting patterns) for each scale level. Has shape (num_anchors * H * W, encode_size) keepdim (bool): Whether the output tensor has dim retained or not. Returns: list[Tensor]: Angle offset for each scale level. 
Has shape (num_anchors * H * W, 1) when keepdim is true, (num_anchors * H * W) otherwise """ self.coef_sin = self.coef_sin.to(angle_preds) self.coef_cos = self.coef_cos.to(angle_preds) phase_sin = torch.sum( angle_preds[:, 0:self.num_step] * self.coef_sin, dim=-1, keepdim=keepdim) phase_cos = torch.sum( angle_preds[:, 0:self.num_step] * self.coef_cos, dim=-1, keepdim=keepdim) phase_mod = phase_cos**2 + phase_sin**2 phase = -torch.atan2(phase_sin, phase_cos) # In range [-pi,pi) if self.dual_freq: phase_sin = torch.sum( angle_preds[:, self.num_step:(2 * self.num_step)] * self.coef_sin, dim=-1, keepdim=keepdim) phase_cos = torch.sum( angle_preds[:, self.num_step:(2 * self.num_step)] * self.coef_cos, dim=-1, keepdim=keepdim) phase_mod = phase_cos**2 + phase_sin**2 phase2 = -torch.atan2(phase_sin, phase_cos) / 2 # Phase unwarpping, dual freq mixing # Angle between phase and phase2 is obtuse angle idx = torch.cos(phase) * torch.cos(phase2) + torch.sin( phase) * torch.sin(phase2) < 0 # Add pi to phase2 and keep it in range [-pi,pi) phase2[idx] = phase2[idx] % (2 * math.pi) - math.pi phase = phase2 # Set the angle of isotropic objects to zero phase[phase_mod < self.thr_mod] *= 0 angle_pred = phase / 2 return angle_pred @HEADS.register_module() class PointOBBHead(StandardRoIHead): """Simplest base roi head including one bbox head and one mask head.""" def __init__(self, bbox_roi_extractor, num_stages, bbox_head, top_k=7, with_atten=None, conv_cfg=None, norm_cfg=None, scale_angle: bool = True, stacked_convs = 4, loss_symmetry_ss=dict( type='SmoothL1Loss', loss_weight=1.0, beta=0.1), angle_coder=dict( type='PSCCoder', angle_version='le90', dual_freq=False, num_step=3, thr_mod=0), angle_version = 'le90', use_angle_loss = True, add_angle_pred_begin = False, not_use_rot_mil = False, detach_angle_head = False, rotation_agnostic_classes = None, agnostic_resize_classes = None, cls_scores_weight = 1.0, ins_scores_weight = 1.0, **kwargs): super(PointOBBHead, self).__init__(bbox_roi_extractor=bbox_roi_extractor, bbox_head=bbox_head, **kwargs) self.threshold = 0.3 self.merge_mode = 'weighted_clsins' self.test_mean_iou = False # self.test_mean_iou = True self.sum_iou = 0 self.sum_num = 0 self.num_stages = num_stages self.topk1 = top_k # 7 self.topk2 = top_k # 7 self.featmap_strides = bbox_roi_extractor.featmap_strides self.with_atten = with_atten self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.in_channels=256 self.feat_channels=256 self.stacked_convs=stacked_convs self.is_scale_angle = scale_angle self.angle_coder = HEADS.build(angle_coder) self.loss_symmetry_ss = build_loss(loss_symmetry_ss) self.angle_version = angle_version self.rotation_agnostic_classes = rotation_agnostic_classes self.agnostic_resize_classes = agnostic_resize_classes self.add_angle_pred_begin = add_angle_pred_begin self.use_angle_loss = use_angle_loss self.not_use_rot_mil = not_use_rot_mil self.detach_angle_head = detach_angle_head self.cls_scores_weight = cls_scores_weight self.ins_scores_weight = ins_scores_weight self.num_classes = self.bbox_head.num_classes self._init_layers() def _init_layers(self): """Initialize layers of the head.""" self.relu = nn.ReLU(inplace=True) self.cls_convs = nn.ModuleList() for i in range(self.stacked_convs): chn = self.in_channels if i == 0 else self.feat_channels self.cls_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) self.conv_angle = nn.Conv2d( self.feat_channels, self.angle_coder.encode_size, 3, padding=1) if 
self.is_scale_angle: self.scale_angle = Scale(1.0) def angle_forward(self, feats: Tuple[Tensor]) -> Tuple[List[Tensor], List[Tensor]]: angle_results = [] for feat in feats: if self.detach_angle_head: feat_detach = feat.clone().detach() single_angle_pred = self.angle_forward_single(feat_detach) else: single_angle_pred = self.angle_forward_single(feat) angle_results.append(single_angle_pred) return tuple(angle_results) def angle_forward_single(self, x: Tensor): cls_feat = x for cls_layer in self.cls_convs: cls_feat = cls_layer(cls_feat) # cls_score = self.conv_cls(cls_feat) angle_pred = self.conv_angle(cls_feat) if self.is_scale_angle: angle_pred = self.scale_angle(angle_pred).float() return angle_pred def init_assigner_sampler(self): """Initialize assigner and sampler.""" self.bbox_assigner = None self.bbox_sampler = None if self.train_cfg: self.bbox_assigner = build_assigner(self.train_cfg.assigner) self.bbox_sampler = build_sampler( self.train_cfg.sampler, context=self) def init_bbox_head(self, bbox_roi_extractor, bbox_head): """Initialize ``bbox_head``"""
self.bbox_roi_extractor = build_roi_extractor(bbox_roi_extractor)
3
2023-11-20 07:50:12+00:00
24k
ModelTC/EasyLLM
llm/models/hf_models/qwen_vl/modeling_qwen.py
[ { "identifier": "QWenConfig", "path": "llm/models/hf_models/qwen_vl/configuration_qwen.py", "snippet": "class QWenConfig(PretrainedConfig):\n model_type = \"qwen\"\n keys_to_ignore_at_inference = [\"past_key_values\"]\n\n def __init__(\n self,\n vocab_size=151936,\n hidden_...
import importlib import math import torch # noqa import torch.nn.functional as F # noqa import torch.utils.checkpoint # noqa from typing import TYPE_CHECKING, Dict, Optional, Tuple, Union, Callable, List, Any, Generator # noqa from torch.cuda.amp import autocast # noqa from torch.nn import CrossEntropyLoss from transformers import PreTrainedTokenizer, GenerationConfig, StoppingCriteriaList # noqa from transformers.generation.logits_process import LogitsProcessorList # noqa from transformers.generation.streamers import BaseStreamer # noqa from transformers.generation.utils import GenerateOutput # noqa from transformers.modeling_outputs import ( BaseModelOutputWithPast, CausalLMOutputWithPast, ) from transformers.modeling_utils import PreTrainedModel # noqa from transformers.utils import logging from einops import rearrange from torch import nn from .configuration_qwen import QWenConfig # noqa from .qwen_generation_utils import ( make_context, ) # noqa from llm.models.hf_models.qwen.qwen_generation_utils import ( HistoryType, decode_tokens, get_stop_words_ids, ) from .visual import VisionTransformer from llm.models.hf_models.qwen.modeling_qwen import RMSNorm, apply_rotary_pos_emb, QWenMLP from llm.models.hf_models.qwen.modeling_qwen import QWenAttention as QWenAttention_chat from llm.models.hf_models.qwen.modeling_qwen import QWenModel as QWenModel_chat from llm.models.hf_models.qwen.modeling_qwen import QWenLMHeadModel as QWenLMHeadModel_chat from einops import rearrange
15,715
): mixed_x_layer = self.c_attn(hidden_states) query, key, value = mixed_x_layer.split(self.split_size, dim=2) query = self._split_heads(query, self.num_heads, self.head_dim) key = self._split_heads(key, self.num_heads, self.head_dim) value = self._split_heads(value, self.num_heads, self.head_dim) if rotary_pos_emb is not None: cur_len = query.shape[1] rotary_pos_emb = [i[:, -cur_len:, :, :] for i in rotary_pos_emb] rotary_pos_emb = (rotary_pos_emb,) * 2 q_pos_emb, k_pos_emb = rotary_pos_emb # Slice the pos emb for current inference query = apply_rotary_pos_emb(query, q_pos_emb) key = apply_rotary_pos_emb(key, k_pos_emb) if layer_past is not None: past_key, past_value = layer_past[0], layer_past[1] key = torch.cat((past_key, key), dim=1) value = torch.cat((past_value, value), dim=1) if use_cache: present = (key, value) else: present = None if self.use_logn_attn and not self.training: if self.logn_tensor.device != query.device or self.logn_tensor.dtype != query.dtype: self.logn_tensor = self.logn_tensor.to(query.device).type_as(query) seq_start = key.size(1) - query.size(1) seq_end = key.size(1) logn_tensor = self.logn_tensor[:, seq_start:seq_end, :, :] query = query * logn_tensor.expand_as(query) query = query.permute(0, 2, 1, 3) key = key.permute(0, 2, 1, 3) value = value.permute(0, 2, 1, 3) attn_output, attn_weight = self._attn( query, key, value, registered_causal_mask, attention_mask, head_mask ) context_layer = self._merge_heads( attn_output, self.num_heads, self.head_dim ) attn_output = self.c_proj(context_layer) outputs = (attn_output, present) if output_attentions: outputs += (attn_weight,) return outputs class QWenBlock(nn.Module): def __init__(self, config): super().__init__() hidden_size = config.hidden_size self.bf16 = config.bf16 self.ln_1 = RMSNorm( hidden_size, eps=config.layer_norm_epsilon, ) self.attn = QWenAttention(config) self.ln_2 = RMSNorm( hidden_size, eps=config.layer_norm_epsilon, ) self.mlp = QWenMLP(config) def forward( self, hidden_states: Optional[Tuple[torch.FloatTensor]], rotary_pos_emb: Optional[List[torch.Tensor]] = None, registered_causal_mask: Optional[torch.Tensor] = None, layer_past: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = False, output_attentions: Optional[bool] = False, ): layernorm_output = self.ln_1(hidden_states) attn_outputs = self.attn( layernorm_output, rotary_pos_emb, registered_causal_mask=registered_causal_mask, layer_past=layer_past, attention_mask=attention_mask, head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions, ) attn_output = attn_outputs[0] outputs = attn_outputs[1:] residual = hidden_states layernorm_input = attn_output + residual layernorm_output = self.ln_2(layernorm_input) residual = layernorm_input mlp_output = self.mlp(layernorm_output) hidden_states = residual + mlp_output if use_cache: outputs = (hidden_states,) + outputs else: outputs = (hidden_states,) + outputs[1:] return outputs
# Copyright (c) Alibaba Cloud. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. if TYPE_CHECKING: try: except ImportError: rearrange = None SUPPORT_CUDA = torch.cuda.is_available() SUPPORT_BF16 = SUPPORT_CUDA and torch.cuda.is_bf16_supported() SUPPORT_FP16 = SUPPORT_CUDA and torch.cuda.get_device_capability(0)[0] >= 7 logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "qwen" _CONFIG_FOR_DOC = "QWenConfig" QWen_PRETRAINED_MODEL_ARCHIVE_LIST = ["qwen-7b"] _ERROR_BAD_CHAT_FORMAT = """\ We detect you are probably using the pretrained model (rather than chat model) for chatting, since the chat_format in generation_config is not "chatml". If you are directly using the model downloaded from Huggingface, please make sure you are using our "Qwen/Qwen-7B-Chat" Huggingface model (rather than "Qwen/Qwen-7B") when you call model.chat(). 我们检测到您可能在使用预训练模型(而非chat模型)进行多轮chat,因为您当前在generation_config指定的chat_format,并未设置为我们在对话中所支持的"chatml"格式。 如果您在直接使用我们从Huggingface提供的模型,请确保您在调用model.chat()时,使用的是"Qwen/Qwen-7B-Chat"模型(而非"Qwen/Qwen-7B"预训练模型)。 """ _SENTINEL = object() _ERROR_STREAM_IN_CHAT = """\ Pass argument `stream` to model.chat() is buggy, deprecated, and marked for removal. Please use model.chat_stream(...) instead of model.chat(..., stream=True). 向model.chat()传入参数stream的用法可能存在Bug,该用法已被废弃,将在未来被移除。请使用model.chat_stream(...)代替model.chat(..., stream=True)。 """ apply_rotary_emb_func = None rms_norm = None # Copied from transformers.models.bart.modeling_bart._make_causal_mask def _make_causal_mask( input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0 ): """ Make causal mask used for bi-directional self-attention. """ bsz, tgt_len = input_ids_shape mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device) mask_cond = torch.arange(mask.size(-1), device=device) mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) mask = mask.to(dtype) if past_key_values_length > 0: mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1) return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) # Copied from transformers.models.bart.modeling_bart._expand_mask def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. 
""" bsz, src_len = mask.size() tgt_len = tgt_len if tgt_len is not None else src_len expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) inverted_mask = 1.0 - expanded_mask return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) class QWenAttention(QWenAttention_chat): def __init__(self, config): super().__init__(config) def _attn(self, query, key, value, registered_causal_mask, attention_mask=None, head_mask=None): attn_weights = torch.matmul(query, key.transpose(-1, -2)) if self.scale_attn_weights: attn_weights = attn_weights / torch.full( [], value.size(-1) ** 0.5, dtype=attn_weights.dtype, device=attn_weights.device, ) attn_weights = attn_weights + attention_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1) attn_weights = attn_weights.type(value.dtype) attn_weights = self.attn_dropout(attn_weights) if head_mask is not None: attn_weights = attn_weights * head_mask attn_output = torch.matmul(attn_weights, value) attn_output = attn_output.transpose(1, 2) return attn_output, attn_weights def forward( self, hidden_states: Optional[Tuple[torch.FloatTensor]], rotary_pos_emb: Optional[List[torch.Tensor]] = None, registered_causal_mask: Optional[torch.Tensor] = None, layer_past: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, ): mixed_x_layer = self.c_attn(hidden_states) query, key, value = mixed_x_layer.split(self.split_size, dim=2) query = self._split_heads(query, self.num_heads, self.head_dim) key = self._split_heads(key, self.num_heads, self.head_dim) value = self._split_heads(value, self.num_heads, self.head_dim) if rotary_pos_emb is not None: cur_len = query.shape[1] rotary_pos_emb = [i[:, -cur_len:, :, :] for i in rotary_pos_emb] rotary_pos_emb = (rotary_pos_emb,) * 2 q_pos_emb, k_pos_emb = rotary_pos_emb # Slice the pos emb for current inference query = apply_rotary_pos_emb(query, q_pos_emb) key = apply_rotary_pos_emb(key, k_pos_emb) if layer_past is not None: past_key, past_value = layer_past[0], layer_past[1] key = torch.cat((past_key, key), dim=1) value = torch.cat((past_value, value), dim=1) if use_cache: present = (key, value) else: present = None if self.use_logn_attn and not self.training: if self.logn_tensor.device != query.device or self.logn_tensor.dtype != query.dtype: self.logn_tensor = self.logn_tensor.to(query.device).type_as(query) seq_start = key.size(1) - query.size(1) seq_end = key.size(1) logn_tensor = self.logn_tensor[:, seq_start:seq_end, :, :] query = query * logn_tensor.expand_as(query) query = query.permute(0, 2, 1, 3) key = key.permute(0, 2, 1, 3) value = value.permute(0, 2, 1, 3) attn_output, attn_weight = self._attn( query, key, value, registered_causal_mask, attention_mask, head_mask ) context_layer = self._merge_heads( attn_output, self.num_heads, self.head_dim ) attn_output = self.c_proj(context_layer) outputs = (attn_output, present) if output_attentions: outputs += (attn_weight,) return outputs class QWenBlock(nn.Module): def __init__(self, config): super().__init__() hidden_size = config.hidden_size self.bf16 = config.bf16 self.ln_1 = RMSNorm( hidden_size, eps=config.layer_norm_epsilon, ) self.attn = QWenAttention(config) self.ln_2 = RMSNorm( hidden_size, eps=config.layer_norm_epsilon, ) self.mlp = 
QWenMLP(config) def forward( self, hidden_states: Optional[Tuple[torch.FloatTensor]], rotary_pos_emb: Optional[List[torch.Tensor]] = None, registered_causal_mask: Optional[torch.Tensor] = None, layer_past: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = False, output_attentions: Optional[bool] = False, ): layernorm_output = self.ln_1(hidden_states) attn_outputs = self.attn( layernorm_output, rotary_pos_emb, registered_causal_mask=registered_causal_mask, layer_past=layer_past, attention_mask=attention_mask, head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions, ) attn_output = attn_outputs[0] outputs = attn_outputs[1:] residual = hidden_states layernorm_input = attn_output + residual layernorm_output = self.ln_2(layernorm_input) residual = layernorm_input mlp_output = self.mlp(layernorm_output) hidden_states = residual + mlp_output if use_cache: outputs = (hidden_states,) + outputs else: outputs = (hidden_states,) + outputs[1:] return outputs
class QWenModel(QWenModel_chat):
6
2023-11-26 10:12:52+00:00
24k
bearyi26/DCPT
lib/train/base_functions.py
[ { "identifier": "Lasot", "path": "lib/train/dataset/lasot.py", "snippet": "class Lasot(BaseVideoDataset):\n \"\"\" LaSOT dataset.\n\n Publication:\n LaSOT: A High-quality Benchmark for Large-scale Single Object Tracking\n Heng Fan, Liting Lin, Fan Yang, Peng Chu, Ge Deng, Sijia Yu, H...
import torch import lib.train.data.transforms as tfm from torch.utils.data.distributed import DistributedSampler from lib.train.dataset import Lasot, Got10k, MSCOCOSeq, ImagenetVID, TrackingNet, BDD100K_Night, SHIFT_Night, ExDark from lib.train.dataset import Lasot_lmdb, Got10k_lmdb, MSCOCOSeq_lmdb, ImagenetVID_lmdb, TrackingNet_lmdb from lib.train.data import sampler, opencv_loader, processing, LTRLoader from lib.utils.misc import is_main_process
20,995
# datasets related def update_settings(settings, cfg): settings.print_interval = cfg.TRAIN.PRINT_INTERVAL settings.search_area_factor = {'template': cfg.DATA.TEMPLATE.FACTOR, 'search': cfg.DATA.SEARCH.FACTOR} settings.output_sz = {'template': cfg.DATA.TEMPLATE.SIZE, 'search': cfg.DATA.SEARCH.SIZE} settings.center_jitter_factor = {'template': cfg.DATA.TEMPLATE.CENTER_JITTER, 'search': cfg.DATA.SEARCH.CENTER_JITTER} settings.scale_jitter_factor = {'template': cfg.DATA.TEMPLATE.SCALE_JITTER, 'search': cfg.DATA.SEARCH.SCALE_JITTER} settings.grad_clip_norm = cfg.TRAIN.GRAD_CLIP_NORM settings.print_stats = None settings.batchsize = cfg.TRAIN.BATCH_SIZE settings.scheduler_type = cfg.TRAIN.SCHEDULER.TYPE def names2datasets(name_list: list, settings, image_loader): assert isinstance(name_list, list) datasets = [] for name in name_list: assert name in ["LASOT", "GOT10K_vottrain", "GOT10K_votval", "GOT10K_train_full", "GOT10K_official_val", "COCO17", "VID", "TRACKINGNET", "BDD100K_NIGHT", "SHIFT_NIGHT", "ExDark"] if name == "LASOT": if settings.use_lmdb: print("Building lasot dataset from lmdb") datasets.append(Lasot_lmdb(settings.env.lasot_lmdb_dir, split='train', image_loader=image_loader)) else:
# datasets related def update_settings(settings, cfg): settings.print_interval = cfg.TRAIN.PRINT_INTERVAL settings.search_area_factor = {'template': cfg.DATA.TEMPLATE.FACTOR, 'search': cfg.DATA.SEARCH.FACTOR} settings.output_sz = {'template': cfg.DATA.TEMPLATE.SIZE, 'search': cfg.DATA.SEARCH.SIZE} settings.center_jitter_factor = {'template': cfg.DATA.TEMPLATE.CENTER_JITTER, 'search': cfg.DATA.SEARCH.CENTER_JITTER} settings.scale_jitter_factor = {'template': cfg.DATA.TEMPLATE.SCALE_JITTER, 'search': cfg.DATA.SEARCH.SCALE_JITTER} settings.grad_clip_norm = cfg.TRAIN.GRAD_CLIP_NORM settings.print_stats = None settings.batchsize = cfg.TRAIN.BATCH_SIZE settings.scheduler_type = cfg.TRAIN.SCHEDULER.TYPE def names2datasets(name_list: list, settings, image_loader): assert isinstance(name_list, list) datasets = [] for name in name_list: assert name in ["LASOT", "GOT10K_vottrain", "GOT10K_votval", "GOT10K_train_full", "GOT10K_official_val", "COCO17", "VID", "TRACKINGNET", "BDD100K_NIGHT", "SHIFT_NIGHT", "ExDark"] if name == "LASOT": if settings.use_lmdb: print("Building lasot dataset from lmdb") datasets.append(Lasot_lmdb(settings.env.lasot_lmdb_dir, split='train', image_loader=image_loader)) else:
datasets.append(Lasot(settings.env.lasot_dir, split='train', image_loader=image_loader))
0
2023-11-20 06:41:15+00:00
24k
shercoo/RGDiffSR
ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n ...
import datetime import math import cv2 import torch import torch.nn as nn import numpy as np import pytorch_lightning as pl import pygame from collections import OrderedDict from matplotlib import pyplot as plt from torch.optim.lr_scheduler import LambdaLR from einops import rearrange, repeat from contextlib import contextmanager from functools import partial from torchvision import transforms from tqdm import tqdm from torchvision.utils import make_grid from pytorch_lightning.utilities.distributed import rank_zero_only from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config from ldm.modules.ema import LitEma from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like from ldm.models.diffusion.ddim import DDIMSampler from text_super_resolution.model.VisionLAN.utils import Attention_AR_counter from text_super_resolution.model.tps_spatial_transformer import TPSSpatialTransformer from text_super_resolution.model.stn_head import STNHead from text_super_resolution.model.VisionLAN.VisionLAN import VisionLAN from utils.render_standard_text import * from text_super_resolution.loss.semantic_loss import SemanticLoss from text_super_resolution.utils import ssim_psnr from pygame import freetype from utils.metrics import *
14,578
if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None, **kwargs): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0) @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): if ddim: ddim_sampler = DDIMSampler(self) # print(cond.shape) if self.text_prior_enable: if isinstance(cond, dict): shape = (self.channels, cond['c_concat'][0].shape[2], cond['c_concat'][0].shape[3]) elif isinstance(cond, list): shape = (self.channels, cond[0].shape[2], cond[0].shape[3]) else: shape = (self.channels, cond.shape[2], cond.shape[3]) else: shape = (self.channels, cond.shape[2], cond.shape[3]) # shape = (self.channels, self.image_size, self.image_size) samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size, shape, cond, verbose=False, **kwargs) else: samples, intermediates = self.sample(cond=cond, batch_size=batch_size, return_intermediates=True, **kwargs) return samples, intermediates @torch.no_grad() def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, **kwargs): use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N) # print('**********************c shape',c.shape) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption"]: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["caption"]) log["conditioning"] = xc elif self.cond_stage_key == 'class_label': xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"]) log['conditioning'] = xc elif isimage(xc): log["conditioning"] = xc if ismap(xc): log["original_conditioning"] = self.to_rgb(xc) if plot_diffusion_rows: # get diffusion row diffusion_row = list() z_start = z[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(z_start) z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) diffusion_row.append(self.decode_first_stage(z_noisy)) diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w') diffusion_grid = rearrange(diffusion_grid, 'b 
n c h w -> (b n) c h w') diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) log["diffusion_row"] = diffusion_grid if sample: # get denoise row with self.ema_scope("Plotting"): samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, ddim_steps=ddim_steps, eta=ddim_eta) # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) x_samples = self.decode_first_stage(samples) log["samples"] = x_samples if plot_denoise_rows: denoise_grid = self._get_denoise_row_from_list(z_denoise_row) log["denoise_row"] = denoise_grid
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} sem_loss = SemanticLoss() def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., ): super().__init__() assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. 
- betas
        alphas_cumprod = np.cumprod(alphas, axis=0)
        alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])

        timesteps, = betas.shape
        self.num_timesteps = int(timesteps)
        self.linear_start = linear_start
        self.linear_end = linear_end
        assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep'

        to_torch = partial(torch.tensor, dtype=torch.float32)

        self.register_buffer('betas', to_torch(betas))
        self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
        self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev))

        # calculations for diffusion q(x_t | x_{t-1}) and others
        self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)))
        self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod)))
        self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod)))
        self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod)))
        self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1)))

        # calculations for posterior q(x_{t-1} | x_t, x_0)
        posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / (
                1. - alphas_cumprod) + self.v_posterior * betas
        # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
        self.register_buffer('posterior_variance', to_torch(posterior_variance))
        # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
        self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20))))
        self.register_buffer('posterior_mean_coef1', to_torch(
            betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod)))
        self.register_buffer('posterior_mean_coef2', to_torch(
            (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod)))

        if self.parameterization == "eps":
            lvlb_weights = self.betas ** 2 / (
                    2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))
        elif self.parameterization == "x0":
            lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. * 1 - torch.Tensor(alphas_cumprod))
        else:
            raise NotImplementedError("mu not supported")
        # TODO how to choose this term
        lvlb_weights[0] = lvlb_weights[1]
        self.register_buffer('lvlb_weights', lvlb_weights, persistent=False)
        assert not torch.isnan(self.lvlb_weights).all()

    @contextmanager
    def ema_scope(self, context=None):
        if self.use_ema:
            self.model_ema.store(self.model.parameters())
            self.model_ema.copy_to(self.model)
            if context is not None:
                print(f"{context}: Switched to EMA weights")
        try:
            yield None
        finally:
            if self.use_ema:
                self.model_ema.restore(self.model.parameters())
                if context is not None:
                    print(f"{context}: Restored training weights")

    def init_from_ckpt(self, path, ignore_keys=list(), only_model=False):
        sd = torch.load(path, map_location="cpu")
        print(sd.keys())
        print(sd['epoch'])
        print(sd['global_step'])
        print(sd['callbacks'])
        # print(sd['optimizer_states'])
        # print(sd['lr_schedulers'])
        # print(sd['state_dict'].keys())
        # exit(0)
        if "state_dict" in list(sd.keys()):
            sd = sd["state_dict"]
        keys = list(sd.keys())
        for k in keys:
            for ik in ignore_keys:
                if k.startswith(ik):
                    print("Deleting key {} from state_dict.".format(k))
                    del sd[k]
        missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(
            sd, strict=False)
        print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
        if len(missing) > 0:
            print(f"Missing Keys: {missing}")
        if len(unexpected) > 0:
            print(f"Unexpected Keys: {unexpected}")

    def q_mean_variance(self, x_start, t):
        """
        Get the distribution q(x_t | x_0).
        :param x_start: the [N x C x ...] tensor of noiseless inputs.
        :param t: the number of diffusion steps (minus 1). Here, 0 means one step.
        :return: A tuple (mean, variance, log_variance), all of x_start's shape.
        """
        mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start)
        variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
        log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape)
        return mean, variance, log_variance

    def predict_start_from_noise(self, x_t, t, noise):
        return (
                extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t -
                extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise
        )

    def q_posterior(self, x_start, x_t, t):
        posterior_mean = (
                extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start +
                extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
        )
        posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape)
        posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape)
        return posterior_mean, posterior_variance, posterior_log_variance_clipped

    def p_mean_variance(self, x, t, clip_denoised: bool):
        model_out = self.model(x, t)
        if self.parameterization == "eps":
            x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
        elif self.parameterization == "x0":
            x_recon = model_out
        if clip_denoised:
            x_recon.clamp_(-1., 1.)
        model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
        return model_mean, posterior_variance, posterior_log_variance

    @torch.no_grad()
    def p_sample(self, x, t, clip_denoised=True, repeat_noise=False):
        b, *_, device = *x.shape, x.device
        model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised)
        noise = noise_like(x.shape, device, repeat_noise)
        # no noise when t == 0
        nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
        return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise

    @torch.no_grad()
    def p_sample_loop(self, shape, return_intermediates=False):
        device = self.betas.device
        b = shape[0]
        img = torch.randn(shape, device=device)
        intermediates = [img]
        for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps):
            img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long),
                                clip_denoised=self.clip_denoised)
            if i % self.log_every_t == 0 or i == self.num_timesteps - 1:
                intermediates.append(img)
        if return_intermediates:
            return img, intermediates
        return img

    @torch.no_grad()
    def sample(self, batch_size=16, return_intermediates=False):
        image_size = self.image_size
        channels = self.channels
        return self.p_sample_loop((batch_size, channels, image_size, image_size),
                                  return_intermediates=return_intermediates)

    def q_sample(self, x_start, t, noise=None):
        noise = default(noise, lambda: torch.randn_like(x_start))
        return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
                extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise)

    def get_loss(self, pred, target, mean=True):
        if self.loss_type == 'l1':
            loss = (target - pred).abs()
            if mean:
                loss = loss.mean()
        elif self.loss_type == 'l2':
            if mean:
                loss = torch.nn.functional.mse_loss(target, pred)
            else:
                loss = torch.nn.functional.mse_loss(target, pred, reduction='none')
        else:
            raise NotImplementedError(f"unknown loss type '{self.loss_type}'")
        return loss

    def p_losses(self, x_start, t, noise=None):
        noise = default(noise, lambda: torch.randn_like(x_start))
        x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
        model_out = self.model(x_noisy, t)

        loss_dict = {}
        if self.parameterization == "eps":
            target = noise
        elif self.parameterization == "x0":
            target = x_start
        else:
            raise NotImplementedError(f"Parameterization {self.parameterization} not yet supported")

        loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3])

        log_prefix = 'train' if self.training else 'val'

        loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()})
        loss_simple = loss.mean() * self.l_simple_weight

        loss_vlb = (self.lvlb_weights[t] * loss).mean()
        loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb})

        loss = loss_simple + self.original_elbo_weight * loss_vlb

        loss_dict.update({f'{log_prefix}/loss': loss})

        return loss, loss_dict

    def forward(self, x, *args, **kwargs):
        # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size
        # assert h == img_size and w == img_size, f'height and width of image must be {img_size}'
        t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
        return self.p_losses(x, t, *args, **kwargs)

    def get_input(self, batch, k):
        x = batch[k]
        if len(x.shape) == 3:
            x = x[..., None]
        x = rearrange(x, 'b h w c -> b c h w')
        x = x.to(memory_format=torch.contiguous_format).float()
        return x

    def shared_step(self, batch):
        x = self.get_input(batch, self.first_stage_key)
        loss, loss_dict = self(x)
        return loss, loss_dict

    def training_step(self, batch, batch_idx):
        loss, loss_dict = self.shared_step(batch)

        self.log_dict(loss_dict, prog_bar=True,
                      logger=True, on_step=True, on_epoch=True)

        self.log("global_step", self.global_step,
                 prog_bar=True, logger=True, on_step=True, on_epoch=False)

        if self.use_scheduler:
            lr = self.optimizers().param_groups[0]['lr']
            self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False)

        return loss

    @torch.no_grad()
    def validation_step(self, batch, batch_idx):
        # print('******************************in validation')
        _, loss_dict_no_ema = self.shared_step(batch)
        with self.ema_scope():
            _, loss_dict_ema = self.shared_step(batch)
            loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema}
        self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True)
        self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True)

    def on_train_batch_end(self, *args, **kwargs):
        if self.use_ema:
            self.model_ema(self.model)

    def _get_rows_from_list(self, samples):
        n_imgs_per_row = len(samples)
        denoise_grid = rearrange(samples, 'n b c h w -> b n c h w')
        denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
        denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
        return denoise_grid

    @torch.no_grad()
    def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs):
        log = dict()
        x = self.get_input(batch, self.first_stage_key)
        N = min(x.shape[0], N)
        n_row = min(x.shape[0], n_row)
        x = x.to(self.device)[:N]
        log["inputs"] = x

        # get diffusion row
        diffusion_row = list()
        x_start = x[:n_row]

        for t in range(self.num_timesteps):
            if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
                t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
                t = t.to(self.device).long()
                noise = torch.randn_like(x_start)
                x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
                diffusion_row.append(x_noisy)

        log["diffusion_row"] = self._get_rows_from_list(diffusion_row)

        if sample:
            # get denoise row
            with self.ema_scope("Plotting"):
                samples, denoise_row = self.sample(batch_size=N, return_intermediates=True)

            log["samples"] = samples
            log["denoise_row"] = self._get_rows_from_list(denoise_row)

        if return_keys:
            if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
                return log
            else:
                return {key: log[key] for key in return_keys}
        return log

    def configure_optimizers(self):
        lr = self.learning_rate
        params = list(self.model.parameters())
        if self.learn_logvar:
            params = params + [self.logvar]
        opt = torch.optim.AdamW(params, lr=lr)
        return opt


class LatentDiffusion(DDPM):
    """main class"""

    def __init__(self,
                 first_stage_config,
                 cond_stage_config,
                 num_timesteps_cond=None,
                 cond_stage_key="image",
                 cond_stage_trainable=False,
                 concat_mode=True,
                 cond_stage_forward=None,
                 conditioning_key=None,
                 scale_factor=1.0,
                 scale_by_std=False,
                 text_prior_enable=False,
                 image_height=32,
                 image_width=128,
                 STN_enable=False,
                 standard_text=False,
                 VL_pretrained_path=None,
                 fid_eval=False,
                 visualize=False,
                 down_sample_rate=2,
                 recog_loss_enable=False,
                 font_path=None,
                 *args, **kwargs):
        self.fid_eval = fid_eval
        self.visualize = visualize
        self.text_prior_enable = text_prior_enable
        self.recog_loss_enable = recog_loss_enable
        self.num_timesteps_cond = default(num_timesteps_cond, 1)
        self.scale_by_std = scale_by_std
        assert self.num_timesteps_cond <= kwargs['timesteps']
        # for backwards compatibility after implementation of DiffusionWrapper
        if conditioning_key is None:
            conditioning_key = 'concat' if concat_mode else 'crossattn'
        if cond_stage_config == '__is_unconditional__':
            conditioning_key = None
        ckpt_path = kwargs.pop("ckpt_path", None)
        ignore_keys = kwargs.pop("ignore_keys", [])
        super().__init__(conditioning_key=conditioning_key, *args, **kwargs)
        self.concat_mode = concat_mode
        self.cond_stage_trainable = cond_stage_trainable
        self.cond_stage_key = cond_stage_key
        try:
            self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1
        except:
            self.num_downs = 0
        if not scale_by_std:
            self.scale_factor = scale_factor
        else:
            self.register_buffer('scale_factor', torch.tensor(scale_factor))
        self.instantiate_first_stage(first_stage_config)
        self.instantiate_cond_stage(cond_stage_config)
        self.cond_stage_forward = cond_stage_forward
        self.clip_denoised = False
        self.bbox_tokenizer = None

        self.restarted_from_ckpt = False
        if ckpt_path is not None:
            self.init_from_ckpt(ckpt_path, ignore_keys)
            self.restarted_from_ckpt = True

        self.image_height = image_height
        self.image_width = image_width
        self.stn = STN_enable
        if self.stn:
            self.tps_inputsize = [image_height // down_sample_rate, image_width // down_sample_rate]
            tps_outputsize = [image_height // down_sample_rate, image_width // down_sample_rate]
            num_control_points = 20
            tps_margins = [0.05, 0.05]
            self.tps = TPSSpatialTransformer(
                output_image_size=tuple(tps_outputsize),
                num_control_points=num_control_points,
                margins=tuple(tps_margins))
            self.stn_head = STNHead(
                in_planes=3,
                num_ctrlpoints=num_control_points,
                activation='none',
                input_size=self.tps_inputsize)
        self.standard_text = standard_text
        if self.standard_text:
            # self.VL_model = self.VisionLAN_init(VL_pretrained_path)
            # self.test_acc_counter = Attention_AR_counter('\ntest accuracy: ',
            #                                              '/home/zhouyuxuan/latent-diffusion/dic_36.txt', False)
            self.font_path = font_path
            pygame.init()
            freetype.init()
            self.cal_psnr = ssim_psnr.calculate_psnr
            self.cal_ssim = ssim_psnr.SSIM()

    def VisionLAN_init(self, path=None):
        cfg = {'args': {
            'strides': [(1, 1), (2, 2), (2, 2), (2, 2), (1, 1), (1, 1)],
            'input_shape': [3, 64, 256],  # C x H x W
        },
            'init_state_dict': '/home/zhouyuxuan/latent-diffusion/visionlan.pth',
        }

        model_VL = VisionLAN(**cfg['args'])
        model_path = cfg['init_state_dict'] if path is None else path
        print('load pre_trained VisionLAN model from %s' % model_path)
        model_VL = model_VL.to(self.device)
        model_VL = nn.DataParallel(model_VL)
        if cfg['init_state_dict'] != None:
            fe_state_dict_ori = torch.load(model_path)
            fe_state_dict = OrderedDict()
            for k, v in fe_state_dict_ori.items():
                if 'module' not in k:
                    k = 'module.' + k
                else:
                    k = k.replace('features.module.', 'module.features.')
                fe_state_dict[k] = v
            model_dict_fe = model_VL.state_dict()
            state_dict_fe = {k: v for k, v in fe_state_dict.items() if k in model_dict_fe.keys()}
            model_dict_fe.update(state_dict_fe)
            model_VL.load_state_dict(model_dict_fe)
        return model_VL

    def parse_visionlan_data(self, imgs_input):
        imgs_input = transforms.ToPILImage()(imgs_input).convert('RGB')
        imgs_input = cv2.resize(np.array(imgs_input), (256, 64))
        imgs_input = transforms.ToTensor()(imgs_input).unsqueeze(0)
        imgs_input = imgs_input.to(self.device)
        return imgs_input

    def make_cond_schedule(self, ):
        self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long)
        ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long()
        self.cond_ids[:self.num_timesteps_cond] = ids

    def on_save_checkpoint(self, checkpoint):
        if not isinstance(self.cond_stage_model, torch.nn.Identity):
            self.cond_stage_model.save_state_dict(
                '/home/zhouyuxuan/latent-diffusion/crnn_ckpt/', self.current_epoch)

    @rank_zero_only
    @torch.no_grad()
    def on_train_batch_start(self, batch, batch_idx, dataloader_idx):
        # only for very first batch
        if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt:
            assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously'
            # set rescale weight to 1./std of encodings
            print("### USING STD-RESCALING ###")
            x = super().get_input(batch, self.first_stage_key)
            x = x.to(self.device)
            encoder_posterior = self.encode_first_stage(x)
            z = self.get_first_stage_encoding(encoder_posterior).detach()
            del self.scale_factor
            self.register_buffer('scale_factor', 1. / z.flatten().std())
            print(f"setting self.scale_factor to {self.scale_factor}")
            print("### USING STD-RESCALING ###")

    def register_schedule(self,
                          given_betas=None, beta_schedule="linear", timesteps=1000,
                          linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
        super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s)

        self.shorten_cond_schedule = self.num_timesteps_cond > 1
        if self.shorten_cond_schedule:
            self.make_cond_schedule()

    def instantiate_first_stage(self, config):
        model = instantiate_from_config(config)
        self.first_stage_model = model.eval()
        self.first_stage_model.train = disabled_train
        for param in self.first_stage_model.parameters():
            param.requires_grad = False

    def instantiate_cond_stage(self, config):
        if not self.cond_stage_trainable:
            if config == "__is_first_stage__":
                print("Using first stage also as cond stage.")
                self.cond_stage_model = self.first_stage_model
            elif config == "__is_unconditional__":
                print(f"Training {self.__class__.__name__} as an unconditional model.")
                self.cond_stage_model = None
                # self.be_unconditional = True
            else:
                model = instantiate_from_config(config)
                self.cond_stage_model = model.eval()
                self.cond_stage_model.train = disabled_train
                for param in self.cond_stage_model.parameters():
                    param.requires_grad = False
        else:
            assert config != '__is_first_stage__'
            assert config != '__is_unconditional__'
            model = instantiate_from_config(config)
            self.cond_stage_model = model

    def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False):
        denoise_row = []
        for zd in tqdm(samples, desc=desc):
            denoise_row.append(self.decode_first_stage(zd.to(self.device),
                                                       force_not_quantize=force_no_decoder_quantization))
        n_imgs_per_row = len(denoise_row)
        denoise_row = torch.stack(denoise_row)  # n_log_step, n_row, C, H, W
        denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w')
        denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
        denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
        return denoise_grid

    def get_first_stage_encoding(self, encoder_posterior):
        if isinstance(encoder_posterior, DiagonalGaussianDistribution):
            z = encoder_posterior.sample()
        elif isinstance(encoder_posterior, torch.Tensor):
            z = encoder_posterior
        else:
            raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented")
        return self.scale_factor * z

    def get_learned_conditioning(self, c):
        if self.cond_stage_forward is None:
            if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode):
                c = self.cond_stage_model.encode(c)
                if isinstance(c, DiagonalGaussianDistribution):
                    c = c.mode()
            else:
                c = self.cond_stage_model(c)
        else:
            assert hasattr(self.cond_stage_model, self.cond_stage_forward)
            c = getattr(self.cond_stage_model, self.cond_stage_forward)(c)
        return c

    def meshgrid(self, h, w):
        y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1)
        x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1)
        arr = torch.cat([y, x], dim=-1)
        return arr

    def delta_border(self, h, w):
        """
        :param h: height
        :param w: width
        :return: normalized distance to image border, with min distance = 0 at border and max dist = 0.5 at image center
        """
        lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2)
        arr = self.meshgrid(h, w) / lower_right_corner
        dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0]
        dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0]
        edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0]
        return edge_dist

    def get_weighting(self, h, w, Ly, Lx, device):
        weighting = self.delta_border(h, w)
        weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"],
                               self.split_input_params["clip_max_weight"], )
        weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device)

        if self.split_input_params["tie_braker"]:
            L_weighting = self.delta_border(Ly, Lx)
            L_weighting = torch.clip(L_weighting,
                                     self.split_input_params["clip_min_tie_weight"],
                                     self.split_input_params["clip_max_tie_weight"])

            L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device)
            weighting = weighting * L_weighting
        return weighting

    def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1):  # todo load once not every time, shorten code
        """
        :param x: img of size (bs, c, h, w)
        :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1])
        """
        bs, nc, h, w = x.shape
        # number of crops in image
        Ly = (h - kernel_size[0]) // stride[0] + 1
        Lx = (w - kernel_size[1]) // stride[1] + 1

        if uf == 1 and df == 1:
            fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
            unfold = torch.nn.Unfold(**fold_params)

            fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params)

            weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype)
            normalization = fold(weighting).view(1, 1, h, w)  # normalizes the overlap
            weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx))

        elif uf > 1 and df == 1:
            fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
            unfold = torch.nn.Unfold(**fold_params)

            fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[1] * uf),
                                dilation=1, padding=0,
                                stride=(stride[0] * uf, stride[1] * uf))
            fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2)

            weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype)
            # print('weighting',weighting.shape,Ly,Lx)
            normalization = fold(weighting).view(1, 1, h * uf, w * uf)  # normalizes the overlap
            weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx))

        elif df > 1 and uf == 1:
            fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
            unfold = torch.nn.Unfold(**fold_params)

            fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[1] // df),
                                dilation=1, padding=0,
                                stride=(stride[0] // df, stride[1] // df))
            fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2)

            weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype)
            normalization = fold(weighting).view(1, 1, h // df, w // df)  # normalizes the overlap
            weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx))

        else:
            raise NotImplementedError

        return fold, unfold, normalization, weighting

    @torch.no_grad()
    def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False,
                  cond_key=None, return_original_cond=False, bs=None):
        x = super().get_input(batch, k)
        if bs is not None:
            x = x[:bs]
        x = x.to(self.device)
        encoder_posterior = self.encode_first_stage(x)
        z = self.get_first_stage_encoding(encoder_posterior).detach()

        if self.model.conditioning_key is not None:
            if cond_key is None:
                cond_key = self.cond_stage_key
            if cond_key != self.first_stage_key:
                if cond_key in ['caption', 'coordinates_bbox']:
                    xc = batch[cond_key]
                elif cond_key == 'class_label':
                    xc = batch
                else:
                    xc = super().get_input(batch, cond_key).to(self.device)
            else:
                xc = x
            if not self.cond_stage_trainable or force_c_encode:
                if isinstance(xc, dict) or isinstance(xc, list):
                    # import pudb; pudb.set_trace()
                    c = self.get_learned_conditioning(xc)
                else:
                    c = self.get_learned_conditioning(xc.to(self.device))
                if self.text_prior_enable:
                    c = self.get_additional_cond(xc, c)
                    # c = {'c_concat': [xc], 'c_crossattn': [c]}
            else:
                c = xc
            if bs is not None:
                if isinstance(c, dict):
                    for k, v in c.items():
                        c[k] = [v[0][:bs]]
                else:
                    c = c[:bs]

            if self.use_positional_encodings:
                pos_x, pos_y = self.compute_latent_shifts(batch)
                ckey = __conditioning_keys__[self.model.conditioning_key]
                c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y}

        else:
            c = None
            xc = None
            if self.use_positional_encodings:
                pos_x, pos_y = self.compute_latent_shifts(batch)
                c = {'pos_x': pos_x, 'pos_y': pos_y}
        out = [z, c]
        if return_first_stage_outputs:
            xrec = self.decode_first_stage(z)
            out.extend([x, xrec])
        if return_original_cond:
            out.append(xc)
        return out

    @torch.no_grad()
    def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):
        if predict_cids:
            if z.dim() == 4:
                z = torch.argmax(z.exp(), dim=1).long()
            z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)
            z = rearrange(z, 'b h w c -> b c h w').contiguous()

        z = 1. / self.scale_factor * z

        if hasattr(self, "split_input_params"):
            if self.split_input_params["patch_distributed_vq"]:
                ks = self.split_input_params["ks"]  # eg. (128, 128)
                stride = self.split_input_params["stride"]  # eg. (64, 64)
                uf = self.split_input_params["vqf"]
                bs, nc, h, w = z.shape
                print('decode z shape', z.shape)
                if ks[0] > h or ks[1] > w:
                    ks = (min(ks[0], h), min(ks[1], w))
                    print("reducing Kernel")

                if stride[0] > h or stride[1] > w:
                    stride = (min(stride[0], h), min(stride[1], w))
                    print("reducing stride")
                print(ks, stride, uf)

                fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf)

                z = unfold(z)  # (bn, nc * prod(**ks), L)
                # 1. Reshape to img shape
                z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1]))  # (bn, nc, ks[0], ks[1], L )

                # 2. apply model loop over last dim
                if isinstance(self.first_stage_model, VQModelInterface):
                    output_list = [self.first_stage_model.decode(z[:, :, :, :, i],
                                                                 force_not_quantize=predict_cids or force_not_quantize)
                                   for i in range(z.shape[-1])]
                else:
                    output_list = [self.first_stage_model.decode(z[:, :, :, :, i])
                                   for i in range(z.shape[-1])]

                o = torch.stack(output_list, axis=-1)  # (bn, nc, ks[0], ks[1], L)
                o = o * weighting
                # Reverse 1. reshape to img shape
                o = o.view((o.shape[0], -1, o.shape[-1]))  # (bn, nc * ks[0] * ks[1], L)
                # stitch crops together
                decoded = fold(o)
                decoded = decoded / normalization  # norm is shape (1, 1, h, w)
                return decoded
            else:
                if isinstance(self.first_stage_model, VQModelInterface):
                    return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
                else:
                    return self.first_stage_model.decode(z)

        else:
            if isinstance(self.first_stage_model, VQModelInterface):
                return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
            else:
                return self.first_stage_model.decode(z)

    # same as above but without decorator
    def differentiable_decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):
        if predict_cids:
            if z.dim() == 4:
                z = torch.argmax(z.exp(), dim=1).long()
            z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)
            z = rearrange(z, 'b h w c -> b c h w').contiguous()

        z = 1. / self.scale_factor * z

        if hasattr(self, "split_input_params"):
            if self.split_input_params["patch_distributed_vq"]:
                ks = self.split_input_params["ks"]  # eg. (128, 128)
                stride = self.split_input_params["stride"]  # eg. (64, 64)
                uf = self.split_input_params["vqf"]
                bs, nc, h, w = z.shape
                if ks[0] > h or ks[1] > w:
                    ks = (min(ks[0], h), min(ks[1], w))
                    print("reducing Kernel")

                if stride[0] > h or stride[1] > w:
                    stride = (min(stride[0], h), min(stride[1], w))
                    print("reducing stride")

                fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf)

                z = unfold(z)  # (bn, nc * prod(**ks), L)
                # 1. Reshape to img shape
                z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1]))  # (bn, nc, ks[0], ks[1], L )

                # 2. apply model loop over last dim
                if isinstance(self.first_stage_model, VQModelInterface):
                    output_list = [self.first_stage_model.decode(z[:, :, :, :, i],
                                                                 force_not_quantize=predict_cids or force_not_quantize)
                                   for i in range(z.shape[-1])]
                else:
                    output_list = [self.first_stage_model.decode(z[:, :, :, :, i])
                                   for i in range(z.shape[-1])]

                o = torch.stack(output_list, axis=-1)  # (bn, nc, ks[0], ks[1], L)
                o = o * weighting
                # Reverse 1. reshape to img shape
                o = o.view((o.shape[0], -1, o.shape[-1]))  # (bn, nc * ks[0] * ks[1], L)
                # stitch crops together
                decoded = fold(o)
                decoded = decoded / normalization  # norm is shape (1, 1, h, w)
                return decoded
            else:
                if isinstance(self.first_stage_model, VQModelInterface):
                    return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
                else:
                    return self.first_stage_model.decode(z)

        else:
            if isinstance(self.first_stage_model, VQModelInterface):
                return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
            else:
                return self.first_stage_model.decode(z)

    @torch.no_grad()
    def encode_first_stage(self, x):
        if hasattr(self, "split_input_params"):
            if self.split_input_params["patch_distributed_vq"]:
                ks = self.split_input_params["ks"]  # eg. (128, 128)
                stride = self.split_input_params["stride"]  # eg. (64, 64)
                df = self.split_input_params["vqf"]
                self.split_input_params['original_image_size'] = x.shape[-2:]
                bs, nc, h, w = x.shape
                print('encode x shape', x.shape)
                print('ks', ks, 'stride', stride, 'df', df)
                if ks[0] > h or ks[1] > w:
                    ks = (min(ks[0], h), min(ks[1], w))
                    print("reducing Kernel")

                if stride[0] > h or stride[1] > w:
                    stride = (min(stride[0], h), min(stride[1], w))
                    print("reducing stride")

                fold, unfold, normalization, weighting = self.get_fold_unfold(x, ks, stride, df=df)
                z = unfold(x)  # (bn, nc * prod(**ks), L)
                # Reshape to img shape
                z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1]))  # (bn, nc, ks[0], ks[1], L )
                print('encode z shape', z.shape)

                output_list = [self.first_stage_model.encode(z[:, :, :, :, i])
                               for i in range(z.shape[-1])]

                o = torch.stack(output_list, axis=-1)
                o = o * weighting

                # Reverse reshape to img shape
                o = o.view((o.shape[0], -1, o.shape[-1]))  # (bn, nc * ks[0] * ks[1], L)
                # stitch crops together
                decoded = fold(o)
                decoded = decoded / normalization
                return decoded

            else:
                return self.first_stage_model.encode(x)
        else:
            return self.first_stage_model.encode(x)

    def on_validation_start(self) -> None:
        print(f'******************************in validation {self.current_epoch}')

    def validation_step(self, batch, batch_idx):
        _, loss_dict_no_ema = self.shared_step(batch)
        with self.ema_scope():
            _, loss_dict_ema = self.shared_step(batch)
            loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema}
        self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True)
        self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True)

        if self.fid_eval and self.current_epoch % 10 == 0:
            results = self.recognize_sample(batch, N=114514, inpaint=False)
            rec_image = results['samples']
            target = batch[self.first_stage_key]
            target = rearrange(target, 'b h w c -> b c h w')
            cond = batch[self.cond_stage_key]
            cond = rearrange(cond, 'b h w c -> b c h w')

            if self.visualize:
                batchlen = rec_image.shape[0]
                rc = int(math.sqrt(batchlen))
                f, axs = plt.subplots(rc, rc, figsize=(16, 4), sharex=True, sharey=True)
                plt.subplots_adjust(wspace=0, hspace=0)
                print(len(axs), batchlen, int(math.sqrt(batchlen)))
                assert len(axs) ** 2 == batchlen
                for i in range(batchlen):
                    axs[i // rc, i % rc].set_xticklabels([])
                    axs[i // rc, i % rc].set_yticklabels([])
                    axs[i // rc, i % rc].set_aspect('equal')
                    axs[i // rc, i % rc].imshow(rec_image[i, :3, :, :].cpu().numpy().transpose(1, 2, 0))
                    axs[i // rc, i % rc].axis('off')
                plt.savefig(f'/home/zhouyuxuan/res/sample_{batch_idx}.jpg')
                plt.cla()

                f, axs = plt.subplots(rc, rc, figsize=(16, 4), sharex=True, sharey=True)
                plt.subplots_adjust(wspace=0, hspace=0)
                for i in range(batchlen):
                    axs[i // rc, i % rc].imshow(target[i, :3, :, :].cpu().numpy().transpose(1, 2, 0))
                    axs[i // rc, i % rc].axis('off')
                plt.savefig(f'/home/zhouyuxuan/res/target_{batch_idx}.jpg')
                plt.cla()

                f, axs = plt.subplots(rc, rc, figsize=(16, 4), sharex=True, sharey=True)
                plt.subplots_adjust(wspace=0, hspace=0)
                for i in range(batchlen):
                    axs[i // rc, i % rc].imshow(cond[i, :3, :, :].cpu().numpy().transpose(1, 2, 0))
                    axs[i // rc, i % rc].axis('off')
                plt.savefig(f'/home/zhouyuxuan/res/input_{batch_idx}.jpg')

            PSNR = self.cal_psnr(rec_image[:, :3], target[:, :3])
            SSIM = self.cal_ssim(rec_image[:, :3], target[:, :3])
            self.log_dict({'PSNR': PSNR, 'SSIM': SSIM},
                          prog_bar=False, logger=True, on_step=False, on_epoch=True)

    def shared_step(self, batch, **kwargs):
        # print('*******************************************************batch', batch['image'].shape)
        # if hasattr(self, "split_input_params"):
        #     print(self.split_input_params)
        x, c = self.get_input(batch, self.first_stage_key)
        loss, loss_dict = self(x, c)
        if self.recog_loss_enable:
            HR = batch['image']
            HR = rearrange(HR, 'b h w c -> b c h w')
            HR = HR.to(memory_format=torch.contiguous_format).float()
            LR = c
            label_vecs = self.get_learned_conditioning(c).permute(1, 0, 2)
            label_vecs_hr = self.get_learned_conditioning(HR).permute(1, 0, 2)
            loss_recog_distill = sem_loss(label_vecs, label_vecs_hr) * 100  # 100
            loss = loss + loss_recog_distill
            loss_dict.update({f'loss_recog': loss_recog_distill})
            # return loss + loss_recog_distill, loss_dict

        return loss, loss_dict

    def get_additional_cond(self, c, tp):
        if self.stn:
            _, ctrl_points_c = self.stn_head(c)
            c, _ = self.tps(c, ctrl_points_c)
        if self.standard_text:
            x_q = torch.empty(1, 2, c.shape[2], c.shape[3])
            # prob_lr = torch.empty(1, 25, 37)
            rec_results = get_string_crnn(tp.permute(1, 0, 2), False)

            for i in range(c.shape[0]):
                # visionlan_dict_lr = self.parse_visionlan_data(c[i, :3, :, :])
                # target = ''
                # label_lr, label_length = self.VL_model(visionlan_dict_lr, target, '', False)
                # pred_str_lr, pred_prob = self.test_acc_counter.convert(label_lr, label_length)
                # s = pred_str_lr[0]
                # prob_lr = torch.cat([prob_lr, pred_prob], dim=0)
                s = rec_results[i]
                if s == "" or type(s) == torch.Tensor:
                    s = "\t"
                lower_case = s.lower()
                upper_case = s.upper()
                i_t_lower = make_standard_text(self.font_path, lower_case, (c.shape[2], c.shape[3]))
                i_t_lower_tensor = torch.from_numpy(i_t_lower).unsqueeze(0).unsqueeze(0)
                i_t_upper = make_standard_text(self.font_path, upper_case, (c.shape[2], c.shape[3]))
                i_t_upper_tensor = torch.from_numpy(i_t_upper).unsqueeze(0).unsqueeze(0)
                i_t_tensor = torch.cat([i_t_lower_tensor, i_t_upper_tensor], dim=1)
                x_q = torch.cat([x_q, i_t_tensor], dim=0)
            x_q = x_q[1:]
            # prob_lr = prob_lr[1:]
            x_q = x_q.to(self.device)
            # prob_lr = prob_lr.to(self.device)
            c = torch.cat([c, x_q], dim=1)
        return {'c_concat': [c], 'c_crossattn': [tp]}

    def forward(self, x, c, *args, **kwargs):
        t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
        if self.model.conditioning_key is not None:
            assert c is not None
            if self.text_prior_enable and self.model.conditioning_key == 'hybrid':
                tp = self.get_learned_conditioning(c)
                c = self.get_additional_cond(c, tp)
            else:
                if self.cond_stage_trainable:
                    c = self.get_learned_conditioning(c)
                if self.shorten_cond_schedule:  # TODO: drop this option
                    tc = self.cond_ids[t].to(self.device)
                    c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float()))
        return self.p_losses(x, c, t, *args, **kwargs)

    def _rescale_annotations(self, bboxes, crop_coordinates):  # TODO: move to dataset
        def rescale_bbox(bbox):
            x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2])
            y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3])
            w = min(bbox[2] / crop_coordinates[2], 1 - x0)
            h = min(bbox[3] / crop_coordinates[3], 1 - y0)
            return x0, y0, w, h

        return [rescale_bbox(b) for b in bboxes]

    def apply_model(self, x_noisy, t, cond, return_ids=False):

        if isinstance(cond, dict):
            # hybrid case, cond is expected to be a dict
            pass
        else:
            if not isinstance(cond, list):
                cond = [cond]
            key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn'
            cond = {key: cond}

        if hasattr(self, "split_input_params"):
            assert len(cond) == 1  # todo can only deal with one conditioning atm
            assert not return_ids
            ks = self.split_input_params["ks"]  # eg. (128, 128)
            stride = self.split_input_params["stride"]  # eg. (64, 64)

            h, w = x_noisy.shape[-2:]
            if ks[0] > h or ks[1] > w:
                ks = (min(ks[0], h), min(ks[1], w))
                # print("reducing Kernel")

            if stride[0] > h or stride[1] > w:
                stride = (min(stride[0], h), min(stride[1], w))
                # print("reducing stride")

            # print('ddpm','x_noisy shape',x_noisy.shape,'ks',ks,'stride',stride)
            fold, unfold, normalization, weighting = self.get_fold_unfold(x_noisy, ks, stride)

            z = unfold(x_noisy)  # (bn, nc * prod(**ks), L)
            # Reshape to img shape
            z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1]))  # (bn, nc, ks[0], ks[1], L )
            z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])]

            if self.cond_stage_key in ["image", "LR_image", "segmentation",
                                       'bbox_img'] and self.model.conditioning_key:  # todo check for completeness
                c_key = next(iter(cond.keys()))  # get key
                c = next(iter(cond.values()))  # get value
                assert (len(c) == 1)  # todo extend to list with more than one elem
                c = c[0]  # get element

                c = unfold(c)
                c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1]))  # (bn, nc, ks[0], ks[1], L )

                cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])]

            elif self.cond_stage_key == 'coordinates_bbox':
                assert 'original_image_size' in self.split_input_params, 'BoundingBoxRescaling is missing original_image_size'

                # assuming padding of unfold is always 0 and its dilation is always 1
                n_patches_per_row = int((w - ks[0]) / stride[0] + 1)
                full_img_h, full_img_w = self.split_input_params['original_image_size']
                # as we are operating on latents, we need the factor from the original image size to the
                # spatial latent size to properly rescale the crops for regenerating the bbox annotations
                num_downs = self.first_stage_model.encoder.num_resolutions - 1
                rescale_latent = 2 ** (num_downs)

                # get top left positions of patches as conforming for the bbox tokenizer, therefore we
                # need to rescale the tl patch coordinates to be in between (0,1)
                tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w,
                                         rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h)
                                        for patch_nr in range(z.shape[-1])]

                # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w)
                patch_limits = [(x_tl, y_tl,
                                 rescale_latent * ks[0] / full_img_w,
                                 rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates]
                # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates]

                # tokenize crop coordinates for the bounding boxes of the respective patches
                patch_limits_tknzd = [torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to(self.device)
                                      for bbox in patch_limits]  # list of length l with tensors of shape (1, 2)
                print(patch_limits_tknzd[0].shape)
                # cut tknzd crop position from conditioning
                assert isinstance(cond, dict), 'cond must be dict to be fed into model'
                cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device)
                print(cut_cond.shape)

                adapted_cond = torch.stack([torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd])
                adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n')
                print(adapted_cond.shape)
                adapted_cond = self.get_learned_conditioning(adapted_cond)
                print(adapted_cond.shape)
                adapted_cond = rearrange(adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1])
                print(adapted_cond.shape)

                cond_list = [{'c_crossattn': [e]} for e in adapted_cond]

            else:
                cond_list = [cond for i in range(z.shape[-1])]  # Todo make this more efficient

            # apply model by loop over crops
            output_list = [self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1])]
            assert not isinstance(output_list[0],
                                  tuple)  # todo can't deal with multiple model outputs, check this never happens

            o = torch.stack(output_list, axis=-1)
            o = o * weighting
            # Reverse reshape to img shape
            o = o.view((o.shape[0], -1, o.shape[-1]))  # (bn, nc * ks[0] * ks[1], L)
            # stitch crops together
            x_recon = fold(o) / normalization

        else:
            x_recon = self.model(x_noisy, t, **cond)

        if isinstance(x_recon, tuple) and not return_ids:
            return x_recon[0]
        else:
            return x_recon

    def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
        return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \
               extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)

    def _prior_bpd(self, x_start):
        """
        Get the prior KL term for the variational lower-bound, measured in
        bits-per-dim.
        This term can't be optimized, as it only depends on the encoder.
        :param x_start: the [N x C x ...] tensor of inputs.
        :return: a batch of [N] KL values (in bits), one per batch element.
        """
        batch_size = x_start.shape[0]
        t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)
        qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
        kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
        return mean_flat(kl_prior) / np.log(2.0)

    def p_losses(self, x_start, cond, t, noise=None):
        noise = default(noise, lambda: torch.randn_like(x_start))
        x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
        model_output = self.apply_model(x_noisy, t, cond)

        loss_dict = {}
        prefix = 'train' if self.training else 'val'

        if self.parameterization == "x0":
            target = x_start
        elif self.parameterization == "eps":
            target = noise
        else:
            raise NotImplementedError()

        loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3])
        loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()})

        self.logvar = self.logvar.to(self.device)
        logvar_t = self.logvar[t].to(self.device)
        loss = loss_simple / torch.exp(logvar_t) + logvar_t
        # loss = loss_simple / torch.exp(self.logvar) + self.logvar
        if self.learn_logvar:
            loss_dict.update({f'{prefix}/loss_gamma': loss.mean()})
            loss_dict.update({'logvar': self.logvar.data.mean()})

        loss = self.l_simple_weight * loss.mean()

        loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3))
        loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean()
        loss_dict.update({f'{prefix}/loss_vlb': loss_vlb})
        loss += (self.original_elbo_weight * loss_vlb)
        loss_dict.update({f'{prefix}/loss': loss})

        return loss, loss_dict

    def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False,
                        return_x0=False, score_corrector=None, corrector_kwargs=None):
        t_in = t
        model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids)

        if score_corrector is not None:
            assert self.parameterization == "eps"
            model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs)

        if return_codebook_ids:
            model_out, logits = model_out

        if self.parameterization == "eps":
            x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
        elif self.parameterization == "x0":
            x_recon = model_out
        else:
            raise NotImplementedError()

        if clip_denoised:
            x_recon.clamp_(-1., 1.)
        if quantize_denoised:
            x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon)
        model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
        if return_codebook_ids:
            return model_mean, posterior_variance, posterior_log_variance, logits
        elif return_x0:
            return model_mean, posterior_variance, posterior_log_variance, x_recon
        else:
            return model_mean, posterior_variance, posterior_log_variance

    @torch.no_grad()
    def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False,
                 return_codebook_ids=False, quantize_denoised=False, return_x0=False,
                 temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None):
        b, *_, device = *x.shape, x.device
        outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised,
                                       return_codebook_ids=return_codebook_ids,
                                       quantize_denoised=quantize_denoised,
                                       return_x0=return_x0,
                                       score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
        if return_codebook_ids:
            raise DeprecationWarning("Support dropped.")
            model_mean, _, model_log_variance, logits = outputs
        elif return_x0:
            model_mean, _, model_log_variance, x0 = outputs
        else:
            model_mean, _, model_log_variance = outputs

        noise = noise_like(x.shape, device, repeat_noise) * temperature
        if noise_dropout > 0.:
            noise = torch.nn.functional.dropout(noise, p=noise_dropout)
        # no noise when t == 0
        nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))

        if return_codebook_ids:
            return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1)
        if return_x0:
            return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0
        else:
            return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise

    @torch.no_grad()
    def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False,
                              img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0.,
                              score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None,
                              log_every_t=None):
        if not log_every_t:
            log_every_t = self.log_every_t
        timesteps = self.num_timesteps
        if batch_size is not None:
            b = batch_size if batch_size is not None else shape[0]
            shape = [batch_size] + list(shape)
        else:
            b = batch_size = shape[0]
        if x_T is None:
            img = torch.randn(shape, device=self.device)
        else:
            img = x_T
        intermediates = []
        if cond is not None:
            if isinstance(cond, dict):
                cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
                list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
            else:
                cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]

        if start_T is not None:
            timesteps = min(timesteps, start_T)
        iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation',
                        total=timesteps) if verbose else reversed(
            range(0, timesteps))
        if type(temperature) == float:
            temperature = [temperature] * timesteps

        for i in iterator:
            ts = torch.full((b,), i, device=self.device, dtype=torch.long)
            if self.shorten_cond_schedule:
                assert self.model.conditioning_key != 'hybrid'
                tc = self.cond_ids[ts].to(cond.device)
                cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))

            img, x0_partial = self.p_sample(img, cond, ts,
                                            clip_denoised=self.clip_denoised,
                                            quantize_denoised=quantize_denoised, return_x0=True,
                                            temperature=temperature[i], noise_dropout=noise_dropout,
                                            score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
            if mask is not None:
                assert x0 is not None
                img_orig = self.q_sample(x0, ts)
                img = img_orig * mask + (1. - mask) * img

            if i % log_every_t == 0 or i == timesteps - 1:
                intermediates.append(x0_partial)
            if callback:
                callback(i)
            if img_callback:
                img_callback(img, i)
        return img, intermediates

    @torch.no_grad()
    def p_sample_loop(self, cond, shape, return_intermediates=False,
                      x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False,
                      mask=None, x0=None, img_callback=None, start_T=None, log_every_t=None):

        if not log_every_t:
            log_every_t = self.log_every_t
        device = self.betas.device
        b = shape[0]
        if x_T is None:
            img = torch.randn(shape, device=device)
        else:
            img = x_T

        intermediates = [img]
        if timesteps is None:
            timesteps = self.num_timesteps

        if start_T is not None:
            timesteps = min(timesteps, start_T)
        iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed(
            range(0, timesteps))

        if mask is not None:
            assert x0 is not None
            assert x0.shape[2:3] == mask.shape[2:3]  # spatial size has to match

        for i in iterator:
            ts = torch.full((b,), i, device=device, dtype=torch.long)
            if self.shorten_cond_schedule:
                assert self.model.conditioning_key != 'hybrid'
                tc = self.cond_ids[ts].to(cond.device)
                cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))

            img = self.p_sample(img, cond, ts,
                                clip_denoised=self.clip_denoised,
                                quantize_denoised=quantize_denoised)
            if mask is not None:
                img_orig = self.q_sample(x0, ts)
                img = img_orig * mask + (1. - mask) * img

            if i % log_every_t == 0 or i == timesteps - 1:
                intermediates.append(img)
            if callback:
                callback(i)
            if img_callback:
                img_callback(img, i)

        if return_intermediates:
            return img, intermediates
        return img

    @torch.no_grad()
    def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None,
               verbose=True, timesteps=None, quantize_denoised=False,
               mask=None, x0=None, shape=None, **kwargs):
        if shape is None:
            shape = (batch_size, self.channels, self.image_size, self.image_size)
        if cond is not None:
            if isinstance(cond, dict):
                cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
                list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
            else:
                cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
        return self.p_sample_loop(cond,
                                  shape,
                                  return_intermediates=return_intermediates, x_T=x_T,
                                  verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised,
                                  mask=mask, x0=x0)

    @torch.no_grad()
    def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs):
        if ddim:
            ddim_sampler = DDIMSampler(self)
            # print(cond.shape)
            if self.text_prior_enable:
                if isinstance(cond, dict):
                    shape = (self.channels, cond['c_concat'][0].shape[2], cond['c_concat'][0].shape[3])
                elif isinstance(cond, list):
                    shape = (self.channels, cond[0].shape[2], cond[0].shape[3])
                else:
                    shape = (self.channels, cond.shape[2], cond.shape[3])
            else:
                shape = (self.channels, cond.shape[2], cond.shape[3])
            # shape = (self.channels, self.image_size, self.image_size)
            samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size, shape, cond, verbose=False, **kwargs)
        else:
            samples, intermediates = self.sample(cond=cond, batch_size=batch_size,
                                                 return_intermediates=True, **kwargs)

        return samples, intermediates

    @torch.no_grad()
    def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None,
                   quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True,
                   plot_diffusion_rows=True, **kwargs):

        use_ddim = ddim_steps is not None

        log = dict()
        z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key,
                                           return_first_stage_outputs=True,
                                           force_c_encode=True,
                                           return_original_cond=True,
                                           bs=N)
        # print('**********************c shape',c.shape)
        N = min(x.shape[0], N)
        n_row = min(x.shape[0], n_row)
        log["inputs"] = x
        log["reconstruction"] = xrec
        if self.model.conditioning_key is not None:
            if hasattr(self.cond_stage_model, "decode"):
                xc = self.cond_stage_model.decode(c)
                log["conditioning"] = xc
            elif self.cond_stage_key in ["caption"]:
                xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["caption"])
                log["conditioning"] = xc
            elif self.cond_stage_key == 'class_label':
                xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"])
                log['conditioning'] = xc
            elif isimage(xc):
                log["conditioning"] = xc
            if ismap(xc):
                log["original_conditioning"] = self.to_rgb(xc)

        if plot_diffusion_rows:
            # get diffusion row
            diffusion_row = list()
            z_start = z[:n_row]
            for t in range(self.num_timesteps):
                if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
                    t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
                    t = t.to(self.device).long()
                    noise = torch.randn_like(z_start)
                    z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)
                    diffusion_row.append(self.decode_first_stage(z_noisy))

            diffusion_row = torch.stack(diffusion_row)  # n_log_step, n_row, C, H, W
            diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')
            diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')
            diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])
            log["diffusion_row"] = diffusion_grid

        if sample:
            # get denoise row
            with self.ema_scope("Plotting"):
                samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
                                                         ddim_steps=ddim_steps, eta=ddim_eta)
                # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)
            x_samples = self.decode_first_stage(samples)
            log["samples"] = x_samples
            if plot_denoise_rows:
                denoise_grid = self._get_denoise_row_from_list(z_denoise_row)
                log["denoise_row"] = denoise_grid
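Every `split_input_params` branch above (apply_model, encode_first_stage, and both decoders) repeats the same pattern: unfold the input into overlapping crops, run the model on each crop, fold the results back, and normalize the overlaps. A minimal standalone sketch of that pattern, using uniform overlap averaging instead of the border-falloff weighting above, with a hypothetical stand-in `model` rather than this repository's API:

import torch

def patched_apply(x, model, ks=(64, 64), stride=(32, 32)):
    bs, nc, h, w = x.shape
    unfold = torch.nn.Unfold(kernel_size=ks, stride=stride)
    fold = torch.nn.Fold(output_size=(h, w), kernel_size=ks, stride=stride)

    crops = unfold(x)                                   # (bs, nc*ks0*ks1, L)
    L = crops.shape[-1]
    crops = crops.view(bs, nc, ks[0], ks[1], L)         # one slice per crop

    outs = [model(crops[..., i]) for i in range(L)]     # apply model per crop
    o = torch.stack(outs, dim=-1).view(bs, -1, L)       # back to fold layout

    norm = fold(torch.ones(bs, nc * ks[0] * ks[1], L))  # overlap counts
    return fold(o) / norm                               # stitch and average

x = torch.randn(1, 3, 128, 128)
y = patched_apply(x, model=lambda crop: crop)           # identity model
print(torch.allclose(x, y, atol=1e-5))                  # True: x is recovered

Dividing the folded output by the folded weight map is what keeps overlapping crops from brightening the stitched result; the code above replaces that weight map with `delta_border`-based falloff so crop borders contribute less than crop centers.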
if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance(
13
2023-11-20 06:34:21+00:00
24k
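The buffers registered in register_schedule above are all closed-form functions of the beta schedule. A self-contained numeric sketch of the same quantities, assuming the standard linear schedule (the values here are illustrative defaults, not read from this file):

import numpy as np

T = 1000
betas = np.linspace(1e-4, 2e-2, T)
alphas = 1. - betas
alphas_cumprod = np.cumprod(alphas)
alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])

def q_sample(x0, t, noise):
    # One-step sampling of x_t ~ q(x_t | x_0):
    # x_t = sqrt(abar_t) * x0 + sqrt(1 - abar_t) * eps
    return np.sqrt(alphas_cumprod[t]) * x0 + np.sqrt(1. - alphas_cumprod[t]) * noise

# Posterior q(x_{t-1} | x_t, x_0) mean coefficients; these match the
# posterior_mean_coef1/2 buffers registered above (v_posterior = 0 case).
t = 500
coef1 = betas[t] * np.sqrt(alphas_cumprod_prev[t]) / (1. - alphas_cumprod[t])
coef2 = (1. - alphas_cumprod_prev[t]) * np.sqrt(alphas[t]) / (1. - alphas_cumprod[t])
print(coef1, coef2)  # posterior mean = coef1 * x0 + coef2 * x_t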
microsoft/Project-BayesDAG
src/causica/models/pvae_base_model.py
[ { "identifier": "Dataset", "path": "src/causica/datasets/dataset.py", "snippet": "class Dataset(BaseDataset):\n \"\"\"\n Class to store dense train/val/test data and masks and variables metadata.\n Note that the data and masks provided by this class are read only.\n \"\"\"\n\n def __init_...
from abc import abstractmethod
from typing import Any, Callable, Dict, Optional, Tuple, Union, cast

from scipy.sparse import csr_matrix, issparse
from tqdm import tqdm

from ..datasets.dataset import Dataset, SparseDataset
from ..datasets.variables import Variables
from ..objectives.eddi import EDDIObjective
from ..utils.data_mask_utils import restore_preserved_values, sample_inducing_points
from ..utils.helper_functions import to_tensors
from ..utils.torch_utils import create_dataloader
from ..utils.training_objectives import gaussian_negative_log_likelihood, negative_log_likelihood
from .imodel import IModelForObjective
from .torch_imputation import impute
from .torch_model import TorchModel
from .torch_training_types import LossConfig
import numpy as np
import torch
20,613
        self: PVAEBaseModel,
        data: np.ndarray,
        feature_mask: np.ndarray,
        target_idx,
        sample_count: int = 50,
    ):
        """
        Computes the predictive log-likelihood of the target-data given the feature_mask-masked data as input.

        Args:
            data (Numpy array of shape (batch_size, feature_count)): Data in unprocessed form to be used to
                compute the pll.
            feature_mask (Numpy array of shape (batch_size, feature_count)): Mask indicating conditioning
                variables for computing the predictive log-likelihood.
            target_idx (int): Column index of the target variable to compute the likelihood of.
            sample_count (int): Number of Monte Carlo samples to use from the latent space. Defaults to 50.

        Returns:
            predictive_ll (float): Mean predictive log-likelihood (mean taken over batch dim in data).
        """
        # Process input data
        (
            proc_feature_data_array,
            proc_feature_mask_array,
        ) = self.data_processor.process_data_and_masks(data, feature_mask)
        proc_feature_data, proc_feature_mask = to_tensors(
            proc_feature_data_array, proc_feature_mask_array, device=self.device
        )

        # Create target_mask from target_index
        target_mask = np.zeros_like(data, dtype=bool)
        target_mask[:, target_idx] = 1

        # Process target data
        (
            proc_target_data_array,
            proc_target_mask_array,
        ) = self.data_processor.process_data_and_masks(data, target_mask)
        proc_target_data, proc_target_mask = to_tensors(
            proc_target_data_array, proc_target_mask_array, device=self.device
        )

        # Expand target data and mask to be shape (sample_count, batch_size, feature_count)
        proc_target_data = proc_target_data.expand(sample_count, *proc_target_data.shape)
        proc_target_mask = proc_target_mask.expand(sample_count, *proc_target_mask.shape)

        # Compute PVAE outputs given input features (parameters of the Gaussian mixture)
        (dec_mean, dec_logvar), _, _ = self.reconstruct(proc_feature_data, proc_feature_mask, count=sample_count)

        # Compute Gaussian negative log-likelihood per sample in sample_count
        gnll = gaussian_negative_log_likelihood(
            proc_target_data, dec_mean, dec_logvar, mask=proc_target_mask, sum_type=None
        )
        gnll = gnll[:, :, target_idx]
        predictive_ll = -gnll
        predictive_ll = torch.logsumexp(predictive_ll, dim=0) - np.log(sample_count)
        predictive_ll = predictive_ll.mean()

        return predictive_ll

    def get_marginal_log_likelihood(
        self,
        impute_config: Dict[str, int],
        data: Union[np.ndarray, csr_matrix],
        observed_mask: Optional[Union[np.ndarray, csr_matrix]] = None,
        target_mask: Optional[Union[np.ndarray, csr_matrix]] = None,
        evaluate_imputation: Optional[bool] = False,
        num_importance_samples: int = 5000,
        **kwargs,
    ) -> float:
        """
        Estimate marginal log-likelihood of the data using importance sampling:
            - Imputation MLL -> imputed data given the observed data log p(x_u|x_o) if evaluate_imputation is True
            - Reconstruction MLL -> all data log p(x) otherwise

        Args:
            impute_config: Dictionary containing options for inference.
            data: Data in unprocessed form to be used with shape (num_rows, input_dim).
            observed_mask: If not None, mask indicating observed variables with shape (num_rows, input_dim).
                1 is observed, 0 is unobserved. If None, everything is marked as observed.
            target_mask: Values masked during imputation to use as prediction targets, where 1 is a target,
                0 is not. If None, nothing is marked as an imputation target.
            evaluate_imputation: Whether to estimate Imputation MLL log p(x_u|x_o) or Reconstruction MLL log p(x).
            num_importance_samples: The number of importance samples to be taken.
            **kwargs: Extra keyword arguments required by reconstruct.

        Returns:
            marginal_log_likelihood: The estimated marginal log likelihood averaged across data points.
        """
        # TODO(17895): Add Generation MLL option to the marginal log-likelihood metric.
        batch_size = impute_config["batch_size"]

        # Assumed to only work on dense arrays for now
        if issparse(data):
            data = cast(csr_matrix, data)
            data = data.toarray()
        if issparse(observed_mask):
            observed_mask = cast(csr_matrix, observed_mask)
            observed_mask = observed_mask.toarray()
        if issparse(target_mask):
            target_mask = cast(csr_matrix, target_mask)
            target_mask = target_mask.toarray()

        if observed_mask is None:
            observed_mask = np.ones_like(data, dtype=bool)
        if target_mask is None:
            assert not evaluate_imputation
            target_mask = np.zeros_like(data, dtype=bool)
        assert data.shape == observed_mask.shape
        assert data.shape == target_mask.shape
        num_rows, _ = data.shape

        # TODO(17896): Add processing and batching of extra data objects
        processed_data, processed_obs_mask, processed_target_mask = self.data_processor.process_data_and_masks(
            data, observed_mask, target_mask
        )
        marginal_log_likelihood = np.empty((num_rows,), dtype=processed_data.dtype)

        with torch.no_grad():
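The importance-sampling estimate described in the docstring above is the standard one: draw z_k from the encoder q(z|x), weight each sample by p(x, z_k)/q(z_k|x), and average in log space. A generic sketch with hypothetical `log_joint` and `log_q` callables (stand-ins, not this class's internals):

import math
import torch

def importance_sampled_mll(log_joint, log_q, z):
    # `log_joint` and `log_q` return per-sample log-densities of shape
    # (num_samples, batch_size), with z ~ q(z | x).
    log_w = log_joint(z) - log_q(z)          # log importance weights
    num_samples = log_w.shape[0]
    # log p(x) ~= logsumexp_k log_w_k - log K, averaged over the batch
    return (torch.logsumexp(log_w, dim=0) - math.log(num_samples)).mean()

# Toy usage with standard-normal stand-ins for both densities.
z = torch.randn(100, 4, 2)
std_normal = lambda v: torch.distributions.Normal(0., 1.).log_prob(v).sum(-1)
print(importance_sampled_mll(std_normal, std_normal, z))  # ~0: all weights are 1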
# This is required in python 3 to allow return types of the same class. from __future__ import annotations EPSILON = 1e-5 class PVAEBaseModel(TorchModel, IModelForObjective): """ Abstract model class. To instantiate this class, these functions need to be implemented: _train: Run the training loop for the model. _impute: Fill in any missing values for test data. _reconstruct: Reconstruct data by passing them through the VAE name: Name of model implementation. """ def __init__(self, model_id: str, variables: Variables, save_dir: str, device: torch.device) -> None: """ Args: model_id: Unique model ID for referencing this model instance. variables: Information about variables/features used by this model. save_dir: Location to save any information about this model, including training data. It will be created if it doesn't exist. device: Name of Torch device to create the model on. Valid options are 'cpu', 'gpu', or a device ID (e.g. 0 or 1 on a two-GPU machine). """ super().__init__(model_id, variables, save_dir, device) self._alpha = 1.0 # The default value for the categorical likelihood coefficient. @staticmethod def _split_vamp_prior_config(training_config: Dict[str, Any]) -> Tuple[dict, dict]: # Split training config into (training_config, vamp_prior_config) training_config = training_config.copy() vamp_prior_config = {"save_vamp_prior": training_config.pop("save_vamp_prior")} for k in ["vamp_prior_reward_samples", "vamp_prior_inducing_points"]: vamp_prior_config.update({k: training_config.pop(k, None)}) return training_config, vamp_prior_config def _save_vamp_prior( self, processed_dataset: Union[Dataset, SparseDataset], save_vamp_prior: bool, vamp_prior_inducing_points: Optional[int] = None, vamp_prior_reward_samples: Optional[int] = None, ) -> None: if not save_vamp_prior: return assert vamp_prior_inducing_points is not None assert vamp_prior_reward_samples is not None train_data, train_mask = processed_dataset.train_data_and_mask vamp_prior_data = sample_inducing_points(train_data, train_mask, row_count=vamp_prior_inducing_points) vamp_prior_data = cast(Union[Tuple[np.ndarray, np.ndarray], Tuple[csr_matrix, csr_matrix]], vamp_prior_data) EDDIObjective.calc_and_save_vamp_prior_info_gain(self, vamp_prior_data, sample_count=vamp_prior_reward_samples) def run_train( self, dataset: Union[Dataset, SparseDataset], train_config_dict: Optional[Dict[str, Any]] = None, report_progress_callback: Optional[Callable[[str, int, int], None]] = None, ) -> None: """ Train the model. Training results will be saved. Args: dataset: Dataset object with data and masks in unprocessed form. train_config_dict (dictionary): Any other parameters needed by a specific concrete class. Of the form {arg_name: arg_value}. e.g. {"learning_rate": 1e-3, "epochs": 100} report_progress_callback: Function to report model progress for API. 
""" if train_config_dict is None: train_config_dict = {} train_config_dict, vamp_prior_config = self._split_vamp_prior_config(train_config_dict) processed_dataset = self.data_processor.process_dataset(dataset) self._train( dataset=processed_dataset, report_progress_callback=report_progress_callback, **train_config_dict, ) self._save_vamp_prior(processed_dataset, **vamp_prior_config) @abstractmethod def _train(self, *args, **kwargs): pass def impute(self, data, mask, impute_config_dict=None, *, vamp_prior_data=None, average=True): if vamp_prior_data is None: return impute(self, data, mask, impute_config_dict=impute_config_dict, average=average) else: processed_vamp_data_array = self.data_processor.process_data_and_masks(*vamp_prior_data) # Keep processed VampPrior data on CPU until we sample inducing points, as this data can be large and is # not required for any CUDA computations. return impute( self, data, mask, impute_config_dict=impute_config_dict, average=average, vamp_prior_data=to_tensors(*processed_vamp_data_array, device=torch.device("cpu")), ) def impute_processed_batch( self: PVAEBaseModel, data: torch.Tensor, mask: torch.Tensor, *, sample_count: int, preserve_data: bool = True, vamp_prior_data: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, **kwargs, ) -> torch.Tensor: """ Fill in unobserved variables in a minibatch of data using a trained model. Optionally, use a vamp prior to impute empty rows, and optionally replace imputed values with input values for observed features. Assumes data is a torch.Tensor and in processed form (i.e. variables will be in their squashed ranges, and categorical variables will be in one-hot form). Args: data (shape (batch_size, input_dim)): Data to be used to train the model, in processed form. mask (shape (batch_size, input_dim)): Data observation mask, where observed values are 1 and unobserved values are 0. sample_count: Number of imputation samples to generate. vamp_prior_data (Tuple of (torch tensor, torch tensor)): Data to be used to fill variables if using the VAMP prior method. Format: (data, mask). This defaults to None, in which case the VAMP Prior method will not be used. preserve_data (bool): Whether or not to impute data already present. Defaults to True, which keeps data present in input. Returns: imputations (torch.Tensor of shape (sample_count, batch_size, output_dim)): Input data with missing values filled in. """ if not isinstance(data, torch.Tensor) or not isinstance(mask, torch.Tensor): raise ValueError("data and mask should be tensors. 
To work on ndarrays, use impute") assert data.shape == mask.shape assert data.shape[1] == self.input_dim batch_size, num_features = data.shape if self.variables.has_auxiliary: num_features = self.variables.num_processed_non_aux_cols imputations = torch.full((sample_count, batch_size, num_features), np.nan, device=self.device) # vamp_rows are rows where input is completely unobserved vamp_rows = torch.where(mask.sum(dim=1) == 0)[0] if vamp_prior_data is not None and vamp_rows.numel() > 0: imputed_from_vamp = self._impute_from_vamp_prior(sample_count * vamp_rows.numel(), vamp_prior_data) imputed_from_vamp = imputed_from_vamp.reshape(sample_count, vamp_rows.numel(), -1) imputations[:, vamp_rows, :] = imputed_from_vamp not_vamp_rows = torch.where(mask.sum(dim=1) != 0)[0] else: not_vamp_rows = torch.arange(batch_size) if len(not_vamp_rows) > 0: not_vamp_data = data[not_vamp_rows] not_vamp_mask = mask[not_vamp_rows] imputed_not_vamp_data = self._reconstruct_and_reshape( not_vamp_data, not_vamp_mask, sample_count=sample_count, **kwargs ) imputations[:, not_vamp_rows, :] = imputed_not_vamp_data if preserve_data: imputations = restore_preserved_values(self.variables, data, imputations, mask) return imputations def get_model_pll( self: PVAEBaseModel, data: np.ndarray, feature_mask: np.ndarray, target_idx, sample_count: int = 50, ): """ Computes the predictive log-likelihood of the target data given the feature_mask-masked data as input. Args: data (Numpy array of shape (batch_size, feature_count)): Data in unprocessed form to be used to compute the pll. feature_mask (Numpy array of shape (batch_size, feature_count)): Mask indicating conditioning variables for computing the predictive log-likelihood. target_idx (int): Column index of the target variable to compute the likelihood of. sample_count (int): Number of Monte Carlo samples to use from the latent space. Defaults to 50. Returns: predictive_ll (float): Mean predictive log-likelihood (mean taken over batch dim in data). 
""" # Process input data ( proc_feature_data_array, proc_feature_mask_array, ) = self.data_processor.process_data_and_masks(data, feature_mask) proc_feature_data, proc_feature_mask = to_tensors( proc_feature_data_array, proc_feature_mask_array, device=self.device ) # Create target_mask from target_index target_mask = np.zeros_like(data, dtype=bool) target_mask[:, target_idx] = 1 # Process target data ( proc_target_data_array, proc_target_mask_array, ) = self.data_processor.process_data_and_masks(data, target_mask) proc_target_data, proc_target_mask = to_tensors( proc_target_data_array, proc_target_mask_array, device=self.device ) # Expand target data and mask to be shape (sample_count, batch_size, feature_count) proc_target_data = proc_target_data.expand(sample_count, *proc_target_data.shape) proc_target_mask = proc_target_mask.expand(sample_count, *proc_target_mask.shape) # Compute PVAE outputs given input features (parameters of the Gaussian mixture) (dec_mean, dec_logvar), _, _ = self.reconstruct(proc_feature_data, proc_feature_mask, count=sample_count) # Compute Gaussian negative log-likelihood per sample in sample_count gnll = gaussian_negative_log_likelihood( proc_target_data, dec_mean, dec_logvar, mask=proc_target_mask, sum_type=None ) gnll = gnll[:, :, target_idx] predictive_ll = -gnll predictive_ll = torch.logsumexp(predictive_ll, dim=0) - np.log(sample_count) predictive_ll = predictive_ll.mean() return predictive_ll def get_marginal_log_likelihood( self, impute_config: Dict[str, int], data: Union[np.ndarray, csr_matrix], observed_mask: Optional[Union[np.ndarray, csr_matrix]] = None, target_mask: Optional[Union[np.ndarray, csr_matrix]] = None, evaluate_imputation: Optional[bool] = False, num_importance_samples: int = 5000, **kwargs, ) -> float: """ Estimate marginal log-likelihood of the data using importance sampling: - Imputation MLL -> imputed data given the observed data log p(x_u|x_o) if evaluate_imputation is True - Reconstruction MLL -> all data log p(x) otherwise Args: impute_config: Dictionary containing options for inference. data: Data in unprocessed form to be used with shape (num_rows, input_dim). mask: If not None, mask indicating observed variables with shape (num_rows, input_dim). 1 is observed, 0 is un-observed. If None everything is marked as observed. target_mask: Values masked during imputation to use as prediction targets, where 1 is a target, 0 is not. If None, nothing is marked as an imputation target. evaluate_imputation: Whether to estimate Imputation MLL log p(x_u|x_o) or Reconstruction MLL log p(x). num_importance_samples: The number of importance samples to be taken. **kwargs: Extra keyword arguments required by reconstruct. Returns: marginal_log_likelihood: The estimated marginal log likelihood averaged across data points. """ # TODO(17895): Add Generation MLL option to the marginal log-likelihood metric. 
batch_size = impute_config["batch_size"] # Assumed to only work on dense arrays for now if issparse(data): data = cast(csr_matrix, data) data = data.toarray() if issparse(observed_mask): observed_mask = cast(csr_matrix, observed_mask) observed_mask = observed_mask.toarray() if issparse(target_mask): target_mask = cast(csr_matrix, target_mask) target_mask = target_mask.toarray() if observed_mask is None: observed_mask = np.ones_like(data, dtype=bool) if target_mask is None: assert not evaluate_imputation target_mask = np.zeros_like(data, dtype=bool) assert data.shape == observed_mask.shape assert data.shape == target_mask.shape num_rows, _ = data.shape # TODO(17896): Add processing and batching of extra data objects processed_data, processed_obs_mask, processed_target_mask = self.data_processor.process_data_and_masks( data, observed_mask, target_mask ) marginal_log_likelihood = np.empty((num_rows,), dtype=processed_data.dtype) with torch.no_grad():
dataloader = create_dataloader(
7
2023-11-21 12:55:08+00:00
24k
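A note on the PVAE record above: get_model_pll averages its Monte Carlo likelihood samples in log space, via torch.logsumexp(predictive_ll, dim=0) - np.log(sample_count), rather than exponentiating and averaging. A minimal, self-contained sketch of why that matters (the synthetic log-likelihood values below are illustrative stand-ins, not taken from the repo):

import math

import torch


def mc_predictive_ll(per_sample_ll: torch.Tensor) -> torch.Tensor:
    """Stable log(1/S * sum_s exp(ll_s)) over dim 0, mirroring get_model_pll.

    per_sample_ll: shape (sample_count, batch_size) of per-sample log-likelihoods.
    """
    sample_count = per_sample_ll.shape[0]
    return torch.logsumexp(per_sample_ll, dim=0) - math.log(sample_count)


# exp(-800) underflows to 0.0 in float32, so a naive log(mean(exp(ll))) would
# return -inf; the logsumexp form returns the exact answer.
ll = torch.full((50, 8), -800.0)
print(mc_predictive_ll(ll).mean())  # tensor(-800.)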
jiawei-ren/dreamgaussian4d
diffusers/src/diffusers/pipelines/deepfloyd_if/watermark.py
[ { "identifier": "ConfigMixin", "path": "diffusers/src/diffusers/configuration_utils.py", "snippet": "class ConfigMixin:\n r\"\"\"\n Base class for all configuration classes. All configuration parameters are stored under `self.config`. Also\n provides the [`~ConfigMixin.from_config`] and [`~Conf...
from typing import List from PIL import Image from ...configuration_utils import ConfigMixin from ...models.modeling_utils import ModelMixin from ...utils import PIL_INTERPOLATION import PIL.Image import torch
17,026
class IFWatermarker(ModelMixin, ConfigMixin): def __init__(self): super().__init__() self.register_buffer("watermark_image", torch.zeros((62, 62, 4))) self.watermark_image_as_pil = None def apply_watermark(self, images: List[PIL.Image.Image], sample_size=None): # copied from https://github.com/deep-floyd/IF/blob/b77482e36ca2031cb94dbca1001fc1e6400bf4ab/deepfloyd_if/modules/base.py#L287 h = images[0].height w = images[0].width sample_size = sample_size or h coef = min(h / sample_size, w / sample_size) img_h, img_w = (int(h / coef), int(w / coef)) if coef < 1 else (h, w) S1, S2 = 1024**2, img_w * img_h K = (S2 / S1) ** 0.5 wm_size, wm_x, wm_y = int(K * 62), img_w - int(14 * K), img_h - int(14 * K) if self.watermark_image_as_pil is None: watermark_image = self.watermark_image.to(torch.uint8).cpu().numpy() watermark_image = Image.fromarray(watermark_image, mode="RGBA") self.watermark_image_as_pil = watermark_image wm_img = self.watermark_image_as_pil.resize(
class IFWatermarker(ModelMixin, ConfigMixin): def __init__(self): super().__init__() self.register_buffer("watermark_image", torch.zeros((62, 62, 4))) self.watermark_image_as_pil = None def apply_watermark(self, images: List[PIL.Image.Image], sample_size=None): # copied from https://github.com/deep-floyd/IF/blob/b77482e36ca2031cb94dbca1001fc1e6400bf4ab/deepfloyd_if/modules/base.py#L287 h = images[0].height w = images[0].width sample_size = sample_size or h coef = min(h / sample_size, w / sample_size) img_h, img_w = (int(h / coef), int(w / coef)) if coef < 1 else (h, w) S1, S2 = 1024**2, img_w * img_h K = (S2 / S1) ** 0.5 wm_size, wm_x, wm_y = int(K * 62), img_w - int(14 * K), img_h - int(14 * K) if self.watermark_image_as_pil is None: watermark_image = self.watermark_image.to(torch.uint8).cpu().numpy() watermark_image = Image.fromarray(watermark_image, mode="RGBA") self.watermark_image_as_pil = watermark_image wm_img = self.watermark_image_as_pil.resize(
(wm_size, wm_size), PIL_INTERPOLATION["bicubic"], reducing_gap=None
2
2023-12-28 08:17:40+00:00
24k
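The watermark placement in the record above scales a mark designed for a 1024x1024 canvas: K = sqrt(image_area / 1024**2), so the nominal 62 px watermark and 14 px bottom-right margin grow or shrink with image area. A standalone sketch of just that geometry (pure Python, no PIL; variable names mirror the snippet, and the sizes are the ones hard-coded there):

import math


def watermark_geometry(img_w: int, img_h: int):
    """62 px mark with a 14 px bottom-right margin, nominal at 1024x1024."""
    K = math.sqrt((img_w * img_h) / 1024**2)  # area-based scale factor
    wm_size = int(K * 62)                     # watermark edge length in pixels
    wm_x = img_w - int(14 * K)                # right-edge anchor
    wm_y = img_h - int(14 * K)                # bottom-edge anchor
    return wm_size, wm_x, wm_y


print(watermark_geometry(1024, 1024))  # (62, 1010, 1010) -- the nominal layout
print(watermark_geometry(512, 512))    # (31, 505, 505) -- everything halves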
FoundationVision/UniRef
detectron2/utils/visualizer.py
[ { "identifier": "MetadataCatalog", "path": "detectron2/data/catalog.py", "snippet": "class _DatasetCatalog(UserDict):\nclass Metadata(types.SimpleNamespace):\nclass _MetadataCatalog(UserDict):\n def register(self, name, func):\n def get(self, name):\n def list(self) -> List[str]:\n def remov...
import colorsys import logging import math import cv2 import matplotlib as mpl import matplotlib.colors as mplc import matplotlib.figure as mplfigure import numpy as np import pycocotools.mask as mask_util import torch from enum import Enum, unique from detectron2.data import MetadataCatalog from detectron2.structures import ( BitMasks, Boxes, BoxMode, Keypoints, PolygonMasks, RotatedBoxes, ) from detectron2.utils.file_io import PathManager from matplotlib.backends.backend_agg import FigureCanvasAgg from PIL import Image from .colormap import random_color from panopticapi.utils import rgb2id
17,020
soft_mask (ndarray): float array of shape (H, W), each value in [0, 1]. color: color of the mask. Refer to `matplotlib.colors` for a full list of formats that are accepted. If None, will pick a random color. text (str): if not None, will be drawn on the object alpha (float): blending coefficient. Smaller values lead to more transparent masks. Returns: output (VisImage): image object with mask drawn. """ if color is None: color = random_color(rgb=True, maximum=1) color = mplc.to_rgb(color) shape2d = (soft_mask.shape[0], soft_mask.shape[1]) rgba = np.zeros(shape2d + (4,), dtype="float32") rgba[:, :, :3] = color rgba[:, :, 3] = soft_mask * alpha self.output.ax.imshow(rgba, extent=(0, self.output.width, self.output.height, 0)) if text is not None: lighter_color = self._change_color_brightness(color, brightness_factor=0.7) binary_mask = (soft_mask > 0.5).astype("uint8") self._draw_text_in_mask(binary_mask, text, lighter_color) return self.output def draw_polygon(self, segment, color, edge_color=None, alpha=0.5): """ Args: segment: numpy array of shape Nx2, containing all the points in the polygon. color: color of the polygon. Refer to `matplotlib.colors` for a full list of formats that are accepted. edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a full list of formats that are accepted. If not provided, a darker shade of the polygon color will be used instead. alpha (float): blending coefficient. Smaller values lead to more transparent masks. Returns: output (VisImage): image object with polygon drawn. """ if edge_color is None: # make edge color darker than the polygon color if alpha > 0.8: edge_color = self._change_color_brightness(color, brightness_factor=-0.7) else: edge_color = color edge_color = mplc.to_rgb(edge_color) + (1,) polygon = mpl.patches.Polygon( segment, fill=True, facecolor=mplc.to_rgb(color) + (alpha,), edgecolor=edge_color, linewidth=max(self._default_font_size // 15 * self.output.scale, 1), ) self.output.ax.add_patch(polygon) return self.output """ Internal methods: """ def _jitter(self, color): """ Randomly modifies given color to produce a slightly different color than the color given. Args: color (tuple[double]): a tuple of 3 elements, containing the RGB values of the color picked. The values in the list are in the [0.0, 1.0] range. Returns: jittered_color (tuple[double]): a tuple of 3 elements, containing the RGB values of the color after being jittered. The values in the list are in the [0.0, 1.0] range. """ color = mplc.to_rgb(color) vec = np.random.rand(3) # better to do it in another color space vec = vec / np.linalg.norm(vec) * 0.5 res = np.clip(vec + color, 0, 1) return tuple(res) def _create_grayscale_image(self, mask=None): """ Create a grayscale version of the original image. The colors in masked area, if given, will be kept. """ img_bw = self.img.astype("f4").mean(axis=2) img_bw = np.stack([img_bw] * 3, axis=2) if mask is not None: img_bw[mask] = self.img[mask] return img_bw def _change_color_brightness(self, color, brightness_factor): """ Depending on the brightness_factor, gives a lighter or darker color i.e. a color with less or more saturation than the original color. Args: color: color of the polygon. Refer to `matplotlib.colors` for a full list of formats that are accepted. brightness_factor (float): a value in [-1.0, 1.0] range. A lightness factor of 0 will correspond to no change, a factor in [-1.0, 0) range will result in a darker color and a factor in (0, 1.0] range will result in a lighter color. 
Returns: modified_color (tuple[double]): a tuple containing the RGB values of the modified color. Each value in the tuple is in the [0.0, 1.0] range. """ assert brightness_factor >= -1.0 and brightness_factor <= 1.0 color = mplc.to_rgb(color) polygon_color = colorsys.rgb_to_hls(*mplc.to_rgb(color)) modified_lightness = polygon_color[1] + (brightness_factor * polygon_color[1]) modified_lightness = 0.0 if modified_lightness < 0.0 else modified_lightness modified_lightness = 1.0 if modified_lightness > 1.0 else modified_lightness modified_color = colorsys.hls_to_rgb(polygon_color[0], modified_lightness, polygon_color[2]) return modified_color def _convert_boxes(self, boxes): """ Convert different format of boxes to an NxB array, where B = 4 or 5 is the box dimension. """
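The _change_color_brightness helper that closes the cropped snippet above works in HLS space: convert RGB to HLS, scale the lightness channel by (1 + brightness_factor), clamp to [0, 1], and convert back. A dependency-free sketch of the same transform (plain RGB tuples instead of matplotlib color specs):

import colorsys


def change_brightness(rgb, brightness_factor):
    """Lighten (factor > 0) or darken (factor < 0) an (r, g, b) tuple in [0, 1]."""
    assert -1.0 <= brightness_factor <= 1.0
    h, l, s = colorsys.rgb_to_hls(*rgb)
    l = min(max(l + brightness_factor * l, 0.0), 1.0)  # scale lightness, then clamp
    return colorsys.hls_to_rgb(h, l, s)


print(change_brightness((0.2, 0.6, 0.2), 0.7))   # lighter green, as used for label text
print(change_brightness((0.2, 0.6, 0.2), -0.7))  # darker green, as used for polygon edges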
# Copyright (c) Facebook, Inc. and its affiliates. logger = logging.getLogger(__name__) __all__ = ["ColorMode", "VisImage", "Visualizer"] _SMALL_OBJECT_AREA_THRESH = 1000 _LARGE_MASK_AREA_THRESH = 120000 _OFF_WHITE = (1.0, 1.0, 240.0 / 255) _BLACK = (0, 0, 0) _RED = (1.0, 0, 0) _KEYPOINT_THRESHOLD = 0.05 @unique class ColorMode(Enum): """ Enum of different color modes to use for instance visualizations. """ IMAGE = 0 """ Picks a random color for every instance and overlay segmentations with low opacity. """ SEGMENTATION = 1 """ Let instances of the same category have similar colors (from metadata.thing_colors), and overlay them with high opacity. This provides more attention on the quality of segmentation. """ IMAGE_BW = 2 """ Same as IMAGE, but convert all areas without masks to gray-scale. Only available for drawing per-instance mask predictions. """ class GenericMask: """ Attribute: polygons (list[ndarray]): list[ndarray]: polygons for this mask. Each ndarray has format [x, y, x, y, ...] mask (ndarray): a binary mask """ def __init__(self, mask_or_polygons, height, width): self._mask = self._polygons = self._has_holes = None self.height = height self.width = width m = mask_or_polygons if isinstance(m, dict): # RLEs assert "counts" in m and "size" in m if isinstance(m["counts"], list): # uncompressed RLEs h, w = m["size"] assert h == height and w == width m = mask_util.frPyObjects(m, h, w) self._mask = mask_util.decode(m)[:, :] return if isinstance(m, list): # list[ndarray] self._polygons = [np.asarray(x).reshape(-1) for x in m] return if isinstance(m, np.ndarray): # assumed to be a binary mask assert m.shape[1] != 2, m.shape assert m.shape == ( height, width, ), f"mask shape: {m.shape}, target dims: {height}, {width}" self._mask = m.astype("uint8") return raise ValueError("GenericMask cannot handle object {} of type '{}'".format(m, type(m))) @property def mask(self): if self._mask is None: self._mask = self.polygons_to_mask(self._polygons) return self._mask @property def polygons(self): if self._polygons is None: self._polygons, self._has_holes = self.mask_to_polygons(self._mask) return self._polygons @property def has_holes(self): if self._has_holes is None: if self._mask is not None: self._polygons, self._has_holes = self.mask_to_polygons(self._mask) else: self._has_holes = False # if original format is polygon, does not have holes return self._has_holes def mask_to_polygons(self, mask): # cv2.RETR_CCOMP flag retrieves all the contours and arranges them to a 2-level # hierarchy. External contours (boundary) of the object are placed in hierarchy-1. # Internal contours (holes) are placed in hierarchy-2. # cv2.CHAIN_APPROX_NONE flag gets vertices of polygons from contours. mask = np.ascontiguousarray(mask) # some versions of cv2 does not support incontiguous arr res = cv2.findContours(mask.astype("uint8"), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE) hierarchy = res[-1] if hierarchy is None: # empty mask return [], False has_holes = (hierarchy.reshape(-1, 4)[:, 3] >= 0).sum() > 0 res = res[-2] res = [x.flatten() for x in res] # These coordinates from OpenCV are integers in range [0, W-1 or H-1]. # We add 0.5 to turn them into real-value coordinate space. A better solution # would be to first +0.5 and then dilate the returned polygon by 0.5. 
res = [x + 0.5 for x in res if len(x) >= 6] return res, has_holes def polygons_to_mask(self, polygons): rle = mask_util.frPyObjects(polygons, self.height, self.width) rle = mask_util.merge(rle) return mask_util.decode(rle)[:, :] def area(self): return self.mask.sum() def bbox(self): p = mask_util.frPyObjects(self.polygons, self.height, self.width) p = mask_util.merge(p) bbox = mask_util.toBbox(p) bbox[2] += bbox[0] bbox[3] += bbox[1] return bbox class _PanopticPrediction: """ Unify different panoptic annotation/prediction formats """ def __init__(self, panoptic_seg, segments_info, metadata=None): if segments_info is None: assert metadata is not None # If "segments_info" is None, we assume "panoptic_img" is a # H*W int32 image storing the panoptic_id in the format of # category_id * label_divisor + instance_id. We reserve -1 for # VOID label. label_divisor = metadata.label_divisor segments_info = [] for panoptic_label in np.unique(panoptic_seg.numpy()): if panoptic_label == -1: # VOID region. continue pred_class = panoptic_label // label_divisor isthing = pred_class in metadata.thing_dataset_id_to_contiguous_id.values() segments_info.append( { "id": int(panoptic_label), "category_id": int(pred_class), "isthing": bool(isthing), } ) del metadata self._seg = panoptic_seg self._sinfo = {s["id"]: s for s in segments_info} # seg id -> seg info segment_ids, areas = torch.unique(panoptic_seg, sorted=True, return_counts=True) areas = areas.numpy() sorted_idxs = np.argsort(-areas) self._seg_ids, self._seg_areas = segment_ids[sorted_idxs], areas[sorted_idxs] self._seg_ids = self._seg_ids.tolist() for sid, area in zip(self._seg_ids, self._seg_areas): if sid in self._sinfo: self._sinfo[sid]["area"] = float(area) def non_empty_mask(self): """ Returns: (H, W) array, a mask for all pixels that have a prediction """ empty_ids = [] for id in self._seg_ids: if id not in self._sinfo: empty_ids.append(id) if len(empty_ids) == 0: return np.zeros(self._seg.shape, dtype=np.uint8) assert ( len(empty_ids) == 1 ), ">1 ids correspond to no labels. This is currently not supported" return (self._seg != empty_ids[0]).numpy().astype(bool) def semantic_masks(self): for sid in self._seg_ids: sinfo = self._sinfo.get(sid) if sinfo is None or sinfo["isthing"]: # Some pixels (e.g. id 0 in PanopticFPN) have no instance or semantic predictions. continue yield (self._seg == sid).numpy().astype(bool), sinfo def instance_masks(self): for sid in self._seg_ids: sinfo = self._sinfo.get(sid) if sinfo is None or not sinfo["isthing"]: continue mask = (self._seg == sid).numpy().astype(bool) if mask.sum() > 0: yield mask, sinfo def _create_text_labels(classes, scores, class_names, is_crowd=None): """ Args: classes (list[int] or None): scores (list[float] or None): class_names (list[str] or None): is_crowd (list[bool] or None): Returns: list[str] or None """ labels = None if classes is not None: if class_names is not None and len(class_names) > 0: labels = [class_names[i] for i in classes] else: labels = [str(i) for i in classes] if scores is not None: if labels is None: labels = ["{:.0f}%".format(s * 100) for s in scores] else: labels = ["{} {:.0f}%".format(l, s * 100) for l, s in zip(labels, scores)] if labels is not None and is_crowd is not None: labels = [l + ("|crowd" if crowd else "") for l, crowd in zip(labels, is_crowd)] return labels class VisImage: def __init__(self, img, scale=1.0): """ Args: img (ndarray): an RGB image of shape (H, W, 3) in range [0, 255]. 
scale (float): scale the input image """ self.img = img self.scale = scale self.width, self.height = img.shape[1], img.shape[0] self._setup_figure(img) def _setup_figure(self, img): """ Args: Same as in :meth:`__init__()`. Returns: fig (matplotlib.pyplot.figure): top level container for all the image plot elements. ax (matplotlib.pyplot.Axes): contains figure elements and sets the coordinate system. """ fig = mplfigure.Figure(frameon=False) self.dpi = fig.get_dpi() # add a small 1e-2 to avoid precision lost due to matplotlib's truncation # (https://github.com/matplotlib/matplotlib/issues/15363) fig.set_size_inches( (self.width * self.scale + 1e-2) / self.dpi, (self.height * self.scale + 1e-2) / self.dpi, ) self.canvas = FigureCanvasAgg(fig) # self.canvas = mpl.backends.backend_cairo.FigureCanvasCairo(fig) ax = fig.add_axes([0.0, 0.0, 1.0, 1.0]) ax.axis("off") self.fig = fig self.ax = ax self.reset_image(img) def reset_image(self, img): """ Args: img: same as in __init__ """ img = img.astype("uint8") self.ax.imshow(img, extent=(0, self.width, self.height, 0), interpolation="nearest") def save(self, filepath): """ Args: filepath (str): a string that contains the absolute path, including the file name, where the visualized image will be saved. """ self.fig.savefig(filepath) def get_image(self): """ Returns: ndarray: the visualized image of shape (H, W, 3) (RGB) in uint8 type. The shape is scaled w.r.t the input image using the given `scale` argument. """ canvas = self.canvas s, (width, height) = canvas.print_to_buffer() # buf = io.BytesIO() # works for cairo backend # canvas.print_rgba(buf) # width, height = self.width, self.height # s = buf.getvalue() buffer = np.frombuffer(s, dtype="uint8") img_rgba = buffer.reshape(height, width, 4) rgb, alpha = np.split(img_rgba, [3], axis=2) return rgb.astype("uint8") class Visualizer: """ Visualizer that draws data about detection/segmentation on images. It contains methods like `draw_{text,box,circle,line,binary_mask,polygon}` that draw primitive objects to images, as well as high-level wrappers like `draw_{instance_predictions,sem_seg,panoptic_seg_predictions,dataset_dict}` that draw composite data in some pre-defined style. Note that the exact visualization style for the high-level wrappers are subject to change. Style such as color, opacity, label contents, visibility of labels, or even the visibility of objects themselves (e.g. when the object is too small) may change according to different heuristics, as long as the results still look visually reasonable. To obtain a consistent style, you can implement custom drawing functions with the abovementioned primitive methods instead. If you need more customized visualization styles, you can process the data yourself following their format documented in tutorials (:doc:`/tutorials/models`, :doc:`/tutorials/datasets`). This class does not intend to satisfy everyone's preference on drawing styles. This visualizer focuses on high rendering quality rather than performance. It is not designed to be used for real-time applications. """ # TODO implement a fast, rasterized version using OpenCV def __init__(self, img_rgb, metadata=None, scale=1.0, instance_mode=ColorMode.IMAGE): """ Args: img_rgb: a numpy array of shape (H, W, C), where H and W correspond to the height and width of the image respectively. C is the number of color channels. The image is required to be in RGB format since that is a requirement of the Matplotlib library. The image is also expected to be in the range [0, 255]. 
metadata (Metadata): dataset metadata (e.g. class names and colors) instance_mode (ColorMode): defines one of the pre-defined style for drawing instances on an image. """ self.img = np.asarray(img_rgb).clip(0, 255).astype(np.uint8) if metadata is None: metadata = MetadataCatalog.get("__nonexist__") self.metadata = metadata self.output = VisImage(self.img, scale=scale) self.cpu_device = torch.device("cpu") # too small texts are useless, therefore clamp to 9 self._default_font_size = max( np.sqrt(self.output.height * self.output.width) // 90, 10 // scale ) self._instance_mode = instance_mode self.keypoint_threshold = _KEYPOINT_THRESHOLD def draw_instance_predictions(self, predictions): """ Draw instance-level prediction results on an image. Args: predictions (Instances): the output of an instance detection/segmentation model. Following fields will be used to draw: "pred_boxes", "pred_classes", "scores", "pred_masks" (or "pred_masks_rle"). Returns: output (VisImage): image object with visualizations. """ boxes = predictions.pred_boxes if predictions.has("pred_boxes") else None scores = predictions.scores if predictions.has("scores") else None classes = predictions.pred_classes.tolist() if predictions.has("pred_classes") else None labels = _create_text_labels(classes, scores, self.metadata.get("thing_classes", None)) keypoints = predictions.pred_keypoints if predictions.has("pred_keypoints") else None if predictions.has("pred_masks"): masks = np.asarray(predictions.pred_masks) masks = [GenericMask(x, self.output.height, self.output.width) for x in masks] else: masks = None if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("thing_colors"): colors = [ self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in classes ] alpha = 0.8 else: colors = None alpha = 0.5 if self._instance_mode == ColorMode.IMAGE_BW: self.output.reset_image( self._create_grayscale_image( (predictions.pred_masks.any(dim=0) > 0).numpy() if predictions.has("pred_masks") else None ) ) alpha = 0.3 self.overlay_instances( masks=masks, boxes=boxes, labels=labels, keypoints=keypoints, assigned_colors=colors, alpha=alpha, ) return self.output def draw_sem_seg(self, sem_seg, area_threshold=None, alpha=0.8): """ Draw semantic segmentation predictions/labels. Args: sem_seg (Tensor or ndarray): the segmentation of shape (H, W). Each value is the integer label of the pixel. area_threshold (int): segments with less than `area_threshold` are not drawn. alpha (float): the larger it is, the more opaque the segmentations are. Returns: output (VisImage): image object with visualizations. """ if isinstance(sem_seg, torch.Tensor): sem_seg = sem_seg.numpy() labels, areas = np.unique(sem_seg, return_counts=True) sorted_idxs = np.argsort(-areas).tolist() labels = labels[sorted_idxs] for label in filter(lambda l: l < len(self.metadata.stuff_classes), labels): try: mask_color = [x / 255 for x in self.metadata.stuff_colors[label]] except (AttributeError, IndexError): mask_color = None binary_mask = (sem_seg == label).astype(np.uint8) text = self.metadata.stuff_classes[label] self.draw_binary_mask( binary_mask, color=mask_color, edge_color=_OFF_WHITE, text=text, alpha=alpha, area_threshold=area_threshold, ) return self.output def draw_panoptic_seg(self, panoptic_seg, segments_info, area_threshold=None, alpha=0.7): """ Draw panoptic prediction annotations or results. Args: panoptic_seg (Tensor): of shape (height, width) where the values are ids for each segment. 
segments_info (list[dict] or None): Describe each segment in `panoptic_seg`. If it is a ``list[dict]``, each dict contains keys "id", "category_id". If None, category id of each pixel is computed by ``pixel // metadata.label_divisor``. area_threshold (int): stuff segments with less than `area_threshold` are not drawn. Returns: output (VisImage): image object with visualizations. """ pred = _PanopticPrediction(panoptic_seg, segments_info, self.metadata) if self._instance_mode == ColorMode.IMAGE_BW: self.output.reset_image(self._create_grayscale_image(pred.non_empty_mask())) # draw mask for all semantic segments first i.e. "stuff" for mask, sinfo in pred.semantic_masks(): category_idx = sinfo["category_id"] try: mask_color = [x / 255 for x in self.metadata.stuff_colors[category_idx]] except AttributeError: mask_color = None text = self.metadata.stuff_classes[category_idx] self.draw_binary_mask( mask, color=mask_color, edge_color=_OFF_WHITE, text=text, alpha=alpha, area_threshold=area_threshold, ) # draw mask for all instances second all_instances = list(pred.instance_masks()) if len(all_instances) == 0: return self.output masks, sinfo = list(zip(*all_instances)) category_ids = [x["category_id"] for x in sinfo] try: scores = [x["score"] for x in sinfo] except KeyError: scores = None labels = _create_text_labels( category_ids, scores, self.metadata.thing_classes, [x.get("iscrowd", 0) for x in sinfo] ) try: colors = [ self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in category_ids ] except AttributeError: colors = None self.overlay_instances(masks=masks, labels=labels, assigned_colors=colors, alpha=alpha) return self.output draw_panoptic_seg_predictions = draw_panoptic_seg # backward compatibility def draw_dataset_dict(self, dic): """ Draw annotations/segmentations in Detectron2 Dataset format. Args: dic (dict): annotation/segmentation data of one image, in Detectron2 Dataset format. Returns: output (VisImage): image object with visualizations. 
""" annos = dic.get("annotations", None) if annos: if "segmentation" in annos[0]: masks = [x["segmentation"] for x in annos] else: masks = None if "keypoints" in annos[0]: keypts = [x["keypoints"] for x in annos] keypts = np.array(keypts).reshape(len(annos), -1, 3) else: keypts = None boxes = [ BoxMode.convert(x["bbox"], x["bbox_mode"], BoxMode.XYXY_ABS) if len(x["bbox"]) == 4 else x["bbox"] for x in annos ] colors = None category_ids = [x["category_id"] for x in annos] if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("thing_colors"): colors = [ self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in category_ids ] names = self.metadata.get("thing_classes", None) labels = _create_text_labels( category_ids, scores=None, class_names=names, is_crowd=[x.get("iscrowd", 0) for x in annos], ) self.overlay_instances( labels=labels, boxes=boxes, masks=masks, keypoints=keypts, assigned_colors=colors ) sem_seg = dic.get("sem_seg", None) if sem_seg is None and "sem_seg_file_name" in dic: with PathManager.open(dic["sem_seg_file_name"], "rb") as f: sem_seg = Image.open(f) sem_seg = np.asarray(sem_seg, dtype="uint8") if sem_seg is not None: self.draw_sem_seg(sem_seg, area_threshold=0, alpha=0.5) pan_seg = dic.get("pan_seg", None) if pan_seg is None and "pan_seg_file_name" in dic: with PathManager.open(dic["pan_seg_file_name"], "rb") as f: pan_seg = Image.open(f) pan_seg = np.asarray(pan_seg) pan_seg = rgb2id(pan_seg) if pan_seg is not None: segments_info = dic["segments_info"] pan_seg = torch.tensor(pan_seg) self.draw_panoptic_seg(pan_seg, segments_info, area_threshold=0, alpha=0.5) return self.output def overlay_instances( self, *, boxes=None, labels=None, masks=None, keypoints=None, assigned_colors=None, alpha=0.5, ): """ Args: boxes (Boxes, RotatedBoxes or ndarray): either a :class:`Boxes`, or an Nx4 numpy array of XYXY_ABS format for the N objects in a single image, or a :class:`RotatedBoxes`, or an Nx5 numpy array of (x_center, y_center, width, height, angle_degrees) format for the N objects in a single image, labels (list[str]): the text to be displayed for each instance. masks (masks-like object): Supported types are: * :class:`detectron2.structures.PolygonMasks`, :class:`detectron2.structures.BitMasks`. * list[list[ndarray]]: contains the segmentation masks for all objects in one image. The first level of the list corresponds to individual instances. The second level to all the polygon that compose the instance, and the third level to the polygon coordinates. The third level should have the format of [x0, y0, x1, y1, ..., xn, yn] (n >= 3). * list[ndarray]: each ndarray is a binary mask of shape (H, W). * list[dict]: each dict is a COCO-style RLE. keypoints (Keypoint or array like): an array-like object of shape (N, K, 3), where the N is the number of instances and K is the number of keypoints. The last dimension corresponds to (x, y, visibility or score). assigned_colors (list[matplotlib.colors]): a list of colors, where each color corresponds to each mask or box in the image. Refer to 'matplotlib.colors' for full list of formats that the colors are accepted in. Returns: output (VisImage): image object with visualizations. 
""" num_instances = 0 if boxes is not None: boxes = self._convert_boxes(boxes) num_instances = len(boxes) if masks is not None: masks = self._convert_masks(masks) if num_instances: assert len(masks) == num_instances else: num_instances = len(masks) if keypoints is not None: if num_instances: assert len(keypoints) == num_instances else: num_instances = len(keypoints) keypoints = self._convert_keypoints(keypoints) if labels is not None: assert len(labels) == num_instances if assigned_colors is None: assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)] if num_instances == 0: return self.output if boxes is not None and boxes.shape[1] == 5: return self.overlay_rotated_instances( boxes=boxes, labels=labels, assigned_colors=assigned_colors ) # Display in largest to smallest order to reduce occlusion. areas = None if boxes is not None: areas = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1) elif masks is not None: areas = np.asarray([x.area() for x in masks]) if areas is not None: sorted_idxs = np.argsort(-areas).tolist() # Re-order overlapped instances in descending order. boxes = boxes[sorted_idxs] if boxes is not None else None labels = [labels[k] for k in sorted_idxs] if labels is not None else None masks = [masks[idx] for idx in sorted_idxs] if masks is not None else None assigned_colors = [assigned_colors[idx] for idx in sorted_idxs] keypoints = keypoints[sorted_idxs] if keypoints is not None else None for i in range(num_instances): color = assigned_colors[i] if boxes is not None: self.draw_box(boxes[i], edge_color=color) if masks is not None: for segment in masks[i].polygons: self.draw_polygon(segment.reshape(-1, 2), color, alpha=alpha) if labels is not None: # first get a box if boxes is not None: x0, y0, x1, y1 = boxes[i] text_pos = (x0, y0) # if drawing boxes, put text on the box corner. horiz_align = "left" elif masks is not None: # skip small mask without polygon if len(masks[i].polygons) == 0: continue x0, y0, x1, y1 = masks[i].bbox() # draw text in the center (defined by median) when box is not drawn # median is less sensitive to outliers. text_pos = np.median(masks[i].mask.nonzero(), axis=1)[::-1] horiz_align = "center" else: continue # drawing the box confidence for keypoints isn't very useful. # for small objects, draw text at the side to avoid occlusion instance_area = (y1 - y0) * (x1 - x0) if ( instance_area < _SMALL_OBJECT_AREA_THRESH * self.output.scale or y1 - y0 < 40 * self.output.scale ): if y1 >= self.output.height - 5: text_pos = (x1, y0) else: text_pos = (x0, y1) height_ratio = (y1 - y0) / np.sqrt(self.output.height * self.output.width) lighter_color = self._change_color_brightness(color, brightness_factor=0.7) font_size = ( np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) * 0.5 * self._default_font_size ) self.draw_text( labels[i], text_pos, color=lighter_color, horizontal_alignment=horiz_align, font_size=font_size, ) # draw keypoints if keypoints is not None: for keypoints_per_instance in keypoints: self.draw_and_connect_keypoints(keypoints_per_instance) return self.output def overlay_rotated_instances(self, boxes=None, labels=None, assigned_colors=None): """ Args: boxes (ndarray): an Nx5 numpy array of (x_center, y_center, width, height, angle_degrees) format for the N objects in a single image. labels (list[str]): the text to be displayed for each instance. assigned_colors (list[matplotlib.colors]): a list of colors, where each color corresponds to each mask or box in the image. 
Refer to 'matplotlib.colors' for full list of formats that the colors are accepted in. Returns: output (VisImage): image object with visualizations. """ num_instances = len(boxes) if assigned_colors is None: assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)] if num_instances == 0: return self.output # Display in largest to smallest order to reduce occlusion. if boxes is not None: areas = boxes[:, 2] * boxes[:, 3] sorted_idxs = np.argsort(-areas).tolist() # Re-order overlapped instances in descending order. boxes = boxes[sorted_idxs] labels = [labels[k] for k in sorted_idxs] if labels is not None else None colors = [assigned_colors[idx] for idx in sorted_idxs] for i in range(num_instances): self.draw_rotated_box_with_label( boxes[i], edge_color=colors[i], label=labels[i] if labels is not None else None ) return self.output def draw_and_connect_keypoints(self, keypoints): """ Draws keypoints of an instance and follows the rules for keypoint connections to draw lines between appropriate keypoints. This follows color heuristics for line color. Args: keypoints (Tensor): a tensor of shape (K, 3), where K is the number of keypoints and the last dimension corresponds to (x, y, probability). Returns: output (VisImage): image object with visualizations. """ visible = {} keypoint_names = self.metadata.get("keypoint_names") for idx, keypoint in enumerate(keypoints): # draw keypoint x, y, prob = keypoint if prob > self.keypoint_threshold: self.draw_circle((x, y), color=_RED) if keypoint_names: keypoint_name = keypoint_names[idx] visible[keypoint_name] = (x, y) if self.metadata.get("keypoint_connection_rules"): for kp0, kp1, color in self.metadata.keypoint_connection_rules: if kp0 in visible and kp1 in visible: x0, y0 = visible[kp0] x1, y1 = visible[kp1] color = tuple(x / 255.0 for x in color) self.draw_line([x0, x1], [y0, y1], color=color) # draw lines from nose to mid-shoulder and mid-shoulder to mid-hip # Note that this strategy is specific to person keypoints. # For other keypoints, it should just do nothing try: ls_x, ls_y = visible["left_shoulder"] rs_x, rs_y = visible["right_shoulder"] mid_shoulder_x, mid_shoulder_y = (ls_x + rs_x) / 2, (ls_y + rs_y) / 2 except KeyError: pass else: # draw line from nose to mid-shoulder nose_x, nose_y = visible.get("nose", (None, None)) if nose_x is not None: self.draw_line([nose_x, mid_shoulder_x], [nose_y, mid_shoulder_y], color=_RED) try: # draw line from mid-shoulder to mid-hip lh_x, lh_y = visible["left_hip"] rh_x, rh_y = visible["right_hip"] except KeyError: pass else: mid_hip_x, mid_hip_y = (lh_x + rh_x) / 2, (lh_y + rh_y) / 2 self.draw_line([mid_hip_x, mid_shoulder_x], [mid_hip_y, mid_shoulder_y], color=_RED) return self.output """ Primitive drawing functions: """ def draw_text( self, text, position, *, font_size=None, color="g", horizontal_alignment="center", rotation=0, ): """ Args: text (str): class label position (tuple): a tuple of the x and y coordinates to place text on image. font_size (int, optional): font of the text. If not provided, a font size proportional to the image width is calculated and used. color: color of the text. Refer to `matplotlib.colors` for full list of formats that are accepted. horizontal_alignment (str): see `matplotlib.text.Text` rotation: rotation angle in degrees CCW Returns: output (VisImage): image object with text drawn. 
""" if not font_size: font_size = self._default_font_size # since the text background is dark, we don't want the text to be dark color = np.maximum(list(mplc.to_rgb(color)), 0.2) color[np.argmax(color)] = max(0.8, np.max(color)) x, y = position self.output.ax.text( x, y, text, size=font_size * self.output.scale, family="sans-serif", bbox={"facecolor": "black", "alpha": 0.8, "pad": 0.7, "edgecolor": "none"}, verticalalignment="top", horizontalalignment=horizontal_alignment, color=color, zorder=10, rotation=rotation, ) return self.output def draw_box(self, box_coord, alpha=0.5, edge_color="g", line_style="-"): """ Args: box_coord (tuple): a tuple containing x0, y0, x1, y1 coordinates, where x0 and y0 are the coordinates of the image's top left corner. x1 and y1 are the coordinates of the image's bottom right corner. alpha (float): blending efficient. Smaller values lead to more transparent masks. edge_color: color of the outline of the box. Refer to `matplotlib.colors` for full list of formats that are accepted. line_style (string): the string to use to create the outline of the boxes. Returns: output (VisImage): image object with box drawn. """ x0, y0, x1, y1 = box_coord width = x1 - x0 height = y1 - y0 linewidth = max(self._default_font_size / 4, 1) self.output.ax.add_patch( mpl.patches.Rectangle( (x0, y0), width, height, fill=False, edgecolor=edge_color, linewidth=linewidth * self.output.scale, alpha=alpha, linestyle=line_style, ) ) return self.output def draw_rotated_box_with_label( self, rotated_box, alpha=0.5, edge_color="g", line_style="-", label=None ): """ Draw a rotated box with label on its top-left corner. Args: rotated_box (tuple): a tuple containing (cnt_x, cnt_y, w, h, angle), where cnt_x and cnt_y are the center coordinates of the box. w and h are the width and height of the box. angle represents how many degrees the box is rotated CCW with regard to the 0-degree box. alpha (float): blending efficient. Smaller values lead to more transparent masks. edge_color: color of the outline of the box. Refer to `matplotlib.colors` for full list of formats that are accepted. line_style (string): the string to use to create the outline of the boxes. label (string): label for rotated box. It will not be rendered when set to None. Returns: output (VisImage): image object with box drawn. 
""" cnt_x, cnt_y, w, h, angle = rotated_box area = w * h # use thinner lines when the box is small linewidth = self._default_font_size / ( 6 if area < _SMALL_OBJECT_AREA_THRESH * self.output.scale else 3 ) theta = angle * math.pi / 180.0 c = math.cos(theta) s = math.sin(theta) rect = [(-w / 2, h / 2), (-w / 2, -h / 2), (w / 2, -h / 2), (w / 2, h / 2)] # x: left->right ; y: top->down rotated_rect = [(s * yy + c * xx + cnt_x, c * yy - s * xx + cnt_y) for (xx, yy) in rect] for k in range(4): j = (k + 1) % 4 self.draw_line( [rotated_rect[k][0], rotated_rect[j][0]], [rotated_rect[k][1], rotated_rect[j][1]], color=edge_color, linestyle="--" if k == 1 else line_style, linewidth=linewidth, ) if label is not None: text_pos = rotated_rect[1] # topleft corner height_ratio = h / np.sqrt(self.output.height * self.output.width) label_color = self._change_color_brightness(edge_color, brightness_factor=0.7) font_size = ( np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) * 0.5 * self._default_font_size ) self.draw_text(label, text_pos, color=label_color, font_size=font_size, rotation=angle) return self.output def draw_circle(self, circle_coord, color, radius=3): """ Args: circle_coord (list(int) or tuple(int)): contains the x and y coordinates of the center of the circle. color: color of the polygon. Refer to `matplotlib.colors` for a full list of formats that are accepted. radius (int): radius of the circle. Returns: output (VisImage): image object with box drawn. """ x, y = circle_coord self.output.ax.add_patch( mpl.patches.Circle(circle_coord, radius=radius, fill=True, color=color) ) return self.output def draw_line(self, x_data, y_data, color, linestyle="-", linewidth=None): """ Args: x_data (list[int]): a list containing x values of all the points being drawn. Length of list should match the length of y_data. y_data (list[int]): a list containing y values of all the points being drawn. Length of list should match the length of x_data. color: color of the line. Refer to `matplotlib.colors` for a full list of formats that are accepted. linestyle: style of the line. Refer to `matplotlib.lines.Line2D` for a full list of formats that are accepted. linewidth (float or None): width of the line. When it's None, a default value will be computed and used. Returns: output (VisImage): image object with line drawn. """ if linewidth is None: linewidth = self._default_font_size / 3 linewidth = max(linewidth, 1) self.output.ax.add_line( mpl.lines.Line2D( x_data, y_data, linewidth=linewidth * self.output.scale, color=color, linestyle=linestyle, ) ) return self.output def draw_binary_mask( self, binary_mask, color=None, *, edge_color=None, text=None, alpha=0.5, area_threshold=10 ): """ Args: binary_mask (ndarray): numpy array of shape (H, W), where H is the image height and W is the image width. Each value in the array is either a 0 or 1 value of uint8 type. color: color of the mask. Refer to `matplotlib.colors` for a full list of formats that are accepted. If None, will pick a random color. edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a full list of formats that are accepted. text (str): if None, will be drawn on the object alpha (float): blending efficient. Smaller values lead to more transparent masks. area_threshold (float): a connected component smaller than this area will not be shown. Returns: output (VisImage): image object with mask drawn. 
""" if color is None: color = random_color(rgb=True, maximum=1) color = mplc.to_rgb(color) has_valid_segment = False binary_mask = binary_mask.astype("uint8") # opencv needs uint8 mask = GenericMask(binary_mask, self.output.height, self.output.width) shape2d = (binary_mask.shape[0], binary_mask.shape[1]) if not mask.has_holes: # draw polygons for regular masks for segment in mask.polygons: area = mask_util.area(mask_util.frPyObjects([segment], shape2d[0], shape2d[1])) if area < (area_threshold or 0): continue has_valid_segment = True segment = segment.reshape(-1, 2) self.draw_polygon(segment, color=color, edge_color=edge_color, alpha=alpha) else: # TODO: Use Path/PathPatch to draw vector graphics: # https://stackoverflow.com/questions/8919719/how-to-plot-a-complex-polygon rgba = np.zeros(shape2d + (4,), dtype="float32") rgba[:, :, :3] = color rgba[:, :, 3] = (mask.mask == 1).astype("float32") * alpha has_valid_segment = True self.output.ax.imshow(rgba, extent=(0, self.output.width, self.output.height, 0)) if text is not None and has_valid_segment: lighter_color = self._change_color_brightness(color, brightness_factor=0.7) self._draw_text_in_mask(binary_mask, text, lighter_color) return self.output def draw_soft_mask(self, soft_mask, color=None, *, text=None, alpha=0.5): """ Args: soft_mask (ndarray): float array of shape (H, W), each value in [0, 1]. color: color of the mask. Refer to `matplotlib.colors` for a full list of formats that are accepted. If None, will pick a random color. text (str): if None, will be drawn on the object alpha (float): blending efficient. Smaller values lead to more transparent masks. Returns: output (VisImage): image object with mask drawn. """ if color is None: color = random_color(rgb=True, maximum=1) color = mplc.to_rgb(color) shape2d = (soft_mask.shape[0], soft_mask.shape[1]) rgba = np.zeros(shape2d + (4,), dtype="float32") rgba[:, :, :3] = color rgba[:, :, 3] = soft_mask * alpha self.output.ax.imshow(rgba, extent=(0, self.output.width, self.output.height, 0)) if text is not None: lighter_color = self._change_color_brightness(color, brightness_factor=0.7) binary_mask = (soft_mask > 0.5).astype("uint8") self._draw_text_in_mask(binary_mask, text, lighter_color) return self.output def draw_polygon(self, segment, color, edge_color=None, alpha=0.5): """ Args: segment: numpy array of shape Nx2, containing all the points in the polygon. color: color of the polygon. Refer to `matplotlib.colors` for a full list of formats that are accepted. edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a full list of formats that are accepted. If not provided, a darker shade of the polygon color will be used instead. alpha (float): blending efficient. Smaller values lead to more transparent masks. Returns: output (VisImage): image object with polygon drawn. """ if edge_color is None: # make edge color darker than the polygon color if alpha > 0.8: edge_color = self._change_color_brightness(color, brightness_factor=-0.7) else: edge_color = color edge_color = mplc.to_rgb(edge_color) + (1,) polygon = mpl.patches.Polygon( segment, fill=True, facecolor=mplc.to_rgb(color) + (alpha,), edgecolor=edge_color, linewidth=max(self._default_font_size // 15 * self.output.scale, 1), ) self.output.ax.add_patch(polygon) return self.output """ Internal methods: """ def _jitter(self, color): """ Randomly modifies given color to produce a slightly different color than the color given. 
Args: color (tuple[double]): a tuple of 3 elements, containing the RGB values of the color picked. The values in the list are in the [0.0, 1.0] range. Returns: jittered_color (tuple[double]): a tuple of 3 elements, containing the RGB values of the color after being jittered. The values in the list are in the [0.0, 1.0] range. """ color = mplc.to_rgb(color) vec = np.random.rand(3) # better to do it in another color space vec = vec / np.linalg.norm(vec) * 0.5 res = np.clip(vec + color, 0, 1) return tuple(res) def _create_grayscale_image(self, mask=None): """ Create a grayscale version of the original image. The colors in masked area, if given, will be kept. """ img_bw = self.img.astype("f4").mean(axis=2) img_bw = np.stack([img_bw] * 3, axis=2) if mask is not None: img_bw[mask] = self.img[mask] return img_bw def _change_color_brightness(self, color, brightness_factor): """ Depending on the brightness_factor, gives a lighter or darker color i.e. a color with less or more saturation than the original color. Args: color: color of the polygon. Refer to `matplotlib.colors` for a full list of formats that are accepted. brightness_factor (float): a value in [-1.0, 1.0] range. A lightness factor of 0 will correspond to no change, a factor in [-1.0, 0) range will result in a darker color and a factor in (0, 1.0] range will result in a lighter color. Returns: modified_color (tuple[double]): a tuple containing the RGB values of the modified color. Each value in the tuple is in the [0.0, 1.0] range. """ assert brightness_factor >= -1.0 and brightness_factor <= 1.0 color = mplc.to_rgb(color) polygon_color = colorsys.rgb_to_hls(*mplc.to_rgb(color)) modified_lightness = polygon_color[1] + (brightness_factor * polygon_color[1]) modified_lightness = 0.0 if modified_lightness < 0.0 else modified_lightness modified_lightness = 1.0 if modified_lightness > 1.0 else modified_lightness modified_color = colorsys.hls_to_rgb(polygon_color[0], modified_lightness, polygon_color[2]) return modified_color def _convert_boxes(self, boxes): """ Convert different format of boxes to an NxB array, where B = 4 or 5 is the box dimension. """
if isinstance(boxes, Boxes) or isinstance(boxes, RotatedBoxes):
6
2023-12-22 13:31:33+00:00
24k
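GenericMask in the file above hides three mask encodings (binary array, polygon list, COCO RLE) behind one interface; all conversions bottom out in pycocotools. A short sketch of that round trip on a hypothetical 10x10 square (assumes pycocotools is installed; the polygon is made up for illustration):

import pycocotools.mask as mask_util

h = w = 10
polygon = [2.0, 2.0, 8.0, 2.0, 8.0, 8.0, 2.0, 8.0]  # x, y, x, y, ... square

# polygons -> RLE -> binary mask, as in GenericMask.polygons_to_mask
rles = mask_util.frPyObjects([polygon], h, w)
rle = mask_util.merge(rles)
mask = mask_util.decode(rle)
print(mask.sum())  # pixel area of the rasterized square

# RLE -> XYWH bbox, converted to XYXY exactly as GenericMask.bbox does
bbox = mask_util.toBbox(rle)
bbox[2] += bbox[0]
bbox[3] += bbox[1]
print(bbox)  # [x0, y0, x1, y1]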
iKala/ievals
ievals/cli/ieval.py
[ { "identifier": "TGI_Evaluator", "path": "ievals/modules/qa_evaluators/tgi.py", "snippet": "class TGI_Evaluator(Evaluator):\n def __init__(\n self,\n choices,\n k,\n ip_addr,\n model_name,\n systemMessageToken=\"<|im_start|>system\\n\",\n messageEndTok...
import os import logging import argparse import pandas as pd from datasets import load_dataset from ievals.modules.qa_evaluators.tgi import TGI_Evaluator from ievals.modules.qa_evaluators.gemini import Gemini_Evaluator from ievals.modules.qa_evaluators.claude import Claude_Evaluator from ievals.modules.qa_evaluators.azure import Azure_Evaluator from ievals.modules.qa_evaluators.oai_complete import GPT_Evaluator from ievals.modules.qa_evaluators.chatgpt import ChatGPT_Evaluator from ievals.modules.qa_evaluators.hf_chat import HF_Chat_Evaluator from ievals.modules.qa_evaluators.hf_base import ( Qwen_Evaluator, ) # we only use this for qwen base model from ievals.modules.qa_evaluators.ali_dashscope import DashScope_Evaluator from ievals.exp_executer import run_exp
19,458
""" CLI for all models Support mode: if tgi service was used you must pass in IP and hostname if the service was found in model_config.csv you could skip providing the 4 tokens (user, assistant, system, eos) else you need to pass in the four token in args """ try: except ImportError as e: logging.error("huggingface and qwen models are not supported due to " + str(e)) def get_model_config(): current_dir = os.path.dirname(os.path.abspath(__file__)) up_dir = os.path.abspath(os.path.join(current_dir, os.pardir)) df = pd.read_csv(os.path.join(up_dir, "model_config.csv")) df.fillna("", inplace=True) valid_model_names = df["model_name"].tolist() return valid_model_names, df def get_tgi_prompt_config(model_name): valid_model_names, df = get_model_config() if model_name not in valid_model_names: return None, None prompt_config = df[df["model_name"] == model_name].iloc[0] prompt_config.pop("model_name") return prompt_config def get_evaluator(model_name, series=""): if len(series): if series == "azure": return Azure_Evaluator elif series == "openai_chat": return ChatGPT_Evaluator elif series == "openai_complete": return GPT_Evaluator elif series == "gemini":
""" CLI for all models Support mode: if tgi service was used you must pass in IP and hostname if the service was found in model_config.csv you could skip providing the 4 tokens (user, assistant, system, eos) else you need to pass in the four token in args """ try: except ImportError as e: logging.error("huggingface and qwen models are not supported due to " + str(e)) def get_model_config(): current_dir = os.path.dirname(os.path.abspath(__file__)) up_dir = os.path.abspath(os.path.join(current_dir, os.pardir)) df = pd.read_csv(os.path.join(up_dir, "model_config.csv")) df.fillna("", inplace=True) valid_model_names = df["model_name"].tolist() return valid_model_names, df def get_tgi_prompt_config(model_name): valid_model_names, df = get_model_config() if model_name not in valid_model_names: return None, None prompt_config = df[df["model_name"] == model_name].iloc[0] prompt_config.pop("model_name") return prompt_config def get_evaluator(model_name, series=""): if len(series): if series == "azure": return Azure_Evaluator elif series == "openai_chat": return ChatGPT_Evaluator elif series == "openai_complete": return GPT_Evaluator elif series == "gemini":
return Gemini_Evaluator
1
2023-12-24 08:00:38+00:00
24k
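get_evaluator above resolves the series string with an if/elif chain; the same mapping is often kept as a dict so the series-to-class table lives in one place. A sketch with stub classes (the real ones come from ievals.modules.qa_evaluators; this is an alternative shape, not the repo's code):

class Azure_Evaluator: ...
class ChatGPT_Evaluator: ...
class GPT_Evaluator: ...
class Gemini_Evaluator: ...


_SERIES_TO_EVALUATOR = {
    "azure": Azure_Evaluator,
    "openai_chat": ChatGPT_Evaluator,
    "openai_complete": GPT_Evaluator,
    "gemini": Gemini_Evaluator,
}


def get_evaluator(model_name: str, series: str = ""):
    """Resolve an evaluator class from an explicit series name, if one is given."""
    if series:
        try:
            return _SERIES_TO_EVALUATOR[series]
        except KeyError:
            raise ValueError(f"unknown evaluator series: {series!r}")
    return None  # fall through to model-name-based resolution (elided here)


print(get_evaluator("gpt-4-0613", series="openai_chat").__name__)  # ChatGPT_Evaluator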
kraina-ai/quackosm
quackosm/functions.py
[ { "identifier": "GroupedOsmTagsFilter", "path": "quackosm/_osm_tags_filters.py", "snippet": "def merge_osm_tags_filter(osm_tags_filter: OsmTagsFilter) -> OsmTagsFilter: ...\ndef merge_osm_tags_filter(osm_tags_filter: GroupedOsmTagsFilter) -> OsmTagsFilter: ...\ndef merge_osm_tags_filter(osm_tags_filter:...
from collections.abc import Iterable from pathlib import Path from typing import Any, Optional, Union from shapely.geometry.base import BaseGeometry from quackosm._osm_tags_filters import GroupedOsmTagsFilter, OsmTagsFilter from quackosm._osm_way_polygon_features import OsmWayPolygonConfig from quackosm.pbf_file_reader import PbfFileReader import geopandas as gpd
17,284
""" Functions. This module contains helper functions to simplify the usage. """ def convert_pbf_to_gpq( pbf_path: Union[str, Path],
""" Functions. This module contains helper functions to simplify the usage. """ def convert_pbf_to_gpq( pbf_path: Union[str, Path],
tags_filter: Optional[Union[OsmTagsFilter, GroupedOsmTagsFilter]] = None,
0
2023-12-28 11:26:41+00:00
24k
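Based only on the signature visible above (a positional pbf_path plus an optional tags_filter), a hedged usage sketch; the input file name is hypothetical, and the dict follows the OsmTagsFilter convention of a tag key mapped to a value, a list of values, or True:

from quackosm.functions import convert_pbf_to_gpq

# Hypothetical extract; keep only amenity=cafe|restaurant features.
result_path = convert_pbf_to_gpq(
    "extract.osm.pbf",                                # hypothetical input path
    tags_filter={"amenity": ["cafe", "restaurant"]},  # OsmTagsFilter-style dict
)
print(result_path)  # expected: path of the generated GeoParquet file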
KyanChen/TTP
tests/test_datasets/test_dataset.py
[ { "identifier": "ADE20KDataset", "path": "mmseg/datasets/ade.py", "snippet": "class ADE20KDataset(BaseSegDataset):\n \"\"\"ADE20K dataset.\n\n In segmentation map annotation for ADE20K, 0 stands for background, which\n is not included in 150 categories. ``reduce_zero_label`` is fixed to True.\n...
import os import os.path as osp import tempfile import pytest from mmseg.datasets import (ADE20KDataset, BaseSegDataset, BDD100KDataset, CityscapesDataset, COCOStuffDataset, DecathlonDataset, DSDLSegDataset, ISPRSDataset, LIPDataset, LoveDADataset, MapillaryDataset_v1, MapillaryDataset_v2, NYUDataset, PascalVOCDataset, PotsdamDataset, REFUGEDataset, SynapseDataset, iSAIDDataset) from mmseg.registry import DATASETS from mmseg.utils import get_classes, get_palette from dsdl.dataset import DSDLDataset
20,211
def test_cityscapes(): test_dataset = CityscapesDataset( pipeline=[], data_prefix=dict( img_path=osp.join( osp.dirname(__file__), '../data/pseudo_cityscapes_dataset/leftImg8bit/val'), seg_map_path=osp.join( osp.dirname(__file__), '../data/pseudo_cityscapes_dataset/gtFine/val'))) assert len(test_dataset) == 1 def test_loveda(): test_dataset = LoveDADataset( pipeline=[], data_prefix=dict( img_path=osp.join( osp.dirname(__file__), '../data/pseudo_loveda_dataset/img_dir'), seg_map_path=osp.join( osp.dirname(__file__), '../data/pseudo_loveda_dataset/ann_dir'))) assert len(test_dataset) == 3 def test_potsdam(): test_dataset = PotsdamDataset( pipeline=[], data_prefix=dict( img_path=osp.join( osp.dirname(__file__), '../data/pseudo_potsdam_dataset/img_dir'), seg_map_path=osp.join( osp.dirname(__file__), '../data/pseudo_potsdam_dataset/ann_dir'))) assert len(test_dataset) == 1 def test_vaihingen(): test_dataset = ISPRSDataset( pipeline=[], data_prefix=dict( img_path=osp.join( osp.dirname(__file__), '../data/pseudo_vaihingen_dataset/img_dir'), seg_map_path=osp.join( osp.dirname(__file__), '../data/pseudo_vaihingen_dataset/ann_dir'))) assert len(test_dataset) == 1 def test_synapse(): test_dataset = SynapseDataset( pipeline=[], data_prefix=dict( img_path=osp.join( osp.dirname(__file__), '../data/pseudo_synapse_dataset/img_dir'), seg_map_path=osp.join( osp.dirname(__file__), '../data/pseudo_synapse_dataset/ann_dir'))) assert len(test_dataset) == 2 def test_refuge(): test_dataset = REFUGEDataset( pipeline=[], data_prefix=dict( img_path=osp.join( osp.dirname(__file__), '../data/pseudo_refuge_dataset/img_dir'), seg_map_path=osp.join( osp.dirname(__file__), '../data/pseudo_refuge_dataset/ann_dir'))) assert len(test_dataset) == 1 def test_isaid(): test_dataset = iSAIDDataset( pipeline=[], data_prefix=dict( img_path=osp.join( osp.dirname(__file__), '../data/pseudo_isaid_dataset/img_dir'), seg_map_path=osp.join( osp.dirname(__file__), '../data/pseudo_isaid_dataset/ann_dir'))) assert len(test_dataset) == 2 test_dataset = iSAIDDataset( data_prefix=dict( img_path=osp.join( osp.dirname(__file__), '../data/pseudo_isaid_dataset/img_dir'), seg_map_path=osp.join( osp.dirname(__file__), '../data/pseudo_isaid_dataset/ann_dir')), ann_file=osp.join( osp.dirname(__file__), '../data/pseudo_isaid_dataset/splits/train.txt')) assert len(test_dataset) == 1 def test_decathlon(): data_root = osp.join(osp.dirname(__file__), '../data') # test load training dataset test_dataset = DecathlonDataset( pipeline=[], data_root=data_root, ann_file='dataset.json') assert len(test_dataset) == 1 # test load test dataset test_dataset = DecathlonDataset( pipeline=[], data_root=data_root, ann_file='dataset.json', test_mode=True) assert len(test_dataset) == 3 def test_lip(): data_root = osp.join(osp.dirname(__file__), '../data/pseudo_lip_dataset') # train load training dataset
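Every test in the cropped snippet above follows one pattern: build data_prefix paths relative to the test file, instantiate the dataset with an empty pipeline, and assert how many samples get indexed. The same checks could be folded into one parametrized test; a sketch assuming the same pseudo-data fixtures (a restructuring suggestion, not code from the repo):

import os.path as osp

import pytest

from mmseg.datasets import ISPRSDataset, LoveDADataset, PotsdamDataset


@pytest.mark.parametrize('dataset_cls, folder, expected_len', [
    (LoveDADataset, 'pseudo_loveda_dataset', 3),
    (PotsdamDataset, 'pseudo_potsdam_dataset', 1),
    (ISPRSDataset, 'pseudo_vaihingen_dataset', 1),
])
def test_dataset_length(dataset_cls, folder, expected_len):
    root = osp.join(osp.dirname(__file__), '../data', folder)
    dataset = dataset_cls(
        pipeline=[],  # no transforms needed just to index annotations
        data_prefix=dict(
            img_path=osp.join(root, 'img_dir'),
            seg_map_path=osp.join(root, 'ann_dir')))
    assert len(dataset) == expected_len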
# Copyright (c) OpenMMLab. All rights reserved.

try:
    from dsdl.dataset import DSDLDataset
except ImportError:
    DSDLDataset = None


def test_classes():
    assert list(
        CityscapesDataset.METAINFO['classes']) == get_classes('cityscapes')
    assert list(PascalVOCDataset.METAINFO['classes']) == get_classes(
        'voc') == get_classes('pascal_voc')
    assert list(ADE20KDataset.METAINFO['classes']) == get_classes(
        'ade') == get_classes('ade20k')
    assert list(
        COCOStuffDataset.METAINFO['classes']) == get_classes('cocostuff')
    assert list(LoveDADataset.METAINFO['classes']) == get_classes('loveda')
    assert list(PotsdamDataset.METAINFO['classes']) == get_classes('potsdam')
    assert list(ISPRSDataset.METAINFO['classes']) == get_classes('vaihingen')
    assert list(iSAIDDataset.METAINFO['classes']) == get_classes('isaid')
    assert list(
        MapillaryDataset_v1.METAINFO['classes']) == get_classes('mapillary_v1')
    assert list(
        MapillaryDataset_v2.METAINFO['classes']) == get_classes('mapillary_v2')
    assert list(BDD100KDataset.METAINFO['classes']) == get_classes('bdd100k')

    with pytest.raises(ValueError):
        get_classes('unsupported')


def test_classes_file_path():
    tmp_file = tempfile.NamedTemporaryFile()
    classes_path = f'{tmp_file.name}.txt'
    train_pipeline = []
    kwargs = dict(
        pipeline=train_pipeline,
        data_prefix=dict(img_path='./', seg_map_path='./'),
        metainfo=dict(classes=classes_path))

    # classes.txt with full categories
    categories = get_classes('cityscapes')
    with open(classes_path, 'w') as f:
        f.write('\n'.join(categories))
    dataset = CityscapesDataset(**kwargs)
    assert list(dataset.metainfo['classes']) == categories
    assert dataset.label_map is None

    # classes.txt with sub categories
    categories = ['road', 'sidewalk', 'building']
    with open(classes_path, 'w') as f:
        f.write('\n'.join(categories))
    dataset = CityscapesDataset(**kwargs)
    assert list(dataset.metainfo['classes']) == categories
    assert dataset.label_map is not None

    # classes.txt with unknown categories
    categories = ['road', 'sidewalk', 'unknown']
    with open(classes_path, 'w') as f:
        f.write('\n'.join(categories))
    with pytest.raises(ValueError):
        CityscapesDataset(**kwargs)

    tmp_file.close()
    os.remove(classes_path)
    assert not osp.exists(classes_path)


def test_palette():
    assert CityscapesDataset.METAINFO['palette'] == get_palette('cityscapes')
    assert PascalVOCDataset.METAINFO['palette'] == get_palette(
        'voc') == get_palette('pascal_voc')
    assert ADE20KDataset.METAINFO['palette'] == get_palette(
        'ade') == get_palette('ade20k')
    assert LoveDADataset.METAINFO['palette'] == get_palette('loveda')
    assert PotsdamDataset.METAINFO['palette'] == get_palette('potsdam')
    assert COCOStuffDataset.METAINFO['palette'] == get_palette('cocostuff')
    assert iSAIDDataset.METAINFO['palette'] == get_palette('isaid')
    assert list(
        MapillaryDataset_v1.METAINFO['palette']) == get_palette('mapillary_v1')
    assert list(
        MapillaryDataset_v2.METAINFO['palette']) == get_palette('mapillary_v2')
    assert list(BDD100KDataset.METAINFO['palette']) == get_palette('bdd100k')

    with pytest.raises(ValueError):
        get_palette('unsupported')


def test_custom_dataset():
    # with 'img_path' and 'seg_map_path' in data_prefix
    train_dataset = BaseSegDataset(
        data_root=osp.join(osp.dirname(__file__), '../data/pseudo_dataset'),
        data_prefix=dict(
            img_path='imgs/',
            seg_map_path='gts/',
        ),
        img_suffix='img.jpg',
        seg_map_suffix='gt.png')
    assert len(train_dataset) == 5

    # with 'img_path' and 'seg_map_path' in data_prefix and ann_file
    train_dataset = BaseSegDataset(
        data_root=osp.join(osp.dirname(__file__), '../data/pseudo_dataset'),
        data_prefix=dict(
            img_path='imgs/',
            seg_map_path='gts/',
        ),
        img_suffix='img.jpg',
        seg_map_suffix='gt.png',
        ann_file='splits/train.txt')
    assert len(train_dataset) == 4

    # no data_root
    train_dataset = BaseSegDataset(
        data_prefix=dict(
            img_path=osp.join(
                osp.dirname(__file__), '../data/pseudo_dataset/imgs'),
            seg_map_path=osp.join(
                osp.dirname(__file__), '../data/pseudo_dataset/gts')),
        img_suffix='img.jpg',
        seg_map_suffix='gt.png')
    assert len(train_dataset) == 5

    # with data_root but 'img_path' and 'seg_map_path' in data_prefix are
    # abs path
    train_dataset = BaseSegDataset(
        data_root=osp.join(osp.dirname(__file__), '../data/pseudo_dataset'),
        data_prefix=dict(
            img_path=osp.join(
                osp.dirname(__file__), '../data/pseudo_dataset/imgs'),
            seg_map_path=osp.join(
                osp.dirname(__file__), '../data/pseudo_dataset/gts')),
        img_suffix='img.jpg',
        seg_map_suffix='gt.png')
    assert len(train_dataset) == 5

    # test_mode=True
    test_dataset = BaseSegDataset(
        data_prefix=dict(
            img_path=osp.join(
                osp.dirname(__file__), '../data/pseudo_dataset/imgs')),
        img_suffix='img.jpg',
        test_mode=True,
        metainfo=dict(classes=('pseudo_class', )))
    assert len(test_dataset) == 5

    # training data get
    train_data = train_dataset[0]
    assert isinstance(train_data, dict)
    assert 'img_path' in train_data and osp.isfile(train_data['img_path'])
    assert 'seg_map_path' in train_data and osp.isfile(
        train_data['seg_map_path'])

    # test data get
    test_data = test_dataset[0]
    assert isinstance(test_data, dict)
    assert 'img_path' in train_data and osp.isfile(train_data['img_path'])
    assert 'seg_map_path' in train_data and osp.isfile(
        train_data['seg_map_path'])


def test_ade():
    test_dataset = ADE20KDataset(
        pipeline=[],
        data_prefix=dict(
            img_path=osp.join(
                osp.dirname(__file__), '../data/pseudo_dataset/imgs')))
    assert len(test_dataset) == 5


def test_cityscapes():
    test_dataset = CityscapesDataset(
        pipeline=[],
        data_prefix=dict(
            img_path=osp.join(
                osp.dirname(__file__),
                '../data/pseudo_cityscapes_dataset/leftImg8bit/val'),
            seg_map_path=osp.join(
                osp.dirname(__file__),
                '../data/pseudo_cityscapes_dataset/gtFine/val')))
    assert len(test_dataset) == 1


def test_loveda():
    test_dataset = LoveDADataset(
        pipeline=[],
        data_prefix=dict(
            img_path=osp.join(
                osp.dirname(__file__), '../data/pseudo_loveda_dataset/img_dir'),
            seg_map_path=osp.join(
                osp.dirname(__file__),
                '../data/pseudo_loveda_dataset/ann_dir')))
    assert len(test_dataset) == 3


def test_potsdam():
    test_dataset = PotsdamDataset(
        pipeline=[],
        data_prefix=dict(
            img_path=osp.join(
                osp.dirname(__file__),
                '../data/pseudo_potsdam_dataset/img_dir'),
            seg_map_path=osp.join(
                osp.dirname(__file__),
                '../data/pseudo_potsdam_dataset/ann_dir')))
    assert len(test_dataset) == 1


def test_vaihingen():
    test_dataset = ISPRSDataset(
        pipeline=[],
        data_prefix=dict(
            img_path=osp.join(
                osp.dirname(__file__),
                '../data/pseudo_vaihingen_dataset/img_dir'),
            seg_map_path=osp.join(
                osp.dirname(__file__),
                '../data/pseudo_vaihingen_dataset/ann_dir')))
    assert len(test_dataset) == 1


def test_synapse():
    test_dataset = SynapseDataset(
        pipeline=[],
        data_prefix=dict(
            img_path=osp.join(
                osp.dirname(__file__),
                '../data/pseudo_synapse_dataset/img_dir'),
            seg_map_path=osp.join(
                osp.dirname(__file__),
                '../data/pseudo_synapse_dataset/ann_dir')))
    assert len(test_dataset) == 2


def test_refuge():
    test_dataset = REFUGEDataset(
        pipeline=[],
        data_prefix=dict(
            img_path=osp.join(
                osp.dirname(__file__),
                '../data/pseudo_refuge_dataset/img_dir'),
            seg_map_path=osp.join(
                osp.dirname(__file__),
                '../data/pseudo_refuge_dataset/ann_dir')))
    assert len(test_dataset) == 1


def test_isaid():
    test_dataset = iSAIDDataset(
        pipeline=[],
        data_prefix=dict(
            img_path=osp.join(
                osp.dirname(__file__), '../data/pseudo_isaid_dataset/img_dir'),
            seg_map_path=osp.join(
                osp.dirname(__file__),
                '../data/pseudo_isaid_dataset/ann_dir')))
    assert len(test_dataset) == 2
    test_dataset = iSAIDDataset(
        data_prefix=dict(
            img_path=osp.join(
                osp.dirname(__file__), '../data/pseudo_isaid_dataset/img_dir'),
            seg_map_path=osp.join(
                osp.dirname(__file__),
                '../data/pseudo_isaid_dataset/ann_dir')),
        ann_file=osp.join(
            osp.dirname(__file__),
            '../data/pseudo_isaid_dataset/splits/train.txt'))
    assert len(test_dataset) == 1


def test_decathlon():
    data_root = osp.join(osp.dirname(__file__), '../data')
    # test load training dataset
    test_dataset = DecathlonDataset(
        pipeline=[], data_root=data_root, ann_file='dataset.json')
    assert len(test_dataset) == 1
    # test load test dataset
    test_dataset = DecathlonDataset(
        pipeline=[],
        data_root=data_root,
        ann_file='dataset.json',
        test_mode=True)
    assert len(test_dataset) == 3


def test_lip():
    data_root = osp.join(osp.dirname(__file__), '../data/pseudo_lip_dataset')
    # train load training dataset
train_dataset = LIPDataset(
9
2023-12-23 08:36:47+00:00
24k
see2023/Bert-VITS2-ext
train_ms.py
[ { "identifier": "config", "path": "config.py", "snippet": "class Resample_config:\nclass Preprocess_text_config:\nclass Bert_gen_config:\nclass Emo_gen_config:\nclass Train_ms_config:\nclass Webui_config:\nclass Server_config:\nclass Translate_config:\nclass Config:\n def __init__(self, in_dir: str, ...
import platform
import os
import torch
import torch.distributed as dist
import logging
import argparse
import datetime
import gc
import commons
import utils
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.cuda.amp import autocast, GradScaler
from tqdm import tqdm
from config import config
from data_utils import (
    TextAudioSpeakerLoader,
    TextAudioSpeakerCollate,
    DistributedBucketSampler,
    AudioVisemesLoader,
)
from models import (
    SynthesizerTrn,
    MultiPeriodDiscriminator,
    DurationDiscriminator,
    WavLMDiscriminator,
    VisemesNet,
)
from losses import (
    generator_loss,
    discriminator_loss,
    feature_loss,
    kl_loss,
    WavLMLoss,
)
from mel_processing import mel_spectrogram_torch, spec_to_mel_torch
from text.symbols import symbols
15,277
        if net_dur_disc is not None:
            scheduler_dur_disc.step()


def train_and_evaluate(
    rank,
    local_rank,
    epoch,
    hps,
    nets,
    optims,
    schedulers,
    scaler,
    loaders,
    logger,
    writers,
):
    net_g, net_d, net_dur_disc, net_wd, wl = nets
    optim_g, optim_d, optim_dur_disc, optim_wd = optims
    scheduler_g, scheduler_d, scheduler_dur_disc, scheduler_wd = schedulers
    train_loader, eval_loader = loaders
    if writers is not None:
        writer, writer_eval = writers

    train_loader.batch_sampler.set_epoch(epoch)
    global global_step

    net_g.train()
    net_d.train()
    net_wd.train()
    if net_dur_disc is not None:
        net_dur_disc.train()
    for batch_idx, (
        x,
        x_lengths,
        spec,
        spec_lengths,
        y,
        y_lengths,
        speakers,
        tone,
        language,
        bert,
        ja_bert,
        en_bert,
    ) in enumerate(tqdm(train_loader)):
        if net_g.module.use_noise_scaled_mas:
            current_mas_noise_scale = (
                net_g.module.mas_noise_scale_initial
                - net_g.module.noise_scale_delta * global_step
            )
            net_g.module.current_mas_noise_scale = max(current_mas_noise_scale, 0.0)
        x, x_lengths = x.cuda(local_rank, non_blocking=True), x_lengths.cuda(
            local_rank, non_blocking=True
        )
        spec, spec_lengths = spec.cuda(
            local_rank, non_blocking=True
        ), spec_lengths.cuda(local_rank, non_blocking=True)
        y, y_lengths = y.cuda(local_rank, non_blocking=True), y_lengths.cuda(
            local_rank, non_blocking=True
        )
        speakers = speakers.cuda(local_rank, non_blocking=True)
        tone = tone.cuda(local_rank, non_blocking=True)
        language = language.cuda(local_rank, non_blocking=True)
        bert = bert.cuda(local_rank, non_blocking=True)
        ja_bert = ja_bert.cuda(local_rank, non_blocking=True)
        en_bert = en_bert.cuda(local_rank, non_blocking=True)

        with autocast(enabled=hps.train.bf16_run, dtype=torch.bfloat16):
            (
                y_hat,
                l_length,
                attn,
                ids_slice,
                x_mask,
                z_mask,
                (z, z_p, m_p, logs_p, m_q, logs_q),
                (hidden_x, logw, logw_, logw_sdp),
                g,
            ) = net_g(
                x,
                x_lengths,
                spec,
                spec_lengths,
                speakers,
                tone,
                language,
                bert,
                ja_bert,
                en_bert,
            )
            mel = spec_to_mel_torch(
                spec,
                hps.data.filter_length,
                hps.data.n_mel_channels,
                hps.data.sampling_rate,
                hps.data.mel_fmin,
                hps.data.mel_fmax,
            )
            y_mel = commons.slice_segments(
                mel, ids_slice, hps.train.segment_size // hps.data.hop_length
            )
            y_hat_mel = mel_spectrogram_torch(
                y_hat.squeeze(1).float(),
                hps.data.filter_length,
                hps.data.n_mel_channels,
                hps.data.sampling_rate,
                hps.data.hop_length,
                hps.data.win_length,
                hps.data.mel_fmin,
                hps.data.mel_fmax,
            )
            y = commons.slice_segments(
                y, ids_slice * hps.data.hop_length, hps.train.segment_size
            )  # slice

            # Discriminator
            y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach())

        with autocast(enabled=hps.train.bf16_run, dtype=torch.bfloat16):
# flake8: noqa: E402

logging.getLogger("numba").setLevel(logging.WARNING)
logger = logging.getLogger(__name__)

torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = (
    True  # If you encounter training problems, please try to disable TF32.
)
torch.set_float32_matmul_precision("medium")
torch.backends.cuda.sdp_kernel("flash")
torch.backends.cuda.enable_flash_sdp(True)
torch.backends.cuda.enable_mem_efficient_sdp(
    True
)  # Not available if torch version is lower than 2.0

global_step = 0
global_visemes_step = 0


def run_only_visemes(hps):
    # Simplest single-machine mode: only train the parameters of the fully
    # connected VisemesFCNet that maps the latent variable z to visemes.
    global global_visemes_step
    torch.manual_seed(hps.train.seed)
    torch.cuda.set_device(0)
    train_dataset = AudioVisemesLoader(hps.data.training_visemes_files, hps.data)
    train_loader = DataLoader(train_dataset, num_workers=0, shuffle=False,
                              pin_memory=True, batch_size=1, drop_last=True)
    eval_dataset = AudioVisemesLoader(hps.data.validation_visemes_files, hps.data)
    eval_loader = DataLoader(eval_dataset, num_workers=0, shuffle=False,
                             batch_size=1, pin_memory=True, drop_last=False)
    net_v = VisemesNet(hps.model.hidden_channels).cuda()
    latest_model_path = utils.latest_checkpoint_path(hps.model_dir, "V_*.pth")
    if latest_model_path is not None:
        _, optim_d, _, epoch_str = utils.load_checkpoint(
            latest_model_path, net_v, None, skip_optimizer=False)
    else:
        epoch_str = 1
        global_visemes_step = 0
        net_v.init_weights()
    optim_v = torch.optim.AdamW(
        net_v.parameters(),
        hps.train.learning_rate,
        betas=hps.train.betas,
        eps=hps.train.eps)
    optim_v.param_groups[0]['initial_lr'] = hps.train.learning_rate
    scheduler_v = torch.optim.lr_scheduler.ExponentialLR(
        optim_v, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2)
    scaler = GradScaler(enabled=hps.train.bf16_run)
    for epoch in range(epoch_str, hps.train.epochs + 1):
        train_visemes_only(epoch, hps, net_v, train_loader, optim_v, scaler)
        scheduler_v.step()
        if epoch % hps.train.eval_interval == 0:
            eval_visemes_only(epoch, hps, net_v, eval_loader)
            utils.save_checkpoint(net_v, optim_v, hps.train.learning_rate, epoch,
                                  os.path.join(hps.model_dir, "V_{}.pth".format(epoch)))


def train_visemes_only(epoch, hps, net_v, train_loader, optim_v, scaler):
    for batch_idx, (spec, visemes) in tqdm(enumerate(train_loader)):
        spec, visemes = spec.cuda(), visemes.cuda()
        with autocast(enabled=hps.train.bf16_run):
            # Generate visemes_hat from z through VisemesNet and compute the MSE.
            visemes_hat = net_v(spec)
            visemes_hat_mse = get_visemes_mse(visemes, visemes_hat)
        optim_v.zero_grad()
        scaler.scale(visemes_hat_mse).backward()
        scaler.unscale_(optim_v)
        grad_norm_v = commons.clip_grad_value_(net_v.parameters(), None)
        scaler.step(optim_v)
        global global_visemes_step
        global_visemes_step += 1
        if batch_idx % hps.train.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tvisemes_hat_mse: {:.6f}\tgrad_norm_v: {:.6f}'.format(
                epoch, batch_idx * len(spec), len(train_loader.dataset),
                100. * batch_idx / len(train_loader),
                visemes_hat_mse.item(), grad_norm_v))


def get_visemes_mse(visemes, visemes_hat):
    if visemes.shape[-1] != visemes_hat.shape[-1]:  # if the last dimensions of y and x differ
        # linearly interpolate x so its shape matches y
        visemes_hat = F.interpolate(visemes_hat, size=visemes.shape[-1],
                                    mode='linear', align_corners=True)
    visemes_hat_mse = torch.mean(torch.pow(visemes_hat - visemes, 2))
    return visemes_hat_mse


def eval_visemes_only(epoch, hps, net_v, eval_loader):
    net_v.eval()
    with torch.no_grad():
        visemes_hat_mse_sum = 0.0
        for batch_idx, (spec, visemes) in tqdm(enumerate(eval_loader)):
            spec, visemes = spec.cuda(), visemes.cuda()
            # Generate visemes_hat from z through VisemesFCNet and compute the MSE.
            visemes_hat = net_v(spec)
            visemes_hat_mse = get_visemes_mse(visemes, visemes_hat)
            visemes_hat_mse_sum += visemes_hat_mse
            # print('visemes_hat_mse', visemes_hat_mse)
            break
    visemes_hat_mse_avg = visemes_hat_mse_sum / (batch_idx + 1)
    log_str = '------------------ eval epoch: {} visemes_hat_mse_avg: {:.6f}'.format(
        epoch, visemes_hat_mse_avg)
    print(log_str)
    logger.warning(log_str)
    net_v.train()


def run():
    # Parse environment variables
    envs = config.train_ms_config.env
    for env_name, env_value in envs.items():
        if env_name not in os.environ.keys():
            print("Loading configuration value {} from config".format(str(env_value)))
            os.environ[env_name] = str(env_value)
    print(
        "Loaded environment variables \nMASTER_ADDR: {},\nMASTER_PORT: {},\nWORLD_SIZE: {},\nRANK: {},\nLOCAL_RANK: {}".format(
            os.environ["MASTER_ADDR"],
            os.environ["MASTER_PORT"],
            os.environ["WORLD_SIZE"],
            os.environ["RANK"],
            os.environ["LOCAL_RANK"],
        )
    )

    backend = "nccl"
    if platform.system() == "Windows":
        backend = "gloo"  # If Windows, switch to gloo backend.
    dist.init_process_group(
        backend=backend,
        init_method="env://",
        timeout=datetime.timedelta(seconds=300),
    )  # Use torchrun instead of mp.spawn
    rank = dist.get_rank()
    local_rank = int(os.environ["LOCAL_RANK"])
    n_gpus = dist.get_world_size()

    # Parse command line / config.yml configuration
    # hps = utils.get_hparams()
    parser = argparse.ArgumentParser()
    # Command-line configuration is discouraged unless necessary; prefer the config.yml file.
    parser.add_argument(
        "-c",
        "--config",
        type=str,
        default=config.train_ms_config.config_path,
        help="JSON file for configuration",
    )
    parser.add_argument(
        "-m",
        "--model",
        type=str,
        help="Dataset folder path. Note that data is no longer placed under /logs by default; "
        "if you configure it from the command line, declare the path relative to the root directory.",
        default=config.dataset_path,
    )
    parser.add_argument(
        "--visemes",
        dest="visemes",
        action="store_true",
        default=False,
        help="train visemes only, lock the encoder and decoder",
    )
    args = parser.parse_args()
    model_dir = os.path.join(args.model, config.train_ms_config.model)
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)
    hps = utils.get_hparams_from_file(args.config)
    hps.model_dir = model_dir
    set_logger(hps)
    if args.visemes:
        run_only_visemes(hps)

    # Check whether the two config paths point to the same file
    if os.path.realpath(args.config) != os.path.realpath(
        config.train_ms_config.config_path
    ):
        with open(args.config, "r", encoding="utf-8") as f:
            data = f.read()
        with open(config.train_ms_config.config_path, "w", encoding="utf-8") as f:
            f.write(data)

    torch.manual_seed(hps.train.seed)
    torch.cuda.set_device(local_rank)

    global global_step
    if rank == 0:
        logger = utils.get_logger(hps.model_dir)
        logger.info(hps)
        utils.check_git_hash(hps.model_dir)
        writer = SummaryWriter(log_dir=hps.model_dir)
        writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval"))
    train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data)
    train_sampler = DistributedBucketSampler(
        train_dataset,
        hps.train.batch_size,
        [32, 300, 400, 500, 600, 700, 800, 900, 1000],
        num_replicas=n_gpus,
        rank=rank,
        shuffle=True,
    )
    collate_fn = TextAudioSpeakerCollate()
    train_loader = DataLoader(
        train_dataset,
        num_workers=min(config.train_ms_config.num_workers, os.cpu_count() - 1),
        shuffle=False,
        pin_memory=True,
        collate_fn=collate_fn,
        batch_sampler=train_sampler,
        persistent_workers=True,
        prefetch_factor=4,
    )  # DataLoader config could be adjusted.
    if rank == 0:
        eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data)
        eval_loader = DataLoader(
            eval_dataset,
            num_workers=0,
            shuffle=False,
            batch_size=1,
            pin_memory=True,
            drop_last=False,
            collate_fn=collate_fn,
        )
    if (
        "use_noise_scaled_mas" in hps.model.keys()
        and hps.model.use_noise_scaled_mas is True
    ):
        print("Using noise scaled MAS for VITS2")
        mas_noise_scale_initial = 0.01
        noise_scale_delta = 2e-6
    else:
        print("Using normal MAS for VITS1")
        mas_noise_scale_initial = 0.0
        noise_scale_delta = 0.0
    if (
        "use_duration_discriminator" in hps.model.keys()
        and hps.model.use_duration_discriminator is True
    ):
        print("Using duration discriminator for VITS2")
        net_dur_disc = DurationDiscriminator(
            hps.model.hidden_channels,
            hps.model.hidden_channels,
            3,
            0.1,
            gin_channels=hps.model.gin_channels if hps.data.n_speakers != 0 else 0,
        ).cuda(local_rank)
    else:
        net_dur_disc = None
    if (
        "use_spk_conditioned_encoder" in hps.model.keys()
        and hps.model.use_spk_conditioned_encoder is True
    ):
        if hps.data.n_speakers == 0:
            raise ValueError(
                "n_speakers must be > 0 when using spk conditioned encoder to train multi-speaker model"
            )
    else:
        print("Using normal encoder for VITS1")

    net_g = SynthesizerTrn(
        len(symbols),
        hps.data.filter_length // 2 + 1,
        hps.train.segment_size // hps.data.hop_length,
        n_speakers=hps.data.n_speakers,
        mas_noise_scale_initial=mas_noise_scale_initial,
        noise_scale_delta=noise_scale_delta,
        **hps.model,
    ).cuda(local_rank)
    if getattr(hps.train, "freeze_ZH_bert", False):
        print("Freezing ZH bert encoder !!!")
        for param in net_g.enc_p.bert_proj.parameters():
            param.requires_grad = False
    if getattr(hps.train, "freeze_EN_bert", False):
        print("Freezing EN bert encoder !!!")
        for param in net_g.enc_p.en_bert_proj.parameters():
            param.requires_grad = False
    if getattr(hps.train, "freeze_JP_bert", False):
        print("Freezing JP bert encoder !!!")
        for param in net_g.enc_p.ja_bert_proj.parameters():
            param.requires_grad = False

    net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(local_rank)
    net_wd = WavLMDiscriminator(
        hps.model.slm.hidden, hps.model.slm.nlayers, hps.model.slm.initial_channel
    ).cuda(local_rank)
    optim_g = torch.optim.AdamW(
        filter(lambda p: p.requires_grad, net_g.parameters()),
        hps.train.learning_rate,
        betas=hps.train.betas,
        eps=hps.train.eps,
    )
    optim_d = torch.optim.AdamW(
        net_d.parameters(),
        hps.train.learning_rate,
        betas=hps.train.betas,
        eps=hps.train.eps,
    )
    optim_wd = torch.optim.AdamW(
        net_wd.parameters(),
        hps.train.learning_rate,
        betas=hps.train.betas,
        eps=hps.train.eps,
    )
    if net_dur_disc is not None:
        optim_dur_disc = torch.optim.AdamW(
            net_dur_disc.parameters(),
            hps.train.learning_rate,
            betas=hps.train.betas,
            eps=hps.train.eps,
        )
    else:
        optim_dur_disc = None
    net_g = DDP(net_g, device_ids=[local_rank], bucket_cap_mb=512)
    net_d = DDP(net_d, device_ids=[local_rank], bucket_cap_mb=512)
    net_wd = DDP(net_wd, device_ids=[local_rank], bucket_cap_mb=512)
    if net_dur_disc is not None:
        net_dur_disc = DDP(
            net_dur_disc,
            device_ids=[local_rank],
            bucket_cap_mb=512,
        )

    # Download the base (pretrained) model
    if config.train_ms_config.base["use_base_model"]:
        utils.download_checkpoint(
            hps.model_dir,
            config.train_ms_config.base,
            token=config.openi_token,
            mirror=config.mirror,
        )
    dur_resume_lr = hps.train.learning_rate
    wd_resume_lr = hps.train.learning_rate
    if net_dur_disc is not None:
        try:
            _, _, dur_resume_lr, epoch_str = utils.load_checkpoint(
                utils.latest_checkpoint_path(hps.model_dir, "DUR_*.pth"),
                net_dur_disc,
                optim_dur_disc,
                skip_optimizer=hps.train.skip_optimizer
                if "skip_optimizer" in hps.train
                else True,
            )
            if not optim_dur_disc.param_groups[0].get("initial_lr"):
                optim_dur_disc.param_groups[0]["initial_lr"] = dur_resume_lr
        except:
            print("Initialize dur_disc")

    try:
        _, optim_g, g_resume_lr, epoch_str = utils.load_checkpoint(
            utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"),
            net_g,
            optim_g,
            skip_optimizer=hps.train.skip_optimizer
            if "skip_optimizer" in hps.train
            else True,
        )
        _, optim_d, d_resume_lr, epoch_str = utils.load_checkpoint(
            utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"),
            net_d,
            optim_d,
            skip_optimizer=hps.train.skip_optimizer
            if "skip_optimizer" in hps.train
            else True,
        )
        if not optim_g.param_groups[0].get("initial_lr"):
            optim_g.param_groups[0]["initial_lr"] = g_resume_lr
        if not optim_d.param_groups[0].get("initial_lr"):
            optim_d.param_groups[0]["initial_lr"] = d_resume_lr

        epoch_str = max(epoch_str, 1)
        # global_step = (epoch_str - 1) * len(train_loader)
        global_step = int(
            utils.get_steps(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"))
        )
        print(
            f"****************** Detected an existing model: epoch {epoch_str}, global step {global_step} *********************"
        )
    except Exception as e:
        print(e)
        epoch_str = 1
        global_step = 0

    try:
        _, optim_wd, wd_resume_lr, epoch_str = utils.load_checkpoint(
            utils.latest_checkpoint_path(hps.model_dir, "WD_*.pth"),
            net_wd,
            optim_wd,
            skip_optimizer=hps.train.skip_optimizer
            if "skip_optimizer" in hps.train
            else True,
        )
        if not optim_wd.param_groups[0].get("initial_lr"):
            optim_wd.param_groups[0]["initial_lr"] = wd_resume_lr
    except Exception as e:
        print(e)

    scheduler_g = torch.optim.lr_scheduler.ExponentialLR(
        optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2
    )
    scheduler_d = torch.optim.lr_scheduler.ExponentialLR(
        optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2
    )
    scheduler_wd = torch.optim.lr_scheduler.ExponentialLR(
        optim_wd, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2
    )
    if net_dur_disc is not None:
        scheduler_dur_disc = torch.optim.lr_scheduler.ExponentialLR(
            optim_dur_disc, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2
        )
    else:
        scheduler_dur_disc = None
    scaler = GradScaler(enabled=hps.train.bf16_run)
    wl = WavLMLoss(
        hps.model.slm.model,
        net_wd,
        hps.data.sampling_rate,
        hps.model.slm.sr,
    ).to(local_rank)

    for epoch in range(epoch_str, hps.train.epochs + 1):
        if rank == 0:
            train_and_evaluate(
                rank,
                local_rank,
                epoch,
                hps,
                [net_g, net_d, net_dur_disc, net_wd, wl],
                [optim_g, optim_d, optim_dur_disc, optim_wd],
                [scheduler_g, scheduler_d, scheduler_dur_disc, scheduler_wd],
                scaler,
                [train_loader, eval_loader],
                logger,
                [writer, writer_eval],
            )
        else:
            train_and_evaluate(
                rank,
                local_rank,
                epoch,
                hps,
                [net_g, net_d, net_dur_disc, net_wd, wl],
                [optim_g, optim_d, optim_dur_disc, optim_wd],
                [scheduler_g, scheduler_d, scheduler_dur_disc, scheduler_wd],
                scaler,
                [train_loader, None],
                None,
                None,
            )
        scheduler_g.step()
        scheduler_d.step()
        scheduler_wd.step()
        if net_dur_disc is not None:
            scheduler_dur_disc.step()


def train_and_evaluate(
    rank,
    local_rank,
    epoch,
    hps,
    nets,
    optims,
    schedulers,
    scaler,
    loaders,
    logger,
    writers,
):
    net_g, net_d, net_dur_disc, net_wd, wl = nets
    optim_g, optim_d, optim_dur_disc, optim_wd = optims
    scheduler_g, scheduler_d, scheduler_dur_disc, scheduler_wd = schedulers
    train_loader, eval_loader = loaders
    if writers is not None:
        writer, writer_eval = writers

    train_loader.batch_sampler.set_epoch(epoch)
    global global_step

    net_g.train()
    net_d.train()
    net_wd.train()
    if net_dur_disc is not None:
        net_dur_disc.train()
    for batch_idx, (
        x,
        x_lengths,
        spec,
        spec_lengths,
        y,
        y_lengths,
        speakers,
        tone,
        language,
        bert,
        ja_bert,
        en_bert,
    ) in enumerate(tqdm(train_loader)):
        if net_g.module.use_noise_scaled_mas:
            current_mas_noise_scale = (
                net_g.module.mas_noise_scale_initial
                - net_g.module.noise_scale_delta * global_step
            )
            net_g.module.current_mas_noise_scale = max(current_mas_noise_scale, 0.0)
        x, x_lengths = x.cuda(local_rank, non_blocking=True), x_lengths.cuda(
            local_rank, non_blocking=True
        )
        spec, spec_lengths = spec.cuda(
            local_rank, non_blocking=True
        ), spec_lengths.cuda(local_rank, non_blocking=True)
        y, y_lengths = y.cuda(local_rank, non_blocking=True), y_lengths.cuda(
            local_rank, non_blocking=True
        )
        speakers = speakers.cuda(local_rank, non_blocking=True)
        tone = tone.cuda(local_rank, non_blocking=True)
        language = language.cuda(local_rank, non_blocking=True)
        bert = bert.cuda(local_rank, non_blocking=True)
        ja_bert = ja_bert.cuda(local_rank, non_blocking=True)
        en_bert = en_bert.cuda(local_rank, non_blocking=True)

        with autocast(enabled=hps.train.bf16_run, dtype=torch.bfloat16):
            (
                y_hat,
                l_length,
                attn,
                ids_slice,
                x_mask,
                z_mask,
                (z, z_p, m_p, logs_p, m_q, logs_q),
                (hidden_x, logw, logw_, logw_sdp),
                g,
            ) = net_g(
                x,
                x_lengths,
                spec,
                spec_lengths,
                speakers,
                tone,
                language,
                bert,
                ja_bert,
                en_bert,
            )
            mel = spec_to_mel_torch(
                spec,
                hps.data.filter_length,
                hps.data.n_mel_channels,
                hps.data.sampling_rate,
                hps.data.mel_fmin,
                hps.data.mel_fmax,
            )
            y_mel = commons.slice_segments(
                mel, ids_slice, hps.train.segment_size // hps.data.hop_length
            )
            y_hat_mel = mel_spectrogram_torch(
                y_hat.squeeze(1).float(),
                hps.data.filter_length,
                hps.data.n_mel_channels,
                hps.data.sampling_rate,
                hps.data.hop_length,
                hps.data.win_length,
                hps.data.mel_fmin,
                hps.data.mel_fmax,
            )
            y = commons.slice_segments(
                y, ids_slice * hps.data.hop_length, hps.train.segment_size
            )  # slice

            # Discriminator
            y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach())

        with autocast(enabled=hps.train.bf16_run, dtype=torch.bfloat16):
loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(
11
2023-12-27 03:09:11+00:00
24k
open-mmlab/Amphion
models/tts/naturalspeech2/ns2_trainer.py
[ { "identifier": "Logger", "path": "utils/util.py", "snippet": "class Logger(object):\n def __init__(\n self,\n filename,\n level=\"info\",\n when=\"D\",\n backCount=10,\n fmt=\"%(asctime)s : %(message)s\",\n ):\n self.level_relations = {\n ...
import os
import shutil
import json
import time
import torch
import numpy as np
import torch.nn.functional as F
import accelerate
from utils.util import Logger, ValueWindow
from torch.utils.data import ConcatDataset, DataLoader
from models.tts.base.tts_trainer import TTSTrainer
from models.base.base_trainer import BaseTrainer
from models.base.base_sampler import VariableSampler
from models.tts.naturalspeech2.ns2_dataset import NS2Dataset, NS2Collator, batch_by_size
from models.tts.naturalspeech2.ns2_loss import (
    log_pitch_loss,
    log_dur_loss,
    diff_loss,
    diff_ce_loss,
)
from torch.utils.data.sampler import BatchSampler, SequentialSampler
from models.tts.naturalspeech2.ns2 import NaturalSpeech2
from torch.optim import Adam, AdamW
from torch.nn import MSELoss, L1Loss
from diffusers import get_scheduler
from accelerate.logging import get_logger
from accelerate.utils import ProjectConfiguration
19,716
                batch_sampler=VariableSampler(batches, drop_last=False),
                pin_memory=self.cfg.train.dataloader.pin_memory,
            )
            self.accelerator.wait_for_everyone()
        else:
            print("Use Normal Batchsize......")
            Dataset, Collator = self._build_dataset()
            train_dataset = Dataset(self.cfg, self.cfg.dataset[0], is_valid=False)
            train_collate = Collator(self.cfg)
            train_loader = DataLoader(
                train_dataset,
                shuffle=True,
                collate_fn=train_collate,
                batch_size=self.cfg.train.batch_size,
                num_workers=self.cfg.train.dataloader.num_worker,
                pin_memory=self.cfg.train.dataloader.pin_memory,
            )

            valid_dataset = Dataset(self.cfg, self.cfg.dataset[0], is_valid=True)
            valid_collate = Collator(self.cfg)
            valid_loader = DataLoader(
                valid_dataset,
                shuffle=True,
                collate_fn=valid_collate,
                batch_size=self.cfg.train.batch_size,
                num_workers=self.cfg.train.dataloader.num_worker,
                pin_memory=self.cfg.train.dataloader.pin_memory,
            )
            self.accelerator.wait_for_everyone()

        return train_loader, valid_loader

    def _build_optimizer(self):
        optimizer = torch.optim.AdamW(
            filter(lambda p: p.requires_grad, self.model.parameters()),
            **self.cfg.train.adam,
        )
        return optimizer

    def _build_scheduler(self):
        lr_scheduler = get_scheduler(
            self.cfg.train.lr_scheduler,
            optimizer=self.optimizer,
            num_warmup_steps=self.cfg.train.lr_warmup_steps,
            num_training_steps=self.cfg.train.num_train_steps,
        )
        return lr_scheduler

    def _build_criterion(self):
        criterion = torch.nn.L1Loss(reduction="mean")
        return criterion

    def write_summary(self, losses, stats):
        for key, value in losses.items():
            self.sw.add_scalar(key, value, self.step)

    def write_valid_summary(self, losses, stats):
        for key, value in losses.items():
            self.sw.add_scalar(key, value, self.step)

    def get_state_dict(self):
        state_dict = {
            "model": self.model.state_dict(),
            "optimizer": self.optimizer.state_dict(),
            "scheduler": self.scheduler.state_dict(),
            "step": self.step,
            "epoch": self.epoch,
            "batch_size": self.cfg.train.batch_size,
        }
        return state_dict

    def load_model(self, checkpoint):
        self.step = checkpoint["step"]
        self.epoch = checkpoint["epoch"]
        self.model.load_state_dict(checkpoint["model"])
        self.optimizer.load_state_dict(checkpoint["optimizer"])
        self.scheduler.load_state_dict(checkpoint["scheduler"])

    def _train_step(self, batch):
        train_losses = {}
        total_loss = 0
        train_stats = {}

        code = batch["code"]  # (B, 16, T)
        pitch = batch["pitch"]  # (B, T)
        duration = batch["duration"]  # (B, N)
        phone_id = batch["phone_id"]  # (B, N)
        ref_code = batch["ref_code"]  # (B, 16, T')
        phone_mask = batch["phone_mask"]  # (B, N)
        mask = batch["mask"]  # (B, T)
        ref_mask = batch["ref_mask"]  # (B, T')

        diff_out, prior_out = self.model(
            code=code,
            pitch=pitch,
            duration=duration,
            phone_id=phone_id,
            ref_code=ref_code,
            phone_mask=phone_mask,
            mask=mask,
            ref_mask=ref_mask,
        )

        # pitch loss
        pitch_loss = log_pitch_loss(prior_out["pitch_pred_log"], pitch, mask=mask)
        total_loss += pitch_loss
        train_losses["pitch_loss"] = pitch_loss

        # duration loss
        dur_loss = log_dur_loss(prior_out["dur_pred_log"], duration, mask=phone_mask)
        total_loss += dur_loss
        train_losses["dur_loss"] = dur_loss

        x0 = self.model.module.code_to_latent(code)
        if self.cfg.model.diffusion.diffusion_type == "diffusion":
            # diff loss x0
# Copyright (c) 2023 Amphion.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.


class NS2Trainer(TTSTrainer):
    def __init__(self, args, cfg):
        self.args = args
        self.cfg = cfg

        cfg.exp_name = args.exp_name

        self._init_accelerator()
        self.accelerator.wait_for_everyone()

        # Init logger
        with self.accelerator.main_process_first():
            if self.accelerator.is_main_process:
                os.makedirs(os.path.join(self.exp_dir, "checkpoint"), exist_ok=True)
                self.log_file = os.path.join(
                    os.path.join(self.exp_dir, "checkpoint"), "train.log"
                )
                self.logger = Logger(self.log_file, level=self.args.log_level).logger

        self.time_window = ValueWindow(50)

        if self.accelerator.is_main_process:
            # Log some info
            self.logger.info("=" * 56)
            self.logger.info("||\t\t" + "New training process started." + "\t\t||")
            self.logger.info("=" * 56)
            self.logger.info("\n")
            self.logger.debug(f"Using {args.log_level.upper()} logging level.")
            self.logger.info(f"Experiment name: {args.exp_name}")
            self.logger.info(f"Experiment directory: {self.exp_dir}")

        self.checkpoint_dir = os.path.join(self.exp_dir, "checkpoint")
        if self.accelerator.is_main_process:
            os.makedirs(self.checkpoint_dir, exist_ok=True)
        if self.accelerator.is_main_process:
            self.logger.debug(f"Checkpoint directory: {self.checkpoint_dir}")

        # init counts
        self.batch_count: int = 0
        self.step: int = 0
        self.epoch: int = 0
        self.max_epoch = (
            self.cfg.train.max_epoch if self.cfg.train.max_epoch > 0 else float("inf")
        )
        if self.accelerator.is_main_process:
            self.logger.info(
                "Max epoch: {}".format(
                    self.max_epoch if self.max_epoch < float("inf") else "Unlimited"
                )
            )

        # Check values
        if self.accelerator.is_main_process:
            self._check_basic_configs()
            # Set runtime configs
            self.save_checkpoint_stride = self.cfg.train.save_checkpoint_stride
            self.checkpoints_path = [
                [] for _ in range(len(self.save_checkpoint_stride))
            ]
            self.keep_last = [
                i if i > 0 else float("inf") for i in self.cfg.train.keep_last
            ]
            self.run_eval = self.cfg.train.run_eval

        # set random seed
        with self.accelerator.main_process_first():
            start = time.monotonic_ns()
            self._set_random_seed(self.cfg.train.random_seed)
            end = time.monotonic_ns()
            if self.accelerator.is_main_process:
                self.logger.debug(
                    f"Setting random seed done in {(end - start) / 1e6:.2f}ms"
                )
                self.logger.debug(f"Random seed: {self.cfg.train.random_seed}")

        # setup data_loader
        with self.accelerator.main_process_first():
            if self.accelerator.is_main_process:
                self.logger.info("Building dataset...")
            start = time.monotonic_ns()
            self.train_dataloader, self.valid_dataloader = self._build_dataloader()
            end = time.monotonic_ns()
            if self.accelerator.is_main_process:
                self.logger.info(
                    f"Building dataset done in {(end - start) / 1e6:.2f}ms"
                )

        # setup model
        with self.accelerator.main_process_first():
            if self.accelerator.is_main_process:
                self.logger.info("Building model...")
            start = time.monotonic_ns()
            self.model = self._build_model()
            end = time.monotonic_ns()
            if self.accelerator.is_main_process:
                self.logger.debug(self.model)
                self.logger.info(f"Building model done in {(end - start) / 1e6:.2f}ms")
                self.logger.info(
                    f"Model parameters: {self._count_parameters(self.model)/1e6:.2f}M"
                )

        # optimizer & scheduler
        with self.accelerator.main_process_first():
            if self.accelerator.is_main_process:
                self.logger.info("Building optimizer and scheduler...")
            start = time.monotonic_ns()
            self.optimizer = self._build_optimizer()
            self.scheduler = self._build_scheduler()
            end = time.monotonic_ns()
            if self.accelerator.is_main_process:
                self.logger.info(
                    f"Building optimizer and scheduler done in {(end - start) / 1e6:.2f}ms"
                )

        # accelerate prepare
        if not self.cfg.train.use_dynamic_batchsize:
            if self.accelerator.is_main_process:
                self.logger.info("Initializing accelerate...")
            start = time.monotonic_ns()
            (
                self.train_dataloader,
                self.valid_dataloader,
            ) = self.accelerator.prepare(
                self.train_dataloader,
                self.valid_dataloader,
            )

        if isinstance(self.model, dict):
            for key in self.model.keys():
                self.model[key] = self.accelerator.prepare(self.model[key])
        else:
            self.model = self.accelerator.prepare(self.model)

        if isinstance(self.optimizer, dict):
            for key in self.optimizer.keys():
                self.optimizer[key] = self.accelerator.prepare(self.optimizer[key])
        else:
            self.optimizer = self.accelerator.prepare(self.optimizer)

        if isinstance(self.scheduler, dict):
            for key in self.scheduler.keys():
                self.scheduler[key] = self.accelerator.prepare(self.scheduler[key])
        else:
            self.scheduler = self.accelerator.prepare(self.scheduler)

        end = time.monotonic_ns()
        if self.accelerator.is_main_process:
            self.logger.info(
                f"Initializing accelerate done in {(end - start) / 1e6:.2f}ms"
            )

        # create criterion
        with self.accelerator.main_process_first():
            if self.accelerator.is_main_process:
                self.logger.info("Building criterion...")
            start = time.monotonic_ns()
            self.criterion = self._build_criterion()
            end = time.monotonic_ns()
            if self.accelerator.is_main_process:
                self.logger.info(
                    f"Building criterion done in {(end - start) / 1e6:.2f}ms"
                )

        # TODO: Resume from ckpt need test/debug
        with self.accelerator.main_process_first():
            if args.resume:
                if self.accelerator.is_main_process:
                    self.logger.info("Resuming from checkpoint...")
                start = time.monotonic_ns()
                ckpt_path = self._load_model(
                    self.checkpoint_dir,
                    args.checkpoint_path,
                    resume_type=args.resume_type,
                )
                end = time.monotonic_ns()
                if self.accelerator.is_main_process:
                    self.logger.info(
                        f"Resuming from checkpoint done in {(end - start) / 1e6:.2f}ms"
                    )
                self.checkpoints_path = json.load(
                    open(os.path.join(ckpt_path, "ckpts.json"), "r")
                )

            self.checkpoint_dir = os.path.join(self.exp_dir, "checkpoint")
            if self.accelerator.is_main_process:
                os.makedirs(self.checkpoint_dir, exist_ok=True)
            if self.accelerator.is_main_process:
                self.logger.debug(f"Checkpoint directory: {self.checkpoint_dir}")

        # save config file path
        self.config_save_path = os.path.join(self.exp_dir, "args.json")

        # Only for TTS tasks
        self.task_type = "TTS"
        if self.accelerator.is_main_process:
            self.logger.info("Task type: {}".format(self.task_type))

    def _init_accelerator(self):
        self.exp_dir = os.path.join(
            os.path.abspath(self.cfg.log_dir), self.args.exp_name
        )
        project_config = ProjectConfiguration(
            project_dir=self.exp_dir,
            logging_dir=os.path.join(self.exp_dir, "log"),
        )
        # ddp_kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
        self.accelerator = accelerate.Accelerator(
            gradient_accumulation_steps=self.cfg.train.gradient_accumulation_step,
            log_with=self.cfg.train.tracker,
            project_config=project_config,
            # kwargs_handlers=[ddp_kwargs]
        )
        if self.accelerator.is_main_process:
            os.makedirs(project_config.project_dir, exist_ok=True)
            os.makedirs(project_config.logging_dir, exist_ok=True)
        with self.accelerator.main_process_first():
            self.accelerator.init_trackers(self.args.exp_name)

    def _build_model(self):
        model = NaturalSpeech2(cfg=self.cfg.model)
        return model

    def _build_dataset(self):
        return NS2Dataset, NS2Collator

    def _build_dataloader(self):
        if self.cfg.train.use_dynamic_batchsize:
            print("Use Dynamic Batchsize......")
            Dataset, Collator = self._build_dataset()
            train_dataset = Dataset(self.cfg, self.cfg.dataset[0], is_valid=False)
            train_collate = Collator(self.cfg)
            batch_sampler = batch_by_size(
                train_dataset.num_frame_indices,
                train_dataset.get_num_frames,
                max_tokens=self.cfg.train.max_tokens * self.accelerator.num_processes,
                max_sentences=self.cfg.train.max_sentences
                * self.accelerator.num_processes,
                required_batch_size_multiple=self.accelerator.num_processes,
            )
            np.random.seed(980205)
            np.random.shuffle(batch_sampler)
            print(batch_sampler[:1])
            batches = [
                x[
                    self.accelerator.local_process_index :: self.accelerator.num_processes
                ]
                for x in batch_sampler
                if len(x) % self.accelerator.num_processes == 0
            ]
            train_loader = DataLoader(
                train_dataset,
                collate_fn=train_collate,
                num_workers=self.cfg.train.dataloader.num_worker,
                batch_sampler=VariableSampler(
                    batches, drop_last=False, use_random_sampler=True
                ),
                pin_memory=self.cfg.train.dataloader.pin_memory,
            )
            self.accelerator.wait_for_everyone()

            valid_dataset = Dataset(self.cfg, self.cfg.dataset[0], is_valid=True)
            valid_collate = Collator(self.cfg)
            batch_sampler = batch_by_size(
                valid_dataset.num_frame_indices,
                valid_dataset.get_num_frames,
                max_tokens=self.cfg.train.max_tokens * self.accelerator.num_processes,
                max_sentences=self.cfg.train.max_sentences
                * self.accelerator.num_processes,
                required_batch_size_multiple=self.accelerator.num_processes,
            )
            batches = [
                x[
                    self.accelerator.local_process_index :: self.accelerator.num_processes
                ]
                for x in batch_sampler
                if len(x) % self.accelerator.num_processes == 0
            ]
            valid_loader = DataLoader(
                valid_dataset,
                collate_fn=valid_collate,
                num_workers=self.cfg.train.dataloader.num_worker,
                batch_sampler=VariableSampler(batches, drop_last=False),
                pin_memory=self.cfg.train.dataloader.pin_memory,
            )
            self.accelerator.wait_for_everyone()
        else:
            print("Use Normal Batchsize......")
            Dataset, Collator = self._build_dataset()
            train_dataset = Dataset(self.cfg, self.cfg.dataset[0], is_valid=False)
            train_collate = Collator(self.cfg)
            train_loader = DataLoader(
                train_dataset,
                shuffle=True,
                collate_fn=train_collate,
                batch_size=self.cfg.train.batch_size,
                num_workers=self.cfg.train.dataloader.num_worker,
                pin_memory=self.cfg.train.dataloader.pin_memory,
            )

            valid_dataset = Dataset(self.cfg, self.cfg.dataset[0], is_valid=True)
            valid_collate = Collator(self.cfg)
            valid_loader = DataLoader(
                valid_dataset,
                shuffle=True,
                collate_fn=valid_collate,
                batch_size=self.cfg.train.batch_size,
                num_workers=self.cfg.train.dataloader.num_worker,
                pin_memory=self.cfg.train.dataloader.pin_memory,
            )
            self.accelerator.wait_for_everyone()

        return train_loader, valid_loader

    def _build_optimizer(self):
        optimizer = torch.optim.AdamW(
            filter(lambda p: p.requires_grad, self.model.parameters()),
            **self.cfg.train.adam,
        )
        return optimizer

    def _build_scheduler(self):
        lr_scheduler = get_scheduler(
            self.cfg.train.lr_scheduler,
            optimizer=self.optimizer,
            num_warmup_steps=self.cfg.train.lr_warmup_steps,
            num_training_steps=self.cfg.train.num_train_steps,
        )
        return lr_scheduler

    def _build_criterion(self):
        criterion = torch.nn.L1Loss(reduction="mean")
        return criterion

    def write_summary(self, losses, stats):
        for key, value in losses.items():
            self.sw.add_scalar(key, value, self.step)

    def write_valid_summary(self, losses, stats):
        for key, value in losses.items():
            self.sw.add_scalar(key, value, self.step)

    def get_state_dict(self):
        state_dict = {
            "model": self.model.state_dict(),
            "optimizer": self.optimizer.state_dict(),
            "scheduler": self.scheduler.state_dict(),
            "step": self.step,
            "epoch": self.epoch,
            "batch_size": self.cfg.train.batch_size,
        }
        return state_dict

    def load_model(self, checkpoint):
        self.step = checkpoint["step"]
        self.epoch = checkpoint["epoch"]
        self.model.load_state_dict(checkpoint["model"])
        self.optimizer.load_state_dict(checkpoint["optimizer"])
        self.scheduler.load_state_dict(checkpoint["scheduler"])

    def _train_step(self, batch):
        train_losses = {}
        total_loss = 0
        train_stats = {}

        code = batch["code"]  # (B, 16, T)
        pitch = batch["pitch"]  # (B, T)
        duration = batch["duration"]  # (B, N)
        phone_id = batch["phone_id"]  # (B, N)
        ref_code = batch["ref_code"]  # (B, 16, T')
        phone_mask = batch["phone_mask"]  # (B, N)
        mask = batch["mask"]  # (B, T)
        ref_mask = batch["ref_mask"]  # (B, T')

        diff_out, prior_out = self.model(
            code=code,
            pitch=pitch,
            duration=duration,
            phone_id=phone_id,
            ref_code=ref_code,
            phone_mask=phone_mask,
            mask=mask,
            ref_mask=ref_mask,
        )

        # pitch loss
        pitch_loss = log_pitch_loss(prior_out["pitch_pred_log"], pitch, mask=mask)
        total_loss += pitch_loss
        train_losses["pitch_loss"] = pitch_loss

        # duration loss
        dur_loss = log_dur_loss(prior_out["dur_pred_log"], duration, mask=phone_mask)
        total_loss += dur_loss
        train_losses["dur_loss"] = dur_loss

        x0 = self.model.module.code_to_latent(code)
        if self.cfg.model.diffusion.diffusion_type == "diffusion":
            # diff loss x0
diff_loss_x0 = diff_loss(diff_out["x0_pred"], x0, mask=mask)
10
2023-11-15 09:19:27+00:00
24k
BobaZooba/xllm
tests/unit/core/test_dependencies.py
[ { "identifier": "LMCollator", "path": "src/xllm/collators/lm.py", "snippet": "class LMCollator(BaseCollator):\n \"\"\"\n `LMCollator` is a data collator class specifically designed to prepare batches of data for language modeling tasks.\n Extending the `BaseCollator`, it adapts the general data...
import pytest
from peft import PeftModel
from pytest import MonkeyPatch
from torch import Tensor
from transformers import (
    BitsAndBytesConfig,
    GPTQConfig,
    PreTrainedTokenizer,
    TrainingArguments,
)
from src.xllm.collators.lm import LMCollator
from src.xllm.collators.registry import collators_registry
from src.xllm.core.config import Config
from src.xllm.core.dependencies import (
    build_collator,
    build_dataset,
    build_model,
    build_quantization_config,
    build_tokenizer,
    build_trainer,
    build_training_arguments,
)
from src.xllm.datasets.registry import datasets_registry
from src.xllm.datasets.soda import SodaDataset
from src.xllm.trainers.registry import trainers_registry
from tests.helpers.constants import LLAMA_TOKENIZER_DIR
from tests.helpers.dummy_data import DATA, DummyDataset
from tests.helpers.patches import patch_from_pretrained_auto_causal_lm
18,389
# Copyright 2023 Boris Zubarev. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


def test_build_training_arguments(config: Config):
    arguments = build_training_arguments(config=config)
    assert arguments.per_device_train_batch_size == config.per_device_train_batch_size
    assert arguments.deepspeed is None


def test_build_dataset_train(path_to_train_dummy_data: str):
# Copyright 2023 Boris Zubarev. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


def test_build_training_arguments(config: Config):
    arguments = build_training_arguments(config=config)
    assert arguments.per_device_train_batch_size == config.per_device_train_batch_size
    assert arguments.deepspeed is None


def test_build_dataset_train(path_to_train_dummy_data: str):
datasets_registry.add(key="dummy", value=DummyDataset)
15
2023-11-10 17:55:03+00:00
24k
AMAAI-Lab/mustango
diffusers/src/diffusers/models/unet_2d_blocks.py
[ { "identifier": "AdaGroupNorm", "path": "diffusers/src/diffusers/models/attention.py", "snippet": "class AdaGroupNorm(nn.Module):\n \"\"\"\n GroupNorm layer modified to incorporate timestep embeddings.\n \"\"\"\n\n def __init__(\n self, embedding_dim: int, out_dim: int, num_groups: in...
from typing import Any, Dict, Optional, Tuple

import numpy as np
import torch
from torch import nn

from .attention import AdaGroupNorm, AttentionBlock
from .attention_processor import Attention, AttnAddedKVProcessor
from .dual_transformer_2d import DualTransformer2DModel
from .resnet import Downsample2D, FirDownsample2D, FirUpsample2D, KDownsample2D, KUpsample2D, ResnetBlock2D, Upsample2D
from .transformer_2d import Transformer2DModel, Transformer2DModelOutput
18,209
class AttnUpDecoderBlock2D(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        dropout: float = 0.0,
        num_layers: int = 1,
        resnet_eps: float = 1e-6,
        resnet_time_scale_shift: str = "default",
        resnet_act_fn: str = "swish",
        resnet_groups: int = 32,
        resnet_pre_norm: bool = True,
        attn_num_head_channels=1,
        output_scale_factor=1.0,
        add_upsample=True,
    ):
        super().__init__()
        resnets = []
        attentions = []

        for i in range(num_layers):
            input_channels = in_channels if i == 0 else out_channels

            resnets.append(
                ResnetBlock2D(
                    in_channels=input_channels,
                    out_channels=out_channels,
                    temb_channels=None,
                    eps=resnet_eps,
                    groups=resnet_groups,
                    dropout=dropout,
                    time_embedding_norm=resnet_time_scale_shift,
                    non_linearity=resnet_act_fn,
                    output_scale_factor=output_scale_factor,
                    pre_norm=resnet_pre_norm,
                )
            )
            attentions.append(
                AttentionBlock(
                    out_channels,
                    num_head_channels=attn_num_head_channels,
                    rescale_output_factor=output_scale_factor,
                    eps=resnet_eps,
                    norm_num_groups=resnet_groups,
                )
            )

        self.attentions = nn.ModuleList(attentions)
        self.resnets = nn.ModuleList(resnets)

        if add_upsample:
            self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)])
        else:
            self.upsamplers = None

    def forward(self, hidden_states):
        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb=None)
            hidden_states = attn(hidden_states)

        if self.upsamplers is not None:
            for upsampler in self.upsamplers:
                hidden_states = upsampler(hidden_states)

        return hidden_states


class AttnSkipUpBlock2D(nn.Module):
    def __init__(
        self,
        in_channels: int,
        prev_output_channel: int,
        out_channels: int,
        temb_channels: int,
        dropout: float = 0.0,
        num_layers: int = 1,
        resnet_eps: float = 1e-6,
        resnet_time_scale_shift: str = "default",
        resnet_act_fn: str = "swish",
        resnet_pre_norm: bool = True,
        attn_num_head_channels=1,
        output_scale_factor=np.sqrt(2.0),
        upsample_padding=1,
        add_upsample=True,
    ):
        super().__init__()
        self.attentions = nn.ModuleList([])
        self.resnets = nn.ModuleList([])

        for i in range(num_layers):
            res_skip_channels = in_channels if (i == num_layers - 1) else out_channels
            resnet_in_channels = prev_output_channel if i == 0 else out_channels

            self.resnets.append(
                ResnetBlock2D(
                    in_channels=resnet_in_channels + res_skip_channels,
                    out_channels=out_channels,
                    temb_channels=temb_channels,
                    eps=resnet_eps,
                    groups=min(resnet_in_channels + res_skip_channels // 4, 32),
                    groups_out=min(out_channels // 4, 32),
                    dropout=dropout,
                    time_embedding_norm=resnet_time_scale_shift,
                    non_linearity=resnet_act_fn,
                    output_scale_factor=output_scale_factor,
                    pre_norm=resnet_pre_norm,
                )
            )

        self.attentions.append(
            AttentionBlock(
                out_channels,
                num_head_channels=attn_num_head_channels,
                rescale_output_factor=output_scale_factor,
                eps=resnet_eps,
            )
        )
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. def get_down_block( down_block_type, num_layers, in_channels, out_channels, temb_channels, add_downsample, resnet_eps, resnet_act_fn, attn_num_head_channels, resnet_groups=None, cross_attention_dim=None, downsample_padding=None, dual_cross_attention=False, use_linear_projection=False, only_cross_attention=False, upcast_attention=False, resnet_time_scale_shift="default", ): down_block_type = down_block_type[7:] if down_block_type.startswith("UNetRes") else down_block_type if down_block_type == "DownBlock2D": return DownBlock2D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, downsample_padding=downsample_padding, resnet_time_scale_shift=resnet_time_scale_shift, ) elif down_block_type == "ResnetDownsampleBlock2D": return ResnetDownsampleBlock2D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, resnet_time_scale_shift=resnet_time_scale_shift, ) elif down_block_type == "AttnDownBlock2D": return AttnDownBlock2D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, downsample_padding=downsample_padding, attn_num_head_channels=attn_num_head_channels, resnet_time_scale_shift=resnet_time_scale_shift, ) elif down_block_type == "CrossAttnDownBlock2D": if cross_attention_dim is None: raise ValueError("cross_attention_dim must be specified for CrossAttnDownBlock2D") return CrossAttnDownBlock2D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, downsample_padding=downsample_padding, cross_attention_dim=cross_attention_dim, attn_num_head_channels=attn_num_head_channels, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, ) elif down_block_type == "CrossAttnDownBlock2DMusic": if cross_attention_dim is None: raise ValueError("cross_attention_dim must be specified for CrossAttnDownBlock2D") return CrossAttnDownBlock2DMusic( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, downsample_padding=downsample_padding, cross_attention_dim=cross_attention_dim, attn_num_head_channels=attn_num_head_channels, dual_cross_attention=dual_cross_attention, 
use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, ) elif down_block_type == "SimpleCrossAttnDownBlock2D": if cross_attention_dim is None: raise ValueError("cross_attention_dim must be specified for SimpleCrossAttnDownBlock2D") return SimpleCrossAttnDownBlock2D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, cross_attention_dim=cross_attention_dim, attn_num_head_channels=attn_num_head_channels, resnet_time_scale_shift=resnet_time_scale_shift, ) elif down_block_type == "SkipDownBlock2D": return SkipDownBlock2D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, downsample_padding=downsample_padding, resnet_time_scale_shift=resnet_time_scale_shift, ) elif down_block_type == "AttnSkipDownBlock2D": return AttnSkipDownBlock2D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, downsample_padding=downsample_padding, attn_num_head_channels=attn_num_head_channels, resnet_time_scale_shift=resnet_time_scale_shift, ) elif down_block_type == "DownEncoderBlock2D": return DownEncoderBlock2D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, downsample_padding=downsample_padding, resnet_time_scale_shift=resnet_time_scale_shift, ) elif down_block_type == "AttnDownEncoderBlock2D": return AttnDownEncoderBlock2D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, downsample_padding=downsample_padding, attn_num_head_channels=attn_num_head_channels, resnet_time_scale_shift=resnet_time_scale_shift, ) elif down_block_type == "KDownBlock2D": return KDownBlock2D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, ) elif down_block_type == "KCrossAttnDownBlock2D": return KCrossAttnDownBlock2D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, cross_attention_dim=cross_attention_dim, attn_num_head_channels=attn_num_head_channels, add_self_attention=True if not add_downsample else False, ) raise ValueError(f"{down_block_type} does not exist.") def get_up_block( up_block_type, num_layers, in_channels, out_channels, prev_output_channel, temb_channels, add_upsample, resnet_eps, resnet_act_fn, attn_num_head_channels, resnet_groups=None, cross_attention_dim=None, dual_cross_attention=False, use_linear_projection=False, only_cross_attention=False, upcast_attention=False, resnet_time_scale_shift="default", ): up_block_type = up_block_type[7:] if up_block_type.startswith("UNetRes") else up_block_type if up_block_type == "UpBlock2D": return UpBlock2D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, 
            prev_output_channel=prev_output_channel,
            temb_channels=temb_channels,
            add_upsample=add_upsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
            resnet_time_scale_shift=resnet_time_scale_shift,
        )
    elif up_block_type == "ResnetUpsampleBlock2D":
        return ResnetUpsampleBlock2D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            prev_output_channel=prev_output_channel,
            temb_channels=temb_channels,
            add_upsample=add_upsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
            resnet_time_scale_shift=resnet_time_scale_shift,
        )
    elif up_block_type == "CrossAttnUpBlock2D":
        if cross_attention_dim is None:
            raise ValueError("cross_attention_dim must be specified for CrossAttnUpBlock2D")
        return CrossAttnUpBlock2D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            prev_output_channel=prev_output_channel,
            temb_channels=temb_channels,
            add_upsample=add_upsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
            cross_attention_dim=cross_attention_dim,
            attn_num_head_channels=attn_num_head_channels,
            dual_cross_attention=dual_cross_attention,
            use_linear_projection=use_linear_projection,
            only_cross_attention=only_cross_attention,
            upcast_attention=upcast_attention,
            resnet_time_scale_shift=resnet_time_scale_shift,
        )
    elif up_block_type == "CrossAttnUpBlock2DMusic":
        if cross_attention_dim is None:
            raise ValueError("cross_attention_dim must be specified for CrossAttnUpBlock2DMusic")
        return CrossAttnUpBlock2DMusic(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            prev_output_channel=prev_output_channel,
            temb_channels=temb_channels,
            add_upsample=add_upsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
            cross_attention_dim=cross_attention_dim,
            attn_num_head_channels=attn_num_head_channels,
            dual_cross_attention=dual_cross_attention,
            use_linear_projection=use_linear_projection,
            only_cross_attention=only_cross_attention,
            upcast_attention=upcast_attention,
            resnet_time_scale_shift=resnet_time_scale_shift,
        )
    elif up_block_type == "SimpleCrossAttnUpBlock2D":
        if cross_attention_dim is None:
            raise ValueError("cross_attention_dim must be specified for SimpleCrossAttnUpBlock2D")
        return SimpleCrossAttnUpBlock2D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            prev_output_channel=prev_output_channel,
            temb_channels=temb_channels,
            add_upsample=add_upsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
            cross_attention_dim=cross_attention_dim,
            attn_num_head_channels=attn_num_head_channels,
            resnet_time_scale_shift=resnet_time_scale_shift,
        )
    elif up_block_type == "AttnUpBlock2D":
        return AttnUpBlock2D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            prev_output_channel=prev_output_channel,
            temb_channels=temb_channels,
            add_upsample=add_upsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
            attn_num_head_channels=attn_num_head_channels,
            resnet_time_scale_shift=resnet_time_scale_shift,
        )
    elif up_block_type == "SkipUpBlock2D":
        return SkipUpBlock2D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            prev_output_channel=prev_output_channel,
            temb_channels=temb_channels,
            add_upsample=add_upsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_time_scale_shift=resnet_time_scale_shift,
        )
    elif up_block_type == "AttnSkipUpBlock2D":
        return AttnSkipUpBlock2D(
            num_layers=num_layers,
in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, attn_num_head_channels=attn_num_head_channels, resnet_time_scale_shift=resnet_time_scale_shift, ) elif up_block_type == "UpDecoderBlock2D": return UpDecoderBlock2D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, resnet_time_scale_shift=resnet_time_scale_shift, ) elif up_block_type == "AttnUpDecoderBlock2D": return AttnUpDecoderBlock2D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, attn_num_head_channels=attn_num_head_channels, resnet_time_scale_shift=resnet_time_scale_shift, ) elif up_block_type == "KUpBlock2D": return KUpBlock2D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, ) elif up_block_type == "KCrossAttnUpBlock2D": return KCrossAttnUpBlock2D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, cross_attention_dim=cross_attention_dim, attn_num_head_channels=attn_num_head_channels, ) raise ValueError(f"{up_block_type} does not exist.") class UNetMidBlock2D(nn.Module): def __init__( self, in_channels: int, temb_channels: int, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, add_attention: bool = True, attn_num_head_channels=1, output_scale_factor=1.0, ): super().__init__() resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32) self.add_attention = add_attention # there is always at least one resnet resnets = [ ResnetBlock2D( in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ] attentions = [] for _ in range(num_layers): if self.add_attention: attentions.append( AttentionBlock( in_channels, num_head_channels=attn_num_head_channels, rescale_output_factor=output_scale_factor, eps=resnet_eps, norm_num_groups=resnet_groups, ) ) else: attentions.append(None) resnets.append( ResnetBlock2D( in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) def forward(self, hidden_states, temb=None): hidden_states = self.resnets[0](hidden_states, temb) for attn, resnet in zip(self.attentions, self.resnets[1:]): if attn is not None: hidden_states = attn(hidden_states) hidden_states = resnet(hidden_states, temb) return hidden_states class UNetMidBlock2DCrossAttn(nn.Module): def __init__( self, in_channels: int, temb_channels: int, dropout: float = 0.0, num_layers: int = 1, 
resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, attn_num_head_channels=1, output_scale_factor=1.0, cross_attention_dim=1280, dual_cross_attention=False, use_linear_projection=False, upcast_attention=False, ): super().__init__() self.has_cross_attention = True self.attn_num_head_channels = attn_num_head_channels resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32) # there is always at least one resnet resnets = [ ResnetBlock2D( in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ] attentions = [] for _ in range(num_layers): if not dual_cross_attention: attentions.append( Transformer2DModel( attn_num_head_channels, in_channels // attn_num_head_channels, in_channels=in_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, use_linear_projection=use_linear_projection, upcast_attention=upcast_attention, ) ) else: attentions.append( DualTransformer2DModel( attn_num_head_channels, in_channels // attn_num_head_channels, in_channels=in_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, ) ) resnets.append( ResnetBlock2D( in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) def forward( self, hidden_states: torch.FloatTensor, temb: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, ) -> torch.FloatTensor: hidden_states = self.resnets[0](hidden_states, temb) for attn, resnet in zip(self.attentions, self.resnets[1:]): output: Transformer2DModelOutput = attn( hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, ) hidden_states = output.sample hidden_states = resnet(hidden_states, temb) return hidden_states class UNetMidBlock2DCrossAttnMusic(nn.Module): def __init__( self, in_channels: int, temb_channels: int, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, attn_num_head_channels=1, output_scale_factor=1.0, cross_attention_dim=1280, dual_cross_attention=False, use_linear_projection=False, upcast_attention=False, ): super().__init__() self.has_cross_attention = True self.attn_num_head_channels = attn_num_head_channels resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32) # there is always at least one resnet resnets = [ ResnetBlock2D( in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, 
non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ] attentions = [] attentions2 = [] attentions3 = [] for _ in range(num_layers): if not dual_cross_attention: attentions.append( Transformer2DModel( attn_num_head_channels, in_channels // attn_num_head_channels, in_channels=in_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, use_linear_projection=use_linear_projection, upcast_attention=upcast_attention, ) ) attentions2.append( Transformer2DModel( attn_num_head_channels, in_channels // attn_num_head_channels, in_channels=in_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, use_linear_projection=use_linear_projection, upcast_attention=upcast_attention, ) ) attentions3.append( Transformer2DModel( attn_num_head_channels, in_channels // attn_num_head_channels, in_channels=in_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, use_linear_projection=use_linear_projection, upcast_attention=upcast_attention, ) ) else: attentions.append( DualTransformer2DModel( attn_num_head_channels, in_channels // attn_num_head_channels, in_channels=in_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, ) ) resnets.append( ResnetBlock2D( in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) self.attentions = nn.ModuleList(attentions) self.attentions2 = nn.ModuleList(attentions2) self.attentions3 = nn.ModuleList(attentions3) self.resnets = nn.ModuleList(resnets) def forward( self, hidden_states: torch.FloatTensor, temb: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, beat_features = None, chord_features = None, attention_mask: Optional[torch.FloatTensor] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, beat_attention_mask = None, chord_attention_mask = None ) -> torch.FloatTensor: hidden_states = self.resnets[0](hidden_states, temb) for attn, attn2, attn3, resnet in zip(self.attentions, self.attentions2,self.attentions3, self.resnets[1:]): output: Transformer2DModelOutput = attn( hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, ) hidden_states = output.sample hidden_states = attn2( hidden_states, encoder_hidden_states=beat_features, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=beat_attention_mask, ).sample hidden_states = attn3( hidden_states, encoder_hidden_states=chord_features, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=chord_attention_mask, ).sample hidden_states = resnet(hidden_states, temb) return hidden_states class UNetMidBlock2DSimpleCrossAttn(nn.Module): def __init__( self, in_channels: int, temb_channels: int, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, attn_num_head_channels=1, output_scale_factor=1.0, cross_attention_dim=1280, ): 
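        # Unlike UNetMidBlock2DCrossAttn, this "simple" mid-block pairs each
        # ResnetBlock2D with a plain Attention layer: conditioning enters through added
        # key/value projections (added_kv_proj_dim=cross_attention_dim, handled by
        # AttnAddedKVProcessor) rather than through a full Transformer2DModel.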
super().__init__() self.has_cross_attention = True self.attn_num_head_channels = attn_num_head_channels resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32) self.num_heads = in_channels // self.attn_num_head_channels # there is always at least one resnet resnets = [ ResnetBlock2D( in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ] attentions = [] for _ in range(num_layers): attentions.append( Attention( query_dim=in_channels, cross_attention_dim=in_channels, heads=self.num_heads, dim_head=attn_num_head_channels, added_kv_proj_dim=cross_attention_dim, norm_num_groups=resnet_groups, bias=True, upcast_softmax=True, processor=AttnAddedKVProcessor(), ) ) resnets.append( ResnetBlock2D( in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) def forward( self, hidden_states, temb=None, encoder_hidden_states=None, attention_mask=None, cross_attention_kwargs=None ): cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {} hidden_states = self.resnets[0](hidden_states, temb) for attn, resnet in zip(self.attentions, self.resnets[1:]): # attn hidden_states = attn( hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, **cross_attention_kwargs, ) # resnet hidden_states = resnet(hidden_states, temb) return hidden_states class AttnDownBlock2D(nn.Module): def __init__( self, in_channels: int, out_channels: int, temb_channels: int, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, attn_num_head_channels=1, output_scale_factor=1.0, downsample_padding=1, add_downsample=True, ): super().__init__() resnets = [] attentions = [] for i in range(num_layers): in_channels = in_channels if i == 0 else out_channels resnets.append( ResnetBlock2D( in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) attentions.append( AttentionBlock( out_channels, num_head_channels=attn_num_head_channels, rescale_output_factor=output_scale_factor, eps=resnet_eps, norm_num_groups=resnet_groups, ) ) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) if add_downsample: self.downsamplers = nn.ModuleList( [ Downsample2D( out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op" ) ] ) else: self.downsamplers = None def forward(self, hidden_states, temb=None): output_states = () for resnet, attn in zip(self.resnets, self.attentions): hidden_states = resnet(hidden_states, temb) hidden_states = attn(hidden_states) output_states += (hidden_states,) if self.downsamplers is not None: for downsampler in self.downsamplers: hidden_states = downsampler(hidden_states) output_states += 
(hidden_states,) return hidden_states, output_states class CrossAttnDownBlock2D(nn.Module): def __init__( self, in_channels: int, out_channels: int, temb_channels: int, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, attn_num_head_channels=1, cross_attention_dim=1280, output_scale_factor=1.0, downsample_padding=1, add_downsample=True, dual_cross_attention=False, use_linear_projection=False, only_cross_attention=False, upcast_attention=False, ): super().__init__() resnets = [] attentions = [] self.has_cross_attention = True self.attn_num_head_channels = attn_num_head_channels for i in range(num_layers): in_channels = in_channels if i == 0 else out_channels resnets.append( ResnetBlock2D( in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) if not dual_cross_attention: attentions.append( Transformer2DModel( attn_num_head_channels, out_channels // attn_num_head_channels, in_channels=out_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, ) ) else: attentions.append( DualTransformer2DModel( attn_num_head_channels, out_channels // attn_num_head_channels, in_channels=out_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, ) ) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) if add_downsample: self.downsamplers = nn.ModuleList( [ Downsample2D( out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op" ) ] ) else: self.downsamplers = None self.gradient_checkpointing = False def forward( self, hidden_states: torch.FloatTensor, temb: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, ): output_states = () for resnet, attn in zip(self.resnets, self.attentions): if self.training and self.gradient_checkpointing: def create_custom_forward(module, return_dict=None): def custom_forward(*inputs): if return_dict is not None: return module(*inputs, return_dict=return_dict) else: return module(*inputs) return custom_forward hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb) hidden_states = torch.utils.checkpoint.checkpoint( create_custom_forward(attn, return_dict=False), hidden_states, encoder_hidden_states, None, # timestep None, # class_labels cross_attention_kwargs, attention_mask, encoder_attention_mask, )[0] else: hidden_states = resnet(hidden_states, temb) hidden_states = attn( hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, ).sample output_states += (hidden_states,) if self.downsamplers is not None: for downsampler in self.downsamplers: hidden_states = downsampler(hidden_states) output_states += (hidden_states,) return hidden_states, output_states class 
CrossAttnDownBlock2DMusic(nn.Module): def __init__( self, in_channels: int, out_channels: int, temb_channels: int, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, attn_num_head_channels=1, cross_attention_dim=1280, output_scale_factor=1.0, downsample_padding=1, add_downsample=True, dual_cross_attention=False, use_linear_projection=False, only_cross_attention=False, upcast_attention=False, ): super().__init__() resnets = [] attentions = [] attentions2 = [] attentions3 = [] self.has_cross_attention = True self.attn_num_head_channels = attn_num_head_channels for i in range(num_layers): in_channels = in_channels if i == 0 else out_channels resnets.append( ResnetBlock2D( in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) if not dual_cross_attention: attentions.append( Transformer2DModel( attn_num_head_channels, out_channels // attn_num_head_channels, in_channels=out_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, )) attentions2.append( Transformer2DModel( attn_num_head_channels, out_channels // attn_num_head_channels, in_channels=out_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, )) attentions3.append( Transformer2DModel( attn_num_head_channels, out_channels // attn_num_head_channels, in_channels=out_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, ) ) else: attentions.append( DualTransformer2DModel( attn_num_head_channels, out_channels // attn_num_head_channels, in_channels=out_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, ) ) self.attentions = nn.ModuleList(attentions) self.attentions2 = nn.ModuleList(attentions2) self.attentions3 = nn.ModuleList(attentions3) self.resnets = nn.ModuleList(resnets) if add_downsample: self.downsamplers = nn.ModuleList( [ Downsample2D( out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op" ) ] ) else: self.downsamplers = None self.gradient_checkpointing = False def forward( self, hidden_states: torch.FloatTensor, temb: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, beat_features = None, chord_features = None, attention_mask: Optional[torch.FloatTensor] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, beat_attention_mask = None, chord_attention_mask = None ): output_states = () for resnet, attn, attn2, attn3 in zip(self.resnets, self.attentions, self.attentions2, self.attentions3): if self.training and self.gradient_checkpointing: def create_custom_forward(module, return_dict=None): def custom_forward(*inputs): if return_dict is not None: return module(*inputs, 
return_dict=return_dict) else: return module(*inputs) return custom_forward hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb) hidden_states = torch.utils.checkpoint.checkpoint( create_custom_forward(attn, return_dict=False), hidden_states, encoder_hidden_states, None, # timestep None, # class_labels cross_attention_kwargs, attention_mask, encoder_attention_mask, )[0] else: # print("checking downsampling shape", encoder_hidden_states.shape, beat_features.shape, chord_features.shape, encoder_attention_mask.shape, beat_attention_mask.shape, chord_attention_mask.shape) hidden_states = resnet(hidden_states, temb) hidden_states = attn( hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, ).sample hidden_states = attn2( hidden_states, encoder_hidden_states=beat_features, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=beat_attention_mask, ).sample hidden_states = attn3( hidden_states, encoder_hidden_states=chord_features, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=chord_attention_mask, ).sample output_states += (hidden_states,) if self.downsamplers is not None: for downsampler in self.downsamplers: hidden_states = downsampler(hidden_states) output_states += (hidden_states,) return hidden_states, output_states class DownBlock2D(nn.Module): def __init__( self, in_channels: int, out_channels: int, temb_channels: int, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, output_scale_factor=1.0, add_downsample=True, downsample_padding=1, ): super().__init__() resnets = [] for i in range(num_layers): in_channels = in_channels if i == 0 else out_channels resnets.append( ResnetBlock2D( in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) self.resnets = nn.ModuleList(resnets) if add_downsample: self.downsamplers = nn.ModuleList( [ Downsample2D( out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op" ) ] ) else: self.downsamplers = None self.gradient_checkpointing = False def forward(self, hidden_states, temb=None): output_states = () for resnet in self.resnets: if self.training and self.gradient_checkpointing: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs) return custom_forward hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb) else: hidden_states = resnet(hidden_states, temb) output_states += (hidden_states,) if self.downsamplers is not None: for downsampler in self.downsamplers: hidden_states = downsampler(hidden_states) output_states += (hidden_states,) return hidden_states, output_states class DownEncoderBlock2D(nn.Module): def __init__( self, in_channels: int, out_channels: int, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, output_scale_factor=1.0, add_downsample=True, 
downsample_padding=1, ): super().__init__() resnets = [] for i in range(num_layers): in_channels = in_channels if i == 0 else out_channels resnets.append( ResnetBlock2D( in_channels=in_channels, out_channels=out_channels, temb_channels=None, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) self.resnets = nn.ModuleList(resnets) if add_downsample: self.downsamplers = nn.ModuleList( [ Downsample2D( out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op" ) ] ) else: self.downsamplers = None def forward(self, hidden_states): for resnet in self.resnets: hidden_states = resnet(hidden_states, temb=None) if self.downsamplers is not None: for downsampler in self.downsamplers: hidden_states = downsampler(hidden_states) return hidden_states class AttnDownEncoderBlock2D(nn.Module): def __init__( self, in_channels: int, out_channels: int, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, attn_num_head_channels=1, output_scale_factor=1.0, add_downsample=True, downsample_padding=1, ): super().__init__() resnets = [] attentions = [] for i in range(num_layers): in_channels = in_channels if i == 0 else out_channels resnets.append( ResnetBlock2D( in_channels=in_channels, out_channels=out_channels, temb_channels=None, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) attentions.append( AttentionBlock( out_channels, num_head_channels=attn_num_head_channels, rescale_output_factor=output_scale_factor, eps=resnet_eps, norm_num_groups=resnet_groups, ) ) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) if add_downsample: self.downsamplers = nn.ModuleList( [ Downsample2D( out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op" ) ] ) else: self.downsamplers = None def forward(self, hidden_states): for resnet, attn in zip(self.resnets, self.attentions): hidden_states = resnet(hidden_states, temb=None) hidden_states = attn(hidden_states) if self.downsamplers is not None: for downsampler in self.downsamplers: hidden_states = downsampler(hidden_states) return hidden_states class AttnSkipDownBlock2D(nn.Module): def __init__( self, in_channels: int, out_channels: int, temb_channels: int, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_pre_norm: bool = True, attn_num_head_channels=1, output_scale_factor=np.sqrt(2.0), downsample_padding=1, add_downsample=True, ): super().__init__() self.attentions = nn.ModuleList([]) self.resnets = nn.ModuleList([]) for i in range(num_layers): in_channels = in_channels if i == 0 else out_channels self.resnets.append( ResnetBlock2D( in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=min(in_channels // 4, 32), groups_out=min(out_channels // 4, 32), dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) self.attentions.append( AttentionBlock( out_channels, 
num_head_channels=attn_num_head_channels, rescale_output_factor=output_scale_factor, eps=resnet_eps, ) ) if add_downsample: self.resnet_down = ResnetBlock2D( in_channels=out_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=min(out_channels // 4, 32), dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, use_in_shortcut=True, down=True, kernel="fir", ) self.downsamplers = nn.ModuleList([FirDownsample2D(out_channels, out_channels=out_channels)]) self.skip_conv = nn.Conv2d(3, out_channels, kernel_size=(1, 1), stride=(1, 1)) else: self.resnet_down = None self.downsamplers = None self.skip_conv = None def forward(self, hidden_states, temb=None, skip_sample=None): output_states = () for resnet, attn in zip(self.resnets, self.attentions): hidden_states = resnet(hidden_states, temb) hidden_states = attn(hidden_states) output_states += (hidden_states,) if self.downsamplers is not None: hidden_states = self.resnet_down(hidden_states, temb) for downsampler in self.downsamplers: skip_sample = downsampler(skip_sample) hidden_states = self.skip_conv(skip_sample) + hidden_states output_states += (hidden_states,) return hidden_states, output_states, skip_sample class SkipDownBlock2D(nn.Module): def __init__( self, in_channels: int, out_channels: int, temb_channels: int, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_pre_norm: bool = True, output_scale_factor=np.sqrt(2.0), add_downsample=True, downsample_padding=1, ): super().__init__() self.resnets = nn.ModuleList([]) for i in range(num_layers): in_channels = in_channels if i == 0 else out_channels self.resnets.append( ResnetBlock2D( in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=min(in_channels // 4, 32), groups_out=min(out_channels // 4, 32), dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) if add_downsample: self.resnet_down = ResnetBlock2D( in_channels=out_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=min(out_channels // 4, 32), dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, use_in_shortcut=True, down=True, kernel="fir", ) self.downsamplers = nn.ModuleList([FirDownsample2D(out_channels, out_channels=out_channels)]) self.skip_conv = nn.Conv2d(3, out_channels, kernel_size=(1, 1), stride=(1, 1)) else: self.resnet_down = None self.downsamplers = None self.skip_conv = None def forward(self, hidden_states, temb=None, skip_sample=None): output_states = () for resnet in self.resnets: hidden_states = resnet(hidden_states, temb) output_states += (hidden_states,) if self.downsamplers is not None: hidden_states = self.resnet_down(hidden_states, temb) for downsampler in self.downsamplers: skip_sample = downsampler(skip_sample) hidden_states = self.skip_conv(skip_sample) + hidden_states output_states += (hidden_states,) return hidden_states, output_states, skip_sample class ResnetDownsampleBlock2D(nn.Module): def __init__( self, in_channels: int, out_channels: int, temb_channels: int, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", 
resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, output_scale_factor=1.0, add_downsample=True, ): super().__init__() resnets = [] for i in range(num_layers): in_channels = in_channels if i == 0 else out_channels resnets.append( ResnetBlock2D( in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) self.resnets = nn.ModuleList(resnets) if add_downsample: self.downsamplers = nn.ModuleList( [ ResnetBlock2D( in_channels=out_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, down=True, ) ] ) else: self.downsamplers = None self.gradient_checkpointing = False def forward(self, hidden_states, temb=None): output_states = () for resnet in self.resnets: if self.training and self.gradient_checkpointing: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs) return custom_forward hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb) else: hidden_states = resnet(hidden_states, temb) output_states += (hidden_states,) if self.downsamplers is not None: for downsampler in self.downsamplers: hidden_states = downsampler(hidden_states, temb) output_states += (hidden_states,) return hidden_states, output_states class SimpleCrossAttnDownBlock2D(nn.Module): def __init__( self, in_channels: int, out_channels: int, temb_channels: int, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, attn_num_head_channels=1, cross_attention_dim=1280, output_scale_factor=1.0, add_downsample=True, ): super().__init__() self.has_cross_attention = True resnets = [] attentions = [] self.attn_num_head_channels = attn_num_head_channels self.num_heads = out_channels // self.attn_num_head_channels for i in range(num_layers): in_channels = in_channels if i == 0 else out_channels resnets.append( ResnetBlock2D( in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) attentions.append( Attention( query_dim=out_channels, cross_attention_dim=out_channels, heads=self.num_heads, dim_head=attn_num_head_channels, added_kv_proj_dim=cross_attention_dim, norm_num_groups=resnet_groups, bias=True, upcast_softmax=True, processor=AttnAddedKVProcessor(), ) ) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) if add_downsample: self.downsamplers = nn.ModuleList( [ ResnetBlock2D( in_channels=out_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, down=True, ) ] ) else: self.downsamplers = None self.gradient_checkpointing = False def forward( self, hidden_states, temb=None, encoder_hidden_states=None, 
attention_mask=None, cross_attention_kwargs=None ): output_states = () cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {} for resnet, attn in zip(self.resnets, self.attentions): # resnet hidden_states = resnet(hidden_states, temb) # attn hidden_states = attn( hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, **cross_attention_kwargs, ) output_states += (hidden_states,) if self.downsamplers is not None: for downsampler in self.downsamplers: hidden_states = downsampler(hidden_states, temb) output_states += (hidden_states,) return hidden_states, output_states class KDownBlock2D(nn.Module): def __init__( self, in_channels: int, out_channels: int, temb_channels: int, dropout: float = 0.0, num_layers: int = 4, resnet_eps: float = 1e-5, resnet_act_fn: str = "gelu", resnet_group_size: int = 32, add_downsample=False, ): super().__init__() resnets = [] for i in range(num_layers): in_channels = in_channels if i == 0 else out_channels groups = in_channels // resnet_group_size groups_out = out_channels // resnet_group_size resnets.append( ResnetBlock2D( in_channels=in_channels, out_channels=out_channels, dropout=dropout, temb_channels=temb_channels, groups=groups, groups_out=groups_out, eps=resnet_eps, non_linearity=resnet_act_fn, time_embedding_norm="ada_group", conv_shortcut_bias=False, ) ) self.resnets = nn.ModuleList(resnets) if add_downsample: # YiYi's comments- might be able to use FirDownsample2D, look into details later self.downsamplers = nn.ModuleList([KDownsample2D()]) else: self.downsamplers = None self.gradient_checkpointing = False def forward(self, hidden_states, temb=None): output_states = () for resnet in self.resnets: if self.training and self.gradient_checkpointing: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs) return custom_forward hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb) else: hidden_states = resnet(hidden_states, temb) output_states += (hidden_states,) if self.downsamplers is not None: for downsampler in self.downsamplers: hidden_states = downsampler(hidden_states) return hidden_states, output_states class KCrossAttnDownBlock2D(nn.Module): def __init__( self, in_channels: int, out_channels: int, temb_channels: int, cross_attention_dim: int, dropout: float = 0.0, num_layers: int = 4, resnet_group_size: int = 32, add_downsample=True, attn_num_head_channels: int = 64, add_self_attention: bool = False, resnet_eps: float = 1e-5, resnet_act_fn: str = "gelu", ): super().__init__() resnets = [] attentions = [] self.has_cross_attention = True for i in range(num_layers): in_channels = in_channels if i == 0 else out_channels groups = in_channels // resnet_group_size groups_out = out_channels // resnet_group_size resnets.append( ResnetBlock2D( in_channels=in_channels, out_channels=out_channels, dropout=dropout, temb_channels=temb_channels, groups=groups, groups_out=groups_out, eps=resnet_eps, non_linearity=resnet_act_fn, time_embedding_norm="ada_group", conv_shortcut_bias=False, ) ) attentions.append( KAttentionBlock( out_channels, out_channels // attn_num_head_channels, attn_num_head_channels, cross_attention_dim=cross_attention_dim, temb_channels=temb_channels, attention_bias=True, add_self_attention=add_self_attention, cross_attention_norm=True, group_size=resnet_group_size, ) ) self.resnets = nn.ModuleList(resnets) self.attentions = nn.ModuleList(attentions) if add_downsample: self.downsamplers = 
nn.ModuleList([KDownsample2D()]) else: self.downsamplers = None self.gradient_checkpointing = False def forward( self, hidden_states, temb=None, encoder_hidden_states=None, attention_mask=None, cross_attention_kwargs=None ): output_states = () for resnet, attn in zip(self.resnets, self.attentions): if self.training and self.gradient_checkpointing: def create_custom_forward(module, return_dict=None): def custom_forward(*inputs): if return_dict is not None: return module(*inputs, return_dict=return_dict) else: return module(*inputs) return custom_forward hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb) hidden_states = torch.utils.checkpoint.checkpoint( create_custom_forward(attn, return_dict=False), hidden_states, encoder_hidden_states, attention_mask, cross_attention_kwargs, ) else: hidden_states = resnet(hidden_states, temb) hidden_states = attn( hidden_states, encoder_hidden_states=encoder_hidden_states, emb=temb, attention_mask=attention_mask, cross_attention_kwargs=cross_attention_kwargs, ) if self.downsamplers is None: output_states += (None,) else: output_states += (hidden_states,) if self.downsamplers is not None: for downsampler in self.downsamplers: hidden_states = downsampler(hidden_states) return hidden_states, output_states class AttnUpBlock2D(nn.Module): def __init__( self, in_channels: int, prev_output_channel: int, out_channels: int, temb_channels: int, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, attn_num_head_channels=1, output_scale_factor=1.0, add_upsample=True, ): super().__init__() resnets = [] attentions = [] for i in range(num_layers): res_skip_channels = in_channels if (i == num_layers - 1) else out_channels resnet_in_channels = prev_output_channel if i == 0 else out_channels resnets.append( ResnetBlock2D( in_channels=resnet_in_channels + res_skip_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) attentions.append( AttentionBlock( out_channels, num_head_channels=attn_num_head_channels, rescale_output_factor=output_scale_factor, eps=resnet_eps, norm_num_groups=resnet_groups, ) ) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) if add_upsample: self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)]) else: self.upsamplers = None def forward(self, hidden_states, res_hidden_states_tuple, temb=None): for resnet, attn in zip(self.resnets, self.attentions): # pop res hidden states res_hidden_states = res_hidden_states_tuple[-1] res_hidden_states_tuple = res_hidden_states_tuple[:-1] hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) hidden_states = resnet(hidden_states, temb) hidden_states = attn(hidden_states) if self.upsamplers is not None: for upsampler in self.upsamplers: hidden_states = upsampler(hidden_states) return hidden_states class CrossAttnUpBlock2D(nn.Module): def __init__( self, in_channels: int, out_channels: int, prev_output_channel: int, temb_channels: int, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, 
attn_num_head_channels=1, cross_attention_dim=1280, output_scale_factor=1.0, add_upsample=True, dual_cross_attention=False, use_linear_projection=False, only_cross_attention=False, upcast_attention=False, ): super().__init__() resnets = [] attentions = [] self.has_cross_attention = True self.attn_num_head_channels = attn_num_head_channels for i in range(num_layers): res_skip_channels = in_channels if (i == num_layers - 1) else out_channels resnet_in_channels = prev_output_channel if i == 0 else out_channels resnets.append( ResnetBlock2D( in_channels=resnet_in_channels + res_skip_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) if not dual_cross_attention: attentions.append( Transformer2DModel( attn_num_head_channels, out_channels // attn_num_head_channels, in_channels=out_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, ) ) else: attentions.append( DualTransformer2DModel( attn_num_head_channels, out_channels // attn_num_head_channels, in_channels=out_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, ) ) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) if add_upsample: self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)]) else: self.upsamplers = None self.gradient_checkpointing = False def forward( self, hidden_states: torch.FloatTensor, res_hidden_states_tuple: Tuple[torch.FloatTensor, ...], temb: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, upsample_size: Optional[int] = None, attention_mask: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, ): for resnet, attn in zip(self.resnets, self.attentions): # pop res hidden states res_hidden_states = res_hidden_states_tuple[-1] res_hidden_states_tuple = res_hidden_states_tuple[:-1] hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) if self.training and self.gradient_checkpointing: def create_custom_forward(module, return_dict=None): def custom_forward(*inputs): if return_dict is not None: return module(*inputs, return_dict=return_dict) else: return module(*inputs) return custom_forward hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb) hidden_states = torch.utils.checkpoint.checkpoint( create_custom_forward(attn, return_dict=False), hidden_states, encoder_hidden_states, None, # timestep None, # class_labels cross_attention_kwargs, attention_mask, encoder_attention_mask, )[0] else: hidden_states = resnet(hidden_states, temb) hidden_states = attn( hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, ).sample if self.upsamplers is not None: for upsampler in self.upsamplers: hidden_states = upsampler(hidden_states, upsample_size) return hidden_states class CrossAttnUpBlock2DMusic(nn.Module): def __init__( self, in_channels: int, out_channels: int, prev_output_channel: int, temb_channels: int, 
dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, attn_num_head_channels=1, cross_attention_dim=1280, output_scale_factor=1.0, add_upsample=True, dual_cross_attention=False, use_linear_projection=False, only_cross_attention=False, upcast_attention=False, ): super().__init__() resnets = [] attentions = [] attentions2 = [] attentions3 = [] self.has_cross_attention = True self.attn_num_head_channels = attn_num_head_channels for i in range(num_layers): res_skip_channels = in_channels if (i == num_layers - 1) else out_channels resnet_in_channels = prev_output_channel if i == 0 else out_channels resnets.append( ResnetBlock2D( in_channels=resnet_in_channels + res_skip_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) if not dual_cross_attention: attentions.append( Transformer2DModel( attn_num_head_channels, out_channels // attn_num_head_channels, in_channels=out_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, ) ) attentions2.append( Transformer2DModel( attn_num_head_channels, out_channels // attn_num_head_channels, in_channels=out_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, ) ) attentions3.append( Transformer2DModel( attn_num_head_channels, out_channels // attn_num_head_channels, in_channels=out_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, ) ) else: attentions.append( DualTransformer2DModel( attn_num_head_channels, out_channels // attn_num_head_channels, in_channels=out_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, ) ) self.attentions = nn.ModuleList(attentions) self.attentions2 = nn.ModuleList(attentions2) self.attentions3 = nn.ModuleList(attentions3) self.resnets = nn.ModuleList(resnets) if add_upsample: self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)]) else: self.upsamplers = None self.gradient_checkpointing = False def forward( self, hidden_states: torch.FloatTensor, res_hidden_states_tuple: Tuple[torch.FloatTensor, ...], temb: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, beat_features = None, chord_features = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, upsample_size: Optional[int] = None, attention_mask: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, beat_attention_mask = None, chord_attention_mask = None ): for resnet, attn, attn2, attn3 in zip(self.resnets, self.attentions, self.attentions2,self.attentions3): # pop res hidden states res_hidden_states = res_hidden_states_tuple[-1] res_hidden_states_tuple = res_hidden_states_tuple[:-1] hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) if 
self.training and self.gradient_checkpointing: def create_custom_forward(module, return_dict=None): def custom_forward(*inputs): if return_dict is not None: return module(*inputs, return_dict=return_dict) else: return module(*inputs) return custom_forward hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb) hidden_states = torch.utils.checkpoint.checkpoint( create_custom_forward(attn, return_dict=False), hidden_states, encoder_hidden_states, None, # timestep None, # class_labels cross_attention_kwargs, attention_mask, encoder_attention_mask, )[0] else: hidden_states = resnet(hidden_states, temb) hidden_states = attn( hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, ).sample hidden_states = attn2( hidden_states, encoder_hidden_states=beat_features, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=beat_attention_mask, ).sample hidden_states = attn3( hidden_states, encoder_hidden_states=chord_features, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=chord_attention_mask, ).sample if self.upsamplers is not None: for upsampler in self.upsamplers: hidden_states = upsampler(hidden_states, upsample_size) return hidden_states class UpBlock2D(nn.Module): def __init__( self, in_channels: int, prev_output_channel: int, out_channels: int, temb_channels: int, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, output_scale_factor=1.0, add_upsample=True, ): super().__init__() resnets = [] for i in range(num_layers): res_skip_channels = in_channels if (i == num_layers - 1) else out_channels resnet_in_channels = prev_output_channel if i == 0 else out_channels resnets.append( ResnetBlock2D( in_channels=resnet_in_channels + res_skip_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) self.resnets = nn.ModuleList(resnets) if add_upsample: self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)]) else: self.upsamplers = None self.gradient_checkpointing = False def forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None): for resnet in self.resnets: # pop res hidden states res_hidden_states = res_hidden_states_tuple[-1] res_hidden_states_tuple = res_hidden_states_tuple[:-1] hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) if self.training and self.gradient_checkpointing: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs) return custom_forward hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb) else: hidden_states = resnet(hidden_states, temb) if self.upsamplers is not None: for upsampler in self.upsamplers: hidden_states = upsampler(hidden_states, upsample_size) return hidden_states class UpDecoderBlock2D(nn.Module): def __init__( self, in_channels: int, out_channels: int, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str 
= "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, output_scale_factor=1.0, add_upsample=True, ): super().__init__() resnets = [] for i in range(num_layers): input_channels = in_channels if i == 0 else out_channels resnets.append( ResnetBlock2D( in_channels=input_channels, out_channels=out_channels, temb_channels=None, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) self.resnets = nn.ModuleList(resnets) if add_upsample: self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)]) else: self.upsamplers = None def forward(self, hidden_states): for resnet in self.resnets: hidden_states = resnet(hidden_states, temb=None) if self.upsamplers is not None: for upsampler in self.upsamplers: hidden_states = upsampler(hidden_states) return hidden_states class AttnUpDecoderBlock2D(nn.Module): def __init__( self, in_channels: int, out_channels: int, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, attn_num_head_channels=1, output_scale_factor=1.0, add_upsample=True, ): super().__init__() resnets = [] attentions = [] for i in range(num_layers): input_channels = in_channels if i == 0 else out_channels resnets.append( ResnetBlock2D( in_channels=input_channels, out_channels=out_channels, temb_channels=None, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) attentions.append( AttentionBlock( out_channels, num_head_channels=attn_num_head_channels, rescale_output_factor=output_scale_factor, eps=resnet_eps, norm_num_groups=resnet_groups, ) ) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) if add_upsample: self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)]) else: self.upsamplers = None def forward(self, hidden_states): for resnet, attn in zip(self.resnets, self.attentions): hidden_states = resnet(hidden_states, temb=None) hidden_states = attn(hidden_states) if self.upsamplers is not None: for upsampler in self.upsamplers: hidden_states = upsampler(hidden_states) return hidden_states class AttnSkipUpBlock2D(nn.Module): def __init__( self, in_channels: int, prev_output_channel: int, out_channels: int, temb_channels: int, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_pre_norm: bool = True, attn_num_head_channels=1, output_scale_factor=np.sqrt(2.0), upsample_padding=1, add_upsample=True, ): super().__init__() self.attentions = nn.ModuleList([]) self.resnets = nn.ModuleList([]) for i in range(num_layers): res_skip_channels = in_channels if (i == num_layers - 1) else out_channels resnet_in_channels = prev_output_channel if i == 0 else out_channels self.resnets.append( ResnetBlock2D( in_channels=resnet_in_channels + res_skip_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=min(resnet_in_channels + res_skip_channels // 4, 32), groups_out=min(out_channels // 4, 32), dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, 
pre_norm=resnet_pre_norm, ) ) self.attentions.append( AttentionBlock( out_channels, num_head_channels=attn_num_head_channels, rescale_output_factor=output_scale_factor, eps=resnet_eps, ) )
self.upsampler = FirUpsample2D(in_channels, out_channels=out_channels)
7
2023-11-14 23:29:31+00:00
24k
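The up-blocks in the record above wrap each resnet/attention call in torch.utils.checkpoint through a create_custom_forward closure, so activations are recomputed during backward instead of stored. A minimal standalone sketch of that pattern (TinyBlock is a made-up stand-in module, not part of the diffusers code):

import torch
import torch.nn as nn
from torch.utils.checkpoint import checkpoint

class TinyBlock(nn.Module):
    # Stand-in for a resnet block: hidden states plus a conditioning embedding.
    def __init__(self, dim=16):
        super().__init__()
        self.net = nn.Sequential(nn.Linear(dim, dim), nn.GELU(), nn.Linear(dim, dim))

    def forward(self, x, temb):
        return self.net(x + temb)

def create_custom_forward(module):
    # checkpoint() forwards positional tensors only, so wrap the module call in a closure.
    def custom_forward(*inputs):
        return module(*inputs)
    return custom_forward

block = TinyBlock()
x = torch.randn(4, 16, requires_grad=True)
temb = torch.randn(4, 16)
out = checkpoint(create_custom_forward(block), x, temb)  # intermediate activations recomputed on backward
out.sum().backward()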
BraveGroup/Drive-WM
src/diffusers/pipelines/wuerstchen/modeling_paella_vq_model.py
[ { "identifier": "ConfigMixin", "path": "src/diffusers/configuration_utils.py", "snippet": "class ConfigMixin:\n r\"\"\"\n Base class for all configuration classes. All configuration parameters are stored under `self.config`. Also\n provides the [`~ConfigMixin.from_config`] and [`~ConfigMixin.sa...
from typing import Union
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
from ...models.vae import DecoderOutput, VectorQuantizer
from ...models.vq_model import VQEncoderOutput
from ...utils.accelerate_utils import apply_forward_hook
import torch
import torch.nn as nn
20,149
class MixingResidualBlock(nn.Module): """ Residual block with mixing used by Paella's VQ-VAE. """ def __init__(self, inp_channels, embed_dim): super().__init__() # depthwise self.norm1 = nn.LayerNorm(inp_channels, elementwise_affine=False, eps=1e-6) self.depthwise = nn.Sequential( nn.ReplicationPad2d(1), nn.Conv2d(inp_channels, inp_channels, kernel_size=3, groups=inp_channels) ) # channelwise self.norm2 = nn.LayerNorm(inp_channels, elementwise_affine=False, eps=1e-6) self.channelwise = nn.Sequential( nn.Linear(inp_channels, embed_dim), nn.GELU(), nn.Linear(embed_dim, inp_channels) ) self.gammas = nn.Parameter(torch.zeros(6), requires_grad=True) def forward(self, x): mods = self.gammas x_temp = self.norm1(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) * (1 + mods[0]) + mods[1] x = x + self.depthwise(x_temp) * mods[2] x_temp = self.norm2(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) * (1 + mods[3]) + mods[4] x = x + self.channelwise(x_temp.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) * mods[5] return x class PaellaVQModel(ModelMixin, ConfigMixin): r"""VQ-VAE model from Paella model. This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library implements for all the model (such as downloading or saving, etc.) Parameters: in_channels (int, *optional*, defaults to 3): Number of channels in the input image. out_channels (int, *optional*, defaults to 3): Number of channels in the output. up_down_scale_factor (int, *optional*, defaults to 2): Up and Downscale factor of the input image. levels (int, *optional*, defaults to 2): Number of levels in the model. bottleneck_blocks (int, *optional*, defaults to 12): Number of bottleneck blocks in the model. embed_dim (int, *optional*, defaults to 384): Number of hidden channels in the model. latent_channels (int, *optional*, defaults to 4): Number of latent channels in the VQ-VAE model. num_vq_embeddings (int, *optional*, defaults to 8192): Number of codebook vectors in the VQ-VAE. scale_factor (float, *optional*, defaults to 0.3764): Scaling factor of the latent space. 
""" @register_to_config def __init__( self, in_channels: int = 3, out_channels: int = 3, up_down_scale_factor: int = 2, levels: int = 2, bottleneck_blocks: int = 12, embed_dim: int = 384, latent_channels: int = 4, num_vq_embeddings: int = 8192, scale_factor: float = 0.3764, ): super().__init__() c_levels = [embed_dim // (2**i) for i in reversed(range(levels))] # Encoder blocks self.in_block = nn.Sequential( nn.PixelUnshuffle(up_down_scale_factor), nn.Conv2d(in_channels * up_down_scale_factor**2, c_levels[0], kernel_size=1), ) down_blocks = [] for i in range(levels): if i > 0: down_blocks.append(nn.Conv2d(c_levels[i - 1], c_levels[i], kernel_size=4, stride=2, padding=1)) block = MixingResidualBlock(c_levels[i], c_levels[i] * 4) down_blocks.append(block) down_blocks.append( nn.Sequential( nn.Conv2d(c_levels[-1], latent_channels, kernel_size=1, bias=False), nn.BatchNorm2d(latent_channels), # then normalize them to have mean 0 and std 1 ) ) self.down_blocks = nn.Sequential(*down_blocks) # Vector Quantizer self.vquantizer = VectorQuantizer(num_vq_embeddings, vq_embed_dim=latent_channels, legacy=False, beta=0.25) # Decoder blocks up_blocks = [nn.Sequential(nn.Conv2d(latent_channels, c_levels[-1], kernel_size=1))] for i in range(levels): for j in range(bottleneck_blocks if i == 0 else 1): block = MixingResidualBlock(c_levels[levels - 1 - i], c_levels[levels - 1 - i] * 4) up_blocks.append(block) if i < levels - 1: up_blocks.append( nn.ConvTranspose2d( c_levels[levels - 1 - i], c_levels[levels - 2 - i], kernel_size=4, stride=2, padding=1 ) ) self.up_blocks = nn.Sequential(*up_blocks) self.out_block = nn.Sequential( nn.Conv2d(c_levels[0], out_channels * up_down_scale_factor**2, kernel_size=1), nn.PixelShuffle(up_down_scale_factor), ) @apply_forward_hook def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput: h = self.in_block(x) h = self.down_blocks(h) if not return_dict: return (h,) return VQEncoderOutput(latents=h) @apply_forward_hook def decode( self, h: torch.FloatTensor, force_not_quantize: bool = True, return_dict: bool = True
# Copyright (c) 2022 Dominic Rampas MIT License # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. class MixingResidualBlock(nn.Module): """ Residual block with mixing used by Paella's VQ-VAE. """ def __init__(self, inp_channels, embed_dim): super().__init__() # depthwise self.norm1 = nn.LayerNorm(inp_channels, elementwise_affine=False, eps=1e-6) self.depthwise = nn.Sequential( nn.ReplicationPad2d(1), nn.Conv2d(inp_channels, inp_channels, kernel_size=3, groups=inp_channels) ) # channelwise self.norm2 = nn.LayerNorm(inp_channels, elementwise_affine=False, eps=1e-6) self.channelwise = nn.Sequential( nn.Linear(inp_channels, embed_dim), nn.GELU(), nn.Linear(embed_dim, inp_channels) ) self.gammas = nn.Parameter(torch.zeros(6), requires_grad=True) def forward(self, x): mods = self.gammas x_temp = self.norm1(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) * (1 + mods[0]) + mods[1] x = x + self.depthwise(x_temp) * mods[2] x_temp = self.norm2(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) * (1 + mods[3]) + mods[4] x = x + self.channelwise(x_temp.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) * mods[5] return x class PaellaVQModel(ModelMixin, ConfigMixin): r"""VQ-VAE model from Paella model. This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library implements for all the model (such as downloading or saving, etc.) Parameters: in_channels (int, *optional*, defaults to 3): Number of channels in the input image. out_channels (int, *optional*, defaults to 3): Number of channels in the output. up_down_scale_factor (int, *optional*, defaults to 2): Up and Downscale factor of the input image. levels (int, *optional*, defaults to 2): Number of levels in the model. bottleneck_blocks (int, *optional*, defaults to 12): Number of bottleneck blocks in the model. embed_dim (int, *optional*, defaults to 384): Number of hidden channels in the model. latent_channels (int, *optional*, defaults to 4): Number of latent channels in the VQ-VAE model. num_vq_embeddings (int, *optional*, defaults to 8192): Number of codebook vectors in the VQ-VAE. scale_factor (float, *optional*, defaults to 0.3764): Scaling factor of the latent space. 
""" @register_to_config def __init__( self, in_channels: int = 3, out_channels: int = 3, up_down_scale_factor: int = 2, levels: int = 2, bottleneck_blocks: int = 12, embed_dim: int = 384, latent_channels: int = 4, num_vq_embeddings: int = 8192, scale_factor: float = 0.3764, ): super().__init__() c_levels = [embed_dim // (2**i) for i in reversed(range(levels))] # Encoder blocks self.in_block = nn.Sequential( nn.PixelUnshuffle(up_down_scale_factor), nn.Conv2d(in_channels * up_down_scale_factor**2, c_levels[0], kernel_size=1), ) down_blocks = [] for i in range(levels): if i > 0: down_blocks.append(nn.Conv2d(c_levels[i - 1], c_levels[i], kernel_size=4, stride=2, padding=1)) block = MixingResidualBlock(c_levels[i], c_levels[i] * 4) down_blocks.append(block) down_blocks.append( nn.Sequential( nn.Conv2d(c_levels[-1], latent_channels, kernel_size=1, bias=False), nn.BatchNorm2d(latent_channels), # then normalize them to have mean 0 and std 1 ) ) self.down_blocks = nn.Sequential(*down_blocks) # Vector Quantizer self.vquantizer = VectorQuantizer(num_vq_embeddings, vq_embed_dim=latent_channels, legacy=False, beta=0.25) # Decoder blocks up_blocks = [nn.Sequential(nn.Conv2d(latent_channels, c_levels[-1], kernel_size=1))] for i in range(levels): for j in range(bottleneck_blocks if i == 0 else 1): block = MixingResidualBlock(c_levels[levels - 1 - i], c_levels[levels - 1 - i] * 4) up_blocks.append(block) if i < levels - 1: up_blocks.append( nn.ConvTranspose2d( c_levels[levels - 1 - i], c_levels[levels - 2 - i], kernel_size=4, stride=2, padding=1 ) ) self.up_blocks = nn.Sequential(*up_blocks) self.out_block = nn.Sequential( nn.Conv2d(c_levels[0], out_channels * up_down_scale_factor**2, kernel_size=1), nn.PixelShuffle(up_down_scale_factor), ) @apply_forward_hook def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput: h = self.in_block(x) h = self.down_blocks(h) if not return_dict: return (h,) return VQEncoderOutput(latents=h) @apply_forward_hook def decode( self, h: torch.FloatTensor, force_not_quantize: bool = True, return_dict: bool = True
) -> Union[DecoderOutput, torch.FloatTensor]:
3
2023-11-18 01:40:55+00:00
24k
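The in_block/out_block of PaellaVQModel above trade spatial resolution for channels with PixelUnshuffle/PixelShuffle; with the default up_down_scale_factor=2, embed_dim=384 and levels=2, c_levels[0] works out to 192. A shape-only sketch of that pair, assuming nothing beyond torch:

import torch
import torch.nn as nn

in_block = nn.Sequential(
    nn.PixelUnshuffle(2),                  # (B, 3, H, W) -> (B, 12, H/2, W/2)
    nn.Conv2d(3 * 2 ** 2, 192, kernel_size=1),
)
out_block = nn.Sequential(
    nn.Conv2d(192, 3 * 2 ** 2, kernel_size=1),
    nn.PixelShuffle(2),                    # (B, 12, H/2, W/2) -> (B, 3, H, W)
)

x = torch.randn(1, 3, 64, 64)
h = in_block(x)
print(h.shape)               # torch.Size([1, 192, 32, 32])
print(out_block(h).shape)    # torch.Size([1, 3, 64, 64])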
ej0cl6/TextEE
TextEE/models/AMRIE/E2Etrainer.py
[ { "identifier": "BasicTrainer", "path": "TextEE/models/trainer.py", "snippet": "class BasicTrainer(object):\n def __init__(self, config, type_set=None):\n self.config = config\n self.type_set = type_set\n \n @classmethod\n def add_extra_info_fn(cls, instances, raw_data, con...
import os, sys, logging, tqdm, pprint, copy
import torch
import numpy as np
import ipdb
from transformers import (BertTokenizer, RobertaTokenizer, XLMRobertaTokenizer, AutoTokenizer,
                          AdamW, get_linear_schedule_with_warmup)
from torch.utils.data import DataLoader
from torch.optim import AdamW
from ..trainer import BasicTrainer
from .E2Emodel import AMRIEE2EModel
from .data import IEDataset
from .util import generate_vocabs, load_valid_patterns, save_result, best_score_by_task
from .scorer import score_graphs
from scorer import compute_f1, print_scores
17,409
logger = logging.getLogger(__name__) class AMRIEE2ETrainer(BasicTrainer): def __init__(self, config, type_set=None): super().__init__(config, type_set) self.tokenizer = None self.model = None self.valid_patterns = None @classmethod def add_extra_info_fn(cls, instances, raw_data, config): extra_info_map = {} for dt in raw_data: extra_info = { "entity_mentions": dt["entity_mentions"] if "entity_mentions" in dt else [], "relation_mentions": dt["relation_mentions"] if "relation_mentions" in dt else [], "event_mentions": dt["event_mentions"] if "event_mentions" in dt else [], } extra_info_map[(dt["doc_id"], dt["wnd_id"])] = extra_info for instance in instances: instance["extra_info"] = extra_info_map[(instance["doc_id"], instance["wnd_id"])] return instances def get_idx_map(self, tokens1, tokens2): len1, len2 = len(tokens1), len(tokens2) idx1_s, idx1_e, idx2_s, idx2_e, = 0, 0, 0, 0 idx_map = np.zeros((len2+1, ), dtype=np.int32) idx_map[-1] = len1 while idx1_e <= len1 and idx2_e <= len2: if "".join(tokens1[idx1_s:idx1_e+1]) == "".join(tokens2[idx2_s:idx2_e+1]): idx_map[idx2_s:idx2_e+1] = idx1_s idx1_s = idx1_e+1 idx1_e = idx1_e+1 idx2_s = idx2_e+1 idx2_e = idx2_e+1 elif len("".join(tokens1[idx1_s:idx1_e+1])) <= len("".join(tokens2[idx2_s:idx2_e+1])): idx1_e += 1 else: idx2_e += 1 return idx_map def load_tokenizer_(self, checkpoint=None): if checkpoint: logger.info(f"Loading tokenizer from {checkpoint}") state = torch.load(os.path.join(checkpoint, "best_model.tokenizer")) self.tokenizer = state["tokenizer"] else: logger.info(f"Loading tokenizer from {self.config.pretrained_model_name}") if self.config.pretrained_model_name.startswith('bert-'): self.tokenizer = BertTokenizer.from_pretrained(self.config.pretrained_model_name, cache_dir=self.config.cache_dir) elif self.config.pretrained_model_name.startswith('roberta-'): self.tokenizer = RobertaTokenizer.from_pretrained(self.config.pretrained_model_name, cache_dir=self.config.cache_dir) elif self.config.pretrained_model_name.startswith('xlm-roberta-'): self.tokenizer = XLMRobertaTokenizer.from_pretrained(self.config.pretrained_model_name, cache_dir=self.config.cache_dir) else: self.tokenizer = AutoTokenizer.from_pretrained(self.config.pretrained_model_name, cache_dir=self.config.cache_dir, do_lower_case=False) def load_model_(self, checkpoint=None): assert self.tokenizer if checkpoint: logger.info(f"Loading model from {checkpoint}") state = torch.load(os.path.join(checkpoint, "best_model.state"), map_location=f'cuda:{self.config.gpu_device}') self.vocabs = state["vocabs"] self.type_set = state["type_set"] self.valid_patterns = state["valid_patterns"]
logger = logging.getLogger(__name__) class AMRIEE2ETrainer(BasicTrainer): def __init__(self, config, type_set=None): super().__init__(config, type_set) self.tokenizer = None self.model = None self.valid_patterns = None @classmethod def add_extra_info_fn(cls, instances, raw_data, config): extra_info_map = {} for dt in raw_data: extra_info = { "entity_mentions": dt["entity_mentions"] if "entity_mentions" in dt else [], "relation_mentions": dt["relation_mentions"] if "relation_mentions" in dt else [], "event_mentions": dt["event_mentions"] if "event_mentions" in dt else [], } extra_info_map[(dt["doc_id"], dt["wnd_id"])] = extra_info for instance in instances: instance["extra_info"] = extra_info_map[(instance["doc_id"], instance["wnd_id"])] return instances def get_idx_map(self, tokens1, tokens2): len1, len2 = len(tokens1), len(tokens2) idx1_s, idx1_e, idx2_s, idx2_e, = 0, 0, 0, 0 idx_map = np.zeros((len2+1, ), dtype=np.int32) idx_map[-1] = len1 while idx1_e <= len1 and idx2_e <= len2: if "".join(tokens1[idx1_s:idx1_e+1]) == "".join(tokens2[idx2_s:idx2_e+1]): idx_map[idx2_s:idx2_e+1] = idx1_s idx1_s = idx1_e+1 idx1_e = idx1_e+1 idx2_s = idx2_e+1 idx2_e = idx2_e+1 elif len("".join(tokens1[idx1_s:idx1_e+1])) <= len("".join(tokens2[idx2_s:idx2_e+1])): idx1_e += 1 else: idx2_e += 1 return idx_map def load_tokenizer_(self, checkpoint=None): if checkpoint: logger.info(f"Loading tokenizer from {checkpoint}") state = torch.load(os.path.join(checkpoint, "best_model.tokenizer")) self.tokenizer = state["tokenizer"] else: logger.info(f"Loading tokenizer from {self.config.pretrained_model_name}") if self.config.pretrained_model_name.startswith('bert-'): self.tokenizer = BertTokenizer.from_pretrained(self.config.pretrained_model_name, cache_dir=self.config.cache_dir) elif self.config.pretrained_model_name.startswith('roberta-'): self.tokenizer = RobertaTokenizer.from_pretrained(self.config.pretrained_model_name, cache_dir=self.config.cache_dir) elif self.config.pretrained_model_name.startswith('xlm-roberta-'): self.tokenizer = XLMRobertaTokenizer.from_pretrained(self.config.pretrained_model_name, cache_dir=self.config.cache_dir) else: self.tokenizer = AutoTokenizer.from_pretrained(self.config.pretrained_model_name, cache_dir=self.config.cache_dir, do_lower_case=False) def load_model_(self, checkpoint=None): assert self.tokenizer if checkpoint: logger.info(f"Loading model from {checkpoint}") state = torch.load(os.path.join(checkpoint, "best_model.state"), map_location=f'cuda:{self.config.gpu_device}') self.vocabs = state["vocabs"] self.type_set = state["type_set"] self.valid_patterns = state["valid_patterns"]
self.model = AMRIEE2EModel(self.config, self.vocabs, self.valid_patterns)
1
2023-11-15 21:32:56+00:00
24k
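get_idx_map in the trainer above aligns two tokenizations of the same text by comparing concatenated substrings. The same logic lifted out of the class and run on toy tokens (the example tokens are made up, only numpy is assumed):

import numpy as np

def get_idx_map(tokens1, tokens2):
    # Identical alignment logic to AMRIEE2ETrainer.get_idx_map above.
    len1, len2 = len(tokens1), len(tokens2)
    idx1_s, idx1_e, idx2_s, idx2_e = 0, 0, 0, 0
    idx_map = np.zeros((len2 + 1,), dtype=np.int32)
    idx_map[-1] = len1
    while idx1_e <= len1 and idx2_e <= len2:
        if "".join(tokens1[idx1_s:idx1_e + 1]) == "".join(tokens2[idx2_s:idx2_e + 1]):
            idx_map[idx2_s:idx2_e + 1] = idx1_s
            idx1_s = idx1_e = idx1_e + 1
            idx2_s = idx2_e = idx2_e + 1
        elif len("".join(tokens1[idx1_s:idx1_e + 1])) <= len("".join(tokens2[idx2_s:idx2_e + 1])):
            idx1_e += 1
        else:
            idx2_e += 1
    return idx_map

# "New York" is two tokens on the left and one merged token on the right.
print(get_idx_map(["New", "York", "City"], ["NewYork", "City"]))  # -> [0 2 3]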
ahayler/s4c
models/bts/evaluator_nvs.py
[ { "identifier": "make_test_dataset", "path": "datasets/data_util.py", "snippet": "def make_test_dataset(config):\n type = config.get(\"type\", \"KITTI_Raw\")\n if type == \"KITTI_Raw\":\n test_dataset = KittiRawDataset(\n data_path=config[\"data_path\"],\n pose_path=co...
import math
import torch
import lpips
import skimage.metrics
from ignite.contrib.handlers import TensorboardLogger
from ignite.engine import Engine
from torch import nn
from torch.nn import functional as F
from torch.utils.data import DataLoader
from datasets.data_util import make_test_dataset
from models.common.render import NeRFRenderer
from models.bts.model.models_bts import BTSNet
from models.bts.model.ray_sampler import ImageRaySampler
from utils.base_evaluator import base_evaluation
from utils.metrics import MeanMetric
14,541
render_dict["fine"] = dict(render_dict["coarse"]) render_dict["rgb_gt"] = all_rgb_gt render_dict["rays"] = all_rays render_dict = self.sampler.reconstruct(render_dict) data["fine"].append(render_dict["fine"]) data["coarse"].append(render_dict["coarse"]) data["rgb_gt"] = render_dict["rgb_gt"] data["rays"] = render_dict["rays"] data["z_near"] = torch.tensor(self.z_near, device=images.device) data["z_far"] = torch.tensor(self.z_far, device=images.device) data.update(self.compute_depth_metrics(data)) data.update(self.compute_nvs_metrics(data)) globals()["IDX"] += 1 return data def compute_depth_metrics(self, data): # TODO: This is only correct for batchsize 1! depth_gt = data["depths"][0] depth_pred = data["fine"][0]["depth"][:, :1] depth_pred = F.interpolate(depth_pred, depth_gt.shape[-2:]) # TODO: Maybe implement median scaling depth_pred = torch.clamp(depth_pred, 1e-3, 80) mask = depth_gt != 0 depth_gt = depth_gt[mask] depth_pred = depth_pred[mask] thresh = torch.maximum((depth_gt / depth_pred), (depth_pred / depth_gt)) a1 = (thresh < 1.25).to(torch.float) a2 = (thresh < 1.25 ** 2).to(torch.float) a3 = (thresh < 1.25 ** 3).to(torch.float) a1 = a1.mean() a2 = a2.mean() a3 = a3.mean() rmse = (depth_gt - depth_pred) ** 2 rmse = rmse.mean() ** .5 rmse_log = (torch.log(depth_gt) - torch.log(depth_pred)) ** 2 rmse_log = rmse_log.mean() ** .5 abs_rel = torch.abs(depth_gt - depth_pred) / depth_gt abs_rel = abs_rel.mean() sq_rel = ((depth_gt - depth_pred) ** 2) / depth_gt sq_rel = sq_rel.mean() metrics_dict = { "abs_rel": abs_rel, "sq_rel": sq_rel, "rmse": rmse, "rmse_log": rmse_log, "a1": a1, "a2": a2, "a3": a3 } return metrics_dict def compute_nvs_metrics(self, data): # TODO: This is only correct for batchsize 1! # Following tucker et al. and others, we crop 5% on all sides # idx of stereo frame (the target frame is always the "stereo" frame). sf_id = data["rgb_gt"].shape[1] // 2 imgs_gt = data["rgb_gt"][:, sf_id:sf_id+1] imgs_pred = data["fine"][0]["rgb"][:, sf_id:sf_id+1] imgs_gt = imgs_gt.squeeze(0).permute(0, 3, 1, 2) imgs_pred = imgs_pred.squeeze(0).squeeze(-2).permute(0, 3, 1, 2) imgs_gt = F.interpolate(imgs_gt, self.eval_resolution) imgs_pred = F.interpolate(imgs_pred, self.eval_resolution) n, c, h, w = imgs_gt.shape y0 = int(math.ceil(0.05 * h)) y1 = int(math.floor(0.95 * h)) x0 = int(math.ceil(0.05 * w)) x1 = int(math.floor(0.95 * w)) imgs_gt = imgs_gt[:, :, y0:y1, x0:x1] imgs_pred = imgs_pred[:, :, y0:y1, x0:x1] imgs_gt_np = imgs_gt.detach().squeeze().permute(1, 2, 0).cpu().numpy() imgs_pred_np = imgs_pred.detach().squeeze().permute(1, 2, 0).cpu().numpy() ssim_score = skimage.metrics.structural_similarity(imgs_pred_np, imgs_gt_np, multichannel=True, data_range=1) psnr_score = skimage.metrics.peak_signal_noise_ratio(imgs_pred_np, imgs_gt_np, data_range=1) lpips_score = self.lpips(imgs_pred, imgs_gt, normalize=True).mean() metrics_dict = { "ssim": ssim_score, "psnr": psnr_score, "lpips": lpips_score } return metrics_dict def evaluation(local_rank, config): return base_evaluation(local_rank, config, get_dataflow, initialize, get_metrics) def get_dataflow(config): test_dataset = make_test_dataset(config["data"]) test_loader = DataLoader(test_dataset, batch_size=1, num_workers=config["num_workers"], shuffle=False, drop_last=False) return test_loader def get_metrics(config, device): names = ["abs_rel", "sq_rel", "rmse", "rmse_log", "a1", "a2", "a3", "ssim", "psnr", "lpips"]
IDX = 0 class BTSWrapper(nn.Module): def __init__(self, renderer, config, ) -> None: super().__init__() self.renderer = renderer self.z_near = config["z_near"] self.z_far = config["z_far"] self.ray_batch_size = config["ray_batch_size"] self.sampler = ImageRaySampler(self.z_near, self.z_far) self.eval_resolution = list(config.get("eval_resolution", None)) self.lpips = lpips.LPIPS() @staticmethod def get_loss_metric_names(): return ["loss", "loss_l2", "loss_mask", "loss_temporal"] def forward(self, data): data = dict(data) images = torch.stack(data["imgs"], dim=1) # n, v, c, h, w poses = torch.stack(data["poses"], dim=1) # n, v, 4, 4 w2c projs = torch.stack(data["projs"], dim=1) # n, v, 4, 4 (-1, 1) n, v, c, h, w = images.shape device = images.device # Use first frame as keyframe to_base_pose = torch.inverse(poses[:, :1, :, :]) poses = to_base_pose.expand(-1, v, -1, -1) @ poses if self.eval_resolution is not None: images_resized = F.interpolate(images.view(n * v, c, h, w), size=self.eval_resolution).view(n, v, c, *self.eval_resolution) else: images_resized = images ids_encoder = [0] self.renderer.net.compute_grid_transforms(projs[:, ids_encoder], poses[:, ids_encoder]) self.renderer.net.encode(images_resized, projs, poses, ids_encoder=ids_encoder, ids_render=ids_encoder, images_alt=images * .5 + .5) all_rays, all_rgb_gt = self.sampler.sample(images * .5 + .5, poses, projs) data["fine"] = [] data["coarse"] = [] self.renderer.net.set_scale(0) render_dict = self.renderer(all_rays, want_weights=True, want_alphas=True) if "fine" not in render_dict: render_dict["fine"] = dict(render_dict["coarse"]) render_dict["rgb_gt"] = all_rgb_gt render_dict["rays"] = all_rays render_dict = self.sampler.reconstruct(render_dict) data["fine"].append(render_dict["fine"]) data["coarse"].append(render_dict["coarse"]) data["rgb_gt"] = render_dict["rgb_gt"] data["rays"] = render_dict["rays"] data["z_near"] = torch.tensor(self.z_near, device=images.device) data["z_far"] = torch.tensor(self.z_far, device=images.device) data.update(self.compute_depth_metrics(data)) data.update(self.compute_nvs_metrics(data)) globals()["IDX"] += 1 return data def compute_depth_metrics(self, data): # TODO: This is only correct for batchsize 1! depth_gt = data["depths"][0] depth_pred = data["fine"][0]["depth"][:, :1] depth_pred = F.interpolate(depth_pred, depth_gt.shape[-2:]) # TODO: Maybe implement median scaling depth_pred = torch.clamp(depth_pred, 1e-3, 80) mask = depth_gt != 0 depth_gt = depth_gt[mask] depth_pred = depth_pred[mask] thresh = torch.maximum((depth_gt / depth_pred), (depth_pred / depth_gt)) a1 = (thresh < 1.25).to(torch.float) a2 = (thresh < 1.25 ** 2).to(torch.float) a3 = (thresh < 1.25 ** 3).to(torch.float) a1 = a1.mean() a2 = a2.mean() a3 = a3.mean() rmse = (depth_gt - depth_pred) ** 2 rmse = rmse.mean() ** .5 rmse_log = (torch.log(depth_gt) - torch.log(depth_pred)) ** 2 rmse_log = rmse_log.mean() ** .5 abs_rel = torch.abs(depth_gt - depth_pred) / depth_gt abs_rel = abs_rel.mean() sq_rel = ((depth_gt - depth_pred) ** 2) / depth_gt sq_rel = sq_rel.mean() metrics_dict = { "abs_rel": abs_rel, "sq_rel": sq_rel, "rmse": rmse, "rmse_log": rmse_log, "a1": a1, "a2": a2, "a3": a3 } return metrics_dict def compute_nvs_metrics(self, data): # TODO: This is only correct for batchsize 1! # Following tucker et al. and others, we crop 5% on all sides # idx of stereo frame (the target frame is always the "stereo" frame). 
sf_id = data["rgb_gt"].shape[1] // 2 imgs_gt = data["rgb_gt"][:, sf_id:sf_id+1] imgs_pred = data["fine"][0]["rgb"][:, sf_id:sf_id+1] imgs_gt = imgs_gt.squeeze(0).permute(0, 3, 1, 2) imgs_pred = imgs_pred.squeeze(0).squeeze(-2).permute(0, 3, 1, 2) imgs_gt = F.interpolate(imgs_gt, self.eval_resolution) imgs_pred = F.interpolate(imgs_pred, self.eval_resolution) n, c, h, w = imgs_gt.shape y0 = int(math.ceil(0.05 * h)) y1 = int(math.floor(0.95 * h)) x0 = int(math.ceil(0.05 * w)) x1 = int(math.floor(0.95 * w)) imgs_gt = imgs_gt[:, :, y0:y1, x0:x1] imgs_pred = imgs_pred[:, :, y0:y1, x0:x1] imgs_gt_np = imgs_gt.detach().squeeze().permute(1, 2, 0).cpu().numpy() imgs_pred_np = imgs_pred.detach().squeeze().permute(1, 2, 0).cpu().numpy() ssim_score = skimage.metrics.structural_similarity(imgs_pred_np, imgs_gt_np, multichannel=True, data_range=1) psnr_score = skimage.metrics.peak_signal_noise_ratio(imgs_pred_np, imgs_gt_np, data_range=1) lpips_score = self.lpips(imgs_pred, imgs_gt, normalize=True).mean() metrics_dict = { "ssim": ssim_score, "psnr": psnr_score, "lpips": lpips_score } return metrics_dict def evaluation(local_rank, config): return base_evaluation(local_rank, config, get_dataflow, initialize, get_metrics) def get_dataflow(config): test_dataset = make_test_dataset(config["data"]) test_loader = DataLoader(test_dataset, batch_size=1, num_workers=config["num_workers"], shuffle=False, drop_last=False) return test_loader def get_metrics(config, device): names = ["abs_rel", "sq_rel", "rmse", "rmse_log", "a1", "a2", "a3", "ssim", "psnr", "lpips"]
metrics = {name: MeanMetric((lambda n: lambda x: x["output"][n])(name), device) for name in names}
5
2023-11-12 21:53:27+00:00
24k
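compute_depth_metrics in the evaluator above applies the standard monocular-depth error set (abs_rel, sq_rel, rmse, rmse_log and the delta thresholds) to masked ground truth. A toy standalone computation on already-masked 1D tensors (the example depths are made up, only torch is assumed):

import torch

def depth_metrics(depth_gt, depth_pred):
    # Same error definitions as compute_depth_metrics above, on flattened valid pixels.
    thresh = torch.maximum(depth_gt / depth_pred, depth_pred / depth_gt)
    a1 = (thresh < 1.25).float().mean()
    rmse = ((depth_gt - depth_pred) ** 2).mean() ** 0.5
    rmse_log = ((torch.log(depth_gt) - torch.log(depth_pred)) ** 2).mean() ** 0.5
    abs_rel = (torch.abs(depth_gt - depth_pred) / depth_gt).mean()
    sq_rel = (((depth_gt - depth_pred) ** 2) / depth_gt).mean()
    return {"abs_rel": abs_rel, "sq_rel": sq_rel, "rmse": rmse, "rmse_log": rmse_log, "a1": a1}

gt = torch.tensor([2.0, 4.0, 10.0])
pred = torch.tensor([2.2, 3.6, 12.0])
print({k: round(v.item(), 3) for k, v in depth_metrics(gt, pred).items()})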
newcastleuniversity/DISPEL
tests/processing/test_trace.py
[ { "identifier": "Reading", "path": "dispel/data/core.py", "snippet": "class Reading(FlagMixIn):\n \"\"\"A data capture from an experiment.\n\n Attributes\n ----------\n evaluation\n The evaluation information for this reading\n session\n The session information for this read...
import networkx as nx
import pytest
from dispel.data.core import Reading
from dispel.data.epochs import EpochDefinition
from dispel.data.measures import MeasureValueDefinition, MeasureValueDefinitionPrototype
from dispel.data.raw import RawDataValueDefinition
from dispel.data.values import AbbreviatedValue as AV
from dispel.processing.core import (
    CoreProcessingStepGroup,
    Parameter,
    ProcessingStep,
    ProcessResultType,
)
from dispel.processing.data_set import transformation
from dispel.processing.epochs import (
    CreateLevelEpochStep,
    LevelEpochExtractStep,
    LevelEpochIdFilter,
    LevelEpochProcessingStepMixIn,
)
from dispel.processing.extract import (
    AggregateMeasures,
    ExtractStep,
    MeasureDefinitionMixin,
)
from dispel.processing.trace import (
    DataSetTrace,
    EpochTrace,
    MeasureTrace,
    OriginTrace,
    StepGroupTrace,
    StepTrace,
    Trace,
    TraceRelation,
    get_ancestor_source_graph,
    get_ancestors,
    get_edge_parameters,
    get_traces,
    inspect,
)
from dispel.processing.transform import TransformStep
17,220
"""Tests for :mod:`dispel.processing.trace`.""" class _TestProcessingStep(ProcessingStep): def process_reading(self, reading: Reading, **kwargs) -> ProcessResultType: pass class _TestTransformStep(TransformStep): data_set_ids = ["input_dataset1", "input_dataset2"] new_data_set_id = "output_dataset" definitions = [ RawDataValueDefinition("col1_id", "col1_name"), RawDataValueDefinition("col2_id", "col2_name"), ] @staticmethod @transformation def _transform(data): pass
"""Tests for :mod:`dispel.processing.trace`.""" class _TestProcessingStep(ProcessingStep): def process_reading(self, reading: Reading, **kwargs) -> ProcessResultType: pass class _TestTransformStep(TransformStep): data_set_ids = ["input_dataset1", "input_dataset2"] new_data_set_id = "output_dataset" definitions = [ RawDataValueDefinition("col1_id", "col1_name"), RawDataValueDefinition("col2_id", "col2_name"), ] @staticmethod @transformation def _transform(data): pass
class _TestExtractStep(ExtractStep):
13
2023-11-14 10:06:46+00:00
24k
Jisencc/yolov5_dual_weighting
segment/val.py
[ { "identifier": "DetectMultiBackend", "path": "models/common.py", "snippet": "class DetectMultiBackend(nn.Module):\n # YOLOv5 MultiBackend class for python inference on various backends\n def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, data=None, fp16=False, fuse=Tr...
import argparse
import json
import os
import subprocess
import sys
import numpy as np
import torch
import torch.nn.functional as F
from multiprocessing.pool import ThreadPool
from pathlib import Path
from tqdm import tqdm
from models.common import DetectMultiBackend
from models.yolo import SegmentationModel
from utils.callbacks import Callbacks
from utils.general import (LOGGER, NUM_THREADS, TQDM_BAR_FORMAT, Profile, check_dataset, check_img_size,
                           check_requirements, check_yaml, coco80_to_coco91_class, colorstr, increment_path,
                           non_max_suppression, print_args, scale_boxes, xywh2xyxy, xyxy2xywh)
from utils.metrics import ConfusionMatrix, box_iou
from utils.plots import output_to_target, plot_val_study
from utils.segment.dataloaders import create_dataloader
from utils.segment.general import mask_iou, process_mask, process_mask_native, scale_image
from utils.segment.metrics import Metrics, ap_per_class_box_and_mask
from utils.segment.plots import plot_images_and_masks
from utils.torch_utils import de_parallel, select_device, smart_inference_mode
from pycocotools.mask import encode
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
15,585
with open(file, 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') def save_one_json(predn, jdict, path, class_map, pred_masks): # Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236} def single_encode(x): rle = encode(np.asarray(x[:, :, None], order='F', dtype='uint8'))[0] rle['counts'] = rle['counts'].decode('utf-8') return rle image_id = int(path.stem) if path.stem.isnumeric() else path.stem box = xyxy2xywh(predn[:, :4]) # xywh box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner pred_masks = np.transpose(pred_masks, (2, 0, 1)) with ThreadPool(NUM_THREADS) as pool: rles = pool.map(single_encode, pred_masks) for i, (p, b) in enumerate(zip(predn.tolist(), box.tolist())): jdict.append({ 'image_id': image_id, 'category_id': class_map[int(p[5])], 'bbox': [round(x, 3) for x in b], 'score': round(p[4], 5), 'segmentation': rles[i]}) def process_batch(detections, labels, iouv, pred_masks=None, gt_masks=None, overlap=False, masks=False): """ Return correct prediction matrix Arguments: detections (array[N, 6]), x1, y1, x2, y2, conf, class labels (array[M, 5]), class, x1, y1, x2, y2 Returns: correct (array[N, 10]), for 10 IoU levels """ if masks: if overlap: nl = len(labels) index = torch.arange(nl, device=gt_masks.device).view(nl, 1, 1) + 1 gt_masks = gt_masks.repeat(nl, 1, 1) # shape(1,640,640) -> (n,640,640) gt_masks = torch.where(gt_masks == index, 1.0, 0.0) if gt_masks.shape[1:] != pred_masks.shape[1:]: gt_masks = F.interpolate(gt_masks[None], pred_masks.shape[1:], mode='bilinear', align_corners=False)[0] gt_masks = gt_masks.gt_(0.5) iou = mask_iou(gt_masks.view(gt_masks.shape[0], -1), pred_masks.view(pred_masks.shape[0], -1)) else: # boxes iou = box_iou(labels[:, 1:], detections[:, :4]) correct = np.zeros((detections.shape[0], iouv.shape[0])).astype(bool) correct_class = labels[:, 0:1] == detections[:, 5] for i in range(len(iouv)): x = torch.where((iou >= iouv[i]) & correct_class) # IoU > threshold and classes match if x[0].shape[0]: matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() # [label, detect, iou] if x[0].shape[0] > 1: matches = matches[matches[:, 2].argsort()[::-1]] matches = matches[np.unique(matches[:, 1], return_index=True)[1]] # matches = matches[matches[:, 2].argsort()[::-1]] matches = matches[np.unique(matches[:, 0], return_index=True)[1]] correct[matches[:, 1].astype(int), i] = True return torch.tensor(correct, dtype=torch.bool, device=iouv.device) @smart_inference_mode() def run( data, weights=None, # model.pt path(s) batch_size=32, # batch size imgsz=640, # inference size (pixels) conf_thres=0.001, # confidence threshold iou_thres=0.6, # NMS IoU threshold max_det=300, # maximum detections per image task='val', # train, val, test, speed or study device='', # cuda device, i.e. 
0 or 0,1,2,3 or cpu workers=8, # max dataloader workers (per RANK in DDP mode) single_cls=False, # treat as single-class dataset augment=False, # augmented inference verbose=False, # verbose output save_txt=False, # save results to *.txt save_hybrid=False, # save label+prediction hybrid results to *.txt save_conf=False, # save confidences in --save-txt labels save_json=False, # save a COCO-JSON results file project=ROOT / 'runs/val-seg', # save to project/name name='exp', # save to project/name exist_ok=False, # existing project/name ok, do not increment half=True, # use FP16 half-precision inference dnn=False, # use OpenCV DNN for ONNX inference model=None, dataloader=None, save_dir=Path(''), plots=True, overlap=False, mask_downsample_ratio=1, compute_loss=None, callbacks=Callbacks(), ): if save_json: check_requirements('pycocotools>=2.0.6') process = process_mask_native # more accurate else: process = process_mask # faster # Initialize/load model and set device training = model is not None if training: # called by train.py device, pt, jit, engine = next(model.parameters()).device, True, False, False # get model device, PyTorch model half &= device.type != 'cpu' # half precision only supported on CUDA model.half() if half else model.float() nm = de_parallel(model).model[-1].nm # number of masks else: # called directly device = select_device(device, batch_size=batch_size) # Directories save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Validate a trained YOLOv5 segment model on a segment dataset Usage: $ bash data/scripts/get_coco.sh --val --segments # download COCO-segments val split (1G, 5000 images) $ python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 # validate COCO-segments Usage - formats: $ python segment/val.py --weights yolov5s-seg.pt # PyTorch yolov5s-seg.torchscript # TorchScript yolov5s-seg.onnx # ONNX Runtime or OpenCV DNN with --dnn yolov5s-seg_openvino_label # OpenVINO yolov5s-seg.engine # TensorRT yolov5s-seg.mlmodel # CoreML (macOS-only) yolov5s-seg_saved_model # TensorFlow SavedModel yolov5s-seg.pb # TensorFlow GraphDef yolov5s-seg.tflite # TensorFlow Lite yolov5s-seg_edgetpu.tflite # TensorFlow Edge TPU yolov5s-seg_paddle_model # PaddlePaddle """ FILE = Path(__file__).resolve() ROOT = FILE.parents[1] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative def save_one_txt(predn, save_conf, shape, file): # Save one txt result gn = torch.tensor(shape)[[1, 0, 1, 0]] # normalization gain whwh for *xyxy, conf, cls in predn.tolist(): xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format with open(file, 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') def save_one_json(predn, jdict, path, class_map, pred_masks): # Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236} def single_encode(x): rle = encode(np.asarray(x[:, :, None], order='F', dtype='uint8'))[0] rle['counts'] = rle['counts'].decode('utf-8') return rle image_id = int(path.stem) if path.stem.isnumeric() else path.stem box = xyxy2xywh(predn[:, :4]) # xywh box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner pred_masks = np.transpose(pred_masks, (2, 0, 1)) with ThreadPool(NUM_THREADS) as pool: rles = pool.map(single_encode, pred_masks) for i, (p, b) in enumerate(zip(predn.tolist(), box.tolist())): jdict.append({ 'image_id': image_id, 'category_id': class_map[int(p[5])], 'bbox': [round(x, 3) for x in b], 'score': round(p[4], 5), 'segmentation': rles[i]}) def process_batch(detections, labels, iouv, pred_masks=None, gt_masks=None, overlap=False, masks=False): """ Return correct prediction matrix Arguments: detections (array[N, 6]), x1, y1, x2, y2, conf, class labels (array[M, 5]), class, x1, y1, x2, y2 Returns: correct (array[N, 10]), for 10 IoU levels """ if masks: if overlap: nl = len(labels) index = torch.arange(nl, device=gt_masks.device).view(nl, 1, 1) + 1 gt_masks = gt_masks.repeat(nl, 1, 1) # shape(1,640,640) -> (n,640,640) gt_masks = torch.where(gt_masks == index, 1.0, 0.0) if gt_masks.shape[1:] != pred_masks.shape[1:]: gt_masks = F.interpolate(gt_masks[None], pred_masks.shape[1:], mode='bilinear', align_corners=False)[0] gt_masks = gt_masks.gt_(0.5) iou = mask_iou(gt_masks.view(gt_masks.shape[0], -1), pred_masks.view(pred_masks.shape[0], -1)) else: # boxes iou = box_iou(labels[:, 1:], detections[:, :4]) correct = np.zeros((detections.shape[0], iouv.shape[0])).astype(bool) correct_class = labels[:, 0:1] == detections[:, 5] for i in range(len(iouv)): x = torch.where((iou >= iouv[i]) & correct_class) # IoU > threshold and classes match if x[0].shape[0]: matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() # [label, detect, iou] if x[0].shape[0] > 1: matches = 
matches[matches[:, 2].argsort()[::-1]] matches = matches[np.unique(matches[:, 1], return_index=True)[1]] # matches = matches[matches[:, 2].argsort()[::-1]] matches = matches[np.unique(matches[:, 0], return_index=True)[1]] correct[matches[:, 1].astype(int), i] = True return torch.tensor(correct, dtype=torch.bool, device=iouv.device) @smart_inference_mode() def run( data, weights=None, # model.pt path(s) batch_size=32, # batch size imgsz=640, # inference size (pixels) conf_thres=0.001, # confidence threshold iou_thres=0.6, # NMS IoU threshold max_det=300, # maximum detections per image task='val', # train, val, test, speed or study device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu workers=8, # max dataloader workers (per RANK in DDP mode) single_cls=False, # treat as single-class dataset augment=False, # augmented inference verbose=False, # verbose output save_txt=False, # save results to *.txt save_hybrid=False, # save label+prediction hybrid results to *.txt save_conf=False, # save confidences in --save-txt labels save_json=False, # save a COCO-JSON results file project=ROOT / 'runs/val-seg', # save to project/name name='exp', # save to project/name exist_ok=False, # existing project/name ok, do not increment half=True, # use FP16 half-precision inference dnn=False, # use OpenCV DNN for ONNX inference model=None, dataloader=None, save_dir=Path(''), plots=True, overlap=False, mask_downsample_ratio=1, compute_loss=None, callbacks=Callbacks(), ): if save_json: check_requirements('pycocotools>=2.0.6') process = process_mask_native # more accurate else: process = process_mask # faster # Initialize/load model and set device training = model is not None if training: # called by train.py device, pt, jit, engine = next(model.parameters()).device, True, False, False # get model device, PyTorch model half &= device.type != 'cpu' # half precision only supported on CUDA model.half() if half else model.float() nm = de_parallel(model).model[-1].nm # number of masks else: # called directly device = select_device(device, batch_size=batch_size) # Directories save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine
imgsz = check_img_size(imgsz, s=stride) # check image size
3
2023-11-12 13:28:26+00:00
24k
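process_batch in the record above builds its correctness matrix by thresholding a label-by-detection IoU matrix and keeping only class-matching pairs. A small standalone sketch of the box-IoU step (box_iou here is a local re-implementation standing in for the repo's utils.metrics.box_iou; the toy boxes are made up):

import torch

def box_iou(box1, box2):
    # Pairwise IoU for boxes in (x1, y1, x2, y2) format: (N, 4) x (M, 4) -> (N, M).
    area1 = (box1[:, 2] - box1[:, 0]) * (box1[:, 3] - box1[:, 1])
    area2 = (box2[:, 2] - box2[:, 0]) * (box2[:, 3] - box2[:, 1])
    lt = torch.max(box1[:, None, :2], box2[None, :, :2])
    rb = torch.min(box1[:, None, 2:], box2[None, :, 2:])
    wh = (rb - lt).clamp(min=0)
    inter = wh[..., 0] * wh[..., 1]
    return inter / (area1[:, None] + area2[None, :] - inter)

labels = torch.tensor([[0., 0., 0., 10., 10.]])          # class, x1, y1, x2, y2
detections = torch.tensor([[1., 1., 9., 9., 0.9, 0.]])   # x1, y1, x2, y2, conf, class
iou = box_iou(labels[:, 1:], detections[:, :4])
correct_class = labels[:, 0:1] == detections[:, 5]
print(iou)                           # ~0.64 for these boxes
print((iou >= 0.5) & correct_class)  # counts as correct at the 0.5 IoU level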
cyberark/ark-sdk-python
ark_sdk_python/cli_services/dpa/db/ark_dpa_db_policies_editor_service.py
[ { "identifier": "ArkInquirerRender", "path": "ark_sdk_python/args/ark_args_formatter.py", "snippet": "class ArkInquirerRender(ConsoleRender):\n # pylint: disable=keyword-arg-before-vararg,protected-access\n def __init__(self, event_generator=None, *args, **kwargs):\n super().__init__(event_...
from datetime import date, timedelta
from typing import Dict, Final, List, Optional
from overrides import overrides
from ark_sdk_python.args.ark_args_formatter import ArkInquirerRender
from ark_sdk_python.auth.ark_isp_auth import ArkISPAuth
from ark_sdk_python.cli_services.dpa.common.ark_dpa_base_policies_editor_service import ArkDPABasePoliciesEditorService
from ark_sdk_python.models.ark_profile import ArkProfile
from ark_sdk_python.models.cli_services.dpa.policies_editor.db import ArkDPADBGeneratePolicy
from ark_sdk_python.models.common import ArkWorkspaceType
from ark_sdk_python.models.services import ArkServiceConfig
from ark_sdk_python.models.services.dpa.policies.common import ArkDPADeletePolicy, ArkDPAGetPolicy, ArkDPARuleStatus, ArkDPAUserData
from ark_sdk_python.models.services.dpa.policies.db import (
    ArkDPADB,
    ArkDPADBAddPolicy,
    ArkDPADBAppliedTo,
    ArkDPADBAuthorizationRule,
    ArkDPADBBaseAuth,
    ArkDPADBConnectAs,
    ArkDPADBConnectionInformation,
    ArkDPADBLDAPAuth,
    ArkDPADBLocalDBAuth,
    ArkDPADBMariaDB,
    ArkDPADBMSSQL,
    ArkDPADBMySQL,
    ArkDPADBOracle,
    ArkDPADBOracleDBAuth,
    ArkDPADBOracleResource,
    ArkDPADBPolicy,
    ArkDPADBPolicyListItem,
    ArkDPADBPostgres,
    ArkDPADBProvidersData,
    ArkDPADBResourceIdentifierType,
    ArkDPADBUpdatePolicy,
)
from ark_sdk_python.services.dpa.policies.db.ark_dpa_db_policies_service import ArkDPADBPoliciesService
import inquirer
14,687
SUPPORTED_DATABASE_TYPES: Final[List[str]] = [ 'MSSQL', 'MySQL', 'MariaDB', 'Postgres', 'Oracle', ] DEFAULT_GENERATED_POLICY: Final[ArkDPADBPolicy] = ArkDPADBPolicy( policy_name='Default DB Policy', status=ArkDPARuleStatus.Draft, description='Auto generated db policy', providers_data=ArkDPADBProvidersData( postgres=ArkDPADBPostgres( resources=['postgres-onboarded-asset'], ), ), start_date=date.today().strftime('%Y-%m-%d'), end_date=(date.today() + timedelta(days=7)).strftime('%Y-%m-%d'), user_access_rules=[], ) DEFAULT_GENERATED_PROVIDERS: Final[Dict[ArkWorkspaceType, ArkDPADB]] = { ArkWorkspaceType.MSSQL: ArkDPADBMSSQL(resources=['mssql-onboarded-asset']), ArkWorkspaceType.MYSQL: ArkDPADBMySQL(resources=['mysql-onboarded-asset']), ArkWorkspaceType.MARIADB: ArkDPADBMariaDB(resources=['mariadb-onboarded-asset']), ArkWorkspaceType.POSTGRES: ArkDPADBPostgres(resources=['postgres-onboarded-asset']), ArkWorkspaceType.ORACLE: ArkDPADBOracle( resources=[ ArkDPADBOracleResource( name='oracle-onboarded-asset', services=['XE'], ), ], ), } DEFAULT_GENERATED_AUTH_METHODS: Final[Dict[ArkWorkspaceType, ArkDPADBBaseAuth]] = { ArkWorkspaceType.MSSQL: ArkDPADBLDAPAuth( assign_groups=['DomainSQLAdmins'], applied_to=[ ArkDPADBAppliedTo( name='mssql-onboarded-asset', type=ArkDPADBResourceIdentifierType.RESOURCE, ) ], ), ArkWorkspaceType.MYSQL: ArkDPADBLocalDBAuth( roles=['db_admin'], applied_to=[ ArkDPADBAppliedTo( name='mysql-onboarded-asset', type=ArkDPADBResourceIdentifierType.RESOURCE, ) ], ), ArkWorkspaceType.MARIADB: ArkDPADBLocalDBAuth( roles=['db_admin'], applied_to=[ ArkDPADBAppliedTo( name='mariadb-onboarded-asset', type=ArkDPADBResourceIdentifierType.RESOURCE, ) ], ), ArkWorkspaceType.POSTGRES: ArkDPADBLocalDBAuth( roles=['rds_superuser'], applied_to=[ ArkDPADBAppliedTo( name='postgres-onboarded-asset', type=ArkDPADBResourceIdentifierType.RESOURCE, ) ], ), ArkWorkspaceType.ORACLE: ArkDPADBOracleDBAuth( roles=[], dba_role=True, sysdba_role=True, sysoper_role=False, applied_to=[ ArkDPADBAppliedTo( name='oracle-onboarded-asset', type=ArkDPADBResourceIdentifierType.RESOURCE, ) ], ), } DEFAULT_GENERATED_AUTHORIZATION_RULE: Final[ArkDPADBAuthorizationRule] = ArkDPADBAuthorizationRule( rule_name='Default DB Rule', user_data=ArkDPAUserData(roles=['DpaAdmin'], groups=[], users=[]), connection_information=ArkDPADBConnectionInformation( grant_access=2, idle_time=10, days_of_week=[], full_days=True, hours_from='07:00', hours_to='17:00', time_zone='Asia/Jerusalem', connect_as=ArkDPADBConnectAs( db_auth=[ ArkDPADBLocalDBAuth( roles=['rds_superuser'], applied_to=[ ArkDPADBAppliedTo( name='postgres-onboarded-asset', type=ArkDPADBResourceIdentifierType.RESOURCE, ) ], ), ], ), ), ) WORKSPACE_TO_PROVIDER_NAME: Final[Dict[ArkWorkspaceType, str]] = { ArkWorkspaceType.MSSQL: 'mssql', ArkWorkspaceType.MYSQL: 'mysql', ArkWorkspaceType.POSTGRES: 'postgres', ArkWorkspaceType.ORACLE: 'oracle', ArkWorkspaceType.MARIADB: 'mariadb', } class ArkDPADBPoliciesEditorService(
SERVICE_CONFIG: Final[ArkServiceConfig] = ArkServiceConfig( service_name='dpa-policies-db-editor', required_authenticator_names=['isp'], optional_authenticator_names=[] ) SUPPORTED_DATABASE_TYPES: Final[List[str]] = [ 'MSSQL', 'MySQL', 'MariaDB', 'Postgres', 'Oracle', ] DEFAULT_GENERATED_POLICY: Final[ArkDPADBPolicy] = ArkDPADBPolicy( policy_name='Default DB Policy', status=ArkDPARuleStatus.Draft, description='Auto generated db policy', providers_data=ArkDPADBProvidersData( postgres=ArkDPADBPostgres( resources=['postgres-onboarded-asset'], ), ), start_date=date.today().strftime('%Y-%m-%d'), end_date=(date.today() + timedelta(days=7)).strftime('%Y-%m-%d'), user_access_rules=[], ) DEFAULT_GENERATED_PROVIDERS: Final[Dict[ArkWorkspaceType, ArkDPADB]] = { ArkWorkspaceType.MSSQL: ArkDPADBMSSQL(resources=['mssql-onboarded-asset']), ArkWorkspaceType.MYSQL: ArkDPADBMySQL(resources=['mysql-onboarded-asset']), ArkWorkspaceType.MARIADB: ArkDPADBMariaDB(resources=['mariadb-onboarded-asset']), ArkWorkspaceType.POSTGRES: ArkDPADBPostgres(resources=['postgres-onboarded-asset']), ArkWorkspaceType.ORACLE: ArkDPADBOracle( resources=[ ArkDPADBOracleResource( name='oracle-onboarded-asset', services=['XE'], ), ], ), } DEFAULT_GENERATED_AUTH_METHODS: Final[Dict[ArkWorkspaceType, ArkDPADBBaseAuth]] = { ArkWorkspaceType.MSSQL: ArkDPADBLDAPAuth( assign_groups=['DomainSQLAdmins'], applied_to=[ ArkDPADBAppliedTo( name='mssql-onboarded-asset', type=ArkDPADBResourceIdentifierType.RESOURCE, ) ], ), ArkWorkspaceType.MYSQL: ArkDPADBLocalDBAuth( roles=['db_admin'], applied_to=[ ArkDPADBAppliedTo( name='mysql-onboarded-asset', type=ArkDPADBResourceIdentifierType.RESOURCE, ) ], ), ArkWorkspaceType.MARIADB: ArkDPADBLocalDBAuth( roles=['db_admin'], applied_to=[ ArkDPADBAppliedTo( name='mariadb-onboarded-asset', type=ArkDPADBResourceIdentifierType.RESOURCE, ) ], ), ArkWorkspaceType.POSTGRES: ArkDPADBLocalDBAuth( roles=['rds_superuser'], applied_to=[ ArkDPADBAppliedTo( name='postgres-onboarded-asset', type=ArkDPADBResourceIdentifierType.RESOURCE, ) ], ), ArkWorkspaceType.ORACLE: ArkDPADBOracleDBAuth( roles=[], dba_role=True, sysdba_role=True, sysoper_role=False, applied_to=[ ArkDPADBAppliedTo( name='oracle-onboarded-asset', type=ArkDPADBResourceIdentifierType.RESOURCE, ) ], ), } DEFAULT_GENERATED_AUTHORIZATION_RULE: Final[ArkDPADBAuthorizationRule] = ArkDPADBAuthorizationRule( rule_name='Default DB Rule', user_data=ArkDPAUserData(roles=['DpaAdmin'], groups=[], users=[]), connection_information=ArkDPADBConnectionInformation( grant_access=2, idle_time=10, days_of_week=[], full_days=True, hours_from='07:00', hours_to='17:00', time_zone='Asia/Jerusalem', connect_as=ArkDPADBConnectAs( db_auth=[ ArkDPADBLocalDBAuth( roles=['rds_superuser'], applied_to=[ ArkDPADBAppliedTo( name='postgres-onboarded-asset', type=ArkDPADBResourceIdentifierType.RESOURCE, ) ], ), ], ), ), ) WORKSPACE_TO_PROVIDER_NAME: Final[Dict[ArkWorkspaceType, str]] = { ArkWorkspaceType.MSSQL: 'mssql', ArkWorkspaceType.MYSQL: 'mysql', ArkWorkspaceType.POSTGRES: 'postgres', ArkWorkspaceType.ORACLE: 'oracle', ArkWorkspaceType.MARIADB: 'mariadb', } class ArkDPADBPoliciesEditorService(
ArkDPABasePoliciesEditorService[ArkDPADBPolicy, ArkDPADBPolicyListItem, ArkDPADBAddPolicy, ArkDPADBUpdatePolicy, ArkDPADBGeneratePolicy]
2
2023-11-13 09:24:31+00:00
24k
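DEFAULT_GENERATED_POLICY in the record above pins a one-week validity window by formatting today's date and today plus seven days. The same computation in isolation, using only the standard library:

from datetime import date, timedelta

start_date = date.today().strftime('%Y-%m-%d')
end_date = (date.today() + timedelta(days=7)).strftime('%Y-%m-%d')
print(start_date, end_date)  # e.g. a window spanning exactly one week from today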
kampta/asic
train.py
[ { "identifier": "Logger", "path": "commons/logger.py", "snippet": "class Logger(SummaryWriter):\n\n def __init__(self, results_path, log_to_tb=False, log_to_wandb=True):\n super().__init__(results_path)\n self.results_path = results_path\n self.log_to_tb = log_to_tb\n self...
import argparse
import torch
import numpy as np
import json
import os
import torch.nn.functional as F
import wandb
from torch import nn, optim
from tqdm import tqdm
from pathlib import Path
from commons.logger import Logger, log_visuals
from commons.distributed import get_rank, setup_distributed, reduce_loss_dict, \
    get_world_size, primary
from commons.utils import sample_tuples
from datasets.cub import CUBDataset
from datasets.in_memory import InMemoryDataset
from datasets.spair import SpairDataset
from datasets.utils import Augmentor
from models.utils import accumulate, requires_grad
from models.canonical import Canonical, CanonicalMLP
from models.asic import Asic
from losses.reg_losses import total_variation_loss
from thirdparty.lpips.lpips import get_perceptual_loss
from losses.matching_losses import LossCorrsSparse
from thirdparty.gangealing.annealing import DecayingCosineAnnealingWarmRestarts, \
    lr_cycle_iters
16,699
# Model hyperparameters: parser.add_argument("--canon_lr", type=float, default=0.003, help="base learning rate of canonical space") parser.add_argument("--canon_ema", action='store_true', help='Enable ema for canonical space') parser.add_argument("--stn_ema", action='store_true', help='Enable ema for canonical space') parser.add_argument("--stn_lr", type=float, default=0.003, help="base learning rate of SpatialTransformer") parser.add_argument("--flow_ssl", action='store_true', help="""If specified, apply STN on SSL features)""") parser.add_argument("--channel_multiplier", default=0.5, type=float, help='channel multiplier for smaller models') parser.add_argument("--bilinear", action='store_true', help='Apply bilinear upsample/downsample') parser.add_argument("--padding_mode", default='border', choices=['border', 'zeros', 'reflection'], type=str, help="""Padding algorithm for when the STN samples beyond image boundaries""") parser.add_argument("--use_tanh", action='store_true', help='Use tanh activation at the flow output') parser.add_argument("--disable_tps", action='store_true', help='disable tps transformations') # Backbone parameters parser.add_argument("--bb", default='dino_vits8', choices=['dino_vits8', 'dino_vits16', 'dino_vitb8', 'dino_vitb16', 'vit_small_patch8_224', 'vit_small_patch16_224', 'vit_base_patch16_224'], help='backbone models') parser.add_argument('--bb_stride', default=2, type=int, help="stride.") # Visualization hyperparameters: parser.add_argument("--vis_every", type=int, default=500, help="""frequency with which visualizations are generated during training""") parser.add_argument("--vis_denseres", type=int, default=32, help='number of sparse correspondences to visualize') parser.add_argument("--ckpt_every", type=int, default=10000, help='frequency of checkpointing during training') parser.add_argument("--log_every", default=25, type=int, help='How frequently to log data to TensorBoard') parser.add_argument("--n_sample", type=int, default=4, help="""number of images (real and fake) to generate visuals for""") parser.add_argument("--disable_wandb", action='store_true', help='Disable wandb for debugging') # Learning Rate scheduler hyperparameters: parser.add_argument("--period", default=10000, type=float, help="""Period for cosine learning rate scheduler (measured in gradient steps)""") parser.add_argument("--decay", default=0.9, type=float, help="""Decay factor for the cosine learning rate scheduler""") parser.add_argument("--tm", default=2, type=int, help="""Period multiplier for the cosine learning rate scheduler""") return parser def train(args, train_dset, canon, stn, c_ema, t_ema, canon_optim, canon_sched, t_optim, t_sched, loss_fn, nbb_loss_fn, device, writer): # Record modules to make saving checkpoints easier: if args.distributed: t_module = stn.module c_module = canon.module else: t_module = stn c_module = canon # Initialize Spatial Transformation Generator (Thin Plate Spline) aug = Augmentor(jitter=args.jitter, jitter_prob=args.jitter_prob, gray_prob=args.gray_prob, solar_prob=args.solar_prob, tps_scale=args.tps_scale).to(device) # A model checkpoint will be saved whenever the learning rate is zero: zero_lr_iters = lr_cycle_iters(0, args.period, args.iter, args.tm) early_ckpt_iters = set(zero_lr_iters) early_vis_iters = {100} early_vis_iters.update(early_ckpt_iters) # Initialize various training variables and constants: rec_loss = torch.tensor(0.0, device='cuda') flow_tv_loss = torch.tensor(0.0, device='cuda') nbb_loss = torch.tensor(0.0, device='cuda') 
equi_loss = torch.tensor(0.0, device='cuda') mask_loss = torch.tensor(0.0, device='cuda') parts_loss = torch.tensor(0.0, device='cuda') accum = 0.5 ** (32 / (10 * 1000)) # Resize function for perceptual loss if args.unwarp_size != args.img_size: scale_factor = args.unwarp_size / args.img_size resize_fn = nn.Upsample(scale_factor=scale_factor, mode='bilinear', align_corners=True) else: resize_fn = nn.Identity() # Pre-load on GPU # Assuming ~30 images of size 256x256, takes up ~23 MB device memory has_gt_kp = train_dset.kps is not None all_imgs = train_dset.imgs = train_dset.imgs.to(device) # / 127.5 - 1.0 all_masks = train_dset.masks = train_dset.masks.unsqueeze(1).to(device) all_parts = train_dset.parts = train_dset.parts.to(device) if has_gt_kp: all_kps = train_dset.kps = train_dset.kps.to(device) # Pseudo GT pseudo_kps = train_dset.pseudo_kps = torch.from_numpy(train_dset.pseudo_kps).to(device) num_parts = train_dset.num_parts loss_topk = pseudo_kps.shape[2] if args.sparse_topk is None else min(args.sparse_topk, pseudo_kps.shape[2]) # Progress bar for monitoring training: pbar = range(args.start_iter, args.iter)
def save_state_dict(ckpt_name, c_module, t_module, c_ema, t_ema, canon_optim, canon_sched, t_optim, t_sched, args, step, add_step_to_name=False): ckpt_dict = { "canon": c_module.state_dict(), "t": t_module.state_dict(), "c_ema": c_ema.state_dict(), "t_ema": t_ema.state_dict(), "t_optim": t_optim.state_dict(), "t_sched": t_sched.state_dict(), "canon_optim": canon_optim.state_dict() if canon_optim is not None else None, "canon_sched": canon_sched.state_dict() if canon_sched is not None else None, "args": args, "iter": step } torch.save(ckpt_dict, f'{results_path}/{ckpt_name}.pt') if add_step_to_name: torch.save(ckpt_dict, f'{results_path}/{ckpt_name}_{step:07d}.pt') def count_parameters(model): return sum(p.numel() for p in model.parameters() if p.requires_grad) def base_training_argparse(): parser = argparse.ArgumentParser(description="Training") # Main training arguments: parser.add_argument("--exp-name", type=str, required=True, help="Name for experiment run (used for logging)") parser.add_argument("--results", type=str, default='logs', help='path to the results directory') parser.add_argument("--seed", default=0, type=int, help='Random seed for this experiment') parser.add_argument("--dset", type=str, default='cub', choices=["cub", "spair"]) parser.add_argument("--img_dir", type=str, required=True, help="Path to real data") parser.add_argument("--flow_dir", type=str, default='processed_data', help="Path to preprocessed flows") parser.add_argument("--mask_threshold", type=int, default=1, help="Threshold for masking") parser.add_argument("--mask_bbox_pad", type=int, default=4, help="Crop with some padding") parser.add_argument("--img_size", default=256, type=int, help='resolution of real images') parser.add_argument("--iter", type=int, default=20000, help="total training iterations") parser.add_argument("--batch", type=int, default=20, help="batch size per-GPU") parser.add_argument("--num_workers", type=int, default=2, help="num workers for dataloader") # Dataset hyperparameters: parser.add_argument("--cub_idx", type=int, default=1, help="cub category") parser.add_argument("--split", default='test', choices=['test', 'val'], help='splits for training and validation') parser.add_argument("--use_coseg_masks", action='store_true') parser.add_argument("--num_parts", default=4, type=int) parser.add_argument("--spair_cat", default='cat', help="cub category") # Loss hyperparameters: parser.add_argument("--loss_fn", type=str, default='vgg_ssl', choices=['lpips', 'vgg_ssl'], help="The perceptual loss to use.") parser.add_argument("--rec_weight", type=float, default=1., help='weight for reconstruction loss') parser.add_argument("--nbb_weight", type=float, default=30., help='weight for nbb loss') parser.add_argument("--flow_tv_weight", default=15000.0, type=float, help="""Loss weighting of the Total Variation smoothness regularizer on the residual flow""") parser.add_argument("--equi_weight", default=1.0, type=float, help='Loss weighting for equivariance') parser.add_argument("--sparse_topk", type=int, default=None, help='number of sparse correspondences for loss') parser.add_argument("--sparse_temp", type=float, default=1, help='temperature for sparse loss') parser.add_argument("--mask_weight", default=0.1, type=float, help="""Loss weighting of the mask""") parser.add_argument("--parts_weight", default=10.0, type=float, help="""Loss weighting of the Parts Mask""") parser.add_argument("--use_nbb_parts", action='store_true') # Augmentation hyperparameters parser.add_argument("--jitter", default=[0.4, 
0.4, 0.2, 0.1], type=float, nargs='+', help='augmentation mode') parser.add_argument("--jitter_prob", default=0.8, type=float) parser.add_argument("--gray_prob", default=0.2, type=float) parser.add_argument("--solar_prob", default=0.2, type=float) parser.add_argument("--tps_scale", default=0.4, type=float) # Canonical space parser.add_argument("--unwarp_size", type=int, default=128, help="resolution for unwarping") # Learned Grid hyperparameters parser.add_argument("--canon_size", type=int, default=256, help="resolution of canonical space") parser.add_argument("--clamp", action='store_true', help="clamp values of canonical space (-1, 1)") # MLP Hyperparams parser.add_argument("--use_mlp", action='store_true') parser.add_argument("--mlp_hidden_dim", type=int, default=256, help="number of hidden units per layer") parser.add_argument("--mlp_num_layers", type=int, default=8, help="number of layers") parser.add_argument("--mlp_skip_layers", type=int, nargs='+', default=[4, 7], help="skip layers") # Model hyperparameters: parser.add_argument("--canon_lr", type=float, default=0.003, help="base learning rate of canonical space") parser.add_argument("--canon_ema", action='store_true', help='Enable ema for canonical space') parser.add_argument("--stn_ema", action='store_true', help='Enable ema for canonical space') parser.add_argument("--stn_lr", type=float, default=0.003, help="base learning rate of SpatialTransformer") parser.add_argument("--flow_ssl", action='store_true', help="""If specified, apply STN on SSL features)""") parser.add_argument("--channel_multiplier", default=0.5, type=float, help='channel multiplier for smaller models') parser.add_argument("--bilinear", action='store_true', help='Apply bilinear upsample/downsample') parser.add_argument("--padding_mode", default='border', choices=['border', 'zeros', 'reflection'], type=str, help="""Padding algorithm for when the STN samples beyond image boundaries""") parser.add_argument("--use_tanh", action='store_true', help='Use tanh activation at the flow output') parser.add_argument("--disable_tps", action='store_true', help='disable tps transformations') # Backbone parameters parser.add_argument("--bb", default='dino_vits8', choices=['dino_vits8', 'dino_vits16', 'dino_vitb8', 'dino_vitb16', 'vit_small_patch8_224', 'vit_small_patch16_224', 'vit_base_patch16_224'], help='backbone models') parser.add_argument('--bb_stride', default=2, type=int, help="stride.") # Visualization hyperparameters: parser.add_argument("--vis_every", type=int, default=500, help="""frequency with which visualizations are generated during training""") parser.add_argument("--vis_denseres", type=int, default=32, help='number of sparse correspondences to visualize') parser.add_argument("--ckpt_every", type=int, default=10000, help='frequency of checkpointing during training') parser.add_argument("--log_every", default=25, type=int, help='How frequently to log data to TensorBoard') parser.add_argument("--n_sample", type=int, default=4, help="""number of images (real and fake) to generate visuals for""") parser.add_argument("--disable_wandb", action='store_true', help='Disable wandb for debugging') # Learning Rate scheduler hyperparameters: parser.add_argument("--period", default=10000, type=float, help="""Period for cosine learning rate scheduler (measured in gradient steps)""") parser.add_argument("--decay", default=0.9, type=float, help="""Decay factor for the cosine learning rate scheduler""") parser.add_argument("--tm", default=2, type=int, help="""Period multiplier for 
the cosine learning rate scheduler""") return parser def train(args, train_dset, canon, stn, c_ema, t_ema, canon_optim, canon_sched, t_optim, t_sched, loss_fn, nbb_loss_fn, device, writer): # Record modules to make saving checkpoints easier: if args.distributed: t_module = stn.module c_module = canon.module else: t_module = stn c_module = canon # Initialize Spatial Transformation Generator (Thin Plate Spline) aug = Augmentor(jitter=args.jitter, jitter_prob=args.jitter_prob, gray_prob=args.gray_prob, solar_prob=args.solar_prob, tps_scale=args.tps_scale).to(device) # A model checkpoint will be saved whenever the learning rate is zero: zero_lr_iters = lr_cycle_iters(0, args.period, args.iter, args.tm) early_ckpt_iters = set(zero_lr_iters) early_vis_iters = {100} early_vis_iters.update(early_ckpt_iters) # Initialize various training variables and constants: rec_loss = torch.tensor(0.0, device='cuda') flow_tv_loss = torch.tensor(0.0, device='cuda') nbb_loss = torch.tensor(0.0, device='cuda') equi_loss = torch.tensor(0.0, device='cuda') mask_loss = torch.tensor(0.0, device='cuda') parts_loss = torch.tensor(0.0, device='cuda') accum = 0.5 ** (32 / (10 * 1000)) # Resize function for perceptual loss if args.unwarp_size != args.img_size: scale_factor = args.unwarp_size / args.img_size resize_fn = nn.Upsample(scale_factor=scale_factor, mode='bilinear', align_corners=True) else: resize_fn = nn.Identity() # Pre-load on GPU # Assuming ~30 images of size 256x256, takes up ~23 MB device memory has_gt_kp = train_dset.kps is not None all_imgs = train_dset.imgs = train_dset.imgs.to(device) # / 127.5 - 1.0 all_masks = train_dset.masks = train_dset.masks.unsqueeze(1).to(device) all_parts = train_dset.parts = train_dset.parts.to(device) if has_gt_kp: all_kps = train_dset.kps = train_dset.kps.to(device) # Pseudo GT pseudo_kps = train_dset.pseudo_kps = torch.from_numpy(train_dset.pseudo_kps).to(device) num_parts = train_dset.num_parts loss_topk = pseudo_kps.shape[2] if args.sparse_topk is None else min(args.sparse_topk, pseudo_kps.shape[2]) # Progress bar for monitoring training: pbar = range(args.start_iter, args.iter)
if primary():
6
2023-11-14 16:43:16+00:00
24k
atlantic-quantum/Shipyard
tests/passes/test_include_files.py
[ { "identifier": "Compiler", "path": "shipyard/compiler.py", "snippet": "class Compiler:\n version = \"0.1.1\"\n \"\"\"\n Compiler to compile openQASM programs to target programs for different AWG Cores.\n Currently supports compilation to ZI SEQC cores.\n\n Args:\n program_path (Pa...
import io import pytest from pathlib import Path from openpulse import ast, parse from shipyard.compiler import Compiler from shipyard.compiler_error import SemanticError, TransformError from shipyard.printers.zi.seqcprinter import SEQCPrinter from shipyard.setup.internal import SetupInternal
16,802
@pytest.fixture(name="basic_setup") def fixture_basic_setup() -> SetupInternal: json_path = Path(__file__).parent.parent / "setups/basic.json" return SetupInternal.from_json(json_path) @pytest.fixture(name="seqc_printer") def fixture_seqc_printer(basic_setup: SetupInternal) -> SEQCPrinter: return SEQCPrinter(io.StringIO(), basic_setup) def load_ast(file: str) -> ast.Program: path = Path(__file__).parent.parent / f"qasm/include_files/{file}.qasm" with open(path, encoding="utf_8") as qasm_file: qasm_code = qasm_file.read() return parse(qasm_code) def test_ramsey_nested_include(seqc_printer: SEQCPrinter, file="ramsey_nested"): """ Test for nested include files (one qasm on inc) for ramsey experiment """ qasm_path = Path(__file__).parent.parent / f"qasm/include_files/{file}.qasm" setup_path = Path(__file__).parent.parent / "qasm/include_files/setup.json" seqc_path = Path(__file__).parent.parent / "qasm/include_files/ramsey_seqc.seqc"
@pytest.fixture(name="basic_setup") def fixture_basic_setup() -> SetupInternal: json_path = Path(__file__).parent.parent / "setups/basic.json" return SetupInternal.from_json(json_path) @pytest.fixture(name="seqc_printer") def fixture_seqc_printer(basic_setup: SetupInternal) -> SEQCPrinter: return SEQCPrinter(io.StringIO(), basic_setup) def load_ast(file: str) -> ast.Program: path = Path(__file__).parent.parent / f"qasm/include_files/{file}.qasm" with open(path, encoding="utf_8") as qasm_file: qasm_code = qasm_file.read() return parse(qasm_code) def test_ramsey_nested_include(seqc_printer: SEQCPrinter, file="ramsey_nested"): """ Test for nested include files (one qasm on inc) for ramsey experiment """ qasm_path = Path(__file__).parent.parent / f"qasm/include_files/{file}.qasm" setup_path = Path(__file__).parent.parent / "qasm/include_files/setup.json" seqc_path = Path(__file__).parent.parent / "qasm/include_files/ramsey_seqc.seqc"
compiler = Compiler(qasm_path, setup_path)
0
2023-11-16 17:37:29+00:00
24k
quantuminterface/qiclib
src/qiclib/code/qi_jobs.py
[ { "identifier": "TaskRunner", "path": "src/qiclib/hardware/taskrunner.py", "snippet": "class TaskRunner(PlatformComponent):\n \"\"\"Driver to control the Taskrunner on the Hardware Platform.\"\"\"\n\n def __init__(\n self,\n name: str,\n connection,\n controller,\n ...
import os import json import functools import warnings import numpy as np import qiclib.packages.utility as util from abc import abstractmethod from typing import Dict, List, Callable, Optional, Union, Set, Any, Type from ..hardware.taskrunner import TaskRunner from ..experiment.qicode.data_provider import DataProvider from ..experiment.qicode.data_handler import DataHandler from .qi_seq_instructions import SequencerInstruction from .qi_var_definitions import ( _QiVariableBase, _QiCalcBase, _QiConstValue, QiCellProperty, QiExpression, QiVariableSet, QiCondition, ) from .qi_pulse import QiPulse from .qi_visitor import ( QiCMContainedCellVisitor, QiResultCollector, QiVarInForRange, ) from .qi_prog_builder import QiProgramBuilder from .qi_types import ( QiType, QiPostTypecheckVisitor, QiTypeFallbackVisitor, _TypeDefiningUse, ) from .qi_types import _TypeDefiningUse from .qi_types import _TypeDefiningUse from .qi_types import ( _TypeConstraintReasonQiCommand, _IllegalTypeReason, _add_equal_constraints, ) from .qi_types import ( _TypeConstraintReasonQiCommand, _IllegalTypeReason, _add_equal_constraints, ) from .analysis.qi_insert_mem_parameters import ( insert_recording_offset_store_commands, insert_manipulation_pulse_frequency_store_commands, insert_readout_pulse_frequency_store_commands, ) from .qi_simulate import Simulator from ..experiment.qicode.base import QiCodeExperiment from qiclib.experiment.qicode.base import _TaskrunnerSettings from .qi_visitor import QiStringifyJob
17,170
super().__init__(dst) self._value = QiExpression._from(value) dst._type_info.add_illegal_type(QiType.STATE, _IllegalTypeReason.ASSIGN) _add_equal_constraints( QiType.NORMAL, _TypeConstraintReasonQiCommand(cQiAssign), self._value, dst ) _add_equal_constraints( QiType.TIME, _TypeConstraintReasonQiCommand(cQiAssign), self._value, dst ) for variable in self.value.contained_variables: self.add_associated_variable(variable) @property def value(self): return self._value def accept(self, visitor, *input): return visitor.visit_assign_command(self, *input) def _stringify(self) -> str: return f"Assign({self.var}, {self._value})" class cQiDeclare(QiVariableCommand): """Command generated by initialization of new QiVariable""" def __init__(self, dst: _QiVariableBase) -> None: super().__init__(var=dst) def accept(self, visitor, *input): return visitor.visit_declare_command(self, *input) def _stringify(self) -> str: return f"v{self.var.str_id} = {self.var}" class cQiASM(QiCommand): def __init__(self, cells: QiCell, instr: SequencerInstruction, cycles: int): super().__init__() self._relevant_cells.add(cells) self.asm_instruction = instr self.cycles = cycles def accept(self, visitor, *input): return visitor.visit_asm_command(self, *input) def _stringify(self) -> str: return f"ASM({self.asm_instruction.get_riscv_instruction()})" class cQiMemStore(QiCommand): def __init__(self, cell: QiCell, addr: int, value): super().__init__() self._relevant_cells.add(cell) self.addr = addr self.value = value def accept(self, visitor, *input): return visitor.visit_mem_store_command(self, *input) def _stringify(self): cell_str = ", ".join(list(map(lambda x: f"{x}", self._relevant_cells))) return f"cQiMemStore({cell_str}, {self.addr}, {self.value})" class QiContextManager(QiCommand): """Base Class for If, Else, ForRange and Parallel. Defines functions for storing commands.""" def __init__(self) -> None: super().__init__() self.body: List[QiCommand] = [] def __enter__(self): _QiJobReference._open_new_context() return self def __exit__(self, exception_type, exception_value, traceback): self.body = _QiJobReference._close_context() _QiJobReference._add_command(self) def accept(self, visitor, *input): return visitor.visit_context_manager(self, *input) class If(QiContextManager): """ Add conditional logic to the program. If multiple cells are used inside the body, a synchronization between the cells takes place before the If. :param condition: The condition to check Example ------- .. code-block:: python with QiJob() as job: q = QiCells(1) x = QiIntVariable(1) with If(x > 1): ... # won't be executed The If statement is most commonly used to react to qubit states in real-time: .. code-block:: python from qiclib import jobs with QiJob() as job: q = QiCells(1) state = QiStateVariable() jobs.Readout(q[0], state_to=state) with If(state = 0): ... # Apply some conditional logic based on the qubit state """
# Copyright © 2017-2023 Quantum Interface (quantuminterface@ipe.kit.edu) # Richard Gebauer, IPE, Karlsruhe Institute of Technology # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. """ This is the main module of QiCode. Here, all important commands write QiPrograms are defined. """ class QiResult: """Result of an experiment. Can be accessed via :python:`job.cells[cell_index].data("result name")`. Where :python:`cells` denotes a :class:`QiCells` object and :python:`cell_index` an integer. The actual data can be retrieved as a numpy array using the :meth:`get` Method Example ------- .. code-block:: python qic: QiController = ... sample: QiSample = ... with QiJob() as job: q = QiCells(1) Readout(q[0], save_to="result") job.run(qic, sample, averages=1000) data = job.cells[0].data("result") :param name: The name of the variable, by default None """ def __init__(self, name: Optional[str] = None) -> None: self._cell = None self.data = None self.recording_count = 0 self.name: str = "" if name is None else name def get(self) -> np.ndarray: """gets the data of the result as a numpy array :return: The data of the experiment """ return np.array(self.data) def __str__(self) -> str: return f'QiResult("{self.name}")' class QiCommand: """Base class of every Job command. Provides _relevant_cells, containing every cell used for the execution of the command. Provides _associated_variable_set, containing every variable needed for the execution of the command. """ def __init__(self) -> None: self._associated_variable_set = QiVariableSet() self._relevant_cells: Set[QiCell] = set() @abstractmethod def accept(self, visitor, *input): raise RuntimeError( f"{self.__class__} doesn't implement `accept`. This is a bug." ) def is_variable_relevant(self, variable: _QiVariableBase) -> bool: return variable in self._associated_variable_set def add_associated_variable(self, x): if isinstance(x, _QiVariableBase): self._associated_variable_set.add(x) def __str__(self) -> str: return "cQiCommand" def _stringify(self) -> str: raise NotImplementedError(f"_stringify not implemented for {repr(self)}") _QiJobReference = None def _add_cmd_to_job(cmd: QiCommand): if _QiJobReference is None: raise RuntimeError("Can not use command outside QiJob context manager.") _QiJobReference._add_command(cmd) def _set_job_reference(job): """Used for testing purposes""" # pylint: disable=global-statement global _QiJobReference _QiJobReference = job def _delete_job_reference(): """Used for testing purposes""" # pylint: disable=global-statement global _QiJobReference _QiJobReference = None class QiCell: """A QiCell is an abstract representation of the qubit/cell the program is run on. Usually, a single :python:`QiCell` is not instantiated, but instead a :class:`QiCells` object. For a single :python:`QiCell`, use instead :python:`QiCells(1)` A :python:`QiCell` must be instantiated inside within a :class:`QiJob` context. 
The :python:`QiCell` object can be used to get properties that are defined on :class:`QiSamples <QiSample>`. For this, index the :python:`QiCell` object using the name of the property: .. code-block:: python q: QiCell = ... t1_time = q["t1"] The actual value for the accessed property (in the example above, the T1 time) is filled in when executing a :class:`QiJob` and providing the actual sample. **Tasks of the QiCell**: - Saves the pulses needed for program execution. - Provides a dictionary functionality to define commonly used durations/properties. - Implements a Sequencer object, which contains the assembler program after compilation. :param cellID: A unique ID :raises RuntimeError: When the :python:`QiCell` is instantiated outside a `QiJob` """ def __init__(self, cellID: int): if not isinstance(_QiJobReference, QiJob): raise RuntimeError("QiCell can't be used outside of QiJob.") self.cellID = cellID self.manipulation_pulses: List[QiPulse] = [] self.flux_pulses: List[QiPulse] = [] self.readout_pulses: List[QiPulse] = [] self._result_container: Dict[str, QiResult] = {} # The order in which recorded values are assigned to which result container self._result_recording_order: List[QiResult] = [] self._unresolved_property: Set[QiCellProperty] = set() self._job_ref = _QiJobReference self._relevant_vars: Set[_QiVariableBase] = set() # These attributes are determined by dataflow analyses self._initial_manip_freq: float = None self._initial_readout_freq: float = None self._initial_rec_offset: float = None self._rec_length: Union[int, float, QiCellProperty] = None self._properties: Dict[QiCellProperty, Any] = {} def __getitem__(self, key): if _QiJobReference != self._job_ref: raise RuntimeError( "Tried getting values for cells registered to other QiJob" ) prop = self._properties.get(key, QiCellProperty(self, key)) if isinstance(prop, QiCellProperty): self._unresolved_property.add(key) return prop def __setitem__(self, key, value): if _QiJobReference != self._job_ref: raise RuntimeError( "Tried setting values for cells registered to other QiJob" ) self._properties[key] = value def __call__(self, qic): return qic.cell[self.qic_cell] def get_properties(self): return self._properties.copy() def add_pulse(self, pulse: QiPulse): if pulse not in self.manipulation_pulses: self.manipulation_pulses.append(pulse) if len(self.manipulation_pulses) > 13: raise RuntimeError("Too many pulses in use") return self.manipulation_pulses.index(pulse) + 1 # index 0 and 15 are reserved @property def initial_manipulation_frequency(self): if self._initial_manip_freq is None: if len(self.manipulation_pulses) > 0: warnings.warn( "Manipulation pulses without frequency given, using 90 MHz." ) return 90e6 # Default frequency freq = self._initial_manip_freq return freq() if isinstance(freq, QiCellProperty) else freq def add_recording_length(self, length): if self._rec_length is None: self._rec_length = length elif ( not self._rec_length._equal_syntax(length) if isinstance(self._rec_length, QiExpression) else self._rec_length != length ): raise RuntimeError( f"Cell {self.cellID}: Multiple definitions of recording length used." 
) def add_readout_pulse(self, pulse: QiPulse): if pulse not in self.readout_pulses: self.readout_pulses.append(pulse) if len(self.readout_pulses) > 13: raise RuntimeError("Too many pulses in use") return self.readout_pulses.index(pulse) + 1 # index 0 and 15 are reserved @property def initial_readout_frequency(self): if self._initial_readout_freq is None: if len(self.readout_pulses) > 0: warnings.warn("Readout pulses without frequency given, using 30 MHz.") return 30e6 # Default frequency freq = self._initial_readout_freq return freq() if isinstance(freq, QiCellProperty) else freq @property def recording_length(self): """the length of the recording pulse""" if self._rec_length is not None: return ( self._rec_length() if isinstance(self._rec_length, QiCellProperty) else self._rec_length ) return 0 @property def initial_recording_offset(self): """the recording offset in seconds""" if self._initial_rec_offset is not None: return ( self._initial_rec_offset() if isinstance(self._initial_rec_offset, QiCellProperty) else self._initial_rec_offset ) return 0 def get_result_container(self, result: str) -> QiResult: if result in self._result_container: return self._result_container[result] # was already added else: box = QiResult(result) box._cell = self self._result_container[result] = box return box def add_variable(self, var: _QiVariableBase): self._relevant_vars.add(var) def get_number_of_recordings(self): return len(self._result_recording_order) def set_default_readout(self, pulse): pass def reset(self): for container in self._result_container.values(): container.data = [] def data( self, name: Optional[str] = None ) -> Union[Dict[str, np.ndarray], np.ndarray]: """ Returns the data after running an experiment. When calling this function without a name, i.e., calling :python:`cell.data()`, returns a dictionary containing the results as numpy arrays. When calling this function with a name, i.e., calling :python:`cell.data("result_name")`, returns the whole dictionary. :param name: The name of the data :return: A single result, or a dictionary of result names mapped to results. """ if name is None: result_dict = {} for key, container in self._result_container.items(): result_dict.update({key: container.get()}) return result_dict else: return self._result_container[name].get() def _resolve_properties(self, len_dict: Dict[QiCellProperty, Any]): keys = list(self._unresolved_property) missing_keys = self._unresolved_property.difference(len_dict.keys()) if missing_keys: raise RuntimeError( f"Cell {self.cellID}: Not all properties for job could be resolved. " f"Missing properties: {missing_keys}" ) for key in keys: self._properties[key] = len_dict[key] @property def has_unresolved_properties(self): return len(self._unresolved_property) > 0 def _get_unresolved_properties(self): return [ key for key in list(self._unresolved_property) if self._properties.get(key) is None ] def __str__(self) -> str: return f"QiCell({self.cellID})" class QiCells: """ QiCells encapsulates multiple :class`QiCell` objects. It is a list-like object where the individual cells can be accessed using the index operator, i.e. .. code-block:: python cells = QiCells(5) cell0: QiCell = cells[0] cell3: QiCell = cells[3] :param num: The number of cells to create :raises RuntimeError: When the :python:`QiCells` object is instantiated outside a :python:`QiJob` """ def __init__(self, num: int) -> None: if not isinstance(_QiJobReference, QiJob): raise RuntimeError( "QiCells can only be used within QiJob description. 
" + "If you try to create a sample object, use the new QiSample instead." ) self.cells = [QiCell(x) for x in range(num)] _QiJobReference._register_cells(self.cells) def __getitem__(self, key): return self.cells[key] def __len__(self): return len(self.cells) class QiSampleCell: """QiSampleCell is the representation of a single qubit/cell and its properties. All necessary parameters to perform experiments can be stored here. For this purpose, the QiSampleCell can be utilized as a dictionary with user-defined keys. """ def __init__(self, cellID: int, cells_ref: "QiSample"): self.cellID = cellID self._cells_ref = cells_ref self._relevant_vars: Set[_QiVariableBase] = set() self._properties: Dict[str, Any] = {} def __getitem__(self, key): return self._properties[key] def __setitem__(self, key, value): self._properties[key] = value def __call__(self, qic): return qic.cell[self.qic_cell] @property def qic_cell(self): return self._cells_ref.cell_map[self.cellID] def get_properties(self): return self._properties.copy() def __str__(self) -> str: return f"QiSampleCell({self.cellID})" def _export(self): return {"properties": self.get_properties()} def _import(self, prop_dict, index): if prop_dict is None: warnings.warn( f"Imported JSON string does not contain 'properties' for cell[{index}]." ) return self._properties.update(prop_dict) class QiSample: """Representation of an experiment sample and its properties. Property keys can be arbitrary strings, and property values can be anything. Set the keys using :python:`sample["property_key"] = property_value` and get the values the same way, i.e., :python:`property_value = sample["property_key"]`. Note that this class **cannot** be instantiated within a :class:`QiJob`. Instead, it must be defined outside one. Accessing samples defined here within a QiJob is still possible, however, using the :class:`QiCell` object: .. code-block:: python sample: QiSample = ... qic: QiController = ... sample["t1"] = 100e-6 with QiJob() as job: q = QiCells(1) Wait(q[0], q[0]["t1"]) job.run(qic, sample) # Note that we pass the sample object here to make the value available in the job The :python:`QiSample` object is serializable to `JSON <https://www.json.org/>`_. Have a look at the :meth:`save` and :meth:`load` methods for more :param num: The number of cells/qubits this sample has. :param cell_map: On which QiController cells these are mapped, by default [0, 1, ..., num-1] :raises RuntimeError: When the Sample is used within a :class:`QiJob` """ def __init__(self, num: int, cell_map: Optional[List[int]] = None) -> None: self._cell_map = None if _QiJobReference is not None: raise RuntimeError( "QiSample can only be used outside of QiJob to define sample " "properties. Inside a QiJob, use QiCells as placeholder for the " "qubits/cells instead." 
) self.cells: List[QiSampleCell] = [] for x in range(num): self.cells.append(QiSampleCell(cellID=x, cells_ref=self)) self.cell_map = cell_map or list(range(num)) def __getitem__(self, key): return self.cells[key] def __len__(self): return len(self.cells) def __str__(self): return ( f"QiSample({len(self.cells)}, cell_map=[{','.join(map(str, self.cell_map))}]):\n" + "\n".join( [ f"[{i}]: {json.dumps(props['properties'], indent=2)}" for i, props in enumerate(self._export()["cells"]) ] ) ) def _arrange_for_controller(self) -> List[Optional[QiSampleCell]]: inverse: List[Optional[QiSampleCell]] = [None] * (max(self.cell_map) + 1) for cell, qi_cell_index in enumerate(self.cell_map): inverse[qi_cell_index] = self[cell] return inverse @property def cell_map(self): return self._cell_map @cell_map.setter def cell_map(self, cell_map): if len(cell_map) != len(self): raise ValueError( "cell_map needs to have as many entries as the there are cells, but " f"{len(cell_map)} entries given and {len(self)} required!" ) if len(set(cell_map)) != len(cell_map): raise ValueError("Duplicate values not allowed in cell_map!") if any(c < 0 for c in cell_map): raise ValueError("Cell indices inside cell_map cannot be negative!") self._cell_map = cell_map def _export(self): properties = [cell._export() for cell in self.cells] return {"cells": properties, "cell_map": self.cell_map} def _import(self, jsn_string): jsn_loaded = json.loads(jsn_string) self._evaluate_import(jsn_loaded.get("cells", None)) self.cell_map = jsn_loaded.get("cell_map", self.cell_map) def save(self, file_path: Union[str, os.PathLike], overwrite: bool = False): """ Save the sample to a file denoted by the :python:`file_path` argument in JSON format. :param file_path: Where to store the file :param overwrite: When true, allow overwriting an existing file. :raise FileExistsError: When overwrite is False and the file exists. """ mode = "w" if overwrite is True else "x" with open(file_path, mode, encoding="utf-8") as file: json.dump(self._export(), file) def load(self, file_path: Union[str, os.PathLike]): """ Loads the file at :python:`file_path` and assigns all properties of the loaded file to this :class:`QiSample` object. :param file_path: Where to look for the file """ with open(file_path, "r", encoding="utf-8") as file: self._import(file.read()) def _evaluate_import(self, sample): if sample is None: warnings.warn("Imported JSON string does not contain 'cells'.") return if len(sample) != len(self): raise ValueError( f"Imported JSON contains {len(sample)} sample cells but {len(self)} " "expected." 
) for i in range(0, len(self)): self.cells[i]._import(sample[i].get("properties", None), i) class _JobDescription: """Saves experiment descriptions and handles storage of commands""" def __init__(self): self._commands: List[QiCommand] = [] self._ContextStack: List[List[QiCommand]] = [] def __getitem__(self, key): return self._commands[key] def __len__(self): return len(self._commands) def add_command(self, command): """Checks current command for used cells and raises error, if cells are not defined for current QiJob""" if isinstance(command, QiCellCommand): if _QiJobReference != command.cell._job_ref: raise RuntimeError("Cell not defined for current job") self._commands.append(command) def open_new_context(self): """Saves current commands in a stack and clears command list""" self._ContextStack.append(self._commands.copy()) self._commands = [] def close_context(self) -> List[QiCommand]: """returns the current command list, and loads the commands from top of stack""" current_commands = self._commands.copy() self._commands = self._ContextStack.pop() return current_commands def reset(self): self._commands = [] self._ContextStack = [] class QiCellCommand(QiCommand): """ Cell commands are commands using only one cell, such as Play and Wait commands. :param cell: The target cell """ def __init__(self, cell: QiCell): super().__init__() self.cell = cell self._relevant_cells.add(cell) def accept(self, visitor, *input): return visitor.visit_cell_command(self, *input) class QiVariableCommand(QiCommand): """Base class of variable commands cQiDeclare and cQiAssign""" def __init__(self, var: _QiVariableBase): super().__init__() self.var = var def accept(self, visitor, *input): return visitor.visit_variable_command(self, *input) class cQiWait(QiCellCommand): """Command generated by :meth:`Wait`""" def __init__(self, cell, length: Union[QiExpression, QiCellProperty]): super().__init__(cell) self._length = length if isinstance(length, _QiVariableBase): self.add_associated_variable(length) elif isinstance(length, _QiCalcBase): for variable in length.contained_variables: self.add_associated_variable(variable) if isinstance(length, QiExpression): length._type_info.set_type(QiType.TIME, _TypeDefiningUse.WAIT_COMMAND) @property def length(self): return ( self._length() if isinstance(self._length, QiCellProperty) else self._length ) def _stringify(self) -> str: return f"Wait({self.cell}, {self._length})" class _cQiPlay_base(QiCellCommand): """Base class of Play commands. 
Saves pulses, trigger_index and adds pulse variables to associated variable set """ def __init__(self, cell, pulse: QiPulse): super().__init__(cell) self.pulse = pulse # default False; Set True for certain commands when unrolling a loop with TimingVariable == 1 cycle self._var_single_cycle_trigger = False for variable in self.pulse.variables: self.add_associated_variable(variable) # length of command might differ from pulse length self._length: Union[float, _QiVariableBase, QiCellProperty] = self.pulse.length self.trigger_index = 0 @property def length(self): return ( self._length if not isinstance(self._length, QiCellProperty) else self._length() ) @length.setter def length(self, value): self._length = value class cQiPlay(_cQiPlay_base): """Command generated by Play()""" def __init__(self, cell, pulse: QiPulse): super().__init__(cell, pulse) self.trigger_index = cell.add_pulse(pulse) def _stringify(self) -> str: return f"Play({self.cell}, {self.pulse._stringify()})" class cQiPlayFlux(_cQiPlay_base): pass class cQiPlayReadout(_cQiPlay_base): """Command generated by :meth:`PlayReadout`""" def __init__(self, cell, pulse) -> None: super().__init__(cell, pulse) self.recording: Union[None, cQiRecording] = None self.trigger_index = cell.add_readout_pulse(pulse) @property def length(self): length = ( self._length if not isinstance(self._length, QiCellProperty) else self._length() ) # if Recording is defined and length is not defined by variable, compare both lengths if isinstance(self.recording, cQiRecording) and not isinstance( self._length, _QiVariableBase ): return max(length, self.recording.length) return length @length.setter def length(self, value): self._length = value if isinstance(self.recording, cQiRecording): self.recording.length = value @property def uses_state(self): return self.recording is not None and self.recording.uses_state def _stringify(self) -> str: return f"PlayReadout({self.cell}, {self.pulse._stringify()})" class cQiRotateFrame(_cQiPlay_base): """Command generated by :meth:`RotateFrame`""" def __init__(self, cell, angle: float): # Negate phase because frame needs to be shifted in the opposite direction # than pulses -> want to shift the state on bloch sphere but shift the frame pulse = QiPulse(0, phase=-1 * angle) pulse.shift_phase = True # Special property to make phase offset persistant super().__init__(cell, pulse) self.trigger_index = cell.add_pulse(pulse) self.length = util.conv_cycles_to_time(1) # command needs exactly one cycle self.angle = angle def _stringify(self) -> str: return f"RotateFrame({self.cell}, {self.angle})" class cQiSync(QiCommand): """Command generated by :meth:`Sync`""" def __init__(self, cells: List[QiCell]): super().__init__() self._relevant_cells.update(cells) def accept(self, visitor, *input): return visitor.visit_sync_command(self, *input) def _stringify(self) -> str: return ( "Sync(" + ", ".join( [ f"{cell}" for cell in sorted(self._relevant_cells, key=lambda c: c.cellID) ] ) + ")" ) class cQiRecording(QiCellCommand): """Command generated by Recording()""" def __init__( self, cell: QiCell, save_to: Union[str, _QiVariableBase, None], state_to: Union[_QiVariableBase, None], length: Union[int, float, QiCellProperty], offset: Union[int, float, QiExpression], toggleContinuous: Optional[bool] = None, ): super().__init__(cell) self.result_box = None self.var = None if ( isinstance(length, QiExpression) and length.type == QiType.STATE or isinstance(offset, QiExpression) and offset.type == QiType.STATE ): raise RuntimeError("State variable can only 
be used at save_to parameter.") if isinstance(state_to, _QiVariableBase): state_to._type_info.set_type( QiType.STATE, _TypeDefiningUse.RECORDING_SAVE_TO ) self.add_associated_variable(state_to) self.var = state_to self.save_to = save_to assert not isinstance( save_to, QiResult ) # support for QiResult as parameter was removed. if isinstance(save_to, _QiVariableBase): # TODO This should be deprecated and turned into new result variable # to handle I/Q values instead if necessary -> consistency if self.var is not None: raise RuntimeError("Cannot pass variable to state_to and save_to.") save_to._type_info.set_type( QiType.STATE, _TypeDefiningUse.RECORDING_SAVE_TO ) self.add_associated_variable(save_to) self.var = save_to elif isinstance(save_to, str): self.result_box = cell.get_result_container( save_to ) # container might have been added to cell before self.save_to = save_to cell.add_recording_length(length) self._length = length if isinstance(self._length, QiExpression): self._length._type_info.set_type( QiType.TIME, _TypeDefiningUse.RECORDING_OFFSET_EXPRESSION ) self._offset: QiExpression = QiExpression._from(offset) self._offset._type_info.set_type( QiType.TIME, _TypeDefiningUse.RECORDING_OFFSET_EXPRESSION ) for var in self._offset.contained_variables: var._relevant_cells.add(cell) self.toggleContinuous = toggleContinuous self.follows_readout = False try: cmd = _QiJobReference.commands[-1] if ( isinstance(cmd, cQiPlayReadout) and cmd.cell == self.cell ): # Warning if previous cmd is readout but different cell self.follows_readout = True cmd.recording = self cmd._associated_variable_set.update(self._associated_variable_set) except IndexError: pass @property def uses_state(self): return len(self._associated_variable_set) > 0 @property def length(self): return ( self._length() if isinstance(self._length, QiCellProperty) else self._length ) @length.setter def length(self, value): self._length = value @property def offset(self): return ( self._offset() if isinstance(self._offset, QiCellProperty) else self._offset ) def _stringify_args(self) -> str: """Determines non-default args to explicitly stringify""" arg_strings = [str(self.cell), str(self._length)] if not ( isinstance(self._offset, _QiConstValue) and self._offset._given_value == 0 ): arg_strings.append(f"offset={self._offset}") if self.result_box is not None: arg_strings.append(f'save_to="{self.result_box.name}"') if self.var is not None: arg_strings.append(f"state_to={self.var}") if self.toggleContinuous is not None: arg_strings.append(f"toggleContinuous={self.toggleContinuous}") return ", ".join(arg_strings) def _stringify(self) -> str: return f"Recording({self._stringify_args()})" class cQiStore(QiCellCommand): """Command generated by :meth:`Store`""" def __init__(self, cell, store_var: _QiVariableBase, save_to: QiResult): super().__init__(cell) self.store_var = store_var self.save_to = save_to self.add_associated_variable(store_var) def _stringify(self) -> str: return f"Store({self.cell}, {self.store_var}, {self.save_to})" class cQiAssign(QiVariableCommand): """Command generated by :meth:`Assign`""" def __init__(self, dst: _QiVariableBase, value: Union[QiExpression, int, float]): if not isinstance(dst, _QiVariableBase): raise TypeError("Target of Assign can only be a QiVariable.") super().__init__(dst) self._value = QiExpression._from(value) dst._type_info.add_illegal_type(QiType.STATE, _IllegalTypeReason.ASSIGN) _add_equal_constraints( QiType.NORMAL, _TypeConstraintReasonQiCommand(cQiAssign), self._value, dst ) 
_add_equal_constraints( QiType.TIME, _TypeConstraintReasonQiCommand(cQiAssign), self._value, dst ) for variable in self.value.contained_variables: self.add_associated_variable(variable) @property def value(self): return self._value def accept(self, visitor, *input): return visitor.visit_assign_command(self, *input) def _stringify(self) -> str: return f"Assign({self.var}, {self._value})" class cQiDeclare(QiVariableCommand): """Command generated by initialization of new QiVariable""" def __init__(self, dst: _QiVariableBase) -> None: super().__init__(var=dst) def accept(self, visitor, *input): return visitor.visit_declare_command(self, *input) def _stringify(self) -> str: return f"v{self.var.str_id} = {self.var}" class cQiASM(QiCommand): def __init__(self, cells: QiCell, instr: SequencerInstruction, cycles: int): super().__init__() self._relevant_cells.add(cells) self.asm_instruction = instr self.cycles = cycles def accept(self, visitor, *input): return visitor.visit_asm_command(self, *input) def _stringify(self) -> str: return f"ASM({self.asm_instruction.get_riscv_instruction()})" class cQiMemStore(QiCommand): def __init__(self, cell: QiCell, addr: int, value): super().__init__() self._relevant_cells.add(cell) self.addr = addr self.value = value def accept(self, visitor, *input): return visitor.visit_mem_store_command(self, *input) def _stringify(self): cell_str = ", ".join(list(map(lambda x: f"{x}", self._relevant_cells))) return f"cQiMemStore({cell_str}, {self.addr}, {self.value})" class QiContextManager(QiCommand): """Base Class for If, Else, ForRange and Parallel. Defines functions for storing commands.""" def __init__(self) -> None: super().__init__() self.body: List[QiCommand] = [] def __enter__(self): _QiJobReference._open_new_context() return self def __exit__(self, exception_type, exception_value, traceback): self.body = _QiJobReference._close_context() _QiJobReference._add_command(self) def accept(self, visitor, *input): return visitor.visit_context_manager(self, *input) class If(QiContextManager): """ Add conditional logic to the program. If multiple cells are used inside the body, a synchronization between the cells takes place before the If. :param condition: The condition to check Example ------- .. code-block:: python with QiJob() as job: q = QiCells(1) x = QiIntVariable(1) with If(x > 1): ... # won't be executed The If statement is most commonly used to react to qubit states in real-time: .. code-block:: python from qiclib import jobs with QiJob() as job: q = QiCells(1) state = QiStateVariable() jobs.Readout(q[0], state_to=state) with If(state = 0): ... # Apply some conditional logic based on the qubit state """
def __init__(self, condition: Optional[QiCondition] = None):
10
2023-11-10 10:26:10+00:00
24k
fg320/DEASC
examples/12C_5x1_farm_dyn_tuning_wso_grouping_looping.py
[ { "identifier": "WfModel", "path": "deasc/wf_model.py", "snippet": "class WfModel:\n \"\"\"\n Class for wind farm modelling (Interface setup but not limited to FLORIS\n framework).\n \"\"\"\n\n def __init__(self, input_file, path):\n \"\"\"\n Initialise wind farm object by p...
import numpy as np from deasc import WfModel from deasc import WSOpt from deasc import Tuning from deasc import GPWrap from deasc import TuningDyn_Grouping from deasc import TuningDyn_Looping_Turbine from deasc.utils_floris import ( floris_extract_object_dict, floris_extract_parameter, floris_param_change_object_dict, floris_param_change_object )
16,427
tuning_groups=[[1, 2], [3, 4]], GP_model=GP_model) # Optimisation with dynamic tuning # Initialise wake steering object wso_obj_tuning = WSOpt(wf_model=wf_model, inflow=inflow, variables=variables, var_bounds=var_bounds, var_initial=var_initial, opt_method="SLSQP", opt_options=None, obj_function="Farm Power", tuning_dynamic=True ) # Assign dynamic tuning to wake steering optimisation wso_obj_tuning.tuning_dyn_initialize([tuning_dyn_obj]) # Optimise and print yaw angles opt_yaw_angles_vars, opt_yaw_angles_all = wso_obj_tuning.optimize_yaw() print('Optimal farm yaw angles with dynamic parameter tuning:') print(opt_yaw_angles_all) # Extract wind farm power without any yaw wf_pow_noyaw = wso_obj_tuning.wf_pow_noyaw # %% Looping refinement yaw_initial = opt_yaw_angles_all # Number of loops for each turbine n_iterations = 1 # One loop for each turbine variable for turbine in [1, 2, 3, 4]*n_iterations: # Wake steering optimisation inputs - single turbine inflow = (yaw_initial, wd, ws, ti, shear) variables = [turbine] var_initial = [yaw_initial[turbine-1]] # %% Looping GP dataset # Higher fidelity dataset # Initialise trainer and set farm layout path = "./inputs/" input_file_trainer = "gch.yaml" trainer = WfModel(input_file_trainer, path) trainer.set_aligned_layout(5, 1, 7, 5) # Define training set yaw_list = [] for yaw_var in np.linspace(-25, 25, 7): yaw_single = yaw_initial.copy() yaw_single[turbine-1] = yaw_var yaw_list.append(yaw_single) # Produce high-fidelity power measurement for each training condition wt_pow_training_list = [] for i in range(len(yaw_list)): _, wt_pow_training, _, _ = trainer.farm_eval(yaw=yaw_list[i], wd=wd, ws=ws, ti=ti, shear=shear) wt_pow_training_list.append(wt_pow_training) # Parameter tuning - Run a single optimisation for each training condition # Initialise dataset optimal_parameter_dataset = {} for i, yaw in enumerate(yaw_list): # Initialise trainee trainee = wf_model # Parameters to tune param_class_list = ['wake_velocity_parameters'] param_name_list = ['we'] param_bounds_list = [(0.0, 0.1)] # TURBO options TURBO_opt = {"n_init": 2, "max_evals": 100, "batch_size": 4, # 1 = Serial "verbose": True, "use_ard": True, "max_cholesky_size": 2000, "n_training_steps": 50, "min_cuda": 1024, "device": "cpu", "dtype": "float64"} # Initialise parameter tuning object tune_obj = Tuning(wf_model=trainee, variables_class_list=param_class_list, variables_names_list=param_name_list, variables_bounds_list=param_bounds_list, obj_func_name='RMSE', opt_method='TURBO_1', opt_options=TURBO_opt) # Specify higher-fidelity tuning condition tune_obj.tuning_conditions(yaw_angles_list=[yaw], wind_directions_list=[wd], wind_speeds_list=[ws], turbulence_intensities_list=[ti], wind_shear_list=[shear]) # Specify higher-fidelity turbine power measurements tune_obj.tuning_data(data_power_list=[wt_pow_training_list[i]]) # Tune parameters, extract tuned dictionary, reinitialise wf_model object trainee, trainee_dict_opt = tune_obj.tune_parameters() # Extract tuned k parameter
""" This example shows wake steering optimisation on a 5x1 wind farm of NREL 5 MW turbines. Dynamic parameter tuning with the looping approach is implemented to refine the results achieved with grouping. Tuning is introduced in the optimisation for the wake expansion parameter k of the Jensen wake model. The tuning variables are the yaw angles of all wind turbines in the farm, excluding the most downstream one. """ # %% Initial wake steering optimisation - Grouping approach for dynamic parameter tuning # Initialise and set layout for wind farm model path = "./inputs/" input_file = "jensen.yaml" wf_model = WfModel(input_file, path) wf_model.set_aligned_layout(5, 1, 7, 5) # Set kd deflection parameter wf_model_dict = floris_extract_object_dict(wf_model) wf_model_dict = floris_param_change_object_dict(wf_model_dict, 'wake_deflection_parameters', 'kd', 0.3) wf_model = floris_param_change_object(wf_model, wf_model_dict) # Specify atmopheric conditions ws = 8.0 wd = 270 ti = 0.05 shear = 0.0 # Wake steering optimisation inputs yaw_initial = np.full(shape=(5), fill_value=0) inflow = (yaw_initial, wd, ws, ti, shear) variables = [1, 2, 3, 4] var_bounds = (-25, 25) var_initial = np.full(shape=(len(variables)), fill_value=0) # Dynamic tuning object # Parameter info parameter_class = 'wake_velocity_parameters' parameter_name = 'we' # Import optimal parameter dataset and extract GP input dataset_path = "./optimal_parameter_datasets/" dataset_import = np.load(dataset_path+'we_5x1_2dim_grouping.npy', allow_pickle=True) optimal_parameter_dataset = dataset_import.item() yaw_data = [] param_data = [] for key in optimal_parameter_dataset.keys(): yaw_data.append([key[0], key[2]]) # Extract group yaw param_data.append([optimal_parameter_dataset[key]]) # Construct Gaussian Process (GP) GP_obj = GPWrap(parameter_class=parameter_class, parameter_name=parameter_name, dimensions=2) GP_model = GP_obj.GP_so(yaw_data, param_data, num_restarts=100, noise=0.05) # Tuning object initialisation tuning_dyn_obj = TuningDyn_Grouping(param_class=parameter_class, param_name=parameter_name, tuning_groups=[[1, 2], [3, 4]], GP_model=GP_model) # Optimisation with dynamic tuning # Initialise wake steering object wso_obj_tuning = WSOpt(wf_model=wf_model, inflow=inflow, variables=variables, var_bounds=var_bounds, var_initial=var_initial, opt_method="SLSQP", opt_options=None, obj_function="Farm Power", tuning_dynamic=True ) # Assign dynamic tuning to wake steering optimisation wso_obj_tuning.tuning_dyn_initialize([tuning_dyn_obj]) # Optimise and print yaw angles opt_yaw_angles_vars, opt_yaw_angles_all = wso_obj_tuning.optimize_yaw() print('Optimal farm yaw angles with dynamic parameter tuning:') print(opt_yaw_angles_all) # Extract wind farm power without any yaw wf_pow_noyaw = wso_obj_tuning.wf_pow_noyaw # %% Looping refinement yaw_initial = opt_yaw_angles_all # Number of loops for each turbine n_iterations = 1 # One loop for each turbine variable for turbine in [1, 2, 3, 4]*n_iterations: # Wake steering optimisation inputs - single turbine inflow = (yaw_initial, wd, ws, ti, shear) variables = [turbine] var_initial = [yaw_initial[turbine-1]] # %% Looping GP dataset # Higher fidelity dataset # Initialise trainer and set farm layout path = "./inputs/" input_file_trainer = "gch.yaml" trainer = WfModel(input_file_trainer, path) trainer.set_aligned_layout(5, 1, 7, 5) # Define training set yaw_list = [] for yaw_var in np.linspace(-25, 25, 7): yaw_single = yaw_initial.copy() yaw_single[turbine-1] = yaw_var yaw_list.append(yaw_single) # 
Produce high-fidelity power measurement for each training condition wt_pow_training_list = [] for i in range(len(yaw_list)): _, wt_pow_training, _, _ = trainer.farm_eval(yaw=yaw_list[i], wd=wd, ws=ws, ti=ti, shear=shear) wt_pow_training_list.append(wt_pow_training) # Parameter tuning - Run a single optimisation for each training condition # Initialise dataset optimal_parameter_dataset = {} for i, yaw in enumerate(yaw_list): # Initialise trainee trainee = wf_model # Parameters to tune param_class_list = ['wake_velocity_parameters'] param_name_list = ['we'] param_bounds_list = [(0.0, 0.1)] # TURBO options TURBO_opt = {"n_init": 2, "max_evals": 100, "batch_size": 4, # 1 = Serial "verbose": True, "use_ard": True, "max_cholesky_size": 2000, "n_training_steps": 50, "min_cuda": 1024, "device": "cpu", "dtype": "float64"} # Initialise parameter tuning object tune_obj = Tuning(wf_model=trainee, variables_class_list=param_class_list, variables_names_list=param_name_list, variables_bounds_list=param_bounds_list, obj_func_name='RMSE', opt_method='TURBO_1', opt_options=TURBO_opt) # Specify higher-fidelity tuning condition tune_obj.tuning_conditions(yaw_angles_list=[yaw], wind_directions_list=[wd], wind_speeds_list=[ws], turbulence_intensities_list=[ti], wind_shear_list=[shear]) # Specify higher-fidelity turbine power measurements tune_obj.tuning_data(data_power_list=[wt_pow_training_list[i]]) # Tune parameters, extract tuned dictionary, reinitialise wf_model object trainee, trainee_dict_opt = tune_obj.tune_parameters() # Extract tuned k parameter
k_tuned = floris_extract_parameter(trainee_dict_opt,
7
2023-11-10 18:13:27+00:00
24k
PlaxtonFlarion/NexaFlow
nexaflow/skills/alynex.py
[ { "identifier": "toolbox", "path": "nexaflow/toolbox.py", "snippet": "def video_capture(video_path: str):\ndef video_jump(video_cap: cv2.VideoCapture, frame_id: int):\ndef compare_ssim(pic1: np.ndarray, pic2: np.ndarray) -> float:\ndef multi_compare_ssim(\n pic1_list: typing.List, pic2_list: typing.L...
import os import cv2 import time import random import asyncio from loguru import logger from typing import List, Union, Optional from concurrent.futures import ThreadPoolExecutor from nexaflow import toolbox from nexaflow.skills.report import Report from nexaflow.skills.record import Record from nexaflow.skills.player import Player from nexaflow.skills.switch import Switch from nexaflow.cutter.cutter import VideoCutter from nexaflow.video import VideoObject, Frame from nexaflow.classifier.keras_classifier import KerasClassifier from nexaflow.hook import BaseHook, CropHook, OmitHook, FrameSaveHook from nexaflow.classifier.base import ClassifierResult, SingleClassifierResult
16,817
class Alynex(object): target_size: tuple = (350, 700) fps: int = 60 step: int = 1 block: int = 6 threshold: Union[int | float] = 0.97 offset: int = 3 compress_rate: float = 0.5 window_size: int = 1 window_coefficient: int = 2 kc: KerasClassifier = KerasClassifier( target_size=target_size, data_size=target_size ) def __init__(self): self.__report: Optional[Report] = None self.__record: Optional[Record] = Record() self.__player: Optional[Player] = Player() self.__ffmpeg: Optional[Switch] = Switch() self.__filmer: Optional[Alynex._Filmer] = Alynex._Filmer() self.__framix: Optional[Alynex._Framix] = None def __str__(self): return (f""" <Alynex for NexaFlow Target Size: {self.target_size} Fps: {self.fps} Step: {self.step} Block: {self.block} Threshold: {self.threshold} Offset: {self.offset} Compress Rate: {self.compress_rate} Window Size: {self.window_size} Window Coefficient: {self.window_coefficient} > """) __repr__ = __str__ def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): pass @property def report(self) -> "Report": assert self.__report, f"{self.activate.__name__} first ..." return self.__report @property def record(self) -> "Record": return self.__record @property def player(self) -> "Player": return self.__player @property def ffmpeg(self) -> "Switch": return self.__ffmpeg @property def filmer(self) -> "Alynex._Filmer": return self.__filmer @property def framix(self) -> "Alynex._Framix": assert self.__framix, f"{self.activate.__name__} first ..." return self.__framix @staticmethod def only_video(folder: str) -> List: class Entry(object): def __init__(self, title: str, place: str, sheet: list): self.title = title self.place = place self.sheet = sheet return [ Entry( os.path.basename(root), root, [os.path.join(root, f) for f in sorted(file)] ) for root, _, file in os.walk(folder) if file ] def activate(self, models: str, total_path: str): if not self.__report: self.__report = Report(total_path) self.__framix = Alynex._Framix(self.report) Alynex.kc.load_model(models) class _Filmer(object): @staticmethod def train_model(video_file: str) -> None: model_path = os.path.join( os.path.dirname(video_file), f"Model_{time.strftime('%Y%m%d%H%M%S')}_{os.getpid()}" ) if not os.path.exists(model_path): os.makedirs(model_path, exist_ok=True) # 将视频切分成帧 video = VideoObject(video_file, fps=Alynex.fps) # 新建帧,计算视频总共有多少帧,每帧多少ms video.load_frames() # 压缩视频
class Alynex(object): target_size: tuple = (350, 700) fps: int = 60 step: int = 1 block: int = 6 threshold: Union[int | float] = 0.97 offset: int = 3 compress_rate: float = 0.5 window_size: int = 1 window_coefficient: int = 2 kc: KerasClassifier = KerasClassifier( target_size=target_size, data_size=target_size ) def __init__(self): self.__report: Optional[Report] = None self.__record: Optional[Record] = Record() self.__player: Optional[Player] = Player() self.__ffmpeg: Optional[Switch] = Switch() self.__filmer: Optional[Alynex._Filmer] = Alynex._Filmer() self.__framix: Optional[Alynex._Framix] = None def __str__(self): return (f""" <Alynex for NexaFlow Target Size: {self.target_size} Fps: {self.fps} Step: {self.step} Block: {self.block} Threshold: {self.threshold} Offset: {self.offset} Compress Rate: {self.compress_rate} Window Size: {self.window_size} Window Coefficient: {self.window_coefficient} > """) __repr__ = __str__ def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): pass @property def report(self) -> "Report": assert self.__report, f"{self.activate.__name__} first ..." return self.__report @property def record(self) -> "Record": return self.__record @property def player(self) -> "Player": return self.__player @property def ffmpeg(self) -> "Switch": return self.__ffmpeg @property def filmer(self) -> "Alynex._Filmer": return self.__filmer @property def framix(self) -> "Alynex._Framix": assert self.__framix, f"{self.activate.__name__} first ..." return self.__framix @staticmethod def only_video(folder: str) -> List: class Entry(object): def __init__(self, title: str, place: str, sheet: list): self.title = title self.place = place self.sheet = sheet return [ Entry( os.path.basename(root), root, [os.path.join(root, f) for f in sorted(file)] ) for root, _, file in os.walk(folder) if file ] def activate(self, models: str, total_path: str): if not self.__report: self.__report = Report(total_path) self.__framix = Alynex._Framix(self.report) Alynex.kc.load_model(models) class _Filmer(object): @staticmethod def train_model(video_file: str) -> None: model_path = os.path.join( os.path.dirname(video_file), f"Model_{time.strftime('%Y%m%d%H%M%S')}_{os.getpid()}" ) if not os.path.exists(model_path): os.makedirs(model_path, exist_ok=True) # 将视频切分成帧 video = VideoObject(video_file, fps=Alynex.fps) # 新建帧,计算视频总共有多少帧,每帧多少ms video.load_frames() # 压缩视频
cutter = VideoCutter(
5
2023-11-13 05:27:34+00:00
24k
microsoft/SoM
demo_gpt4v_som.py
[ { "identifier": "interactive_seem_m2m_auto", "path": "task_adapter/seem/tasks/interactive_seem_m2m_auto.py", "snippet": "def interactive_seem_m2m_auto(model, image, text_size, label_mode='1', alpha=0.1, anno_mode=['Mask']):\n t = []\n t.append(transforms.Resize(int(text_size), interpolation=Image....
import io import gradio as gr import torch import argparse import numpy as np import matplotlib.colors as mcolors from PIL import Image from seem.modeling.BaseModel import BaseModel as BaseModel_Seem from seem.utils.distributed import init_distributed as init_distributed_seem from seem.modeling import build_model as build_model_seem from task_adapter.seem.tasks import interactive_seem_m2m_auto, inference_seem_pano, inference_seem_interactive from semantic_sam.BaseModel import BaseModel from semantic_sam import build_model from semantic_sam.utils.dist import init_distributed_mode from semantic_sam.utils.arguments import load_opt_from_config_file from semantic_sam.utils.constants import COCO_PANOPTIC_CLASSES from task_adapter.semantic_sam.tasks import inference_semsam_m2m_auto, prompt_switch from segment_anything import sam_model_registry from task_adapter.sam.tasks.inference_sam_m2m_auto import inference_sam_m2m_auto from task_adapter.sam.tasks.inference_sam_m2m_interactive import inference_sam_m2m_interactive from task_adapter.utils.visualizer import Visualizer from detectron2.data import MetadataCatalog from scipy.ndimage import label from gpt4v import request_gpt4v from openai import OpenAI from pydub import AudioSegment from pydub.playback import play
17,946
# -------------------------------------------------------- # Set-of-Mark (SoM) Prompting for Visual Grounding in GPT-4V # Copyright (c) 2023 Microsoft # Licensed under The MIT License [see LICENSE for details] # Written by: # Jianwei Yang (jianwyan@microsoft.com) # Xueyan Zou (xueyan@cs.wisc.edu) # Hao Zhang (hzhangcx@connect.ust.hk) # -------------------------------------------------------- # seem # semantic sam # sam metadata = MetadataCatalog.get('coco_2017_train_panoptic') css4_colors = mcolors.CSS4_COLORS color_proposals = [list(mcolors.hex2color(color)) for color in css4_colors.values()] client = OpenAI() ''' build args ''' semsam_cfg = "configs/semantic_sam_only_sa-1b_swinL.yaml" seem_cfg = "configs/seem_focall_unicl_lang_v1.yaml" semsam_ckpt = "./swinl_only_sam_many2many.pth" sam_ckpt = "./sam_vit_h_4b8939.pth" seem_ckpt = "./seem_focall_v1.pt" opt_semsam = load_opt_from_config_file(semsam_cfg) opt_seem = load_opt_from_config_file(seem_cfg) opt_seem = init_distributed_seem(opt_seem) ''' build model ''' model_semsam = BaseModel(opt_semsam, build_model(opt_semsam)).from_pretrained(semsam_ckpt).eval().cuda() model_sam = sam_model_registry["vit_h"](checkpoint=sam_ckpt).eval().cuda() model_seem = BaseModel_Seem(opt_seem, build_model_seem(opt_seem)).from_pretrained(seem_ckpt).eval().cuda() with torch.no_grad(): with torch.autocast(device_type='cuda', dtype=torch.float16): model_seem.model.sem_seg_head.predictor.lang_encoder.get_text_embeddings(COCO_PANOPTIC_CLASSES + ["background"], is_eval=True) history_images = [] history_masks = [] history_texts = [] @torch.no_grad() def inference(image, slider, mode, alpha, label_mode, anno_mode, *args, **kwargs): global history_images; history_images = [] global history_masks; history_masks = [] if slider < 1.5: model_name = 'seem' elif slider > 2.5: model_name = 'sam' else: if mode == 'Automatic': model_name = 'semantic-sam' if slider < 1.5 + 0.14: level = [1] elif slider < 1.5 + 0.28: level = [2] elif slider < 1.5 + 0.42: level = [3] elif slider < 1.5 + 0.56: level = [4] elif slider < 1.5 + 0.70: level = [5] elif slider < 1.5 + 0.84: level = [6] else: level = [6, 1, 2, 3, 4, 5] else: model_name = 'sam' if label_mode == 'Alphabet': label_mode = 'a' else: label_mode = '1' text_size, hole_scale, island_scale=640,100,100 text, text_part, text_thresh = '','','0.0' with torch.autocast(device_type='cuda', dtype=torch.float16): semantic=False if mode == "Interactive": labeled_array, num_features = label(np.asarray(image['mask'].convert('L'))) spatial_masks = torch.stack([torch.from_numpy(labeled_array == i+1) for i in range(num_features)]) if model_name == 'semantic-sam': model = model_semsam output, mask = inference_semsam_m2m_auto(model, image['image'], level, text, text_part, text_thresh, text_size, hole_scale, island_scale, semantic, label_mode=label_mode, alpha=alpha, anno_mode=anno_mode, *args, **kwargs) elif model_name == 'sam': model = model_sam if mode == "Automatic": output, mask = inference_sam_m2m_auto(model, image['image'], text_size, label_mode, alpha, anno_mode) elif mode == "Interactive": output, mask = inference_sam_m2m_interactive(model, image['image'], spatial_masks, text_size, label_mode, alpha, anno_mode) elif model_name == 'seem': model = model_seem if mode == "Automatic":
output, mask = inference_seem_pano(model, image['image'], text_size, label_mode, alpha, anno_mode)
1
2023-10-16 03:39:26+00:00
24k
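The SoM demo's Interactive branch turns a user scribble mask into one spatial prompt per connected component via scipy.ndimage.label. Below is a minimal runnable version of that exact step, with a toy 6x6 array standing in for image['mask'].

import numpy as np
import torch
from scipy.ndimage import label

mask = np.zeros((6, 6), dtype=np.uint8)
mask[1:3, 1:3] = 1          # first scribble
mask[4:6, 4:6] = 1          # second, disconnected scribble

# label() assigns 1..N to connected components; each becomes one prompt mask,
# exactly as in the record's inference() when mode == "Interactive"
labeled_array, num_features = label(mask)
spatial_masks = torch.stack(
    [torch.from_numpy(labeled_array == i + 1) for i in range(num_features)]
)
print(num_features)          # 2
print(spatial_masks.shape)   # torch.Size([2, 6, 6])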
hkchengrex/Cutie
gui/main_controller.py
[ { "identifier": "CUTIE", "path": "cutie/model/cutie.py", "snippet": "class CUTIE(nn.Module):\n def __init__(self, cfg: DictConfig, *, single_object=False):\n super().__init__()\n model_cfg = cfg.model\n self.ms_dims = model_cfg.pixel_encoder.ms_dims\n self.key_dim = model_...
import os import logging import cv2 import torch import numpy as np from os import path from typing import Literal from torch import mps from torch import autocast from torchvision.transforms.functional import to_tensor from omegaconf import DictConfig, open_dict from cutie.model.cutie import CUTIE from cutie.inference.inference_core import InferenceCore from gui.interaction import * from gui.interactive_utils import * from gui.resource_manager import ResourceManager from gui.gui import GUI from gui.click_controller import ClickController from gui.reader import PropagationReader, get_data_loader from gui.exporter import convert_frames_to_video, convert_mask_to_binary from scripts.download_models import download_models_if_needed
15,809
# fix conflicts between qt5 and cv2
os.environ.pop("QT_QPA_PLATFORM_PLUGIN_PATH")

try:
    from torch import mps  # optional Apple-Silicon backend; absent on other builds
except:
    print('torch.MPS not available.')

log = logging.getLogger()


class MainController():

    def __init__(self, cfg: DictConfig) -> None:
        super().__init__()
        self.initialized = False

        # setting up the workspace
        if cfg["workspace"] is None:
            if cfg["images"] is not None:
                basename = path.basename(cfg["images"])
            elif cfg["video"] is not None:
                basename = path.basename(cfg["video"])[:-4]
            else:
                raise NotImplementedError('Either images, video, or workspace has to be specified')
            cfg["workspace"] = path.join(cfg['workspace_root'], basename)

        # reading arguments
        self.cfg = cfg
        self.num_objects = cfg['num_objects']
        self.device = cfg['device']
        self.amp = cfg['amp']

        # initializing the network(s)
        self.initialize_networks()

        # main components
        self.res_man = ResourceManager(cfg)
        self.processor = InferenceCore(self.cutie, self.cfg)
        self.gui = GUI(self, self.cfg)

        # initialize control info
        self.length: int = self.res_man.length
        self.interaction: Interaction = None
        self.interaction_type: str = 'Click'
        self.curr_ti: int = 0
        self.curr_object: int = 1
        self.propagating: bool = False
        self.propagate_direction: Literal['forward', 'backward', 'none'] = 'none'
        self.last_ex = self.last_ey = 0

        # current frame info
        self.curr_frame_dirty: bool = False
        self.curr_image_np: np.ndarray = np.zeros((self.h, self.w, 3), dtype=np.uint8)
        self.curr_image_torch: torch.Tensor = None
        self.curr_mask: np.ndarray = np.zeros((self.h, self.w), dtype=np.uint8)
        self.curr_prob: torch.Tensor = torch.zeros((self.num_objects + 1, self.h, self.w),
                                                   dtype=torch.float).to(self.device)
        self.curr_prob[0] = 1

        # visualization info
        self.vis_mode: str = 'davis'
        self.vis_image: np.ndarray = None
        self.save_visualization: bool = False
        self.save_soft_mask: bool = False
        self.interacted_prob: torch.Tensor = None
        self.overlay_layer: np.ndarray = None
        self.overlay_layer_torch: torch.Tensor = None
        # the object id used for popup/layer overlay
        self.vis_target_objects = list(range(1, self.num_objects + 1))

        self.load_current_image_mask()
        self.show_current_frame()

        # initialize stuff
        self.update_memory_gauges()
        self.update_gpu_gauges()
        self.gui.work_mem_min.setValue(self.processor.memory.min_mem_frames)
        self.gui.work_mem_max.setValue(self.processor.memory.max_mem_frames)
        self.gui.long_mem_max.setValue(self.processor.memory.max_long_tokens)
        self.gui.mem_every_box.setValue(self.processor.mem_every)

        # for exporting videos
        self.output_fps = cfg['output_fps']
        self.output_bitrate = cfg['output_bitrate']

        # set callbacks
        self.gui.on_mouse_motion_xy = self.on_mouse_motion_xy
        self.gui.click_fn = self.click_fn

        self.gui.show()
        self.gui.text('Initialized.')
        self.initialized = True

        # try to load the default overlay
        self._try_load_layer('./docs/uiuc.png')

        self.gui.set_object_color(self.curr_object)
        self.update_config()

    def initialize_networks(self) -> None:
        download_models_if_needed()
self.cutie = CUTIE(self.cfg).eval().to(self.device)
0
2023-10-19 17:49:24+00:00
24k
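MainController keeps per-pixel object probabilities as a (num_objects + 1, H, W) tensor with channel 0 reserved for background. The toy below reproduces that layout and shows the usual argmax reduction back to a hard integer mask; the reduction step is a standard idiom offered for illustration, not a claim about where Cutie performs it.

import torch

num_objects, h, w = 3, 4, 5
curr_prob = torch.zeros((num_objects + 1, h, w), dtype=torch.float)
curr_prob[0] = 1                                           # everything starts as background
curr_prob[:, 1, 1] = torch.tensor([0.1, 0.7, 0.1, 0.1])    # pixel (1, 1) claimed by object 1

curr_mask = curr_prob.argmax(dim=0)   # (h, w) integer mask, 0 = background
print(curr_mask[1, 1].item())         # 1
print(curr_mask[0, 0].item())         # 0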
ZhengyiLuo/PerpetualHumanoidControl
scripts/render_smpl_o3d.py
[ { "identifier": "SMPL_Parser", "path": "uhc/smpllib/smpl_parser.py", "snippet": "class SMPL_Parser(_SMPL):\n\n def __init__(self, create_transl=False, *args, **kwargs):\n \"\"\"SMPL model constructor\n Parameters\n ----------\n model_path: str\n The path to the ...
import glob import os import sys import pdb import os.path as osp import open3d as o3d import open3d.visualization.rendering as rendering import imageio import joblib import numpy as np import torch import random import matplotlib.pyplot as plt import cv2 from tqdm import tqdm from uhc.smpllib.smpl_parser import ( SMPL_Parser, SMPLH_Parser, SMPLX_Parser, ) from uhc.smpllib.smpl_mujoco import SMPL_BONE_ORDER_NAMES as joint_names from poselib.poselib.skeleton.skeleton3d import SkeletonTree, SkeletonMotion, SkeletonState from scipy.spatial.transform import Rotation as sRot from tqdm import tqdm
19,684
sys.path.append(os.getcwd()) paused, reset, recording, image_list, writer, control, curr_zoom = False, False, False, [], None, None, 0.01 def main(): render = rendering.OffscreenRenderer(2560, 960) # render.scene.set_clear_color(np.array([0, 0, 0, 1])) ############ Load SMPL Data ############ pkl_dir = "output/renderings/smpl_im_comp_8-2023-02-05-15:36:14.pkl" mujoco_joint_names = ['Pelvis', 'L_Hip', 'L_Knee', 'L_Ankle', 'L_Toe', 'R_Hip', 'R_Knee', 'R_Ankle', 'R_Toe', 'Torso', 'Spine', 'Chest', 'Neck', 'Head', 'L_Thorax', 'L_Shoulder', 'L_Elbow', 'L_Wrist', 'L_Hand', 'R_Thorax', 'R_Shoulder', 'R_Elbow', 'R_Wrist', 'R_Hand'] Name = pkl_dir.split("/")[-1].split(".")[0] pkl_data = joblib.load(pkl_dir) data_dir = "data/smpl" mujoco_2_smpl = [mujoco_joint_names.index(q) for q in joint_names if q in mujoco_joint_names] smpl_parser_n = SMPL_Parser(model_path=data_dir, gender="neutral") smpl_parser_m = SMPL_Parser(model_path=data_dir, gender="male") smpl_parser_f = SMPL_Parser(model_path=data_dir, gender="female") data_seq = pkl_data['0_0'] pose_quat, trans = data_seq['body_quat'].numpy()[::2], data_seq['trans'].numpy()[::2]
skeleton_tree = SkeletonTree.from_dict(data_seq['skeleton_tree'])
4
2023-10-15 19:05:47+00:00
24k
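The render script builds mujoco_2_smpl as a list of indices so pose arrays can be permuted from MuJoCo joint order into SMPL joint order. The same idiom on a three-joint toy ordering (the real lists have 24 entries each):

import numpy as np

mujoco_names = ['Pelvis', 'L_Knee', 'L_Hip']    # toy orderings, not the real joints
smpl_names = ['Pelvis', 'L_Hip', 'L_Knee']

# for each SMPL joint name, find its index in the MuJoCo ordering
mujoco_2_smpl = [mujoco_names.index(q) for q in smpl_names if q in mujoco_names]
print(mujoco_2_smpl)                             # [0, 2, 1]

pose_mujoco = np.array([[0.0], [1.0], [2.0]])    # (num_joints, dof), MuJoCo order
pose_smpl = pose_mujoco[mujoco_2_smpl]           # permuted into SMPL order
print(pose_smpl.ravel().tolist())                # [0.0, 2.0, 1.0]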
e4s2023/E4S2023
face_swap_video_pipeline.py
[ { "identifier": "OurSwapFacePipelineOptions", "path": "options/our_swap_face_pipeline_options.py", "snippet": "class OurSwapFacePipelineOptions:\n\n\tdef __init__(self):\n\t\tself.parser = ArgumentParser()\n\t\tself.initialize()\n\n\tdef initialize(self):\n\t\tself.parser.add_argument('--exp_dir', type=...
import argparse import copy import os import datetime import glob import cv2 import numpy as np import torch import torch.nn as nn import torchvision.transforms as transforms from tqdm import tqdm from PIL import Image from skimage.transform import resize from torch.nn import functional as F from options.our_swap_face_pipeline_options import OurSwapFacePipelineOptions from utils import torch_utils from models.networks import Net3 from datasets.dataset import get_transforms, TO_TENSOR, NORMALIZE from gradio_utils.face_swapping import ( read_video_as_frames, save_frames_as_video, crop_and_align_face, logical_or_reduce, create_masks, get_facial_mask_from_seg19, get_edge, blending_two_images_with_mask, SoftErosion, ) from swap_face_fine.face_vid2vid.drive_demo import init_facevid2vid_pretrained_model, drive_source_demo from swap_face_fine.face_parsing.face_parsing_demo import ( init_faceParsing_pretrained_model, faceParsing_demo, vis_parsing_maps ) from swap_face_fine.gpen.gpen_demo import GPENInfer from swap_face_fine.inference_codeformer import CodeFormerInfer from swap_face_fine.realesr.image_infer import RealESRBatchInfer from training.video_swap_ft_coach import VideoSwapPTICoach from swap_face_fine.swap_face_mask import swap_head_mask_hole_first, swap_comp_style_vector from swap_face_fine.multi_band_blending import blending from swap_face_fine.Blender.inference import BlenderInfer
20,415
S_mask = faceParsing_demo(face_parsing_model, source, convert_to_seg12=True) D_mask = [faceParsing_demo(face_parsing_model, d, convert_to_seg12=True) for d in drivens] save_img_dir = os.path.join(self.out_dir, "imgs") save_mask_dir = os.path.join(self.out_dir, "mask") os.makedirs(save_img_dir, exist_ok=True) os.makedirs(save_mask_dir, exist_ok=True) for i in range(len(T_mask)): targets[i].save(os.path.join(save_img_dir, "T_%04d.png" % i)) Image.fromarray(T_mask[i]).save(os.path.join(save_mask_dir, "T_mask_%04d.png" % i)) Image.fromarray(D_mask[i]).save(os.path.join(save_mask_dir, "D_mask_%04d.png" % i)) D_mask_vis = vis_parsing_maps(drivens[i], D_mask[i]) Image.fromarray(D_mask_vis).save(os.path.join(save_mask_dir, "D_mask_vis_%04d.png" % i)) Image.fromarray(S_mask).save(os.path.join(save_mask_dir, "S_mask.png")) return { "targets_mask": T_mask, "source_mask": S_mask, "drivens_mask": D_mask, } def _process_face_reenact(self, targets, source, use_recolor: bool = False): self._load_face_reenact_model() generator = self.face_reenact_model["generator"] kp_detector = self.face_reenact_model["kp_detector"] he_estimator = self.face_reenact_model["he_estimator"] estimate_jacobian = self.face_reenact_model["estimate_jacobian"] print("[FaceSwapVideoPipeline] face reenacting...") targets_256 = [resize(np.array(im) / 255.0, (256, 256)) for im in targets] source_256 = resize(np.array(source) / 255.0, (256, 256)) predictions = drive_source_demo(source_256, targets_256, generator, kp_detector, he_estimator, estimate_jacobian) predictions = [(pred * 255).astype(np.uint8) for pred in predictions] # RGB predictions = self._process_face_enhance( predictions, model_name="gpen", ) # fixed as gpen ''' color transfer before pasting back ''' predictions_recolor = [None] * len(predictions) if use_recolor: predictions_recolor = [None] * len(predictions) face_parsing_model = self._load_face_parsing_model()["model"] face_enhance_model = self._load_face_enhance_model("codeformer")["codeformer"] recolor_save_dir = os.path.join(self.out_dir, "recolor_before_rgi") os.makedirs(recolor_save_dir, exist_ok=True) face_recolor_model = self._load_face_recolor_model()["model"] mask_softer_model = self._load_mask_softer()["model"] for i in range(len(predictions)): # swapped_face_image = Image.fromarray(predictions[i]) swapped_face_image = predictions[i] swapped_face_image.save(os.path.join(recolor_save_dir, "recolor_input_%04d.png" % i)) T = targets[i].resize(swapped_face_image.size) swap_mask_19 = faceParsing_demo(face_parsing_model, swapped_face_image, convert_to_seg12=False) target_mask_19 = faceParsing_demo(face_parsing_model, T, convert_to_seg12=False) recolor: Image = face_recolor_model.infer_image( swapped_face_image, T, Image.fromarray(swap_mask_19), Image.fromarray(target_mask_19) ) recolor.save(os.path.join(recolor_save_dir, "recolor_gen_%04d.png" % i)) recolor = recolor.resize(swapped_face_image.size) recolor = face_enhance_model.infer_image(recolor) # no need to super-res? recolor = recolor.resize((512, 512)).resize(recolor.size) # resize down to avoid too high-res in video recolor.save(os.path.join(recolor_save_dir, "gen_enhance_%04d.png" % i)) # only copy low-frequency parts # blending_mask = get_facial_mask_from_seg19( # torch.LongTensor(swap_mask_19[None, None, :, :]), # target_size=recolor.size, edge_softer=mask_softer_model, is_seg19=True # ) # edge = get_edge(swapped_face_image) # edge = np.array(edge).astype(np.float32) / 255. # blending_mask = (blending_mask - edge).clip(0., 1.) 
# Image.fromarray((blending_mask.squeeze() * 255.).astype(np.uint8)).save( # os.path.join(recolor_save_dir, "blend_mask_%04d.png" % i) # ) # recolor = blending_two_images_with_mask( # swapped_face_image, recolor, up_ratio=0.95, up_mask=blending_mask.copy() # ) # recolor.save(os.path.join(recolor_save_dir, "recolor_blend_%04d.png" % i)) predictions_recolor[i] = np.array(recolor) # RGB imgs_save_dir = os.path.join(self.out_dir, "imgs") os.makedirs(imgs_save_dir, exist_ok=True) for i in range(len(predictions_recolor)): Image.fromarray(predictions_recolor[i]).save( os.path.join(imgs_save_dir, "%s%04d.png" % ("D_recolor_", i))) ''' end ''' self._free_face_reenact_model() return predictions, predictions_recolor def _process_face_enhance(self, lq_images: list, model_name: str = "gpen", save_prefix: str = "D_", ): self._load_face_enhance_model(model_name) enhance_model = self.face_enhance_model[model_name] print("[FaceSwapVideoPipeline] face enhancing...") hq_images = [enhance_model.infer_image(Image.fromarray(lq)) for lq in lq_images] save_dir = os.path.join(self.out_dir, "imgs") os.makedirs(save_dir, exist_ok=True) for i in range(len(hq_images)): hq_images[i].save(os.path.join(save_dir, "%s%04d.png" % (save_prefix, i))) return hq_images @torch.no_grad() def _process_extract_init_style_vectors(self, drivens, targets, drivens_mask, targets_mask): save_dir = os.path.join(self.out_dir, "styleVec") os.makedirs(save_dir, exist_ok=True) net = self._load_e4s_model() for i, (d, t) in enumerate(zip(drivens, targets)):
class FaceSwapVideoPipeline(object): def __init__(self, e4s_opt: argparse.Namespace, use_time_subfolder: bool = True, ): self.exp_root = e4s_opt.exp_dir self.out_dir = e4s_opt.exp_dir self.pti_save_fn = None self.use_time_subfolder = use_time_subfolder self.e4s_opt = e4s_opt self.e4s_model = None self.device = e4s_opt.device # models are lazy loaded self.face_reenact_model = {} self.face_parsing_model = {} self.face_enhance_model = {} self.face_recolor_model = {} self.mask_softer_model = {} self.num_seg_cls = 12 # fixed def forward(self, target_video_path: str, source_image_path: str, result_video_fn: str = "output.mp4", use_crop: bool = True, target_frames_cnt: int = -1, use_pti: bool = True, pti_resume_weight_path: str = "./video_outputs/finetuned_G_lr0.001000_iters80.pth", ): """ @param target_video_path: @param source_image_path: @param result_video_fn: @param use_crop: @param target_frames_cnt: @param use_pti: @param pti_resume_weight_path: if opt.max_pti_steps == 0, the pipeline will use this pre-trained weight file """ # 0. update time, used as output directory self._update_out_dir() # 1. prepare input target and source target_paths, source_paths = self._prepare_inputs( target_video_path, source_image_path, target_frames_cnt=target_frames_cnt, ) target_frames_cnt = len(target_paths) # 2. crop and align crop_results = self._process_crop_align( target_paths, source_paths, use_crop=use_crop, ) T = crop_results["targets_crop"] S = crop_results["source_crop"] T_ori = crop_results["targets_ori"] T_inv_trans = crop_results["targets_inv_trans"] # 3. face reenactment drivens, drivens_recolor = self._process_face_reenact( T, S, use_recolor=True ) # 4. face enhancement # drivens = self._process_face_enhance( # drivens, model_name="codeformer", # ) # if drivens_recolor[0] is not None: # drivens_recolor = self._process_face_enhance( # drivens_recolor, model_name="codeformer", save_prefix="D_recolor_" # ) # 5. face parsing parsing_results = self._process_face_parsing( T, S, drivens ) T_mask = parsing_results["targets_mask"] S_mask = parsing_results["source_mask"] D_mask = parsing_results["drivens_mask"] # 6. extract initial style vectors self._process_extract_init_style_vectors( drivens, T, drivens_mask=D_mask, targets_mask=T_mask ) # 7. PTI tuning if use_pti: self._process_pti_tuning( pti_resume_weight_path, target_frames_cnt=target_frames_cnt, ) # 8. face swapping swap_results = self._process_face_swapping( target_frames_cnt, T_inv_trans, T_ori, ) swaps_face = swap_results["swaps_face"] # each is: PIL.Image swaps_mask = swap_results["swaps_mask"] # each is: np.ndarray(512,512), in {0,...,9} # 9. 
prepare outputs self._prepare_outputs( result_video_fn, target_video_path ) def _update_out_dir(self): if not self.use_time_subfolder: return now = datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S") out_dir = os.path.join(self.exp_root, now) os.makedirs(out_dir, exist_ok=True) self.out_dir = out_dir self.e4s_opt.exp_dir = out_dir print(f"[FaceSwapVideoPipeline] out directory changed to: {self.out_dir}") return def _prepare_inputs(self, target_video_path: str, source_image_path: str, target_frames_cnt: int = 120, ): in_target_frames_folder = os.path.join(self.out_dir, "in_frames/") t_frames, t_paths = read_video_as_frames(target_video_path, in_target_frames_folder) t_frames = t_frames[:target_frames_cnt] t_paths = t_paths[:target_frames_cnt] # many targets s_paths = [source_image_path] # only 1 source # save inputs target_save_path = os.path.join(self.out_dir, "target.mp4") source_save_path = os.path.join(self.out_dir, "source.png") os.system(f"cp {target_video_path} {target_save_path}") os.system(f"cp {source_image_path} {source_save_path}") return t_paths, s_paths def _process_crop_align(self, t_paths: list, s_paths: list, use_crop: bool): if use_crop: target_files = [(os.path.basename(f).split('.')[0], f) for f in t_paths] source_files = [(os.path.basename(f).split('.')[0], f) for f in s_paths] target_crops, target_orig_images, target_quads, target_inv_transforms = crop_and_align_face( target_files, image_size=1024, scale=1.0, center_sigma=1.0, xy_sigma=3.0, use_fa=False ) T = [crop.convert("RGB") for crop in target_crops] source_crops, source_orig_images, source_quads, source_inv_transforms = crop_and_align_face( source_files, image_size=1024, scale=1.0, center_sigma=0, xy_sigma=0, use_fa=False ) S = source_crops[0].convert("RGB") T_ori = target_orig_images T_inv_trans = target_inv_transforms else: T = [Image.open(t).convert("RGB").resize((1024, 1024)) for t in t_paths] S = Image.open(s_paths[0]).convert("RGB").resize((1024, 1024)) T_ori = T T_inv_trans = None return { "targets_crop": T, "source_crop": S, "targets_ori": T_ori, "targets_inv_trans": T_inv_trans } def _process_face_parsing(self, targets, source, drivens): self._load_face_parsing_model() face_parsing_model = self.face_parsing_model["model"] print("[FaceSwapVideoPipeline] face parsing...") T_mask = [faceParsing_demo(face_parsing_model, frm, convert_to_seg12=True) for frm in targets] # 12 S_mask = faceParsing_demo(face_parsing_model, source, convert_to_seg12=True) D_mask = [faceParsing_demo(face_parsing_model, d, convert_to_seg12=True) for d in drivens] save_img_dir = os.path.join(self.out_dir, "imgs") save_mask_dir = os.path.join(self.out_dir, "mask") os.makedirs(save_img_dir, exist_ok=True) os.makedirs(save_mask_dir, exist_ok=True) for i in range(len(T_mask)): targets[i].save(os.path.join(save_img_dir, "T_%04d.png" % i)) Image.fromarray(T_mask[i]).save(os.path.join(save_mask_dir, "T_mask_%04d.png" % i)) Image.fromarray(D_mask[i]).save(os.path.join(save_mask_dir, "D_mask_%04d.png" % i)) D_mask_vis = vis_parsing_maps(drivens[i], D_mask[i]) Image.fromarray(D_mask_vis).save(os.path.join(save_mask_dir, "D_mask_vis_%04d.png" % i)) Image.fromarray(S_mask).save(os.path.join(save_mask_dir, "S_mask.png")) return { "targets_mask": T_mask, "source_mask": S_mask, "drivens_mask": D_mask, } def _process_face_reenact(self, targets, source, use_recolor: bool = False): self._load_face_reenact_model() generator = self.face_reenact_model["generator"] kp_detector = self.face_reenact_model["kp_detector"] he_estimator = 
self.face_reenact_model["he_estimator"] estimate_jacobian = self.face_reenact_model["estimate_jacobian"] print("[FaceSwapVideoPipeline] face reenacting...") targets_256 = [resize(np.array(im) / 255.0, (256, 256)) for im in targets] source_256 = resize(np.array(source) / 255.0, (256, 256)) predictions = drive_source_demo(source_256, targets_256, generator, kp_detector, he_estimator, estimate_jacobian) predictions = [(pred * 255).astype(np.uint8) for pred in predictions] # RGB predictions = self._process_face_enhance( predictions, model_name="gpen", ) # fixed as gpen ''' color transfer before pasting back ''' predictions_recolor = [None] * len(predictions) if use_recolor: predictions_recolor = [None] * len(predictions) face_parsing_model = self._load_face_parsing_model()["model"] face_enhance_model = self._load_face_enhance_model("codeformer")["codeformer"] recolor_save_dir = os.path.join(self.out_dir, "recolor_before_rgi") os.makedirs(recolor_save_dir, exist_ok=True) face_recolor_model = self._load_face_recolor_model()["model"] mask_softer_model = self._load_mask_softer()["model"] for i in range(len(predictions)): # swapped_face_image = Image.fromarray(predictions[i]) swapped_face_image = predictions[i] swapped_face_image.save(os.path.join(recolor_save_dir, "recolor_input_%04d.png" % i)) T = targets[i].resize(swapped_face_image.size) swap_mask_19 = faceParsing_demo(face_parsing_model, swapped_face_image, convert_to_seg12=False) target_mask_19 = faceParsing_demo(face_parsing_model, T, convert_to_seg12=False) recolor: Image = face_recolor_model.infer_image( swapped_face_image, T, Image.fromarray(swap_mask_19), Image.fromarray(target_mask_19) ) recolor.save(os.path.join(recolor_save_dir, "recolor_gen_%04d.png" % i)) recolor = recolor.resize(swapped_face_image.size) recolor = face_enhance_model.infer_image(recolor) # no need to super-res? recolor = recolor.resize((512, 512)).resize(recolor.size) # resize down to avoid too high-res in video recolor.save(os.path.join(recolor_save_dir, "gen_enhance_%04d.png" % i)) # only copy low-frequency parts # blending_mask = get_facial_mask_from_seg19( # torch.LongTensor(swap_mask_19[None, None, :, :]), # target_size=recolor.size, edge_softer=mask_softer_model, is_seg19=True # ) # edge = get_edge(swapped_face_image) # edge = np.array(edge).astype(np.float32) / 255. # blending_mask = (blending_mask - edge).clip(0., 1.) 
# Image.fromarray((blending_mask.squeeze() * 255.).astype(np.uint8)).save( # os.path.join(recolor_save_dir, "blend_mask_%04d.png" % i) # ) # recolor = blending_two_images_with_mask( # swapped_face_image, recolor, up_ratio=0.95, up_mask=blending_mask.copy() # ) # recolor.save(os.path.join(recolor_save_dir, "recolor_blend_%04d.png" % i)) predictions_recolor[i] = np.array(recolor) # RGB imgs_save_dir = os.path.join(self.out_dir, "imgs") os.makedirs(imgs_save_dir, exist_ok=True) for i in range(len(predictions_recolor)): Image.fromarray(predictions_recolor[i]).save( os.path.join(imgs_save_dir, "%s%04d.png" % ("D_recolor_", i))) ''' end ''' self._free_face_reenact_model() return predictions, predictions_recolor def _process_face_enhance(self, lq_images: list, model_name: str = "gpen", save_prefix: str = "D_", ): self._load_face_enhance_model(model_name) enhance_model = self.face_enhance_model[model_name] print("[FaceSwapVideoPipeline] face enhancing...") hq_images = [enhance_model.infer_image(Image.fromarray(lq)) for lq in lq_images] save_dir = os.path.join(self.out_dir, "imgs") os.makedirs(save_dir, exist_ok=True) for i in range(len(hq_images)): hq_images[i].save(os.path.join(save_dir, "%s%04d.png" % (save_prefix, i))) return hq_images @torch.no_grad() def _process_extract_init_style_vectors(self, drivens, targets, drivens_mask, targets_mask): save_dir = os.path.join(self.out_dir, "styleVec") os.makedirs(save_dir, exist_ok=True) net = self._load_e4s_model() for i, (d, t) in enumerate(zip(drivens, targets)):
driven = transforms.Compose([TO_TENSOR, NORMALIZE])(d)
4
2023-10-15 12:15:01+00:00
24k
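Several steps of FaceSwapVideoPipeline composite the generated face back over the target through a soft mask (see the commented-out blending_two_images_with_mask call). The helper below is a generic alpha-composite sketched under the assumption that the project's blend works roughly this way; it is not the repository's implementation.

import numpy as np

def blend(base: np.ndarray, overlay: np.ndarray, mask: np.ndarray) -> np.ndarray:
    """base/overlay: (H, W, 3) uint8; mask: (H, W) float in [0, 1]."""
    m = mask[..., None].astype(np.float32)
    out = base.astype(np.float32) * (1.0 - m) + overlay.astype(np.float32) * m
    return out.clip(0, 255).astype(np.uint8)

base = np.zeros((4, 4, 3), dtype=np.uint8)
overlay = np.full((4, 4, 3), 255, dtype=np.uint8)
mask = np.zeros((4, 4))
mask[1:3, 1:3] = 0.5
print(blend(base, overlay, mask)[1, 1])   # [127 127 127], a half-blended pixel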
sotopia-lab/sotopia
sotopia/server.py
[ { "identifier": "Agents", "path": "sotopia/agents/llm_agent.py", "snippet": "class Agents(dict[str, BaseAgent[Observation, AgentAction]]):\n def reset(self) -> None:\n for agent in self.values():\n agent.reset()\n\n def act(self, obs: dict[str, Observation]) -> dict[str, AgentAct...
import asyncio import functools import itertools import logging import gin import rich from typing import Callable, Literal, Sequence, Type, cast from beartype import beartype from tqdm.asyncio import tqdm_asyncio from sotopia.agents import ( Agents, HumanAgent, LLMAgent, RedisAgent, ScriptWritingAgent, SpeakAgent, ) from sotopia.agents.base_agent import BaseAgent from sotopia.database import EpisodeLog from sotopia.database.persistent_profile import ( AgentProfile, EnvironmentProfile, ) from sotopia.envs import ParallelSotopiaEnv from sotopia.envs.evaluators import ( ReachGoalLLMEvaluator, RuleBasedTerminatedEvaluator, unweighted_aggregate_evaluate, ) from sotopia.generation_utils.generate import LLM_Name, agenerate_script from sotopia.messages import AgentAction, Message, Observation from sotopia.messages.message_classes import ( ScriptBackground, ScriptEnvironmentResponse, ScriptInteraction, ) from sotopia.samplers import ( BaseSampler, ConstraintBasedSampler, EnvAgentCombo, UniformSampler, )
18,764
@beartype def run_sync_server( model_name_dict: dict[str, LLM_Name], action_order: Literal["simutaneous", "round-robin", "random"], agents_info: dict[str, dict[str, str]] | None = None, partial_background_file: str | None = None, full_background_file: str | None = None, mode: str | None = None, ) -> list[tuple[str, str, Message]]: # Create Environment and agents # This step will be moved to outside this function env = ParallelSotopiaEnv( model_name=model_name_dict["env"], action_order=action_order, evaluators=[ RuleBasedTerminatedEvaluator(), ], ) if partial_background_file: environment_messages = env.reset( options={"partial_background_file": partial_background_file} ) elif full_background_file: environment_messages = env.reset( options={"full_background_file": full_background_file} ) else: environment_messages = env.reset() agents = Agents() agents_model_names = [model_name_dict["agent1"], model_name_dict["agent2"]] for agent_name, agent_model in zip(env.agents, agents_model_names): if agent_model == "human": agents[agent_name] = HumanAgent(agent_name) elif mode == "speak": agents[agent_name] = SpeakAgent(agent_name, model_name=agent_model) else: agents[agent_name] = LLMAgent(agent_name, model_name=agent_model) agents.reset() messages: list[tuple[str, str, Message]] = [] # Main Event Loop done = False for agent_name in env.agents: messages.append( ("Environment", agent_name, environment_messages[agent_name]) ) while not done: # gather agent messages agent_messages: dict[str, AgentAction] = dict() for agent_name in env.agents: if agents_info is not None: agents[agent_name].goal = agents_info[agent_name]["goal"] agent_messages[agent_name] = agents[agent_name].act( environment_messages[agent_name] ) messages.append( (agent_name, "Environment", agent_messages[agent_name]) ) # send agent messages to environment environment_messages, _, terminated, ___, ____ = env.step( agent_messages ) for agent_name in env.agents: messages.append( ("Environment", agent_name, environment_messages[agent_name]) ) done = all(terminated.values()) return messages @gin.configurable async def arun_one_episode( env: ParallelSotopiaEnv, agent_list: Sequence[BaseAgent[Observation, AgentAction]], model_dict: dict[str, LLM_Name], omniscient: bool = False, script_like: bool = False, json_in_script: bool = False, tag: str | None = None, push_to_db: bool = False, ) -> list[tuple[str, str, Message]]: agents = Agents({agent.agent_name: agent for agent in agent_list}) environment_messages = env.reset(agents=agents, omniscient=omniscient) agents_model_names = [model_dict["agent1"], model_dict["agent2"]] for agent_name, agent_model in zip(env.agents, agents_model_names): if agent_model == "human": agents[agent_name] = HumanAgent(agent_name) elif agent_model == "redis": agents[agent_name] = RedisAgent(agent_name) elif script_like and not json_in_script:
agents[agent_name] = ScriptWritingAgent(
3
2023-10-23 19:47:26+00:00
24k
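run_sync_server's main loop alternates between collecting one action per agent and stepping the environment, logging every exchange as a (sender, receiver, message) triple. A condensed, dependency-free rendering of that control flow, with stand-in agent and environment classes (both are toys, not sotopia classes):

class EchoAgent:
    def __init__(self, name):
        self.name = name

    def act(self, obs):
        return f"{self.name} saw: {obs}"


class ToyEnv:
    agents = ["alice", "bob"]

    def __init__(self):
        self.turn = 0

    def reset(self):
        return {a: "start" for a in self.agents}

    def step(self, actions):
        self.turn += 1
        return {a: f"turn {self.turn}" for a in self.agents}, self.turn >= 2


env = ToyEnv()
agents = {n: EchoAgent(n) for n in ToyEnv.agents}
messages, obs, done = [], env.reset(), False
while not done:
    # gather one action per agent, then hand the batch to the environment
    actions = {n: agents[n].act(obs[n]) for n in env.agents}
    messages.extend((n, "Environment", actions[n]) for n in env.agents)
    obs, done = env.step(actions)
    messages.extend(("Environment", n, obs[n]) for n in env.agents)
print(len(messages))  # 8 == 2 rounds * 2 agents * (1 action + 1 observation)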
f0uriest/interpax
tests/test_interpolate.py
[ { "identifier": "fft_interp1d", "path": "interpax/_fourier.py", "snippet": "@partial(jit, static_argnames=\"n\")\ndef fft_interp1d(f: jax.Array, n: int, sx: jax.Array = None, dx: float = 1.0):\n \"\"\"Interpolation of a 1d periodic function via FFT.\n\n Parameters\n ----------\n f : ndarray,...
import jax import jax.numpy as jnp import numpy as np import pytest from jax import config as jax_config from interpax import ( Interpolator1D, Interpolator2D, Interpolator3D, fft_interp1d, fft_interp2d, interp1d, interp2d, interp3d, )
15,743
@pytest.mark.unit def test_interp1d_monotonic(self): """Ensure monotonic interpolation is actually monotonic.""" # true function is just linear with a jump discontinuity at x=1.5 x = np.linspace(-4, 5, 10) f = np.heaviside(x - 1.5, 0) + 0.1 * x xq = np.linspace(-4, 5, 1000) dfc = interp1d(xq, x, f, derivative=1, method="cubic") dfm = interp1d(xq, x, f, derivative=1, method="monotonic") dfm0 = interp1d(xq, x, f, derivative=1, method="monotonic-0") assert dfc.min() < 0 # cubic interpolation undershoots, giving negative slope assert dfm.min() > 0 # monotonic interpolation doesn't assert dfm0.min() >= 0 # monotonic-0 doesn't overshoot either # ensure monotonic-0 has 0 slope at end points np.testing.assert_allclose(dfm0[np.array([0, -1])], 0, atol=1e-12) class TestInterp2D: """Tests for interp2d function.""" @pytest.mark.unit @pytest.mark.parametrize( "x, y", [ (np.linspace(0, 3 * np.pi, 1000), np.linspace(0, 2 * np.pi, 1000)), (0.0, 0.0), ], ) def test_interp2d(self, x, y): """Test accuracy of different 2d interpolation methods.""" xp = np.linspace(0, 3 * np.pi, 99) yp = np.linspace(0, 2 * np.pi, 40) xxp, yyp = np.meshgrid(xp, yp, indexing="ij") f = lambda x, y: np.sin(x) * np.cos(y) fp = f(xxp, yyp) interp1 = lambda xq, yq, *args, **kwargs: interp2d(xq, yq, *args, **kwargs) interp2 = lambda xq, yq, *args, **kwargs: Interpolator2D(*args, **kwargs)( xq, yq ) for interp in [interp1, interp2]: fq = interp( x, y, xp, yp, fp, method="nearest", period=(2 * np.pi, 2 * np.pi) ) np.testing.assert_allclose(fq, f(x, y), rtol=1e-2, atol=1) fq = interp( x, y, xp, yp, fp, method="linear", period=(2 * np.pi, 2 * np.pi) ) np.testing.assert_allclose(fq, f(x, y), rtol=1e-4, atol=1e-2) atol = 2e-3 rtol = 1e-5 fq = interp(x, y, xp, yp, fp, method="cubic", period=(2 * np.pi, 2 * np.pi)) np.testing.assert_allclose(fq, f(x, y), rtol=rtol, atol=atol) fq = interp( x, y, xp, yp, fp, method="cubic2", period=(2 * np.pi, 2 * np.pi) ) np.testing.assert_allclose(fq, f(x, y), rtol=rtol, atol=atol) fq = interp( x, y, xp, yp, fp, method="catmull-rom", period=(2 * np.pi, 2 * np.pi) ) np.testing.assert_allclose(fq, f(x, y), rtol=rtol, atol=atol) fq = interp( x, y, xp, yp, fp, method="cardinal", period=(2 * np.pi, 2 * np.pi) ) np.testing.assert_allclose(fq, f(x, y), rtol=rtol, atol=atol) @pytest.mark.unit def test_interp2d_vector_valued(self): """Test for interpolating vector valued function.""" xp = np.linspace(0, 3 * np.pi, 99) yp = np.linspace(0, 2 * np.pi, 40) x = np.linspace(0, 3 * np.pi, 200) y = np.linspace(0, 2 * np.pi, 200) xxp, yyp = np.meshgrid(xp, yp, indexing="ij") f = lambda x, y: np.array([np.sin(x) * np.cos(y), np.sin(x) + np.cos(y)]) fp = f(xxp.T, yyp.T).T fq = interp2d(x, y, xp, yp, fp, method="nearest") np.testing.assert_allclose(fq, f(x, y).T, rtol=1e-2, atol=1.2e-1) fq = interp2d(x, y, xp, yp, fp, method="linear") np.testing.assert_allclose(fq, f(x, y).T, rtol=1e-3, atol=1e-2) fq = interp2d(x, y, xp, yp, fp, method="cubic") np.testing.assert_allclose(fq, f(x, y).T, rtol=1e-5, atol=2e-3) class TestInterp3D: """Tests for interp3d function.""" @pytest.mark.unit @pytest.mark.parametrize( "x, y, z", [ ( np.linspace(0, np.pi, 1000), np.linspace(0, 2 * np.pi, 1000), np.linspace(0, 3, 1000), ), (0.0, 0.0, 0.0), ], ) def test_interp3d(self, x, y, z): """Test accuracy of different 3d interpolation methods.""" xp = np.linspace(0, np.pi, 20) yp = np.linspace(0, 2 * np.pi, 30) zp = np.linspace(0, 3, 25) xxp, yyp, zzp = np.meshgrid(xp, yp, zp, indexing="ij") f = lambda x, y, z: np.sin(x) * np.cos(y) * z**2 fp = f(xxp, yyp, 
zzp)
"""Tests for interpolation functions.""" jax_config.update("jax_enable_x64", True) class TestInterp1D: """Tests for interp1d function.""" @pytest.mark.unit @pytest.mark.parametrize( "x", [ np.linspace(0, 2 * np.pi, 10000), 0.0, ], ) def test_interp1d(self, x): """Test accuracy of different 1d interpolation methods.""" xp = np.linspace(0, 2 * np.pi, 100) f = lambda x: np.sin(x) fp = f(xp) interp1 = lambda xq, *args, **kwargs: interp1d(xq, *args, **kwargs) interp2 = lambda xq, *args, **kwargs: Interpolator1D(*args, **kwargs)(xq) for interp in [interp1, interp2]: fq = interp(x, xp, fp, method="nearest") np.testing.assert_allclose(fq, f(x), rtol=1e-2, atol=1e-1) fq = interp(x, xp, fp, method="linear") np.testing.assert_allclose(fq, f(x), rtol=1e-4, atol=1e-3) fq = interp(x, xp, fp, method="cubic") np.testing.assert_allclose(fq, f(x), rtol=1e-6, atol=1e-5) fq = interp(x, xp, fp, method="cubic2") np.testing.assert_allclose(fq, f(x), rtol=1e-6, atol=1e-5) fq = interp(x, xp, fp, method="cardinal") np.testing.assert_allclose(fq, f(x), rtol=1e-6, atol=1e-5) fq = interp(x, xp, fp, method="catmull-rom") np.testing.assert_allclose(fq, f(x), rtol=1e-6, atol=1e-5) fq = interp(x, xp, fp, method="monotonic") np.testing.assert_allclose(fq, f(x), rtol=1e-4, atol=1e-3) fq = interp(x, xp, fp, method="monotonic-0") np.testing.assert_allclose(fq, f(x), rtol=1e-4, atol=1e-2) @pytest.mark.unit def test_interp1d_vector_valued(self): """Test for interpolating vector valued function.""" xp = np.linspace(0, 2 * np.pi, 100) x = np.linspace(0, 2 * np.pi, 300)[10:-10] f = lambda x: np.array([np.sin(x), np.cos(x)]) fp = f(xp).T fq = interp1d(x, xp, fp, method="nearest") np.testing.assert_allclose(fq, f(x).T, rtol=1e-2, atol=1e-1) fq = interp1d(x, xp, fp, method="linear") np.testing.assert_allclose(fq, f(x).T, rtol=1e-4, atol=1e-3) fq = interp1d(x, xp, fp, method="cubic") np.testing.assert_allclose(fq, f(x).T, rtol=1e-6, atol=1e-5) fq = interp1d(x, xp, fp, method="cubic2") np.testing.assert_allclose(fq, f(x).T, rtol=1e-6, atol=1e-5) fq = interp1d(x, xp, fp, method="cardinal") np.testing.assert_allclose(fq, f(x).T, rtol=1e-6, atol=1e-5) fq = interp1d(x, xp, fp, method="catmull-rom") np.testing.assert_allclose(fq, f(x).T, rtol=1e-6, atol=1e-5) fq = interp1d(x, xp, fp, method="monotonic") np.testing.assert_allclose(fq, f(x).T, rtol=1e-4, atol=1e-3) fq = interp1d(x, xp, fp, method="monotonic-0") np.testing.assert_allclose(fq, f(x).T, rtol=1e-4, atol=1e-2) @pytest.mark.unit def test_interp1d_extrap_periodic(self): """Test extrapolation and periodic BC of 1d interpolation.""" xp = np.linspace(0, 2 * np.pi, 200) x = np.linspace(-1, 2 * np.pi + 1, 10000) f = lambda x: np.sin(x) fp = f(xp) fq = interp1d(x, xp, fp, method="cubic", extrap=False) assert np.isnan(fq[0]) assert np.isnan(fq[-1]) fq = interp1d(x, xp, fp, method="cubic", extrap=True) assert not np.isnan(fq[0]) assert not np.isnan(fq[-1]) fq = interp1d(x, xp, fp, method="cubic", period=2 * np.pi) np.testing.assert_allclose(fq, f(x), rtol=1e-6, atol=1e-2) @pytest.mark.unit def test_interp1d_monotonic(self): """Ensure monotonic interpolation is actually monotonic.""" # true function is just linear with a jump discontinuity at x=1.5 x = np.linspace(-4, 5, 10) f = np.heaviside(x - 1.5, 0) + 0.1 * x xq = np.linspace(-4, 5, 1000) dfc = interp1d(xq, x, f, derivative=1, method="cubic") dfm = interp1d(xq, x, f, derivative=1, method="monotonic") dfm0 = interp1d(xq, x, f, derivative=1, method="monotonic-0") assert dfc.min() < 0 # cubic interpolation undershoots, giving negative slope 
assert dfm.min() > 0 # monotonic interpolation doesn't assert dfm0.min() >= 0 # monotonic-0 doesn't overshoot either # ensure monotonic-0 has 0 slope at end points np.testing.assert_allclose(dfm0[np.array([0, -1])], 0, atol=1e-12) class TestInterp2D: """Tests for interp2d function.""" @pytest.mark.unit @pytest.mark.parametrize( "x, y", [ (np.linspace(0, 3 * np.pi, 1000), np.linspace(0, 2 * np.pi, 1000)), (0.0, 0.0), ], ) def test_interp2d(self, x, y): """Test accuracy of different 2d interpolation methods.""" xp = np.linspace(0, 3 * np.pi, 99) yp = np.linspace(0, 2 * np.pi, 40) xxp, yyp = np.meshgrid(xp, yp, indexing="ij") f = lambda x, y: np.sin(x) * np.cos(y) fp = f(xxp, yyp) interp1 = lambda xq, yq, *args, **kwargs: interp2d(xq, yq, *args, **kwargs) interp2 = lambda xq, yq, *args, **kwargs: Interpolator2D(*args, **kwargs)( xq, yq ) for interp in [interp1, interp2]: fq = interp( x, y, xp, yp, fp, method="nearest", period=(2 * np.pi, 2 * np.pi) ) np.testing.assert_allclose(fq, f(x, y), rtol=1e-2, atol=1) fq = interp( x, y, xp, yp, fp, method="linear", period=(2 * np.pi, 2 * np.pi) ) np.testing.assert_allclose(fq, f(x, y), rtol=1e-4, atol=1e-2) atol = 2e-3 rtol = 1e-5 fq = interp(x, y, xp, yp, fp, method="cubic", period=(2 * np.pi, 2 * np.pi)) np.testing.assert_allclose(fq, f(x, y), rtol=rtol, atol=atol) fq = interp( x, y, xp, yp, fp, method="cubic2", period=(2 * np.pi, 2 * np.pi) ) np.testing.assert_allclose(fq, f(x, y), rtol=rtol, atol=atol) fq = interp( x, y, xp, yp, fp, method="catmull-rom", period=(2 * np.pi, 2 * np.pi) ) np.testing.assert_allclose(fq, f(x, y), rtol=rtol, atol=atol) fq = interp( x, y, xp, yp, fp, method="cardinal", period=(2 * np.pi, 2 * np.pi) ) np.testing.assert_allclose(fq, f(x, y), rtol=rtol, atol=atol) @pytest.mark.unit def test_interp2d_vector_valued(self): """Test for interpolating vector valued function.""" xp = np.linspace(0, 3 * np.pi, 99) yp = np.linspace(0, 2 * np.pi, 40) x = np.linspace(0, 3 * np.pi, 200) y = np.linspace(0, 2 * np.pi, 200) xxp, yyp = np.meshgrid(xp, yp, indexing="ij") f = lambda x, y: np.array([np.sin(x) * np.cos(y), np.sin(x) + np.cos(y)]) fp = f(xxp.T, yyp.T).T fq = interp2d(x, y, xp, yp, fp, method="nearest") np.testing.assert_allclose(fq, f(x, y).T, rtol=1e-2, atol=1.2e-1) fq = interp2d(x, y, xp, yp, fp, method="linear") np.testing.assert_allclose(fq, f(x, y).T, rtol=1e-3, atol=1e-2) fq = interp2d(x, y, xp, yp, fp, method="cubic") np.testing.assert_allclose(fq, f(x, y).T, rtol=1e-5, atol=2e-3) class TestInterp3D: """Tests for interp3d function.""" @pytest.mark.unit @pytest.mark.parametrize( "x, y, z", [ ( np.linspace(0, np.pi, 1000), np.linspace(0, 2 * np.pi, 1000), np.linspace(0, 3, 1000), ), (0.0, 0.0, 0.0), ], ) def test_interp3d(self, x, y, z): """Test accuracy of different 3d interpolation methods.""" xp = np.linspace(0, np.pi, 20) yp = np.linspace(0, 2 * np.pi, 30) zp = np.linspace(0, 3, 25) xxp, yyp, zzp = np.meshgrid(xp, yp, zp, indexing="ij") f = lambda x, y, z: np.sin(x) * np.cos(y) * z**2 fp = f(xxp, yyp, zzp)
interp1 = lambda xq, yq, zq, *args, **kwargs: interp3d(
7
2023-10-18 13:12:20+00:00
24k
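The tests call interp1d with the query points first, then the sample grid and values. A minimal standalone usage in the same shape as the record's own assertions (method, grid sizes, and tolerances are copied from the tests above):

import numpy as np
from jax import config
from interpax import interp1d

config.update("jax_enable_x64", True)  # the record's tests enable x64 too

xp = np.linspace(0, 2 * np.pi, 100)    # sample grid
fp = np.sin(xp)                        # sampled values
xq = np.linspace(0, 2 * np.pi, 1000)   # query points

fq = interp1d(xq, xp, fp, method="cubic")
np.testing.assert_allclose(fq, np.sin(xq), rtol=1e-6, atol=1e-5)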
amitfin/oref_alert
custom_components/oref_alert/coordinator.py
[ { "identifier": "CONF_ALERT_MAX_AGE", "path": "custom_components/oref_alert/const.py", "snippet": "CONF_ALERT_MAX_AGE: Final = \"alert_max_age\"" }, { "identifier": "CONF_POLL_INTERVAL", "path": "custom_components/oref_alert/const.py", "snippet": "CONF_POLL_INTERVAL: Final = \"poll_inter...
import asyncio import homeassistant.util.dt as dt_util from dataclasses import dataclass from datetime import timedelta from functools import cmp_to_key from json import JSONDecodeError from typing import Any from aiohttp.client_exceptions import ContentTypeError from homeassistant.config_entries import ConfigEntry from homeassistant.core import HomeAssistant from homeassistant.helpers.aiohttp_client import async_get_clientsession from homeassistant.helpers.update_coordinator import DataUpdateCoordinator from .const import ( CONF_ALERT_MAX_AGE, CONF_POLL_INTERVAL, DEFAULT_POLL_INTERVAL, DOMAIN, IST, LOGGER, ) from .metadata.areas import AREAS
17,932
"""DataUpdateCoordinator for oref_alert integration.""" OREF_ALERTS_URL = "https://www.oref.org.il/WarningMessages/alert/alerts.json" OREF_HISTORY_URL = "https://www.oref.org.il/WarningMessages/History/AlertsHistory.json" OREF_HEADERS = { "Referer": "https://www.oref.org.il/", "X-Requested-With": "XMLHttpRequest", "Content-Type": "application/json", } REQUEST_RETRIES = 3 REAL_TIME_ALERT_LOGIC_WINDOW = 2 @dataclass class OrefAlertCoordinatorData: """Class for holding coordinator data.""" alerts: list[Any] active_alerts: list[Any] def _sort_alerts(item1: dict[str, Any], item2: dict[str, Any]) -> int: """Sort by descending-order "date" and then ascending-order "name".""" if item1["alertDate"] < item2["alertDate"]: return 1 if item1["alertDate"] > item2["alertDate"]: return -1 if item1["data"] > item2["data"]: return 1 if item1["data"] < item2["data"]: return -1 return 0 def _compare_fields(alert: dict[str, Any], area: str, category: int) -> bool: """Compare an alert with area and category (time is ignored).""" return alert["data"] == area and alert["category"] == category class OrefAlertDataUpdateCoordinator(DataUpdateCoordinator[OrefAlertCoordinatorData]): """Class to manage fetching Oref Alert data.""" def __init__(self, hass: HomeAssistant, config_entry: ConfigEntry): """Initialize global data updater.""" super().__init__( hass, LOGGER, name=DOMAIN, update_interval=timedelta( seconds=config_entry.options.get(
"""DataUpdateCoordinator for oref_alert integration.""" OREF_ALERTS_URL = "https://www.oref.org.il/WarningMessages/alert/alerts.json" OREF_HISTORY_URL = "https://www.oref.org.il/WarningMessages/History/AlertsHistory.json" OREF_HEADERS = { "Referer": "https://www.oref.org.il/", "X-Requested-With": "XMLHttpRequest", "Content-Type": "application/json", } REQUEST_RETRIES = 3 REAL_TIME_ALERT_LOGIC_WINDOW = 2 @dataclass class OrefAlertCoordinatorData: """Class for holding coordinator data.""" alerts: list[Any] active_alerts: list[Any] def _sort_alerts(item1: dict[str, Any], item2: dict[str, Any]) -> int: """Sort by descending-order "date" and then ascending-order "name".""" if item1["alertDate"] < item2["alertDate"]: return 1 if item1["alertDate"] > item2["alertDate"]: return -1 if item1["data"] > item2["data"]: return 1 if item1["data"] < item2["data"]: return -1 return 0 def _compare_fields(alert: dict[str, Any], area: str, category: int) -> bool: """Compare an alert with area and category (time is ignored).""" return alert["data"] == area and alert["category"] == category class OrefAlertDataUpdateCoordinator(DataUpdateCoordinator[OrefAlertCoordinatorData]): """Class to manage fetching Oref Alert data.""" def __init__(self, hass: HomeAssistant, config_entry: ConfigEntry): """Initialize global data updater.""" super().__init__( hass, LOGGER, name=DOMAIN, update_interval=timedelta( seconds=config_entry.options.get(
CONF_POLL_INTERVAL, DEFAULT_POLL_INTERVAL
1
2023-10-18 11:16:41+00:00
24k
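_sort_alerts is an old-style comparator, newest alertDate first with ties broken by ascending data, adapted to list.sort through functools.cmp_to_key (both appear in the record). A runnable check using the function verbatim:

from functools import cmp_to_key

def _sort_alerts(item1, item2):
    """Sort by descending-order "date" and then ascending-order "name"."""
    if item1["alertDate"] < item2["alertDate"]:
        return 1
    if item1["alertDate"] > item2["alertDate"]:
        return -1
    if item1["data"] > item2["data"]:
        return 1
    if item1["data"] < item2["data"]:
        return -1
    return 0

alerts = [
    {"alertDate": "2023-10-18 11:00", "data": "B"},
    {"alertDate": "2023-10-18 12:00", "data": "A"},
    {"alertDate": "2023-10-18 11:00", "data": "A"},
]
alerts.sort(key=cmp_to_key(_sort_alerts))
print([(a["alertDate"], a["data"]) for a in alerts])
# [('2023-10-18 12:00', 'A'), ('2023-10-18 11:00', 'A'), ('2023-10-18 11:00', 'B')]

The same ordering could also be produced with two stable sorts (ascending on data, then descending on alertDate), but a single comparator keeps the whole rule in one place.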
RobertCsordas/moe
tasks/simple/language_model/transformer_lm_mixin.py
[ { "identifier": "TransformerLanguageModel", "path": "models/transformer_language_model.py", "snippet": "class TransformerLanguageModel(LoggingLayer, torch.nn.Module):\n def __init__(self, voc_size: int, embedding_size: Optional[int], state_size: int, dropout: float,\n tied_embedding: ...
import framework import torch import torch.nn import torch.nn.functional as F import torch.utils.data import math from typing import List, Tuple, Dict, Any from models import TransformerLanguageModel from ... import task, args from layers.transformer import RelativeTransformerEncoderLayer, PrelnRelativeTransformerEncoderLayer from layers.transformer.relative_preln_kvmem_transformer import PrelnRelativeKVMemTransformerEncoderLayer from layers.transformer.relative_moe_transformer import RelativeMoeTransformerEncoderLayer from layers.transformer.topk_transformer import TopkTransformer from layers.moe_layer import MoE from interfaces import Result
20,712
reg_type=self.helper.args.moe.reg_type, std_correction=self.helper.args.moe.std_correction, topk_mode=self.helper.args.moe.topk_mode, head_projection_size=self.helper.args.transformer.head_projection_size, activation_after_topk=self.helper.args.moe.activation_after_topk, weight_grouping=self.helper.args.moe.weight_grouping, kmeans_distance=self.helper.args.moe.kmeans_distance, drop_parallel=self.helper.args.moe.drop_parallel, block_expert_sel_in_grad=self.helper.args.moe.block_expert_sel_in_grad, mlp_selection=self.helper.args.moe.mlp_selection, classification_target=self.helper.args.moe.classification_target, norm_key_init=self.helper.args.moe.norm_key_init, normalize_expert_sel_init=self.helper.args.moe.norm_expert_sel_init, norm_value_init=self.helper.args.moe.norm_value_init, norm_standard_parallel_values=self.helper.args.moe.norm_standard_parallel_values, identical_init=self.helper.args.moe.identical_init, topological_sel_reg=self.helper.args.moe.topological_sel_reg, topological_expert_reg=self.helper.args.moe.topological_expert_reg, gumbel_select_only=self.helper.args.moe.gumbel_select_only, topk_value_norm_compensation=self.helper.args.moe.topk_value_norm_compensation, norm_expert_scores=self.helper.args.moe.norm_expert_scores, sel_input_cluster_init=self.helper.args.moe.sel_input_cluster_init, init_norm_mode=self.helper.args.moe.init_norm_mode, sel_bias=self.helper.args.moe.sel_bias, bias=self.helper.args.moe.bias, rescale_normed=self.helper.args.moe.rescale_normed, sel_norm=self.helper.args.moe.sel_norm, rescale_grads=self.helper.args.moe.rescale_grads, gumbel_decay=self.helper.args.moe.gumbel_decay, ln_affine=self.helper.args.transformer.ln_affine, sinkhorn_local=self.helper.args.moe.sinkhorn_local, sinkhorn_n_iters=self.helper.args.moe.sinkhron_n_iters, moe_dropout_factor=self.helper.args.moe.dropout_factor, drop_expert=self.helper.args.moe.drop_expert, expert_size_init=self.helper.args.moe.expert_size_init, sync_distributed=self.helper.args.moe.sync_distributed, modulation_amplitude=self.helper.args.moe.modulation_amplitude, invisible_selection=self.helper.args.moe.invisible_selection, slope_multiplier=self.helper.args.moe.slope_multiplier, moe_init_scale=self.helper.args.moe.init_scale) else: assert False, "Invalid variant" layers = [mklayer() for _ in range(self.helper.args.transformer.encoder_n_layers)] return layers def fix_init(self, model): init_std = 0.02 torch.nn.init.normal_(model.embedding.weight, 0.0, init_std) # torch.nn.init.normal_(model.embedding_adapter.weight, 0.0, init_std) initialized = 0 for m in model.modules(): if isinstance(m, (torch.nn.Linear, torch.nn.Embedding)) and hasattr(m, "weight"): torch.nn.init.normal_(m.weight, 0.0, init_std) initialized += m.weight.numel() if isinstance(m, (torch.nn.Linear, torch.nn.LayerNorm)) and m.bias is not None: torch.nn.init.constant_(m.bias, 0) initialized += m.bias.numel() if isinstance(m, (torch.nn.LayerNorm)) and m.weight is not None: torch.nn.init.normal_(m.weight, 1.0, init_std) initialized += m.weight.numel() if isinstance(m, MoE): torch.nn.init.normal_(m.keys, 0.0, init_std) torch.nn.init.normal_(m.values, 0.0, init_std) if m.expert_sel is not None: torch.nn.init.normal_(m.expert_sel, 0.0, init_std) initialized += m.expert_sel.numel() initialized += m.keys.numel() + m.values.numel() print(f"Reinitialized {initialized/self.n_weights*100:.3f}% weights") def create_model(self) -> torch.nn.Module: # pyright: reportOptionalMemberAccess=false tlayers = self.get_layers() if self.helper.args.transformer.output_mode != 
"normal" and self.is_preln(): raise ValueError("accumulated_output not supported with pre-ln") model = TransformerLanguageModel( len(self.train_set.vocabulary), self.helper.args.embedding_size, self.helper.args.state_size, self.helper.args.dropout, tied_embedding=self.helper.args.tied_embedding, layers=tlayers, n_prev_states=self.helper.args.lm.trafo.context_blocks, n_prev_states_test=self.helper.args.lm.trafo.test_context_blocks, same_length_eval=self.helper.args.lm.trafo.same_length_eval, p_drop_layer=self.helper.args.transformer.p_drop_layer, same_length=self.helper.args.lm.trafo.same_length, use_last_state=self.helper.args.lm.trafo.last_layer_context, norm_before_output=self.is_preln(), output_mode=self.helper.args.transformer.output_mode,) self.n_weights = sum(p.numel() for p in model.parameters()) with torch.no_grad(): if self.is_preln(): model.embedding_scale = 1.0 elif self.helper.args.lm.trafo.xl_init: self.fix_init(model) elif self.helper.args.lm.trafo.embedding_mode_init=="scale_to_sqrt_dmodel": norm = model.embedding.weight.norm(dim=-1).mean() model.embedding_scale = math.sqrt(self.helper.args.state_size) / norm elif self.helper.args.lm.trafo.embedding_mode_init=="one_and_scale_to_sqrt_dmodel": norm = model.embedding.weight.norm(dim=-1).mean() model.embedding_scale = math.sqrt(self.helper.args.state_size) model.embedding.weight.mul_(1.0 / norm) elif self.helper.args.lm.trafo.embedding_mode_init=="init_to_sqrt_dmodel": norm = model.embedding.weight.norm(dim=-1, keepdim=True) model.embedding_scale=1.0 model.embedding.weight.mul_(math.sqrt(self.helper.args.state_size) / norm) return model def moe_recluster(self): for n, m in self.model.named_modules(): if isinstance(m, MoE): perm = m.regroup_weights() m.patch_optimizer_state(self.optimizer, perm)
@args def a(parser: framework.helpers.ArgumentParser): parser.add_argument("-lm.trafo.context_blocks", default=1) parser.add_argument("-lm.trafo.test_context_blocks", default="none", parser=parser.int_or_none_parser) parser.add_argument("-lm.trafo.test_pos_clamp", default="none", parser=parser.int_or_none_parser) parser.add_argument("-lm.trafo.same_length_eval", default=False) parser.add_argument("-lm.trafo.same_length", default=False) parser.add_argument("-lm.trafo.last_layer_context", default=False) parser.add_argument("-lm.trafo.xl_init", default=False) parser.add_argument("-lm.trafo.embedding_mode_init", default="default", choice=["default", "scale_to_sqrt_dmodel", "init_to_sqrt_dmodel", "one_and_scale_to_sqrt_dmodel", "like_preln"]) parser.add_argument("-pkm.n_keys", default="128", parser=parser.int_list_parser) parser.add_argument("-pkm.n_heads", default=1) parser.add_argument("-pkm.knn", default=32) parser.add_argument("-pkm.stochastic", default=False) parser.add_argument("-pkm.query_batchnorm", default=False) parser.add_argument("-pkm.custom_init", default=0) parser.add_argument("-pkm.slice_values", default=False) parser.add_argument("-pkm.slice_proj", default=False) parser.add_argument("-pkm.sample_smallest", default=False) parser.add_argument("-moe.n_experts", default=128) parser.add_argument("-moe.expert_size", default=128) parser.add_argument("-moe.selection_mode", default="add", choice=["add", "gate", "sigmoid", "gumbel", "hard_gumbel", "predict", "predict_mlp", "classify", "gumbel_sigmoid", "sinkhorn", "sinkhorn2", "sinkmoid", "sinkmax", "moe", "mul", "random", "sinkmoid2", "sinkmax2", "modulate"]) parser.add_argument("-moe.perplexity_reg", default=0.0) parser.add_argument("-moe.perplexity_reg_mode", default="step", choice=["step", "global", "time", "global_time"]) parser.add_argument("-moe.reg_type", default="entropy", choice=["perplexity", "variance", "entropy", "l2", "switch", "normal"]) parser.add_argument("-moe.key_mode", default="moe", choice=["moe", "both", "shared"]) parser.add_argument("-moe.half_key", default=False) parser.add_argument("-moe.norm_keys", default=False) parser.add_argument("-moe.kmeans_distance", default='cosine', choice=['cosine', 'euclidean']) parser.add_argument("-moe.n_random", default=0) parser.add_argument("-moe.std_correction", default=False) parser.add_argument("-moe.topk_mode", default="full", choice=["full", "l1_approx", "approx"]) parser.add_argument("-moe.activation_after_topk", default=False) parser.add_argument("-moe.weight_grouping", default="none", choice=["none", "keys_only", "keys_and_experts"]) parser.add_argument("-moe.drop_parallel", default=True) parser.add_argument("-moe.mlp_selection", default=False) parser.add_argument("-moe.block_expert_sel_in_grad", default=False) parser.add_argument("-moe.classification_target", default="sum", choice=["sum", "max"]) parser.add_argument("-moe.recluster_steps", default="", parser=parser.int_list_parser) parser.add_argument("-moe.norm_key_init", default=False) parser.add_argument("-moe.norm_value_init", default=False) parser.add_argument("-moe.norm_expert_sel_init", default=False) parser.add_argument("-moe.norm_standard_parallel_values", default=False) parser.add_argument("-moe.identical_init", default=False) parser.add_argument("-moe.topological_sel_reg", default=0.0) parser.add_argument("-moe.topological_expert_reg", default=0.0) parser.add_argument("-moe.sel_lr_multipler", default=1.0) parser.add_argument("-moe.expert_lr_multipler", default=1.0) 
parser.add_argument("-moe.gumbel_select_only", default=False) parser.add_argument("-moe.topk_value_norm_compensation", default=False) parser.add_argument("-moe.norm_expert_scores", default=False) parser.add_argument("-moe.sel_input_cluster_init", default=False) parser.add_argument("-moe.init_norm_mode", default="full") parser.add_argument("-moe.bias", default=False) parser.add_argument("-moe.sel_bias", default=False) parser.add_argument("-moe.rescale_normed", default=False) parser.add_argument("-moe.sel_norm", default="none", choice=["none", "cos", "input", "weights"]) parser.add_argument("-moe.rescale_grads", default=False) parser.add_argument("-moe.gumbel_decay", default=0) parser.add_argument("-moe.sinkhorn_local", default=False) parser.add_argument("-moe.sinkhron_n_iters", default=3) parser.add_argument("-moe.dropout_factor", default=1.0) parser.add_argument("-moe.drop_expert", default=0.0) parser.add_argument("-moe.expert_size_init", default=False) parser.add_argument("-moe.sync_distributed", default=True) parser.add_argument("-moe.modulation_amplitude", default=0.5) parser.add_argument("-moe.invisible_selection", default=False) parser.add_argument("-moe.slope_multiplier", default=1.0) parser.add_argument("-moe.init_scale", default=1.0) parser.add_argument("-kvmem.linproj", default=False) parser.add_argument("-kvmem.head_merge_topk", default=False) parser.add_argument("-kvmem.load_balance", default=False) parser.add_argument("-kvmem.dropout", default="none", choice=["none", "early", "late", "weight", "score"]) parser.add_argument("-kvmem.randomize_indices", default=False) parser.add_argument("-kvmem.standard_parallel", default=False) parser.add_argument("-kvmem.query_bias", default=False) parser.add_argument("-kvmem.approx_topk", default=False) parser.add_argument("-kvmem.norm_values", default=False) parser.add_argument("-kvmem.factorize", default=False) parser.add_argument("-kvmem.full_key", default=False) parser.add_argument("-kvmem.key_redundancy_factor", default=1) parser.add_argument("-kvmem.two_stage", default=False) parser.add_argument("-kvmem.head_exclusive", default=False) parser.add_argument("-transformer.topk_value", default=32) parser.add_argument("-transformer.universal.nonshared", default=0) parser.add_argument("-transformer.topk_use_norm", default=True) parser.add_argument("-transformer.activation", default="relu", choice=["relu", "topk", "gelu", "identity", "sigmoid", "softmax"]) parser.add_argument("-transformer.p_drop_layer", default=0.0) parser.add_argument("-transformer.head_projection_size", default="none", parser=parser.int_or_none_parser) parser.add_argument("-transformer.ln_affine", default=True) parser.add_argument("-transformer.ln_after_attention", default=True) parser.add_argument("-transformer.output_mode", default="normal", choice=["normal", "sum", "geometric", "sigmoid"]) @task() class TransformerLMMixin: helper: framework.helpers.TrainingHelper def is_preln(self) -> bool: return "preln" in self.helper.args.transformer.variant def topk_activation(self, x: torch.Tensor) -> torch.Tensor: nx = -x return torch.masked_fill(x, nx <= nx.kthvalue(self.helper.args.transformer.topk_value, keepdim=True)[0], 0) def get_layers(self) -> List[torch.nn.Module]: # pyright: reportOptionalMemberAccess=false if self.helper.args.transformer.activation == "relu": activation = F.relu elif self.helper.args.transformer.activation == "topk": activation = self.topk_activation elif self.helper.args.transformer.activation == "identity": activation = lambda x: x elif 
self.helper.args.transformer.activation == "sigmoid": activation = torch.sigmoid elif self.helper.args.transformer.activation == "gelu": activation = F.gelu elif self.helper.args.transformer.activation == "softmax": activation = lambda x: F.softmax(x, dim=-1) else: raise ValueError(f"Invalid activation: {self.helper.args.transformer.activation}") base_args = dict( d_model=self.helper.args.state_size, nhead=self.helper.args.transformer.n_heads, dim_feedforward=int(self.helper.args.state_size * self.helper.args.transformer.ff_multiplier), dropout=self.helper.args.dropout, activation=activation ) extra_args = {} if not self.helper.args.transformer.variant.endswith("_gelu") else { "activation": F.gelu, "drop_expand": False } if self.helper.args.transformer.variant in {"preln_relative"}: mklayer = lambda: PrelnRelativeTransformerEncoderLayer( **base_args, **extra_args, test_pos_clamp=self.helper.args.lm.trafo.test_pos_clamp, n_layers=self.helper.args.transformer.encoder_n_layers, head_projection_size=self.helper.args.transformer.head_projection_size,) elif self.helper.args.transformer.variant in {"preln_topk"}: mklayer = lambda: TopkTransformer( **base_args, **extra_args, test_pos_clamp=self.helper.args.lm.trafo.test_pos_clamp, n_layers=self.helper.args.transformer.encoder_n_layers, k=self.helper.args.transformer.topk_value, use_norm=self.helper.args.transformer.topk_use_norm, head_projection_size=self.helper.args.transformer.head_projection_size,) elif self.helper.args.transformer.variant in {"preln_kvmem"}: mklayer = lambda: PrelnRelativeKVMemTransformerEncoderLayer( **base_args, **extra_args, test_pos_clamp=self.helper.args.lm.trafo.test_pos_clamp, n_layers=self.helper.args.transformer.encoder_n_layers, n_keys=self.helper.args.pkm.n_keys, pkm_stochastic=self.helper.args.pkm.stochastic, pkm_heads=self.helper.args.pkm.n_heads, pkm_custom_init=self.helper.args.pkm.custom_init, pkm_slice_values=self.helper.args.pkm.slice_values, pkm_knn=self.helper.args.pkm.knn, linproj=self.helper.args.kvmem.linproj, head_merge_topk=self.helper.args.kvmem.head_merge_topk, load_balance=self.helper.args.kvmem.load_balance, kvmem_dropout=self.helper.args.kvmem.dropout, kvmem_randomize_indices=self.helper.args.kvmem.randomize_indices, kvmem_query_bias=self.helper.args.kvmem.query_bias, standard_parallel=self.helper.args.kvmem.standard_parallel, approx_topk=self.helper.args.kvmem.approx_topk, factorize=self.helper.args.kvmem.factorize, full_key=self.helper.args.kvmem.full_key, key_redundancy_factor=self.helper.args.kvmem.key_redundancy_factor, two_stage=self.helper.args.kvmem.two_stage, head_exclusive=self.helper.args.kvmem.head_exclusive, head_projection_size=self.helper.args.transformer.head_projection_size,) elif self.helper.args.transformer.variant in {"preln_moe", "preln_moe_universal", "moe", "moe_universal"}: # def __init__(self, d_model, nhead, n_bins: int, bin_size: int, n_layers: int, dim_feedforward=2048, mklayer = lambda: RelativeMoeTransformerEncoderLayer( **base_args, **extra_args, preln=self.is_preln(), test_pos_clamp=self.helper.args.lm.trafo.test_pos_clamp, n_layers=self.helper.args.transformer.encoder_n_layers, standard_parallel=self.helper.args.kvmem.standard_parallel, custom_init=self.helper.args.pkm.custom_init, n_experts=self.helper.args.moe.n_experts, expert_size=self.helper.args.moe.expert_size, dropout_mode=self.helper.args.kvmem.dropout, knn=self.helper.args.pkm.knn, selection_mode=self.helper.args.moe.selection_mode, perplexity_reg=self.helper.args.moe.perplexity_reg, 
key_mode=self.helper.args.moe.key_mode, half_key=self.helper.args.moe.half_key, n_heads=self.helper.args.pkm.n_heads, norm_keys=self.helper.args.moe.norm_keys, perplexity_reg_mode=self.helper.args.moe.perplexity_reg_mode, n_random=self.helper.args.moe.n_random, reg_type=self.helper.args.moe.reg_type, std_correction=self.helper.args.moe.std_correction, topk_mode=self.helper.args.moe.topk_mode, head_projection_size=self.helper.args.transformer.head_projection_size, activation_after_topk=self.helper.args.moe.activation_after_topk, weight_grouping=self.helper.args.moe.weight_grouping, kmeans_distance=self.helper.args.moe.kmeans_distance, drop_parallel=self.helper.args.moe.drop_parallel, block_expert_sel_in_grad=self.helper.args.moe.block_expert_sel_in_grad, mlp_selection=self.helper.args.moe.mlp_selection, classification_target=self.helper.args.moe.classification_target, norm_key_init=self.helper.args.moe.norm_key_init, normalize_expert_sel_init=self.helper.args.moe.norm_expert_sel_init, norm_value_init=self.helper.args.moe.norm_value_init, norm_standard_parallel_values=self.helper.args.moe.norm_standard_parallel_values, identical_init=self.helper.args.moe.identical_init, topological_sel_reg=self.helper.args.moe.topological_sel_reg, topological_expert_reg=self.helper.args.moe.topological_expert_reg, gumbel_select_only=self.helper.args.moe.gumbel_select_only, topk_value_norm_compensation=self.helper.args.moe.topk_value_norm_compensation, norm_expert_scores=self.helper.args.moe.norm_expert_scores, sel_input_cluster_init=self.helper.args.moe.sel_input_cluster_init, init_norm_mode=self.helper.args.moe.init_norm_mode, sel_bias=self.helper.args.moe.sel_bias, bias=self.helper.args.moe.bias, rescale_normed=self.helper.args.moe.rescale_normed, sel_norm=self.helper.args.moe.sel_norm, rescale_grads=self.helper.args.moe.rescale_grads, gumbel_decay=self.helper.args.moe.gumbel_decay, ln_affine=self.helper.args.transformer.ln_affine, sinkhorn_local=self.helper.args.moe.sinkhorn_local, sinkhorn_n_iters=self.helper.args.moe.sinkhron_n_iters, moe_dropout_factor=self.helper.args.moe.dropout_factor, drop_expert=self.helper.args.moe.drop_expert, expert_size_init=self.helper.args.moe.expert_size_init, sync_distributed=self.helper.args.moe.sync_distributed, modulation_amplitude=self.helper.args.moe.modulation_amplitude, invisible_selection=self.helper.args.moe.invisible_selection, slope_multiplier=self.helper.args.moe.slope_multiplier, moe_init_scale=self.helper.args.moe.init_scale) else: assert False, "Invalid variant" layers = [mklayer() for _ in range(self.helper.args.transformer.encoder_n_layers)] return layers def fix_init(self, model): init_std = 0.02 torch.nn.init.normal_(model.embedding.weight, 0.0, init_std) # torch.nn.init.normal_(model.embedding_adapter.weight, 0.0, init_std) initialized = 0 for m in model.modules(): if isinstance(m, (torch.nn.Linear, torch.nn.Embedding)) and hasattr(m, "weight"): torch.nn.init.normal_(m.weight, 0.0, init_std) initialized += m.weight.numel() if isinstance(m, (torch.nn.Linear, torch.nn.LayerNorm)) and m.bias is not None: torch.nn.init.constant_(m.bias, 0) initialized += m.bias.numel() if isinstance(m, (torch.nn.LayerNorm)) and m.weight is not None: torch.nn.init.normal_(m.weight, 1.0, init_std) initialized += m.weight.numel() if isinstance(m, MoE): torch.nn.init.normal_(m.keys, 0.0, init_std) torch.nn.init.normal_(m.values, 0.0, init_std) if m.expert_sel is not None: torch.nn.init.normal_(m.expert_sel, 0.0, init_std) initialized += m.expert_sel.numel() initialized += 
m.keys.numel() + m.values.numel() print(f"Reinitialized {initialized/self.n_weights*100:.3f}% weights") def create_model(self) -> torch.nn.Module: # pyright: reportOptionalMemberAccess=false tlayers = self.get_layers() if self.helper.args.transformer.output_mode != "normal" and self.is_preln(): raise ValueError("accumulated_output not supported with pre-ln") model = TransformerLanguageModel( len(self.train_set.vocabulary), self.helper.args.embedding_size, self.helper.args.state_size, self.helper.args.dropout, tied_embedding=self.helper.args.tied_embedding, layers=tlayers, n_prev_states=self.helper.args.lm.trafo.context_blocks, n_prev_states_test=self.helper.args.lm.trafo.test_context_blocks, same_length_eval=self.helper.args.lm.trafo.same_length_eval, p_drop_layer=self.helper.args.transformer.p_drop_layer, same_length=self.helper.args.lm.trafo.same_length, use_last_state=self.helper.args.lm.trafo.last_layer_context, norm_before_output=self.is_preln(), output_mode=self.helper.args.transformer.output_mode,) self.n_weights = sum(p.numel() for p in model.parameters()) with torch.no_grad(): if self.is_preln(): model.embedding_scale = 1.0 elif self.helper.args.lm.trafo.xl_init: self.fix_init(model) elif self.helper.args.lm.trafo.embedding_mode_init=="scale_to_sqrt_dmodel": norm = model.embedding.weight.norm(dim=-1).mean() model.embedding_scale = math.sqrt(self.helper.args.state_size) / norm elif self.helper.args.lm.trafo.embedding_mode_init=="one_and_scale_to_sqrt_dmodel": norm = model.embedding.weight.norm(dim=-1).mean() model.embedding_scale = math.sqrt(self.helper.args.state_size) model.embedding.weight.mul_(1.0 / norm) elif self.helper.args.lm.trafo.embedding_mode_init=="init_to_sqrt_dmodel": norm = model.embedding.weight.norm(dim=-1, keepdim=True) model.embedding_scale=1.0 model.embedding.weight.mul_(math.sqrt(self.helper.args.state_size) / norm) return model def moe_recluster(self): for n, m in self.model.named_modules(): if isinstance(m, MoE): perm = m.regroup_weights() m.patch_optimizer_state(self.optimizer, perm)
def train_step(self) -> Tuple[Result, Dict[str, Any]]:
9
2023-10-16 11:26:45+00:00
24k
boppreh/hello_tls
src/hello_tls/protocol.py
[ { "identifier": "Protocol", "path": "src/hello_tls/names_and_numbers.py", "snippet": "class Protocol(Enum):\n # Keep protocols in order of preference.\n TLS1_3 = b\"\\x03\\x04\"\n TLS1_2 = b\"\\x03\\x03\"\n TLS1_1 = b\"\\x03\\x02\"\n TLS1_0 = b\"\\x03\\x01\"\n SSLv3 = b\"\\x03\\x00\"\n...
from typing import Iterator, List, Sequence, Optional, Iterable, Callable, Tuple from contextlib import contextmanager from dataclasses import dataclass from .names_and_numbers import Protocol, RecordType, HandshakeType, CompressionMethod, CipherSuite, ExtensionType, Group, AlertLevel, AlertDescription, PskKeyExchangeMode import logging
14,906
logger = logging.getLogger(__name__) class ScanError(Exception): """ Base error class for errors that occur during scanning. """ pass class ServerAlertError(ScanError): def __init__(self, level: AlertLevel, description: AlertDescription): super().__init__(self, f'Server error: {level}: {description}') self.level = level self.description = description class BadServerResponse(ScanError): """ Error for server responses that can't be parsed. """ pass @dataclass class ServerHello: version: Protocol compression: CompressionMethod cipher_suite: CipherSuite group: Optional[Group] def _make_stream_parser(packets: Iterable[bytes]) -> Tuple[Callable[[int], bytes], Callable[[], int]]: """ Returns helper functions to parse a stream of packets. """ start = 0 packets_iter = iter(packets) data = b'' def read_next(length: int) -> bytes: nonlocal start, data while start + length > len(data): try: data += next(packets_iter) except StopIteration: raise BadServerResponse('Server response ended unexpectedly') value = data[start:start+length] start += length return value return read_next, lambda: start def _bytes_to_int(b: bytes) -> int: return int.from_bytes(b, byteorder='big') def parse_server_hello(packets: Iterable[bytes]) -> ServerHello: """ Parses a Server Hello packet and returns the cipher suite accepted by the server. """ read_next, current_position = _make_stream_parser(packets)
record_type = RecordType(read_next(1))
1
2023-10-21 02:00:13+00:00
24k
zhaojw1998/AccoMontage-3
arrangement_utils.py
[ { "identifier": "split_phrases", "path": "piano_arranger/acc_utils.py", "snippet": "def split_phrases(segmentation):\n \"\"\"Split a phrase label string into individual phrase meta info\"\"\"\n if '\\n' not in segmentation:\n segmentation += '\\n'\n phrases = []\n lengths = []\n cu...
import os import pretty_midi as pyd import numpy as np import torch import piano_arranger.format_converter as cvt from torch.utils.data import DataLoader from scipy.interpolate import interp1d from tqdm import tqdm from piano_arranger.acc_utils import split_phrases from piano_arranger.models import DisentangleVAE from piano_arranger.AccoMontage import find_by_length, dp_search, re_harmonization, get_texture_filter, ref_spotlight from orchestrator import Slakh2100_Pop909_Dataset, collate_fn, compute_pr_feat, EMBED_PROGRAM_MAPPING, Prior from orchestrator.QA_dataset import SLAKH_CLASS_PROGRAMS from orchestrator.utils import grid2pr, pr2grid, matrix2midi, midi2matrix from orchestrator.prior_dataset import TOTAL_LEN_BIN, ABS_POS_BIN, REL_POS_BIN
17,664
SLAKH_CLASS_MAPPING = {v: k for k, v in EMBED_PROGRAM_MAPPING.items()} def load_premise(DATA_FILE_ROOT, DEVICE): """Load AccoMontage Search Space""" print('Loading AccoMontage piano texture search space. This may take 1 or 2 minutes ...') data = np.load(os.path.join(DATA_FILE_ROOT, 'phrase_data.npz'), allow_pickle=True) melody = data['melody'] acc = data['acc'] chord = data['chord'] vel = data['velocity'] cc = data['cc'] acc_pool = {} for LEN in tqdm(range(2, 13)): (mel, acc_, chord_, vel_, cc_, song_reference) = find_by_length(melody, acc, chord, vel, cc, LEN) acc_pool[LEN] = (mel, acc_, chord_, vel_, cc_, song_reference) texture_filter = get_texture_filter(acc_pool) edge_weights=np.load(os.path.join(DATA_FILE_ROOT, 'edge_weights.npz'), allow_pickle=True) """Load Q&A Prompt Search Space""" print('loading orchestration prompt search space ...') slakh_dir = os.path.join(DATA_FILE_ROOT, 'Slakh2100_inference_set') dataset = Slakh2100_Pop909_Dataset(slakh_dir=slakh_dir, pop909_dir=None, debug_mode=False, split='validation', mode='train')
loader = DataLoader(dataset, batch_size=1, shuffle=True, collate_fn=lambda b:collate_fn(b, DEVICE))
8
2023-10-23 12:36:57+00:00
24k
liuqidong07/MOELoRA-peft
src/MLoRA/peft/peft_model.py
[ { "identifier": "PeftConfig", "path": "src/MLoRA/peft/utils/config.py", "snippet": "class PeftConfig(PeftConfigMixin):\n \"\"\"\n This is the base configuration class to store the configuration of a [`PeftModel`].\n\n Args:\n peft_type (Union[[`~peft.utils.config.PeftType`], `str`]): The...
import inspect import os import warnings import torch import torch.nn as nn from contextlib import contextmanager from accelerate import dispatch_model, infer_auto_device_map from accelerate.hooks import AlignDevicesHook, add_hook_to_module, remove_hook_from_submodules from accelerate.utils import get_balanced_memory from huggingface_hub import hf_hub_download from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from transformers import PreTrainedModel from transformers.modeling_outputs import SequenceClassifierOutput, TokenClassifierOutput from transformers.utils import PushToHubMixin from .utils import PeftConfig from .shared import Gate, GateN from .tuners import ( AdaLoraModel, AdaptionPromptModel, LoraModel, PrefixEncoder, PromptEmbedding, PromptEncoder, MMOELoraModelS, ) from .utils import ( TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING, WEIGHTS_NAME, PeftConfig, PeftType, PromptLearningConfig, TaskType, _set_adapter, _set_trainable, get_peft_model_state_dict, set_peft_model_state_dict, shift_tokens_right, ) from .mapping import MODEL_TYPE_TO_PEFT_MODEL_MAPPING, PEFT_TYPE_TO_CONFIG_MAPPING from .mapping import PEFT_TYPE_TO_CONFIG_MAPPING
15,267
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. PEFT_TYPE_TO_MODEL_MAPPING = { PeftType.LORA: LoraModel, PeftType.PROMPT_TUNING: PromptEmbedding, PeftType.P_TUNING: PromptEncoder, PeftType.PREFIX_TUNING: PrefixEncoder, PeftType.ADALORA: AdaLoraModel, PeftType.ADAPTION_PROMPT: AdaptionPromptModel, PeftType.MMOELORAS: MMOELoraModelS, } class PeftModel(PushToHubMixin, torch.nn.Module): """ Base model encompassing various Peft methods. Args: model ([`~transformers.PreTrainedModel`]): The base transformer model used for Peft. peft_config ([`PeftConfig`]): The configuration of the Peft model. **Attributes**: - **base_model** ([`~transformers.PreTrainedModel`]) -- The base transformer model used for Peft. - **peft_config** ([`PeftConfig`]) -- The configuration of the Peft model. - **modules_to_save** (`list` of `str`) -- The list of sub-module names to save when saving the model. - **prompt_encoder** ([`PromptEncoder`]) -- The prompt encoder used for Peft if using [`PromptLearningConfig`]. - **prompt_tokens** (`torch.Tensor`) -- The virtual prompt tokens used for Peft if using [`PromptLearningConfig`]. - **transformer_backbone_name** (`str`) -- The name of the transformer backbone in the base model if using [`PromptLearningConfig`]. - **word_embeddings** (`torch.nn.Embedding`) -- The word embeddings of the transformer backbone in the base model if using [`PromptLearningConfig`]. """ def __init__(self, model, peft_config: PeftConfig, adapter_name="default"): super().__init__() self.base_model = model self.config = self.base_model.config self.modules_to_save = None self.peft_config = {} self.active_adapter = adapter_name self.peft_type = peft_config.peft_type self.base_model_torch_dtype = getattr(model, "dtype", None) if not isinstance(peft_config, PromptLearningConfig): self.peft_config[adapter_name] = peft_config self.base_model = PEFT_TYPE_TO_MODEL_MAPPING[peft_config.peft_type]( self.base_model, self.peft_config, adapter_name ) self.set_additional_trainable_modules(peft_config, adapter_name) else: self.add_adapter(adapter_name, peft_config) def save_pretrained(self, save_directory, **kwargs): r""" This function saves the adapter model and the adapter configuration files to a directory, so that it can be reloaded using the [`LoraModel.from_pretrained`] class method, and also used by the [`LoraModel.push_to_hub`] method. Args: save_directory (`str`): Directory where the adapter model and configuration files will be saved (will be created if it does not exist). kwargs (additional keyword arguments, *optional*): Additional keyword arguments passed along to the `push_to_hub` method. 
""" if os.path.isfile(save_directory): raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file") os.makedirs(save_directory, exist_ok=True) for adapter_name, peft_config in self.peft_config.items(): # save only the trainable weights output_state_dict = get_peft_model_state_dict( self, state_dict=kwargs.get("state_dict", None), adapter_name=adapter_name ) # save the weights based on the adapter name output_dir = os.path.join(save_directory, adapter_name) if adapter_name != "default" else save_directory os.makedirs(output_dir, exist_ok=True)
torch.save(output_state_dict, os.path.join(output_dir, WEIGHTS_NAME))
15
2023-10-19 10:55:50+00:00
24k
YuroFR/freqtrade-modded-crypto-trading-bot
freqtrade/exchange/exchange.py
[ { "identifier": "DEFAULT_AMOUNT_RESERVE_PERCENT", "path": "freqtrade/constants.py", "snippet": "DOCS_LINK = \"https://www.freqtrade.io/en/stable\"\nDEFAULT_CONFIG = 'config.json'\nPROCESS_THROTTLE_SECS = 5 # sec\nHYPEROPT_EPOCH = 100 # epochs\nRETRY_TIMEOUT = 30 # sec\nTIMEOUT_UNITS = ['minutes', 'se...
import asyncio import inspect import logging import signal import ccxt import ccxt.async_support as ccxt_async from copy import deepcopy from datetime import datetime, timedelta, timezone from math import floor from threading import Lock from typing import Any, Coroutine, Dict, List, Literal, Optional, Tuple, Union from cachetools import TTLCache from ccxt import TICK_SIZE from dateutil import parser from pandas import DataFrame, concat from freqtrade.constants import (DEFAULT_AMOUNT_RESERVE_PERCENT, NON_OPEN_EXCHANGE_STATES, BidAsk, BuySell, Config, EntryExit, ExchangeConfig, ListPairsWithTimeframes, MakerTaker, OBLiteral, PairWithTimeframe) from freqtrade.data.converter import clean_ohlcv_dataframe, ohlcv_to_dataframe, trades_dict_to_list from freqtrade.enums import OPTIMIZE_MODES, CandleType, MarginMode, PriceType, TradingMode from freqtrade.exceptions import (DDosProtection, ExchangeError, InsufficientFundsError, InvalidOrderException, OperationalException, PricingError, RetryableOrderError, TemporaryError) from freqtrade.exchange.common import (API_FETCH_ORDER_RETRY_COUNT, remove_exchange_credentials, retrier, retrier_async) from freqtrade.exchange.exchange_utils import (ROUND, ROUND_DOWN, ROUND_UP, CcxtModuleType, amount_to_contract_precision, amount_to_contracts, amount_to_precision, contracts_to_amount, date_minus_candles, is_exchange_known_ccxt, market_is_active, price_to_precision, timeframe_to_minutes, timeframe_to_msecs, timeframe_to_next_date, timeframe_to_prev_date, timeframe_to_seconds) from freqtrade.exchange.types import OHLCVResponse, OrderBook, Ticker, Tickers from freqtrade.misc import (chunks, deep_merge_dicts, file_dump_json, file_load_json, safe_value_fallback2) from freqtrade.plugins.pairlist.pairlist_helpers import expand_pairlist from freqtrade.util import dt_from_ts, dt_now from freqtrade.util.datetime_helpers import dt_humanize, dt_ts from freqtrade.persistence import Order
15,001
leverage: float, wallet_balance: float, mm_ex_1: float = 0.0, # (Binance) Cross only upnl_ex_1: float = 0.0, # (Binance) Cross only ) -> Optional[float]: """ Set's the margin mode on the exchange to cross or isolated for a specific pair """ if self.trading_mode == TradingMode.SPOT: return None elif (self.trading_mode != TradingMode.FUTURES): raise OperationalException( f"{self.name} does not support {self.margin_mode} {self.trading_mode}") liquidation_price = None if self._config['dry_run'] or not self.exchange_has("fetchPositions"): liquidation_price = self.dry_run_liquidation_price( pair=pair, open_rate=open_rate, is_short=is_short, amount=amount, leverage=leverage, stake_amount=stake_amount, wallet_balance=wallet_balance, mm_ex_1=mm_ex_1, upnl_ex_1=upnl_ex_1 ) else: positions = self.fetch_positions(pair) if len(positions) > 0: pos = positions[0] liquidation_price = pos['liquidationPrice'] if liquidation_price is not None: buffer_amount = abs(open_rate - liquidation_price) * self.liquidation_buffer liquidation_price_buffer = ( liquidation_price - buffer_amount if is_short else liquidation_price + buffer_amount ) return max(liquidation_price_buffer, 0.0) else: return None def dry_run_liquidation_price( self, pair: str, open_rate: float, # Entry price of position is_short: bool, amount: float, stake_amount: float, leverage: float, wallet_balance: float, # Or margin balance mm_ex_1: float = 0.0, # (Binance) Cross only upnl_ex_1: float = 0.0, # (Binance) Cross only ) -> Optional[float]: """ Important: Must be fetching data from cached values as this is used by backtesting! PERPETUAL: gate: https://www.gate.io/help/futures/futures/27724/liquidation-price-bankruptcy-price > Liquidation Price = (Entry Price ± Margin / Contract Multiplier / Size) / [ 1 ± (Maintenance Margin Ratio + Taker Rate)] Wherein, "+" or "-" depends on whether the contract goes long or short: "-" for long, and "+" for short. okex: https://www.okex.com/support/hc/en-us/articles/ 360053909592-VI-Introduction-to-the-isolated-mode-of-Single-Multi-currency-Portfolio-margin :param pair: Pair to calculate liquidation price for :param open_rate: Entry price of position :param is_short: True if the trade is a short, false otherwise :param amount: Absolute value of position size incl. leverage (in base currency) :param stake_amount: Stake amount - Collateral in settle currency. :param leverage: Leverage used for this position. :param trading_mode: SPOT, MARGIN, FUTURES, etc. 
:param margin_mode: Either ISOLATED or CROSS :param wallet_balance: Amount of margin_mode in the wallet being used to trade Cross-Margin Mode: crossWalletBalance Isolated-Margin Mode: isolatedWalletBalance # * Not required by Gate or OKX :param mm_ex_1: :param upnl_ex_1: """ market = self.markets[pair] taker_fee_rate = market['taker'] mm_ratio, _ = self.get_maintenance_ratio_and_amt(pair, stake_amount) if self.trading_mode == TradingMode.FUTURES and self.margin_mode == MarginMode.ISOLATED: if market['inverse']: raise OperationalException( "Freqtrade does not yet support inverse contracts") value = wallet_balance / amount mm_ratio_taker = (mm_ratio + taker_fee_rate) if is_short: return (open_rate + value) / (1 + mm_ratio_taker) else: return (open_rate - value) / (1 - mm_ratio_taker) else: raise OperationalException( "Freqtrade only supports isolated futures for leverage trading") def get_maintenance_ratio_and_amt( self, pair: str, nominal_value: float, ) -> Tuple[float, Optional[float]]: """ Important: Must be fetching data from cached values as this is used by backtesting! :param pair: Market symbol :param nominal_value: The total trade amount in quote currency including leverage maintenance amount only on Binance :return: (maintenance margin ratio, maintenance amount) """
# pragma pylint: disable=W0603 """ Cryptocurrency Exchanges support """ logger = logging.getLogger(__name__) class Exchange: # Parameters to add directly to buy/sell calls (like agreeing to trading agreement) _params: Dict = {} # Additional parameters - added to the ccxt object _ccxt_params: Dict = {} # Dict to specify which options each exchange implements # This defines defaults, which can be selectively overridden by subclasses using _ft_has # or by specifying them in the configuration. _ft_has_default: Dict = { "stoploss_on_exchange": False, "stop_price_param": "stopLossPrice", # Used for stoploss_on_exchange request "stop_price_prop": "stopLossPrice", # Used for stoploss_on_exchange response parsing "order_time_in_force": ["GTC"], "ohlcv_params": {}, "ohlcv_candle_limit": 500, "ohlcv_has_history": True, # Some exchanges (Kraken) don't provide history via ohlcv "ohlcv_partial_candle": True, "ohlcv_require_since": False, # Check https://github.com/ccxt/ccxt/issues/10767 for removal of ohlcv_volume_currency "ohlcv_volume_currency": "base", # "base" or "quote" "tickers_have_quoteVolume": True, "tickers_have_bid_ask": True, # bid / ask empty for fetch_tickers "tickers_have_price": True, "trades_pagination": "time", # Possible are "time" or "id" "trades_pagination_arg": "since", "l2_limit_range": None, "l2_limit_range_required": True, # Allow Empty L2 limit (kucoin) "mark_ohlcv_price": "mark", "mark_ohlcv_timeframe": "8h", "ccxt_futures_name": "swap", "needs_trading_fees": False, # use fetch_trading_fees to cache fees "order_props_in_contracts": ['amount', 'filled', 'remaining'], # Override createMarketBuyOrderRequiresPrice where ccxt has it wrong "marketOrderRequiresPrice": False, } _ft_has: Dict = {} _ft_has_futures: Dict = {} _supported_trading_mode_margin_pairs: List[Tuple[TradingMode, MarginMode]] = [ # TradingMode.SPOT always supported and not required in this list ] def __init__(self, config: Config, *, exchange_config: Optional[ExchangeConfig] = None, validate: bool = True, load_leverage_tiers: bool = False) -> None: """ Initializes this module with the given config, it does basic validation whether the specified exchange and pairs are valid. :return: None """ self._api: ccxt.Exchange self._api_async: ccxt_async.Exchange = None self._markets: Dict = {} self._trading_fees: Dict[str, Any] = {} self._leverage_tiers: Dict[str, List[Dict]] = {} # Lock event loop. This is necessary to avoid race-conditions when using force* commands # Due to funding fee fetching. self._loop_lock = Lock() self.loop = self._init_async_loop() self._config: Config = {} self._config.update(config) # Holds last candle refreshed time of each pair self._pairs_last_refresh_time: Dict[PairWithTimeframe, int] = {} # Timestamp of last markets refresh self._last_markets_refresh: int = 0 # Cache for 10 minutes ... self._cache_lock = Lock() self._fetch_tickers_cache: TTLCache = TTLCache(maxsize=2, ttl=60 * 10) # Cache values for 1800 to avoid frequent polling of the exchange for prices # Caching only applies to RPC methods, so prices for open trades are still # refreshed once every iteration. 
self._exit_rate_cache: TTLCache = TTLCache(maxsize=100, ttl=1800) self._entry_rate_cache: TTLCache = TTLCache(maxsize=100, ttl=1800) # Holds candles self._klines: Dict[PairWithTimeframe, DataFrame] = {} # Holds all open sell orders for dry_run self._dry_run_open_orders: Dict[str, Any] = {} if config['dry_run']: logger.info('Instance is running with dry_run enabled') logger.info(f"Using CCXT {ccxt.__version__}") exchange_conf: Dict[str, Any] = exchange_config if exchange_config else config['exchange'] remove_exchange_credentials(exchange_conf, config.get('dry_run', False)) self.log_responses = exchange_conf.get('log_responses', False) # Leverage properties self.trading_mode: TradingMode = config.get('trading_mode', TradingMode.SPOT) self.margin_mode: MarginMode = ( MarginMode(config.get('margin_mode')) if config.get('margin_mode') else MarginMode.NONE ) self.liquidation_buffer = config.get('liquidation_buffer', 0.05) # Deep merge ft_has with default ft_has options self._ft_has = deep_merge_dicts(self._ft_has, deepcopy(self._ft_has_default)) if self.trading_mode == TradingMode.FUTURES: self._ft_has = deep_merge_dicts(self._ft_has_futures, self._ft_has) if exchange_conf.get('_ft_has_params'): self._ft_has = deep_merge_dicts(exchange_conf.get('_ft_has_params'), self._ft_has) logger.info("Overriding exchange._ft_has with config params, result: %s", self._ft_has) # Assign this directly for easy access self._ohlcv_partial_candle = self._ft_has['ohlcv_partial_candle'] self._trades_pagination = self._ft_has['trades_pagination'] self._trades_pagination_arg = self._ft_has['trades_pagination_arg'] # Initialize ccxt objects ccxt_config = self._ccxt_config ccxt_config = deep_merge_dicts(exchange_conf.get('ccxt_config', {}), ccxt_config) ccxt_config = deep_merge_dicts(exchange_conf.get('ccxt_sync_config', {}), ccxt_config) self._api = self._init_ccxt(exchange_conf, ccxt_kwargs=ccxt_config) ccxt_async_config = self._ccxt_config ccxt_async_config = deep_merge_dicts(exchange_conf.get('ccxt_config', {}), ccxt_async_config) ccxt_async_config = deep_merge_dicts(exchange_conf.get('ccxt_async_config', {}), ccxt_async_config) self._api_async = self._init_ccxt( exchange_conf, ccxt_async, ccxt_kwargs=ccxt_async_config) logger.info(f'Using Exchange "{self.name}"') self.required_candle_call_count = 1 if validate: # Initial markets load self._load_markets() self.validate_config(config) self._startup_candle_count: int = config.get('startup_candle_count', 0) self.required_candle_call_count = self.validate_required_startup_candles( self._startup_candle_count, config.get('timeframe', '')) # Converts the interval provided in minutes in config to seconds self.markets_refresh_interval: int = exchange_conf.get( "markets_refresh_interval", 60) * 60 * 1000 if self.trading_mode != TradingMode.SPOT and load_leverage_tiers: self.fill_leverage_tiers() self.additional_exchange_init() def __del__(self): """ Destructor - clean up async stuff """ self.close() def close(self): logger.debug("Exchange object destroyed, closing async loop") if (self._api_async and inspect.iscoroutinefunction(self._api_async.close) and self._api_async.session): logger.debug("Closing async ccxt session.") self.loop.run_until_complete(self._api_async.close()) if self.loop and not self.loop.is_closed(): self.loop.close() def _init_async_loop(self) -> asyncio.AbstractEventLoop: loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) return loop def validate_config(self, config): # Check if timeframe is available 
self.validate_timeframes(config.get('timeframe')) # Check if all pairs are available self.validate_stakecurrency(config['stake_currency']) if not config['exchange'].get('skip_pair_validation'): self.validate_pairs(config['exchange']['pair_whitelist']) self.validate_ordertypes(config.get('order_types', {})) self.validate_order_time_in_force(config.get('order_time_in_force', {})) self.validate_trading_mode_and_margin_mode(self.trading_mode, self.margin_mode) self.validate_pricing(config['exit_pricing']) self.validate_pricing(config['entry_pricing']) def _init_ccxt(self, exchange_config: Dict[str, Any], ccxt_module: CcxtModuleType = ccxt, ccxt_kwargs: Dict = {}) -> ccxt.Exchange: """ Initialize ccxt with given config and return valid ccxt instance. """ # Find matching class for the given exchange name name = exchange_config['name'] if not is_exchange_known_ccxt(name, ccxt_module): raise OperationalException(f'Exchange {name} is not supported by ccxt') ex_config = { 'apiKey': exchange_config.get('key'), 'secret': exchange_config.get('secret'), 'password': exchange_config.get('password'), 'uid': exchange_config.get('uid', ''), } if ccxt_kwargs: logger.info('Applying additional ccxt config: %s', ccxt_kwargs) if self._ccxt_params: # Inject static options after the above output to not confuse users. ccxt_kwargs = deep_merge_dicts(self._ccxt_params, ccxt_kwargs) if ccxt_kwargs: ex_config.update(ccxt_kwargs) try: api = getattr(ccxt_module, name.lower())(ex_config) except (KeyError, AttributeError) as e: raise OperationalException(f'Exchange {name} is not supported') from e except ccxt.BaseError as e: raise OperationalException(f"Initialization of ccxt failed. Reason: {e}") from e return api @property def _ccxt_config(self) -> Dict: # Parameters to add directly to ccxt sync/async initialization. if self.trading_mode == TradingMode.MARGIN: return { "options": { "defaultType": "margin" } } elif self.trading_mode == TradingMode.FUTURES: return { "options": { "defaultType": self._ft_has["ccxt_futures_name"] } } else: return {} @property def name(self) -> str: """exchange Name (from ccxt)""" return self._api.name @property def id(self) -> str: """exchange ccxt id""" return self._api.id @property def timeframes(self) -> List[str]: return list((self._api.timeframes or {}).keys()) @property def markets(self) -> Dict[str, Any]: """exchange ccxt markets""" if not self._markets: logger.info("Markets were not loaded. Loading them now..") self._load_markets() return self._markets @property def precisionMode(self) -> int: """exchange ccxt precisionMode""" return self._api.precisionMode def additional_exchange_init(self) -> None: """ Additional exchange initialization logic. .api will be available at this point. Must be overridden in child methods if required. """ pass def _log_exchange_response(self, endpoint, response) -> None: """ Log exchange responses """ if self.log_responses: logger.info(f"API {endpoint}: {response}") def ohlcv_candle_limit( self, timeframe: str, candle_type: CandleType, since_ms: Optional[int] = None) -> int: """ Exchange ohlcv candle limit Uses ohlcv_candle_limit_per_timeframe if the exchange has different limits per timeframe (e.g. 
bittrex), otherwise falls back to ohlcv_candle_limit :param timeframe: Timeframe to check :param candle_type: Candle-type :param since_ms: Starting timestamp :return: Candle limit as integer """ return int(self._ft_has.get('ohlcv_candle_limit_per_timeframe', {}).get( timeframe, self._ft_has.get('ohlcv_candle_limit'))) def get_markets(self, base_currencies: List[str] = [], quote_currencies: List[str] = [], spot_only: bool = False, margin_only: bool = False, futures_only: bool = False, tradable_only: bool = True, active_only: bool = False) -> Dict[str, Any]: """ Return exchange ccxt markets, filtered out by base currency and quote currency if this was requested in parameters. """ markets = self.markets if not markets: raise OperationalException("Markets were not loaded.") if base_currencies: markets = {k: v for k, v in markets.items() if v['base'] in base_currencies} if quote_currencies: markets = {k: v for k, v in markets.items() if v['quote'] in quote_currencies} if tradable_only: markets = {k: v for k, v in markets.items() if self.market_is_tradable(v)} if spot_only: markets = {k: v for k, v in markets.items() if self.market_is_spot(v)} if margin_only: markets = {k: v for k, v in markets.items() if self.market_is_margin(v)} if futures_only: markets = {k: v for k, v in markets.items() if self.market_is_future(v)} if active_only: markets = {k: v for k, v in markets.items() if market_is_active(v)} return markets def get_quote_currencies(self) -> List[str]: """ Return a list of supported quote currencies """ markets = self.markets return sorted(set([x['quote'] for _, x in markets.items()])) def get_pair_quote_currency(self, pair: str) -> str: """ Return a pair's quote currency (base/quote:settlement) """ return self.markets.get(pair, {}).get('quote', '') def get_pair_base_currency(self, pair: str) -> str: """ Return a pair's base currency (base/quote:settlement) """ return self.markets.get(pair, {}).get('base', '') def market_is_future(self, market: Dict[str, Any]) -> bool: return ( market.get(self._ft_has["ccxt_futures_name"], False) is True and market.get('linear', False) is True ) def market_is_spot(self, market: Dict[str, Any]) -> bool: return market.get('spot', False) is True def market_is_margin(self, market: Dict[str, Any]) -> bool: return market.get('margin', False) is True def market_is_tradable(self, market: Dict[str, Any]) -> bool: """ Check if the market symbol is tradable by Freqtrade. 
Ensures that Configured mode aligns to """ return ( market.get('quote', None) is not None and market.get('base', None) is not None and (self.precisionMode != TICK_SIZE # Too low precision will falsify calculations or market.get('precision', {}).get('price') > 1e-11) and ((self.trading_mode == TradingMode.SPOT and self.market_is_spot(market)) or (self.trading_mode == TradingMode.MARGIN and self.market_is_margin(market)) or (self.trading_mode == TradingMode.FUTURES and self.market_is_future(market))) ) def klines(self, pair_interval: PairWithTimeframe, copy: bool = True) -> DataFrame: if pair_interval in self._klines: return self._klines[pair_interval].copy() if copy else self._klines[pair_interval] else: return DataFrame() def get_contract_size(self, pair: str) -> Optional[float]: if self.trading_mode == TradingMode.FUTURES: market = self.markets.get(pair, {}) contract_size: float = 1.0 if not market: return None if market.get('contractSize') is not None: # ccxt has contractSize in markets as string contract_size = float(market['contractSize']) return contract_size else: return 1 def _trades_contracts_to_amount(self, trades: List) -> List: if len(trades) > 0 and 'symbol' in trades[0]: contract_size = self.get_contract_size(trades[0]['symbol']) if contract_size != 1: for trade in trades: trade['amount'] = trade['amount'] * contract_size return trades def _order_contracts_to_amount(self, order: Dict) -> Dict: if 'symbol' in order and order['symbol'] is not None: contract_size = self.get_contract_size(order['symbol']) if contract_size != 1: for prop in self._ft_has.get('order_props_in_contracts', []): if prop in order and order[prop] is not None: order[prop] = order[prop] * contract_size return order def _amount_to_contracts(self, pair: str, amount: float) -> float: contract_size = self.get_contract_size(pair) return amount_to_contracts(amount, contract_size) def _contracts_to_amount(self, pair: str, num_contracts: float) -> float: contract_size = self.get_contract_size(pair) return contracts_to_amount(num_contracts, contract_size) def amount_to_contract_precision(self, pair: str, amount: float) -> float: """ Helper wrapper around amount_to_contract_precision """ contract_size = self.get_contract_size(pair) return amount_to_contract_precision(amount, self.get_precision_amount(pair), self.precisionMode, contract_size) def _load_async_markets(self, reload: bool = False) -> None: try: if self._api_async: self.loop.run_until_complete( self._api_async.load_markets(reload=reload, params={})) except (asyncio.TimeoutError, ccxt.BaseError) as e: logger.warning('Could not load async markets. 
Reason: %s', e) return def _load_markets(self) -> None: """ Initialize markets both sync and async """ try: self._markets = self._api.load_markets(params={}) self._load_async_markets() self._last_markets_refresh = dt_ts() if self._ft_has['needs_trading_fees']: self._trading_fees = self.fetch_trading_fees() except ccxt.BaseError: logger.exception('Unable to initialize markets.') def reload_markets(self) -> None: """Reload markets both sync and async if refresh interval has passed """ # Check whether markets have to be reloaded if (self._last_markets_refresh > 0) and ( self._last_markets_refresh + self.markets_refresh_interval > dt_ts()): return None logger.debug("Performing scheduled market reload..") try: self._markets = self._api.load_markets(reload=True, params={}) # Also reload async markets to avoid issues with newly listed pairs self._load_async_markets(reload=True) self._last_markets_refresh = dt_ts() self.fill_leverage_tiers() except ccxt.BaseError: logger.exception("Could not reload markets.") def validate_stakecurrency(self, stake_currency: str) -> None: """ Checks stake-currency against available currencies on the exchange. Only runs on startup. If markets have not been loaded, there's been a problem with the connection to the exchange. :param stake_currency: Stake-currency to validate :raise: OperationalException if stake-currency is not available. """ if not self._markets: raise OperationalException( 'Could not load markets, therefore cannot start. ' 'Please investigate the above error for more details.' ) quote_currencies = self.get_quote_currencies() if stake_currency not in quote_currencies: raise OperationalException( f"{stake_currency} is not available as stake on {self.name}. " f"Available currencies are: {', '.join(quote_currencies)}") def validate_pairs(self, pairs: List[str]) -> None: """ Checks if all given pairs are tradable on the current exchange. :param pairs: list of pairs :raise: OperationalException if one pair is not available :return: None """ if not self.markets: logger.warning('Unable to validate pairs (assuming they are correct).') return extended_pairs = expand_pairlist(pairs, list(self.markets), keep_invalid=True) invalid_pairs = [] for pair in extended_pairs: # Note: ccxt has BaseCurrency/QuoteCurrency format for pairs if self.markets and pair not in self.markets: raise OperationalException( f'Pair {pair} is not available on {self.name} {self.trading_mode.value}. ' f'Please remove {pair} from your whitelist.') # From ccxt Documentation: # markets.info: An associative array of non-common market properties, # including fees, rates, limits and other general market information. # The internal info array is different for each particular market, # its contents depend on the exchange. # It can also be a string or similar ... so we need to verify that first. elif (isinstance(self.markets[pair].get('info'), dict) and self.markets[pair].get('info', {}).get('prohibitedIn', False)): # Warn users about restricted pairs in whitelist. # We cannot determine reliably if Users are affected. logger.warning(f"Pair {pair} is restricted for some users on this exchange." f"Please check if you are impacted by this restriction " f"on the exchange and eventually remove {pair} from your whitelist.") if (self._config['stake_currency'] and self.get_pair_quote_currency(pair) != self._config['stake_currency']): invalid_pairs.append(pair) if invalid_pairs: raise OperationalException( f"Stake-currency '{self._config['stake_currency']}' not compatible with " f"pair-whitelist. 
Please remove the following pairs: {invalid_pairs}") def get_valid_pair_combination(self, curr_1: str, curr_2: str) -> str: """ Get valid pair combination of curr_1 and curr_2 by trying both combinations. """ for pair in [f"{curr_1}/{curr_2}", f"{curr_2}/{curr_1}"]: if pair in self.markets and self.markets[pair].get('active'): return pair raise ValueError(f"Could not combine {curr_1} and {curr_2} to get a valid pair.") def validate_timeframes(self, timeframe: Optional[str]) -> None: """ Check if timeframe from config is a supported timeframe on the exchange """ if not hasattr(self._api, "timeframes") or self._api.timeframes is None: # If timeframes attribute is missing (or is None), the exchange probably # has no fetchOHLCV method. # Therefore we also show that. raise OperationalException( f"The ccxt library does not provide the list of timeframes " f"for the exchange {self.name} and this exchange " f"is therefore not supported. ccxt fetchOHLCV: {self.exchange_has('fetchOHLCV')}") if timeframe and (timeframe not in self.timeframes): raise OperationalException( f"Invalid timeframe '{timeframe}'. This exchange supports: {self.timeframes}") if timeframe and timeframe_to_minutes(timeframe) < 1: raise OperationalException("Timeframes < 1m are currently not supported by Freqtrade.") def validate_ordertypes(self, order_types: Dict) -> None: """ Checks if order-types configured in strategy/config are supported """ if any(v == 'market' for k, v in order_types.items()): if not self.exchange_has('createMarketOrder'): raise OperationalException( f'Exchange {self.name} does not support market orders.') self.validate_stop_ordertypes(order_types) def validate_stop_ordertypes(self, order_types: Dict) -> None: """ Validate stoploss order types """ if (order_types.get("stoploss_on_exchange") and not self._ft_has.get("stoploss_on_exchange", False)): raise OperationalException( f'On exchange stoploss is not supported for {self.name}.' ) if self.trading_mode == TradingMode.FUTURES: price_mapping = self._ft_has.get('stop_price_type_value_mapping', {}).keys() if ( order_types.get("stoploss_on_exchange", False) is True and 'stoploss_price_type' in order_types and order_types['stoploss_price_type'] not in price_mapping ): raise OperationalException( f'On exchange stoploss price type is not supported for {self.name}.' ) def validate_pricing(self, pricing: Dict) -> None: if pricing.get('use_order_book', False) and not self.exchange_has('fetchL2OrderBook'): raise OperationalException(f'Orderbook not available for {self.name}.') if (not pricing.get('use_order_book', False) and ( not self.exchange_has('fetchTicker') or not self._ft_has['tickers_have_price'])): raise OperationalException(f'Ticker pricing not available for {self.name}.') def validate_order_time_in_force(self, order_time_in_force: Dict) -> None: """ Checks if order time in force configured in strategy/config are supported """ if any(v.upper() not in self._ft_has["order_time_in_force"] for k, v in order_time_in_force.items()): raise OperationalException( f'Time in force policies are not supported for {self.name} yet.') def validate_required_startup_candles(self, startup_candles: int, timeframe: str) -> int: """ Checks if required startup_candles is more than ohlcv_candle_limit(). Requires a grace-period of 5 candles - so a startup-period up to 494 is allowed by default. 
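        Illustrative arithmetic (assuming a hypothetical 500-candle limit): startup_candles=999
        gives candle_count=1000 and therefore required_candle_call_count = 2, while
        startup_candles=1000 gives candle_count=1001 and 3 calls, since one extra candle is
        added for the still-open candle before dividing by the limit.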
""" candle_limit = self.ohlcv_candle_limit( timeframe, self._config['candle_type_def'], int(date_minus_candles(timeframe, startup_candles).timestamp() * 1000) if timeframe else None) # Require one more candle - to account for the still open candle. candle_count = startup_candles + 1 # Allow 5 calls to the exchange per pair required_candle_call_count = int( (candle_count / candle_limit) + (0 if candle_count % candle_limit == 0 else 1)) if self._ft_has['ohlcv_has_history']: if required_candle_call_count > 5: # Only allow 5 calls per pair to somewhat limit the impact raise OperationalException( f"This strategy requires {startup_candles} candles to start, " "which is more than 5x " f"the amount of candles {self.name} provides for {timeframe}.") elif required_candle_call_count > 1: raise OperationalException( f"This strategy requires {startup_candles} candles to start, which is more than " f"the amount of candles {self.name} provides for {timeframe}.") if required_candle_call_count > 1: logger.warning(f"Using {required_candle_call_count} calls to get OHLCV. " f"This can result in slower operations for the bot. Please check " f"if you really need {startup_candles} candles for your strategy") return required_candle_call_count def validate_trading_mode_and_margin_mode( self, trading_mode: TradingMode, margin_mode: Optional[MarginMode] # Only None when trading_mode = TradingMode.SPOT ): """ Checks if freqtrade can perform trades using the configured trading mode(Margin, Futures) and MarginMode(Cross, Isolated) Throws OperationalException: If the trading_mode/margin_mode type are not supported by freqtrade on this exchange """ if trading_mode != TradingMode.SPOT and ( (trading_mode, margin_mode) not in self._supported_trading_mode_margin_pairs ): mm_value = margin_mode and margin_mode.value raise OperationalException( f"Freqtrade does not support {mm_value} {trading_mode.value} on {self.name}" ) def get_option(self, param: str, default: Optional[Any] = None) -> Any: """ Get parameter value from _ft_has """ return self._ft_has.get(param, default) def exchange_has(self, endpoint: str) -> bool: """ Checks if exchange implements a specific API endpoint. Wrapper around ccxt 'has' attribute :param endpoint: Name of endpoint (e.g. 'fetchOHLCV', 'fetchTickers') :return: bool """ return endpoint in self._api.has and self._api.has[endpoint] def get_precision_amount(self, pair: str) -> Optional[float]: """ Returns the amount precision of the exchange. :param pair: Pair to get precision for :return: precision for amount or None. Must be used in combination with precisionMode """ return self.markets.get(pair, {}).get('precision', {}).get('amount', None) def get_precision_price(self, pair: str) -> Optional[float]: """ Returns the price precision of the exchange. :param pair: Pair to get precision for :return: precision for price or None. Must be used in combination with precisionMode """ return self.markets.get(pair, {}).get('precision', {}).get('price', None) def amount_to_precision(self, pair: str, amount: float) -> float: """ Returns the amount to buy or sell to a precision the Exchange accepts """ return amount_to_precision(amount, self.get_precision_amount(pair), self.precisionMode) def price_to_precision(self, pair: str, price: float, *, rounding_mode: int = ROUND) -> float: """ Returns the price rounded to the precision the Exchange accepts. The default price_rounding_mode in conf is ROUND. For stoploss calculations, must use ROUND_UP for longs, and ROUND_DOWN for shorts. 
""" return price_to_precision(price, self.get_precision_price(pair), self.precisionMode, rounding_mode=rounding_mode) def price_get_one_pip(self, pair: str, price: float) -> float: """ Get's the "1 pip" value for this pair. Used in PriceFilter to calculate the 1pip movements. """ precision = self.markets[pair]['precision']['price'] if self.precisionMode == TICK_SIZE: return precision else: return 1 / pow(10, precision) def get_min_pair_stake_amount( self, pair: str, price: float, stoploss: float, leverage: Optional[float] = 1.0 ) -> Optional[float]: return self._get_stake_amount_limit(pair, price, stoploss, 'min', leverage) def get_max_pair_stake_amount(self, pair: str, price: float, leverage: float = 1.0) -> float: max_stake_amount = self._get_stake_amount_limit(pair, price, 0.0, 'max', leverage) if max_stake_amount is None: # * Should never be executed raise OperationalException(f'{self.name}.get_max_pair_stake_amount should' 'never set max_stake_amount to None') return max_stake_amount def _get_stake_amount_limit( self, pair: str, price: float, stoploss: float, limit: Literal['min', 'max'], leverage: Optional[float] = 1.0 ) -> Optional[float]: isMin = limit == 'min' try: market = self.markets[pair] except KeyError: raise ValueError(f"Can't get market information for symbol {pair}") if isMin: # reserve some percent defined in config (5% default) + stoploss margin_reserve: float = 1.0 + self._config.get('amount_reserve_percent', DEFAULT_AMOUNT_RESERVE_PERCENT) stoploss_reserve = ( margin_reserve / (1 - abs(stoploss)) if abs(stoploss) != 1 else 1.5 ) # it should not be more than 50% stoploss_reserve = max(min(stoploss_reserve, 1.5), 1) else: margin_reserve = 1.0 stoploss_reserve = 1.0 stake_limits = [] limits = market['limits'] if (limits['cost'][limit] is not None): stake_limits.append( self._contracts_to_amount(pair, limits['cost'][limit]) * stoploss_reserve ) if (limits['amount'][limit] is not None): stake_limits.append( self._contracts_to_amount(pair, limits['amount'][limit]) * price * margin_reserve ) if not stake_limits: return None if isMin else float('inf') # The value returned should satisfy both limits: for amount (base currency) and # for cost (quote, stake currency), so max() is used here. # See also #2575 at github. 
return self._get_stake_amount_considering_leverage( max(stake_limits) if isMin else min(stake_limits), leverage or 1.0 ) def _get_stake_amount_considering_leverage(self, stake_amount: float, leverage: float) -> float: """ Takes the minimum stake amount for a pair with no leverage and returns the minimum stake amount when leverage is considered :param stake_amount: The stake amount for a pair before leverage is considered :param leverage: The amount of leverage being used on the current trade """ return stake_amount / leverage # Dry-run methods def create_dry_run_order(self, pair: str, ordertype: str, side: str, amount: float, rate: float, leverage: float, params: Dict = {}, stop_loss: bool = False) -> Dict[str, Any]: now = dt_now() order_id = f'dry_run_{side}_{pair}_{now.timestamp()}' # Rounding here must respect to contract sizes _amount = self._contracts_to_amount( pair, self.amount_to_precision(pair, self._amount_to_contracts(pair, amount))) dry_order: Dict[str, Any] = { 'id': order_id, 'symbol': pair, 'price': rate, 'average': rate, 'amount': _amount, 'cost': _amount * rate, 'type': ordertype, 'side': side, 'filled': 0, 'remaining': _amount, 'datetime': now.strftime('%Y-%m-%dT%H:%M:%S.%fZ'), 'timestamp': dt_ts(now), 'status': "open", 'fee': None, 'info': {}, 'leverage': leverage } if stop_loss: dry_order["info"] = {"stopPrice": dry_order["price"]} dry_order[self._ft_has['stop_price_prop']] = dry_order["price"] # Workaround to avoid filling stoploss orders immediately dry_order["ft_order_type"] = "stoploss" orderbook: Optional[OrderBook] = None if self.exchange_has('fetchL2OrderBook'): orderbook = self.fetch_l2_order_book(pair, 20) if ordertype == "limit" and orderbook: # Allow a 1% price difference allowed_diff = 0.01 if self._dry_is_price_crossed(pair, side, rate, orderbook, allowed_diff): logger.info( f"Converted order {pair} to market order due to price {rate} crossing spread " f"by more than {allowed_diff:.2%}.") dry_order["type"] = "market" if dry_order["type"] == "market" and not dry_order.get("ft_order_type"): # Update market order pricing average = self.get_dry_market_fill_price(pair, side, amount, rate, orderbook) dry_order.update({ 'average': average, 'filled': _amount, 'remaining': 0.0, 'status': "closed", 'cost': (dry_order['amount'] * average) }) # market orders will always incurr taker fees dry_order = self.add_dry_order_fee(pair, dry_order, 'taker') dry_order = self.check_dry_limit_order_filled( dry_order, immediate=True, orderbook=orderbook) self._dry_run_open_orders[dry_order["id"]] = dry_order # Copy order and close it - so the returned order is open unless it's a market order return dry_order def add_dry_order_fee( self, pair: str, dry_order: Dict[str, Any], taker_or_maker: MakerTaker, ) -> Dict[str, Any]: fee = self.get_fee(pair, taker_or_maker=taker_or_maker) dry_order.update({ 'fee': { 'currency': self.get_pair_quote_currency(pair), 'cost': dry_order['cost'] * fee, 'rate': fee } }) return dry_order def get_dry_market_fill_price(self, pair: str, side: str, amount: float, rate: float, orderbook: Optional[OrderBook]) -> float: """ Get the market order fill price based on orderbook interpolation """ if self.exchange_has('fetchL2OrderBook'): if not orderbook: orderbook = self.fetch_l2_order_book(pair, 20) ob_type: OBLiteral = 'asks' if side == 'buy' else 'bids' slippage = 0.05 max_slippage_val = rate * ((1 + slippage) if side == 'buy' else (1 - slippage)) remaining_amount = amount filled_value = 0.0 book_entry_price = 0.0 for book_entry in orderbook[ob_type]: 
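                # Illustrative walk-through (hypothetical book): buying amount=3
                # against asks [[100.0, 2.0], [101.0, 5.0]] consumes the whole
                # first level (2 * 100) plus 1 unit of the second (1 * 101),
                # so filled_value=301.0 and the forecast average is 301/3 = 100.33
                # before the slippage cap below is applied.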
book_entry_price = book_entry[0] book_entry_coin_volume = book_entry[1] if remaining_amount > 0: if remaining_amount < book_entry_coin_volume: # Orderbook at this slot bigger than remaining amount filled_value += remaining_amount * book_entry_price break else: filled_value += book_entry_coin_volume * book_entry_price remaining_amount -= book_entry_coin_volume else: break else: # If remaining_amount wasn't consumed completely (break was not called) filled_value += remaining_amount * book_entry_price forecast_avg_filled_price = max(filled_value, 0) / amount # Limit max. slippage to specified value if side == 'buy': forecast_avg_filled_price = min(forecast_avg_filled_price, max_slippage_val) else: forecast_avg_filled_price = max(forecast_avg_filled_price, max_slippage_val) return self.price_to_precision(pair, forecast_avg_filled_price) return rate def _dry_is_price_crossed(self, pair: str, side: str, limit: float, orderbook: Optional[OrderBook] = None, offset: float = 0.0) -> bool: if not self.exchange_has('fetchL2OrderBook'): return True if not orderbook: orderbook = self.fetch_l2_order_book(pair, 1) try: if side == 'buy': price = orderbook['asks'][0][0] if limit * (1 - offset) >= price: return True else: price = orderbook['bids'][0][0] if limit * (1 + offset) <= price: return True except IndexError: # Ignore empty orderbooks when filling - can be filled with the next iteration. pass return False def check_dry_limit_order_filled( self, order: Dict[str, Any], immediate: bool = False, orderbook: Optional[OrderBook] = None) -> Dict[str, Any]: """ Check dry-run limit order fill and update fee (if it filled). """ if (order['status'] != "closed" and order['type'] in ["limit"] and not order.get('ft_order_type')): pair = order['symbol'] if self._dry_is_price_crossed(pair, order['side'], order['price'], orderbook): order.update({ 'status': 'closed', 'filled': order['amount'], 'remaining': 0, }) self.add_dry_order_fee( pair, order, 'taker' if immediate else 'maker', ) return order def fetch_dry_run_order(self, order_id) -> Dict[str, Any]: """ Return dry-run order Only call if running in dry-run mode. """ try: order = self._dry_run_open_orders[order_id] order = self.check_dry_limit_order_filled(order) return order except KeyError as e: order = Order.order_by_id(order_id) if order: ccxt_order = order.to_ccxt_object(self._ft_has['stop_price_prop']) self._dry_run_open_orders[order_id] = ccxt_order return ccxt_order # Gracefully handle errors with dry-run orders. raise InvalidOrderException( f'Tried to get an invalid dry-run-order (id: {order_id}). 
Message: {e}') from e # Order handling def _lev_prep(self, pair: str, leverage: float, side: BuySell, accept_fail: bool = False): if self.trading_mode != TradingMode.SPOT: self.set_margin_mode(pair, self.margin_mode, accept_fail) self._set_leverage(leverage, pair, accept_fail) def _get_params( self, side: BuySell, ordertype: str, leverage: float, reduceOnly: bool, time_in_force: str = 'GTC', ) -> Dict: params = self._params.copy() if time_in_force != 'GTC' and ordertype != 'market': params.update({'timeInForce': time_in_force.upper()}) if reduceOnly: params.update({'reduceOnly': True}) return params def _order_needs_price(self, ordertype: str) -> bool: return ( ordertype != 'market' or self._api.options.get("createMarketBuyOrderRequiresPrice", False) or self._ft_has.get('marketOrderRequiresPrice', False) ) def create_order( self, *, pair: str, ordertype: str, side: BuySell, amount: float, rate: float, leverage: float, reduceOnly: bool = False, time_in_force: str = 'GTC', ) -> Dict: if self._config['dry_run']: dry_order = self.create_dry_run_order( pair, ordertype, side, amount, self.price_to_precision(pair, rate), leverage) return dry_order params = self._get_params(side, ordertype, leverage, reduceOnly, time_in_force) try: # Set the precision for amount and price(rate) as accepted by the exchange amount = self.amount_to_precision(pair, self._amount_to_contracts(pair, amount)) needs_price = self._order_needs_price(ordertype) rate_for_order = self.price_to_precision(pair, rate) if needs_price else None if not reduceOnly: self._lev_prep(pair, leverage, side) order = self._api.create_order( pair, ordertype, side, amount, rate_for_order, params, ) if order.get('status') is None: # Map empty status to open. order['status'] = 'open' if order.get('type') is None: order['type'] = ordertype self._log_exchange_response('create_order', order) order = self._order_contracts_to_amount(order) return order except ccxt.InsufficientFunds as e: raise InsufficientFundsError( f'Insufficient funds to create {ordertype} {side} order on market {pair}. ' f'Tried to {side} amount {amount} at rate {rate}.' f'Message: {e}') from e except ccxt.InvalidOrder as e: raise InvalidOrderException( f'Could not create {ordertype} {side} order on market {pair}. ' f'Tried to {side} amount {amount} at rate {rate}. ' f'Message: {e}') from e except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.NetworkError, ccxt.ExchangeError) as e: raise TemporaryError( f'Could not place {side} order due to {e.__class__.__name__}. Message: {e}') from e except ccxt.BaseError as e: raise OperationalException(e) from e def stoploss_adjust(self, stop_loss: float, order: Dict, side: str) -> bool: """ Verify stop_loss against stoploss-order value (limit or price) Returns True if adjustment is necessary. 
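        Illustrative example (assuming the common 'stopPrice' property): for a long
        position the stop order uses side="sell", so a stored stopPrice of 95.0 and a
        new stop_loss of 97.0 returns True (the order should be replaced), while a new
        stop_loss of 94.0 returns False.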
""" if not self._ft_has.get('stoploss_on_exchange'): raise OperationalException(f"stoploss is not implemented for {self.name}.") price_param = self._ft_has['stop_price_prop'] return ( order.get(price_param, None) is None or ((side == "sell" and stop_loss > float(order[price_param])) or (side == "buy" and stop_loss < float(order[price_param]))) ) def _get_stop_order_type(self, user_order_type) -> Tuple[str, str]: available_order_Types: Dict[str, str] = self._ft_has["stoploss_order_types"] if user_order_type in available_order_Types.keys(): ordertype = available_order_Types[user_order_type] else: # Otherwise pick only one available ordertype = list(available_order_Types.values())[0] user_order_type = list(available_order_Types.keys())[0] return ordertype, user_order_type def _get_stop_limit_rate(self, stop_price: float, order_types: Dict, side: str) -> float: # Limit price threshold: As limit price should always be below stop-price limit_price_pct = order_types.get('stoploss_on_exchange_limit_ratio', 0.99) if side == "sell": limit_rate = stop_price * limit_price_pct else: limit_rate = stop_price * (2 - limit_price_pct) bad_stop_price = ((stop_price < limit_rate) if side == "sell" else (stop_price > limit_rate)) # Ensure rate is less than stop price if bad_stop_price: # This can for example happen if the stop / liquidation price is set to 0 # Which is possible if a market-order closes right away. # The InvalidOrderException will bubble up to exit_positions, where it will be # handled gracefully. raise InvalidOrderException( "In stoploss limit order, stop price should be more than limit price. " f"Stop price: {stop_price}, Limit price: {limit_rate}, " f"Limit Price pct: {limit_price_pct}" ) return limit_rate def _get_stop_params(self, side: BuySell, ordertype: str, stop_price: float) -> Dict: params = self._params.copy() # Verify if stopPrice works for your exchange, else configure stop_price_param params.update({self._ft_has['stop_price_param']: stop_price}) return params @retrier(retries=0) def create_stoploss(self, pair: str, amount: float, stop_price: float, order_types: Dict, side: BuySell, leverage: float) -> Dict: """ creates a stoploss order. requires `_ft_has['stoploss_order_types']` to be set as a dict mapping limit and market to the corresponding exchange type. The precise ordertype is determined by the order_types dict or exchange default. The exception below should never raise, since we disallow starting the bot in validate_ordertypes() This may work with a limited number of other exchanges, but correct working needs to be tested individually. WARNING: setting `stoploss_on_exchange` to True will NOT auto-enable stoploss on exchange. `stoploss_adjust` must still be implemented for this to work. 
""" if not self._ft_has['stoploss_on_exchange']: raise OperationalException(f"stoploss is not implemented for {self.name}.") user_order_type = order_types.get('stoploss', 'market') ordertype, user_order_type = self._get_stop_order_type(user_order_type) round_mode = ROUND_DOWN if side == 'buy' else ROUND_UP stop_price_norm = self.price_to_precision(pair, stop_price, rounding_mode=round_mode) limit_rate = None if user_order_type == 'limit': limit_rate = self._get_stop_limit_rate(stop_price, order_types, side) limit_rate = self.price_to_precision(pair, limit_rate, rounding_mode=round_mode) if self._config['dry_run']: dry_order = self.create_dry_run_order( pair, ordertype, side, amount, stop_price_norm, stop_loss=True, leverage=leverage, ) return dry_order try: params = self._get_stop_params(side=side, ordertype=ordertype, stop_price=stop_price_norm) if self.trading_mode == TradingMode.FUTURES: params['reduceOnly'] = True if 'stoploss_price_type' in order_types and 'stop_price_type_field' in self._ft_has: price_type = self._ft_has['stop_price_type_value_mapping'][ order_types.get('stoploss_price_type', PriceType.LAST)] params[self._ft_has['stop_price_type_field']] = price_type amount = self.amount_to_precision(pair, self._amount_to_contracts(pair, amount)) self._lev_prep(pair, leverage, side, accept_fail=True) order = self._api.create_order(symbol=pair, type=ordertype, side=side, amount=amount, price=limit_rate, params=params) self._log_exchange_response('create_stoploss_order', order) order = self._order_contracts_to_amount(order) logger.info(f"stoploss {user_order_type} order added for {pair}. " f"stop price: {stop_price}. limit: {limit_rate}") return order except ccxt.InsufficientFunds as e: raise InsufficientFundsError( f'Insufficient funds to create {ordertype} sell order on market {pair}. ' f'Tried to sell amount {amount} at rate {limit_rate}. ' f'Message: {e}') from e except ccxt.InvalidOrder as e: # Errors: # `Order would trigger immediately.` raise InvalidOrderException( f'Could not create {ordertype} sell order on market {pair}. ' f'Tried to sell amount {amount} at rate {limit_rate}. ' f'Message: {e}') from e except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.NetworkError, ccxt.ExchangeError) as e: raise TemporaryError( f"Could not place stoploss order due to {e.__class__.__name__}. " f"Message: {e}") from e except ccxt.BaseError as e: raise OperationalException(e) from e @retrier(retries=API_FETCH_ORDER_RETRY_COUNT) def fetch_order(self, order_id: str, pair: str, params: Dict = {}) -> Dict: if self._config['dry_run']: return self.fetch_dry_run_order(order_id) try: order = self._api.fetch_order(order_id, pair, params=params) self._log_exchange_response('fetch_order', order) order = self._order_contracts_to_amount(order) return order except ccxt.OrderNotFound as e: raise RetryableOrderError( f'Order not found (pair: {pair} id: {order_id}). Message: {e}') from e except ccxt.InvalidOrder as e: raise InvalidOrderException( f'Tried to get an invalid order (pair: {pair} id: {order_id}). Message: {e}') from e except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.NetworkError, ccxt.ExchangeError) as e: raise TemporaryError( f'Could not get order due to {e.__class__.__name__}. 
Message: {e}') from e except ccxt.BaseError as e: raise OperationalException(e) from e def fetch_stoploss_order(self, order_id: str, pair: str, params: Dict = {}) -> Dict: return self.fetch_order(order_id, pair, params) def fetch_order_or_stoploss_order(self, order_id: str, pair: str, stoploss_order: bool = False) -> Dict: """ Simple wrapper calling either fetch_order or fetch_stoploss_order depending on the stoploss_order parameter :param order_id: OrderId to fetch order :param pair: Pair corresponding to order_id :param stoploss_order: If true, uses fetch_stoploss_order, otherwise fetch_order. """ if stoploss_order: return self.fetch_stoploss_order(order_id, pair) return self.fetch_order(order_id, pair) def check_order_canceled_empty(self, order: Dict) -> bool: """ Verify if an order has been cancelled without being partially filled :param order: Order dict as returned from fetch_order() :return: True if order has been cancelled without being filled, False otherwise. """ return (order.get('status') in NON_OPEN_EXCHANGE_STATES and order.get('filled') == 0.0) @retrier def cancel_order(self, order_id: str, pair: str, params: Dict = {}) -> Dict: if self._config['dry_run']: try: order = self.fetch_dry_run_order(order_id) order.update({'status': 'canceled', 'filled': 0.0, 'remaining': order['amount']}) return order except InvalidOrderException: return {} try: order = self._api.cancel_order(order_id, pair, params=params) self._log_exchange_response('cancel_order', order) order = self._order_contracts_to_amount(order) return order except ccxt.InvalidOrder as e: raise InvalidOrderException( f'Could not cancel order. Message: {e}') from e except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.NetworkError, ccxt.ExchangeError) as e: raise TemporaryError( f'Could not cancel order due to {e.__class__.__name__}. Message: {e}') from e except ccxt.BaseError as e: raise OperationalException(e) from e def cancel_stoploss_order(self, order_id: str, pair: str, params: Dict = {}) -> Dict: return self.cancel_order(order_id, pair, params) def is_cancel_order_result_suitable(self, corder) -> bool: if not isinstance(corder, dict): return False required = ('fee', 'status', 'amount') return all(corder.get(k, None) is not None for k in required) def cancel_order_with_result(self, order_id: str, pair: str, amount: float) -> Dict: """ Cancel order returning a result. Creates a fake result if cancel order returns a non-usable result and fetch_order does not work (certain exchanges don't return cancelled orders) :param order_id: Orderid to cancel :param pair: Pair corresponding to order_id :param amount: Amount to use for fake response :return: Result from either cancel_order if usable, or fetch_order """ try: corder = self.cancel_order(order_id, pair) if self.is_cancel_order_result_suitable(corder): return corder except InvalidOrderException: logger.warning(f"Could not cancel order {order_id} for {pair}.") try: order = self.fetch_order(order_id, pair) except InvalidOrderException: logger.warning(f"Could not fetch cancelled order {order_id}.") order = { 'id': order_id, 'status': 'canceled', 'amount': amount, 'filled': 0.0, 'fee': {}, 'info': {} } return order def cancel_stoploss_order_with_result(self, order_id: str, pair: str, amount: float) -> Dict: """ Cancel stoploss order returning a result. 
Creates a fake result if cancel order returns a non-usable result and fetch_order does not work (certain exchanges don't return cancelled orders) :param order_id: stoploss-order-id to cancel :param pair: Pair corresponding to order_id :param amount: Amount to use for fake response :return: Result from either cancel_order if usable, or fetch_order """ corder = self.cancel_stoploss_order(order_id, pair) if self.is_cancel_order_result_suitable(corder): return corder try: order = self.fetch_stoploss_order(order_id, pair) except InvalidOrderException: logger.warning(f"Could not fetch cancelled stoploss order {order_id}.") order = {'fee': {}, 'status': 'canceled', 'amount': amount, 'info': {}} return order @retrier def get_balances(self) -> dict: try: balances = self._api.fetch_balance() # Remove additional info from ccxt results balances.pop("info", None) balances.pop("free", None) balances.pop("total", None) balances.pop("used", None) return balances except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.NetworkError, ccxt.ExchangeError) as e: raise TemporaryError( f'Could not get balance due to {e.__class__.__name__}. Message: {e}') from e except ccxt.BaseError as e: raise OperationalException(e) from e @retrier def fetch_positions(self, pair: Optional[str] = None) -> List[Dict]: """ Fetch positions from the exchange. If no pair is given, all positions are returned. :param pair: Pair for the query """ if self._config['dry_run'] or self.trading_mode != TradingMode.FUTURES: return [] try: symbols = [] if pair: symbols.append(pair) positions: List[Dict] = self._api.fetch_positions(symbols) self._log_exchange_response('fetch_positions', positions) return positions except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.NetworkError, ccxt.ExchangeError) as e: raise TemporaryError( f'Could not get positions due to {e.__class__.__name__}. Message: {e}') from e except ccxt.BaseError as e: raise OperationalException(e) from e def _fetch_orders_emulate(self, pair: str, since_ms: int) -> List[Dict]: orders = [] if self.exchange_has('fetchClosedOrders'): orders = self._api.fetch_closed_orders(pair, since=since_ms) if self.exchange_has('fetchOpenOrders'): orders_open = self._api.fetch_open_orders(pair, since=since_ms) orders.extend(orders_open) return orders @retrier(retries=0) def fetch_orders(self, pair: str, since: datetime, params: Optional[Dict] = None) -> List[Dict]: """ Fetch all orders for a pair "since" :param pair: Pair for the query :param since: Starting time for the query """ if self._config['dry_run']: return [] try: since_ms = int((since.timestamp() - 10) * 1000) if self.exchange_has('fetchOrders'): if not params: params = {} try: orders: List[Dict] = self._api.fetch_orders(pair, since=since_ms, params=params) except ccxt.NotSupported: # Some exchanges don't support fetchOrders # attempt to fetch open and closed orders separately orders = self._fetch_orders_emulate(pair, since_ms) else: orders = self._fetch_orders_emulate(pair, since_ms) self._log_exchange_response('fetch_orders', orders) orders = [self._order_contracts_to_amount(o) for o in orders] return orders except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.NetworkError, ccxt.ExchangeError) as e: raise TemporaryError( f'Could not fetch positions due to {e.__class__.__name__}. 
Message: {e}') from e except ccxt.BaseError as e: raise OperationalException(e) from e @retrier def fetch_trading_fees(self) -> Dict[str, Any]: """ Fetch user account trading fees Can be cached, should not update often. """ if (self._config['dry_run'] or self.trading_mode != TradingMode.FUTURES or not self.exchange_has('fetchTradingFees')): return {} try: trading_fees: Dict[str, Any] = self._api.fetch_trading_fees() self._log_exchange_response('fetch_trading_fees', trading_fees) return trading_fees except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.NetworkError, ccxt.ExchangeError) as e: raise TemporaryError( f'Could not fetch trading fees due to {e.__class__.__name__}. Message: {e}') from e except ccxt.BaseError as e: raise OperationalException(e) from e @retrier def fetch_bids_asks(self, symbols: Optional[List[str]] = None, cached: bool = False) -> Dict: """ :param cached: Allow cached result :return: fetch_tickers result """ if not self.exchange_has('fetchBidsAsks'): return {} if cached: with self._cache_lock: tickers = self._fetch_tickers_cache.get('fetch_bids_asks') if tickers: return tickers try: tickers = self._api.fetch_bids_asks(symbols) with self._cache_lock: self._fetch_tickers_cache['fetch_bids_asks'] = tickers return tickers except ccxt.NotSupported as e: raise OperationalException( f'Exchange {self._api.name} does not support fetching bids/asks in batch. ' f'Message: {e}') from e except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.NetworkError, ccxt.ExchangeError) as e: raise TemporaryError( f'Could not load bids/asks due to {e.__class__.__name__}. Message: {e}') from e except ccxt.BaseError as e: raise OperationalException(e) from e @retrier def get_tickers(self, symbols: Optional[List[str]] = None, cached: bool = False) -> Tickers: """ :param cached: Allow cached result :return: fetch_tickers result """ tickers: Tickers if not self.exchange_has('fetchTickers'): return {} if cached: with self._cache_lock: tickers = self._fetch_tickers_cache.get('fetch_tickers') # type: ignore if tickers: return tickers try: tickers = self._api.fetch_tickers(symbols) with self._cache_lock: self._fetch_tickers_cache['fetch_tickers'] = tickers return tickers except ccxt.NotSupported as e: raise OperationalException( f'Exchange {self._api.name} does not support fetching tickers in batch. ' f'Message: {e}') from e except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.NetworkError, ccxt.ExchangeError) as e: raise TemporaryError( f'Could not load tickers due to {e.__class__.__name__}. Message: {e}') from e except ccxt.BaseError as e: raise OperationalException(e) from e # Pricing info @retrier def fetch_ticker(self, pair: str) -> Ticker: try: if (pair not in self.markets or self.markets[pair].get('active', False) is False): raise ExchangeError(f"Pair {pair} not available") data: Ticker = self._api.fetch_ticker(pair) return data except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.NetworkError, ccxt.ExchangeError) as e: raise TemporaryError( f'Could not load ticker due to {e.__class__.__name__}. Message: {e}') from e except ccxt.BaseError as e: raise OperationalException(e) from e @staticmethod def get_next_limit_in_list(limit: int, limit_range: Optional[List[int]], range_required: bool = True): """ Get next greater value in the list. 
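        For example (hypothetical limit_range of [25, 100, 500, 1000]): a requested limit
        of 60 resolves to 100, while a request above 1000 falls back to the largest
        supported value, or to None when range_required is False.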
Used by fetch_l2_order_book if the api only supports a limited range """ if not limit_range: return limit result = min([x for x in limit_range if limit <= x] + [max(limit_range)]) if not range_required and limit > result: # Range is not required - we can use None as parameter. return None return result @retrier def fetch_l2_order_book(self, pair: str, limit: int = 100) -> OrderBook: """ Get L2 order book from exchange. Can be limited to a certain amount (if supported). Returns a dict in the format {'asks': [price, volume], 'bids': [price, volume]} """ limit1 = self.get_next_limit_in_list(limit, self._ft_has['l2_limit_range'], self._ft_has['l2_limit_range_required']) try: return self._api.fetch_l2_order_book(pair, limit1) except ccxt.NotSupported as e: raise OperationalException( f'Exchange {self._api.name} does not support fetching order book.' f'Message: {e}') from e except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.NetworkError, ccxt.ExchangeError) as e: raise TemporaryError( f'Could not get order book due to {e.__class__.__name__}. Message: {e}') from e except ccxt.BaseError as e: raise OperationalException(e) from e def _get_price_side(self, side: str, is_short: bool, conf_strategy: Dict) -> BidAsk: price_side = conf_strategy['price_side'] if price_side in ('same', 'other'): price_map = { ('entry', 'long', 'same'): 'bid', ('entry', 'long', 'other'): 'ask', ('entry', 'short', 'same'): 'ask', ('entry', 'short', 'other'): 'bid', ('exit', 'long', 'same'): 'ask', ('exit', 'long', 'other'): 'bid', ('exit', 'short', 'same'): 'bid', ('exit', 'short', 'other'): 'ask', } price_side = price_map[(side, 'short' if is_short else 'long', price_side)] return price_side def get_rate(self, pair: str, refresh: bool, side: EntryExit, is_short: bool, order_book: Optional[OrderBook] = None, ticker: Optional[Ticker] = None) -> float: """ Calculates bid/ask target bid rate - between current ask price and last price ask rate - either using ticker bid or first bid based on orderbook or remain static in any other case since it's not updating. :param pair: Pair to get rate for :param refresh: allow cached data :param side: "buy" or "sell" :return: float: Price :raises PricingError if orderbook price could not be determined. """ name = side.capitalize() strat_name = 'entry_pricing' if side == "entry" else 'exit_pricing' cache_rate: TTLCache = self._entry_rate_cache if side == "entry" else self._exit_rate_cache if not refresh: with self._cache_lock: rate = cache_rate.get(pair) # Check if cache has been invalidated if rate: logger.debug(f"Using cached {side} rate for {pair}.") return rate conf_strategy = self._config.get(strat_name, {}) price_side = self._get_price_side(side, is_short, conf_strategy) if conf_strategy.get('use_order_book', False): order_book_top = conf_strategy.get('order_book_top', 1) if order_book is None: order_book = self.fetch_l2_order_book(pair, order_book_top) rate = self._get_rate_from_ob(pair, side, order_book, name, price_side, order_book_top) else: logger.debug(f"Using Last {price_side.capitalize()} / Last Price") if ticker is None: ticker = self.fetch_ticker(pair) rate = self._get_rate_from_ticker(side, ticker, conf_strategy, price_side) if rate is None: raise PricingError(f"{name}-Rate for {pair} was empty.") with self._cache_lock: cache_rate[pair] = rate return rate def _get_rate_from_ticker(self, side: EntryExit, ticker: Ticker, conf_strategy: Dict[str, Any], price_side: BidAsk) -> Optional[float]: """ Get rate from ticker. 
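        Illustrative example (hypothetical ticker): for an entry priced from the 'ask'
        side with ask=100.0, last=99.0 and price_last_balance=0.5, the rate is
        interpolated to 100 + 0.5 * (99 - 100) = 99.5.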
""" ticker_rate = ticker[price_side] if ticker['last'] and ticker_rate: if side == 'entry' and ticker_rate > ticker['last']: balance = conf_strategy.get('price_last_balance', 0.0) ticker_rate = ticker_rate + balance * (ticker['last'] - ticker_rate) elif side == 'exit' and ticker_rate < ticker['last']: balance = conf_strategy.get('price_last_balance', 0.0) ticker_rate = ticker_rate - balance * (ticker_rate - ticker['last']) rate = ticker_rate return rate def _get_rate_from_ob(self, pair: str, side: EntryExit, order_book: OrderBook, name: str, price_side: BidAsk, order_book_top: int) -> float: """ Get rate from orderbook :raises: PricingError if rate could not be determined. """ logger.debug('order_book %s', order_book) # top 1 = index 0 try: obside: OBLiteral = 'bids' if price_side == 'bid' else 'asks' rate = order_book[obside][order_book_top - 1][0] except (IndexError, KeyError) as e: logger.warning( f"{pair} - {name} Price at location {order_book_top} from orderbook " f"could not be determined. Orderbook: {order_book}" ) raise PricingError from e logger.debug(f"{pair} - {name} price from orderbook {price_side.capitalize()}" f"side - top {order_book_top} order book {side} rate {rate:.8f}") return rate def get_rates(self, pair: str, refresh: bool, is_short: bool) -> Tuple[float, float]: entry_rate = None exit_rate = None if not refresh: with self._cache_lock: entry_rate = self._entry_rate_cache.get(pair) exit_rate = self._exit_rate_cache.get(pair) if entry_rate: logger.debug(f"Using cached buy rate for {pair}.") if exit_rate: logger.debug(f"Using cached sell rate for {pair}.") entry_pricing = self._config.get('entry_pricing', {}) exit_pricing = self._config.get('exit_pricing', {}) order_book = ticker = None if not entry_rate and entry_pricing.get('use_order_book', False): order_book_top = max(entry_pricing.get('order_book_top', 1), exit_pricing.get('order_book_top', 1)) order_book = self.fetch_l2_order_book(pair, order_book_top) entry_rate = self.get_rate(pair, refresh, 'entry', is_short, order_book=order_book) elif not entry_rate: ticker = self.fetch_ticker(pair) entry_rate = self.get_rate(pair, refresh, 'entry', is_short, ticker=ticker) if not exit_rate: exit_rate = self.get_rate(pair, refresh, 'exit', is_short, order_book=order_book, ticker=ticker) return entry_rate, exit_rate # Fee handling @retrier def get_trades_for_order(self, order_id: str, pair: str, since: datetime, params: Optional[Dict] = None) -> List: """ Fetch Orders using the "fetch_my_trades" endpoint and filter them by order-id. The "since" argument passed in is coming from the database and is in UTC, as timezone-native datetime object. From the python documentation: > Naive datetime instances are assumed to represent local time Therefore, calling "since.timestamp()" will get the UTC timestamp, after applying the transformation from local timezone to UTC. This works for timezones UTC+ since then the result will contain trades from a few hours instead of from the last 5 seconds, however fails for UTC- timezones, since we're then asking for trades with a "since" argument in the future. :param order_id order_id: Order-id as given when creating the order :param pair: Pair the order is for :param since: datetime object of the order creation time. Assumes object is in UTC. 
""" if self._config['dry_run']: return [] if not self.exchange_has('fetchMyTrades'): return [] try: # Allow 5s offset to catch slight time offsets (discovered in #1185) # since needs to be int in milliseconds _params = params if params else {} my_trades = self._api.fetch_my_trades( pair, int((since.replace(tzinfo=timezone.utc).timestamp() - 5) * 1000), params=_params) matched_trades = [trade for trade in my_trades if trade['order'] == order_id] self._log_exchange_response('get_trades_for_order', matched_trades) matched_trades = self._trades_contracts_to_amount(matched_trades) return matched_trades except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.NetworkError, ccxt.ExchangeError) as e: raise TemporaryError( f'Could not get trades due to {e.__class__.__name__}. Message: {e}') from e except ccxt.BaseError as e: raise OperationalException(e) from e def get_order_id_conditional(self, order: Dict[str, Any]) -> str: return order['id'] @retrier def get_fee(self, symbol: str, type: str = '', side: str = '', amount: float = 1, price: float = 1, taker_or_maker: MakerTaker = 'maker') -> float: """ Retrieve fee from exchange :param symbol: Pair :param type: Type of order (market, limit, ...) :param side: Side of order (buy, sell) :param amount: Amount of order :param price: Price of order :param taker_or_maker: 'maker' or 'taker' (ignored if "type" is provided) """ if type and type == 'market': taker_or_maker = 'taker' try: if self._config['dry_run'] and self._config.get('fee', None) is not None: return self._config['fee'] # validate that markets are loaded before trying to get fee if self._api.markets is None or len(self._api.markets) == 0: self._api.load_markets(params={}) return self._api.calculate_fee(symbol=symbol, type=type, side=side, amount=amount, price=price, takerOrMaker=taker_or_maker)['rate'] except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.NetworkError, ccxt.ExchangeError) as e: raise TemporaryError( f'Could not get fee info due to {e.__class__.__name__}. Message: {e}') from e except ccxt.BaseError as e: raise OperationalException(e) from e @staticmethod def order_has_fee(order: Dict) -> bool: """ Verifies if the passed in order dict has the needed keys to extract fees, and that these keys (currency, cost) are not empty. :param order: Order or trade (one trade) dict :return: True if the fee substructure contains currency and cost, false otherwise """ if not isinstance(order, dict): return False return ('fee' in order and order['fee'] is not None and (order['fee'].keys() >= {'currency', 'cost'}) and order['fee']['currency'] is not None and order['fee']['cost'] is not None ) def calculate_fee_rate( self, fee: Dict, symbol: str, cost: float, amount: float) -> Optional[float]: """ Calculate fee rate if it's not given by the exchange. 
:param fee: ccxt Fee dict - must contain cost / currency / rate :param symbol: Symbol of the order :param cost: Total cost of the order :param amount: Amount of the order """ if fee.get('rate') is not None: return fee.get('rate') fee_curr = fee.get('currency') if fee_curr is None: return None fee_cost = float(fee['cost']) # Calculate fee based on order details if fee_curr == self.get_pair_base_currency(symbol): # Base currency - divide by amount return round(fee_cost / amount, 8) elif fee_curr == self.get_pair_quote_currency(symbol): # Quote currency - divide by cost return round(fee_cost / cost, 8) if cost else None else: # If Fee currency is a different currency if not cost: # If cost is None or 0.0 -> falsy, return None return None try: comb = self.get_valid_pair_combination(fee_curr, self._config['stake_currency']) tick = self.fetch_ticker(comb) fee_to_quote_rate = safe_value_fallback2(tick, tick, 'last', 'ask') except (ValueError, ExchangeError): fee_to_quote_rate = self._config['exchange'].get('unknown_fee_rate', None) if not fee_to_quote_rate: return None return round((fee_cost * fee_to_quote_rate) / cost, 8) def extract_cost_curr_rate(self, fee: Dict, symbol: str, cost: float, amount: float) -> Tuple[float, str, Optional[float]]: """ Extract tuple of cost, currency, rate. Requires order_has_fee to run first! :param fee: ccxt Fee dict - must contain cost / currency / rate :param symbol: Symbol of the order :param cost: Total cost of the order :param amount: Amount of the order :return: Tuple with cost, currency, rate of the given fee dict """ return (float(fee['cost']), fee['currency'], self.calculate_fee_rate( fee, symbol, cost, amount ) ) # Historic data def get_historic_ohlcv(self, pair: str, timeframe: str, since_ms: int, candle_type: CandleType, is_new_pair: bool = False, until_ms: Optional[int] = None) -> List: """ Get candle history using asyncio and returns the list of candles. Handles all async work for this. Async over one pair, assuming we get `self.ohlcv_candle_limit()` candles per call. :param pair: Pair to download :param timeframe: Timeframe to get data for :param since_ms: Timestamp in milliseconds to get history from :param until_ms: Timestamp in milliseconds to get history up to :param candle_type: '', mark, index, premiumIndex, or funding_rate :return: List with candle (OHLCV) data """ pair, _, _, data, _ = self.loop.run_until_complete( self._async_get_historic_ohlcv(pair=pair, timeframe=timeframe, since_ms=since_ms, until_ms=until_ms, is_new_pair=is_new_pair, candle_type=candle_type)) logger.info(f"Downloaded data for {pair} with length {len(data)}.") return data async def _async_get_historic_ohlcv(self, pair: str, timeframe: str, since_ms: int, candle_type: CandleType, is_new_pair: bool = False, raise_: bool = False, until_ms: Optional[int] = None ) -> OHLCVResponse: """ Download historic ohlcv :param is_new_pair: used by binance subclass to allow "fast" new pair downloading :param candle_type: Any of the enum CandleType (must match trading mode!) 
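        Illustrative sizing (assuming a hypothetical 500-candle limit): on a 5m timeframe
        one call covers 300_000 ms * 500 = 150_000_000 ms (about 1.7 days), so a 30-day
        window is split into 18 parallel candle requests.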
""" one_call = timeframe_to_msecs(timeframe) * self.ohlcv_candle_limit( timeframe, candle_type, since_ms) logger.debug( "one_call: %s msecs (%s)", one_call, dt_humanize(dt_now() - timedelta(milliseconds=one_call), only_distance=True) ) input_coroutines = [self._async_get_candle_history( pair, timeframe, candle_type, since) for since in range(since_ms, until_ms or dt_ts(), one_call)] data: List = [] # Chunk requests into batches of 100 to avoid overwelming ccxt Throttling for input_coro in chunks(input_coroutines, 100): results = await asyncio.gather(*input_coro, return_exceptions=True) for res in results: if isinstance(res, Exception): logger.warning(f"Async code raised an exception: {repr(res)}") if raise_: raise continue else: # Deconstruct tuple if it's not an exception p, _, c, new_data, _ = res if p == pair and c == candle_type: data.extend(new_data) # Sort data again after extending the result - above calls return in "async order" data = sorted(data, key=lambda x: x[0]) return pair, timeframe, candle_type, data, self._ohlcv_partial_candle def _build_coroutine( self, pair: str, timeframe: str, candle_type: CandleType, since_ms: Optional[int], cache: bool) -> Coroutine[Any, Any, OHLCVResponse]: not_all_data = cache and self.required_candle_call_count > 1 if cache and (pair, timeframe, candle_type) in self._klines: candle_limit = self.ohlcv_candle_limit(timeframe, candle_type) min_date = date_minus_candles(timeframe, candle_limit - 5).timestamp() # Check if 1 call can get us updated candles without hole in the data. if min_date < self._pairs_last_refresh_time.get((pair, timeframe, candle_type), 0): # Cache can be used - do one-off call. not_all_data = False else: # Time jump detected, evict cache logger.info( f"Time jump detected. Evicting cache for {pair}, {timeframe}, {candle_type}") del self._klines[(pair, timeframe, candle_type)] if (not since_ms and (self._ft_has["ohlcv_require_since"] or not_all_data)): # Multiple calls for one pair - to get more history one_call = timeframe_to_msecs(timeframe) * self.ohlcv_candle_limit( timeframe, candle_type, since_ms) move_to = one_call * self.required_candle_call_count now = timeframe_to_next_date(timeframe) since_ms = int((now - timedelta(seconds=move_to // 1000)).timestamp() * 1000) if since_ms: return self._async_get_historic_ohlcv( pair, timeframe, since_ms=since_ms, raise_=True, candle_type=candle_type) else: # One call ... "regular" refresh return self._async_get_candle_history( pair, timeframe, since_ms=since_ms, candle_type=candle_type) def _build_ohlcv_dl_jobs( self, pair_list: ListPairsWithTimeframes, since_ms: Optional[int], cache: bool) -> Tuple[List[Coroutine], List[Tuple[str, str, CandleType]]]: """ Build Coroutines to execute as part of refresh_latest_ohlcv """ input_coroutines: List[Coroutine[Any, Any, OHLCVResponse]] = [] cached_pairs = [] for pair, timeframe, candle_type in set(pair_list): if (timeframe not in self.timeframes and candle_type in (CandleType.SPOT, CandleType.FUTURES)): logger.warning( f"Cannot download ({pair}, {timeframe}) combination as this timeframe is " f"not available on {self.name}. Available timeframes are " f"{', '.join(self.timeframes)}.") continue if ((pair, timeframe, candle_type) not in self._klines or not cache or self._now_is_time_to_refresh(pair, timeframe, candle_type)): input_coroutines.append( self._build_coroutine(pair, timeframe, candle_type, since_ms, cache)) else: logger.debug( f"Using cached candle (OHLCV) data for {pair}, {timeframe}, {candle_type} ..." 
                )
                cached_pairs.append((pair, timeframe, candle_type))

        return input_coroutines, cached_pairs

    def _process_ohlcv_df(self, pair: str, timeframe: str, c_type: CandleType, ticks: List[List],
                          cache: bool, drop_incomplete: bool) -> DataFrame:
        # keeping last candle time as last refreshed time of the pair
        if ticks and cache:
            idx = -2 if drop_incomplete and len(ticks) > 1 else -1
            self._pairs_last_refresh_time[(pair, timeframe, c_type)] = ticks[idx][0] // 1000
        # keeping parsed dataframe in cache
        ohlcv_df = ohlcv_to_dataframe(ticks, timeframe, pair=pair,
                                      fill_missing=True, drop_incomplete=drop_incomplete)
        if cache:
            if (pair, timeframe, c_type) in self._klines:
                old = self._klines[(pair, timeframe, c_type)]
                # Reassign so we return the updated, combined df
                ohlcv_df = clean_ohlcv_dataframe(concat([old, ohlcv_df], axis=0), timeframe, pair,
                                                 fill_missing=True, drop_incomplete=False)
                candle_limit = self.ohlcv_candle_limit(timeframe, self._config['candle_type_def'])
                # Age out old candles
                ohlcv_df = ohlcv_df.tail(candle_limit + self._startup_candle_count)
                ohlcv_df = ohlcv_df.reset_index(drop=True)
                self._klines[(pair, timeframe, c_type)] = ohlcv_df
            else:
                self._klines[(pair, timeframe, c_type)] = ohlcv_df
        return ohlcv_df

    def refresh_latest_ohlcv(self, pair_list: ListPairsWithTimeframes, *,
                             since_ms: Optional[int] = None, cache: bool = True,
                             drop_incomplete: Optional[bool] = None
                             ) -> Dict[PairWithTimeframe, DataFrame]:
        """
        Refresh in-memory OHLCV asynchronously and set `_klines` with the result
        Loops asynchronously over pair_list and downloads all pairs async (semi-parallel).
        Only used in the dataprovider.refresh() method.
        :param pair_list: List of 2 element tuples containing pair, interval to refresh
        :param since_ms: time since when to download, in milliseconds
        :param cache: Assign result to _klines. Useful for one-off downloads like for pairlists
        :param drop_incomplete: Control candle dropping.
Specifying None defaults to _ohlcv_partial_candle :return: Dict of [{(pair, timeframe): Dataframe}] """ logger.debug("Refreshing candle (OHLCV) data for %d pairs", len(pair_list)) # Gather coroutines to run input_coroutines, cached_pairs = self._build_ohlcv_dl_jobs(pair_list, since_ms, cache) results_df = {} # Chunk requests into batches of 100 to avoid overwelming ccxt Throttling for input_coro in chunks(input_coroutines, 100): async def gather_stuff(): return await asyncio.gather(*input_coro, return_exceptions=True) with self._loop_lock: results = self.loop.run_until_complete(gather_stuff()) for res in results: if isinstance(res, Exception): logger.warning(f"Async code raised an exception: {repr(res)}") continue # Deconstruct tuple (has 5 elements) pair, timeframe, c_type, ticks, drop_hint = res drop_incomplete_ = drop_hint if drop_incomplete is None else drop_incomplete ohlcv_df = self._process_ohlcv_df( pair, timeframe, c_type, ticks, cache, drop_incomplete_) results_df[(pair, timeframe, c_type)] = ohlcv_df # Return cached klines for pair, timeframe, c_type in cached_pairs: results_df[(pair, timeframe, c_type)] = self.klines( (pair, timeframe, c_type), copy=False ) return results_df def _now_is_time_to_refresh(self, pair: str, timeframe: str, candle_type: CandleType) -> bool: # Timeframe in seconds interval_in_sec = timeframe_to_seconds(timeframe) plr = self._pairs_last_refresh_time.get((pair, timeframe, candle_type), 0) + interval_in_sec # current,active candle open date now = int(timeframe_to_prev_date(timeframe).timestamp()) return plr < now @retrier_async async def _async_get_candle_history( self, pair: str, timeframe: str, candle_type: CandleType, since_ms: Optional[int] = None, ) -> OHLCVResponse: """ Asynchronously get candle history data using fetch_ohlcv :param candle_type: '', mark, index, premiumIndex, or funding_rate returns tuple: (pair, timeframe, ohlcv_list) """ try: # Fetch OHLCV asynchronously s = '(' + dt_from_ts(since_ms).isoformat() + ') ' if since_ms is not None else '' logger.debug( "Fetching pair %s, %s, interval %s, since %s %s...", pair, candle_type, timeframe, since_ms, s ) params = deepcopy(self._ft_has.get('ohlcv_params', {})) candle_limit = self.ohlcv_candle_limit( timeframe, candle_type=candle_type, since_ms=since_ms) if candle_type and candle_type != CandleType.SPOT: params.update({'price': candle_type.value}) if candle_type != CandleType.FUNDING_RATE: data = await self._api_async.fetch_ohlcv( pair, timeframe=timeframe, since=since_ms, limit=candle_limit, params=params) else: # Funding rate data = await self._fetch_funding_rate_history( pair=pair, timeframe=timeframe, limit=candle_limit, since_ms=since_ms, ) # Some exchanges sort OHLCV in ASC order and others in DESC. # Ex: Bittrex returns the list of OHLCV in ASC order (oldest first, newest last) # while GDAX returns the list of OHLCV in DESC order (newest first, oldest last) # Only sort if necessary to save computing time try: if data and data[0][0] > data[-1][0]: data = sorted(data, key=lambda x: x[0]) except IndexError: logger.exception("Error loading %s. Result was %s.", pair, data) return pair, timeframe, candle_type, [], self._ohlcv_partial_candle logger.debug("Done fetching pair %s, %s interval %s...", pair, candle_type, timeframe) return pair, timeframe, candle_type, data, self._ohlcv_partial_candle except ccxt.NotSupported as e: raise OperationalException( f'Exchange {self._api.name} does not support fetching historical ' f'candle (OHLCV) data. 
Message: {e}') from e except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.NetworkError, ccxt.ExchangeError) as e: raise TemporaryError(f'Could not fetch historical candle (OHLCV) data ' f'for pair {pair} due to {e.__class__.__name__}. ' f'Message: {e}') from e except ccxt.BaseError as e: raise OperationalException(f'Could not fetch historical candle (OHLCV) data ' f'for pair {pair}. Message: {e}') from e async def _fetch_funding_rate_history( self, pair: str, timeframe: str, limit: int, since_ms: Optional[int] = None, ) -> List[List]: """ Fetch funding rate history - used to selectively override this by subclasses. """ # Funding rate data = await self._api_async.fetch_funding_rate_history( pair, since=since_ms, limit=limit) # Convert funding rate to candle pattern data = [[x['timestamp'], x['fundingRate'], 0, 0, 0, 0] for x in data] return data # Fetch historic trades @retrier_async async def _async_fetch_trades(self, pair: str, since: Optional[int] = None, params: Optional[dict] = None) -> List[List]: """ Asyncronously gets trade history using fetch_trades. Handles exchange errors, does one call to the exchange. :param pair: Pair to fetch trade data for :param since: Since as integer timestamp in milliseconds returns: List of dicts containing trades """ try: # fetch trades asynchronously if params: logger.debug("Fetching trades for pair %s, params: %s ", pair, params) trades = await self._api_async.fetch_trades(pair, params=params, limit=1000) else: logger.debug( "Fetching trades for pair %s, since %s %s...", pair, since, '(' + dt_from_ts(since).isoformat() + ') ' if since is not None else '' ) trades = await self._api_async.fetch_trades(pair, since=since, limit=1000) trades = self._trades_contracts_to_amount(trades) return trades_dict_to_list(trades) except ccxt.NotSupported as e: raise OperationalException( f'Exchange {self._api.name} does not support fetching historical trade data.' f'Message: {e}') from e except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.NetworkError, ccxt.ExchangeError) as e: raise TemporaryError(f'Could not load trade history due to {e.__class__.__name__}. ' f'Message: {e}') from e except ccxt.BaseError as e: raise OperationalException(f'Could not fetch trade data. Msg: {e}') from e async def _async_get_trade_history_id(self, pair: str, until: int, since: Optional[int] = None, from_id: Optional[str] = None) -> Tuple[str, List[List]]: """ Asyncronously gets trade history using fetch_trades use this when exchange uses id-based iteration (check `self._trades_pagination`) :param pair: Pair to fetch trade data for :param since: Since as integer timestamp in milliseconds :param until: Until as integer timestamp in milliseconds :param from_id: Download data starting with ID (if id is known). Ignores "since" if set. returns tuple: (pair, trades-list) """ trades: List[List] = [] if not from_id: # Fetch first elements using timebased method to get an ID to paginate on # Depending on the Exchange, this can introduce a drift at the start of the interval # of up to an hour. # e.g. Binance returns the "last 1000" candles within a 1h time interval # - so we will miss the first trades. 
            t = await self._async_fetch_trades(pair, since=since)
            # DEFAULT_TRADES_COLUMNS: 0 -> timestamp
            # DEFAULT_TRADES_COLUMNS: 1 -> id
            from_id = t[-1][1]
            trades.extend(t[:-1])
        while True:
            try:
                t = await self._async_fetch_trades(pair,
                                                   params={self._trades_pagination_arg: from_id})
                if t:
                    # Skip last id since it's the key for the next call
                    trades.extend(t[:-1])
                    if from_id == t[-1][1] or t[-1][0] > until:
                        logger.debug(f"Stopping because from_id did not change "
                                     f"or until was reached: {t[-1][0]} > {until}")
                        # Reached the end of the defined download period - add last trade as well.
                        trades.extend(t[-1:])
                        break

                    from_id = t[-1][1]
                else:
                    logger.debug("Stopping as no more trades were returned.")
                    break
            except asyncio.CancelledError:
                logger.debug("Async operation interrupted, breaking trades DL loop.")
                break

        return (pair, trades)

    async def _async_get_trade_history_time(self, pair: str, until: int,
                                            since: Optional[int] = None) -> Tuple[str, List[List]]:
        """
        Asynchronously gets trade history using fetch_trades,
        when the exchange uses time-based iteration (check `self._trades_pagination`)
        :param pair: Pair to fetch trade data for
        :param since: Since as integer timestamp in milliseconds
        :param until: Until as integer timestamp in milliseconds
        returns tuple: (pair, trades-list)
        """

        trades: List[List] = []
        # DEFAULT_TRADES_COLUMNS: 0 -> timestamp
        # DEFAULT_TRADES_COLUMNS: 1 -> id
        while True:
            try:
                t = await self._async_fetch_trades(pair, since=since)
                if t:
                    # No more trades to download available at the exchange,
                    # so we repeatedly get the same trade over and over again.
                    if since == t[-1][0] and len(t) == 1:
                        logger.debug("Stopping because no more trades are available.")
                        break
                    since = t[-1][0]
                    trades.extend(t)
                    # Reached the end of the defined download period
                    if until and t[-1][0] > until:
                        logger.debug(
                            f"Stopping because until was reached. {t[-1][0]} > {until}")
                        break
                else:
                    logger.debug("Stopping as no more trades were returned.")
                    break
            except asyncio.CancelledError:
                logger.debug("Async operation interrupted, breaking trades DL loop.")
                break

        return (pair, trades)

    async def _async_get_trade_history(self, pair: str,
                                       since: Optional[int] = None,
                                       until: Optional[int] = None,
                                       from_id: Optional[str] = None) -> Tuple[str, List[List]]:
        """
        Async wrapper handling downloading trades using either time or id based methods.
        """

        logger.debug(f"_async_get_trade_history(), pair: {pair}, "
                     f"since: {since}, until: {until}, from_id: {from_id}")

        if until is None:
            until = ccxt.Exchange.milliseconds()
            logger.debug(f"Exchange milliseconds: {until}")

        if self._trades_pagination == 'time':
            return await self._async_get_trade_history_time(
                pair=pair, since=since, until=until)
        elif self._trades_pagination == 'id':
            return await self._async_get_trade_history_id(
                pair=pair, since=since, until=until, from_id=from_id
            )
        else:
            raise OperationalException(f"Exchange {self.name} uses neither time- "
                                       f"nor id-based pagination.")

    def get_historic_trades(self, pair: str,
                            since: Optional[int] = None,
                            until: Optional[int] = None,
                            from_id: Optional[str] = None) -> Tuple[str, List]:
        """
        Get trade history data using asyncio.
        Handles all async work and returns the list of trades.
        Async over one pair, assuming we get `self.ohlcv_candle_limit()` candles per call.
        :param pair: Pair to download
        :param since: Timestamp in milliseconds to get history from
        :param until: Timestamp in milliseconds. Defaults to current timestamp if not defined.
:param from_id: Download data starting with ID (if id is known) :returns List of trade data """ if not self.exchange_has("fetchTrades"): raise OperationalException("This exchange does not support downloading Trades.") with self._loop_lock: task = asyncio.ensure_future(self._async_get_trade_history( pair=pair, since=since, until=until, from_id=from_id)) for sig in [signal.SIGINT, signal.SIGTERM]: try: self.loop.add_signal_handler(sig, task.cancel) except NotImplementedError: # Not all platforms implement signals (e.g. windows) pass return self.loop.run_until_complete(task) @retrier def _get_funding_fees_from_exchange(self, pair: str, since: Union[datetime, int]) -> float: """ Returns the sum of all funding fees that were exchanged for a pair within a timeframe Dry-run handling happens as part of _calculate_funding_fees. :param pair: (e.g. ADA/USDT) :param since: The earliest time of consideration for calculating funding fees, in unix time or as a datetime """ if not self.exchange_has("fetchFundingHistory"): raise OperationalException( f"fetch_funding_history() is not available using {self.name}" ) if type(since) is datetime: since = int(since.timestamp()) * 1000 # * 1000 for ms try: funding_history = self._api.fetch_funding_history( symbol=pair, since=since ) return sum(fee['amount'] for fee in funding_history) except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.NetworkError, ccxt.ExchangeError) as e: raise TemporaryError( f'Could not get funding fees due to {e.__class__.__name__}. Message: {e}') from e except ccxt.BaseError as e: raise OperationalException(e) from e @retrier def get_leverage_tiers(self) -> Dict[str, List[Dict]]: try: return self._api.fetch_leverage_tiers() except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.NetworkError, ccxt.ExchangeError) as e: raise TemporaryError( f'Could not load leverage tiers due to {e.__class__.__name__}. Message: {e}' ) from e except ccxt.BaseError as e: raise OperationalException(e) from e @retrier_async async def get_market_leverage_tiers(self, symbol: str) -> Tuple[str, List[Dict]]: """ Leverage tiers per symbol """ try: tier = await self._api_async.fetch_market_leverage_tiers(symbol) return symbol, tier except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.NetworkError, ccxt.ExchangeError) as e: raise TemporaryError( f'Could not load leverage tiers for {symbol}' f' due to {e.__class__.__name__}. Message: {e}' ) from e except ccxt.BaseError as e: raise OperationalException(e) from e def load_leverage_tiers(self) -> Dict[str, List[Dict]]: if self.trading_mode == TradingMode.FUTURES: if self.exchange_has('fetchLeverageTiers'): # Fetch all leverage tiers at once return self.get_leverage_tiers() elif self.exchange_has('fetchMarketLeverageTiers'): # Must fetch the leverage tiers for each market separately # * This is slow(~45s) on Okx, makes ~90 api calls to load all linear swap markets markets = self.markets symbols = [ symbol for symbol, market in markets.items() if (self.market_is_future(market) and market['quote'] == self._config['stake_currency']) ] tiers: Dict[str, List[Dict]] = {} tiers_cached = self.load_cached_leverage_tiers(self._config['stake_currency']) if tiers_cached: tiers = tiers_cached coros = [ self.get_market_leverage_tiers(symbol) for symbol in sorted(symbols) if symbol not in tiers] # Be verbose here, as this delays startup by ~1 minute. if coros: logger.info( f"Initializing leverage_tiers for {len(symbols)} markets. 
" "This will take about a minute.") else: logger.info("Using cached leverage_tiers.") async def gather_results(input_coro): return await asyncio.gather(*input_coro, return_exceptions=True) for input_coro in chunks(coros, 100): with self._loop_lock: results = self.loop.run_until_complete(gather_results(input_coro)) for res in results: if isinstance(res, Exception): logger.warning(f"Leverage tier exception: {repr(res)}") continue symbol, tier = res tiers[symbol] = tier if len(coros) > 0: self.cache_leverage_tiers(tiers, self._config['stake_currency']) logger.info(f"Done initializing {len(symbols)} markets.") return tiers return {} def cache_leverage_tiers(self, tiers: Dict[str, List[Dict]], stake_currency: str) -> None: filename = self._config['datadir'] / "futures" / f"leverage_tiers_{stake_currency}.json" if not filename.parent.is_dir(): filename.parent.mkdir(parents=True) data = { "updated": datetime.now(timezone.utc), "data": tiers, } file_dump_json(filename, data) def load_cached_leverage_tiers(self, stake_currency: str) -> Optional[Dict[str, List[Dict]]]: filename = self._config['datadir'] / "futures" / f"leverage_tiers_{stake_currency}.json" if filename.is_file(): try: tiers = file_load_json(filename) updated = tiers.get('updated') if updated: updated_dt = parser.parse(updated) if updated_dt < datetime.now(timezone.utc) - timedelta(weeks=4): logger.info("Cached leverage tiers are outdated. Will update.") return None return tiers['data'] except Exception: logger.exception("Error loading cached leverage tiers. Refreshing.") return None def fill_leverage_tiers(self) -> None: """ Assigns property _leverage_tiers to a dictionary of information about the leverage allowed on each pair """ leverage_tiers = self.load_leverage_tiers() for pair, tiers in leverage_tiers.items(): pair_tiers = [] for tier in tiers: pair_tiers.append(self.parse_leverage_tier(tier)) self._leverage_tiers[pair] = pair_tiers def parse_leverage_tier(self, tier) -> Dict: info = tier.get('info', {}) return { 'minNotional': tier['minNotional'], 'maxNotional': tier['maxNotional'], 'maintenanceMarginRate': tier['maintenanceMarginRate'], 'maxLeverage': tier['maxLeverage'], 'maintAmt': float(info['cum']) if 'cum' in info else None, } def get_max_leverage(self, pair: str, stake_amount: Optional[float]) -> float: """ Returns the maximum leverage that a pair can be traded at :param pair: The base/quote currency pair being traded :stake_amount: The total value of the traders margin_mode in quote currency """ if self.trading_mode == TradingMode.SPOT: return 1.0 if self.trading_mode == TradingMode.FUTURES: # Checks and edge cases if stake_amount is None: raise OperationalException( f'{self.name}.get_max_leverage requires argument stake_amount' ) if pair not in self._leverage_tiers: # Maybe raise exception because it can't be traded on futures? 
return 1.0 pair_tiers = self._leverage_tiers[pair] if stake_amount == 0: return self._leverage_tiers[pair][0]['maxLeverage'] # Max lev for lowest amount for tier_index in range(len(pair_tiers)): tier = pair_tiers[tier_index] lev = tier['maxLeverage'] if tier_index < len(pair_tiers) - 1: next_tier = pair_tiers[tier_index + 1] next_floor = next_tier['minNotional'] / next_tier['maxLeverage'] if next_floor > stake_amount: # Next tier min too high for stake amount return min((tier['maxNotional'] / stake_amount), lev) # # With the two leverage tiers below, # - a stake amount of 150 would mean a max leverage of (10000 / 150) = 66.66 # - stakes below 133.33 = max_lev of 75 # - stakes between 133.33-200 = max_lev of 10000/stake = 50.01-74.99 # - stakes from 200 + 1000 = max_lev of 50 # # { # "min": 0, # stake = 0.0 # "max": 10000, # max_stake@75 = 10000/75 = 133.33333333333334 # "lev": 75, # }, # { # "min": 10000, # stake = 200.0 # "max": 50000, # max_stake@50 = 50000/50 = 1000.0 # "lev": 50, # } # else: # if on the last tier if stake_amount > tier['maxNotional']: # If stake is > than max tradeable amount raise InvalidOrderException(f'Amount {stake_amount} too high for {pair}') else: return tier['maxLeverage'] raise OperationalException( 'Looped through all tiers without finding a max leverage. Should never be reached' ) elif self.trading_mode == TradingMode.MARGIN: # Search markets.limits for max lev market = self.markets[pair] if market['limits']['leverage']['max'] is not None: return market['limits']['leverage']['max'] else: return 1.0 # Default if max leverage cannot be found else: return 1.0 @retrier def _set_leverage( self, leverage: float, pair: Optional[str] = None, accept_fail: bool = False, ): """ Set's the leverage before making a trade, in order to not have the same leverage on every trade """ if self._config['dry_run'] or not self.exchange_has("setLeverage"): # Some exchanges only support one margin_mode type return if self._ft_has.get('floor_leverage', False) is True: # Rounding for binance ... leverage = floor(leverage) try: res = self._api.set_leverage(symbol=pair, leverage=leverage) self._log_exchange_response('set_leverage', res) except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.BadRequest, ccxt.InsufficientFunds) as e: if not accept_fail: raise TemporaryError( f'Could not set leverage due to {e.__class__.__name__}. Message: {e}') from e except (ccxt.NetworkError, ccxt.ExchangeError) as e: raise TemporaryError( f'Could not set leverage due to {e.__class__.__name__}. Message: {e}') from e except ccxt.BaseError as e: raise OperationalException(e) from e def get_interest_rate(self) -> float: """ Retrieve interest rate - necessary for Margin trading. Should not call the exchange directly when used from backtesting. """ return 0.0 def funding_fee_cutoff(self, open_date: datetime) -> bool: """ Funding fees are only charged at full hours (usually every 4-8h). Therefore a trade opening at 10:00:01 will not be charged a funding fee until the next hour. :param open_date: The open date for a trade :return: True if the date falls on a full hour, False otherwise """ return open_date.minute == 0 and open_date.second == 0 @retrier def set_margin_mode(self, pair: str, margin_mode: MarginMode, accept_fail: bool = False, params: dict = {}): """ Set's the margin mode on the exchange to cross or isolated for a specific pair :param pair: base/quote currency pair (e.g. 
"ADA/USDT") """ if self._config['dry_run'] or not self.exchange_has("setMarginMode"): # Some exchanges only support one margin_mode type return try: res = self._api.set_margin_mode(margin_mode.value, pair, params) self._log_exchange_response('set_margin_mode', res) except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except ccxt.BadRequest as e: if not accept_fail: raise TemporaryError( f'Could not set margin mode due to {e.__class__.__name__}. Message: {e}') from e except (ccxt.NetworkError, ccxt.ExchangeError) as e: raise TemporaryError( f'Could not set margin mode due to {e.__class__.__name__}. Message: {e}') from e except ccxt.BaseError as e: raise OperationalException(e) from e def _fetch_and_calculate_funding_fees( self, pair: str, amount: float, is_short: bool, open_date: datetime, close_date: Optional[datetime] = None ) -> float: """ Fetches and calculates the sum of all funding fees that occurred for a pair during a futures trade. Only used during dry-run or if the exchange does not provide a funding_rates endpoint. :param pair: The quote/base pair of the trade :param amount: The quantity of the trade :param is_short: trade direction :param open_date: The date and time that the trade started :param close_date: The date and time that the trade ended """ if self.funding_fee_cutoff(open_date): # Shift back to 1h candle to avoid missing funding fees # Only really relevant for trades very close to the full hour open_date = timeframe_to_prev_date('1h', open_date) timeframe = self._ft_has['mark_ohlcv_timeframe'] timeframe_ff = self._ft_has.get('funding_fee_timeframe', self._ft_has['mark_ohlcv_timeframe']) if not close_date: close_date = datetime.now(timezone.utc) since_ms = int(timeframe_to_prev_date(timeframe, open_date).timestamp()) * 1000 mark_comb: PairWithTimeframe = ( pair, timeframe, CandleType.from_string(self._ft_has["mark_ohlcv_price"])) funding_comb: PairWithTimeframe = (pair, timeframe_ff, CandleType.FUNDING_RATE) candle_histories = self.refresh_latest_ohlcv( [mark_comb, funding_comb], since_ms=since_ms, cache=False, drop_incomplete=False, ) try: # we can't assume we always get histories - for example during exchange downtimes funding_rates = candle_histories[funding_comb] mark_rates = candle_histories[mark_comb] except KeyError: raise ExchangeError("Could not find funding rates.") from None funding_mark_rates = self.combine_funding_and_mark(funding_rates, mark_rates) return self.calculate_funding_fees( funding_mark_rates, amount=amount, is_short=is_short, open_date=open_date, close_date=close_date ) @staticmethod def combine_funding_and_mark(funding_rates: DataFrame, mark_rates: DataFrame, futures_funding_rate: Optional[int] = None) -> DataFrame: """ Combine funding-rates and mark-rates dataframes :param funding_rates: Dataframe containing Funding rates (Type FUNDING_RATE) :param mark_rates: Dataframe containing Mark rates (Type mark_ohlcv_price) :param futures_funding_rate: Fake funding rate to use if funding_rates are not available """ if futures_funding_rate is None: return mark_rates.merge( funding_rates, on='date', how="inner", suffixes=["_mark", "_fund"]) else: if len(funding_rates) == 0: # No funding rate candles - full fillup with fallback variable mark_rates['open_fund'] = futures_funding_rate return mark_rates.rename( columns={'open': 'open_mark', 'close': 'close_mark', 'high': 'high_mark', 'low': 'low_mark', 'volume': 'volume_mark'}) else: # Fill up missing funding_rate candles with fallback value combined = mark_rates.merge( funding_rates, on='date', 
how="outer", suffixes=["_mark", "_fund"] ) combined['open_fund'] = combined['open_fund'].fillna(futures_funding_rate) return combined def calculate_funding_fees( self, df: DataFrame, amount: float, is_short: bool, open_date: datetime, close_date: datetime, time_in_ratio: Optional[float] = None ) -> float: """ calculates the sum of all funding fees that occurred for a pair during a futures trade :param df: Dataframe containing combined funding and mark rates as `open_fund` and `open_mark`. :param amount: The quantity of the trade :param is_short: trade direction :param open_date: The date and time that the trade started :param close_date: The date and time that the trade ended :param time_in_ratio: Not used by most exchange classes """ fees: float = 0 if not df.empty: df1 = df[(df['date'] >= open_date) & (df['date'] <= close_date)] fees = sum(df1['open_fund'] * df1['open_mark'] * amount) # Negate fees for longs as funding_fees expects it this way based on live endpoints. return fees if is_short else -fees def get_funding_fees( self, pair: str, amount: float, is_short: bool, open_date: datetime) -> float: """ Fetch funding fees, either from the exchange (live) or calculates them based on funding rate/mark price history :param pair: The quote/base pair of the trade :param is_short: trade direction :param amount: Trade amount :param open_date: Open date of the trade :return: funding fee since open_date """ if self.trading_mode == TradingMode.FUTURES: try: if self._config['dry_run']: funding_fees = self._fetch_and_calculate_funding_fees( pair, amount, is_short, open_date) else: funding_fees = self._get_funding_fees_from_exchange(pair, open_date) return funding_fees except ExchangeError: logger.warning(f"Could not update funding fees for {pair}.") return 0.0 def get_liquidation_price( self, pair: str, # Dry-run open_rate: float, # Entry price of position is_short: bool, amount: float, # Absolute value of position size stake_amount: float, leverage: float, wallet_balance: float, mm_ex_1: float = 0.0, # (Binance) Cross only upnl_ex_1: float = 0.0, # (Binance) Cross only ) -> Optional[float]: """ Set's the margin mode on the exchange to cross or isolated for a specific pair """ if self.trading_mode == TradingMode.SPOT: return None elif (self.trading_mode != TradingMode.FUTURES): raise OperationalException( f"{self.name} does not support {self.margin_mode} {self.trading_mode}") liquidation_price = None if self._config['dry_run'] or not self.exchange_has("fetchPositions"): liquidation_price = self.dry_run_liquidation_price( pair=pair, open_rate=open_rate, is_short=is_short, amount=amount, leverage=leverage, stake_amount=stake_amount, wallet_balance=wallet_balance, mm_ex_1=mm_ex_1, upnl_ex_1=upnl_ex_1 ) else: positions = self.fetch_positions(pair) if len(positions) > 0: pos = positions[0] liquidation_price = pos['liquidationPrice'] if liquidation_price is not None: buffer_amount = abs(open_rate - liquidation_price) * self.liquidation_buffer liquidation_price_buffer = ( liquidation_price - buffer_amount if is_short else liquidation_price + buffer_amount ) return max(liquidation_price_buffer, 0.0) else: return None def dry_run_liquidation_price( self, pair: str, open_rate: float, # Entry price of position is_short: bool, amount: float, stake_amount: float, leverage: float, wallet_balance: float, # Or margin balance mm_ex_1: float = 0.0, # (Binance) Cross only upnl_ex_1: float = 0.0, # (Binance) Cross only ) -> Optional[float]: """ Important: Must be fetching data from cached values as this is used by 
backtesting! PERPETUAL: gate: https://www.gate.io/help/futures/futures/27724/liquidation-price-bankruptcy-price > Liquidation Price = (Entry Price ± Margin / Contract Multiplier / Size) / [ 1 ± (Maintenance Margin Ratio + Taker Rate)] Wherein, "+" or "-" depends on whether the contract goes long or short: "-" for long, and "+" for short. okex: https://www.okex.com/support/hc/en-us/articles/ 360053909592-VI-Introduction-to-the-isolated-mode-of-Single-Multi-currency-Portfolio-margin :param pair: Pair to calculate liquidation price for :param open_rate: Entry price of position :param is_short: True if the trade is a short, false otherwise :param amount: Absolute value of position size incl. leverage (in base currency) :param stake_amount: Stake amount - Collateral in settle currency. :param leverage: Leverage used for this position. :param trading_mode: SPOT, MARGIN, FUTURES, etc. :param margin_mode: Either ISOLATED or CROSS :param wallet_balance: Amount of margin_mode in the wallet being used to trade Cross-Margin Mode: crossWalletBalance Isolated-Margin Mode: isolatedWalletBalance # * Not required by Gate or OKX :param mm_ex_1: :param upnl_ex_1: """ market = self.markets[pair] taker_fee_rate = market['taker'] mm_ratio, _ = self.get_maintenance_ratio_and_amt(pair, stake_amount) if self.trading_mode == TradingMode.FUTURES and self.margin_mode == MarginMode.ISOLATED: if market['inverse']: raise OperationalException( "Freqtrade does not yet support inverse contracts") value = wallet_balance / amount mm_ratio_taker = (mm_ratio + taker_fee_rate) if is_short: return (open_rate + value) / (1 + mm_ratio_taker) else: return (open_rate - value) / (1 - mm_ratio_taker) else: raise OperationalException( "Freqtrade only supports isolated futures for leverage trading") def get_maintenance_ratio_and_amt( self, pair: str, nominal_value: float, ) -> Tuple[float, Optional[float]]: """ Important: Must be fetching data from cached values as this is used by backtesting! :param pair: Market symbol :param nominal_value: The total trade amount in quote currency including leverage maintenance amount only on Binance :return: (maintenance margin ratio, maintenance amount) """
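The isolated-futures branch of dry_run_liquidation_price above is plain arithmetic on the maintenance-margin ratio and taker fee. A minimal standalone sketch of that formula with hypothetical numbers (the real method additionally applies the liquidation buffer in get_liquidation_price):

def isolated_liquidation_price(open_rate: float, amount: float,
                               wallet_balance: float, mm_ratio: float,
                               taker_fee_rate: float, is_short: bool) -> float:
    # Margin per unit of position size (collateral / amount in base currency).
    value = wallet_balance / amount
    mm_ratio_taker = mm_ratio + taker_fee_rate
    if is_short:
        return (open_rate + value) / (1 + mm_ratio_taker)
    return (open_rate - value) / (1 - mm_ratio_taker)

# Long 1 contract opened at 100 with 20 collateral, 0.5% maintenance margin
# ratio and 0.06% taker fee -> liquidation just above 80.
print(isolated_liquidation_price(100.0, 1.0, 20.0, 0.005, 0.0006, False))  # ~80.45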
if (self._config.get('runmode') in OPTIMIZE_MODES
7
2023-10-21 10:02:05+00:00
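For reference, the arithmetic in calculate_funding_fees above reduces to a sum of funding_rate * mark_price * amount over the trade window, sign-flipped for longs to match the live endpoints. A minimal pandas sketch with hypothetical illustration data:

from datetime import datetime, timezone
import pandas as pd

df = pd.DataFrame({
    'date': pd.date_range('2023-01-01', periods=3, freq='8h', tz='UTC'),
    'open_fund': [0.0001, 0.0002, -0.0001],   # funding rates per interval
    'open_mark': [100.0, 101.0, 102.0],       # mark prices at those intervals
})
amount, is_short = 2.0, False
open_date = datetime(2023, 1, 1, tzinfo=timezone.utc)
close_date = datetime(2023, 1, 2, tzinfo=timezone.utc)

window = df[(df['date'] >= open_date) & (df['date'] <= close_date)]
fees = (window['open_fund'] * window['open_mark'] * amount).sum()
# Negative result for a long with mostly positive rates: the position pays funding.
print(fees if is_short else -fees)  # ~ -0.04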
24k
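The tier walk in get_max_leverage can be checked against the two hypothetical tiers from the inline comment above; this sketch restates the same logic as a standalone function:

def max_leverage(pair_tiers, stake_amount):
    if stake_amount == 0:
        return pair_tiers[0]['maxLeverage']  # Max lev for lowest amount
    for i, tier in enumerate(pair_tiers):
        if i < len(pair_tiers) - 1:
            nxt = pair_tiers[i + 1]
            # Minimum stake needed to reach the next tier at its max leverage
            if nxt['minNotional'] / nxt['maxLeverage'] > stake_amount:
                return min(tier['maxNotional'] / stake_amount, tier['maxLeverage'])
        elif stake_amount <= tier['maxNotional']:
            return tier['maxLeverage']
    raise ValueError(f'stake {stake_amount} too high')

tiers = [
    {'minNotional': 0, 'maxNotional': 10000, 'maxLeverage': 75},
    {'minNotional': 10000, 'maxNotional': 50000, 'maxLeverage': 50},
]
print(max_leverage(tiers, 150))   # 66.66... = 10000 / 150, as in the comment
print(max_leverage(tiers, 100))   # 75 (below the 133.33 floor)
print(max_leverage(tiers, 500))   # 50 (falls into the second tier)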
yanzhh/HGERE
transformers/src/transformers/modeling_bert.py
[ { "identifier": "gelu", "path": "transformers/src/transformers/activations.py", "snippet": "def swish(x):\ndef _gelu_python(x):\ndef gelu_new(x):\ndef get_activation(activation_string):\nACT2FN = {\n \"relu\": F.relu,\n \"swish\": swish,\n \"gelu\": gelu,\n \"tanh\": F.tanh,\n \"gelu_new\...
import logging import math import os import torch import torch.nn.functional as F import pdb import re import numpy as np import tensorflow as tf import pdb from symbol import factor from tkinter import E from torch import nn from torch.nn import CrossEntropyLoss, MSELoss, BCEWithLogitsLoss from torch.nn.utils.rnn import pad_sequence from .modules import * from .activations import gelu, gelu_new, swish from .configuration_bert import BertConfig from .file_utils import add_start_docstrings, add_start_docstrings_to_callable from .modeling_utils import PreTrainedModel, prune_linear_layer
16,698
config_class = BertConfig pretrained_model_archive_map = BERT_PRETRAINED_MODEL_ARCHIVE_MAP load_tf_weights = load_tf_weights_in_bert base_model_prefix = "bert" def _init_weights(self, module): """ Initialize the weights """ if isinstance(module, (nn.Linear, nn.Embedding)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, BertLayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() BERT_START_DOCSTRING = r""" This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ BERT_INPUTS_DOCSTRING = r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using :class:`transformers.BertTokenizer`. See :func:`transformers.PreTrainedTokenizer.encode` and :func:`transformers.PreTrainedTokenizer.encode_plus` for details. `What are input IDs? <../glossary.html#input-ids>`__ attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. `What are attention masks? <../glossary.html#attention-mask>`__ token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1`` corresponds to a `sentence B` token `What are token type IDs? <../glossary.html#token-type-ids>`_ position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0, config.max_position_embeddings - 1]``. `What are position IDs? <../glossary.html#position-ids>`_ head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`): Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: :obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**. inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. 
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. """ @add_start_docstrings( "The bare Bert Model transformer outputting raw hidden-states without any specific head on top.", BERT_START_DOCSTRING, ) class BertModel(BertPreTrainedModel): """ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in `Attention is all you need`_ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. To behave as an decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration set to :obj:`True`; an :obj:`encoder_hidden_states` is expected as an input to the forward pass. .. _`Attention is all you need`: https://arxiv.org/abs/1706.03762 """ def __init__(self, config): super().__init__(config) self.config = config self.embeddings = BertEmbeddings(config) self.encoder = BertEncoder(config) self.pooler = BertPooler(config) self.init_weights() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads)
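The index arithmetic inside prune_heads above can be seen in isolation: zeroing rows of a (num_heads, head_size) mask and flattening it yields the indices of the projection units that survive pruning. A small sketch with hypothetical dimensions:

import torch

num_attention_heads, attention_head_size = 12, 64
heads_to_prune = {3, 7}

mask = torch.ones(num_attention_heads, attention_head_size)
for head in heads_to_prune:
    mask[head] = 0                       # zero out the rows of pruned heads
mask = mask.view(-1).contiguous().eq(1)  # flatten to a boolean keep-mask
index = torch.arange(len(mask))[mask].long()

# 10 of 12 heads survive -> 640 of the 768 projection rows are kept,
# which is what prune_linear_layer uses to shrink query/key/value.
print(index.shape)  # torch.Size([640])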
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch BERT model. """ # from .modules import BiaffineSpanRepr, BiaffineRelationCls, BiafEncoder, \ # CatEncoder, max_pool, Tetrafine, BiaffineMessagePasser, \ # LinearMessegePasser, CPDTrilinear, CatEncoderCross, \ # bilinear_classifier, BiafCrossEncoder logger = logging.getLogger(__name__) BERT_PRETRAINED_MODEL_ARCHIVE_MAP = { "bert-base-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-pytorch_model.bin", "bert-large-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-pytorch_model.bin", "bert-base-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-pytorch_model.bin", "bert-large-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-pytorch_model.bin", "bert-base-multilingual-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-pytorch_model.bin", "bert-base-multilingual-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-pytorch_model.bin", "bert-base-chinese": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-pytorch_model.bin", "bert-base-german-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-cased-pytorch_model.bin", "bert-large-uncased-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-pytorch_model.bin", "bert-large-cased-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-pytorch_model.bin", "bert-large-uncased-whole-word-masking-finetuned-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-pytorch_model.bin", "bert-large-cased-whole-word-masking-finetuned-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-pytorch_model.bin", "bert-base-cased-finetuned-mrpc": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-pytorch_model.bin", "bert-base-german-dbmdz-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-dbmdz-cased-pytorch_model.bin", "bert-base-german-dbmdz-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-dbmdz-uncased-pytorch_model.bin", "bert-base-japanese": "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-pytorch_model.bin", "bert-base-japanese-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-whole-word-masking-pytorch_model.bin", "bert-base-japanese-char": "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-char-pytorch_model.bin", "bert-base-japanese-char-whole-word-masking": 
"https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-char-whole-word-masking-pytorch_model.bin", "bert-base-finnish-cased-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/TurkuNLP/bert-base-finnish-cased-v1/pytorch_model.bin", "bert-base-finnish-uncased-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/TurkuNLP/bert-base-finnish-uncased-v1/pytorch_model.bin", "bert-base-dutch-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/wietsedv/bert-base-dutch-cased/pytorch_model.bin", } def load_tf_weights_in_bert(model, config, tf_checkpoint_path): """ Load tf checkpoints in a pytorch model. """ try: except ImportError: logger.error( "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions." ) raise tf_path = os.path.abspath(tf_checkpoint_path) logger.info("Converting TensorFlow checkpoint from {}".format(tf_path)) # Load weights from TF model init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for name, shape in init_vars: logger.info("Loading TF weight {} with shape {}".format(name, shape)) array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array) for name, array in zip(names, arrays): name = name.split("/") # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v # which are not required for using pretrained model if any( n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"] for n in name ): logger.info("Skipping {}".format("/".join(name))) continue pointer = model for m_name in name: if re.fullmatch(r"[A-Za-z]+_\d+", m_name): scope_names = re.split(r"_(\d+)", m_name) else: scope_names = [m_name] if scope_names[0] == "kernel" or scope_names[0] == "gamma": pointer = getattr(pointer, "weight") elif scope_names[0] == "output_bias" or scope_names[0] == "beta": pointer = getattr(pointer, "bias") elif scope_names[0] == "output_weights": pointer = getattr(pointer, "weight") elif scope_names[0] == "squad": pointer = getattr(pointer, "classifier") else: try: pointer = getattr(pointer, scope_names[0]) except AttributeError: logger.info("Skipping {}".format("/".join(name))) continue if len(scope_names) >= 2: num = int(scope_names[1]) pointer = pointer[num] if m_name[-11:] == "_embeddings": pointer = getattr(pointer, "weight") elif m_name == "kernel": array = np.transpose(array) try: assert pointer.shape == array.shape except AssertionError as e: e.args += (pointer.shape, array.shape) raise logger.info("Initialize PyTorch weight {}".format(name)) pointer.data = torch.from_numpy(array) return model def mish(x): return x * torch.tanh(nn.functional.softplus(x)) ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish, "gelu_new": gelu_new, "mish": mish} BertLayerNorm = torch.nn.LayerNorm class BertEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings. 
""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None): if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] device = input_ids.device if input_ids is not None else inputs_embeds.device if position_ids is None: position_ids = torch.arange(seq_length, dtype=torch.long, device=device) position_ids = position_ids.unsqueeze(0).expand(input_shape) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) position_embeddings = self.position_embeddings(position_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + position_embeddings + token_type_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class BertSelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0: raise ValueError( "The hidden size (%d) is not a multiple of the number of attention " "heads (%d)" % (config.hidden_size, config.num_attention_heads) ) self.output_attentions = config.output_attentions self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, ): mixed_query_layer = self.query(hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. if encoder_hidden_states is not None: mixed_key_layer = self.key(encoder_hidden_states) mixed_value_layer = self.value(encoder_hidden_states) attention_mask = encoder_attention_mask else: mixed_key_layer = self.key(hidden_states) mixed_value_layer = self.value(hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. 
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in BertModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.Softmax(dim=-1)(attention_scores) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if self.output_attentions else (context_layer,) return outputs class BertSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BertAttention(nn.Module): def __init__(self, config): super().__init__() self.self = BertSelfAttention(config) self.output = BertSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return mask = torch.ones(self.self.num_attention_heads, self.self.attention_head_size) heads = set(heads) - self.pruned_heads # Convert to set and remove already pruned heads for head in heads: # Compute how many pruned heads are before the head and move the index accordingly head = head - sum(1 if h < head else 0 for h in self.pruned_heads) mask[head] = 0 mask = mask.view(-1).contiguous().eq(1) index = torch.arange(len(mask))[mask].long() # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, ): self_outputs = self.self( hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask ) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs class BertIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states): hidden_states = 
self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class BertOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BertLayer(nn.Module): def __init__(self, config): super().__init__() self.attention = BertAttention(config) self.is_decoder = config.is_decoder if self.is_decoder: self.crossattention = BertAttention(config) self.intermediate = BertIntermediate(config) self.output = BertOutput(config) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, ): self_attention_outputs = self.attention(hidden_states, attention_mask, head_mask) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights if self.is_decoder and encoder_hidden_states is not None: cross_attention_outputs = self.crossattention( attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask ) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:] # add cross attentions if we output attention weights intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) outputs = (layer_output,) + outputs return outputs class BertEncoder(nn.Module): def __init__(self, config): super().__init__() self.output_attentions = config.output_attentions self.output_hidden_states = config.output_hidden_states self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)]) try: self.use_full_layer = config.use_full_layer except: self.use_full_layer = -1 def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, full_attention_mask=None, ): all_hidden_states = () all_attentions = () for i, layer_module in enumerate(self.layer): if i==self.use_full_layer: attention_mask = full_attention_mask if self.output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module( hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask ) hidden_states = layer_outputs[0] if self.output_attentions: all_attentions = all_attentions + (layer_outputs[1],) # Add last layer if self.output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) outputs = (hidden_states,) if self.output_hidden_states: outputs = outputs + (all_hidden_states,) if self.output_attentions: outputs = outputs + (all_attentions,) return outputs # last-layer hidden state, (all hidden states), (all attentions) class BertPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states): # We "pool" the model by simply taking the hidden state corresponding # to the first token. 
first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class BertPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class BertLMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = BertPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states class BertOnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = BertLMPredictionHead(config) def forward(self, sequence_output): prediction_scores = self.predictions(sequence_output) return prediction_scores class BertOnlyNSPHead(nn.Module): def __init__(self, config): super().__init__() self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, pooled_output): seq_relationship_score = self.seq_relationship(pooled_output) return seq_relationship_score class BertPreTrainingHeads(nn.Module): def __init__(self, config): super().__init__() self.predictions = BertLMPredictionHead(config) self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, sequence_output, pooled_output): prediction_scores = self.predictions(sequence_output) seq_relationship_score = self.seq_relationship(pooled_output) return prediction_scores, seq_relationship_score class BertPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = BertConfig pretrained_model_archive_map = BERT_PRETRAINED_MODEL_ARCHIVE_MAP load_tf_weights = load_tf_weights_in_bert base_model_prefix = "bert" def _init_weights(self, module): """ Initialize the weights """ if isinstance(module, (nn.Linear, nn.Embedding)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, BertLayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() BERT_START_DOCSTRING = r""" This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model. 
Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ BERT_INPUTS_DOCSTRING = r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using :class:`transformers.BertTokenizer`. See :func:`transformers.PreTrainedTokenizer.encode` and :func:`transformers.PreTrainedTokenizer.encode_plus` for details. `What are input IDs? <../glossary.html#input-ids>`__ attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. `What are attention masks? <../glossary.html#attention-mask>`__ token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1`` corresponds to a `sentence B` token `What are token type IDs? <../glossary.html#token-type-ids>`_ position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0, config.max_position_embeddings - 1]``. `What are position IDs? <../glossary.html#position-ids>`_ head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`): Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: :obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**. inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. 
""" @add_start_docstrings( "The bare Bert Model transformer outputting raw hidden-states without any specific head on top.", BERT_START_DOCSTRING, ) class BertModel(BertPreTrainedModel): """ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in `Attention is all you need`_ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. To behave as an decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration set to :obj:`True`; an :obj:`encoder_hidden_states` is expected as an input to the forward pass. .. _`Attention is all you need`: https://arxiv.org/abs/1706.03762 """ def __init__(self, config): super().__init__(config) self.config = config self.embeddings = BertEmbeddings(config) self.encoder = BertEncoder(config) self.pooler = BertPooler(config) self.init_weights() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_callable(BERT_INPUTS_DOCSTRING)
3
2023-10-15 02:31:09+00:00
24k
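The record above centers on BERT's input conventions (attention_mask, token_type_ids). As a quick illustration of those conventions, here is a minimal, self-contained sketch; the token ids are made-up stand-ins for real tokenizer output, with 0 playing the role of [PAD].

```python
import torch

# Hypothetical token ids for two sequences padded to length 6; id 0 stands in
# for [PAD]. Real ids would come from transformers.BertTokenizer.
input_ids = torch.tensor([
    [101, 7592, 2088,  102,    0,    0],
    [101, 2023, 2003, 1037, 3231,  102],
])

# Docstring convention: 1 for tokens that are NOT MASKED, 0 for MASKED (padding).
attention_mask = (input_ids != 0).long()

# token_type_ids: 0 marks "sentence A" tokens; a single-segment batch is all zeros.
token_type_ids = torch.zeros_like(input_ids)

print(attention_mask)
# tensor([[1, 1, 1, 1, 0, 0],
#         [1, 1, 1, 1, 1, 1]])
```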
akashgreninja/GreSec
backend/venv/lib/python3.10/site-packages/numpy/array_api/linalg.py
[ { "identifier": "_floating_dtypes", "path": "backend/venv/lib/python3.10/site-packages/numpy/array_api/_dtypes.py", "snippet": "def _result_type(type1, type2):" }, { "identifier": "reshape", "path": "backend/venv/lib/python3.10/site-packages/numpy/array_api/_manipulation_functions.py", "...
from ._dtypes import ( _floating_dtypes, _numeric_dtypes, float32, float64, complex64, complex128 ) from ._manipulation_functions import reshape from ._array_object import Array from ..core.numeric import normalize_axis_tuple from typing import TYPE_CHECKING from ._typing import Literal, Optional, Sequence, Tuple, Union, Dtype from typing import NamedTuple from ..linalg.linalg import (_makearray, _assert_stacked_2d, _assert_stacked_square, _commonType, isComplexType, get_linalg_error_extobj, _raise_linalgerror_singular) from ..linalg import _umath_linalg import numpy.linalg import numpy as np
14,859
""" Array API compatible wrapper for :py:func:`np.linalg.qr <numpy.linalg.qr>`. See its docstring for more information. """ # Note: the restriction to floating-point dtypes only is different from # np.linalg.qr. if x.dtype not in _floating_dtypes: raise TypeError('Only floating-point dtypes are allowed in qr') # Note: the return type here is a namedtuple, which is different from # np.linalg.qr, which only returns a tuple. return QRResult(*map(Array._new, np.linalg.qr(x._array, mode=mode))) def slogdet(x: Array, /) -> SlogdetResult: """ Array API compatible wrapper for :py:func:`np.linalg.slogdet <numpy.linalg.slogdet>`. See its docstring for more information. """ # Note: the restriction to floating-point dtypes only is different from # np.linalg.slogdet. if x.dtype not in _floating_dtypes: raise TypeError('Only floating-point dtypes are allowed in slogdet') # Note: the return type here is a namedtuple, which is different from # np.linalg.slogdet, which only returns a tuple. return SlogdetResult(*map(Array._new, np.linalg.slogdet(x._array))) # Note: unlike np.linalg.solve, the array API solve() only accepts x2 as a # vector when it is exactly 1-dimensional. All other cases treat x2 as a stack # of matrices. The np.linalg.solve behavior of allowing stacks of both # matrices and vectors is ambiguous c.f. # https://github.com/numpy/numpy/issues/15349 and # https://github.com/data-apis/array-api/issues/285. # To workaround this, the below is the code from np.linalg.solve except # only calling solve1 in the exactly 1D case. def _solve(a, b): a, _ = _makearray(a) _assert_stacked_2d(a) _assert_stacked_square(a) b, wrap = _makearray(b) t, result_t = _commonType(a, b) # This part is different from np.linalg.solve if b.ndim == 1: gufunc = _umath_linalg.solve1 else: gufunc = _umath_linalg.solve # This does nothing currently but is left in because it will be relevant # when complex dtype support is added to the spec in 2022. signature = 'DD->D' if isComplexType(t) else 'dd->d' extobj = get_linalg_error_extobj(_raise_linalgerror_singular) r = gufunc(a, b, signature=signature, extobj=extobj) return wrap(r.astype(result_t, copy=False)) def solve(x1: Array, x2: Array, /) -> Array: """ Array API compatible wrapper for :py:func:`np.linalg.solve <numpy.linalg.solve>`. See its docstring for more information. """ # Note: the restriction to floating-point dtypes only is different from # np.linalg.solve. if x1.dtype not in _floating_dtypes or x2.dtype not in _floating_dtypes: raise TypeError('Only floating-point dtypes are allowed in solve') return Array._new(_solve(x1._array, x2._array)) def svd(x: Array, /, *, full_matrices: bool = True) -> SVDResult: """ Array API compatible wrapper for :py:func:`np.linalg.svd <numpy.linalg.svd>`. See its docstring for more information. """ # Note: the restriction to floating-point dtypes only is different from # np.linalg.svd. if x.dtype not in _floating_dtypes: raise TypeError('Only floating-point dtypes are allowed in svd') # Note: the return type here is a namedtuple, which is different from # np.svd, which only returns a tuple. return SVDResult(*map(Array._new, np.linalg.svd(x._array, full_matrices=full_matrices))) # Note: svdvals is not in NumPy (but it is in SciPy). It is equivalent to # np.linalg.svd(compute_uv=False). 
def svdvals(x: Array, /) -> Union[Array, Tuple[Array, ...]]: if x.dtype not in _floating_dtypes: raise TypeError('Only floating-point dtypes are allowed in svdvals') return Array._new(np.linalg.svd(x._array, compute_uv=False)) # Note: tensordot is the numpy top-level namespace but not in np.linalg # Note: axes must be a tuple, unlike np.tensordot where it can be an array or array-like. def tensordot(x1: Array, x2: Array, /, *, axes: Union[int, Tuple[Sequence[int], Sequence[int]]] = 2) -> Array: # Note: the restriction to numeric dtypes only is different from # np.tensordot. if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes: raise TypeError('Only numeric dtypes are allowed in tensordot') return Array._new(np.tensordot(x1._array, x2._array, axes=axes)) # Note: trace is the numpy top-level namespace, not np.linalg def trace(x: Array, /, *, offset: int = 0, dtype: Optional[Dtype] = None) -> Array: """ Array API compatible wrapper for :py:func:`np.trace <numpy.trace>`. See its docstring for more information. """ if x.dtype not in _numeric_dtypes: raise TypeError('Only numeric dtypes are allowed in trace') # Note: trace() works the same as sum() and prod() (see # _statistical_functions.py) if dtype is None: if x.dtype == float32:
from __future__ import annotations if TYPE_CHECKING: class EighResult(NamedTuple): eigenvalues: Array eigenvectors: Array class QRResult(NamedTuple): Q: Array R: Array class SlogdetResult(NamedTuple): sign: Array logabsdet: Array class SVDResult(NamedTuple): U: Array S: Array Vh: Array # Note: the inclusion of the upper keyword is different from # np.linalg.cholesky, which does not have it. def cholesky(x: Array, /, *, upper: bool = False) -> Array: """ Array API compatible wrapper for :py:func:`np.linalg.cholesky <numpy.linalg.cholesky>`. See its docstring for more information. """ # Note: the restriction to floating-point dtypes only is different from # np.linalg.cholesky. if x.dtype not in _floating_dtypes: raise TypeError('Only floating-point dtypes are allowed in cholesky') L = np.linalg.cholesky(x._array) if upper: return Array._new(L).mT return Array._new(L) # Note: cross is the numpy top-level namespace, not np.linalg def cross(x1: Array, x2: Array, /, *, axis: int = -1) -> Array: """ Array API compatible wrapper for :py:func:`np.cross <numpy.cross>`. See its docstring for more information. """ if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes: raise TypeError('Only numeric dtypes are allowed in cross') # Note: this is different from np.cross(), which broadcasts if x1.shape != x2.shape: raise ValueError('x1 and x2 must have the same shape') if x1.ndim == 0: raise ValueError('cross() requires arrays of dimension at least 1') # Note: this is different from np.cross(), which allows dimension 2 if x1.shape[axis] != 3: raise ValueError('cross() dimension must equal 3') return Array._new(np.cross(x1._array, x2._array, axis=axis)) def det(x: Array, /) -> Array: """ Array API compatible wrapper for :py:func:`np.linalg.det <numpy.linalg.det>`. See its docstring for more information. """ # Note: the restriction to floating-point dtypes only is different from # np.linalg.det. if x.dtype not in _floating_dtypes: raise TypeError('Only floating-point dtypes are allowed in det') return Array._new(np.linalg.det(x._array)) # Note: diagonal is the numpy top-level namespace, not np.linalg def diagonal(x: Array, /, *, offset: int = 0) -> Array: """ Array API compatible wrapper for :py:func:`np.diagonal <numpy.diagonal>`. See its docstring for more information. """ # Note: diagonal always operates on the last two axes, whereas np.diagonal # operates on the first two axes by default return Array._new(np.diagonal(x._array, offset=offset, axis1=-2, axis2=-1)) def eigh(x: Array, /) -> EighResult: """ Array API compatible wrapper for :py:func:`np.linalg.eigh <numpy.linalg.eigh>`. See its docstring for more information. """ # Note: the restriction to floating-point dtypes only is different from # np.linalg.eigh. if x.dtype not in _floating_dtypes: raise TypeError('Only floating-point dtypes are allowed in eigh') # Note: the return type here is a namedtuple, which is different from # np.eigh, which only returns a tuple. return EighResult(*map(Array._new, np.linalg.eigh(x._array))) def eigvalsh(x: Array, /) -> Array: """ Array API compatible wrapper for :py:func:`np.linalg.eigvalsh <numpy.linalg.eigvalsh>`. See its docstring for more information. """ # Note: the restriction to floating-point dtypes only is different from # np.linalg.eigvalsh. 
if x.dtype not in _floating_dtypes: raise TypeError('Only floating-point dtypes are allowed in eigvalsh') return Array._new(np.linalg.eigvalsh(x._array)) def inv(x: Array, /) -> Array: """ Array API compatible wrapper for :py:func:`np.linalg.inv <numpy.linalg.inv>`. See its docstring for more information. """ # Note: the restriction to floating-point dtypes only is different from # np.linalg.inv. if x.dtype not in _floating_dtypes: raise TypeError('Only floating-point dtypes are allowed in inv') return Array._new(np.linalg.inv(x._array)) # Note: matmul is the numpy top-level namespace but not in np.linalg def matmul(x1: Array, x2: Array, /) -> Array: """ Array API compatible wrapper for :py:func:`np.matmul <numpy.matmul>`. See its docstring for more information. """ # Note: the restriction to numeric dtypes only is different from # np.matmul. if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes: raise TypeError('Only numeric dtypes are allowed in matmul') return Array._new(np.matmul(x1._array, x2._array)) # Note: the name here is different from norm(). The array API norm is split # into matrix_norm and vector_norm(). # The type for ord should be Optional[Union[int, float, Literal[np.inf, # -np.inf, 'fro', 'nuc']]], but Literal does not support floating-point # literals. def matrix_norm(x: Array, /, *, keepdims: bool = False, ord: Optional[Union[int, float, Literal['fro', 'nuc']]] = 'fro') -> Array: """ Array API compatible wrapper for :py:func:`np.linalg.norm <numpy.linalg.norm>`. See its docstring for more information. """ # Note: the restriction to floating-point dtypes only is different from # np.linalg.norm. if x.dtype not in _floating_dtypes: raise TypeError('Only floating-point dtypes are allowed in matrix_norm') return Array._new(np.linalg.norm(x._array, axis=(-2, -1), keepdims=keepdims, ord=ord)) def matrix_power(x: Array, n: int, /) -> Array: """ Array API compatible wrapper for :py:func:`np.matrix_power <numpy.matrix_power>`. See its docstring for more information. """ # Note: the restriction to floating-point dtypes only is different from # np.linalg.matrix_power. if x.dtype not in _floating_dtypes: raise TypeError('Only floating-point dtypes are allowed for the first argument of matrix_power') # np.matrix_power already checks if n is an integer return Array._new(np.linalg.matrix_power(x._array, n)) # Note: the keyword argument name rtol is different from np.linalg.matrix_rank def matrix_rank(x: Array, /, *, rtol: Optional[Union[float, Array]] = None) -> Array: """ Array API compatible wrapper for :py:func:`np.matrix_rank <numpy.matrix_rank>`. See its docstring for more information. """ # Note: this is different from np.linalg.matrix_rank, which supports 1 # dimensional arrays. if x.ndim < 2: raise np.linalg.LinAlgError("1-dimensional array given. Array must be at least two-dimensional") S = np.linalg.svd(x._array, compute_uv=False) if rtol is None: tol = S.max(axis=-1, keepdims=True) * max(x.shape[-2:]) * np.finfo(S.dtype).eps else: if isinstance(rtol, Array): rtol = rtol._array # Note: this is different from np.linalg.matrix_rank, which does not multiply # the tolerance by the largest singular value. tol = S.max(axis=-1, keepdims=True)*np.asarray(rtol)[..., np.newaxis] return Array._new(np.count_nonzero(S > tol, axis=-1)) # Note: this function is new in the array API spec. Unlike transpose, it only # transposes the last two axes. 
def matrix_transpose(x: Array, /) -> Array: if x.ndim < 2: raise ValueError("x must be at least 2-dimensional for matrix_transpose") return Array._new(np.swapaxes(x._array, -1, -2)) # Note: outer is the numpy top-level namespace, not np.linalg def outer(x1: Array, x2: Array, /) -> Array: """ Array API compatible wrapper for :py:func:`np.outer <numpy.outer>`. See its docstring for more information. """ # Note: the restriction to numeric dtypes only is different from # np.outer. if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes: raise TypeError('Only numeric dtypes are allowed in outer') # Note: the restriction to only 1-dim arrays is different from np.outer if x1.ndim != 1 or x2.ndim != 1: raise ValueError('The input arrays to outer must be 1-dimensional') return Array._new(np.outer(x1._array, x2._array)) # Note: the keyword argument name rtol is different from np.linalg.pinv def pinv(x: Array, /, *, rtol: Optional[Union[float, Array]] = None) -> Array: """ Array API compatible wrapper for :py:func:`np.linalg.pinv <numpy.linalg.pinv>`. See its docstring for more information. """ # Note: the restriction to floating-point dtypes only is different from # np.linalg.pinv. if x.dtype not in _floating_dtypes: raise TypeError('Only floating-point dtypes are allowed in pinv') # Note: this is different from np.linalg.pinv, which does not multiply the # default tolerance by max(M, N). if rtol is None: rtol = max(x.shape[-2:]) * np.finfo(x.dtype).eps return Array._new(np.linalg.pinv(x._array, rcond=rtol)) def qr(x: Array, /, *, mode: Literal['reduced', 'complete'] = 'reduced') -> QRResult: """ Array API compatible wrapper for :py:func:`np.linalg.qr <numpy.linalg.qr>`. See its docstring for more information. """ # Note: the restriction to floating-point dtypes only is different from # np.linalg.qr. if x.dtype not in _floating_dtypes: raise TypeError('Only floating-point dtypes are allowed in qr') # Note: the return type here is a namedtuple, which is different from # np.linalg.qr, which only returns a tuple. return QRResult(*map(Array._new, np.linalg.qr(x._array, mode=mode))) def slogdet(x: Array, /) -> SlogdetResult: """ Array API compatible wrapper for :py:func:`np.linalg.slogdet <numpy.linalg.slogdet>`. See its docstring for more information. """ # Note: the restriction to floating-point dtypes only is different from # np.linalg.slogdet. if x.dtype not in _floating_dtypes: raise TypeError('Only floating-point dtypes are allowed in slogdet') # Note: the return type here is a namedtuple, which is different from # np.linalg.slogdet, which only returns a tuple. return SlogdetResult(*map(Array._new, np.linalg.slogdet(x._array))) # Note: unlike np.linalg.solve, the array API solve() only accepts x2 as a # vector when it is exactly 1-dimensional. All other cases treat x2 as a stack # of matrices. The np.linalg.solve behavior of allowing stacks of both # matrices and vectors is ambiguous c.f. # https://github.com/numpy/numpy/issues/15349 and # https://github.com/data-apis/array-api/issues/285. # To workaround this, the below is the code from np.linalg.solve except # only calling solve1 in the exactly 1D case. 
def _solve(a, b): a, _ = _makearray(a) _assert_stacked_2d(a) _assert_stacked_square(a) b, wrap = _makearray(b) t, result_t = _commonType(a, b) # This part is different from np.linalg.solve if b.ndim == 1: gufunc = _umath_linalg.solve1 else: gufunc = _umath_linalg.solve # This does nothing currently but is left in because it will be relevant # when complex dtype support is added to the spec in 2022. signature = 'DD->D' if isComplexType(t) else 'dd->d' extobj = get_linalg_error_extobj(_raise_linalgerror_singular) r = gufunc(a, b, signature=signature, extobj=extobj) return wrap(r.astype(result_t, copy=False)) def solve(x1: Array, x2: Array, /) -> Array: """ Array API compatible wrapper for :py:func:`np.linalg.solve <numpy.linalg.solve>`. See its docstring for more information. """ # Note: the restriction to floating-point dtypes only is different from # np.linalg.solve. if x1.dtype not in _floating_dtypes or x2.dtype not in _floating_dtypes: raise TypeError('Only floating-point dtypes are allowed in solve') return Array._new(_solve(x1._array, x2._array)) def svd(x: Array, /, *, full_matrices: bool = True) -> SVDResult: """ Array API compatible wrapper for :py:func:`np.linalg.svd <numpy.linalg.svd>`. See its docstring for more information. """ # Note: the restriction to floating-point dtypes only is different from # np.linalg.svd. if x.dtype not in _floating_dtypes: raise TypeError('Only floating-point dtypes are allowed in svd') # Note: the return type here is a namedtuple, which is different from # np.svd, which only returns a tuple. return SVDResult(*map(Array._new, np.linalg.svd(x._array, full_matrices=full_matrices))) # Note: svdvals is not in NumPy (but it is in SciPy). It is equivalent to # np.linalg.svd(compute_uv=False). def svdvals(x: Array, /) -> Union[Array, Tuple[Array, ...]]: if x.dtype not in _floating_dtypes: raise TypeError('Only floating-point dtypes are allowed in svdvals') return Array._new(np.linalg.svd(x._array, compute_uv=False)) # Note: tensordot is the numpy top-level namespace but not in np.linalg # Note: axes must be a tuple, unlike np.tensordot where it can be an array or array-like. def tensordot(x1: Array, x2: Array, /, *, axes: Union[int, Tuple[Sequence[int], Sequence[int]]] = 2) -> Array: # Note: the restriction to numeric dtypes only is different from # np.tensordot. if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes: raise TypeError('Only numeric dtypes are allowed in tensordot') return Array._new(np.tensordot(x1._array, x2._array, axes=axes)) # Note: trace is the numpy top-level namespace, not np.linalg def trace(x: Array, /, *, offset: int = 0, dtype: Optional[Dtype] = None) -> Array: """ Array API compatible wrapper for :py:func:`np.trace <numpy.trace>`. See its docstring for more information. """ if x.dtype not in _numeric_dtypes: raise TypeError('Only numeric dtypes are allowed in trace') # Note: trace() works the same as sum() and prod() (see # _statistical_functions.py) if dtype is None: if x.dtype == float32:
dtype = float64
0
2023-10-23 18:09:28+00:00
24k
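The record above wraps np.linalg for NumPy's experimental array API namespace, tightening dtype checks and returning namedtuples. A small usage sketch, assuming numpy.array_api is available (it shipped as experimental in NumPy 1.22 through 1.26 and was removed in NumPy 2.0):

```python
import numpy.array_api as xp  # experimental; emits a UserWarning on import

x = xp.asarray([[4.0, 2.0], [2.0, 3.0]], dtype=xp.float64)

# qr and slogdet return namedtuples (QRResult, SlogdetResult) rather than
# the plain tuples returned by np.linalg.
res = xp.linalg.qr(x)
print(res.Q.shape, res.R.shape)  # (2, 2) (2, 2)

sign, logabsdet = xp.linalg.slogdet(x)

# Non-floating dtypes are rejected, a deliberate difference from np.linalg:
try:
    xp.linalg.qr(xp.asarray([[1, 0], [0, 1]]))
except TypeError as err:
    print(err)  # Only floating-point dtypes are allowed in qr
```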
zju3dv/nr_in_a_room
test/batch_optim_pano.py
[ { "identifier": "RoomOptimizer", "path": "optim/room_optimizer.py", "snippet": "class RoomOptimizer:\n def __init__(\n self,\n scale_factor: float,\n bg_scale_factor: float,\n bg_scene_center: list,\n img_wh: list,\n near: float,\n far: float,\n ...
from posix import listdir from PIL import Image from tqdm import tqdm from omegaconf import OmegaConf from optim.room_optimizer import RoomOptimizer from optim.misc_utils import read_testing_config from utils.util import list_dir, read_json import sys import os import torch import numpy as np import os.path as osp
14,890
bbox_enlarge=0.1, optimize_option=[ "keypoint_mask", "photometric_loss", # "perceptual_loss", "z_axis_align_loss", "object_room_wall_attach", "object_room_floor_attach", "physical_violation", # "physical_violation_delayed_start", "object_object_attach", "viewing_constraint", # "optimize_exposure", "regenerate_relation_during_test", # "visualize_pred", # "print_loss_dict", ], ) # room_optimizer.set_sampling_mask_from_seg( # seg_mask=None, # seg_mask_path=config.seg_mask_path, # # add_noise_to_seg=0, # add_noise_to_seg=5, # dilate mask # convert_seg_mask_to_box_mask=True, # # convert_seg_mask_to_box_mask=False, # ) # if "obj_prediction_json" in config: # room_optimizer.set_initial_pose_from_prediction(config["obj_prediction_json"]) # else: # room_optimizer.set_initial_object_poses_from_scene_meta() # room_optimizer.generate_relation() # room_optimizer.optimize(input_rgb) return room_optimizer def main(config): scene_name = config["scene_name"] pred_src_dir = config["pred_src_dir"] pred_src_scene_dir = osp.join("data/object_prediction", pred_src_dir, scene_name) print("Using prediction from", pred_src_scene_dir) # currently only support multi_lights multi_case_dirs = list_dir(pred_src_scene_dir) print("Find cases", multi_case_dirs) # prepare room optimizer and backup config room_optimizer = prepare_room_optimizer( config, f"data/{scene_name}/scene/full/data.json", # also load scene info json, # which can be an empty placeholder in the future ) config["optimize_option"] = room_optimizer.optimize_option OmegaConf.save( config=config, f=os.path.join(room_optimizer.output_path, "optim_config_full.json"), ) output_path_base = room_optimizer.output_path for curr_case in tqdm(multi_case_dirs): items = list_dir(osp.join(pred_src_scene_dir, curr_case)) for item in tqdm(items): item_dir = osp.join(pred_src_scene_dir, curr_case, item) print("Working on", item_dir) pred_json = osp.join(item_dir, "pred.json") active_instance_id = list(map(int, list(read_json(pred_json).keys()))) active_instance_id = [x for x in active_instance_id if x > 0] active_instance_id += [0] # reset optimizer state room_optimizer.reset_active_instance_id(active_instance_id) room_optimizer.reset_optimizable_parameters() room_optimizer.set_output_path( output_path_base, f"{curr_case}/{item}", with_timestamp=False ) src_item_dir = ( # f"data/{scene_name}/scene_multi_lights/{curr_case}/{item}/" f"data/{scene_name}/scene_custom_arrange_multi_lights/{curr_case}/{item}/" ) # TODO: seg should be provided by object detector # seg_mask_path = f"{src_item_dir}/seg.png" seg_mask_path = osp.join(item_dir, "seg.png") room_optimizer.set_sampling_mask_from_seg( seg_mask_path=seg_mask_path, add_noise_to_seg=5, # dilate mask convert_seg_mask_to_box_mask=False, ) room_optimizer.set_initial_pose_from_prediction(pred_json) room_optimizer.generate_relation() image_path = f"{src_item_dir}/rgb.png" print(image_path) img_wh = config.img_wh # read image input_rgb = Image.open(image_path) input_rgb = input_rgb.resize(img_wh, Image.LANCZOS) input_rgb = np.array(input_rgb) input_rgb = torch.from_numpy(input_rgb).float() / 255 # (H, W, 3) pose = np.array( read_json(f"{src_item_dir}/data.json")["camera"]["cam3d2world"] ).reshape(4, 4) room_optimizer.optimize(input_rgb, pose=pose) if __name__ == "__main__": """ Usage: python test/batch_optim_pano.py config=test/config/ig_bedroom.yml "img_wh=[320,180]" pred_src_dir=DeepPano_wo_relation prefix=dbg_bedroom_batch """
os.environ["OMP_NUM_THREADS"] = "1" # noqa os.environ["MKL_NUM_THREADS"] = "1" # noqa sys.path.append(".") # noqa def prepare_room_optimizer(config, scene_info_json_path): active_instance_id = config.active_instance_id dataset_config = config.dataset_config["dataset"] bg_scale_factor = 1 bg_scene_center = [0, 0, 0] if config.bg_dataset_config != "": bg_dataset_config = config.bg_dataset_config["dataset"] bg_scale_factor = bg_dataset_config["scale_factor"] bg_scene_center = bg_dataset_config["scene_center"] # intialize room optimizer room_optimizer = RoomOptimizer( scene_info_json_path=scene_info_json_path, scale_factor=dataset_config["scale_factor"], scale_factor_dict=dataset_config.get("scale_factor_dict", {}), bg_scale_factor=bg_scale_factor, bg_scene_center=bg_scene_center, img_wh=config.img_wh, near=0.3, far=10.0, N_samples=64, N_importance=128, chunk=config.chunk, model_ckpt_path_dict=config.ckpt_path_dict, # relation_info=relation_info, relation_info={}, output_path="debug", prefix=config.prefix, active_instance_id=active_instance_id, lr=1e-2, # lr=5e-2, N_optim_step=500, adjust_lr_per_step=0, # optim_batch_size=1024, optim_batch_size=2048, # optim_batch_size=4096, # use_amp=False, use_amp=True, optimize_light_env=True, use_light_from_image_attr=True, optimize_appearance_code=config.get("optimize_appearance_code", False), mask_per_object=False, bbox_ray_intersect=True, bbox_enlarge=0.1, optimize_option=[ "keypoint_mask", "photometric_loss", # "perceptual_loss", "z_axis_align_loss", "object_room_wall_attach", "object_room_floor_attach", "physical_violation", # "physical_violation_delayed_start", "object_object_attach", "viewing_constraint", # "optimize_exposure", "regenerate_relation_during_test", # "visualize_pred", # "print_loss_dict", ], ) # room_optimizer.set_sampling_mask_from_seg( # seg_mask=None, # seg_mask_path=config.seg_mask_path, # # add_noise_to_seg=0, # add_noise_to_seg=5, # dilate mask # convert_seg_mask_to_box_mask=True, # # convert_seg_mask_to_box_mask=False, # ) # if "obj_prediction_json" in config: # room_optimizer.set_initial_pose_from_prediction(config["obj_prediction_json"]) # else: # room_optimizer.set_initial_object_poses_from_scene_meta() # room_optimizer.generate_relation() # room_optimizer.optimize(input_rgb) return room_optimizer def main(config): scene_name = config["scene_name"] pred_src_dir = config["pred_src_dir"] pred_src_scene_dir = osp.join("data/object_prediction", pred_src_dir, scene_name) print("Using prediction from", pred_src_scene_dir) # currently only support multi_lights multi_case_dirs = list_dir(pred_src_scene_dir) print("Find cases", multi_case_dirs) # prepare room optimizer and backup config room_optimizer = prepare_room_optimizer( config, f"data/{scene_name}/scene/full/data.json", # also load scene info json, # which can be an empty placeholder in the future ) config["optimize_option"] = room_optimizer.optimize_option OmegaConf.save( config=config, f=os.path.join(room_optimizer.output_path, "optim_config_full.json"), ) output_path_base = room_optimizer.output_path for curr_case in tqdm(multi_case_dirs): items = list_dir(osp.join(pred_src_scene_dir, curr_case)) for item in tqdm(items): item_dir = osp.join(pred_src_scene_dir, curr_case, item) print("Working on", item_dir) pred_json = osp.join(item_dir, "pred.json") active_instance_id = list(map(int, list(read_json(pred_json).keys()))) active_instance_id = [x for x in active_instance_id if x > 0] active_instance_id += [0] # reset optimizer state 
room_optimizer.reset_active_instance_id(active_instance_id) room_optimizer.reset_optimizable_parameters() room_optimizer.set_output_path( output_path_base, f"{curr_case}/{item}", with_timestamp=False ) src_item_dir = ( # f"data/{scene_name}/scene_multi_lights/{curr_case}/{item}/" f"data/{scene_name}/scene_custom_arrange_multi_lights/{curr_case}/{item}/" ) # TODO: seg should be provided by object detector # seg_mask_path = f"{src_item_dir}/seg.png" seg_mask_path = osp.join(item_dir, "seg.png") room_optimizer.set_sampling_mask_from_seg( seg_mask_path=seg_mask_path, add_noise_to_seg=5, # dilate mask convert_seg_mask_to_box_mask=False, ) room_optimizer.set_initial_pose_from_prediction(pred_json) room_optimizer.generate_relation() image_path = f"{src_item_dir}/rgb.png" print(image_path) img_wh = config.img_wh # read image input_rgb = Image.open(image_path) input_rgb = input_rgb.resize(img_wh, Image.LANCZOS) input_rgb = np.array(input_rgb) input_rgb = torch.from_numpy(input_rgb).float() / 255 # (H, W, 3) pose = np.array( read_json(f"{src_item_dir}/data.json")["camera"]["cam3d2world"] ).reshape(4, 4) room_optimizer.optimize(input_rgb, pose=pose) if __name__ == "__main__": """ Usage: python test/batch_optim_pano.py config=test/config/ig_bedroom.yml "img_wh=[320,180]" pred_src_dir=DeepPano_wo_relation prefix=dbg_bedroom_batch """
config = read_testing_config()
1
2023-10-15 08:41:29+00:00
24k
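One small but load-bearing step in main() above is turning pred.json into the list of active instance ids: ids come from the JSON keys, non-positive ids are dropped, and id 0 (the background in this codebase) is appended. A standalone sketch with a hypothetical pred.json payload:

```python
import json

# Hypothetical pred.json content; only the keys matter for this step.
pred_json = '{"3": {"score": 0.9}, "7": {"score": 0.8}, "-1": {"score": 0.1}}'
pred = json.loads(pred_json)

active_instance_id = list(map(int, pred.keys()))
active_instance_id = [x for x in active_instance_id if x > 0]  # keep real objects
active_instance_id += [0]  # id 0 denotes the background/room

print(active_instance_id)  # [3, 7, 0]
```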
WenzhengZhang/Seq2seqCoref
main_trainer.py
[ { "identifier": "DataArguments", "path": "arguments.py", "snippet": "class DataArguments:\n data_dir: Optional[str] = field(\n default=None, metadata={\"help\": \"Path to data directory\"}\n )\n\n max_train_len: Optional[int] = field(\n default=1536,\n metadata={\n ...
import logging import os import sys from transformers import HfArgumentParser, set_seed from transformers import AutoModelForSeq2SeqLM, \ DataCollatorForSeq2Seq, AutoConfig, AutoTokenizer from transformers.integrations import TensorBoardCallback from arguments import DataArguments, ModelArguments, CorefTrainingArguments \ as TrainingArguments from data import CorefDataset, JointDataset from constants import SPEAKER_START, SPEAKER_END, MENTION_START, MENTION_END, \ COPY, CLUSTER_NEW, CLUSTERS, SENTENCE_START, SENTENCE_END, SPECIAL_IDS, \ NON_INT_SPECIAL_IDS, MARK_SPECIAL_IDS, MENTION_END_NON_INT_SPECIAL_IDS, \ MENTION_ENDS from trainer import CorefTrainer from data import ConstrainedDataCollator from model import ConstrainedT5
20,411
logger = logging.getLogger(__name__) logger.addHandler(logging.StreamHandler(sys.stdout)) def main(): parser = HfArgumentParser( (ModelArguments, DataArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): model_args, data_args, training_args = parser.parse_json_file( json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() model_args: ModelArguments data_args: DataArguments training_args: TrainingArguments if ( os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, fp16 training: %s, bf16 training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16, training_args.bf16, ) logger.info("Training/evaluation parameters %s", training_args) logger.info("MODEL parameters %s", model_args) logger.info("Data arguments %s", data_args) set_seed(training_args.seed) tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path) if training_args.action_type == "integer": num_new_tokens = tokenizer.add_tokens([SPEAKER_START, SPEAKER_END, MENTION_START, MENTION_END, COPY]) elif training_args.action_type == "non_integer": if training_args.add_mention_end: num_new_tokens = tokenizer.add_tokens([SPEAKER_START, SPEAKER_END, MENTION_START, MENTION_END, COPY, CLUSTER_NEW] + CLUSTERS) else: num_new_tokens = tokenizer.add_tokens([SPEAKER_START, SPEAKER_END, MENTION_START, COPY, CLUSTER_NEW] + MENTION_ENDS) else: raise ValueError(f"wrong action type {training_args.action_type}") if training_args.seq2seq_type == 'short_seq' and \ training_args.mark_sentence:
logger = logging.getLogger(__name__) logger.addHandler(logging.StreamHandler(sys.stdout)) def main(): parser = HfArgumentParser( (ModelArguments, DataArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): model_args, data_args, training_args = parser.parse_json_file( json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() model_args: ModelArguments data_args: DataArguments training_args: TrainingArguments if ( os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, fp16 training: %s, bf16 training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16, training_args.bf16, ) logger.info("Training/evaluation parameters %s", training_args) logger.info("MODEL parameters %s", model_args) logger.info("Data arguments %s", data_args) set_seed(training_args.seed) tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path) if training_args.action_type == "integer": num_new_tokens = tokenizer.add_tokens([SPEAKER_START, SPEAKER_END, MENTION_START, MENTION_END, COPY]) elif training_args.action_type == "non_integer": if training_args.add_mention_end: num_new_tokens = tokenizer.add_tokens([SPEAKER_START, SPEAKER_END, MENTION_START, MENTION_END, COPY, CLUSTER_NEW] + CLUSTERS) else: num_new_tokens = tokenizer.add_tokens([SPEAKER_START, SPEAKER_END, MENTION_START, COPY, CLUSTER_NEW] + MENTION_ENDS) else: raise ValueError(f"wrong action type {training_args.action_type}") if training_args.seq2seq_type == 'short_seq' and \ training_args.mark_sentence:
num_new_tokens += tokenizer.add_tokens([SENTENCE_START, SENTENCE_END])
13
2023-10-17 17:39:16+00:00
24k
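main() above extends the tokenizer with task-specific action tokens before training. A minimal sketch of that setup, assuming a small T5 checkpoint; the literal marker strings below are stand-ins for the SPEAKER_START/SPEAKER_END/MENTION_START/MENTION_END/COPY constants, and resizing the embedding matrix is the step that must accompany add_tokens:

```python
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("t5-small")

# Stand-ins for SPEAKER_START, SPEAKER_END, MENTION_START, MENTION_END, COPY.
num_new_tokens = tokenizer.add_tokens(
    ["<speaker>", "</speaker>", "<m>", "</m>", "<copy>"]
)
print(num_new_tokens)  # 5, assuming none of the markers are already in the vocab

model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
model.resize_token_embeddings(len(tokenizer))  # grow embeddings to cover new ids
```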
giulio98/functional-diffusion-processes
src/functional_diffusion_processes/trainers/trainer.py
[ { "identifier": "AudioDataset", "path": "src/functional_diffusion_processes/datasets/audio_dataset.py", "snippet": "class AudioDataset(BaseDataset, abc.ABC):\n \"\"\"Base class for defining audio datasets.\n\n This class serves as the foundation for defining datasets containing audio data.\n It...
import abc import gc import io import logging import os import flax import flax.jax_utils as flax_utils import hydra.utils import jax import numpy as np import tensorflow as tf import wandb from typing import Any, Callable, Tuple, Union from cleanfid import fid from flax import linen, traverse_util from flax.training import checkpoints from flax.training.checkpoints import restore_checkpoint from jax import numpy as jnp from omegaconf import DictConfig, OmegaConf from tqdm.auto import tqdm from wandb.sdk.lib import RunDisabled from wandb.sdk.wandb_run import Run from ..datasets import AudioDataset, ImageDataset from ..datasets.base_dataset import BaseDataset from ..losses.base_loss import Loss from ..metrics import FIDMetric from ..samplers import Sampler from ..sdetools.base_sde import SDE from ..utils.common import filter_mask, make_grid_image, process_images, save_samples, to_grayscale from ..utils.scaler import get_data_inverse_scaler, get_data_scaler from ..utils.training_state import TrainState from .helpers import colorizing_fn, construct_sampling_fn, construct_train_step, inpainting_fn, sampling_fn
15,005
) ds_mask = hydra.utils.instantiate(config_object, _recursive_=False) ds_mask_iter = iter(ds_mask) batch_masked = jax.tree_map(f=lambda x: x._numpy(), tree=next(ds_mask_iter)["data"]) batch_sampled, batch_sampled_last, batch_sampled_all = inpainting_fn( sample_fn, (sample_rng, state), p_batch_input, (batch * batch_masked), batch_masked ) elif self.training_config.sampling_type == "deblurring": n_rows, n_cols = ds_train.data_config.image_height_size, ds_train.data_config.image_width_size batch_masked = filter_mask(batch.reshape(-1, b, n_rows, n_cols, c).shape, radius=10) batch_freq = jnp.fft.fftshift( jnp.fft.fft2(batch.reshape(-1, b, n_rows, n_cols, c), axes=(2, 3)), axes=(2, 3), ) batch_freq = batch_freq * batch_masked batch_blurred = jnp.real(jnp.fft.ifft2(jnp.fft.ifftshift(batch_freq, axes=(2, 3)), axes=(2, 3))) batch_blurred = batch_blurred.reshape(-1, b, g, c) batch_masked = batch_masked.reshape(-1, b, g, c) batch_sampled, batch_sampled_last, batch_sampled_all = inpainting_fn( sample_fn, (sample_rng, state), p_batch_input, batch_blurred, batch_masked ) if jax.host_id() == 0 and self.logging.use_wandb: if isinstance(ds_train, ImageDataset): this_sample_dir = os.path.join( self.sample_dir, "iter_{}_host_{}".format(step, jax.host_id()), ) tf.io.gfile.makedirs(this_sample_dir) # code below to show the gif of the sampled images # processed_images = [] # for n in range(batch_sampled_all.shape[1]): # batch_sampled_i = batch_sampled_all[:, n, :, :, :] # batch_sampled_i = ds_train.postprocess_fn( # batch_data=batch_sampled_i, inverse_scaler=inverse_scaler # ) # processed_images.append(np.asarray(batch_sampled_i)) # # # Log the sampled images as a GIF # imageio.mimwrite( # os.path.join(this_sample_dir, "image_sequence.gif"), # processed_images, # fps=10, # ) # gif_wandb = wandb.Image( # os.path.join(this_sample_dir, "image_sequence.gif"), # caption="Sampled_all_gif", # ) # wandb.log({"Sampled_all_gif": gif_wandb}, step=step) batch_sampled = ds_train.postprocess_fn(batch_data=batch_sampled, inverse_scaler=inverse_scaler) batch_sampled_last = ds_train.postprocess_fn( batch_data=batch_sampled_last, inverse_scaler=inverse_scaler ) batch_real = ds_train.postprocess_fn( batch_data=batch.reshape(-1, b, g, c), inverse_scaler=inverse_scaler ) if not self.training_config.sampling_only: batch_target = ds_train.postprocess_fn( batch_data=target.reshape(-1, b, g, c), inverse_scaler=inverse_scaler ) if isinstance(ds_train, ImageDataset): data_sampled = wandb.Image(np.asarray(batch_sampled), caption="Sampled") data_sampled_rec = wandb.Image(np.asarray(batch_sampled_last), caption="Sampled Rec") data_real = wandb.Image(np.asarray(batch_real), caption="Real") if not self.training_config.sampling_only: data_target = wandb.Image(np.asarray(batch_target), caption="Target") elif isinstance(ds_train, AudioDataset): sample_rate = ds_train.data_config.audio_sample_rate long_audio_sampled = np.concatenate( np.asarray(batch_sampled).reshape(-1, sample_rate), axis=0 ) data_sampled = wandb.Audio(long_audio_sampled, sample_rate=sample_rate, caption="Sampled") if not self.training_config.sampling_only: long_audio_target = np.concatenate( np.asarray(batch_target).reshape(-1, sample_rate), axis=0 ) data_target = wandb.Audio(long_audio_target, sample_rate=sample_rate, caption="Target") long_audio_batch_sampled_rec = np.concatenate( np.asarray(batch_sampled_last).reshape(-1, sample_rate), axis=0 ) data_sampled_rec = wandb.Audio( long_audio_batch_sampled_rec, sample_rate=sample_rate, caption="Sampled Rec" ) long_audio_batch_real 
= np.concatenate( np.asarray(batch_real).reshape(-1, sample_rate), axis=0 ) data_real = wandb.Audio(long_audio_batch_real, sample_rate=sample_rate, caption="Real") else: raise ValueError("Unsupported dataset type: {}".format(type(ds_train))) wandb.log({"Sampled": data_sampled}, step=step) if not self.training_config.sampling_only: wandb.log({"Target": data_target}, step=step) wandb.log({"Sampled_rec": data_sampled_rec}, step=step) wandb.log({"Real": data_real}, step=step) if self.training_config.sampling_type == "colorization": batch_gray = make_grid_image( batch_grayscale.reshape( -1, ds_train.data_config.image_width_size, ds_train.data_config.image_height_size, 1, ), inverse_scaler=inverse_scaler, ) image_gray = wandb.Image(np.asarray(batch_gray), caption="Gray") wandb.log({"Gray": image_gray}, step=step) elif self.training_config.sampling_type == "inpainting": batch_masked = make_grid_image(
# import imageio # import imageio pylogger = logging.getLogger(__name__) class Trainer(abc.ABC): """Class for training a model.""" def __init__( self, mode: str, model_name: str, training_config: DictConfig, optimizer, evaluation_config: DictConfig, trainer_logging: DictConfig, sampler: Sampler, loss_obj: Loss, ) -> None: """Initialize a Trainer instance with configurations and core components. Args: mode (str): Specifies the mode of the trainer which can be either "train" or "eval". model_name (str): The name identifier for the model. training_config (DictConfig): A configuration dictionary for training settings. optimizer: The optimizer instance used for training. evaluation_config (DictConfig): A configuration dictionary for evaluation settings. trainer_logging (DictConfig): A configuration dictionary for logging settings. sampler (Sampler): A sampler instance for sampling from the model. loss_obj (Loss): A loss object used for computing the loss during training. """ self.mode = mode self.model_name = model_name self.training_config = training_config self.optimizer = hydra.utils.instantiate(optimizer) self.evaluation_config = evaluation_config self.logging = trainer_logging self.sampler = sampler self.loss_obj = loss_obj self.checkpoint_dir = os.path.join(self.training_config.save_dir, self.training_config.checkpoint_dir) self.sample_dir = os.path.join(self.training_config.save_dir, self.training_config.sample_dir) self.eval_dir = os.path.join(self.training_config.save_dir, self.evaluation_config.eval_dir) # Create the directories for saving samples and checkpoints tf.io.gfile.makedirs(self.checkpoint_dir) tf.io.gfile.makedirs(self.sample_dir) tf.io.gfile.makedirs(self.eval_dir) tf.io.gfile.makedirs(os.path.join(self.eval_dir, "clean")) def initialize_wandb( self, dataset_config: DictConfig, sde_config: DictConfig, model_config: DictConfig ) -> Union[Run, RunDisabled, None]: """Initialize wandb if logging is enabled.""" if self.logging.use_wandb: run = wandb.init( name=os.path.basename(self.logging.wandb_init.name), project=self.logging.wandb_init.project, entity=self.logging.wandb_init.entity, save_code=self.logging.wandb_init.save_code, config={ **self.training_config, **dataset_config, **sde_config, **model_config, }, ) else: run = None return run def initialize_run(self, model, ds_train, sde): """Perform all initialization steps required for training.""" run = self.initialize_wandb(ds_train.data_config, sde.sde_config, model.model_config) scaler = get_data_scaler(is_centered=ds_train.data_config.data_centered) inverse_scaler = get_data_inverse_scaler(is_centered=ds_train.data_config.data_centered) rng = jax.random.PRNGKey(seed=self.training_config.seed) rng, step_rng = jax.random.split(rng) batch_input = model.initialize_input( (ds_train.data_config.batch_size, *sde.sde_config.shape, ds_train.data_config.output_size) ) params = jax.jit(model.initialize_model, backend="cpu")(step_rng, batch_input) flat_params = traverse_util.flatten_dict(params).values() tot_params = sum([jnp.size(p) for p in flat_params]) pylogger.info("Total number of parameters: {:.2f}M".format(tot_params / 1e6)) state = TrainState.create( apply_fn=model.apply, params=params, tx=self.optimizer, opt_state_params=self.optimizer.init(params), rng=rng, ema_params=params, ) train_step_fn = construct_train_step(self.optimizer, self.loss_obj.construct_loss_fn(model)) sample_fn = construct_sampling_fn(model, self.sampler) # Resume training when intermediate checkpoints are detected if self.training_config.resume_training: 
pylogger.warning("Resuming training from the latest checkpoint.") if self.logging.use_wandb and self.model_name != "local": model_file = wandb.use_artifact(self.model_name).download() state = restore_checkpoint(ckpt_dir=model_file, prefix="checkpoint_", target=state) else: state = checkpoints.restore_checkpoint(ckpt_dir=self.checkpoint_dir, target=state) return run, scaler, inverse_scaler, rng, state, train_step_fn, sample_fn, batch_input def train_step( self, train_step_fn: Callable, carry_state: Tuple, batch: jnp.ndarray, batch_input: jnp.ndarray, ) -> Tuple: """Perform a single training step, updating the model parameters. Args: train_step_fn (Callable): The train step function. carry_state (Tuple): The current state of the model and optimizer. batch (jnp.ndarray): The batch of data used for training. batch_input (jnp.ndarray): The input data to the model. Returns: Tuple: The updated state after performing the training step. """ (rng, state) = carry_state ( new_rng, loss, loss_inner, new_params, new_optim_state, batch_reconstructed, batch_corrupted, target, ) = train_step_fn( rng, state.params, state.opt_state_params, state.step, batch_input, batch, ) ema_rate = self.training_config.ema_rate new_params_ema = jax.tree_map( lambda p_ema, p: p_ema * ema_rate + p * (1.0 - ema_rate), state.ema_params, new_params, ) # update the state new_state = state.replace( rng=flax.jax_utils.unreplicate(new_rng), step=state.step + 1, opt_state_params=new_optim_state, params=new_params, ema_params=new_params_ema, ) new_carry_state = (new_rng, new_state) loss = flax.jax_utils.unreplicate(loss) step = int(flax_utils.unreplicate(state.step)) # Log the training progress if jax.host_id() == 0 and step % self.training_config.log_freq == 0: pylogger.info("step: %d, training_loss: %.5e" % (step, loss)) if self.logging.use_wandb: wandb.log({"step": step, "loss": loss}, step=step) if loss_inner is not None: loss_inner = flax.jax_utils.unreplicate(loss_inner) for inner_step, loss in enumerate(loss_inner): pylogger.info("step: %d, training_loss_inner: %.5e" % (step, loss)) if self.logging.use_wandb: wandb.log({"step": step, f"loss inner step {inner_step}": loss}, step=step) return new_carry_state, batch_reconstructed, batch_corrupted, target def save_checkpoint(self, step, run, state): pylogger.info("Saving the model at step %d." % (step,)) # Log the evaluation progress # Save the model parameters ( params, opt_state_params, step_, ema_params, ) = flax_utils.unreplicate( ( state.params, state.opt_state_params, state.step, state.ema_params, ) ) saved_state = state.replace( step=step_, opt_state_params=opt_state_params, params=params, ema_params=ema_params, ) checkpoint_file = checkpoints.save_checkpoint( self.checkpoint_dir, saved_state, step=step_ // self.training_config.eval_freq, keep=np.inf, ) if self.logging.use_wandb: wandb_model_artifact_name = str(step_) + "_" + run.id wandb_model = wandb.Artifact(wandb_model_artifact_name, type="model") wandb_model.add_file(checkpoint_file) run.log_artifact(wandb_model) # noinspection PyProtectedMember def train(self, model: linen.Module, ds_train: BaseDataset, sde: SDE) -> None: """Train the model with optional evaluation and logging. This method encapsulates the entire training process including initialization, training loop, checkpointing, evaluation, and logging. It supports different sampling types like colorization, inpainting, super resolution, and deblurring. Args: model (linen.Module): The model to be trained. ds_train (BaseDataset): The training dataset. 
sde (SDE): Stochastic differential equation object, governing the dynamics for sampling. Raises: ValueError: If an unsupported dataset type is provided. Note: The method leverages the Weights & Biases (wandb) platform for logging and checkpointing, make sure it's configured properly if logging is enabled. """ run, scaler, inverse_scaler, rng, state, train_step_fn, sample_fn, batch_input = self.initialize_run( model, ds_train, sde ) # `state.step` is JAX integer on the GPU/TPU devices start_step = int(state.step) rng = state.rng # Replicate the train state on all devices ( p_params, p_opt_state_params, p_step, p_ema_params, p_batch_input, ) = flax_utils.replicate( ( state.params, state.opt_state_params, state.step, state.ema_params, batch_input, ) ) # update the TrainState with replicated parameters and optimizer state state = state.replace( params=p_params, opt_state_params=p_opt_state_params, step=p_step, ema_params=p_ema_params, ) if jax.host_id() == 0: pylogger.info("Starting training loop at step %d." % (start_step,)) rng = jax.random.fold_in(rng, jax.host_id()) assert ( self.training_config.log_freq % self.training_config.n_jitted_steps == 0 and self.training_config.eval_freq % self.training_config.n_jitted_steps == 0 ), "Missing logs or checkpoints!" ds_train_iter = iter(ds_train) with tqdm( total=self.training_config.total_steps + 1, initial=start_step, position=0, leave=True, ) as pbar: for step in range( start_step, self.training_config.total_steps + 1, self.training_config.n_jitted_steps, ): # Get the next batch of data and scale it batch = jax.tree_map(f=lambda x: scaler(x._numpy()), tree=next(ds_train_iter)["data"]) if not self.training_config.sampling_only: # Split the random number generator for the current step rng, *next_rng = jax.random.split(key=rng, num=jax.local_device_count() + 1) next_rng = jnp.asarray(next_rng) ((_, state), batch_reconstructed, batch_corrupted, target) = self.train_step( train_step_fn=train_step_fn, carry_state=(next_rng, state), batch=batch, batch_input=p_batch_input, ) if not self.training_config.sampling_only and ( (jax.host_id() == 0 and step % self.training_config.checkpoint_freq == 0 and step != 0) ): self.save_checkpoint(step, run, state) # Evaluate the model if self.training_config.sampling and (step % self.training_config.eval_freq == 0): # if step != 0: if jax.host_id() == 0: pylogger.info("Generating samples at step %d." 
% (step,)) _, *sample_rng = jax.random.split(rng, jax.local_device_count() + 1) _, b, g, c = batch.shape sample_rng = jnp.asarray(sample_rng) if self.training_config.sampling_type == "full": batch_sampled, batch_sampled_last, batch_sampled_all = sampling_fn( sample_fn, (sample_rng, state), p_batch_input ) elif self.training_config.sampling_type == "colorization": batch_grayscale = to_grayscale(batch) batch_grayscale = batch_grayscale.reshape(-1, b, g, 1) batch_sampled, batch_sampled_last, batch_sampled_all = colorizing_fn( sample_fn, (sample_rng, state), p_batch_input, batch_grayscale ) elif self.training_config.sampling_type == "inpainting": config_object = OmegaConf.create( { "_target_": "functional_diffusion_processes.datasets.mnist_dataset.MNISTDataset", "data_config": { "seed": 42, "batch_size": ds_train.data_config.batch_size, "image_height_size": ds_train.data_config.image_height_size, "image_width_size": ds_train.data_config.image_width_size, "output_size": 1, "random_flip": False, "uniform_dequantization": False, "data_centered": False, "data_dir": "${oc.env:DATA_ROOT}/tensorflow_datasets", "download": True, "is_mask": True, }, "split": "train", "evaluation": False, } ) ds_mask = hydra.utils.instantiate(config_object, _recursive_=False) ds_mask_iter = iter(ds_mask) batch_masked = jax.tree_map(f=lambda x: x._numpy(), tree=next(ds_mask_iter)["data"]) batch_sampled, batch_sampled_last, batch_sampled_all = inpainting_fn( sample_fn, (sample_rng, state), p_batch_input, (batch * batch_masked), batch_masked ) elif self.training_config.sampling_type == "deblurring": n_rows, n_cols = ds_train.data_config.image_height_size, ds_train.data_config.image_width_size batch_masked = filter_mask(batch.reshape(-1, b, n_rows, n_cols, c).shape, radius=10) batch_freq = jnp.fft.fftshift( jnp.fft.fft2(batch.reshape(-1, b, n_rows, n_cols, c), axes=(2, 3)), axes=(2, 3), ) batch_freq = batch_freq * batch_masked batch_blurred = jnp.real(jnp.fft.ifft2(jnp.fft.ifftshift(batch_freq, axes=(2, 3)), axes=(2, 3))) batch_blurred = batch_blurred.reshape(-1, b, g, c) batch_masked = batch_masked.reshape(-1, b, g, c) batch_sampled, batch_sampled_last, batch_sampled_all = inpainting_fn( sample_fn, (sample_rng, state), p_batch_input, batch_blurred, batch_masked ) if jax.host_id() == 0 and self.logging.use_wandb: if isinstance(ds_train, ImageDataset): this_sample_dir = os.path.join( self.sample_dir, "iter_{}_host_{}".format(step, jax.host_id()), ) tf.io.gfile.makedirs(this_sample_dir) # code below to show the gif of the sampled images # processed_images = [] # for n in range(batch_sampled_all.shape[1]): # batch_sampled_i = batch_sampled_all[:, n, :, :, :] # batch_sampled_i = ds_train.postprocess_fn( # batch_data=batch_sampled_i, inverse_scaler=inverse_scaler # ) # processed_images.append(np.asarray(batch_sampled_i)) # # # Log the sampled images as a GIF # imageio.mimwrite( # os.path.join(this_sample_dir, "image_sequence.gif"), # processed_images, # fps=10, # ) # gif_wandb = wandb.Image( # os.path.join(this_sample_dir, "image_sequence.gif"), # caption="Sampled_all_gif", # ) # wandb.log({"Sampled_all_gif": gif_wandb}, step=step) batch_sampled = ds_train.postprocess_fn(batch_data=batch_sampled, inverse_scaler=inverse_scaler) batch_sampled_last = ds_train.postprocess_fn( batch_data=batch_sampled_last, inverse_scaler=inverse_scaler ) batch_real = ds_train.postprocess_fn( batch_data=batch.reshape(-1, b, g, c), inverse_scaler=inverse_scaler ) if not self.training_config.sampling_only: batch_target = ds_train.postprocess_fn( 
batch_data=target.reshape(-1, b, g, c), inverse_scaler=inverse_scaler ) if isinstance(ds_train, ImageDataset): data_sampled = wandb.Image(np.asarray(batch_sampled), caption="Sampled") data_sampled_rec = wandb.Image(np.asarray(batch_sampled_last), caption="Sampled Rec") data_real = wandb.Image(np.asarray(batch_real), caption="Real") if not self.training_config.sampling_only: data_target = wandb.Image(np.asarray(batch_target), caption="Target") elif isinstance(ds_train, AudioDataset): sample_rate = ds_train.data_config.audio_sample_rate long_audio_sampled = np.concatenate( np.asarray(batch_sampled).reshape(-1, sample_rate), axis=0 ) data_sampled = wandb.Audio(long_audio_sampled, sample_rate=sample_rate, caption="Sampled") if not self.training_config.sampling_only: long_audio_target = np.concatenate( np.asarray(batch_target).reshape(-1, sample_rate), axis=0 ) data_target = wandb.Audio(long_audio_target, sample_rate=sample_rate, caption="Target") long_audio_batch_sampled_rec = np.concatenate( np.asarray(batch_sampled_last).reshape(-1, sample_rate), axis=0 ) data_sampled_rec = wandb.Audio( long_audio_batch_sampled_rec, sample_rate=sample_rate, caption="Sampled Rec" ) long_audio_batch_real = np.concatenate( np.asarray(batch_real).reshape(-1, sample_rate), axis=0 ) data_real = wandb.Audio(long_audio_batch_real, sample_rate=sample_rate, caption="Real") else: raise ValueError("Unsupported dataset type: {}".format(type(ds_train))) wandb.log({"Sampled": data_sampled}, step=step) if not self.training_config.sampling_only: wandb.log({"Target": data_target}, step=step) wandb.log({"Sampled_rec": data_sampled_rec}, step=step) wandb.log({"Real": data_real}, step=step) if self.training_config.sampling_type == "colorization": batch_gray = make_grid_image( batch_grayscale.reshape( -1, ds_train.data_config.image_width_size, ds_train.data_config.image_height_size, 1, ), inverse_scaler=inverse_scaler, ) image_gray = wandb.Image(np.asarray(batch_gray), caption="Gray") wandb.log({"Gray": image_gray}, step=step) elif self.training_config.sampling_type == "inpainting": batch_masked = make_grid_image(
ndarray=process_images(images=batch_masked * batch - (1 - batch_masked)),
9
2023-10-24 22:01:35+00:00
24k
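Trainer.train_step above maintains an exponential moving average of the parameters with a leaf-wise tree map: ema <- ema * rate + params * (1 - rate). A self-contained sketch of just that update on a toy parameter pytree (the 0.999 rate is a placeholder for training_config.ema_rate):

```python
import jax
import jax.numpy as jnp

ema_rate = 0.999  # placeholder; the real value comes from training_config

params = {"dense": {"kernel": jnp.ones((2, 2)), "bias": jnp.zeros((2,))}}
ema_params = jax.tree_map(jnp.zeros_like, params)  # freshly initialized EMA

# Leaf-wise EMA update, mirroring the lambda used in train_step.
ema_params = jax.tree_map(
    lambda p_ema, p: p_ema * ema_rate + p * (1.0 - ema_rate),
    ema_params,
    params,
)
print(ema_params["dense"]["kernel"][0, 0])  # 0.001 after one update
```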
violet-sto/HN-GFN
main_mobo.py
[ { "identifier": "Dataset", "path": "dataset.py", "snippet": "class Dataset:\n\n def __init__(self, args, bpath, oracle, device):\n self.test_split_rng = np.random.RandomState(142857)\n self.train_rng = np.random.RandomState(int(time.time()))\n self.train_mols = []\n self.t...
from collections import defaultdict from dataset import Dataset from mol_mdp_ext import MolMDPExtended, BlockMoleculeDataExtended from oracle.oracle import Oracle from proxy import get_proxy from generator import TBGFlowNet, FMGFlowNet, MOReinforce from utils.utils import set_random_seed from utils.metrics import compute_success, compute_diversity, compute_novelty, compute_correlation, circle_points from utils.logging import get_logger from datetime import datetime from botorch.utils.multi_objective.hypervolume import Hypervolume from botorch.utils.sampling import sample_simplex from botorch.utils.transforms import normalize, unnormalize from torch.distributions.dirichlet import Dirichlet from main import RolloutWorker, get_test_mols from pymoo.util.ref_dirs import get_reference_directions from copy import deepcopy import random import os import re import argparse import json import time import threading import pdb import pickle import gzip import torch.multiprocessing as mp import torch.nn.functional as F import torch import pandas as pd import numpy as np import warnings
15,621
checkpoint_path = os.path.join(args.log_dir, f'round_{round_idx}/{i}_generator_checkpoint.pth') generator.load_state_dict(torch.load(checkpoint_path)) return rollout_worker, {'train_losses': train_losses, 'test_losses': test_losses, 'test_infos': test_infos, 'train_infos': train_infos} def sample_batch(args, generator, rollout_worker, oracle=None, proxy=None, ref_mols=None, Y_bounds=None, compute_multi_objective_metric=False): score_succ = {'gsk3b': 0.5, 'jnk3': 0.5, 'drd2': 0.5, 'chemprop_sars': 0.5, 'chemprop_hiv': 0.5, "seh": 0.5, 'qed': 0.6, 'sa': 0.67} if Y_bounds is None: Y_bounds = torch.stack([proxy.partitioning.Y.min( dim=-2).values, proxy.partitioning.Y.max(dim=-2).values]) time_start = time.time() print(f"Sampling molecules...") raw_rewards = [] raw_rewards_weight = {} means = [] picked_mols = [] smis = [] for i, weights in enumerate(rollout_worker.test_weights): sampled_mols = [] sampled_raw_rewards = [] sampled_means = [] sampled_smis = [] while len(sampled_mols) < args.num_samples: rollout_worker.rollout(generator, use_rand_policy=False, weights=torch.tensor(weights).unsqueeze(0).to(args.device)) (raw_r, _, m, trajectory_stats, inflow) = rollout_worker.sampled_mols[-1] sampled_mols.append(m) sampled_raw_rewards.append(raw_r[0].item()) sampled_means.append(raw_r[1]) sampled_smis.append(m.smiles) idx_pick = np.argsort(sampled_raw_rewards)[::-1][:int(args.num_samples/len(rollout_worker.test_weights))] picked_mols.extend(np.array(sampled_mols)[idx_pick].tolist()) means.extend(np.array(sampled_means)[idx_pick].tolist()) smis.extend(np.array(sampled_smis)[idx_pick].tolist()) raw_rewards.extend(np.array(sampled_raw_rewards)[idx_pick].tolist()) raw_rewards_weight[str(weights.cpu())] = np.array(sampled_raw_rewards)[idx_pick].mean() raw_rewards_mean = np.mean(list(raw_rewards_weight.values())) assert len(picked_mols) == args.num_samples top_means = torch.tensor(means) scores_dict = oracle.batch_get_scores(picked_mols) scores = torch.tensor(pd.DataFrame.from_dict(scores_dict).values) test_loss = F.mse_loss(top_means, scores) hypervolume = Hypervolume(ref_point=torch.zeros(len(args.objectives))) volume = hypervolume.compute(top_means) volume_oracle = hypervolume.compute(scores) diversity = compute_diversity(picked_mols) batch_metrics = {'Hypervolume_reward': volume, 'Hypervolume_oracle': volume_oracle, 'Reward_mean': raw_rewards_mean, 'scores_max': pd.DataFrame.from_dict(scores_dict).max().to_dict(), 'scores_mean': pd.DataFrame.from_dict(scores_dict).mean().to_dict(), 'Test_loss': test_loss, 'Diversity': diversity} print(batch_metrics) print('Time: {}'.format(time.time()-time_start)) if not compute_multi_objective_metric: return volume, volume_oracle, raw_rewards_weight, raw_rewards_mean, test_loss, diversity else: for i in range(len(picked_mols)): picked_mols[i].score = scores_dict[i] # success/diversity/novelty is computed among the top mols. success, positive_mols = compute_success( picked_mols, scores_dict, args.objectives, score_succ) succ_diversity = compute_diversity(positive_mols) if ref_mols: novelty = compute_novelty(positive_mols, ref_mols) else: novelty = 1. 
mo_metrics = {'success': success, 'novelty': novelty, 'succ_diversity': succ_diversity, } picked_smis = [(raw_rewards[i], picked_mols[i].score, smis[i]) for i in range(len(raw_rewards))] print(mo_metrics) return (picked_mols, scores_dict, picked_smis), batch_metrics, mo_metrics def log_overall_metrics(args, dataset, batch_infos=None, MultiObjective_metrics=None): volume = dataset.compute_hypervolume() print("Hypervolume for {}: {}".format(args.logger.context, volume)) args.logger.add_scalar('Metric/hypervolume', volume, use_context=False) args.logger.add_object('scores', dataset.scores) args.logger.add_object('smis', dataset.smis) if batch_infos: args.logger.add_scalar( 'Metric/test_loss', batch_infos['Test_loss'], use_context=False) args.logger.add_object('collected_info', batch_infos) if MultiObjective_metrics: args.logger.add_scalars('Metric/MultiObjective', MultiObjective_metrics, use_context=False) def get_test_rays(): if args.n_objectives == 3: n_partitions = 6 elif args.n_objectives == 4: n_partitions = 7 test_rays = get_reference_directions("das-dennis", args.n_objectives, n_partitions=n_partitions).astype(np.float32) test_rays = test_rays[[(r > 0).all() for r in test_rays]] print(f"initialize {len(test_rays)} test rays") return test_rays def main(args):
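A minimal, self-contained sketch of the hypervolume metric that sample_batch above reports through botorch's Hypervolume; the zero reference point mirrors the call in the snippet, while the toy score matrix is invented purely for illustration:

import torch
from botorch.utils.multi_objective.hypervolume import Hypervolume

# Toy (n_mols, n_objectives) scores in [0, 1]; higher is better for every objective.
scores = torch.tensor([[0.9, 0.2, 0.5],
                       [0.4, 0.8, 0.6],
                       [0.7, 0.7, 0.3]])
hv = Hypervolume(ref_point=torch.zeros(scores.shape[-1]))
# Volume of objective space dominated by the points and bounded below by the reference point.
print("hypervolume:", hv.compute(scores))

A larger value means the sampled batch pushes further out along all objectives at once, which is why the snippet reports it twice: once on the proxy means and once on the oracle scores.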
warnings.filterwarnings('ignore') def arg_parse(): parser = argparse.ArgumentParser() parser.add_argument("--device", type=str, default='cuda') parser.add_argument('--seed', type=int, default=42, help='seed') parser.add_argument("--run", default=0, help="run", type=int) parser.add_argument('--save', action='store_true', default=False, help='Save model.') parser.add_argument('--debug',action='store_true', default=False, help='debug mode, no multi thread') parser.add_argument("--enable_tensorboard", action='store_true', default=False) parser.add_argument("--log_dir", default='runs/mobo') parser.add_argument("--include_nblocks", default=False) parser.add_argument("--num_init_examples", default=200, type=int) parser.add_argument("--num_outer_loop_iters", default=8, type=int) parser.add_argument("--num_samples", default=100, type=int) parser.add_argument("--floatX", default='float32') parser.add_argument('--sample_iterations', type=int, default=1000, help='sample mols and compute metrics') parser.add_argument("--log_weight_score", action='store_true', default=False) # objectives parser.add_argument("--objectives", type=str, default='gsk3b,jnk3,qed,sa') parser.add_argument("--acq_fn", default='UCB', type=str) parser.add_argument("--beta", default=0.1, type=float) parser.add_argument("--scalar", default='WeightedSum', type=str) parser.add_argument("--alpha", default=1., type=float, help='dirichlet distribution') parser.add_argument("--alpha_vector", default='1,1,1,1', type=str) # Proxy parser.add_argument("--proxy_normalize", action='store_true', default=False, help='normalize Y') parser.add_argument("--proxy_num_iterations", default=10000, type=int) parser.add_argument("--proxy_learning_rate", default=2.5e-4, help="Learning rate", type=float) parser.add_argument("--proxy_mbsize", default=64, help="Minibatch size", type=int) parser.add_argument("--proxy_early_stop_tol", default=10, type=int) parser.add_argument("--proxy_repr_type", default='atom_graph') parser.add_argument("--proxy_model_version", default='v2') parser.add_argument("--proxy_num_conv_steps", default=12, type=int) parser.add_argument("--proxy_nemb", default=64, help="#hidden", type=int) parser.add_argument("--proxy_weight_decay", default=1e-6, help="Weight Decay in Proxy", type=float) parser.add_argument("--proxy_uncertainty", default="evidential", type=str) # deep ensemble and GP parser.add_argument("--proxy_dropout", default=0.1, help="MC Dropout in Proxy", type=float) parser.add_argument("--proxy_num_dropout_samples", default=5, type=int) parser.add_argument("--evidential_lam", default=0.1, type=float) parser.add_argument( "--fp_radius", type=int, default=2, help="Morgan fingerprint radius." ) parser.add_argument( "--fp_nbits", type=int, default=1024, help="Morgan fingerprint nBits." 
) # GFlowNet parser.add_argument("--min_blocks", default=2, type=int) parser.add_argument("--max_blocks", default=8, type=int) parser.add_argument("--num_iterations", default=5000, type=int) parser.add_argument("--criterion", default="FM", type=str) parser.add_argument("--learning_rate", default=5e-4, help="Learning rate", type=float) parser.add_argument("--Z_learning_rate", default=5e-3, help="Learning rate", type=float) parser.add_argument("--clip_grad", default=0, type=float) parser.add_argument("--trajectories_mbsize", default=8, type=int) parser.add_argument("--offline_mbsize", default=8, type=int) parser.add_argument("--hindsight_prob", default=0.2, type=float) parser.add_argument("--hindsight_buffer_mbsize", default=8, type=int) parser.add_argument("--hindsight_trajectories_mbsize", default=8, type=int) parser.add_argument("--reward_min", default=1e-2, type=float) parser.add_argument("--reward_norm", default=1, type=float) parser.add_argument("--reward_exp", default=8, type=float) parser.add_argument("--reward_exp_ramping", default=0, type=float) parser.add_argument("--logit_clipping", default=0., type=float) # Hyperparameters for TB parser.add_argument("--partition_init", default=1, type=float) # Hyperparameters for FM parser.add_argument("--log_reg_c", default=(0.1/8)**4, type=float) parser.add_argument("--balanced_loss", default=True) parser.add_argument("--leaf_coef", default=10, type=float) # Architecture parser.add_argument("--repr_type", default='block_graph') parser.add_argument("--model_version", default='v4') parser.add_argument("--num_conv_steps", default=10, type=int) parser.add_argument("--nemb", default=256, help="#hidden", type=int) parser.add_argument("--weight_decay", default=0, type=float) parser.add_argument("--random_action_prob", default=0.05, type=float) parser.add_argument("--bootstrap_tau", default=0, type=float) parser.add_argument("--condition_type", type=str, default='HN') parser.add_argument("--ray_hidden_dim", default=100, type=int) return parser.parse_args() class BoRolloutWorker(RolloutWorker): def __init__(self, args, bpath, proxy, device): super(BoRolloutWorker, self).__init__(args, bpath, proxy, device) self.hindsight_prob = args.hindsight_prob self.hindsight_mols = defaultdict(list) self.hindsight_smiles = defaultdict(list) self.replay_threshold = 0.9 def _get(self, i, dset, weights=None): # Sample trajectories by walking backwards from the molecules in our dataset # Handle possible multithreading issues when independent threads # add/substract from dset: m = dset[i] if not isinstance(m, BlockMoleculeDataExtended): m = m[-1] r, raw_r = self._get_reward(m, weights) done = 1 samples = [] # a sample is a tuple (parents(s), parent actions, reward(s), s, done) # an action is (blockidx, stemidx) or (-1, x) for 'stop' # so we start with the stop action, unless the molecule is already # a "terminal" node (if it has no stems, no actions). 
if len(m.stems) and len(m.blocks) < self.max_blocks: samples.append(((m,), ((-1, 0),), weights, weights, r, m, done)) r = done = 0 while len(m.blocks): # and go backwards if self.ignore_parents: parents = self.mdp.parents(m) parent, action = parents[self.train_rng.randint(len(parents))] samples.append(((parent,), (action,), weights, weights, r, m, done)) r = done = 0 m = parent else: parents, actions = zip(*self.mdp.parents(m)) samples.append((parents, actions, weights.repeat(len(parents), 1), weights, r, m, done)) r = done = 0 m = parents[self.train_rng.randint(len(parents))] return samples[::-1] def _add_mol_to_replay(self, m): for i, weights in enumerate(self.test_weights): r, raw_r = self._get_reward(m, weights) if len(self.hindsight_mols[i]) < self.max_hindsight_mols or raw_r[0] > self.hindsight_mols[i][0][0]: if m.smiles not in self.hindsight_smiles[i]: self.hindsight_mols[i].append((raw_r[0].item(), m.smiles, m)) self.hindsight_smiles[i].append(m.smiles) if len(self.hindsight_mols[i]) > self.max_hindsight_mols: self.hindsight_mols[i] = sorted(self.hindsight_mols[i], key=lambda x:(x[0]))[ max(int(0.05 * self.max_hindsight_mols), 1):] self.hindsight_smiles[i] = [x[1] for x in self.hindsight_mols[i]] def _add_mol_to_online(self, r, m, inflow): if self.replay_mode == 'online': r = r + self.train_rng.normal() * 0.01 if len(self.online_mols) < self.max_online_mols or r > self.online_mols[0][0]: self.online_mols.append((r, m)) if len(self.online_mols) > self.max_online_mols: self.online_mols = sorted(self.online_mols)[ max(int(0.05 * self.max_online_mols), 1):] elif self.replay_mode == 'prioritized': self.online_mols.append((abs(inflow - np.log(r)), m)) if len(self.online_mols) > self.max_online_mols * 1.1: self.online_mols = self.online_mols[-self.max_online_mols:] def _get_reward(self, m, weights=None): rdmol = m.mol if rdmol is None: return self.reward_min # get reward from proxy raw_reward, score = self.proxy(m, weights) raw_reward = raw_reward.clip(self.reward_min) reward = self.l2r(raw_reward) return reward, (raw_reward, score) def execute_train_episode_batch(self, generator, dataset=None, Y_bounds=None, use_rand_policy=True): if self.train_rng.uniform() < self.hindsight_prob: idx = self.train_rng.randint(self.test_weights.shape[0]) weights = self.test_weights[idx].unsqueeze(0) samples = sum((self.rollout(generator, use_rand_policy, weights) for i in range(self.args.hindsight_trajectories_mbsize)), []) if self.args.hindsight_buffer_mbsize > 0: buffer = deepcopy(self.hindsight_mols[idx]) reward = np.array([x[0] for x in buffer]) prob = reward / sum(reward) eidx = np.random.choice(list(range(len(buffer))), self.args.hindsight_buffer_mbsize, replace=False, p=prob) offline_samples = sum((self._get(i, buffer, weights) for i in eidx), []) samples += offline_samples else: weights = Dirichlet(torch.tensor(self.args.alpha_vector)*self.args.alpha).sample_n(1).to(self.args.device) #* sample weights per batch, seem better samples = sum((self.rollout(generator, use_rand_policy, weights, replay=True) for i in range(self.args.trajectories_mbsize)), []) # offline sampling from dataset if self.args.offline_mbsize > 0 and dataset is not None: # use the oracle reward scores = torch.tensor(pd.DataFrame.from_dict(dataset.scores).values, dtype=torch.float32).to(args.device) if Y_bounds is not None: scores = normalize(scores, Y_bounds) reward = torch.matmul(scores, weights.reshape(-1, 1)) prob = (reward / sum(reward)).squeeze(1).cpu().numpy() eidx = np.random.choice(list(range(len(dataset.all_mols))), 
self.args.offline_mbsize, replace=False, p=prob) offline_samples = sum((self._get(i, dataset.all_mols, weights) for i in eidx), []) samples += offline_samples return zip(*samples) def initialize_hindsight_mols(self, dataset): for m in dataset.all_mols: for i, weights in enumerate(self.test_weights): r, raw_r = self._get_reward(m, weights) self.hindsight_mols[i].append((raw_r[0].item(), m.smiles, m)) for i, weights in enumerate(self.test_weights): self.hindsight_mols[i] = sorted(self.hindsight_mols[i], key=lambda x:(x[0])) self.hindsight_smiles[i] = [x[1] for x in self.hindsight_mols[i]] def train_generative_model(args, generator, bpath, proxy, oracle, dataset, test_weights, round_idx, do_save=False): print("Training generator...") os.makedirs(os.path.join(args.log_dir, f'round_{round_idx}'), exist_ok=True) device = args.device rollout_worker = BoRolloutWorker(args, bpath, proxy, device) rollout_worker.test_weights = torch.tensor(test_weights).to(device) rollout_worker.initialize_hindsight_mols(dataset) Y_bounds = torch.stack([proxy.partitioning.Y.min(dim=-2).values, proxy.partitioning.Y.max(dim=-2).values]) def save_stuff(round_idx, iter): torch.save(generator.state_dict(), os.path.join( args.log_dir, 'round_{}/{}_generator_checkpoint.pth'.format(round_idx, iter))) pickle.dump(rollout_worker.sampled_mols, gzip.open(f'{args.log_dir}/sampled_mols.pkl.gz', 'wb')) multi_thread = not args.debug if multi_thread: sampler = rollout_worker.start_samplers(generator, 8, dataset) def stop_everything(): print('joining') rollout_worker.stop_samplers_and_join() last_losses = [] train_losses = [] test_losses = [] test_infos = [] train_infos = [] time_last_check = time.time() for i in range(args.num_iterations + 1): if multi_thread: r = sampler() for thread in rollout_worker.sampler_threads: if thread.failed: stop_everything() pdb.post_mortem(thread.exception.__traceback__) return p, pb, a, pw, w, r, s, d, mols = r else: p, pb, a, pw, w, r, s, d, mols = rollout_worker.sample2batch( rollout_worker.execute_train_episode_batch(generator, dataset, Y_bounds, use_rand_policy=True)) loss = generator.train_step(p, pb, a, pw, w, r, s, d, mols, i) last_losses.append(loss) if not i % 100: train_loss = [np.round(np.mean(i), 3) for i in zip(*last_losses)] train_losses.append(train_loss) args.logger.add_scalar( 'Loss/round{}/train'.format(round_idx), train_loss[0], use_context=False) print('Iter {}: Loss {}, Time {}'.format( i, train_loss, round(time.time() - time_last_check, 3))) time_last_check = time.time() last_losses = [] if not i % args.sample_iterations and i != 0: volume, volume_oracle, reward_weight, reward_mean, test_loss, diversity = sample_batch( args, generator, rollout_worker, oracle, proxy, Y_bounds, compute_multi_objective_metric=False) args.logger.add_scalar( 'round{}/Top-100-sampled/volumes'.format(round_idx), volume, use_context=False) args.logger.add_scalar( 'round{}/Top-100-sampled/volumes_oracle'.format(round_idx), volume_oracle, use_context=False) args.logger.add_scalars( 'round{}/Top-100-sampled/reward_weight'.format(round_idx), reward_weight, use_context=False) args.logger.add_scalar( 'round{}/Top-100-sampled/reward_mean'.format(round_idx), reward_mean, use_context=False) # reward_mean is a dict, the keys are test_weights args.logger.add_scalar( 'round{}/Top-100-sampled/test_loss'.format(round_idx), test_loss, use_context=False) args.logger.add_scalar( 'round{}/Top-100-sampled/dists'.format(round_idx), diversity, use_context=False) if do_save: save_stuff(round_idx, i) stop_everything() if 
do_save: save_stuff(round_idx, i) checkpoint_path = os.path.join(args.log_dir, f'round_{round_idx}/{i}_generator_checkpoint.pth') generator.load_state_dict(torch.load(checkpoint_path)) return rollout_worker, {'train_losses': train_losses, 'test_losses': test_losses, 'test_infos': test_infos, 'train_infos': train_infos} def sample_batch(args, generator, rollout_worker, oracle=None, proxy=None, ref_mols=None, Y_bounds=None, compute_multi_objective_metric=False): score_succ = {'gsk3b': 0.5, 'jnk3': 0.5, 'drd2': 0.5, 'chemprop_sars': 0.5, 'chemprop_hiv': 0.5, "seh": 0.5, 'qed': 0.6, 'sa': 0.67} if Y_bounds is None: Y_bounds = torch.stack([proxy.partitioning.Y.min( dim=-2).values, proxy.partitioning.Y.max(dim=-2).values]) time_start = time.time() print(f"Sampling molecules...") raw_rewards = [] raw_rewards_weight = {} means = [] picked_mols = [] smis = [] for i, weights in enumerate(rollout_worker.test_weights): sampled_mols = [] sampled_raw_rewards = [] sampled_means = [] sampled_smis = [] while len(sampled_mols) < args.num_samples: rollout_worker.rollout(generator, use_rand_policy=False, weights=torch.tensor(weights).unsqueeze(0).to(args.device)) (raw_r, _, m, trajectory_stats, inflow) = rollout_worker.sampled_mols[-1] sampled_mols.append(m) sampled_raw_rewards.append(raw_r[0].item()) sampled_means.append(raw_r[1]) sampled_smis.append(m.smiles) idx_pick = np.argsort(sampled_raw_rewards)[::-1][:int(args.num_samples/len(rollout_worker.test_weights))] picked_mols.extend(np.array(sampled_mols)[idx_pick].tolist()) means.extend(np.array(sampled_means)[idx_pick].tolist()) smis.extend(np.array(sampled_smis)[idx_pick].tolist()) raw_rewards.extend(np.array(sampled_raw_rewards)[idx_pick].tolist()) raw_rewards_weight[str(weights.cpu())] = np.array(sampled_raw_rewards)[idx_pick].mean() raw_rewards_mean = np.mean(list(raw_rewards_weight.values())) assert len(picked_mols) == args.num_samples top_means = torch.tensor(means) scores_dict = oracle.batch_get_scores(picked_mols) scores = torch.tensor(pd.DataFrame.from_dict(scores_dict).values) test_loss = F.mse_loss(top_means, scores) hypervolume = Hypervolume(ref_point=torch.zeros(len(args.objectives))) volume = hypervolume.compute(top_means) volume_oracle = hypervolume.compute(scores) diversity = compute_diversity(picked_mols) batch_metrics = {'Hypervolume_reward': volume, 'Hypervolume_oracle': volume_oracle, 'Reward_mean': raw_rewards_mean, 'scores_max': pd.DataFrame.from_dict(scores_dict).max().to_dict(), 'scores_mean': pd.DataFrame.from_dict(scores_dict).mean().to_dict(), 'Test_loss': test_loss, 'Diversity': diversity} print(batch_metrics) print('Time: {}'.format(time.time()-time_start)) if not compute_multi_objective_metric: return volume, volume_oracle, raw_rewards_weight, raw_rewards_mean, test_loss, diversity else: for i in range(len(picked_mols)): picked_mols[i].score = scores_dict[i] # success/diversity/novelty is computed among the top mols. success, positive_mols = compute_success( picked_mols, scores_dict, args.objectives, score_succ) succ_diversity = compute_diversity(positive_mols) if ref_mols: novelty = compute_novelty(positive_mols, ref_mols) else: novelty = 1. 
mo_metrics = {'success': success, 'novelty': novelty, 'succ_diversity': succ_diversity, } picked_smis = [(raw_rewards[i], picked_mols[i].score, smis[i]) for i in range(len(raw_rewards))] print(mo_metrics) return (picked_mols, scores_dict, picked_smis), batch_metrics, mo_metrics def log_overall_metrics(args, dataset, batch_infos=None, MultiObjective_metrics=None): volume = dataset.compute_hypervolume() print("Hypervolume for {}: {}".format(args.logger.context, volume)) args.logger.add_scalar('Metric/hypervolume', volume, use_context=False) args.logger.add_object('scores', dataset.scores) args.logger.add_object('smis', dataset.smis) if batch_infos: args.logger.add_scalar( 'Metric/test_loss', batch_infos['Test_loss'], use_context=False) args.logger.add_object('collected_info', batch_infos) if MultiObjective_metrics: args.logger.add_scalars('Metric/MultiObjective', MultiObjective_metrics, use_context=False) def get_test_rays(): if args.n_objectives == 3: n_partitions = 6 elif args.n_objectives == 4: n_partitions = 7 test_rays = get_reference_directions("das-dennis", args.n_objectives, n_partitions=n_partitions).astype(np.float32) test_rays = test_rays[[(r > 0).all() for r in test_rays]] print(f"initialize {len(test_rays)} test rays") return test_rays def main(args):
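A minimal sketch of the per-batch preference-weight draw that execute_train_episode_batch performs with a Dirichlet distribution; the four-entry alpha_vector and the concentration scale are illustrative placeholders, and .sample((1,)) is used here in place of the older sample_n(1) call seen above:

import torch
from torch.distributions.dirichlet import Dirichlet

alpha = 1.0                                    # concentration scale (args.alpha in the code above)
alpha_vector = torch.tensor([1., 1., 1., 1.])  # one entry per objective (args.alpha_vector)
weights = Dirichlet(alpha_vector * alpha).sample((1,))
print(weights, weights.sum(dim=-1))            # shape (1, 4); each row sums to 1

Drawing a fresh simplex point per batch lets a single weight-conditioned generator cover the whole preference space instead of training one model per fixed weighting.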
set_random_seed(args.seed)
8
2023-10-24 14:10:35+00:00
24k
caglarkucuk/earthformer-satellite-to-radar
ef-sat2rad/earthformer/cuboid_transformer/cuboid_transformer_unet_dec.py
[ { "identifier": "Upsample3DLayer", "path": "ef-sat2rad/earthformer/cuboid_transformer/cuboid_transformer.py", "snippet": "class Upsample3DLayer(nn.Module):\n \"\"\"Upsampling based on nn.UpSampling and Conv3x3.\n\n If the temporal dimension remains the same:\n x --> interpolation-2d (neares...
from typing import Sequence, Union from functools import lru_cache from collections import OrderedDict from torch import nn from einops import rearrange from .cuboid_transformer import ( Upsample3DLayer, PatchMerging3D, PosEmbed, InitialEncoder, FinalDecoder, InitialStackPatchMergingEncoder, FinalStackUpsamplingDecoder, StackCuboidSelfAttentionBlock, StackCuboidCrossAttentionBlock, CuboidTransformerEncoder) from .cuboid_transformer_patterns import CuboidSelfAttentionPatterns, CuboidCrossAttentionPatterns from .utils import ( get_activation, get_norm_layer, _generalize_padding, _generalize_unpadding, apply_initialization, round_to) import warnings import torch import torch.nn.functional as F import torch.utils.checkpoint as checkpoint
17,578
attn_drop=attn_drop, proj_drop=proj_drop, ffn_drop=ffn_drop, upsample_kernel_size=upsample_kernel_size, ffn_activation=ffn_activation, gated_ffn=gated_ffn, norm_layer=norm_layer, use_inter_ffn=dec_use_inter_ffn, max_temporal_relative=T_in + T_out, padding_type=padding_type, hierarchical_pos_embed=dec_hierarchical_pos_embed, pos_embed_type=pos_embed_type, use_self_global=(num_global_vectors > 0) and use_dec_self_global, self_update_global=dec_self_update_global, use_cross_global=(num_global_vectors > 0) and use_dec_cross_global, use_global_vector_ffn=use_global_vector_ffn, use_global_self_attn=use_global_self_attn, separate_global_qkv=separate_global_qkv, global_dim_ratio=global_dim_ratio, checkpoint_level=checkpoint_level, use_relative_pos=use_relative_pos, self_attn_use_final_proj=self_attn_use_final_proj, # initialization attn_linear_init_mode=attn_linear_init_mode, ffn_linear_init_mode=ffn_linear_init_mode, conv_init_mode=conv_init_mode, up_linear_init_mode=down_up_linear_init_mode, norm_init_mode=norm_init_mode, # different from CuboidTransformerDecoder downsample=downsample, downsample_type=downsample_type, cross_mode=unet_dec_cross_mode, down_linear_init_mode=down_up_linear_init_mode, ) self.reset_parameters() def get_initial_encoder_final_decoder( self, initial_downsample_type, activation, # initial_downsample_type=="conv" initial_downsample_scale, initial_downsample_conv_layers, final_upsample_conv_layers, padding_type, # initial_downsample_type == "stack_conv" initial_downsample_stack_conv_num_layers, initial_downsample_stack_conv_dim_list, initial_downsample_stack_conv_downscale_list, initial_downsample_stack_conv_num_conv_list, ): T_in, H_in, W_in, C_in = self.input_shape T_out, H_out, W_out, C_out = self.target_shape # Construct the initial upsampling / downsampling layers self.initial_downsample_type = initial_downsample_type if self.initial_downsample_type == "conv": if isinstance(initial_downsample_scale, int): initial_downsample_scale = (1, initial_downsample_scale, initial_downsample_scale) elif len(initial_downsample_scale) == 2: initial_downsample_scale = (1, *initial_downsample_scale) elif len(initial_downsample_scale) == 3: initial_downsample_scale = tuple(initial_downsample_scale) else: raise NotImplementedError(f"initial_downsample_scale {initial_downsample_scale} format not supported!") # if any(ele > 1 for ele in initial_downsample_scale): self.initial_encoder = InitialEncoder(dim=C_in, out_dim=self.base_units, downsample_scale=initial_downsample_scale, num_conv_layers=initial_downsample_conv_layers, padding_type=padding_type, activation=activation, conv_init_mode=self.conv_init_mode, linear_init_mode=self.down_up_linear_init_mode, norm_init_mode=self.norm_init_mode) self.initial_aux_encoder = InitialEncoder(dim=self.auxiliary_channels, out_dim=self.base_units, downsample_scale=initial_downsample_scale, num_conv_layers=initial_downsample_conv_layers, padding_type=padding_type, activation=activation, conv_init_mode=self.conv_init_mode, linear_init_mode=self.down_up_linear_init_mode, norm_init_mode=self.norm_init_mode) self.final_decoder = FinalDecoder(dim=self.base_units, target_thw=(T_out, H_out, W_out), num_conv_layers=final_upsample_conv_layers, activation=activation, conv_init_mode=self.conv_init_mode, linear_init_mode=self.down_up_linear_init_mode, norm_init_mode=self.norm_init_mode) new_input_shape = self.initial_encoder.patch_merge.get_out_shape(self.input_shape) self.dec_final_proj = nn.Linear(self.base_units, C_out) elif self.initial_downsample_type == 
"stack_conv": if initial_downsample_stack_conv_dim_list is None: initial_downsample_stack_conv_dim_list = [self.base_units, ] * initial_downsample_stack_conv_num_layers self.initial_encoder = InitialStackPatchMergingEncoder( num_merge=initial_downsample_stack_conv_num_layers, in_dim=C_in, out_dim_list=initial_downsample_stack_conv_dim_list, downsample_scale_list=initial_downsample_stack_conv_downscale_list, num_conv_per_merge_list=initial_downsample_stack_conv_num_conv_list, padding_type=padding_type, activation=activation, conv_init_mode=self.conv_init_mode, linear_init_mode=self.down_up_linear_init_mode, norm_init_mode=self.norm_init_mode) self.initial_aux_encoder = InitialStackPatchMergingEncoder( num_merge=initial_downsample_stack_conv_num_layers, in_dim=self.auxiliary_channels, out_dim_list=initial_downsample_stack_conv_dim_list, downsample_scale_list=initial_downsample_stack_conv_downscale_list, num_conv_per_merge_list=initial_downsample_stack_conv_num_conv_list, padding_type=padding_type, activation=activation, conv_init_mode=self.conv_init_mode, linear_init_mode=self.down_up_linear_init_mode, norm_init_mode=self.norm_init_mode) # use `self.target_shape` to get correct T_out initial_encoder_out_shape_list = self.initial_encoder.get_out_shape_list(self.target_shape) dec_target_shape_list, dec_in_dim = \
"""CuboidTransformer adapted for auxiliary inputs in decoder""" class CuboidTransformerUNetDecoder(nn.Module): """U-Net style Decoder of the CuboidTransformer. For each block, we first apply the StackCuboidSelfAttention and then apply the StackCuboidCrossAttention We add cross attention following 3 modes: cross_mode == "down": x --> attn --> cross_attn --> downscale --> ... --> z --> attn --> upscale --> ... --> out ^ ^ | | | | mem mem cross_mode == "up": x --> attn --> downscale --> ... --> z --> attn --> cross_attn --> upscale --> ... --> out ^ ^ | | | | mem mem cross_mode == "both": x --> attn --> cross_attn --> downscale --> ... --> z --> attn --> cross_attn --> upscale --> ... --> out ^ ^ ^ ^ | | | | | | | | mem mem mem mem """ def __init__(self, target_temporal_length, mem_shapes, cross_start=0, depth=[2, 2], upsample_type="upsample", upsample_kernel_size=3, block_self_attn_patterns=None, block_self_cuboid_size=[(4, 4, 4), (4, 4, 4)], block_self_cuboid_strategy=[('l', 'l', 'l'), ('d', 'd', 'd')], block_self_shift_size=[(1, 1, 1), (0, 0, 0)], block_cross_attn_patterns=None, block_cross_cuboid_hw=[(4, 4), (4, 4)], block_cross_cuboid_strategy=[('l', 'l', 'l'), ('d', 'l', 'l')], block_cross_shift_hw=[(0, 0), (0, 0)], block_cross_n_temporal=[1, 2], cross_last_n_frames=None, num_heads=4, attn_drop=0.0, proj_drop=0.0, ffn_drop=0.0, ffn_activation='leaky', gated_ffn=False, norm_layer='layer_norm', use_inter_ffn=False, hierarchical_pos_embed=False, pos_embed_type='t+hw', max_temporal_relative=50, padding_type='ignore', checkpoint_level=True, use_relative_pos=True, self_attn_use_final_proj=True, # global vectors use_self_global=False, self_update_global=True, use_cross_global=False, use_global_vector_ffn=True, use_global_self_attn=False, separate_global_qkv=False, global_dim_ratio=1, # initialization attn_linear_init_mode="0", ffn_linear_init_mode="0", conv_init_mode="0", up_linear_init_mode="0", norm_init_mode="0", # different from `CuboidTransformerDecoder`, no arg `use_first_self_attn=False` downsample=2, downsample_type='patch_merge', cross_mode="up", down_linear_init_mode="0", ): """ Parameters ---------- target_temporal_length mem_shapes cross_start The block to start cross attention depth Depth of each block downsample The downsample ratio downsample_type Type of the downsampling layer upsample_type The type of the upsampling layers upsample_kernel_size block_self_attn_patterns Pattern of the block self attentions block_self_cuboid_size block_self_cuboid_strategy block_self_shift_size block_cross_attn_patterns block_cross_cuboid_hw block_cross_cuboid_strategy block_cross_shift_hw block_cross_n_temporal cross_last_n_frames cross_mode Must be one of ("up", "down", "both") Control whether the upsampling/downsampling/both phases cross attend to the encoded latent features num_heads attn_drop proj_drop ffn_drop ffn_activation gated_ffn Whether to enable gated ffn or not norm_layer The normalization layer use_inter_ffn Whether to use intermediate FFN hierarchical_pos_embed Whether to add pos embedding for each hierarchy. 
max_temporal_relative padding_type checkpoint_level """ super(CuboidTransformerUNetDecoder, self).__init__() # initialization mode self.attn_linear_init_mode = attn_linear_init_mode self.ffn_linear_init_mode = ffn_linear_init_mode self.conv_init_mode = conv_init_mode self.up_linear_init_mode = up_linear_init_mode self.norm_init_mode = norm_init_mode assert len(depth) == len(mem_shapes) self.target_temporal_length = target_temporal_length self.num_blocks = len(mem_shapes) self.cross_start = cross_start self.mem_shapes = mem_shapes self.block_units = tuple(mem_shape[-1] for mem_shape in self.mem_shapes) self.depth = depth if not isinstance(downsample, (tuple, list)): downsample = (1, downsample, downsample) self.downsample = downsample self.downsample_type = downsample_type self.upsample_type = upsample_type self.hierarchical_pos_embed = hierarchical_pos_embed self.checkpoint_level = checkpoint_level self.use_self_global = use_self_global self.self_update_global = self_update_global self.use_cross_global = use_cross_global self.use_global_vector_ffn = use_global_vector_ffn assert cross_mode in ["up", "down", "both"], f"Invalid cross_mode {cross_mode}!" self.cross_mode = cross_mode self.up_use_cross = self.cross_mode in ["up", "both"] self.down_use_cross = self.cross_mode in ["down", "both"] if self.num_blocks > 1: # Construct downsampling layers if downsample_type == 'patch_merge': self.downsample_layers = nn.ModuleList( [PatchMerging3D(dim=self.block_units[i], downsample=downsample, # downsample=(1, 1, 1), padding_type=padding_type, out_dim=self.block_units[i + 1], linear_init_mode=down_linear_init_mode, norm_init_mode=norm_init_mode) for i in range(self.num_blocks - 1)]) else: raise NotImplementedError # Construct upsampling layers if self.upsample_type == "upsample": self.upsample_layers = nn.ModuleList([ Upsample3DLayer( dim=self.mem_shapes[i + 1][-1], out_dim=self.mem_shapes[i][-1], target_size=(target_temporal_length,) + self.mem_shapes[i][1:3], kernel_size=upsample_kernel_size, temporal_upsample=False, conv_init_mode=conv_init_mode, ) for i in range(self.num_blocks - 1)]) else: raise NotImplementedError if self.hierarchical_pos_embed: self.down_hierarchical_pos_embed_l = nn.ModuleList([ PosEmbed(embed_dim=self.block_units[i], typ=pos_embed_type, maxT=self.mem_shapes[i][0], maxH=self.mem_shapes[i][1], maxW=self.mem_shapes[i][2]) for i in range(self.num_blocks - 1)]) self.up_hierarchical_pos_embed_l = nn.ModuleList([ PosEmbed(embed_dim=self.block_units[i], typ=pos_embed_type, maxT=self.mem_shapes[i][0], maxH=self.mem_shapes[i][1], maxW=self.mem_shapes[i][2]) for i in range(self.num_blocks - 1)]) if block_self_attn_patterns is not None: if isinstance(block_self_attn_patterns, (tuple, list)): assert len(block_self_attn_patterns) == self.num_blocks else: block_self_attn_patterns = [block_self_attn_patterns for _ in range(self.num_blocks)] block_self_cuboid_size = [] block_self_cuboid_strategy = [] block_self_shift_size = [] for idx, key in enumerate(block_self_attn_patterns): func = CuboidSelfAttentionPatterns.get(key) cuboid_size, strategy, shift_size = func(mem_shapes[idx]) block_self_cuboid_size.append(cuboid_size) block_self_cuboid_strategy.append(strategy) block_self_shift_size.append(shift_size) else: if not isinstance(block_self_cuboid_size[0][0], (list, tuple)): block_self_cuboid_size = [block_self_cuboid_size for _ in range(self.num_blocks)] else: assert len(block_self_cuboid_size) == self.num_blocks,\ f'Incorrect input format! 
Received block_self_cuboid_size={block_self_cuboid_size}' if not isinstance(block_self_cuboid_strategy[0][0], (list, tuple)): block_self_cuboid_strategy = [block_self_cuboid_strategy for _ in range(self.num_blocks)] else: assert len(block_self_cuboid_strategy) == self.num_blocks,\ f'Incorrect input format! Received block_self_cuboid_strategy={block_self_cuboid_strategy}' if not isinstance(block_self_shift_size[0][0], (list, tuple)): block_self_shift_size = [block_self_shift_size for _ in range(self.num_blocks)] else: assert len(block_self_shift_size) == self.num_blocks,\ f'Incorrect input format! Received block_self_shift_size={block_self_shift_size}' down_self_blocks = [] up_self_blocks = [] for i in range(self.num_blocks): ele_depth = depth[i] stack_cuboid_blocks =\ [StackCuboidSelfAttentionBlock( dim=self.mem_shapes[i][-1], num_heads=num_heads, block_cuboid_size=block_self_cuboid_size[i], block_strategy=block_self_cuboid_strategy[i], block_shift_size=block_self_shift_size[i], attn_drop=attn_drop, proj_drop=proj_drop, ffn_drop=ffn_drop, activation=ffn_activation, gated_ffn=gated_ffn, norm_layer=norm_layer, use_inter_ffn=use_inter_ffn, padding_type=padding_type, use_global_vector=use_self_global, use_global_vector_ffn=use_global_vector_ffn, use_global_self_attn=use_global_self_attn, separate_global_qkv=separate_global_qkv, global_dim_ratio=global_dim_ratio, checkpoint_level=checkpoint_level, use_relative_pos=use_relative_pos, use_final_proj=self_attn_use_final_proj, # initialization attn_linear_init_mode=attn_linear_init_mode, ffn_linear_init_mode=ffn_linear_init_mode, norm_init_mode=norm_init_mode, ) for _ in range(ele_depth)] down_self_blocks.append(nn.ModuleList(stack_cuboid_blocks)) stack_cuboid_blocks = \ [StackCuboidSelfAttentionBlock( dim=self.mem_shapes[i][-1], num_heads=num_heads, block_cuboid_size=block_self_cuboid_size[i], block_strategy=block_self_cuboid_strategy[i], block_shift_size=block_self_shift_size[i], attn_drop=attn_drop, proj_drop=proj_drop, ffn_drop=ffn_drop, activation=ffn_activation, gated_ffn=gated_ffn, norm_layer=norm_layer, use_inter_ffn=use_inter_ffn, padding_type=padding_type, use_global_vector=use_self_global, use_global_vector_ffn=use_global_vector_ffn, use_global_self_attn=use_global_self_attn, separate_global_qkv=separate_global_qkv, global_dim_ratio=global_dim_ratio, checkpoint_level=checkpoint_level, use_relative_pos=use_relative_pos, use_final_proj=self_attn_use_final_proj, # initialization attn_linear_init_mode=attn_linear_init_mode, ffn_linear_init_mode=ffn_linear_init_mode, norm_init_mode=norm_init_mode, ) for _ in range(ele_depth)] up_self_blocks.append(nn.ModuleList(stack_cuboid_blocks)) self.down_self_blocks = nn.ModuleList(down_self_blocks) self.up_self_blocks = nn.ModuleList(up_self_blocks) if block_cross_attn_patterns is not None: if isinstance(block_cross_attn_patterns, (tuple, list)): assert len(block_cross_attn_patterns) == self.num_blocks else: block_cross_attn_patterns = [block_cross_attn_patterns for _ in range(self.num_blocks)] block_cross_cuboid_hw = [] block_cross_cuboid_strategy = [] block_cross_shift_hw = [] block_cross_n_temporal = [] for idx, key in enumerate(block_cross_attn_patterns): if key == "last_frame_dst": cuboid_hw = None shift_hw = None strategy = None n_temporal = None else: func = CuboidCrossAttentionPatterns.get(key) cuboid_hw, shift_hw, strategy, n_temporal = func(mem_shapes[idx]) block_cross_cuboid_hw.append(cuboid_hw) block_cross_cuboid_strategy.append(strategy) block_cross_shift_hw.append(shift_hw) 
block_cross_n_temporal.append(n_temporal) else: if not isinstance(block_cross_cuboid_hw[0][0], (list, tuple)): block_cross_cuboid_hw = [block_cross_cuboid_hw for _ in range(self.num_blocks)] else: assert len(block_cross_cuboid_hw) == self.num_blocks, \ f'Incorrect input format! Received block_cross_cuboid_hw={block_cross_cuboid_hw}' if not isinstance(block_cross_cuboid_strategy[0][0], (list, tuple)): block_cross_cuboid_strategy = [block_cross_cuboid_strategy for _ in range(self.num_blocks)] else: assert len(block_cross_cuboid_strategy) == self.num_blocks, \ f'Incorrect input format! Received block_cross_cuboid_strategy={block_cross_cuboid_strategy}' if not isinstance(block_cross_shift_hw[0][0], (list, tuple)): block_cross_shift_hw = [block_cross_shift_hw for _ in range(self.num_blocks)] else: assert len(block_cross_shift_hw) == self.num_blocks, \ f'Incorrect input format! Received block_cross_shift_hw={block_cross_shift_hw}' if not isinstance(block_cross_n_temporal[0], (list, tuple)): block_cross_n_temporal = [block_cross_n_temporal for _ in range(self.num_blocks)] else: assert len(block_cross_n_temporal) == self.num_blocks, \ f'Incorrect input format! Received block_cross_n_temporal={block_cross_n_temporal}' if self.up_use_cross: self.up_cross_blocks = nn.ModuleList() for i in range(self.cross_start, self.num_blocks): cross_block = nn.ModuleList( [StackCuboidCrossAttentionBlock( dim=self.mem_shapes[i][-1], num_heads=num_heads, block_cuboid_hw=block_cross_cuboid_hw[i], block_strategy=block_cross_cuboid_strategy[i], block_shift_hw=block_cross_shift_hw[i], block_n_temporal=block_cross_n_temporal[i], cross_last_n_frames=cross_last_n_frames, attn_drop=attn_drop, proj_drop=proj_drop, ffn_drop=ffn_drop, gated_ffn=gated_ffn, norm_layer=norm_layer, use_inter_ffn=use_inter_ffn, activation=ffn_activation, max_temporal_relative=max_temporal_relative, padding_type=padding_type, use_global_vector=use_cross_global, separate_global_qkv=separate_global_qkv, global_dim_ratio=global_dim_ratio, checkpoint_level=checkpoint_level, use_relative_pos=use_relative_pos, # initialization attn_linear_init_mode=attn_linear_init_mode, ffn_linear_init_mode=ffn_linear_init_mode, norm_init_mode=norm_init_mode, ) for _ in range(depth[i])]) self.up_cross_blocks.append(cross_block) if self.down_use_cross: self.down_cross_blocks = nn.ModuleList() for i in range(self.cross_start, self.num_blocks): cross_block = nn.ModuleList( [StackCuboidCrossAttentionBlock( dim=self.mem_shapes[i][-1], num_heads=num_heads, block_cuboid_hw=block_cross_cuboid_hw[i], block_strategy=block_cross_cuboid_strategy[i], block_shift_hw=block_cross_shift_hw[i], block_n_temporal=block_cross_n_temporal[i], cross_last_n_frames=cross_last_n_frames, attn_drop=attn_drop, proj_drop=proj_drop, ffn_drop=ffn_drop, gated_ffn=gated_ffn, norm_layer=norm_layer, use_inter_ffn=use_inter_ffn, activation=ffn_activation, max_temporal_relative=max_temporal_relative, padding_type=padding_type, use_global_vector=use_cross_global, separate_global_qkv=separate_global_qkv, global_dim_ratio=global_dim_ratio, checkpoint_level=checkpoint_level, use_relative_pos=use_relative_pos, # initialization attn_linear_init_mode=attn_linear_init_mode, ffn_linear_init_mode=ffn_linear_init_mode, norm_init_mode=norm_init_mode, ) for _ in range(depth[i])]) self.down_cross_blocks.append(cross_block) self.reset_parameters() def reset_parameters(self): for ms in self.down_self_blocks: for m in ms: m.reset_parameters() for ms in self.up_self_blocks: for m in ms: m.reset_parameters() if 
self.up_use_cross: for ms in self.up_cross_blocks: for m in ms: m.reset_parameters() if self.down_use_cross: for ms in self.down_cross_blocks: for m in ms: m.reset_parameters() if self.num_blocks > 1: for m in self.downsample_layers: m.reset_parameters() for m in self.upsample_layers: m.reset_parameters() if self.hierarchical_pos_embed: for m in self.down_hierarchical_pos_embed_l: m.reset_parameters() for m in self.up_hierarchical_pos_embed_l: m.reset_parameters() def forward(self, x, mem_l, mem_global_vector_l=None): """ Parameters ---------- x Shape (B, T, H, W, C) mem_l A list of memory tensors Returns ------- out """ B, T, H, W, C = x.shape assert T == self.target_temporal_length assert (H, W) == (self.mem_shapes[0][1], self.mem_shapes[0][2]) new_mem_global_vector_l = [] for i in range(self.num_blocks): # Downample if i > 0: x = self.downsample_layers[i - 1](x) if self.hierarchical_pos_embed: x = self.down_hierarchical_pos_embed_l[i - 1](x) mem_global_vector = None if mem_global_vector_l is None else mem_global_vector_l[i] for idx in range(self.depth[i]): if self.use_self_global: if self.self_update_global: x, mem_global_vector = self.down_self_blocks[i][idx](x, mem_global_vector) else: x, _ = self.down_self_blocks[i][idx](x, mem_global_vector) else: x = self.down_self_blocks[i][idx](x) if self.down_use_cross and i >= self.cross_start: x = self.down_cross_blocks[i - self.cross_start][idx](x, mem_l[i], mem_global_vector) new_mem_global_vector_l.append(mem_global_vector) for i in range(self.num_blocks - 1, -1, -1): mem_global_vector = new_mem_global_vector_l[i] for idx in range(self.depth[i]): if self.use_self_global: if self.self_update_global: x, mem_global_vector = self.up_self_blocks[i][idx](x, mem_global_vector) else: x, _ = self.up_self_blocks[i][idx](x, mem_global_vector) else: x = self.up_self_blocks[i][idx](x) if self.up_use_cross and i >= self.cross_start: x = self.up_cross_blocks[i - self.cross_start][idx](x, mem_l[i], mem_global_vector) # Upsample if i > 0: x = self.upsample_layers[i - 1](x) if self.hierarchical_pos_embed: x = self.up_hierarchical_pos_embed_l[i - 1](x) return x class CuboidTransformerAuxModel(nn.Module): """Cuboid Transformer with auxiliary input in decoder for spatiotemporal forecasting We adopt the Non-autoregressive encoder-decoder architecture. The decoder takes the multi-scale memory output from the encoder, as well as auxiliary input. 
The initial downsampling / upsampling layers will be Downsampling: [K x Conv2D --> PatchMerge] Upsampling: [Nearest Interpolation-based Upsample --> K x Conv2D] x -----------> downsample (optional) ---> (+pos_embed) ---> enc ---------> mem_l | | |------------------| | | aux_input ---> downsample (optional) ---> (+pos_embed) ---> enc -> cross_attn -> dec -> upsample (optional) -> y """ def __init__(self, input_shape, target_shape, base_units=128, block_units=None, scale_alpha=1.0, num_heads=4, attn_drop=0.0, proj_drop=0.0, ffn_drop=0.0, # inter-attn downsample/upsample downsample=2, downsample_type='patch_merge', upsample_type="upsample", upsample_kernel_size=3, # encoder enc_depth=[4, 4, 4], enc_attn_patterns=None, enc_cuboid_size=[(4, 4, 4), (4, 4, 4)], enc_cuboid_strategy=[('l', 'l', 'l'), ('d', 'd', 'd')], enc_shift_size=[(0, 0, 0), (0, 0, 0)], enc_use_inter_ffn=True, # decoder dec_depth=[2, 2], dec_cross_start=0, dec_self_attn_patterns=None, dec_self_cuboid_size=[(4, 4, 4), (4, 4, 4)], dec_self_cuboid_strategy=[('l', 'l', 'l'), ('d', 'd', 'd')], dec_self_shift_size=[(1, 1, 1), (0, 0, 0)], dec_cross_attn_patterns=None, dec_cross_cuboid_hw=[(4, 4), (4, 4)], dec_cross_cuboid_strategy=[('l', 'l', 'l'), ('d', 'l', 'l')], dec_cross_shift_hw=[(0, 0), (0, 0)], dec_cross_n_temporal=[1, 2], dec_cross_last_n_frames=None, dec_use_inter_ffn=True, dec_hierarchical_pos_embed=False, # global vectors num_global_vectors=4, use_dec_self_global=True, dec_self_update_global=True, use_dec_cross_global=True, use_global_vector_ffn=True, use_global_self_attn=False, separate_global_qkv=False, global_dim_ratio=1, # # initial downsample and final upsample initial_downsample_type="conv", initial_downsample_activation="leaky", # initial_downsample_type=="conv" initial_downsample_scale=1, initial_downsample_conv_layers=2, final_upsample_conv_layers=2, # initial_downsample_type == "stack_conv" initial_downsample_stack_conv_num_layers=1, initial_downsample_stack_conv_dim_list=None, initial_downsample_stack_conv_downscale_list=[1, ], initial_downsample_stack_conv_num_conv_list=[2, ], # # end of initial downsample and final upsample ffn_activation='leaky', gated_ffn=False, norm_layer='layer_norm', padding_type='ignore', pos_embed_type='t+hw', checkpoint_level=True, use_relative_pos=True, self_attn_use_final_proj=True, # initialization attn_linear_init_mode="0", ffn_linear_init_mode="0", conv_init_mode="0", down_up_linear_init_mode="0", norm_init_mode="0", # different from CuboidTransformerModel, no arg `dec_use_first_self_attn=False` auxiliary_channels: int = 1, unet_dec_cross_mode="up", ): """ Parameters ---------- input_shape Shape of the input tensor. It will be (T, H, W, C_in) target_shape Shape of the input tensor. It will be (T_out, H, W, C_out) base_units The base units """ super(CuboidTransformerAuxModel, self).__init__() # initialization mode self.attn_linear_init_mode = attn_linear_init_mode self.ffn_linear_init_mode = ffn_linear_init_mode self.conv_init_mode = conv_init_mode self.down_up_linear_init_mode = down_up_linear_init_mode self.norm_init_mode = norm_init_mode assert len(enc_depth) == len(dec_depth) self.base_units = base_units self.num_global_vectors = num_global_vectors if global_dim_ratio != 1: assert separate_global_qkv == True, \ f"Setting global_dim_ratio != 1 requires separate_global_qkv == True." 
self.global_dim_ratio = global_dim_ratio self.input_shape = input_shape self.target_shape = target_shape T_in, H_in, W_in, C_in = input_shape T_out, H_out, W_out, C_out = target_shape assert H_in == H_out and W_in == W_out self.auxiliary_channels = auxiliary_channels if self.num_global_vectors > 0: self.init_global_vectors = nn.Parameter( torch.zeros((self.num_global_vectors, global_dim_ratio*base_units))) new_input_shape = self.get_initial_encoder_final_decoder( initial_downsample_scale=initial_downsample_scale, initial_downsample_type=initial_downsample_type, activation=initial_downsample_activation, # initial_downsample_type=="conv" initial_downsample_conv_layers=initial_downsample_conv_layers, final_upsample_conv_layers=final_upsample_conv_layers, padding_type=padding_type, # initial_downsample_type == "stack_conv" initial_downsample_stack_conv_num_layers=initial_downsample_stack_conv_num_layers, initial_downsample_stack_conv_dim_list=initial_downsample_stack_conv_dim_list, initial_downsample_stack_conv_downscale_list=initial_downsample_stack_conv_downscale_list, initial_downsample_stack_conv_num_conv_list=initial_downsample_stack_conv_num_conv_list, ) T_in, H_in, W_in, _ = new_input_shape self.encoder = CuboidTransformerEncoder( input_shape=(T_in, H_in, W_in, base_units), base_units=base_units, block_units=block_units, scale_alpha=scale_alpha, depth=enc_depth, downsample=downsample, downsample_type=downsample_type, block_attn_patterns=enc_attn_patterns, block_cuboid_size=enc_cuboid_size, block_strategy=enc_cuboid_strategy, block_shift_size=enc_shift_size, num_heads=num_heads, attn_drop=attn_drop, proj_drop=proj_drop, ffn_drop=ffn_drop, gated_ffn=gated_ffn, ffn_activation=ffn_activation, norm_layer=norm_layer, use_inter_ffn=enc_use_inter_ffn, padding_type=padding_type, use_global_vector=num_global_vectors > 0, use_global_vector_ffn=use_global_vector_ffn, use_global_self_attn=use_global_self_attn, separate_global_qkv=separate_global_qkv, global_dim_ratio=global_dim_ratio, checkpoint_level=checkpoint_level, use_relative_pos=use_relative_pos, self_attn_use_final_proj=self_attn_use_final_proj, # initialization attn_linear_init_mode=attn_linear_init_mode, ffn_linear_init_mode=ffn_linear_init_mode, conv_init_mode=conv_init_mode, down_linear_init_mode=down_up_linear_init_mode, norm_init_mode=norm_init_mode, ) self.enc_pos_embed = PosEmbed( embed_dim=base_units, typ=pos_embed_type, maxH=H_in, maxW=W_in, maxT=T_in) mem_shapes = self.encoder.get_mem_shapes() self.dec_pos_embed = PosEmbed( embed_dim=mem_shapes[-1][-1], typ=pos_embed_type, maxT=T_out, maxH=mem_shapes[-1][1], maxW=mem_shapes[-1][2]) self.unet_dec_cross_mode = unet_dec_cross_mode self.decoder = CuboidTransformerUNetDecoder( target_temporal_length=T_out, mem_shapes=mem_shapes, cross_start=dec_cross_start, depth=dec_depth, upsample_type=upsample_type, block_self_attn_patterns=dec_self_attn_patterns, block_self_cuboid_size=dec_self_cuboid_size, block_self_shift_size=dec_self_shift_size, block_self_cuboid_strategy=dec_self_cuboid_strategy, block_cross_attn_patterns=dec_cross_attn_patterns, block_cross_cuboid_hw=dec_cross_cuboid_hw, block_cross_shift_hw=dec_cross_shift_hw, block_cross_cuboid_strategy=dec_cross_cuboid_strategy, block_cross_n_temporal=dec_cross_n_temporal, cross_last_n_frames=dec_cross_last_n_frames, num_heads=num_heads, attn_drop=attn_drop, proj_drop=proj_drop, ffn_drop=ffn_drop, upsample_kernel_size=upsample_kernel_size, ffn_activation=ffn_activation, gated_ffn=gated_ffn, norm_layer=norm_layer, 
use_inter_ffn=dec_use_inter_ffn, max_temporal_relative=T_in + T_out, padding_type=padding_type, hierarchical_pos_embed=dec_hierarchical_pos_embed, pos_embed_type=pos_embed_type, use_self_global=(num_global_vectors > 0) and use_dec_self_global, self_update_global=dec_self_update_global, use_cross_global=(num_global_vectors > 0) and use_dec_cross_global, use_global_vector_ffn=use_global_vector_ffn, use_global_self_attn=use_global_self_attn, separate_global_qkv=separate_global_qkv, global_dim_ratio=global_dim_ratio, checkpoint_level=checkpoint_level, use_relative_pos=use_relative_pos, self_attn_use_final_proj=self_attn_use_final_proj, # initialization attn_linear_init_mode=attn_linear_init_mode, ffn_linear_init_mode=ffn_linear_init_mode, conv_init_mode=conv_init_mode, up_linear_init_mode=down_up_linear_init_mode, norm_init_mode=norm_init_mode, # different from CuboidTransformerDecoder downsample=downsample, downsample_type=downsample_type, cross_mode=unet_dec_cross_mode, down_linear_init_mode=down_up_linear_init_mode, ) self.reset_parameters() def get_initial_encoder_final_decoder( self, initial_downsample_type, activation, # initial_downsample_type=="conv" initial_downsample_scale, initial_downsample_conv_layers, final_upsample_conv_layers, padding_type, # initial_downsample_type == "stack_conv" initial_downsample_stack_conv_num_layers, initial_downsample_stack_conv_dim_list, initial_downsample_stack_conv_downscale_list, initial_downsample_stack_conv_num_conv_list, ): T_in, H_in, W_in, C_in = self.input_shape T_out, H_out, W_out, C_out = self.target_shape # Construct the initial upsampling / downsampling layers self.initial_downsample_type = initial_downsample_type if self.initial_downsample_type == "conv": if isinstance(initial_downsample_scale, int): initial_downsample_scale = (1, initial_downsample_scale, initial_downsample_scale) elif len(initial_downsample_scale) == 2: initial_downsample_scale = (1, *initial_downsample_scale) elif len(initial_downsample_scale) == 3: initial_downsample_scale = tuple(initial_downsample_scale) else: raise NotImplementedError(f"initial_downsample_scale {initial_downsample_scale} format not supported!") # if any(ele > 1 for ele in initial_downsample_scale): self.initial_encoder = InitialEncoder(dim=C_in, out_dim=self.base_units, downsample_scale=initial_downsample_scale, num_conv_layers=initial_downsample_conv_layers, padding_type=padding_type, activation=activation, conv_init_mode=self.conv_init_mode, linear_init_mode=self.down_up_linear_init_mode, norm_init_mode=self.norm_init_mode) self.initial_aux_encoder = InitialEncoder(dim=self.auxiliary_channels, out_dim=self.base_units, downsample_scale=initial_downsample_scale, num_conv_layers=initial_downsample_conv_layers, padding_type=padding_type, activation=activation, conv_init_mode=self.conv_init_mode, linear_init_mode=self.down_up_linear_init_mode, norm_init_mode=self.norm_init_mode) self.final_decoder = FinalDecoder(dim=self.base_units, target_thw=(T_out, H_out, W_out), num_conv_layers=final_upsample_conv_layers, activation=activation, conv_init_mode=self.conv_init_mode, linear_init_mode=self.down_up_linear_init_mode, norm_init_mode=self.norm_init_mode) new_input_shape = self.initial_encoder.patch_merge.get_out_shape(self.input_shape) self.dec_final_proj = nn.Linear(self.base_units, C_out) elif self.initial_downsample_type == "stack_conv": if initial_downsample_stack_conv_dim_list is None: initial_downsample_stack_conv_dim_list = [self.base_units, ] * initial_downsample_stack_conv_num_layers 
self.initial_encoder = InitialStackPatchMergingEncoder( num_merge=initial_downsample_stack_conv_num_layers, in_dim=C_in, out_dim_list=initial_downsample_stack_conv_dim_list, downsample_scale_list=initial_downsample_stack_conv_downscale_list, num_conv_per_merge_list=initial_downsample_stack_conv_num_conv_list, padding_type=padding_type, activation=activation, conv_init_mode=self.conv_init_mode, linear_init_mode=self.down_up_linear_init_mode, norm_init_mode=self.norm_init_mode) self.initial_aux_encoder = InitialStackPatchMergingEncoder( num_merge=initial_downsample_stack_conv_num_layers, in_dim=self.auxiliary_channels, out_dim_list=initial_downsample_stack_conv_dim_list, downsample_scale_list=initial_downsample_stack_conv_downscale_list, num_conv_per_merge_list=initial_downsample_stack_conv_num_conv_list, padding_type=padding_type, activation=activation, conv_init_mode=self.conv_init_mode, linear_init_mode=self.down_up_linear_init_mode, norm_init_mode=self.norm_init_mode) # use `self.target_shape` to get correct T_out initial_encoder_out_shape_list = self.initial_encoder.get_out_shape_list(self.target_shape) dec_target_shape_list, dec_in_dim = \
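A condensed, runnable sketch of the down-then-up control flow that CuboidTransformerUNetDecoder.forward implements; identity lambdas stand in for the real self-/cross-attention, PatchMerging3D and Upsample3DLayer modules, and all shapes are kept constant only to keep the toy short:

import torch

num_blocks, depth, cross_mode = 3, [1, 1, 1], "both"
down_use_cross = cross_mode in ("down", "both")
up_use_cross = cross_mode in ("up", "both")

self_block = lambda x: x                        # stand-in for StackCuboidSelfAttentionBlock
cross_block = lambda x, mem: x + 0.0 * mem      # stand-in for StackCuboidCrossAttentionBlock
downsample = upsample = lambda x: x             # stand-ins for PatchMerging3D / Upsample3DLayer

x = torch.randn(2, 4, 8, 8, 16)                 # (B, T, H, W, C)
mem_l = [torch.randn(2, 4, 8, 8, 16) for _ in range(num_blocks)]

for i in range(num_blocks):                     # downsampling phase
    if i > 0:
        x = downsample(x)
    for _ in range(depth[i]):
        x = self_block(x)
        if down_use_cross:
            x = cross_block(x, mem_l[i])        # attend to encoder memory on the way down
for i in range(num_blocks - 1, -1, -1):         # upsampling phase
    for _ in range(depth[i]):
        x = self_block(x)
        if up_use_cross:
            x = cross_block(x, mem_l[i])        # attend to encoder memory on the way up
    if i > 0:
        x = upsample(x)
print(x.shape)

The real module additionally gates the cross blocks with cross_start, hierarchical position embeddings and global vectors, but the ordering above is exactly what the cross_mode diagrams in the class docstring describe.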
FinalStackUpsamplingDecoder.get_init_params(
6
2023-10-23 11:45:50+00:00
24k
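For the "conv" initial-downsample branch in the record above, a standalone sketch (with a hypothetical helper name) of how an int or 2-tuple scale is promoted to a (T, H, W) 3-tuple before it reaches InitialEncoder:

def normalize_downsample_scale(scale):
    # int -> same spatial factor for H and W; 2-tuple -> (H, W); 3-tuple -> (T, H, W); T defaults to 1.
    if isinstance(scale, int):
        return (1, scale, scale)
    if len(scale) == 2:
        return (1, *scale)
    if len(scale) == 3:
        return tuple(scale)
    raise NotImplementedError(f"initial_downsample_scale {scale} format not supported!")

print(normalize_downsample_scale(2))       # (1, 2, 2)
print(normalize_downsample_scale((2, 4)))  # (1, 2, 4)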
IBM/VillanDiffusion
loss.py
[ { "identifier": "Backdoor", "path": "dataset.py", "snippet": "class Backdoor():\n CHANNEL_LAST = -1\n CHANNEL_FIRST = -3\n \n GREY_BG_RATIO = 0.3\n \n STOP_SIGN_IMG = \"static/stop_sign_wo_bg.png\"\n # STOP_SIGN_IMG = \"static/stop_sign_bg_blk.jpg\"\n CAT_IMG = \"static/cat_wo_bg...
import copy import torch import torch.nn.functional as F import os from functools import partial from os import terminal_size from sched import scheduler from typing import Callable, Dict, List, Tuple, Union from torch import nn from matplotlib import pyplot as plt from dataset import Backdoor, DEFAULT_VMIN, DEFAULT_VMAX from model import DiffuserModelSched from diffusers import DDPMScheduler from dataset import DatasetLoader from model import DiffuserModelSched
17,786
return x.reshape(len(x), *([1] * len(x_start.shape[1:]))) # Set up model mode backdoor_model = backdoor_model.train() clean_model = clean_model.eval() alphas = noise_sched.alphas.to(device=x_start.device, dtype=x_start.dtype) betas = noise_sched.betas.to(device=x_start.device, dtype=x_start.dtype) timesteps = torch.clamp(timesteps, max=timesteps_num - backprop_depth - 1).to(x_start.device) if adaptive_score_loss.alphas_cumprod_derivative == None: alpha_fn, beta_fn = get_alpha_beta_fn_linear(beta_start=float(betas[0]), beta_end=float(betas[-1]), timesteps=float(len(betas))) adaptive_score_loss.alphas_cumprod_derivative = get_alphas_cumprod_derivative(alphas=alphas, alpha_fn=alpha_fn).to(device=x_start.device, dtype=x_start.dtype) adaptive_score_loss.alphas_cumprod = noise_sched.alphas_cumprod.to(device=x_start.device, dtype=x_start.dtype) # adaptive_score_loss.alphas_cumprod = prod_integral(xs=alphas, x_fn=alpha_fn).to(device=x_start.device, dtype=x_start.dtype) alphas_cumprod_derivative = adaptive_score_loss.alphas_cumprod_derivative alphas_cumprod = adaptive_score_loss.alphas_cumprod def ode_x_k_t(model: torch.nn.Module, xs: torch.Tensor, rs: torch.Tensor, k: int, ts: torch.Tensor, f_func, h_func, g_square_func, sigma_func, delta: float=1e-6) -> torch.Tensor: # with torch.no_grad(): if k == 0: return xs prev_ode_x_k_t: torch.Tensor = ode_x_k_t(model=model, xs=xs, rs=rs, k=k - 1, ts=ts - 1, f_func=f_func, h_func=h_func, g_square_func=g_square_func, sigma_func=sigma_func) pred = model(prev_ode_x_k_t.contiguous(), (ts - 1).contiguous(), return_dict=False)[0] if torch.isnan(xs).any(): print(f"[{k}] xs: Nan") if torch.isnan(pred).any(): print(f"[{k}] ode pred: Nan") if torch.isnan(prev_ode_x_k_t).any(): print(f"[{k}] prev_ode_x_k_t: Nan") return prev_ode_x_k_t - (f_func[k] * prev_ode_x_k_t + h_func[k] * rs + g_square_func[k] / (2 * sigma_func[k] + delta) * pred) def sde_x_k_t(model: torch.nn.Module, xs: torch.Tensor, rs: torch.Tensor, k: int, u: float, ts: torch.Tensor, f_func, h_func, g_square_func, sigma_func, rand: bool=True, delta: float=1e-6) -> torch.Tensor: if k == 0: return xs prev_sde_x_k_t: torch.Tensor = sde_x_k_t(model=model, xs=xs, rs=rs, k=k - 1, u=u, ts=ts - 1, f_func=f_func, h_func=h_func, g_square_func=g_square_func, sigma_func=sigma_func, rand=True) pred = model(prev_sde_x_k_t.contiguous(), (ts - 1).contiguous(), return_dict=True)[0] if torch.isnan(xs).any(): print(f"[{k}] xs: Nan") if torch.isnan(pred).any(): print(f"[{k}] sde pred: Nan") if torch.isnan(prev_sde_x_k_t).any(): print(f"[{k}] prev_sde_x_k_t: Nan") if rand: return prev_sde_x_k_t - (f_func[k] * prev_sde_x_k_t + g_square_func[k] * (u + 1) / (2 * sigma_func[k] + delta) * pred + torch.sqrt(g_square_func[k]) * u * torch.randn_like(xs)) else: return prev_sde_x_k_t - (f_func[k] * prev_sde_x_k_t + g_square_func[k] * (u + 1) / (2 * sigma_func[k] + delta) * pred) def func_t_dict_gen(func: torch.Tensor, k: int, timesteps: torch.Tensor): funcs: Dict[int, torch.Tensor] = {} for i in range(1, k + 1): funcs[k - i + 1] = unqueeze_n(func[timesteps + i]) return funcs # Functions used in the expansion f_func= 1 / (2 * alphas_cumprod) * alphas_cumprod_derivative # g_square_func = - alphas_cumprod_derivative g_square_func = - alphas_cumprod_derivative / alphas_cumprod sigma_func = torch.sqrt(1 - alphas_cumprod) h_func = - psi * (alphas_cumprod_derivative / (2 * torch.sqrt(alphas_cumprod))) - (1 - psi) * (alphas_cumprod_derivative / (2 * torch.sqrt(1 - alphas_cumprod))) # print(f"sigma_func min: {sigma_func.min()}") if 
torch.isnan(f_func).any(): print(f"f_func: Nan") if torch.isnan(g_square_func).any(): print(f"g_square_func: Nan") if torch.isnan(sigma_func).any(): print(f"sigma_func: Nan") if torch.isnan(h_func).any(): print(f"h_func: Nan") f_func_dict = func_t_dict_gen(f_func, k=backprop_depth, timesteps=timesteps) g_square_func_dict = func_t_dict_gen(g_square_func, k=backprop_depth, timesteps=timesteps) sigma_func_dict = func_t_dict_gen(sigma_func, k=backprop_depth, timesteps=timesteps) h_func_dict = func_t_dict_gen(h_func, k=backprop_depth, timesteps=timesteps) # ODE ground truth and SDE prediction x_noisy = noise_sched.add_noise(x_start, noise, timesteps + backprop_depth) target_x_k_t = ode_x_k_t(model=clean_model, xs=x_noisy, rs=R, k=backprop_depth, ts=timesteps, f_func=f_func_dict, h_func=h_func_dict, g_square_func=g_square_func_dict, sigma_func=sigma_func_dict) pred_x_k_t = sde_x_k_t(model=backdoor_model, xs=x_noisy, rs=R, k=backprop_depth, u=1, ts=timesteps, f_func=f_func_dict, h_func=h_func_dict, g_square_func=g_square_func_dict, sigma_func=sigma_func_dict, rand=False) if torch.isnan(x_start).any(): print(f"x_start: Nan") if torch.isnan(R).any(): print(f"R: Nan") if torch.isnan(target_x_k_t).any(): print(f"target_x_k_t: Nan") if torch.isnan(pred_x_k_t).any(): print(f"pred_x_k_t: Nan") if loss_type == 'l1': loss = F.l1_loss(target_x_k_t, pred_x_k_t) elif loss_type == 'l2': loss = F.mse_loss(target_x_k_t, pred_x_k_t) if torch.isnan(loss): print(f"loss: Nan") elif loss_type == "huber": loss = F.smooth_l1_loss(target_x_k_t, pred_x_k_t) else: raise NotImplementedError() return loss adaptive_score_loss.alphas_cumprod_derivative = None adaptive_score_loss.alphas_cumprod = None # %% if __name__ == '__main__': time_step = 95 num_train_timesteps = 100 # time_step = 140 # num_train_timesteps = 150 ds_root = os.path.join('datasets')
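A minimal sketch of the per-timestep coefficient gathering and broadcasting pattern that unqueeze_n above and the extract helper further down both rely on; the schedule and tensors here are toy values, and the device handling of the original extract is omitted:

import torch

def extract(a, t, x_shape):
    # Pick a[t] for each sample, then reshape to (B, 1, ..., 1) so it broadcasts over an image batch.
    out = a.gather(-1, t)
    return out.reshape(t.shape[0], *((1,) * (len(x_shape) - 1)))

alphas_cumprod = torch.linspace(0.99, 0.01, steps=100)  # toy schedule, not a trained one
x = torch.randn(4, 3, 32, 32)                           # (B, C, H, W)
t = torch.randint(0, 100, (4,))
coef = extract(torch.sqrt(alphas_cumprod), t, x.shape)  # shape (4, 1, 1, 1)
print((coef * x).shape)                                 # one scalar per sample, broadcast over C, H, W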
# %% # from tmp_loss_sde import q_sample_diffuser_alt_half """## Defining the forward diffusion process The forward diffusion process gradually adds noise to an image from the real distribution, in a number of time steps $T$. This happens according to a **variance schedule**. The original DDPM authors employed a linear schedule: > We set the forward process variances to constants increasing linearly from $\beta_1 = 10^{−4}$ to $\beta_T = 0.02$. However, it was shown in ([Nichol et al., 2021](https://arxiv.org/abs/2102.09672)) that better results can be achieved when employing a cosine schedule. Below, we define various schedules for the $T$ timesteps, as well as corresponding variables which we'll need, such as cumulative variances. """ def cosine_beta_schedule(timesteps, s=0.008): """ cosine schedule as proposed in https://arxiv.org/abs/2102.09672 """ steps = timesteps + 1 x = torch.linspace(0, timesteps, steps) alphas_cumprod = torch.cos(((x / timesteps) + s) / (1 + s) * torch.pi * 0.5) ** 2 alphas_cumprod = alphas_cumprod / alphas_cumprod[0] betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1]) return torch.clip(betas, 0.0001, 0.9999) def linear_beta_schedule(timesteps): beta_start = 0.0001 beta_end = 0.02 return torch.linspace(beta_start, beta_end, timesteps) def quadratic_beta_schedule(timesteps): beta_start = 0.0001 beta_end = 0.02 return torch.linspace(beta_start**0.5, beta_end**0.5, timesteps) ** 2 def sigmoid_beta_schedule(timesteps): beta_start = 0.0001 beta_end = 0.02 betas = torch.linspace(-6, 6, timesteps) return torch.sigmoid(betas) * (beta_end - beta_start) + beta_start def extract(a, t, x_shape): batch_size = t.shape[0] out = a.gather(-1, t.cpu()) return out.reshape(batch_size, *((1,) * (len(x_shape) - 1))).to(t.device) class NoiseScheduler(): SCHED_COSINE = "SC_COS" SCHED_LINEAR = "SC_LIN" SCHED_QUADRATIC = "SC_QUAD" SCHED_SIGMOID = "SC_SIGM" def __init__(self, timesteps: int, scheduler: str, s: float=0.008): self.__timesteps = int(timesteps) self.__s = float(s) self.__scheduler = scheduler # define beta schedule if self.__scheduler == self.SCHED_COSINE: self.__betas = NoiseScheduler.cosine_beta_schedule(timesteps=self.__timesteps, s=self.__s) elif self.__scheduler == self.SCHED_LINEAR: self.__betas = NoiseScheduler.linear_beta_schedule(timesteps=self.__timesteps) self.__derivative_beta = 1 / self.__timesteps self.__derivative_alpha = - 1 / self.__timesteps elif self.__scheduler == self.SCHED_QUADRATIC: self.__betas = NoiseScheduler.quadratic_beta_schedule(timesteps=self.__timesteps) elif self.__scheduler == self.SCHED_SIGMOID: self.__betas = NoiseScheduler.sigmoid_beta_schedule(timesteps=self.__timesteps) else: raise ImportError(f"Undefined scheduler: {self.__scheduler}") # define alphas self.__alphas = 1. - self.betas self.__alphas_cumprod = torch.cumprod(self.alphas, axis=0) self.__alphas_cumprod_prev = F.pad(self.alphas_cumprod[:-1], (1, 0), value=1.0) self.__sqrt_recip_alphas = torch.sqrt(1.0 / self.alphas) # Calculations for backdoor self.__sqrt_alphas = torch.sqrt(self.alphas) self.__one_minus_sqrt_alphas = 1 - self.sqrt_alphas self.__one_minus_alphas = 1 - self.alphas # calculations for diffusion q(x_t | x_{t-1}) and others self.__sqrt_alphas_cumprod = torch.sqrt(self.alphas_cumprod) self.__sqrt_one_minus_alphas_cumprod = torch.sqrt(1. - self.alphas_cumprod) self.__R_coef = self.one_minus_sqrt_alphas * self.sqrt_one_minus_alphas_cumprod / self.one_minus_alphas # calculations for posterior q(x_{t-1} | x_t, x_0) self.__posterior_variance = self.betas * (1. 
- self.alphas_cumprod_prev) / (1. - self.alphas_cumprod) @staticmethod def cosine_beta_schedule(timesteps, s=0.008): """ cosine schedule as proposed in https://arxiv.org/abs/2102.09672 """ steps = timesteps + 1 x = torch.linspace(0, timesteps, steps) alphas_cumprod = torch.cos(((x / timesteps) + s) / (1 + s) * torch.pi * 0.5) ** 2 alphas_cumprod = alphas_cumprod / alphas_cumprod[0] betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1]) return torch.clip(betas, 0.0001, 0.9999) @staticmethod def linear_beta_schedule(timesteps): beta_start = 0.0001 beta_end = 0.02 return torch.linspace(beta_start, beta_end, timesteps) @staticmethod def quadratic_beta_schedule(timesteps): beta_start = 0.0001 beta_end = 0.02 return torch.linspace(beta_start**0.5, beta_end**0.5, timesteps) ** 2 @staticmethod def sigmoid_beta_schedule(timesteps): beta_start = 0.0001 beta_end = 0.02 betas = torch.linspace(-6, 6, timesteps) return torch.sigmoid(betas) * (beta_end - beta_start) + beta_start @property def betas(self): return self.__betas @property def alphas(self): return self.__alphas @property def alphas_cumprod(self): return self.__alphas_cumprod @property def alphas_cumprod_prev(self): return self.__alphas_cumprod_prev @property def sqrt_recip_alphas(self): return self.__sqrt_recip_alphas @property def sqrt_alphas(self): return self.__sqrt_alphas @property def one_minus_sqrt_alphas(self): return self.__one_minus_sqrt_alphas @property def one_minus_alphas(self): return self.__one_minus_alphas @property def sqrt_alphas_cumprod(self): return self.__sqrt_alphas_cumprod @property def sqrt_one_minus_alphas_cumprod(self): return self.__sqrt_one_minus_alphas_cumprod @property def R_coef(self): return self.__R_coef @property def posterior_variance(self): return self.__posterior_variance """<img src="https://drive.google.com/uc?id=1QifsBnYiijwTqru6gur9C0qKkFYrm-lN" width="800" /> This means that we can now define the loss function given the model as follows: """ # forward diffusion def q_sample_clean(noise_sched, x_start, t, noise=None): if noise is None: noise = torch.randn_like(x_start) sqrt_alphas_cumprod_t = extract(noise_sched.sqrt_alphas_cumprod, t, x_start.shape) sqrt_one_minus_alphas_cumprod_t = extract( noise_sched.sqrt_one_minus_alphas_cumprod, t, x_start.shape ) return sqrt_alphas_cumprod_t * x_start + sqrt_one_minus_alphas_cumprod_t * noise, noise def q_sample_backdoor(noise_sched, x_start, R, t, noise=None): if noise is None: noise = torch.randn_like(x_start) sqrt_alphas_cumprod_t = extract(noise_sched.sqrt_alphas_cumprod, t, x_start.shape) sqrt_one_minus_alphas_cumprod_t = extract( noise_sched.sqrt_one_minus_alphas_cumprod, t, x_start.shape ) R_coef_t = extract(noise_sched.R_coef, t, x_start.shape) return sqrt_alphas_cumprod_t * x_start + (1 - sqrt_alphas_cumprod_t) * R + sqrt_one_minus_alphas_cumprod_t * noise, R_coef_t * R + noise """ <img src="https://drive.google.com/uc?id=1QifsBnYiijwTqru6gur9C0qKkFYrm-lN" width="800" /> This means that we can now define the loss function given the model as follows: """ def p_losses_clean(noise_sched, denoise_model, x_start, t, noise=None, loss_type="l2"): if len(x_start) == 0: return 0 if noise is None: noise = torch.randn_like(x_start) x_noisy, target = q_sample_clean(noise_sched=noise_sched, x_start=x_start, t=t, noise=noise) predicted_noise = denoise_model(x_noisy, t) if loss_type == 'l1': loss = F.l1_loss(target, predicted_noise) elif loss_type == 'l2': loss = F.mse_loss(target, predicted_noise) elif loss_type == "huber": loss = F.smooth_l1_loss(target, 
predicted_noise) else: raise NotImplementedError() return loss def p_losses_backdoor(noise_sched, denoise_model, x_start, R, t, noise=None, loss_type="l2"): if len(x_start) == 0: return 0 if noise is None: noise = torch.randn_like(x_start) x_noisy, target = q_sample_backdoor(noise_sched=noise_sched, x_start=x_start, R=R, t=t, noise=noise) predicted_noise = denoise_model(x_noisy, t) if loss_type == 'l1': loss = F.l1_loss(target, predicted_noise) elif loss_type == 'l2': loss = F.mse_loss(target, predicted_noise) elif loss_type == "huber": loss = F.smooth_l1_loss(target, predicted_noise) else: raise NotImplementedError() return loss def p_losses(noise_sched, denoise_model, x_start, R, is_clean, t, noise=None, loss_type="l2"): is_not_clean = torch.where(is_clean, False, True) if noise != None: noise_clean = noise[is_clean] noise_backdoor = noise[is_not_clean] else: noise_clean = noise_backdoor = noise loss_clean = p_losses_clean(noise_sched=noise_sched, denoise_model=denoise_model, x_start=x_start[is_clean], t=t[is_clean], noise=noise_clean, loss_type=loss_type) loss_backdoor = p_losses_backdoor(noise_sched=noise_sched, denoise_model=denoise_model, x_start=x_start[is_not_clean], R=R[is_not_clean], t=t[is_not_clean], noise=noise_backdoor, loss_type=loss_type) return (loss_clean + loss_backdoor) / 2 # ================================================== class LossSampler(): def __init__(self, noise_sched: NoiseScheduler): self.__noise_sched = noise_sched def get_fn(self): return partial(p_losses_backdoor, self.__noise_sched), partial(q_sample_backdoor, self.__noise_sched) def plot(x, title: str, log_scale: bool=False): plt.plot(x) plt.title(title) if log_scale: plt.yscale("log") plt.show() def get_derivative(x: torch.Tensor, t: int): if t + 1 < len(x): return x[t + 1] - x[t] return x[t] - x[t - 1] def get_derivatives(x: torch.Tensor): x_delta_t = torch.roll(x, -1, 0) x_delta_t[-1] = x_delta_t[-2] x[-1] = x[-2] return x_delta_t - x def central_derivative(fn, x, stop_thres: float=1e-5, stop_iter_n: int=50, delta: float=1e-2, divisor: float=10.0): der = lambda d: (fn(x + d) - fn(x - d)) / (2 * d) iter_n = 0 res = der(delta) last_res = 0 while (abs(res - last_res) > stop_thres or iter_n < 1) and iter_n < stop_iter_n: last_res = res delta = delta / divisor res = der(delta) iter_n = iter_n + 1 return res def get_alpha_beta_fn_linear(beta_start: float, beta_end: float, timesteps: int): def beta_fn(t): return float(beta_start) + (float(beta_end) - float(beta_start)) * t / (float(timesteps) - 1.0) def alpha_fn(t): return 1.0 - beta_fn(t) return alpha_fn, beta_fn def integral(fn: Callable[[Union[int, float]], Union[int, float]], interval_low: float, interval_up: float, div: int=100): lin_space = torch.linspace(interval_low, interval_up, div, dtype=torch.float32) res = fn(lin_space[:-1]) return torch.sum(res, dim=0) * (interval_up - interval_low) / div def prod_integral(xs: torch.Tensor, x_fn: Callable[[Union[int, float]], Union[int, float]], div: int=200): def log_x_fn(x): return torch.log(x_fn(x).double()).double() def integral_fn(x): return (torch.trapezoid(log_x_fn(torch.linspace(0, x, div * int(x)).to('cpu').double())) / div).double() def exp_integral_fn(x): return torch.exp(integral_fn(x)).double() return torch.linspace(start=0, end=len(xs)-1, steps=len(xs)).to('cpu').double().apply_(exp_integral_fn).float() def get_alphas_cumprod_derivative(alphas: torch.Tensor, alpha_fn: Callable[[Union[int, float]], Union[int, float]]): div = 200 def log_alpha_fn(x): return torch.log(alpha_fn(x).double()).double() 
def integral_fn(x): return (torch.trapezoid(log_alpha_fn(torch.linspace(0, x, div * int(x)).to('cpu').double())) / div).double() def exp_integral_fn(x): return torch.exp(integral_fn(x)).double() def der_fn(x): return central_derivative(exp_integral_fn, x, stop_thres=1e-3, stop_iter_n=2, delta=1e-2, divisor=10.0) def coef_fn(x): return (exp_integral_fn(x) * torch.log(alpha_fn(torch.Tensor([x]).double()))).double() # fn_int = torch.linspace(start=0, end=len(alphas)-1, steps=len(alphas)).double().apply_(integral_fn) # fn_prod_int = torch.linspace(start=0, end=len(alphas)-1, steps=len(alphas)).double().apply_(exp_integral_fn) # for i in range(len(fn_prod_int[:20])): # print(f"Time: {i} - Alpha Fn Product Integral Analytic: {fn_prod_int[i]}") # plot(fn_prod_int, title="Alpha Fn Product Integral", log_scale=True) # print(f"fn_int: {fn_int[:20]}") # plot(fn_int, title="Alpha Fn Integral") res = torch.linspace(start=0, end=len(alphas)-1, steps=len(alphas)).to('cpu').float().apply_(coef_fn).double() return res # return torch.exp(integral_res) * (torch.log(alphas[-1]) - torch.log(alphas[0])) def get_alphas_hat_derivative(alphas_cumprod: torch.Tensor, alphas: torch.Tensor, alpha_fn: Callable[[Union[int, float]], Union[int, float]]): return get_alphas_cumprod_derivative(alphas=alphas, alpha_fn=alpha_fn).to(alphas_cumprod.device) / 2 * (alphas_cumprod ** 0.5) def get_sigmas_hat_derivative(alphas_cumprod: torch.Tensor, alphas: torch.Tensor, alpha_fn: Callable[[Union[int, float]], Union[int, float]]): return - get_alphas_cumprod_derivative(alphas=alphas, alpha_fn=alpha_fn).to(alphas_cumprod.device) / 2 * ((1 - alphas_cumprod) ** 0.5) def sci(x: float): return "{:.2e}".format(x) def get_R_coef_alt(alphas_cumprod: torch.Tensor, alphas: torch.Tensor, alpha_fn: Callable[[Union[int, float]], Union[int, float]], psi: float=1, solver_type: str='sde'): one_minus_alphas_cumprod = 1 - alphas_cumprod # Fokker-Planck: g^2(t) = derivative of \hat{\beta}^2(t) # coef = psi * (torch.sqrt(one_minus_alphas_cumprod / alphas_cumprod)) + (1 - psi) # g^2(t) = \frac{d \hat{\beta}^2(t)}{dt} - 2 * \frac{d \log \hat{\alpha}(t)}{dt} * \hat{\beta}^2(t) coef = (psi * (torch.sqrt(one_minus_alphas_cumprod / alphas_cumprod)) + (1 - psi)) / (1 + (one_minus_alphas_cumprod / alphas_cumprod)) # Simplified # coef = torch.ones_like(alphas_cumprod) if str(solver_type).lower() == 'ode': return coef elif str(solver_type).lower() == 'sde': return 0.5 * coef else: raise NotImplementedError(f"Coefficient solver_type: {solver_type} isn't implemented") def get_R_coef_variational(alphas_cumprod: torch.Tensor, psi: float=1, solver_type: str='sde'): coef = psi * (1 - alphas_cumprod ** 0.5) / (1 - alphas_cumprod) ** 0.5 + (1 - psi) if str(solver_type).lower() == 'ode': return 2 * coef elif str(solver_type).lower() == 'sde': return coef else: raise NotImplementedError(f"Coefficient solver_type: {solver_type} isn't implemented") # def get_R_coef_baddiff(alphas_cumprod: torch.Tensor, psi: float=1, solver_type: str='sde'): # coef = psi * (1 - alphas_cumprod ** 0.5) / (1 - alphas_cumprod) ** 0.5 + (1 - psi) # if str(solver_type).lower() == 'ode': # return 2 * coef # elif str(solver_type).lower() == 'sde': # return coef # else: # raise NotImplementedError(f"Coefficient solver_type: {solver_type} isn't implemented") def get_R_coef(alphas_cumprod: torch.Tensor, alphas: torch.Tensor, alpha_fn: Callable[[Union[int, float]], Union[int, float]], psi: float=1): alphas_hat = (alphas_cumprod ** 0.5).double() sigmas_hat = ((1 - alphas_cumprod) ** 0.5).double() 
alphas_hat_derivative = get_alphas_hat_derivative(alphas_cumprod=alphas_cumprod, alphas=alphas, alpha_fn=alpha_fn).double() sigmas_hat_derivative = get_sigmas_hat_derivative(alphas_cumprod=alphas_cumprod, alphas=alphas, alpha_fn=alpha_fn).double() alt_r = 0.5 * alphas_hat / (alphas_hat + sigmas_hat) # plot(alt_r, title="Alternate R", log_scale=True) a = (- psi * alphas_hat_derivative + (1 - psi) * sigmas_hat_derivative).double() b = (psi * (1 - alphas_hat) + (1 - psi) * sigmas_hat).double() c = (2 * sigmas_hat * sigmas_hat_derivative - 2 * (alphas_hat_derivative / alphas_hat) * (sigmas_hat ** 2)).double() # plot(alpha_fn(torch.linspace(0, 999, 1000).float()), title="Alpha Fn", log_scale=True) # fn_cumprod = torch.cumprod(alpha_fn(torch.linspace(0, 999, 1000).float()), dim=0) # for i in range(len(fn_cumprod[:20])): # print(f"Time: {i} - Alpha Fn Cumprod: {fn_cumprod[i]}") # plot(fn_cumprod, title="Alpha Fn Cumprod", log_scale=True) # plot(alphas, title="Alpha") # for i in range(len(alphas_cumprod[:20])): # print(f"Time: {i} - Alpha Cumprod: {alphas_cumprod[i]}") # plot(alphas_cumprod, title="Alpha Cumprod", log_scale=True) # plot(get_alphas_cumprod_derivative(alphas=alphas, alpha_fn=alpha_fn), title="Alpha Cumprod Derivative Anlytic") # plot(get_derivatives(x=alphas_cumprod)[:-1], title="Alpha Cumprod Derivative Numeric") # plot(alphas_hat, title="Alpha Hat", log_scale=True) # plot(sigmas_hat, title="Beta Hat", log_scale=True) # plot(alphas_hat_derivative, title="Alpha Hat Derivative") # plot(sigmas_hat_derivative, title="Sigma Hat Derivative") # plot(a, title="Rho Derivative") # plot(b, title="Rho") # plot(c, title="G^2", log_scale=True) # plot(alphas_hat_derivative / alphas_hat, title="f(t)") coef = (sigmas_hat * a / (c)).double() # for i in range(len(sigmas_hat[:20])): # print(f"Time: {i} - R: {sci(coef[i])} beta_hat: {sci(sigmas_hat[i])}, rho_deriv: {sci(a[i])}, G^2: {sci(c[i])}") if torch.isnan(sigmas_hat).any(): print(f"sigmas_hat - Nan: {sigmas_hat[torch.isnan(sigmas_hat).nonzero()]}") if torch.isnan(a).any(): print(f"Rho Derivative - Nan: {a[torch.isnan(a).nonzero()]}") if torch.isnan(b).any(): print(f"Rho - Nan: {b[torch.isnan(b).nonzero()]}") if torch.isnan(c).any(): print(f"G^2 - Nan: {c[torch.isnan(c).nonzero()]}") # return torch.clamp(coef, min=None, max=1) # return coef return alt_r def get_ks(alphas_hat: torch.Tensor) -> torch.Tensor: prev_alphas_hat = torch.roll(alphas_hat, 1, 0) prev_alphas_hat[0] = 1 return alphas_hat / prev_alphas_hat def get_ws(betas_hat: torch.Tensor, ks: torch.Tensor) -> torch.Tensor: ws = [betas_hat[0]] residuals = [0] for i, beta_hat_i in enumerate(betas_hat): if i < 1: continue residuals.append((ks[i] ** 2) * (ws[i - 1] ** 2 + residuals[i - 1])) ws.append((beta_hat_i ** 2 - residuals[i]) ** 0.5) return torch.Tensor(ws) def get_hs(rhos_hat: torch.Tensor, ks: torch.Tensor) -> torch.Tensor: hs = [rhos_hat[0]] residuals = [0] for i, rho_hat_i in enumerate(rhos_hat): if i < 1: continue residuals.append(ks[i] * (hs[i - 1] + residuals[i - 1])) hs.append(rho_hat_i - residuals[i]) return torch.Tensor(hs) def get_ws_ve(sigmas: torch.Tensor) -> torch.Tensor: ws = [sigmas[0]] residuals = [0] for i, sigma_i in enumerate(sigmas): if i < 1: continue residuals.append(ws[i - 1] ** 2 + residuals[i - 1]) ws.append((sigma_i ** 2 - residuals[i]) ** 0.5) return torch.Tensor(ws) def get_hs_ve(rhos_hat: torch.Tensor) -> torch.Tensor: hs = [rhos_hat[0]] residuals = [0] for i, rho_hat_i in enumerate(rhos_hat): if i < 1: continue residuals.append(hs[i - 1] + residuals[i - 1]) 
hs.append(rho_hat_i - residuals[i]) return torch.Tensor(hs) def get_R_coef_gen_ve(sigmas: torch.Tensor, rhos_hat: torch.Tensor, ws: torch.Tensor, hs: torch.Tensor, psi: float=1, solver_type: str='sde', vp_scale: float=1.0, ve_scale: float=1.0) -> Tuple[torch.Tensor, torch.Tensor]: # BadDiffusion style correction term, None if psi != 0: raise NotImplementedError(f"Variance Explode model doesn't support BadDiffusion style correction term") # TrojDiff style correction term if hs == None: raise ValueError(f"Arguement hs shouldn't be {hs} when psi is {psi}") prev_rhos_hat = torch.roll(rhos_hat, 1, 0) prev_rhos_hat[0] = 0 prev_sigmas = torch.roll(sigmas, 1, 0) prev_sigmas[0] = 0 trojdiff_step = rhos_hat trojdiff_coef = ve_scale * (ws ** 2 * (rhos_hat - prev_rhos_hat) + hs * prev_sigmas) / (ws ** 2 * sigmas) # print(f"trojdiff_coef isnan: {torch.isnan(trojdiff_coef)}") # Coefficients & Steps step = trojdiff_step coef = trojdiff_coef if str(solver_type).lower() == 'ode': return step, 2 * coef elif str(solver_type).lower() == 'sde': return step, coef else: raise NotImplementedError(f"Coefficient solver_type: {solver_type} isn't implemented") def get_R_coef_gen_ve_reduce(sigmas: torch.Tensor, hs: torch.Tensor, rhos_hat_w: float=1.0, psi: float=1, solver_type: str='sde', vp_scale: float=1.0, ve_scale: float=1.0) -> Tuple[torch.Tensor, torch.Tensor]: # BadDiffusion style correction term, None if psi != 0: raise NotImplementedError(f"Variance Explode model doesn't support BadDiffusion style correction term") # TrojDiff style correction term if hs == None: raise ValueError(f"Arguement hs shouldn't be {hs} when psi is {psi}") # prev_rhos_hat = torch.roll(rhos_hat, 1, 0) # prev_rhos_hat[0] = 0 prev_sigmas = torch.roll(sigmas, 1, 0) prev_sigmas[0] = 0 trojdiff_step = rhos_hat_w * sigmas trojdiff_coef = ve_scale * (sigmas * rhos_hat_w / (sigmas + prev_sigmas)) # print(f"trojdiff_coef isnan: {torch.isnan(trojdiff_coef)}") # Coefficients & Steps step = trojdiff_step coef = trojdiff_coef if str(solver_type).lower() == 'ode': return step, 2 * coef elif str(solver_type).lower() == 'sde': return step, coef else: raise NotImplementedError(f"Coefficient solver_type: {solver_type} isn't implemented") def get_hs_vp(alphas: torch.Tensor, alphas_cumprod: torch.Tensor) -> torch.Tensor: hs = [(1 - alphas_cumprod[0]) ** 0.5] residuals = [0] for i, (alphas_cumprod_i, alphas_i) in enumerate(zip(alphas_cumprod, alphas)): if i < 1: continue residuals.append((alphas_i ** 0.5) * (hs[i - 1] + residuals[i - 1])) hs.append((1 - alphas_cumprod_i) ** 0.5 - residuals[i]) return torch.Tensor(hs) def get_R_coef_gen_vp(alphas_cumprod: torch.Tensor, alphas: torch.Tensor, hs: torch.Tensor=None, psi: float=1, solver_type: str='sde', vp_scale: float=1.0, ve_scale: float=1.0) -> Tuple[torch.Tensor, torch.Tensor]: # BadDiffusion style correction term baddiff_step = 1 - alphas_cumprod ** 0.5 baddiff_coef = vp_scale * (1 - alphas ** 0.5) * (1 - alphas_cumprod) ** 0.5 / (1 - alphas) # TrojDiff style correction term if psi != 1: if hs == None: raise ValueError(f"Arhuement hs shouldn't be {hs} when psi is {psi}") trojdiff_step = (1 - alphas_cumprod) ** 0.5 trojdiff_coef = - ve_scale * ((alphas ** 0.5 - 1) * (1 - alphas_cumprod) ** 0.5 * (1 - alphas) - hs * (alphas - alphas_cumprod)) / (1 - alphas) # Coefficients & Steps step = psi * baddiff_step + (1 - psi) * trojdiff_step coef = psi * baddiff_coef + (1 - psi) * trojdiff_coef else: # Coefficients & Steps step = baddiff_step coef = baddiff_coef if str(solver_type).lower() == 'ode': return step, 2 * 
coef elif str(solver_type).lower() == 'sde': return step, coef else: raise NotImplementedError(f"Coefficient solver_type: {solver_type} isn't implemented") def get_R_coef_elbo_gen(noise_sched, sde_type: str="vp", psi: float=1, solver_type: str='sde', vp_scale: float=1.0, ve_scale: float=1.0, device=None, dtype=None, rhos_hat_w: float=1.0, rhos_hat_b: float=0.0) -> Tuple[torch.Tensor, torch.Tensor]: if sde_type == DiffuserModelSched.SDE_VP or sde_type == DiffuserModelSched.SDE_LDM: if device == None: device = noise_sched.alphas.device if dtype == None: dtype = noise_sched.alphas.dtype alphas: torch.Tensor = noise_sched.alphas.to(device=device, dtype=dtype) alphas_cumprod: torch.Tensor = noise_sched.alphas_cumprod.to(device=device, dtype=dtype) # hs if get_R_coef_elbo_gen.hs_vp == None: get_R_coef_elbo_gen.hs_vp = get_hs_vp(alphas=alphas, alphas_cumprod=alphas_cumprod) hs: torch.Tensors = get_R_coef_elbo_gen.hs_vp.to(device=device, dtype=dtype) step, R_coef = get_R_coef_gen_vp(alphas_cumprod=alphas_cumprod, alphas=alphas, hs=hs, psi=psi, solver_type=solver_type, vp_scale=vp_scale, ve_scale=ve_scale) elif sde_type == DiffuserModelSched.SDE_VE: if device == None: device = noise_sched.sigmas.device if dtype == None: dtype = noise_sched.sigmas.dtype sigmas: torch.Tensor = noise_sched.sigmas.to(device=device, dtype=dtype).flip(dims=[0]) rhos_hat: torch.Tensor = rhos_hat_w * sigmas + rhos_hat_b # ws if get_R_coef_elbo_gen.ws_ve == None: get_R_coef_elbo_gen.ws_ve = get_ws_ve(sigmas=sigmas) ws: torch.Tensor = get_R_coef_elbo_gen.ws_ve.to(device=device, dtype=dtype) # print(f"sigmas: {sigmas}") # print(f"sigmas isnan: {torch.isnan(sigmas).any()}: {torch.isnan(sigmas)}") # print(f"ws isnan: {torch.isnan(ws).any()}: {torch.isnan(ws)}") # hs if get_R_coef_elbo_gen.hs_ve == None: get_R_coef_elbo_gen.hs_ve = get_hs_ve(rhos_hat=rhos_hat) hs: torch.Tensor = get_R_coef_elbo_gen.hs_ve.to(device=device, dtype=dtype) # print(f"hs isnan: {torch.isnan(hs).any()}: {torch.isnan(hs)}") step, R_coef = get_R_coef_gen_ve(sigmas=sigmas, rhos_hat=rhos_hat, ws=ws, hs=hs, psi=psi, solver_type=solver_type, vp_scale=vp_scale, ve_scale=ve_scale) # R_coef = - R_coef / sigmas step, R_coef = step.flip(dims=[0]), R_coef.flip(dims=[0]) # print(f"step: {torch.isnan(step).any()}, Min: {step.min()}, Max: {step.max()}: {step}") # print(f"R_coef: {torch.isnan(R_coef).any()}, Min: {R_coef.min()}, Max: {R_coef.max()}: {R_coef}") else: raise NotImplementedError(f"sde_type: {sde_type} isn't implemented") return step, R_coef get_R_coef_elbo_gen.hs_vp: torch.Tensor = None get_R_coef_elbo_gen.ws_ve: torch.Tensor = None get_R_coef_elbo_gen.hs_ve: torch.Tensor = None def get_R_coef_continuous(alphas_cumprod: torch.Tensor, alphas: torch.Tensor, hs: torch.Tensor=None, psi: float=1, solver_type: str='sde', vp_scale: float=1.0, ve_scale: float=1.0): # Variance Preserve vp_step = 1 - alphas_cumprod ** 0.5 vp_coef = vp_scale * (1 - alphas_cumprod) ** 0.5 / (1 - alphas_cumprod) # Variance Explode if psi != 1: if hs == None: raise ValueError(f"Arhuement hs shouldn't be {hs} when psi is {psi}") ve_step = (1 - alphas_cumprod) ** 0.5 ve_coef = ve_scale * 0.5 # Coefficients & Steps step = psi * vp_step + (1 - psi) * ve_step coef = psi * vp_coef + (1 - psi) * ve_coef else: # Coefficients & Steps step = vp_step coef = vp_coef if str(solver_type).lower() == 'ode': return step, 2 * coef elif str(solver_type).lower() == 'sde': return step, coef else: raise NotImplementedError(f"Coefficient solver_type: {solver_type} isn't implemented") def 
q_sample_diffuser_alt(noise_sched, sde_type: str, x_start: torch.Tensor, R: torch.Tensor, timesteps: torch.Tensor, noise: torch.Tensor=None, psi: float=1, solver_type: str="sde", vp_scale: float=1.0, ve_scale: float=1.0) -> Tuple[torch.Tensor, torch.Tensor]: if noise is None: noise = torch.randn_like(x_start) def unqueeze_n(x): return x.reshape(len(x_start), *([1] * len(x_start.shape[1:]))) # alphas = noise_sched.alphas.to(device=x_start.device, dtype=x_start.dtype) # betas = noise_sched.betas.to(device=x_start.device, dtype=x_start.dtype) timesteps = timesteps.to(x_start.device) # Alphas Cumprod # if q_sample_diffuser_alt.alphas_cumprod == None: # alpha_fn, beta_fn = get_alpha_beta_fn_linear(beta_start=float(betas[0]), beta_end=float(betas[-1]), timesteps=float(len(betas))) # q_sample_diffuser_alt.alphas_cumprod = prod_integral(xs=alphas, x_fn=alpha_fn).to(device=x_start.device, dtype=x_start.dtype) # alphas_cumprod = q_sample_diffuser_alt.alphas_cumprod # alphas_cumprod = noise_sched.alphas_cumprod.to(device=x_start.device, dtype=x_start.dtype) # sqrt_alphas_cumprod = alphas_cumprod ** 0.5 # hs # if q_sample_diffuser_alt.hs == None: # q_sample_diffuser_alt.hs = get_hs_vp(alphas=alphas, alphas_cumprod=alphas_cumprod) # hs = q_sample_diffuser_alt.hs.to(device=x_start.device, dtype=x_start.dtype) # BadDiffusion # R_coef = (1 - alphas ** 0.5) * (1 - alphas_cumprod) ** 0.5 / (1 - alphas) step, R_coef = get_R_coef_elbo_gen(noise_sched=noise_sched, sde_type=sde_type, psi=psi, solver_type=solver_type, vp_scale=vp_scale, ve_scale=ve_scale, device=x_start.device, dtype=x_start.dtype) # step, R_coef = get_R_coef_gen_vp(alphas_cumprod=alphas_cumprod, alphas=alphas, hs=hs, psi=psi, solver_type=solver_type, vp_scale=vp_scale, ve_scale=ve_scale) # step, R_coef = get_R_coef_continuous(alphas_cumprod=alphas_cumprod, alphas=alphas, hs=hs, psi=psi, solver_type=solver_type, vp_scale=vp_scale, ve_scale=ve_scale) # plot(R_coef, title="R Coef Discrete") # Generalized # alpha_fn, beta_fn = get_alpha_beta_fn_linear(beta_start=float(betas[0]), beta_end=float(betas[-1]), timesteps=float(len(betas))) # R_coef = get_R_coef_alt(alphas_cumprod=alphas_cumprod, alphas=alphas, alpha_fn=alpha_fn, psi=psi, solver_type=solver_type) # plot(R_coef, title="R Coef Continuous") # Unsqueeze & Select R_coef_t = unqueeze_n(R_coef[timesteps]) step_t = unqueeze_n(step[timesteps]) if sde_type == DiffuserModelSched.SDE_VP or sde_type == DiffuserModelSched.SDE_LDM: noisy_images = noise_sched.add_noise(x_start, noise, timesteps) return noisy_images + step_t * R, R_coef_t * R + noise elif sde_type == DiffuserModelSched.SDE_VE: sigma_t = unqueeze_n(noise_sched.sigmas.to(timesteps.device)[timesteps]) noisy_images = x_start + sigma_t * noise # noisy_images = x_start print(f"noisy_images: {noisy_images.shape}, {torch.isnan(noisy_images).any()}, Min: {noisy_images.min()}, Max: {noisy_images.max()}") print(f"R: {torch.isnan(R).any()}, Min: {R.min()}, Max: {R.max()}") print(f"sigma_t: {sigma_t.shape}, {torch.isnan(sigma_t).any()}, Min: {sigma_t.min()}, Max: {sigma_t.max()}") # return noisy_images + step_t * R, - (R_coef_t * R + noise) / sigma_t # return noisy_images, - (noise) / sigma_t return noisy_images, noise else: raise NotImplementedError(f"sde_type: {sde_type} isn't implemented") q_sample_diffuser_alt.alphas_cumprod = None q_sample_diffuser_alt.hs = None def q_sample_diffuser(noise_sched, x_start: torch.Tensor, R: torch.Tensor, timesteps: torch.Tensor, noise: torch.Tensor=None) -> torch.Tensor: if noise is None: noise = 
torch.randn_like(x_start) def unqueeze_n(x): return x.reshape(len(x_start), *([1] * len(x_start.shape[1:]))) alphas_cumprod = noise_sched.alphas_cumprod.to(device=x_start.device, dtype=x_start.dtype) alphas = noise_sched.alphas.to(device=x_start.device, dtype=x_start.dtype) betas = noise_sched.betas.to(device=x_start.device, dtype=x_start.dtype) timesteps = timesteps.to(x_start.device) sqrt_alphas_cumprod_t = alphas_cumprod[timesteps] ** 0.5 sqrt_one_minus_alphas_cumprod_t = (1 - alphas_cumprod[timesteps]) ** 0.5 R_coef_t = (1 - alphas[timesteps] ** 0.5) * sqrt_one_minus_alphas_cumprod_t / (1 - alphas[timesteps]) sqrt_alphas_cumprod_t = unqueeze_n(sqrt_alphas_cumprod_t) # NOTE: BadDiffusion # R_coef = (1 - alphas ** 0.5) * (1 - alphas_cumprod) ** 0.5 / (1 - alphas) # plot(R_coef, title="R Coef", log_scale=True) R_coef_t = unqueeze_n(R_coef_t) noisy_images = noise_sched.add_noise(x_start, noise, timesteps) # if q_sample_diffuser.R_coef == None: # # NOTE: Generalized BadDiffusion # alpha_fn, beta_fn = get_alpha_beta_fn_linear(beta_start=float(betas[0]), beta_end=float(betas[-1]), timesteps=float(len(betas))) # # q_sample_diffuser.R_coef = torch.flip(get_R_coef(alphas_cumprod=alphas_cumprod, alphas=alphas, alpha_fn=alpha_fn, psi=1), dims=(0,)) # q_sample_diffuser.R_coef = get_R_coef_alt(alphas_cumprod=alphas_cumprod, alphas=alphas, alpha_fn=alpha_fn, psi=1).float() # R_coef_t = unqueeze_n(q_sample_diffuser.R_coef[timesteps]) # # plot(q_sample_diffuser.R_coef, title="R Coef", log_scale=True) # if torch.isnan(R_coef_t).any(): # print(f"Nan: {timesteps[torch.isnan(R_coef_t).nonzero()]}") return noisy_images + (1 - sqrt_alphas_cumprod_t) * R, R_coef_t * R + noise q_sample_diffuser.R_coef = None def p_losses_diffuser(noise_sched, model: nn.Module, sde_type: str, x_start: torch.Tensor, R: torch.Tensor, timesteps: torch.Tensor, noise: torch.Tensor=None, loss_type: str="l2", psi: float=1, solver_type: str="sde", vp_scale: float=1.0, ve_scale: float=1.0) -> torch.Tensor: if len(x_start) == 0: return 0 if noise is None: noise = torch.randn_like(x_start) noise = noise.clamp(-2, 2) def unqueeze_n(x): return x.reshape(len(x_start), *([1] * len(x_start.shape[1:]))) # if sde_type == DiffuserModelSched.SDE_VE: # x_start = x_start / 2 + 0.5 # R = R / 2 + 0.5 # Main loss function x_noisy, target = q_sample_diffuser_alt(noise_sched=noise_sched, sde_type=sde_type, x_start=x_start, R=R, timesteps=timesteps, noise=noise, psi=psi, solver_type=solver_type, vp_scale=vp_scale, ve_scale=ve_scale) # Additiolnal loss function # x_noisy_half, target_half = q_sample_diffuser_alt_half(noise_sched=noise_sched, x_start=x_start, R=R, timesteps=timesteps, noise=noise) # predicted_noise_half = model(x_noisy_half.contiguous(), timesteps.contiguous(), return_dict=False)[0] if sde_type == DiffuserModelSched.SDE_VP or sde_type == DiffuserModelSched.SDE_LDM: predicted_noise = model(x_noisy.contiguous(), timesteps.contiguous(), return_dict=False)[0] print(f"x_noisy: {x_noisy.shape}, {torch.isnan(x_noisy).any()}, min: {x_noisy.min()}, max: {x_noisy.max()}") print(f"predicted_noise: {predicted_noise.shape}, {torch.isnan(predicted_noise).any()}, min: {predicted_noise.min()}, max: {predicted_noise.max()}") if loss_type == 'l1': loss: torch.Tensor = F.l1_loss(target, predicted_noise, reduction='none') elif loss_type == 'l2': loss = F.mse_loss(target, predicted_noise, reduction='none') elif loss_type == "huber": loss = F.smooth_l1_loss(target, predicted_noise, reduction='none') else: raise NotImplementedError() return loss.mean() elif 
sde_type == DiffuserModelSched.SDE_VE: sigma_t = noise_sched.sigmas.unsqueeze(0).to(timesteps.device)[timesteps] predicted_noise = model(x_noisy.contiguous(), sigma_t.contiguous(), return_dict=False)[0] print(f"x_noisy: {x_noisy.shape}, {torch.isnan(x_noisy).any()}, min: {x_noisy.min()}, max: {x_noisy.max()}") print(f"predicted_noise: {predicted_noise.shape}, {torch.isnan(predicted_noise).any()}, min: {predicted_noise.min()}, max: {predicted_noise.max()}") if loss_type == 'l1': loss: torch.Tensor = F.l1_loss(target, predicted_noise) elif loss_type == 'l2': loss = F.mse_loss(target, predicted_noise) elif loss_type == "huber": loss = F.smooth_l1_loss(target, predicted_noise) else: raise NotImplementedError() # return (loss * unqueeze_n(noise_sched.sigmas.to(timesteps.device)[timesteps]) ** 2).mean() return loss else: raise NotImplementedError(f"sde_type: {sde_type} isn't implemented") class LossFn: RANDN_BOUND: float = 2.5 def __init__(self, noise_sched, sde_type: str, loss_type: str="l2", psi: float=1, solver_type: str="sde", vp_scale: float=1.0, ve_scale: float=1.0, rhos_hat_w: float=1.0, rhos_hat_b: float=0.0): self.__noise_sched = noise_sched if sde_type == DiffuserModelSched.SDE_VP or sde_type == DiffuserModelSched.SDE_LDM: self.__alphas: torch.Tensor = self.__noise_sched.alphas self.__alphas_cumprod: torch.Tensor = self.__noise_sched.alphas_cumprod self.__betas: torch.Tensor = self.__noise_sched.betas if sde_type == DiffuserModelSched.SDE_VE: self.__sigmas: torch.Tensor = self.__noise_sched.sigmas.flip([0]) self.__sde_type = sde_type self.__loss_type = loss_type self.__psi = psi self.__solver_type = solver_type self.__vp_scale = vp_scale self.__ve_scale = ve_scale self.__rhos_hat_w = rhos_hat_w self.__rhos_hat_b = rhos_hat_b self.__hs_vp: torch.Tensor = None self.__ws_ve: torch.Tensor = None self.__hs_ve: torch.Tensor = None def __norm(self): reduction = 'none' if self.__loss_type == 'l1': return partial(F.l1_loss, reduction=reduction) elif self.__loss_type == 'l2': return partial(F.mse_loss, reduction=reduction) elif self.__loss_type == "huber": return partial(F.smooth_l1_loss, reduction=reduction) else: raise NotImplementedError() def __get_R_step_coef(self, device=None, dtype=None): if self.__sde_type == DiffuserModelSched.SDE_VP or self.__sde_type == DiffuserModelSched.SDE_LDM: if device == None: device = self.__alphas.device if dtype == None: dtype = self.__alphas.dtype alphas: torch.Tensor = self.__alphas.to(device=device, dtype=dtype) alphas_cumprod: torch.Tensor = self.__alphas_cumprod.to(device=device, dtype=dtype) betas: torch.Tensor = self.__betas.to(device=device, dtype=dtype) # hs if self.__hs_vp == None: self.__hs_vp = get_hs_vp(alphas=alphas, alphas_cumprod=alphas_cumprod) hs: torch.Tensors = self.__hs_vp.to(device=device, dtype=dtype) step, R_coef = get_R_coef_gen_vp(alphas_cumprod=alphas_cumprod, alphas=alphas, hs=hs, psi=self.__psi, solver_type=self.__solver_type, vp_scale=self.__vp_scale, ve_scale=self.__ve_scale) elif self.__sde_type == DiffuserModelSched.SDE_VE: if device == None: device = self.__sigmas.device if dtype == None: dtype = self.__sigmas.dtype sigmas: torch.Tensor = self.__sigmas.to(device=device, dtype=dtype) rhos_hat: torch.Tensor = self.__rhos_hat_w * sigmas + self.__rhos_hat_b # ws if self.__ws_ve == None: self.__ws_ve = get_ws_ve(sigmas=sigmas) ws: torch.Tensor = self.__ws_ve.to(device=device, dtype=dtype) # print(f"sigmas: {sigmas}") # print(f"sigmas isnan: {torch.isnan(sigmas).any()}: {torch.isnan(sigmas)}") # print(f"ws isnan: 
{torch.isnan(ws).any()}: {torch.isnan(ws)}") # hs if self.__hs_ve == None: self.__hs_ve = get_hs_ve(rhos_hat=rhos_hat) hs: torch.Tensor = self.__hs_ve.to(device=device, dtype=dtype) # print(f"hs isnan: {torch.isnan(hs).any()}: {torch.isnan(hs)}") # step, R_coef = get_R_coef_gen_ve(sigmas=sigmas, rhos_hat=rhos_hat, ws=ws, hs=hs, psi=self.__psi, solver_type=self.__solver_type, vp_scale=self.__vp_scale, ve_scale=self.__ve_scale) step, R_coef = get_R_coef_gen_ve_reduce(sigmas=sigmas, hs=hs, rhos_hat_w=self.__rhos_hat_w, psi=self.__psi, solver_type=self.__solver_type, vp_scale=self.__vp_scale, ve_scale=self.__ve_scale) # print(f"step: {torch.isnan(step).any()}, Min: {step.min()}, Max: {step.max()}: {step}") # print(f"R_coef: {torch.isnan(R_coef).any()}, Min: {R_coef.min()}, Max: {R_coef.max()}: {R_coef}") else: raise NotImplementedError(f"sde_type: {self.__sde_type} isn't implemented") return step, R_coef def __get_inputs_targets(self, x_start: torch.Tensor, R: torch.Tensor, timesteps: torch.Tensor, noise: torch.Tensor): # if noise is None: # noise = torch.randn_like(x_start) def unqueeze_n(x): return x.reshape(len(x_start), *([1] * len(x_start.shape[1:]))) timesteps = timesteps.to(x_start.device) step, R_coef = self.__get_R_step_coef(device=x_start.device, dtype=x_start.dtype) # Unsqueeze & Select R_coef_t = unqueeze_n(R_coef[timesteps]) step_t = unqueeze_n(step[timesteps]) if self.__sde_type == DiffuserModelSched.SDE_VP or self.__sde_type == DiffuserModelSched.SDE_LDM: noisy_images = self.__noise_sched.add_noise(x_start, noise, timesteps) return noisy_images + step_t * R, R_coef_t * R + noise elif self.__sde_type == DiffuserModelSched.SDE_VE: sigma_t = unqueeze_n(self.__sigmas.to(timesteps.device)[timesteps]) noisy_images = x_start + sigma_t * noise # noisy_images = x_start # print(f"step_t: {step_t.shape}, Min: {step_t.min()}, Max: {step_t.max()}") # print(f"R_coef_t: {R_coef_t.shape}, Min: {R_coef_t.min()}, Max: {R_coef_t.max()}") return noisy_images + step_t * R, R_coef_t * R + noise # print(f"noisy_images: {noisy_images.shape}, {torch.isnan(noisy_images).any()}, Min: {noisy_images.min()}, Max: {noisy_images.max()}") # print(f"R: {torch.isnan(R).any()}, Min: {R.min()}, Max: {R.max()}") # No likelihood_weighting # return noisy_images, noise else: raise NotImplementedError(f"sde_type: {self.__sde_type} isn't implemented") @staticmethod def __encode_latents(vae, x: torch.Tensor, weight_dtype: str=None, scaling_factor: float=None): vae = vae.eval() with torch.no_grad(): x = x.to(vae.device) if weight_dtype != None and weight_dtype != "": x = x.to(dtype=weight_dtype) if scaling_factor != None: return (vae.encode(x).latents * scaling_factor).clone().detach() # return vae.encode(x).latents * vae.config.scaling_factor return vae.encode(x).latents.clone().detach() @staticmethod def __decode_latents(vae, x: torch.Tensor, weight_dtype: str=None, scaling_factor: float=None): vae = vae.eval() with torch.no_grad(): x = x.to(vae.device) if weight_dtype != None and weight_dtype != "": x = x.to(dtype=weight_dtype) if scaling_factor != None: return (vae.decode(x).sample / scaling_factor).clone().detach() # return vae.decode(x).sample / vae.config.scaling_factor return (vae.decode(x).sample).clone().detach() @staticmethod def __get_latent(batch, key: str, vae=None, weight_dtype: str=None, scaling_factor: float=None) -> torch.Tensor: if vae == None: return batch[key] return LossFn.__encode_latents(vae=vae, x=batch[key], weight_dtype=weight_dtype, scaling_factor=scaling_factor) @staticmethod def 
__get_latents(batch, keys: List[str], vae=None, weight_dtype: str=None, scaling_factor: float=None) -> List[torch.Tensor]: return [LossFn.__get_latent(batch=batch, vae=vae, key=key, weight_dtype=weight_dtype, scaling_factor=scaling_factor) for key in keys] def p_loss_by_keys(self, batch, model: nn.Module, target_latent_key: torch.Tensor, poison_latent_key: torch.Tensor, timesteps: torch.Tensor, vae=None, noise: torch.Tensor=None, weight_dtype: str=None, scaling_factor: float=None) -> torch.Tensor: target_latents, poison_latents = LossFn.__get_latents(batch=batch, keys=[target_latent_key, poison_latent_key], vae=vae, weight_dtype=weight_dtype, scaling_factor=scaling_factor) return self.p_loss(model=model, x_start=target_latents, R=poison_latents, timesteps=timesteps, noise=noise) def p_loss(self, model: nn.Module, x_start: torch.Tensor, R: torch.Tensor, timesteps: torch.Tensor, noise: torch.Tensor=None) -> torch.Tensor: if len(x_start) == 0: return 0 if noise is None: noise = torch.randn_like(x_start) # noise = noise.clamp(-LossFn.RANDN_BOUND, LossFn.RANDN_BOUND) def unqueeze_n(x): return x.reshape(len(x_start), *([1] * len(x_start.shape[1:]))) # Main loss function x_noisy, target = self.__get_inputs_targets(x_start=x_start, R=R, timesteps=timesteps, noise=noise) if self.__sde_type == DiffuserModelSched.SDE_VP or self.__sde_type == DiffuserModelSched.SDE_LDM: predicted_noise = model(x_noisy.contiguous(), timesteps.contiguous(), return_dict=False)[0] loss: torch.Tensor = self.__norm()(target=target, input=predicted_noise) return loss.mean() elif self.__sde_type == DiffuserModelSched.SDE_VE: sigmas_t: torch.Tensor = self.__sigmas.to(timesteps.device)[timesteps] predicted_noise = model(x_noisy.contiguous(), sigmas_t.contiguous(), return_dict=False)[0] # print(f"x_noisy: {x_noisy.shape}, {torch.isnan(x_noisy).any()}, min: {x_noisy.min()}, max: {x_noisy.max()}") # print(f"predicted_noise: {predicted_noise.shape}, {torch.isnan(predicted_noise).any()}, min: {predicted_noise.min()}, max: {predicted_noise.max()}") loss: torch.Tensor = self.__norm()(target=target, input=- predicted_noise * unqueeze_n(sigmas_t)) return loss.mean() else: raise NotImplementedError(f"sde_type: {self.__sde_type} isn't implemented") def adaptive_score_loss(noise_sched, backdoor_model: nn.Module, clean_model: torch.nn.Module, x_start: torch.Tensor, R: torch.Tensor, timesteps: torch.Tensor, psi: float=0, noise: torch.Tensor=None, loss_type: str="l2", backprop_depth: int=2, timesteps_num: int=1000) -> torch.Tensor: if timesteps_num - backprop_depth < 0: raise ValueError(f"backprop_depth should <= timesteps_num") if noise is None: noise = torch.randn_like(x_start) def unqueeze_n(x): return x.reshape(len(x), *([1] * len(x_start.shape[1:]))) # Set up model mode backdoor_model = backdoor_model.train() clean_model = clean_model.eval() alphas = noise_sched.alphas.to(device=x_start.device, dtype=x_start.dtype) betas = noise_sched.betas.to(device=x_start.device, dtype=x_start.dtype) timesteps = torch.clamp(timesteps, max=timesteps_num - backprop_depth - 1).to(x_start.device) if adaptive_score_loss.alphas_cumprod_derivative == None: alpha_fn, beta_fn = get_alpha_beta_fn_linear(beta_start=float(betas[0]), beta_end=float(betas[-1]), timesteps=float(len(betas))) adaptive_score_loss.alphas_cumprod_derivative = get_alphas_cumprod_derivative(alphas=alphas, alpha_fn=alpha_fn).to(device=x_start.device, dtype=x_start.dtype) adaptive_score_loss.alphas_cumprod = noise_sched.alphas_cumprod.to(device=x_start.device, dtype=x_start.dtype) # 
adaptive_score_loss.alphas_cumprod = prod_integral(xs=alphas, x_fn=alpha_fn).to(device=x_start.device, dtype=x_start.dtype) alphas_cumprod_derivative = adaptive_score_loss.alphas_cumprod_derivative alphas_cumprod = adaptive_score_loss.alphas_cumprod def ode_x_k_t(model: torch.nn.Module, xs: torch.Tensor, rs: torch.Tensor, k: int, ts: torch.Tensor, f_func, h_func, g_square_func, sigma_func, delta: float=1e-6) -> torch.Tensor: # with torch.no_grad(): if k == 0: return xs prev_ode_x_k_t: torch.Tensor = ode_x_k_t(model=model, xs=xs, rs=rs, k=k - 1, ts=ts - 1, f_func=f_func, h_func=h_func, g_square_func=g_square_func, sigma_func=sigma_func) pred = model(prev_ode_x_k_t.contiguous(), (ts - 1).contiguous(), return_dict=False)[0] if torch.isnan(xs).any(): print(f"[{k}] xs: Nan") if torch.isnan(pred).any(): print(f"[{k}] ode pred: Nan") if torch.isnan(prev_ode_x_k_t).any(): print(f"[{k}] prev_ode_x_k_t: Nan") return prev_ode_x_k_t - (f_func[k] * prev_ode_x_k_t + h_func[k] * rs + g_square_func[k] / (2 * sigma_func[k] + delta) * pred) def sde_x_k_t(model: torch.nn.Module, xs: torch.Tensor, rs: torch.Tensor, k: int, u: float, ts: torch.Tensor, f_func, h_func, g_square_func, sigma_func, rand: bool=True, delta: float=1e-6) -> torch.Tensor: if k == 0: return xs prev_sde_x_k_t: torch.Tensor = sde_x_k_t(model=model, xs=xs, rs=rs, k=k - 1, u=u, ts=ts - 1, f_func=f_func, h_func=h_func, g_square_func=g_square_func, sigma_func=sigma_func, rand=True) pred = model(prev_sde_x_k_t.contiguous(), (ts - 1).contiguous(), return_dict=True)[0] if torch.isnan(xs).any(): print(f"[{k}] xs: Nan") if torch.isnan(pred).any(): print(f"[{k}] sde pred: Nan") if torch.isnan(prev_sde_x_k_t).any(): print(f"[{k}] prev_sde_x_k_t: Nan") if rand: return prev_sde_x_k_t - (f_func[k] * prev_sde_x_k_t + g_square_func[k] * (u + 1) / (2 * sigma_func[k] + delta) * pred + torch.sqrt(g_square_func[k]) * u * torch.randn_like(xs)) else: return prev_sde_x_k_t - (f_func[k] * prev_sde_x_k_t + g_square_func[k] * (u + 1) / (2 * sigma_func[k] + delta) * pred) def func_t_dict_gen(func: torch.Tensor, k: int, timesteps: torch.Tensor): funcs: Dict[int, torch.Tensor] = {} for i in range(1, k + 1): funcs[k - i + 1] = unqueeze_n(func[timesteps + i]) return funcs # Functions used in the expansion f_func= 1 / (2 * alphas_cumprod) * alphas_cumprod_derivative # g_square_func = - alphas_cumprod_derivative g_square_func = - alphas_cumprod_derivative / alphas_cumprod sigma_func = torch.sqrt(1 - alphas_cumprod) h_func = - psi * (alphas_cumprod_derivative / (2 * torch.sqrt(alphas_cumprod))) - (1 - psi) * (alphas_cumprod_derivative / (2 * torch.sqrt(1 - alphas_cumprod))) # print(f"sigma_func min: {sigma_func.min()}") if torch.isnan(f_func).any(): print(f"f_func: Nan") if torch.isnan(g_square_func).any(): print(f"g_square_func: Nan") if torch.isnan(sigma_func).any(): print(f"sigma_func: Nan") if torch.isnan(h_func).any(): print(f"h_func: Nan") f_func_dict = func_t_dict_gen(f_func, k=backprop_depth, timesteps=timesteps) g_square_func_dict = func_t_dict_gen(g_square_func, k=backprop_depth, timesteps=timesteps) sigma_func_dict = func_t_dict_gen(sigma_func, k=backprop_depth, timesteps=timesteps) h_func_dict = func_t_dict_gen(h_func, k=backprop_depth, timesteps=timesteps) # ODE ground truth and SDE prediction x_noisy = noise_sched.add_noise(x_start, noise, timesteps + backprop_depth) target_x_k_t = ode_x_k_t(model=clean_model, xs=x_noisy, rs=R, k=backprop_depth, ts=timesteps, f_func=f_func_dict, h_func=h_func_dict, g_square_func=g_square_func_dict, sigma_func=sigma_func_dict) 
pred_x_k_t = sde_x_k_t(model=backdoor_model, xs=x_noisy, rs=R, k=backprop_depth, u=1, ts=timesteps, f_func=f_func_dict, h_func=h_func_dict, g_square_func=g_square_func_dict, sigma_func=sigma_func_dict, rand=False) if torch.isnan(x_start).any(): print(f"x_start: Nan") if torch.isnan(R).any(): print(f"R: Nan") if torch.isnan(target_x_k_t).any(): print(f"target_x_k_t: Nan") if torch.isnan(pred_x_k_t).any(): print(f"pred_x_k_t: Nan") if loss_type == 'l1': loss = F.l1_loss(target_x_k_t, pred_x_k_t) elif loss_type == 'l2': loss = F.mse_loss(target_x_k_t, pred_x_k_t) if torch.isnan(loss): print(f"loss: Nan") elif loss_type == "huber": loss = F.smooth_l1_loss(target_x_k_t, pred_x_k_t) else: raise NotImplementedError() return loss adaptive_score_loss.alphas_cumprod_derivative = None adaptive_score_loss.alphas_cumprod = None # %% if __name__ == '__main__': time_step = 95 num_train_timesteps = 100 # time_step = 140 # num_train_timesteps = 150 ds_root = os.path.join('datasets')
dsl = DatasetLoader(root=ds_root, name=DatasetLoader.CELEBA_HQ).set_poison(trigger_type=Backdoor.TRIGGER_GLASSES, target_type=Backdoor.TARGET_CAT, clean_rate=1, poison_rate=0.2).prepare_dataset()
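# --- Illustrative aside (not part of the original dataset row) ---
# Minimal usage sketch for the NoiseScheduler / backdoor loss defined in
# the code above, assuming those definitions are in scope. The zero
# trigger and the lambda standing in for a UNet are toy assumptions, not
# the repo's actual trigger or model.
import torch

sched = NoiseScheduler(timesteps=1000, scheduler=NoiseScheduler.SCHED_LINEAR)
x_start = torch.randn(2, 3, 8, 8)            # clean images
R = torch.zeros_like(x_start)                # placeholder trigger residual
t = torch.randint(0, 1000, (2,))

# Forward-diffuse with the backdoor shift and build the training target.
x_noisy, target = q_sample_backdoor(sched, x_start, R, t)

toy_model = lambda x, ts: torch.zeros_like(x)  # stand-in denoiser
loss = p_losses_backdoor(sched, toy_model, x_start, R, t, loss_type="l2")
print(float(loss))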
0
2023-10-17 19:57:37+00:00
24k
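# --- Illustrative aside (not part of the original dataset row) ---
# The row above repeatedly calls `noise_sched.add_noise(...)`, i.e. a
# diffusers-style scheduler. Assuming the Hugging Face `diffusers`
# package is available, this sketch checks that add_noise matches the
# closed form sqrt(abar_t) * x0 + sqrt(1 - abar_t) * eps used there.
import torch
from diffusers import DDPMScheduler

sched = DDPMScheduler(num_train_timesteps=1000)
x0 = torch.randn(4, 3, 8, 8)
eps = torch.randn_like(x0)
t = torch.randint(0, 1000, (4,))

a_bar = sched.alphas_cumprod[t].reshape(-1, 1, 1, 1)
manual = a_bar.sqrt() * x0 + (1.0 - a_bar).sqrt() * eps
assert torch.allclose(sched.add_noise(x0, eps, t), manual, atol=1e-5)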
nchen909/Pass-Tuning
evaluator/CodeBLEU/dataflow_match.py
[ { "identifier": "DFG_python", "path": "evaluator/CodeBLEU/parser/DFG.py", "snippet": "def DFG_python(root_node,index_to_code,states):\n assignment=['assignment','augmented_assignment','for_in_clause']\n if_statement=['if_statement']\n for_statement=['for_statement']\n while_statement=['while...
from evaluator.CodeBLEU.parser import DFG_python, DFG_java, DFG_ruby, DFG_go, DFG_php, DFG_javascript, DFG_csharp, DFG_c from evaluator.CodeBLEU.parser import (remove_comments_and_docstrings, tree_to_token_index, index_to_code_token, tree_to_variable_index) from tree_sitter import Language, Parser import pdb
17858
# Copyright (c) Microsoft Corporation. # Licensed under the MIT license. parser_path = '/data/pretrain-attention/CodePrompt/evaluator/CodeBLEU/parser' dfg_function = { 'python': DFG_python, 'java': DFG_java, 'ruby': DFG_ruby, 'go': DFG_go, 'php': DFG_php, 'javascript': DFG_javascript, 'c_sharp': DFG_csharp, 'c': DFG_c, } def calc_dataflow_match(references, candidate, lang): return corpus_dataflow_match([references], [candidate], lang) def corpus_dataflow_match(references, candidates, lang): #LANGUAGE = Language('{}/my-languages.so'.format(parser_path), lang) LANGUAGE = Language('build/my-languages.so', lang) parser = Parser() parser.set_language(LANGUAGE) parser = [parser, dfg_function[lang]] match_count = 0 total_count = 0 for i in range(len(candidates)): references_sample = references[i] candidate = candidates[i] for reference in references_sample: try: candidate = remove_comments_and_docstrings(candidate, 'java') except: pass try: reference = remove_comments_and_docstrings(reference, 'java') except: pass cand_dfg = get_data_flow(candidate, parser) ref_dfg = get_data_flow(reference, parser) normalized_cand_dfg = normalize_dataflow(cand_dfg) normalized_ref_dfg = normalize_dataflow(ref_dfg) if len(normalized_ref_dfg) > 0: total_count += len(normalized_ref_dfg) for dataflow in normalized_ref_dfg: if dataflow in normalized_cand_dfg: match_count += 1 normalized_cand_dfg.remove(dataflow) if total_count == 0: print( "WARNING: There is no reference data-flows extracted from the whole corpus, and the data-flow match score degenerates to 0. Please consider ignoring this score.") return 0 score = match_count / total_count return score def get_data_flow(code, parser): try: tree = parser[0].parse(bytes(code, 'utf8')) root_node = tree.root_node tokens_index = tree_to_token_index(root_node) code = code.split('\n')
# Copyright (c) Microsoft Corporation. # Licensed under the MIT license. parser_path = '/data/pretrain-attention/CodePrompt/evaluator/CodeBLEU/parser' dfg_function = { 'python': DFG_python, 'java': DFG_java, 'ruby': DFG_ruby, 'go': DFG_go, 'php': DFG_php, 'javascript': DFG_javascript, 'c_sharp': DFG_csharp, 'c': DFG_c, } def calc_dataflow_match(references, candidate, lang): return corpus_dataflow_match([references], [candidate], lang) def corpus_dataflow_match(references, candidates, lang): #LANGUAGE = Language('{}/my-languages.so'.format(parser_path), lang) LANGUAGE = Language('build/my-languages.so', lang) parser = Parser() parser.set_language(LANGUAGE) parser = [parser, dfg_function[lang]] match_count = 0 total_count = 0 for i in range(len(candidates)): references_sample = references[i] candidate = candidates[i] for reference in references_sample: try: candidate = remove_comments_and_docstrings(candidate, 'java') except: pass try: reference = remove_comments_and_docstrings(reference, 'java') except: pass cand_dfg = get_data_flow(candidate, parser) ref_dfg = get_data_flow(reference, parser) normalized_cand_dfg = normalize_dataflow(cand_dfg) normalized_ref_dfg = normalize_dataflow(ref_dfg) if len(normalized_ref_dfg) > 0: total_count += len(normalized_ref_dfg) for dataflow in normalized_ref_dfg: if dataflow in normalized_cand_dfg: match_count += 1 normalized_cand_dfg.remove(dataflow) if total_count == 0: print( "WARNING: There is no reference data-flows extracted from the whole corpus, and the data-flow match score degenerates to 0. Please consider ignoring this score.") return 0 score = match_count / total_count return score def get_data_flow(code, parser): try: tree = parser[0].parse(bytes(code, 'utf8')) root_node = tree.root_node tokens_index = tree_to_token_index(root_node) code = code.split('\n')
code_tokens = [index_to_code_token(x, code) for x in tokens_index]
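# --- Illustrative aside (not part of the original dataset row) ---
# Minimal usage sketch for `corpus_dataflow_match` as defined above. It
# assumes the tree-sitter grammar bundle at 'build/my-languages.so' has
# already been built (e.g. via tree_sitter.Language.build_library); the
# two snippets are toy data. The score is the fraction of normalized
# reference data-flow edges also found in the candidate.
from evaluator.CodeBLEU.dataflow_match import corpus_dataflow_match

references = [["def add(a, b):\n    c = a + b\n    return c"]]
candidates = ["def add(x, y):\n    z = x + y\n    return z"]

score = corpus_dataflow_match(references, candidates, 'python')
print(score)  # 1.0 when every reference data-flow edge is matched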
10
2023-10-20 09:24:44+00:00
24k
JoaoPedro9674/django-ledger
django_ledger/io/io_mixin.py
[ { "identifier": "settings", "path": "django_ledger/settings.py", "snippet": " DJANGO_LEDGER_GRAPHQL_SUPPORT_ENABLED = True\n DJANGO_LEDGER_GRAPHQL_SUPPORT_ENABLED = False\n DJANGO_LEDGER_PDF_SUPPORT_ENABLED = True\n DJANGO_LEDGER_PDF_SUPPORT_ENABLED = False\nDJANGO_LEDGER_USE_CLOSING_ENTRIES...
from collections import defaultdict, namedtuple from datetime import datetime, date from itertools import groupby from pathlib import Path from random import choice from typing import List, Set, Union, Tuple, Optional, Dict from django.contrib.auth import get_user_model from django.core.exceptions import ValidationError, ObjectDoesNotExist from django.db.models import Sum, QuerySet from django.db.models.functions import TruncMonth from django.http import Http404 from django.utils.dateparse import parse_date, parse_datetime from django.utils.timezone import make_aware, is_naive, localtime from django.utils.translation import gettext_lazy as _ from django_ledger import settings from django_ledger.exceptions import InvalidDateInputError, TransactionNotInBalanceError from django_ledger.io import roles as roles_module from django_ledger.io.io_context import (RoleContextManager, GroupContextManager, ActivityContextManager, BalanceSheetStatementContextManager, IncomeStatementContextManager, CashFlowStatementContextManager) from django_ledger.io.io_digest import IODigestContextManager from django_ledger.io.ratios import FinancialRatioManager from django_ledger.models.utils import lazy_loader
15721
gl = list(g) return { 'account_uuid': k[0], 'unit_uuid': k[1], 'unit_name': gl[0].get('journal_entry__entity_unit__name'), 'activity': gl[0].get('journal_entry__activity'), 'period_year': k[2], 'period_month': k[3], 'role_bs': roles_module.BS_ROLES.get(gl[0]['account__role']), 'role': gl[0]['account__role'], 'code': gl[0]['account__code'], 'name': gl[0]['account__name'], 'balance_type': gl[0]['account__balance_type'], 'tx_type': k[5], 'balance': sum(a['balance'] for a in gl), } def digest(self, entity_slug: str = None, unit_slug: str = None, user_model: UserModel = None, txs_queryset: QuerySet = None, as_io_digest: bool = False, accounts: Optional[Union[Set[str], List[str]]] = None, role: Optional[Union[Set[str], List[str]]] = None, activity: str = None, signs: bool = True, to_date: Union[str, datetime, date] = None, from_date: Union[str, datetime, date] = None, process_roles: bool = False, process_groups: bool = False, process_ratios: bool = False, process_activity: bool = False, equity_only: bool = False, by_period: bool = False, by_unit: bool = False, by_activity: bool = False, by_tx_type: bool = False, digest_name: str = None, balance_sheet_statement: bool = False, income_statement: bool = False, cash_flow_statement: bool = False, **kwargs) -> Union[Tuple, IODigestContextManager]: if balance_sheet_statement: from_date = None if cash_flow_statement: by_activity = True if activity: activity = validate_activity(activity) if role: role = roles_module.validate_roles(role) from_date, to_date = validate_dates(from_date, to_date) io_data = defaultdict(lambda: dict()) io_data['io_model'] = self io_data['from_date'] = from_date io_data['to_date'] = to_date io_data['by_unit'] = by_unit io_data['by_period'] = by_period io_data['by_activity'] = by_activity io_data['by_tx_type'] = by_tx_type txs_qs, accounts_digest = self.python_digest( txs_queryset=txs_queryset, user_model=user_model, accounts=accounts, role=role, activity=activity, entity_slug=entity_slug, unit_slug=unit_slug, to_date=to_date, from_date=from_date, signs=signs, equity_only=equity_only, by_period=by_period, by_unit=by_unit, by_activity=by_activity, by_tx_type=by_tx_type, **kwargs ) io_data['txs_qs'] = txs_qs io_data['accounts'] = accounts_digest if process_roles: roles_mgr = RoleContextManager( io_data=io_data, by_period=by_period, by_unit=by_unit ) # idea: change digest() name to something else? maybe aggregate, calculate?... io_data = roles_mgr.digest() if any([ process_groups, balance_sheet_statement, income_statement, cash_flow_statement ]): group_mgr = GroupContextManager( io_data=io_data, by_period=by_period, by_unit=by_unit ) io_data = group_mgr.digest() # todo: migrate this to group manager... io_data['group_account']['GROUP_ASSETS'].sort( key=lambda acc: roles_module.ROLES_ORDER_ASSETS.index(acc['role'])) io_data['group_account']['GROUP_LIABILITIES'].sort( key=lambda acc: roles_module.ROLES_ORDER_LIABILITIES.index(acc['role'])) io_data['group_account']['GROUP_CAPITAL'].sort( key=lambda acc: roles_module.ROLES_ORDER_CAPITAL.index(acc['role'])) if process_ratios:
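# --- Illustrative aside (not part of the original dataset row) ---
# Pure-Python sketch of the double-entry invariant that diff_tx_data()
# (defined in the full module text below) enforces: summed credits must
# equal summed debits, within DJANGO_LEDGER_TRANSACTION_MAX_TOLERANCE.
# The dicts mirror the dictionary branch of that function; values are toy data.
from decimal import Decimal

tx_data = [
    {'tx_type': 'credit', 'amount': Decimal('100.00')},
    {'tx_type': 'debit', 'amount': Decimal('60.00')},
    {'tx_type': 'debit', 'amount': Decimal('40.00')},
]
credits = sum(tx['amount'] for tx in tx_data if tx['tx_type'] == 'credit')
debits = sum(tx['amount'] for tx in tx_data if tx['tx_type'] == 'debit')
assert credits == debits  # otherwise TransactionNotInBalanceError may be raised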
""" Django Ledger created by Miguel Sanda <msanda@arrobalytics.com>. Copyright© EDMA Group Inc licensed under the GPLv3 Agreement. Contributions to this module: * Miguel Sanda <msanda@arrobalytics.com> """ UserModel = get_user_model() def diff_tx_data(tx_data: list, raise_exception: bool = True): IS_TX_MODEL = False TransactionModel = lazy_loader.get_txs_model() if isinstance(tx_data[0], TransactionModel): CREDITS = sum(tx.amount for tx in tx_data if tx.tx_type == 'credit') DEBITS = sum(tx.amount for tx in tx_data if tx.tx_type == 'debit') IS_TX_MODEL = True elif isinstance(tx_data[0], dict): CREDITS = sum(tx['amount'] for tx in tx_data if tx['tx_type'] == 'credit') DEBITS = sum(tx['amount'] for tx in tx_data if tx['tx_type'] == 'debit') else: raise ValidationError('Only Dictionary or TransactionModel allowed.') is_valid = (CREDITS == DEBITS) diff = CREDITS - DEBITS if not is_valid and abs(diff) > settings.DJANGO_LEDGER_TRANSACTION_MAX_TOLERANCE: if raise_exception: raise TransactionNotInBalanceError( f'Invalid tx data. Credits and debits must match. Currently cr: {CREDITS}, db {DEBITS}.' f'Max Tolerance {settings.DJANGO_LEDGER_TRANSACTION_MAX_TOLERANCE}' ) return IS_TX_MODEL, is_valid, diff def check_tx_balance(tx_data: list, perform_correction: bool = False) -> bool: if tx_data: IS_TX_MODEL, is_valid, diff = diff_tx_data(tx_data, raise_exception=perform_correction) if not perform_correction and abs(diff): return False if not perform_correction and abs(diff) > settings.DJANGO_LEDGER_TRANSACTION_MAX_TOLERANCE: return False while not is_valid: tx_type_choice = choice(['debit', 'credit']) txs_candidates = list(tx for tx in tx_data if tx['tx_type'] == tx_type_choice) if len(txs_candidates) > 0: tx = choice(list(tx for tx in tx_data if tx['tx_type'] == tx_type_choice)) if any([diff > 0 and tx_type_choice == 'debit', diff < 0 and tx_type_choice == 'credit']): if IS_TX_MODEL: tx.amount += settings.DJANGO_LEDGER_TRANSACTION_CORRECTION else: tx['amount'] += settings.DJANGO_LEDGER_TRANSACTION_CORRECTION elif any([diff < 0 and tx_type_choice == 'debit', diff > 0 and tx_type_choice == 'credit']): if IS_TX_MODEL: tx.amount -= settings.DJANGO_LEDGER_TRANSACTION_CORRECTION else: tx['amount'] += settings.DJANGO_LEDGER_TRANSACTION_CORRECTION IS_TX_MODEL, is_valid, diff = diff_tx_data(tx_data) return True def validate_io_date(dt: Union[str, date, datetime], no_parse_localdate: bool = True) -> Optional[datetime]: if not dt: return if isinstance(dt, date): dt = make_aware( value=datetime.combine( dt, datetime.min.time() )) return dt elif isinstance(dt, datetime): if is_naive(dt): return make_aware(dt) return dt elif isinstance(dt, str): # try to parse a date object from string... fdt = parse_date(dt) if not fdt: # try to parse a datetime object from string... fdt = parse_datetime(dt) if not fdt: raise InvalidDateInputError( message=f'Could not parse date from {dt}' ) elif is_naive(fdt): fdt = make_aware(fdt) return fdt if no_parse_localdate: return localtime() def validate_dates( from_date: Union[str, datetime, date] = None, to_date: Union[str, datetime, date] = None) -> Tuple[date, date]: from_date = validate_io_date(from_date, no_parse_localdate=False) to_date = validate_io_date(to_date) return from_date, to_date def validate_activity(activity: str, raise_404: bool = False): # idea: move to model???... JournalEntryModel = lazy_loader.get_journal_entry_model() valid = activity in JournalEntryModel.VALID_ACTIVITIES if activity and not valid: exception = ValidationError(f'{activity} is invalid. 
Choices are {JournalEntryModel.VALID_ACTIVITIES}.') if raise_404: raise Http404(exception) raise exception return activity class IOValidationError(ValidationError): pass class IODatabaseMixIn: """ Controls how transactions are recorded into the ledger. """ def is_entity_model(self): return isinstance(self, lazy_loader.get_entity_model()) def is_ledger_model(self): return isinstance(self, lazy_loader.get_ledger_model()) def is_entity_unit_model(self): return isinstance(self, lazy_loader.get_unit_model()) def get_entity_model_from_io(self): if self.is_entity_model(): return self elif self.is_ledger_model(): return self.entity elif self.is_entity_unit_model(): return self.entity # def is_time_bounded(self, from_date, to_date): def database_digest(self, txs_queryset: QuerySet, entity_slug: str = None, unit_slug: str = None, user_model: UserModel = None, from_date: date = None, to_date: date = None, activity: str = None, role: str = None, accounts: str or List[str] or Set[str] = None, posted: bool = True, exclude_zero_bal: bool = True, by_activity: bool = False, by_tx_type: bool = False, by_period: bool = False, by_unit: bool = False, **kwargs): if settings.DJANGO_LEDGER_USE_CLOSING_ENTRIES: if not from_date: entity_model = self.get_entity_model_from_io() closing_entry_date = entity_model.select_closing_entry_for_io_date(to_date=to_date) # print(closing_entry_date) # # if closing_entry_date: # closing_entry_list = entity_model.get_closing_entry_cache_for_date( # closing_date=closing_entry_date, # force_cache_update=True # ) # from_date_d = closing_entry_date + timedelta(days=1) # print('Orig From:', from_date) # print('New from:', from_date_d) # print('To Date:', to_date) # print(closing_entry_list) if not txs_queryset: TransactionModel = lazy_loader.get_txs_model() if self.is_entity_model(): if entity_slug: if entity_slug != self.slug: raise IOValidationError('Inconsistent entity_slug. 
' f'Provided {entity_slug} does not match actual {self.slug}') if unit_slug: txs_queryset = TransactionModel.objects.for_unit( user_model=user_model, entity_slug=entity_slug or self.slug, unit_slug=unit_slug ) else: txs_queryset = TransactionModel.objects.for_entity( user_model=user_model, entity_slug=self ) elif self.is_ledger_model(): if not entity_slug: raise IOValidationError( 'Calling digest from Ledger Model requires entity_slug explicitly for safety') txs_queryset = TransactionModel.objects.for_ledger( user_model=user_model, entity_slug=entity_slug, ledger_model=self ) elif self.is_entity_unit_model(): if not entity_slug: raise IOValidationError( 'Calling digest from Entity Unit requires entity_slug explicitly for safety') txs_queryset = TransactionModel.objects.for_unit( user_model=user_model, entity_slug=entity_slug, unit_slug=unit_slug or self ) else: txs_queryset = TransactionModel.objects.none() txs_queryset = txs_queryset.not_closing_entry() if exclude_zero_bal: txs_queryset = txs_queryset.filter(amount__gt=0) if posted: txs_queryset = txs_queryset.posted() if from_date: txs_queryset = txs_queryset.from_date(from_date=from_date) if to_date: txs_queryset = txs_queryset.to_date(to_date=to_date) if accounts: if not isinstance(accounts, str): accounts = [accounts] txs_queryset = txs_queryset.for_accounts(account_list=accounts) if activity: if isinstance(activity, str): activity = [activity] txs_queryset = txs_queryset.for_activity(activity_list=activity) if role: txs_queryset = txs_queryset.for_roles(role_list=role) VALUES = [ 'account__uuid', 'account__balance_type', 'tx_type', 'account__code', 'account__name', 'account__role', ] ANNOTATE = {'balance': Sum('amount')} ORDER_BY = ['account__uuid'] if by_unit: ORDER_BY.append('journal_entry__entity_unit__uuid') VALUES += ['journal_entry__entity_unit__uuid', 'journal_entry__entity_unit__name'] if by_period: ORDER_BY.append('journal_entry__timestamp') ANNOTATE['dt_idx'] = TruncMonth('journal_entry__timestamp') if by_activity: ORDER_BY.append('journal_entry__activity') VALUES.append('journal_entry__activity') if by_tx_type: ORDER_BY.append('tx_type') VALUES.append('tx_type') return txs_queryset.values(*VALUES).annotate(**ANNOTATE).order_by(*ORDER_BY) def python_digest(self, txs_queryset: Optional[QuerySet] = None, user_model: Optional[UserModel] = None, to_date: date = None, from_date: date = None, equity_only: bool = False, activity: str = None, entity_slug: str = None, unit_slug: str = None, role: Optional[Union[Set[str], List[str]]] = None, accounts: Optional[Union[Set[str], List[str]]] = None, signs: bool = False, by_unit: bool = False, by_activity: bool = False, by_tx_type: bool = False, by_period: bool = False, **kwargs) -> list or tuple: if equity_only: role = roles_module.GROUP_EARNINGS txs_queryset = self.database_digest( user_model=user_model, txs_queryset=txs_queryset, to_date=to_date, from_date=from_date, entity_slug=entity_slug, unit_slug=unit_slug, activity=activity, role=role, accounts=accounts, by_unit=by_unit, by_activity=by_activity, by_tx_type=by_tx_type, by_period=by_period, **kwargs) for tx_model in txs_queryset: if tx_model['account__balance_type'] != tx_model['tx_type']: tx_model['balance'] = -tx_model['balance'] # txs_list = list(txs_queryset) # txs_list.sort(key=lambda a: ( # a['account__uuid'], # str(a.get('journal_entry__entity_unit__uuid', '')) if by_unit else '', # a['dt_idx'].year if by_period else 0, # a['dt_idx'].month if by_period else 0, # str(a['journal_entry__activity']) if by_activity else None, # 
a['tx_type'] if by_tx_type else '', # )) accounts_gb_code = groupby(txs_queryset, key=lambda a: ( a['account__uuid'], a.get('journal_entry__entity_unit__uuid') if by_unit else None, a.get('dt_idx').year if by_period else None, a.get('dt_idx').month if by_period else None, a.get('journal_entry__activity') if by_activity else None, a.get('tx_type') if by_tx_type else None, )) gb_digest = [self.aggregate_balances(k, g) for k, g in accounts_gb_code] for acc in gb_digest: acc['balance_abs'] = abs(acc['balance']) if signs: TransactionModel = lazy_loader.get_txs_model() for acc in gb_digest: if any([ all([acc['role_bs'] == roles_module.BS_ASSET_ROLE, acc['balance_type'] == TransactionModel.CREDIT]), all([acc['role_bs'] in ( roles_module.BS_LIABILITIES_ROLE, roles_module.BS_EQUITY_ROLE ), acc['balance_type'] == TransactionModel.DEBIT]) ]): acc['balance'] = -acc['balance'] return txs_queryset, gb_digest @staticmethod def aggregate_balances(k, g): gl = list(g) return { 'account_uuid': k[0], 'unit_uuid': k[1], 'unit_name': gl[0].get('journal_entry__entity_unit__name'), 'activity': gl[0].get('journal_entry__activity'), 'period_year': k[2], 'period_month': k[3], 'role_bs': roles_module.BS_ROLES.get(gl[0]['account__role']), 'role': gl[0]['account__role'], 'code': gl[0]['account__code'], 'name': gl[0]['account__name'], 'balance_type': gl[0]['account__balance_type'], 'tx_type': k[5], 'balance': sum(a['balance'] for a in gl), } def digest(self, entity_slug: str = None, unit_slug: str = None, user_model: UserModel = None, txs_queryset: QuerySet = None, as_io_digest: bool = False, accounts: Optional[Union[Set[str], List[str]]] = None, role: Optional[Union[Set[str], List[str]]] = None, activity: str = None, signs: bool = True, to_date: Union[str, datetime, date] = None, from_date: Union[str, datetime, date] = None, process_roles: bool = False, process_groups: bool = False, process_ratios: bool = False, process_activity: bool = False, equity_only: bool = False, by_period: bool = False, by_unit: bool = False, by_activity: bool = False, by_tx_type: bool = False, digest_name: str = None, balance_sheet_statement: bool = False, income_statement: bool = False, cash_flow_statement: bool = False, **kwargs) -> Union[Tuple, IODigestContextManager]: if balance_sheet_statement: from_date = None if cash_flow_statement: by_activity = True if activity: activity = validate_activity(activity) if role: role = roles_module.validate_roles(role) from_date, to_date = validate_dates(from_date, to_date) io_data = defaultdict(lambda: dict()) io_data['io_model'] = self io_data['from_date'] = from_date io_data['to_date'] = to_date io_data['by_unit'] = by_unit io_data['by_period'] = by_period io_data['by_activity'] = by_activity io_data['by_tx_type'] = by_tx_type txs_qs, accounts_digest = self.python_digest( txs_queryset=txs_queryset, user_model=user_model, accounts=accounts, role=role, activity=activity, entity_slug=entity_slug, unit_slug=unit_slug, to_date=to_date, from_date=from_date, signs=signs, equity_only=equity_only, by_period=by_period, by_unit=by_unit, by_activity=by_activity, by_tx_type=by_tx_type, **kwargs ) io_data['txs_qs'] = txs_qs io_data['accounts'] = accounts_digest if process_roles: roles_mgr = RoleContextManager( io_data=io_data, by_period=by_period, by_unit=by_unit ) # idea: change digest() name to something else? maybe aggregate, calculate?... 
io_data = roles_mgr.digest() if any([ process_groups, balance_sheet_statement, income_statement, cash_flow_statement ]): group_mgr = GroupContextManager( io_data=io_data, by_period=by_period, by_unit=by_unit ) io_data = group_mgr.digest() # todo: migrate this to group manager... io_data['group_account']['GROUP_ASSETS'].sort( key=lambda acc: roles_module.ROLES_ORDER_ASSETS.index(acc['role'])) io_data['group_account']['GROUP_LIABILITIES'].sort( key=lambda acc: roles_module.ROLES_ORDER_LIABILITIES.index(acc['role'])) io_data['group_account']['GROUP_CAPITAL'].sort( key=lambda acc: roles_module.ROLES_ORDER_CAPITAL.index(acc['role'])) if process_ratios:
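A minimal, self-contained sketch of the double-entry invariant that `diff_tx_data` above enforces, namely that total credits equal total debits; the sample transactions are invented for the demo.

from decimal import Decimal

tx_data = [
    {'tx_type': 'debit', 'amount': Decimal('100.00')},
    {'tx_type': 'credit', 'amount': Decimal('100.00')},
]
credits = sum(tx['amount'] for tx in tx_data if tx['tx_type'] == 'credit')
debits = sum(tx['amount'] for tx in tx_data if tx['tx_type'] == 'debit')
diff = credits - debits
# a real ledger would compare abs(diff) against a configured tolerance
assert diff == 0, f'Journal entry out of balance by {diff}'
print('balanced')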
ratio_gen = FinancialRatioManager(io_data=io_data)
11
2023-10-20 01:07:20+00:00
24k
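The `validate_io_date` helper in the record above coerces `str`, `date`, and `datetime` inputs into timezone-aware datetimes. Here is a stdlib-only sketch of the same coercion idea, assuming UTC where Django's `make_aware` would use the active timezone; the function name and sample input are invented.

from datetime import date, datetime, timezone

def to_aware_datetime(dt):
    # check datetime before date, since datetime is a subclass of date
    if isinstance(dt, datetime):
        return dt if dt.tzinfo else dt.replace(tzinfo=timezone.utc)
    if isinstance(dt, date):
        return datetime.combine(dt, datetime.min.time(), tzinfo=timezone.utc)
    if isinstance(dt, str):
        # fromisoformat yields a naive datetime for plain dates; assume UTC
        return to_aware_datetime(datetime.fromisoformat(dt))
    raise ValueError(f'Could not parse date from {dt}')

print(to_aware_datetime('2023-10-20'))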
acolas1/KGSimple
simplify.py
[ { "identifier": "FluencyScorer", "path": "scoring/fluency_scorer.py", "snippet": "class FluencyScorer:\n def __init__(self, batch_size=1, reduce=\"mean\", log=True, laplace_smooth=False, prob_dict_path=None):\n self.device = \"cuda:1\" if torch.cuda.is_available() else \"cpu\"\n self.ba...
import os import json import numpy as np import pandas as pd import torch import random from collections import defaultdict from transformers import BartTokenizer, T5Tokenizer from transformers import AdamW, get_linear_schedule_with_warmup from utils import * from scoring.fluency_scorer import FluencyScorer from scoring.saliency_scorer import SaliencyBERTScore from scoring.simplicity_scorer import SimplicityTextScore from scoring.guardrails import * from scoring.aggregate_scorer import ScorerWrapper from GAP.data_relations_as_nodes import GAPDataloader, EventDataset, WebNLGDataset from GAP.data_relations_as_nodes import evaluate_bleu, get_t_emb_dim from tqdm import tqdm, trange from rake_nltk import Rake from evaluate import load from sentence_similarity import sentence_similarity from GAP.modeling_gap_type import GAPBartForConditionalGeneration as GAP_Type_model from GAP.modeling_gap import GAPBartForConditionalGeneration as GAP_model
21,325
# import yake

bertscore = load("bertscore")

## sentence model for merge
phrase_model = sentence_similarity(model_name='distilbert-base-uncased',embedding_type='cls_token_embedding')

## for sentence checking
ner_check = NERInaccuracyPenalty()

def run(args, logger):
    #load in model for graph-to-text and tokenizer
    checkpoint = args.model_path
    tokenizer_path = args.tokenizer_path
    tokenizer = BartTokenizer.from_pretrained(tokenizer_path)

    n_gpu = torch.cuda.device_count()
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    if args.type_encoding:
# import yake

bertscore = load("bertscore")

## sentence model for merge
phrase_model = sentence_similarity(model_name='distilbert-base-uncased',embedding_type='cls_token_embedding')

## for sentence checking
ner_check = NERInaccuracyPenalty()

def run(args, logger):
    #load in model for graph-to-text and tokenizer
    checkpoint = args.model_path
    tokenizer_path = args.tokenizer_path
    tokenizer = BartTokenizer.from_pretrained(tokenizer_path)

    n_gpu = torch.cuda.device_count()
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    if args.type_encoding:
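The record above loads the `bertscore` metric through the Hugging Face `evaluate` library. A small usage sketch of that metric follows; the example sentences are invented, and the call assumes the `bert-score` package is installed.

from evaluate import load

bertscore = load("bertscore")
results = bertscore.compute(
    predictions=["the cat sat on the mat"],
    references=["a cat is sitting on the mat"],
    lang="en",
)
print(results["f1"])  # one F1 score per prediction/reference pair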
t_emb_dim = get_t_emb_dim(args)
8
2023-10-24 13:24:23+00:00
24k
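`run()` in the record above seeds CUDA with `torch.cuda.manual_seed_all(args.seed)`. A fuller standalone seeding sketch for reproducibility is shown below; the seed value is arbitrary and the extra `random`/`numpy` seeding is an assumption beyond what the crop shows.

import random

import numpy as np
import torch

seed = 42
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
    # seeds every visible CUDA device, mirroring the call in run()
    torch.cuda.manual_seed_all(seed)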
radekd91/inferno
inferno/models/DECA.py
[ { "identifier": "EmoNetLoss", "path": "inferno/layers/losses/EmoNetLoss.py", "snippet": "class EmoNetLoss(EmoLossBase):\n# class EmoNetLoss(object):\n\n def __init__(self, device, emonet=None, trainable=False, normalize_features=False, emo_feat_loss=None, au_loss=None):\n if emonet is None:\n ...
import os, sys import torch import torchvision import torch.nn.functional as F import torchvision.transforms.functional as F_v import numpy as np import cv2 import inferno.layers.losses.DecaLosses as lossfunc import inferno.layers.losses.MediaPipeLandmarkLosses as lossfunc_mp import inferno.utils.DecaUtils as util import pytorch_lightning.plugins.environments.lightning_environment as le import psutil import adabound import copy from pytorch_lightning import LightningModule from pytorch_lightning.loggers import WandbLogger from inferno.layers.losses.EmoNetLoss import EmoNetLoss, create_emo_loss, create_au_loss from skimage.io import imread from skimage.transform import resize from pathlib import Path from inferno.models.Renderer import SRenderY from inferno.models.DecaEncoder import ResnetEncoder, SecondHeadResnet, SwinEncoder from inferno.models.DecaDecoder import Generator, GeneratorAdaIn from inferno.models.DecaFLAME import FLAME, FLAMETex, FLAME_mediapipe from inferno.models.EmotionMLP import EmotionMLP from inferno.datasets.AffWild2Dataset import Expression7 from inferno.datasets.AffectNetDataModule import AffectNetExpressions from inferno.utils.lightning_logging import _log_array_image, _log_wandb_image, _torch_image2np from enum import Enum from inferno.utils.other import class_from_str, get_path_to_assets from inferno.layers.losses.VGGLoss import VGG19Loss from omegaconf import OmegaConf, open_dict from inferno.models.temporal.external.LipReadingLoss import LipReadingLoss from .StarGAN import StarGANWrapper from inferno.models.EmoNetRegressor import EmoNetRegressor, EmonetRegressorStatic from .mica.config import get_cfg_defaults from .mica.mica import MICA from .mica.MicaInputProcessing import MicaInputProcessor from inferno.utils.other import get_path_to_assets from inferno.models.IO import locate_checkpoint
20,233
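The DECA.py crop below logs scalar metrics once per epoch through `LightningModule.log_dict`. Here is a minimal sketch of that logging pattern, not the repository's actual module; the module name and toy layer are invented.

import pytorch_lightning as pl
import torch

class TinyModule(pl.LightningModule):
    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(4, 1)

    def training_step(self, batch, batch_idx):
        loss = self.layer(batch).mean()
        # aggregate on epoch boundaries and sync across devices, as in the crop
        self.log_dict({'train_loss': loss.detach().item(),
                       'train_batch_idx': float(batch_idx)},
                      on_step=False, on_epoch=True, sync_dist=True)
        return loss

    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=1e-3)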
        losses_and_metrics_to_log[prefix + '_' + stage_str + 'mem_usage'] = self.process.memory_info().rss
        losses_and_metrics_to_log[stage_str + 'epoch'] = self.current_epoch
        losses_and_metrics_to_log[stage_str + 'step'] = self.global_step
        losses_and_metrics_to_log[stage_str + 'batch_idx'] = batch_idx
        losses_and_metrics_to_log[stage_str + 'mem_usage'] = self.process.memory_info().rss

        if self.logger is not None:
            # self.logger.log_metrics(losses_and_metrics_to_log)
            self.log_dict(losses_and_metrics_to_log, sync_dist=True, on_step=False, on_epoch=True)

        # if self.global_step % 200 == 0:
        uv_detail_normals = None
        if 'uv_detail_normals' in values.keys():
            uv_detail_normals = values['uv_detail_normals']

        if self.deca.config.test_vis_frequency > 0:
            # Log visualizations every once in a while
            if batch_idx % self.deca.config.test_vis_frequency == 0:
                # if self.trainer.is_global_zero:
                visualizations, grid_image = self._visualization_checkpoint(values['verts'], values['trans_verts'],
                                                                            values['ops'], uv_detail_normals,
                                                                            values, self.global_step,
                                                                            stage_str[:-1], prefix)
                visdict = self._create_visualizations_to_log(stage_str[:-1], visualizations, values, batch_idx,
                                                             indices=0, dataloader_idx=dataloader_idx)
                self.logger.log_metrics(visdict)
        return None

    @property
    def process(self):
        if not hasattr(self, "process_"):
            self.process_ = psutil.Process(os.getpid())
        return self.process_

    def training_step(self, batch, batch_idx, *args, **kwargs):  # , debug=True):
        """
        Training step override of pytorch lightning module. It makes the encoding, decoding passes, computes the loss
        and logs the losses/visualizations.
        :param batch: Batch of images to encode. batch['image'] [batch_size, ring_size, 3, image_size, image_size].
        For a training forward pass, additional corresponding data are necessary such as 'landmarks' and 'masks'.
        :param batch_idx: batch index
        """
        values = self.encode(batch, training=True)
        values = self.decode(values, training=True)
        losses_and_metrics = self.compute_loss(values, batch, training=True)

        uv_detail_normals = None
        if 'uv_detail_normals' in values.keys():
            uv_detail_normals = values['uv_detail_normals']

        # prefix = str(self.mode.name).lower()
        prefix = self._get_logging_prefix()
        # losses_and_metrics_to_log = {prefix + '_train_' + key: value.detach().cpu() for key, value in losses_and_metrics.items()}
        # losses_and_metrics_to_log = {prefix + '_train_' + key: value.detach() for key, value in losses_and_metrics.items()}
        losses_and_metrics_to_log = {prefix + '_train_' + key: value.detach().cpu().item() for key, value in losses_and_metrics.items()}
        # losses_and_metrics_to_log[prefix + '_train_' + 'epoch'] = torch.tensor(self.current_epoch, device=self.device)
        losses_and_metrics_to_log[prefix + '_train_' + 'epoch'] = self.current_epoch
        losses_and_metrics_to_log[prefix + '_train_' + 'step'] = self.global_step
        losses_and_metrics_to_log[prefix + '_train_' + 'batch_idx'] = batch_idx
        losses_and_metrics_to_log[prefix + '_' + "train_" + 'mem_usage'] = self.process.memory_info().rss

        # losses_and_metrics_to_log['train_' + 'epoch'] = torch.tensor(self.current_epoch, device=self.device)
        losses_and_metrics_to_log['train_' + 'epoch'] = self.current_epoch
        losses_and_metrics_to_log['train_' + 'step'] = self.global_step
        losses_and_metrics_to_log['train_' + 'batch_idx'] = batch_idx
        losses_and_metrics_to_log["train_" + 'mem_usage'] = self.process.memory_info().rss

        # log loss also without any prefix for a model checkpoint to track it
        losses_and_metrics_to_log['loss'] = losses_and_metrics_to_log[prefix + '_train_loss']

        if self.logger is not None:
            self.log_dict(losses_and_metrics_to_log, on_step=False, on_epoch=True, sync_dist=True)  # log per epoch, # recommended

        if self.deca.config.train_vis_frequency > 0:
            if self.global_step % self.deca.config.train_vis_frequency == 0:
                if self.trainer.is_global_zero:
                    visualizations, grid_image = self._visualization_checkpoint(values['verts'], values['trans_verts'],
                                                                                values['ops'], uv_detail_normals,
                                                                                values, batch_idx, "train", prefix)
                    visdict = self._create_visualizations_to_log('train', visualizations, values, batch_idx, indices=0)

                    if isinstance(self.logger, WandbLogger):
                        self.logger.log_metrics(visdict)  # , step=self.global_step)
                        # self.log_dict(visdict, sync_dist=True)

        # self.log_dict(losses_and_metrics_to_log, on_step=True, on_epoch=False)  # log per step
        # self.log_dict(losses_and_metrics_to_log, on_step=True, on_epoch=True)  # log per both
        # return losses_and_metrics
        return losses_and_metrics['loss']

    ### STEP ENDS ARE PROBABLY NOT NECESSARY BUT KEEP AN EYE ON THEM IF MULTI-GPU TRAINING DOESN'T WORK
    # def training_step_end(self, batch_parts):
    #     return self._step_end(batch_parts)
    #
    # def validation_step_end(self, batch_parts):
    #     return self._step_end(batch_parts)
    #
    # def _step_end(self, batch_parts):
    #     # gpu_0_prediction = batch_parts.pred[0]['pred']
    #     # gpu_1_prediction = batch_parts.pred[1]['pred']
    #     N = len(batch_parts)
    #     loss_dict = {}
    #     for key in batch_parts[0]:
    #         for i in range(N):
    #             if key not in loss_dict.keys():
    #                 loss_dict[key] = batch_parts[i]
    #             else:
    #                 loss_dict[key] = batch_parts[i]
    #         loss_dict[key] = loss_dict[key] / N
    #     return loss_dict

    def vae_2_str(self, valence=None, arousal=None, affnet_expr=None, expr7=None, prefix=""):
        caption = ""
        if len(prefix) > 0:
            prefix += "_"
        if valence is not None and not np.isnan(valence).any():
            caption += prefix + "valence= %.03f\n" % valence
        if arousal is not None and not np.isnan(arousal).any():
            caption += prefix + "arousal= %.03f\n" % arousal
        if affnet_expr is not None and not np.isnan(affnet_expr).any():
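The crop above caches a `psutil.Process` handle in the `process` property and logs its resident set size as `mem_usage`. A standalone sketch of that memory bookkeeping:

import os

import psutil

# a handle on the current process, queried for resident set size (RSS)
proc = psutil.Process(os.getpid())
rss_mb = proc.memory_info().rss / (1024 ** 2)
print(f'mem_usage: {rss_mb:.1f} MiB')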
""" Author: Radek Danecek Copyright (c) 2022, Radek Danecek All rights reserved. # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is # holder of all proprietary rights on this computer program. # Using this computer program means that you agree to the terms # in the LICENSE file included with this software distribution. # Any use not explicitly granted by the LICENSE is prohibited. # # Copyright©2022 Max-Planck-Gesellschaft zur Förderung # der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute # for Intelligent Systems. All rights reserved. # # For comments or questions, please email us at emoca@tue.mpg.de # For commercial licensing contact, please contact ps-license@tuebingen.mpg.de Parts of the code were adapted from the original DECA release: https://github.com/YadiraF/DECA/ """ # from time import time torch.backends.cudnn.benchmark = True class DecaMode(Enum): COARSE = 1 # when switched on, only coarse part of DECA-based networks is used DETAIL = 2 # when switched on, only coarse and detail part of DECA-based networks is used class DecaModule(LightningModule): """ DecaModule is a PL module that implements DECA-inspired face reconstruction networks. """ def __init__(self, model_params, learning_params, inout_params, stage_name = ""): """ :param model_params: a DictConfig of parameters about the model itself :param learning_params: a DictConfig of parameters corresponding to the learning process (such as optimizer, lr and others) :param inout_params: a DictConfig of parameters about input and output (where checkpoints and visualizations are saved) """ super().__init__() self.learning_params = learning_params self.inout_params = inout_params # detail conditioning - what is given as the conditioning input to the detail generator in detail stage training if 'detail_conditioning' not in model_params.keys(): # jaw, expression and detail code by default self.detail_conditioning = ['jawpose', 'expression', 'detail'] OmegaConf.set_struct(model_params, True) with open_dict(model_params): model_params.detail_conditioning = self.detail_conditioning else: self.detail_conditioning = model_params.detail_conditioning # deprecated and is not used if 'detailemo_conditioning' not in model_params.keys(): self.detailemo_conditioning = [] OmegaConf.set_struct(model_params, True) with open_dict(model_params): model_params.detailemo_conditioning = self.detailemo_conditioning else: self.detailemo_conditioning = model_params.detailemo_conditioning supported_conditioning_keys = ['identity', 'jawpose', 'expression', 'detail', 'detailemo'] for c in self.detail_conditioning: if c not in supported_conditioning_keys: raise ValueError(f"Conditioning on '{c}' is not supported. Supported conditionings: {supported_conditioning_keys}") for c in self.detailemo_conditioning: if c not in supported_conditioning_keys: raise ValueError(f"Conditioning on '{c}' is not supported. Supported conditionings: {supported_conditioning_keys}") # which type of DECA network is used if 'deca_class' not in model_params.keys() or model_params.deca_class is None: print(f"Deca class is not specified. 
Defaulting to {str(DECA.__class__.__name__)}") # vanilla DECA by default (not EMOCA) deca_class = DECA else: # other type of DECA-inspired networks possible (such as ExpDECA, which is what EMOCA) deca_class = class_from_str(model_params.deca_class, sys.modules[__name__]) # instantiate the network self.deca = deca_class(config=model_params) self.mode = DecaMode[str(model_params.mode).upper()] self.stage_name = stage_name if self.stage_name is None: self.stage_name = "" if len(self.stage_name) > 0: self.stage_name += "_" # initialize the emotion perceptual loss (used for EMOCA supervision) self.emonet_loss = None self._init_emotion_loss() # initialize the au perceptual loss (not currently used in EMOCA) self.au_loss = None self._init_au_loss() # initialize the lip reading perceptual loss (not currently used in original EMOCA) self.lipread_loss = None self._init_lipread_loss() # MPL regressor from the encoded space to emotion labels (not used in EMOCA but could be used for direct emotion supervision) if 'mlp_emotion_predictor' in self.deca.config.keys(): # self._build_emotion_mlp(self.deca.config.mlp_emotion_predictor) self.emotion_mlp = EmotionMLP(self.deca.config.mlp_emotion_predictor, model_params) else: self.emotion_mlp = None def get_input_image_size(self): return (self.deca.config.image_size, self.deca.config.image_size) def _instantiate_deca(self, model_params): """ Instantiate the DECA network. """ # which type of DECA network is used if 'deca_class' not in model_params.keys() or model_params.deca_class is None: print(f"Deca class is not specified. Defaulting to {str(DECA.__class__.__name__)}") # vanilla DECA by default (not EMOCA) deca_class = DECA else: # other type of DECA-inspired networks possible (such as ExpDECA, which is what EMOCA) deca_class = class_from_str(model_params.deca_class, sys.modules[__name__]) # instantiate the network self.deca = deca_class(config=model_params) def _init_emotion_loss(self): """ Initialize the emotion perceptual loss (used for EMOCA supervision) """ if 'emonet_weight' in self.deca.config.keys() and bool(self.deca.config.get('emonet_model_path', False)): if self.emonet_loss is not None: emoloss_force_override = True if 'emoloss_force_override' in self.deca.config.keys() and self.deca.config.emoloss_force_override else False if self.emonet_loss.is_trainable(): if not emoloss_force_override: print("The old emonet loss is trainable and will not be overrided or replaced.") return # raise NotImplementedError("The old emonet loss was trainable. Changing a trainable loss is probably now " # "what you want implicitly. If you need this, use the '`'emoloss_force_override' config.") else: print("The old emonet loss is trainable but override is set so it will be replaced.") else: print("The old emonet loss is not trainable. 
It will be replaced.") if 'emonet_model_path' in self.deca.config.keys(): emonet_model_path = self.deca.config.emonet_model_path else: emonet_model_path=None # self.emonet_loss = EmoNetLoss(self.device, emonet=emonet_model_path) emoloss_trainable = True if 'emoloss_trainable' in self.deca.config.keys() and self.deca.config.emoloss_trainable else False emoloss_dual = True if 'emoloss_dual' in self.deca.config.keys() and self.deca.config.emoloss_dual else False normalize_features = self.deca.config.normalize_features if 'normalize_features' in self.deca.config.keys() else None emo_feat_loss = self.deca.config.emo_feat_loss if 'emo_feat_loss' in self.deca.config.keys() else None old_emonet_loss = self.emonet_loss self.emonet_loss = create_emo_loss(self.device, emoloss=emonet_model_path, trainable=emoloss_trainable, dual=emoloss_dual, normalize_features=normalize_features, emo_feat_loss=emo_feat_loss) if old_emonet_loss is not None and type(old_emonet_loss) != self.emonet_loss: print(f"The old emonet loss {old_emonet_loss.__class__.__name__} is replaced during reconfiguration by " f"new emotion loss {self.emonet_loss.__class__.__name__}") else: self.emonet_loss = None def _init_au_loss(self): """ Initialize the au perceptual loss (not currently used in EMOCA) """ if 'au_loss' in self.deca.config.keys(): if self.au_loss is not None: force_override = True if 'force_override' in self.deca.config.au_loss.keys() \ and self.deca.config.au_loss.force_override else False if self.au_loss.is_trainable(): if not force_override: print("The old AU loss is trainable and will not be overrided or replaced.") return # raise NotImplementedError("The old emonet loss was trainable. Changing a trainable loss is probably now " # "what you want implicitly. If you need this, use the '`'emoloss_force_override' config.") else: print("The old AU loss is trainable but override is set so it will be replaced.") else: print("The old AU loss is not trainable. It will be replaced.") old_au_loss = self.emonet_loss self.au_loss = create_au_loss(self.device, self.deca.config.au_loss) else: self.au_loss = None def _init_lipread_loss(self): """ Initialize the au perceptual loss (not currently used in EMOCA) """ if 'lipread_loss' in self.deca.config.keys() and self.deca.config.lipread_loss.get('load', True): if self.lipread_loss is not None: force_override = True if 'force_override' in self.deca.config.lipread_loss.keys() \ and self.deca.config.lipread_loss.force_override else False assert self.lipread_loss.is_trainable(), "Trainable lip reading loss is not supported yet." if self.lipread_loss.is_trainable(): if not force_override: print("The old lip reading loss is trainable and will not be overrided or replaced.") return # raise NotImplementedError("The old emonet loss was trainable. Changing a trainable loss is probably now " # "what you want implicitly. If you need this, use the '`'emoloss_force_override' config.") else: print("The old lip reading loss is trainable but override is set so it will be replaced.") else: print("The old lip reading loss is not trainable. It will be replaced.") # old_lipread_loss = self.emonet_loss self.lipread_loss = LipReadingLoss(self.device, self.deca.config.lipread_loss.lipread_loss) self.lipread_loss.eval() self.lipread_loss.requires_grad_(False) else: self.lipread_loss = None def reconfigure(self, model_params, inout_params, learning_params, stage_name="", downgrade_ok=False, train=True): """ Reconfigure the model. 
Usually used to switch between detail and coarse stages (which have separate configs) """ if (self.mode == DecaMode.DETAIL and model_params.mode != DecaMode.DETAIL) and not downgrade_ok: raise RuntimeError("You're switching the EMOCA mode from DETAIL to COARSE. Is this really what you want?!") self.inout_params = inout_params self.learning_params = learning_params if self.deca.__class__.__name__ != model_params.deca_class: old_deca_class = self.deca.__class__.__name__ state_dict = self.deca.state_dict() if 'deca_class' in model_params.keys(): deca_class = class_from_str(model_params.deca_class, sys.modules[__name__]) else: deca_class = DECA self.deca = deca_class(config=model_params) diff = set(state_dict.keys()).difference(set(self.deca.state_dict().keys())) if len(diff) > 0: raise RuntimeError(f"Some values from old state dict will not be used. This is probably not what you " f"want because it most likely means that the pretrained model's weights won't be used. " f"Maybe you messed up backbone compatibility (i.e. SWIN vs ResNet?) {diff}") ret = self.deca.load_state_dict(state_dict, strict=False) if len(ret.unexpected_keys) > 0: raise print(f"Unexpected keys: {ret.unexpected_keys}") missing_modules = set([s.split(".")[0] for s in ret.missing_keys]) print(f"Missing modules when upgrading from {old_deca_class} to {model_params.deca_class}:") print(missing_modules) else: self.deca._reconfigure(model_params) self._init_emotion_loss() self._init_au_loss() self.stage_name = stage_name if self.stage_name is None: self.stage_name = "" if len(self.stage_name) > 0: self.stage_name += "_" self.mode = DecaMode[str(model_params.mode).upper()] self.train(mode=train) print(f"EMOCA MODE RECONFIGURED TO: {self.mode}") if 'shape_contrain_type' in self.deca.config.keys() and str(self.deca.config.shape_constrain_type).lower() != 'none': shape_constraint = self.deca.config.shape_constrain_type else: shape_constraint = None if 'expression_constrain_type' in self.deca.config.keys() and str(self.deca.config.expression_constrain_type).lower() != 'none': expression_constraint = self.deca.config.expression_constrain_type else: expression_constraint = None if shape_constraint is not None and expression_constraint is not None: raise ValueError("Both shape constraint and expression constraint are active. 
This is probably not what we want.") def uses_texture(self): """ Check if the model uses texture """ return self.deca.uses_texture() def visualize(self, visdict, savepath, catdim=1): return self.deca.visualize(visdict, savepath, catdim) def train(self, mode: bool = True): # super().train(mode) # not necessary self.deca.train(mode) if self.emotion_mlp is not None: self.emotion_mlp.train(mode) if self.emonet_loss is not None: self.emonet_loss.eval() if self.deca.perceptual_loss is not None: self.deca.perceptual_loss.eval() if self.deca.id_loss is not None: self.deca.id_loss.eval() return self def to(self, *args, **kwargs): super().to(*args, **kwargs) return self def cuda(self, device=None): super().cuda(device) return self def cpu(self): super().cpu() return self def forward(self, batch): values = self.encode(batch, training=False) values = self.decode(values, training=False) return values def _unwrap_list(self, codelist): shapecode, texcode, expcode, posecode, cam, lightcode = codelist return shapecode, texcode, expcode, posecode, cam, lightcode def _unwrap_list_to_dict(self, codelist): shapecode, texcode, expcode, posecode, cam, lightcode = codelist return {'shape': shapecode, 'tex': texcode, 'exp': expcode, 'pose': posecode, 'cam': cam, 'light': lightcode} # return shapecode, texcode, expcode, posecode, cam, lightcode def _encode_flame(self, images, **kwargs): if self.mode == DecaMode.COARSE or \ (self.mode == DecaMode.DETAIL and self.deca.config.train_coarse): # forward pass with gradients (for coarse stage (used), or detail stage with coarse training (not used)) parameters = self.deca._encode_flame(images, **kwargs) elif self.mode == DecaMode.DETAIL: # in detail stage, the coarse forward pass does not need gradients with torch.no_grad(): parameters = self.deca._encode_flame(images, **kwargs) else: raise ValueError(f"Invalid EMOCA Mode {self.mode}") code_list, original_code = self.deca.decompose_code(parameters) # shapecode, texcode, expcode, posecode, cam, lightcode = code_list # return shapecode, texcode, expcode, posecode, cam, lightcode, original_code return code_list, original_code def _expression_ring_exchange(self, original_batch_size, K, expcode, posecode, shapecode, lightcode, texcode, images, cam, lmk, masks, va, expr7, affectnetexp, detailcode=None, detailemocode=None, exprw=None, lmk_mp=None, mica_images=None): """ Deprecated. Expression ring exchange is not used in EMOCA (nor DECA). 
""" new_order = np.array([np.random.permutation(K) + i * K for i in range(original_batch_size)]) new_order = new_order.flatten() expcode_new = expcode[new_order] ## append new shape code data expcode = torch.cat([expcode, expcode_new], dim=0) texcode = torch.cat([texcode, texcode], dim=0) shapecode = torch.cat([shapecode, shapecode], dim=0) globpose = posecode[..., :3] jawpose = posecode[..., 3:] if self.deca.config.expression_constrain_use_jaw_pose: jawpose_new = jawpose[new_order] jawpose = torch.cat([jawpose, jawpose_new], dim=0) else: jawpose = torch.cat([jawpose, jawpose], dim=0) if self.deca.config.expression_constrain_use_global_pose: globpose_new = globpose[new_order] globpose = torch.cat([globpose, globpose_new], dim=0) else: globpose = torch.cat([globpose, globpose], dim=0) if self.deca.config.expression_constrain_use_jaw_pose or self.deca.config.expression_constrain_use_global_pose: posecode = torch.cat([globpose, jawpose], dim=-1) # posecode_new = torch.cat([globpose, jawpose], dim=-1) else: # posecode_new = posecode # posecode_new = posecode posecode = torch.cat([posecode, posecode], dim=0) cam = torch.cat([cam, cam], dim=0) lightcode = torch.cat([lightcode, lightcode], dim=0) ## append gt images = torch.cat([images, images], dim=0) # images = images.view(-1, images.shape[-3], images.shape[-2], images.shape[-1]) if mica_images is not None: mica_images = torch.cat([mica_images, mica_images], dim=0) lmk = torch.cat([lmk, lmk], dim=0) # lmk = lmk.view(-1, lmk.shape[-2], lmk.shape[-1]) if lmk_mp is not None: lmk_mp = torch.cat([lmk_mp, lmk_mp], dim=0) masks = torch.cat([masks, masks], dim=0) # NOTE: # Here we could think about what makes sense to exchange # 1) Do we exchange all emotion GT (VA and expression) within the ring? # 2) Do we exchange only the GT on which the ring is constructed (AffectNet ring based on binned VA or expression or Emonet feature?) # note: if we use EmoMLP that goes from (expression, jawpose, detailcode) -> (v,a,expr) and we exchange # ALL of these, the EmoMLP prediction will of course be the same. The output image still changes, # so EmoNet loss (if used) would be different. Same for the photometric/landmark losses. # TODO: # For now I decided to exchange everything but this should probably be experimented with # I would argue though, that exchanging the GT is the right thing to do if va is not None: va = torch.cat([va, va[new_order]], dim=0) if expr7 is not None: expr7 = torch.cat([expr7, expr7[new_order]], dim=0) if affectnetexp is not None: affectnetexp = torch.cat([affectnetexp, affectnetexp[new_order]], dim=0) if exprw is not None: exprw = torch.cat([exprw, exprw[new_order]], dim=0) if detailcode is not None: #TODO: to exchange or not to exchange, that is the question, the answer is probably NO detailcode = torch.cat([detailcode, detailcode], dim=0) # detailcode = torch.cat([detailcode, detailcode[new_order]], dim=0) if detailemocode is not None: # TODO: to exchange or not to exchange, that is the question, the answer is probably YES detailemocode = torch.cat([detailemocode, detailemocode[new_order]], dim=0) return expcode, posecode, shapecode, lightcode, texcode, images, cam, lmk, masks, va, expr7, affectnetexp, \ detailcode, detailemocode, exprw, lmk_mp, mica_images # return expcode, posecode, shapecode, lightcode, texcode, images, cam, lmk, masks, va, expr7 def encode(self, batch, training=True) -> dict: """ Forward encoding pass of the model. Takes a batch of images and returns the corresponding latent codes for each image. 
:param batch: Batch of images to encode. batch['image'] [batch_size, ring_size, 3, image_size, image_size]. For a training forward pass, additional corresponding data are necessery such as 'landmarks' and 'masks'. For a testing pass, the images suffice. :param training: Whether the forward pass is for training or testing. """ codedict = {} original_batch_size = batch['image'].shape[0] images = batch['image'] if 'mica_images' in batch.keys(): mica_images = batch['mica_images'] else: mica_images = None if len(images.shape) == 5: K = images.shape[1] elif len(images.shape) == 4: K = 1 else: raise RuntimeError("Invalid image batch dimensions.") # [B, K, 3, size, size] ==> [BxK, 3, size, size] images = images.view(-1, images.shape[-3], images.shape[-2], images.shape[-1]) if mica_images is not None: mica_images = mica_images.view(-1, mica_images.shape[-3], mica_images.shape[-2], mica_images.shape[-1]) if 'landmark' in batch.keys(): lmk = batch['landmark'] lmk = lmk.view(-1, lmk.shape[-2], lmk.shape[-1]) if 'landmark_mediapipe' in batch.keys(): lmk_mp = batch['landmark_mediapipe'] lmk_mp = lmk_mp.view(-1, lmk_mp.shape[-2], lmk_mp.shape[-1]) else: lmk_mp = None if 'mask' in batch.keys(): masks = batch['mask'] masks = masks.view(-1, images.shape[-2], images.shape[-1]) # valence / arousal - not necessary unless we want to use VA for supervision (not done in EMOCA) if 'va' in batch: va = batch['va'] va = va.view(-1, va.shape[-1]) else: va = None # 7 basic expression - not necessary unless we want to use expression for supervision (not done in EMOCA or DECA) if 'expr7' in batch: expr7 = batch['expr7'] expr7 = expr7.view(-1, expr7.shape[-1]) else: expr7 = None # affectnet basic expression - not necessary unless we want to use expression for supervision (not done in EMOCA or DECA) if 'affectnetexp' in batch: affectnetexp = batch['affectnetexp'] affectnetexp = affectnetexp.view(-1, affectnetexp.shape[-1]) else: affectnetexp = None # expression weights if supervising by expression is used (to balance the classification loss) - not done in EMOCA or DECA if 'expression_weight' in batch: exprw = batch['expression_weight'] exprw = exprw.view(-1, exprw.shape[-1]) else: exprw = None # 1) COARSE STAGE # forward pass of the coarse encoder # shapecode, texcode, expcode, posecode, cam, lightcode = self._encode_flame(images) code, original_code = self._encode_flame(images, mica_image=mica_images) shapecode, texcode, expcode, posecode, cam, lightcode = self._unwrap_list(code) if original_code is not None: original_code = self._unwrap_list_to_dict(original_code) if training: # If training, we employ the disentanglement strategy if self.mode == DecaMode.COARSE: if self.deca.config.shape_constrain_type == 'same': ## Enforce that all identity shape codes within ring are the same. The batch is duplicated ## and the duplicated part's shape codes are shuffled. 
# reshape shapecode => [B, K, n_shape] # shapecode_idK = shapecode.view(self.batch_size, self.deca.K, -1) shapecode_idK = shapecode.view(original_batch_size, K, -1) # get mean id shapecode_mean = torch.mean(shapecode_idK, dim=[1]) # shapecode_new = shapecode_mean[:, None, :].repeat(1, self.deca.K, 1) shapecode_new = shapecode_mean[:, None, :].repeat(1, K, 1) shapecode = shapecode_new.view(-1, self.deca._get_num_shape_params()) # do the same for the original code dict shapecode_orig = original_code['shape'] shapecode_orig_idK = shapecode_orig.view(original_batch_size, K, -1) shapecode_orig_mean = torch.mean(shapecode_orig_idK, dim=[1]) shapecode_orig_new = shapecode_orig_mean[:, None, :].repeat(1, K, 1) original_code['shape'] = shapecode_orig_new.view(-1, self.deca._get_num_shape_params()) elif self.deca.config.shape_constrain_type == 'exchange': ## Shuffle identitys shape codes within ring (they should correspond to the same identity) ''' make sure s0, s1 is something to make shape close the difference from ||so - s1|| is the later encourage s0, s1 is cloase in l2 space, but not really ensure shape will be close ''' # new_order = np.array([np.random.permutation(self.deca.config.train_K) + i * self.deca.config.train_K for i in range(self.deca.config.batch_size_train)]) # new_order = np.array([np.random.permutation(self.deca.config.train_K) + i * self.deca.config.train_K for i in range(original_batch_size)]) new_order = np.array([np.random.permutation(K) + i * K for i in range(original_batch_size)]) new_order = new_order.flatten() shapecode_new = shapecode[new_order] ## append new shape code data shapecode = torch.cat([shapecode, shapecode_new], dim=0) texcode = torch.cat([texcode, texcode], dim=0) expcode = torch.cat([expcode, expcode], dim=0) posecode = torch.cat([posecode, posecode], dim=0) cam = torch.cat([cam, cam], dim=0) lightcode = torch.cat([lightcode, lightcode], dim=0) ## append gt images = torch.cat([images, images], dim=0) # images = images.view(-1, images.shape[-3], images.shape[-2], images.shape[-1]) if mica_images is not None: mica_images = torch.cat([mica_images, mica_images], dim=0) lmk = torch.cat([lmk, lmk], dim=0) # lmk = lmk.view(-1, lmk.shape[-2], lmk.shape[-1]) if lmk_mp is not None: lmk_mp = torch.cat([lmk_mp, lmk_mp], dim=0) masks = torch.cat([masks, masks], dim=0) if va is not None: va = torch.cat([va, va], dim=0) if expr7 is not None: expr7 = torch.cat([expr7, expr7], dim=0) # do the same for the original code dict shapecode_orig = original_code['shape'] shapecode_orig_new = shapecode_orig[new_order] original_code['shape'] = torch.cat([shapecode_orig, shapecode_orig_new], dim=0) original_code['tex'] = torch.cat([original_code['tex'], original_code['tex']], dim=0) original_code['exp'] = torch.cat([original_code['exp'], original_code['exp']], dim=0) original_code['pose'] = torch.cat([original_code['pose'], original_code['pose']], dim=0) original_code['cam'] = torch.cat([original_code['cam'], original_code['cam']], dim=0) original_code['light'] = torch.cat([original_code['light'], original_code['light']], dim=0) elif self.deca.config.shape_constrain_type == 'shuffle_expression': assert original_code is not None ## DEPRECATED, NOT USED IN EMOCA OR DECA new_order = np.random.permutation(K*original_batch_size) old_order = np.arange(K*original_batch_size) while (new_order == old_order).any(): # ugly hacky way of assuring that every element is permuted new_order = np.random.permutation(K * original_batch_size) codedict['new_order'] = new_order # exchange expression 
expcode_new = expcode[new_order] expcode = torch.cat([expcode, expcode_new], dim=0) # exchange jaw pose (but not global pose) global_pose = posecode[:, :3] jaw_pose = posecode[:, 3:] jaw_pose_new = jaw_pose[new_order] jaw_pose = torch.cat([jaw_pose, jaw_pose_new], dim=0) global_pose = torch.cat([global_pose, global_pose], dim=0) posecode = torch.cat([global_pose, jaw_pose], dim=1) ## duplicate the rest shapecode = torch.cat([shapecode, shapecode], dim=0) texcode = torch.cat([texcode, texcode], dim=0) cam = torch.cat([cam, cam], dim=0) lightcode = torch.cat([lightcode, lightcode], dim=0) ## duplicate gt if any images = torch.cat([images, images], dim=0) # images = images.view(-1, images.shape[-3], images.shape[-2], images.shape[-1]) if mica_images is not None: mica_images = torch.cat([mica_images, mica_images], dim=0) print(f"TRAINING: {training}") if lmk is not None: lmk = torch.cat([lmk, lmk], dim=0) # lmk = lmk.view(-1, lmk.shape[-2], lmk.shape[-1]) if lmk_mp is not None: lmk_mp = torch.cat([lmk_mp, lmk_mp], dim=0) masks = torch.cat([masks, masks], dim=0) ref_images_identity_idxs = np.concatenate([old_order, old_order]) ref_images_expression_idxs = np.concatenate([old_order, new_order]) codedict["ref_images_identity_idxs"] = ref_images_identity_idxs codedict["ref_images_expression_idxs"] = ref_images_expression_idxs if va is not None: va = torch.cat([va, va[new_order]], dim=0) if expr7 is not None: expr7 = torch.cat([expr7, expr7[new_order]], dim=0) # do the same for the original code dict original_code['shape'] = torch.cat([original_code['shape'], original_code['shape']], dim=0) original_code['tex'] = torch.cat([original_code['tex'], original_code['tex']], dim=0) original_code['exp'] = torch.cat([original_code['exp'], original_code['exp'][new_order]], dim=0) original_global_pose = original_code['pose'][:, :3] original_jaw_pose = original_code['pose'][:, 3:] original_jaw_pose = torch.cat([original_jaw_pose, original_jaw_pose[new_order]], dim=0) original_global_pose = torch.cat([original_global_pose, original_global_pose], dim=0) original_code['pose'] = torch.cat([original_global_pose, original_jaw_pose], dim=1) original_code['cam'] = torch.cat([original_code['cam'], original_code['cam']], dim=0) original_code['light'] = torch.cat([original_code['light'], original_code['light']], dim=0) elif self.deca.config.shape_constrain_type == 'shuffle_shape': ## The shape codes are shuffled without duplication new_order = np.random.permutation(K*original_batch_size) old_order = np.arange(K*original_batch_size) while (new_order == old_order).any(): # ugly hacky way of assuring that every element is permuted new_order = np.random.permutation(K * original_batch_size) codedict['new_order'] = new_order shapecode_new = shapecode[new_order] ## append new shape code data shapecode = torch.cat([shapecode, shapecode_new], dim=0) texcode = torch.cat([texcode, texcode], dim=0) expcode = torch.cat([expcode, expcode], dim=0) posecode = torch.cat([posecode, posecode], dim=0) cam = torch.cat([cam, cam], dim=0) lightcode = torch.cat([lightcode, lightcode], dim=0) ## append gt images = torch.cat([images, images], dim=0) # images = images.view(-1, images.shape[-3], images.shape[-2], images.shape[-1]) if mica_images is not None: mica_images = torch.cat([mica_images, mica_images], dim=0) if lmk is not None: lmk = torch.cat([lmk, lmk], dim=0) # lmk = lmk.view(-1, lmk.shape[-2], lmk.shape[-1]) masks = torch.cat([masks, masks], dim=0) ref_images_identity_idxs = np.concatenate([old_order, new_order]) 
ref_images_expression_idxs = np.concatenate([old_order, old_order]) codedict["ref_images_identity_idxs"] = ref_images_identity_idxs codedict["ref_images_expression_idxs"] = ref_images_expression_idxs if va is not None: va = torch.cat([va, va], dim=0) if expr7 is not None: expr7 = torch.cat([expr7, expr7], dim=0) # do the same for the original code dict shapecode_orig = original_code['shape'] shapecode_orig_new = shapecode_orig[new_order] original_code['shape'] = torch.cat([shapecode_orig, shapecode_orig_new], dim=0) original_code['tex'] = torch.cat([original_code['tex'], original_code['tex']], dim=0) original_code['exp'] = torch.cat([original_code['exp'], original_code['exp']], dim=0) original_code['pose'] = torch.cat([original_code['pose'], original_code['pose']], dim=0) original_code['cam'] = torch.cat([original_code['cam'], original_code['cam']], dim=0) original_code['light'] = torch.cat([original_code['light'], original_code['light']], dim=0) original_code['ref_images_identity_idxs'] = ref_images_identity_idxs original_code['ref_images_expression_idxs'] = ref_images_expression_idxs elif 'expression_constrain_type' in self.deca.config.keys() and \ self.deca.config.expression_constrain_type == 'same': ## NOT USED IN EMOCA OR DECA, deprecated # reshape shapecode => [B, K, n_shape] # shapecode_idK = shapecode.view(self.batch_size, self.deca.K, -1) expcode_idK = expcode.view(original_batch_size, K, -1) # get mean id expcode_mean = torch.mean(expcode_idK, dim=[1]) # shapecode_new = shapecode_mean[:, None, :].repeat(1, self.deca.K, 1) expcode_new = expcode_mean[:, None, :].repeat(1, K, 1) expcode = expcode_new.view(-1, self.deca._get_num_shape_params()) # do the same thing for the original code dict expcode_idK = original_code['exp'].view(original_batch_size, K, -1) expcode_mean = torch.mean(expcode_idK, dim=[1]) expcode_new = expcode_mean[:, None, :].repeat(1, K, 1) original_code['exp'] = expcode_new.view(-1, self.deca._get_num_shape_params()) elif 'expression_constrain_type' in self.deca.config.keys() and \ self.deca.config.expression_constrain_type == 'exchange': ## NOT USED IN EMOCA OR DECA, deprecated expcode, posecode, shapecode, lightcode, texcode, images, cam, lmk, \ masks, va, expr7, affectnetexp, _, _, exprw, lmk_mp, mica_images = \ self._expression_ring_exchange(original_batch_size, K, expcode, posecode, shapecode, lightcode, texcode, images, cam, lmk, masks, va, expr7, affectnetexp, None, None, exprw, lmk_mp, mica_images) # (self, original_batch_size, K, # expcode, posecode, shapecode, lightcode, texcode, # images, cam, lmk, masks, va, expr7, affectnetexp, # detailcode=None, detailemocode=None, exprw=None): # 2) DETAIL STAGE if self.mode == DecaMode.DETAIL: all_detailcode = self.deca.E_detail(images) # identity-based detail code detailcode = all_detailcode[:, :self.deca.n_detail] # detail emotion code is deprecated and will be empty detailemocode = all_detailcode[:, self.deca.n_detail:(self.deca.n_detail + self.deca.n_detail_emo)] if training: # If training, we employ the disentanglement strategy if self.deca.config.detail_constrain_type == 'exchange': # Identity within the same ring should be the same, so they should have the same code. # This can be enforced by shuffling. The batch is duplicated and the duplicated part's code shuffled ''' make sure s0, s1 is something to make shape close the difference from ||so - s1|| is the later encourage s0, s1 is cloase in l2 space, but not really ensure shape will be close ''' # this creates a per-ring random permutation. 
The detail exchange happens ONLY between the same # identities (within the ring) but not outside (no cross-identity detail exchange) new_order = np.array( # [np.random.permutation(self.deca.config.train_K) + i * self.deca.config.train_K for i in range(original_batch_size)]) [np.random.permutation(K) + i * K for i in range(original_batch_size)]) new_order = new_order.flatten() detailcode_new = detailcode[new_order] detailcode = torch.cat([detailcode, detailcode_new], dim=0) detailemocode = torch.cat([detailemocode, detailemocode], dim=0) ## append new shape code data shapecode = torch.cat([shapecode, shapecode], dim=0) texcode = torch.cat([texcode, texcode], dim=0) expcode = torch.cat([expcode, expcode], dim=0) posecode = torch.cat([posecode, posecode], dim=0) cam = torch.cat([cam, cam], dim=0) lightcode = torch.cat([lightcode, lightcode], dim=0) ## append gt images = torch.cat([images, images], dim=0) # images = images.view(-1, images.shape[-3], images.shape[-2], images.shape[-1]) if mica_images is not None: mica_images = torch.cat([mica_images, mica_images], dim=0) lmk = torch.cat([lmk, lmk], dim=0) # lmk = lmk.view(-1, lmk.shape[-2], lmk.shape[-1]) masks = torch.cat([masks, masks], dim=0) if va is not None: va = torch.cat([va, va], dim=0) if expr7 is not None: expr7 = torch.cat([expr7, expr7], dim=0) elif self.deca.config.detail_constrain_type == 'shuffle_expression': ## Deprecated and not used in EMOCA or DECA new_order = np.random.permutation(K*original_batch_size) old_order = np.arange(K*original_batch_size) while (new_order == old_order).any(): # ugly hacky way of assuring that every element is permuted new_order = np.random.permutation(K * original_batch_size) codedict['new_order'] = new_order # exchange expression expcode_new = expcode[new_order] expcode = torch.cat([expcode, expcode_new], dim=0) # exchange emotion code, but not (identity-based) detailcode detailemocode_new = detailemocode[new_order] detailemocode = torch.cat([detailemocode, detailemocode_new], dim=0) detailcode = torch.cat([detailcode, detailcode], dim=0) # exchange jaw pose (but not global pose) global_pose = posecode[:, :3] jaw_pose = posecode[:, 3:] jaw_pose_new = jaw_pose[new_order] jaw_pose = torch.cat([jaw_pose, jaw_pose_new], dim=0) global_pose = torch.cat([global_pose, global_pose], dim=0) posecode = torch.cat([global_pose, jaw_pose], dim=1) ## duplicate the rest shapecode = torch.cat([shapecode, shapecode], dim=0) texcode = torch.cat([texcode, texcode], dim=0) cam = torch.cat([cam, cam], dim=0) lightcode = torch.cat([lightcode, lightcode], dim=0) ## duplicate gt if any images = torch.cat([images, images], dim=0) # images = images.view(-1, images.shape[-3], images.shape[-2], images.shape[-1]) if mica_images is not None: mica_images = torch.cat([mica_images, mica_images], dim=0) print(f"TRAINING: {training}") if lmk is not None: lmk = torch.cat([lmk, lmk], dim=0) # lmk = lmk.view(-1, lmk.shape[-2], lmk.shape[-1]) masks = torch.cat([masks, masks], dim=0) ref_images_identity_idxs = np.concatenate([old_order, old_order]) ref_images_expression_idxs = np.concatenate([old_order, new_order]) codedict["ref_images_identity_idxs"] = ref_images_identity_idxs codedict["ref_images_expression_idxs"] = ref_images_expression_idxs if va is not None: va = torch.cat([va, va[new_order]], dim=0) if expr7 is not None: expr7 = torch.cat([expr7, expr7[new_order]], dim=0) elif self.deca.config.detail_constrain_type == 'shuffle_shape': ## Shuffles teh shape code without duplicating the batch new_order = 
np.random.permutation(K*original_batch_size) old_order = np.arange(K*original_batch_size) while (new_order == old_order).any(): # ugly hacky way of assuring that every element is permuted new_order = np.random.permutation(K * original_batch_size) codedict['new_order'] = new_order shapecode_new = shapecode[new_order] ## append new shape code data shapecode = torch.cat([shapecode, shapecode_new], dim=0) # exchange (identity-based) detailcode, but not emotion code detailcode_new = detailcode[new_order] detailcode = torch.cat([detailcode, detailcode_new], dim=0) detailemocode = torch.cat([detailemocode, detailemocode], dim=0) texcode = torch.cat([texcode, texcode], dim=0) expcode = torch.cat([expcode, expcode], dim=0) posecode = torch.cat([posecode, posecode], dim=0) cam = torch.cat([cam, cam], dim=0) lightcode = torch.cat([lightcode, lightcode], dim=0) ## append gt images = torch.cat([images, images], dim=0) # images = images.view(-1, images.shape[-3], images.shape[-2], images.shape[-1]) if mica_images is not None: mica_images = torch.cat([mica_images, mica_images], dim=0) if lmk is not None: lmk = torch.cat([lmk, lmk], dim=0) # lmk = lmk.view(-1, lmk.shape[-2], lmk.shape[-1]) masks = torch.cat([masks, masks], dim=0) ref_images_identity_idxs = np.concatenate([old_order, new_order]) ref_images_expression_idxs = np.concatenate([old_order, old_order]) codedict["ref_images_identity_idxs"] = ref_images_identity_idxs codedict["ref_images_expression_idxs"] = ref_images_expression_idxs if va is not None: va = torch.cat([va, va], dim=0) if expr7 is not None: expr7 = torch.cat([expr7, expr7], dim=0) elif 'expression_constrain_type' in self.deca.config.keys() and \ self.deca.config.expression_constrain_type == 'exchange': expcode, posecode, shapecode, lightcode, texcode, images, cam, lmk, masks, va, expr7, affectnetexp, detailcode, detailemocode, exprw, lmk_mp, mica_images = \ self._expression_ring_exchange(original_batch_size, K, expcode, posecode, shapecode, lightcode, texcode, images, cam, lmk, masks, va, expr7, affectnetexp, detailcode, detailemocode, exprw, lmk_mp, mica_images) codedict['shapecode'] = shapecode codedict['texcode'] = texcode codedict['expcode'] = expcode codedict['posecode'] = posecode codedict['cam'] = cam codedict['lightcode'] = lightcode if self.mode == DecaMode.DETAIL: codedict['detailcode'] = detailcode codedict['detailemocode'] = detailemocode codedict['images'] = images if mica_images is not None: codedict['mica_images'] = mica_images if 'mask' in batch.keys(): codedict['masks'] = masks if 'landmark' in batch.keys(): codedict['lmk'] = lmk if lmk_mp is not None: codedict['lmk_mp'] = lmk_mp if 'va' in batch.keys(): codedict['va'] = va if 'expr7' in batch.keys(): codedict['expr7'] = expr7 if 'affectnetexp' in batch.keys(): codedict['affectnetexp'] = affectnetexp if 'expression_weight' in batch.keys(): codedict['expression_weight'] = exprw if original_code is not None: codedict['original_code'] = original_code return codedict def _create_conditioning_lists(self, codedict, condition_list): detail_conditioning_list = [] if 'globalpose' in condition_list: detail_conditioning_list += [codedict["posecode"][:, :3]] if 'jawpose' in condition_list: detail_conditioning_list += [codedict["posecode"][:, 3:]] if 'identity' in condition_list: detail_conditioning_list += [codedict["shapecode"]] if 'expression' in condition_list: detail_conditioning_list += [codedict["expcode"]] if isinstance(self.deca.D_detail, Generator): # the detail codes might be excluded from conditioning based on the 
Generator architecture (for instance # for AdaIn Generator) if 'detail' in condition_list: detail_conditioning_list += [codedict["detailcode"]] if 'detailemo' in condition_list: detail_conditioning_list += [codedict["detailemocode"]] return detail_conditioning_list def decode(self, codedict, training=True, render=True, **kwargs) -> dict: """ Forward decoding pass of the model. Takes the latent code predicted by the encoding stage and reconstructs and renders the shape. :param codedict: Batch dict of the predicted latent codes :param training: Whether the forward pass is for training or testing. """ shapecode = codedict['shapecode'] expcode = codedict['expcode'] posecode = codedict['posecode'] texcode = codedict['texcode'] cam = codedict['cam'] lightcode = codedict['lightcode'] images = codedict['images'] if 'masks' in codedict.keys(): masks = codedict['masks'] else: masks = None effective_batch_size = images.shape[0] # this is the current batch size after all training augmentations modifications # 1) Reconstruct the face mesh # FLAME - world space if not isinstance(self.deca.flame, FLAME_mediapipe): verts, landmarks2d, landmarks3d = self.deca.flame(shape_params=shapecode, expression_params=expcode, pose_params=posecode) landmarks2d_mediapipe = None else: verts, landmarks2d, landmarks3d, landmarks2d_mediapipe = self.deca.flame(shapecode, expcode, posecode) # world to camera trans_verts = util.batch_orth_proj(verts, cam) predicted_landmarks = util.batch_orth_proj(landmarks2d, cam)[:, :, :2] # camera to image space trans_verts[:, :, 1:] = -trans_verts[:, :, 1:] predicted_landmarks[:, :, 1:] = - predicted_landmarks[:, :, 1:] if landmarks2d_mediapipe is not None: predicted_landmarks_mediapipe = util.batch_orth_proj(landmarks2d_mediapipe, cam)[:, :, :2] predicted_landmarks_mediapipe[:, :, 1:] = - predicted_landmarks_mediapipe[:, :, 1:] if self.uses_texture(): albedo = self.deca.flametex(texcode) else: # if not using texture, default to gray albedo = torch.ones([effective_batch_size, 3, self.deca.config.uv_size, self.deca.config.uv_size], device=images.device) * 0.5 # 2) Render the coarse image if render: ops = self.deca.render(verts, trans_verts, albedo, lightcode) # mask mask_face_eye = F.grid_sample(self.deca.uv_face_eye_mask.expand(effective_batch_size, -1, -1, -1), ops['grid'].detach(), align_corners=False) # images predicted_images = ops['images'] # predicted_images = ops['images'] * mask_face_eye * ops['alpha_images'] # predicted_images_no_mask = ops['images'] #* mask_face_eye * ops['alpha_images'] segmentation_type = None if isinstance(self.deca.config.useSeg, bool): if self.deca.config.useSeg: segmentation_type = 'gt' else: segmentation_type = 'rend' elif isinstance(self.deca.config.useSeg, str): segmentation_type = self.deca.config.useSeg else: raise RuntimeError(f"Invalid 'useSeg' type: '{type(self.deca.config.useSeg)}'") if segmentation_type not in ["gt", "rend", "intersection", "union"]: raise ValueError(f"Invalid segmentation type for masking '{segmentation_type}'") if masks is None: # if mask not provided, the only mask available is the rendered one segmentation_type = 'rend' elif masks.shape[-1] != predicted_images.shape[-1] or masks.shape[-2] != predicted_images.shape[-2]: # resize masks if need be (this is only done if configuration was changed at some point after training) dims = masks.ndim == 3 if dims: masks = masks[:, None, :, :] masks = F.interpolate(masks, size=predicted_images.shape[-2:], mode='bilinear') if dims: masks = masks[:, 0, ...] 
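# --- Illustrative sketch (not part of the model code above) ---------------------
# decode() maps world-space FLAME vertices into image space with a weak-perspective
# (scaled orthographic) camera via util.batch_orth_proj. This standalone sketch
# shows the underlying math under the assumption that `cam` packs [scale, tx, ty];
# the helper name `weak_perspective_project` is hypothetical, for illustration only.
import torch

def weak_perspective_project(verts: torch.Tensor, cam: torch.Tensor) -> torch.Tensor:
    """verts: (B, N, 3) world-space points; cam: (B, 3) = [scale, tx, ty]."""
    scale = cam[:, 0:1].unsqueeze(-1)        # (B, 1, 1)
    trans = cam[:, 1:3].unsqueeze(1)         # (B, 1, 2) in-plane translation
    xy = scale * (verts[:, :, :2] + trans)   # translate, then isotropic scaling
    # keep z so a rasterizer can still resolve depth ordering
    out = torch.cat([xy, verts[:, :, 2:]], dim=-1)
    out[:, :, 1:] = -out[:, :, 1:]           # flip y (and z), as decode() does above
    return out

# e.g. weak_perspective_project(torch.randn(2, 5023, 3), torch.tensor([[5., 0., 0.]] * 2))
# ---------------------------------------------------------------------------------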
# resize images if need be (this is only done if configuration was changed at some point after training) if images.shape[-1] != predicted_images.shape[-1] or images.shape[-2] != predicted_images.shape[-2]: ## special case only for inference time if the rendering image sizes have been changed images_resized = F.interpolate(images, size=predicted_images.shape[-2:], mode='bilinear') else: images_resized = images # what type of segmentation we use if segmentation_type == "gt": # GT stands for external segmetnation predicted by face parsing or similar masks = masks[:, None, :, :] elif segmentation_type == "rend": # mask rendered as a silhouette of the face mesh masks = mask_face_eye * ops['alpha_images'] elif segmentation_type == "intersection": # intersection of the two above masks = masks[:, None, :, :] * mask_face_eye * ops['alpha_images'] elif segmentation_type == "union": # union of the first two options masks = torch.max(masks[:, None, :, :], mask_face_eye * ops['alpha_images']) else: raise RuntimeError(f"Invalid segmentation type for masking '{segmentation_type}'") if self.deca.config.background_from_input in [True, "input"]: if images.shape[-1] != predicted_images.shape[-1] or images.shape[-2] != predicted_images.shape[-2]: ## special case only for inference time if the rendering image sizes have been changed predicted_images = (1. - masks) * images_resized + masks * predicted_images else: predicted_images = (1. - masks) * images + masks * predicted_images elif self.deca.config.background_from_input in [False, "black"]: predicted_images = masks * predicted_images elif self.deca.config.background_from_input in ["none"]: predicted_images = predicted_images else: raise ValueError(f"Invalid type of background modification {self.deca.config.background_from_input}") # 3) Render the detail image if self.mode == DecaMode.DETAIL: detailcode = codedict['detailcode'] detailemocode = codedict['detailemocode'] # a) Create the detail conditioning lists detail_conditioning_list = self._create_conditioning_lists(codedict, self.detail_conditioning) detailemo_conditioning_list = self._create_conditioning_lists(codedict, self.detailemo_conditioning) final_detail_conditioning_list = detail_conditioning_list + detailemo_conditioning_list # b) Pass the detail code and the conditions through the detail generator to get displacement UV map if isinstance(self.deca.D_detail, Generator): uv_z = self.deca.D_detail(torch.cat(final_detail_conditioning_list, dim=1)) elif isinstance(self.deca.D_detail, GeneratorAdaIn): uv_z = self.deca.D_detail(z=torch.cat([detailcode, detailemocode], dim=1), cond=torch.cat(final_detail_conditioning_list, dim=1)) else: raise ValueError(f"This class of generarator is not supported: '{self.deca.D_detail.__class__.__name__}'") # if there is a displacement mask, apply it (DEPRECATED and not USED in DECA or EMOCA) if hasattr(self.deca, 'displacement_mask') and self.deca.displacement_mask is not None: if 'apply_displacement_masks' in self.deca.config.keys() and self.deca.config.apply_displacement_masks: uv_z = uv_z * self.deca.displacement_mask # uv_z = self.deca.D_detail(torch.cat([posecode[:, 3:], expcode, detailcode], dim=1)) # render detail if render: detach_from_coarse_geometry = not self.deca.config.train_coarse uv_detail_normals, uv_coarse_vertices = self.deca.displacement2normal(uv_z, verts, ops['normals'], detach=detach_from_coarse_geometry) uv_shading = self.deca.render.add_SHlight(uv_detail_normals, lightcode.detach()) uv_texture = albedo.detach() * uv_shading # batch size X 
image_rows X image_cols X 2 # you can query the grid for UV values of the face mesh at pixel locations grid = ops['grid'] if detach_from_coarse_geometry: # if the grid is detached, the gradient of the positions of UV-values in image space won't flow back to the geometry grid = grid.detach() predicted_detailed_image = F.grid_sample(uv_texture, grid, align_corners=False) if self.deca.config.background_from_input in [True, "input"]: if images.shape[-1] != predicted_images.shape[-1] or images.shape[-2] != predicted_images.shape[-2]: ## special case only for inference time if the rendering image sizes have been changed # images_resized = F.interpolate(images, size=predicted_images.shape[-2:], mode='bilinear') ## before bugfix # predicted_images = (1. - masks) * images_resized + masks * predicted_images ## after bugfix predicted_detailed_image = (1. - masks) * images_resized + masks * predicted_detailed_image else: predicted_detailed_image = (1. - masks) * images + masks * predicted_detailed_image elif self.deca.config.background_from_input in [False, "black"]: predicted_detailed_image = masks * predicted_detailed_image elif self.deca.config.background_from_input in ["none"]: predicted_detailed_image = predicted_detailed_image else: raise ValueError(f"Invalid type of background modification {self.deca.config.background_from_input}") # --- extract texture uv_pverts = self.deca.render.world2uv(trans_verts).detach() uv_gt = F.grid_sample(torch.cat([images_resized, masks], dim=1), uv_pverts.permute(0, 2, 3, 1)[:, :, :, :2], mode='bilinear') uv_texture_gt = uv_gt[:, :3, :, :].detach() uv_mask_gt = uv_gt[:, 3:, :, :].detach() # self-occlusion normals = util.vertex_normals(trans_verts, self.deca.render.faces.expand(effective_batch_size, -1, -1)) uv_pnorm = self.deca.render.world2uv(normals) uv_mask = (uv_pnorm[:, -1, :, :] < -0.05).float().detach() uv_mask = uv_mask[:, None, :, :] ## combine masks uv_vis_mask = uv_mask_gt * uv_mask * self.deca.uv_face_eye_mask else: uv_detail_normals = None predicted_detailed_image = None ## 4) (Optional) NEURAL RENDERING - not used in neither DECA nor EMOCA # If neural rendering is enabled, the differentiable rendered synthetic images are translated using an image translation net (such as StarGan) predicted_translated_image = None predicted_detailed_translated_image = None translated_uv_texture = None if render: if self.deca._has_neural_rendering(): predicted_translated_image = self.deca.image_translator( { "input_image" : predicted_images, "ref_image" : images, "target_domain" : torch.tensor([0]*predicted_images.shape[0], dtype=torch.int64, device=predicted_images.device) } ) if self.mode == DecaMode.DETAIL: predicted_detailed_translated_image = self.deca.image_translator( { "input_image" : predicted_detailed_image, "ref_image" : images, "target_domain" : torch.tensor([0]*predicted_detailed_image.shape[0], dtype=torch.int64, device=predicted_detailed_image.device) } ) translated_uv = F.grid_sample(torch.cat([predicted_detailed_translated_image, masks], dim=1), uv_pverts.permute(0, 2, 3, 1)[:, :, :, :2], mode='bilinear') translated_uv_texture = translated_uv[:, :3, :, :].detach() else: predicted_detailed_translated_image = None translated_uv_texture = None # no need in coarse mode # translated_uv = F.grid_sample(torch.cat([predicted_translated_image, masks], dim=1), uv_pverts.permute(0, 2, 3, 1)[:, :, :, :2], # mode='bilinear') # translated_uv_texture = translated_uv_gt[:, :3, :, :].detach() if self.emotion_mlp is not None: codedict = self.emotion_mlp(codedict, 
"emo_mlp_") # populate the value dict for metric computation/visualization if render: codedict['predicted_images'] = predicted_images codedict['predicted_detailed_image'] = predicted_detailed_image codedict['predicted_translated_image'] = predicted_translated_image codedict['ops'] = ops codedict['normals'] = ops['normals'] codedict['mask_face_eye'] = mask_face_eye codedict['verts'] = verts codedict['albedo'] = albedo codedict['landmarks2d'] = landmarks2d codedict['landmarks3d'] = landmarks3d codedict['predicted_landmarks'] = predicted_landmarks if landmarks2d_mediapipe is not None: codedict['predicted_landmarks_mediapipe'] = predicted_landmarks_mediapipe codedict['trans_verts'] = trans_verts codedict['masks'] = masks if self.mode == DecaMode.DETAIL: if render: codedict['predicted_detailed_translated_image'] = predicted_detailed_translated_image codedict['translated_uv_texture'] = translated_uv_texture codedict['uv_texture_gt'] = uv_texture_gt codedict['uv_texture'] = uv_texture codedict['uv_detail_normals'] = uv_detail_normals codedict['uv_shading'] = uv_shading codedict['uv_vis_mask'] = uv_vis_mask codedict['uv_mask'] = uv_mask codedict['uv_z'] = uv_z codedict['displacement_map'] = uv_z + self.deca.fixed_uv_dis[None, None, :, :] return codedict def _compute_emotion_loss(self, images, predicted_images, loss_dict, metric_dict, prefix, va=None, expr7=None, with_grad=True, batch_size=None, ring_size=None): def loss_or_metric(name, loss, is_loss): if not is_loss: metric_dict[name] = loss else: loss_dict[name] = loss # if self.deca.config.use_emonet_loss: if with_grad: d = loss_dict emo_feat_loss_1, emo_feat_loss_2, valence_loss, arousal_loss, expression_loss, au_loss = \ self.emonet_loss.compute_loss(images, predicted_images, batch_size=batch_size, ring_size=ring_size) else: d = metric_dict with torch.no_grad(): emo_feat_loss_1, emo_feat_loss_2, valence_loss, arousal_loss, expression_loss, au_loss = \ self.emonet_loss.compute_loss(images, predicted_images, batch_size=batch_size, ring_size=ring_size) # EmoNet self-consistency loss terms if emo_feat_loss_1 is not None: loss_or_metric(prefix + '_emonet_feat_1_L1', emo_feat_loss_1 * self.deca.config.emonet_weight, self.deca.config.use_emonet_feat_1 and self.deca.config.use_emonet_loss) loss_or_metric(prefix + '_emonet_feat_2_L1', emo_feat_loss_2 * self.deca.config.emonet_weight, self.deca.config.use_emonet_feat_2 and self.deca.config.use_emonet_loss) loss_or_metric(prefix + '_emonet_valence_L1', valence_loss * self.deca.config.emonet_weight, self.deca.config.use_emonet_valence and self.deca.config.use_emonet_loss) loss_or_metric(prefix + '_emonet_arousal_L1', arousal_loss * self.deca.config.emonet_weight, self.deca.config.use_emonet_arousal and self.deca.config.use_emonet_loss) # loss_or_metric(prefix + 'emonet_expression_KL', expression_loss * self.deca.config.emonet_weight) # KL seems to be causing NaN's loss_or_metric(prefix + '_emonet_expression_L1',expression_loss * self.deca.config.emonet_weight, self.deca.config.use_emonet_expression and self.deca.config.use_emonet_loss) loss_or_metric(prefix + '_emonet_combined', ((emo_feat_loss_1 if emo_feat_loss_1 is not None else 0) + emo_feat_loss_2 + valence_loss + arousal_loss + expression_loss) * self.deca.config.emonet_weight, self.deca.config.use_emonet_combined and self.deca.config.use_emonet_loss) # Log also the VA metric_dict[prefix + "_valence_input"] = self.emonet_loss.input_emotion['valence'].mean().detach() metric_dict[prefix + "_valence_output"] = 
self.emonet_loss.output_emotion['valence'].mean().detach() metric_dict[prefix + "_arousal_input"] = self.emonet_loss.input_emotion['arousal'].mean().detach() metric_dict[prefix + "_arousal_output"] = self.emonet_loss.output_emotion['arousal'].mean().detach() input_ex = self.emonet_loss.input_emotion['expression' if 'expression' in self.emonet_loss.input_emotion.keys() else 'expr_classification'].detach().cpu().numpy() input_ex = np.argmax(input_ex, axis=1).mean() output_ex = self.emonet_loss.output_emotion['expression' if 'expression' in self.emonet_loss.input_emotion.keys() else 'expr_classification'].detach().cpu().numpy() output_ex = np.argmax(output_ex, axis=1).mean() metric_dict[prefix + "_expression_input"] = torch.tensor(input_ex, device=self.device) metric_dict[prefix + "_expression_output"] = torch.tensor(output_ex, device=self.device) # # GT emotion loss terms # if self.deca.config.use_gt_emotion_loss: # d = loss_dict # else: # d = metric_dict # TODO: uncomment this after you handle the case when certain entries are NaN (GT missing, not a bug) # if va is not None: # d[prefix + 'emo_sup_val_L1'] = F.l1_loss(self.emonet_loss.output_emotion['valence'], va[:, 0]) \ # * self.deca.config.gt_emotion_reg # d[prefix + 'emo_sup_ar_L1'] = F.l1_loss(self.emonet_loss.output_emotion['arousal'], va[:, 1]) \ # * self.deca.config.gt_emotion_reg # # metric_dict[prefix + "_valence_gt"] = va[:, 0].mean().detach() # metric_dict[prefix + "_arousal_gt"] = va[:, 1].mean().detach() # # if expr7 is not None: # affectnet_gt = [expr7_to_affect_net(int(expr7[i])).value for i in range(len(expr7))] # affectnet_gt = torch.tensor(np.array(affectnet_gt), device=self.device, dtype=torch.long) # d[prefix + '_emo_sup_expr_CE'] = F.cross_entropy(self.emonet_loss.output_emotion['expression'], affectnet_gt) * self.deca.config.gt_emotion_reg # metric_dict[prefix + "_expr_gt"] = affectnet_gt.mean().detach() def _compute_au_loss(self, images, predicted_images, loss_dict, metric_dict, prefix, au=None, with_grad=True): def loss_or_metric(name, loss, is_loss): if not is_loss: metric_dict[name] = loss else: loss_dict[name] = loss # if self.deca.config.use_emonet_loss: if with_grad: d = loss_dict au_feat_loss_1, au_feat_loss_2, _, _, _, au_loss = \ self.au_loss.compute_loss(images, predicted_images) else: d = metric_dict with torch.no_grad(): au_feat_loss_1, au_feat_loss_2, _, _, _, au_loss = \ self.au_loss.compute_loss(images, predicted_images) # EmoNet self-consistency loss terms if au_feat_loss_1 is not None: loss_or_metric(prefix + '_au_feat_1_L1', au_feat_loss_1 * self.deca.config.au_loss.au_weight, self.deca.config.au_loss.use_feat_1 and self.deca.config.au_loss.use_as_loss) loss_or_metric(prefix + '_au_feat_2_L1', au_feat_loss_2 * self.deca.config.au_loss.au_weight, self.deca.config.au_loss.use_feat_2 and self.deca.config.au_loss.use_as_loss) loss_or_metric(prefix + '_au_loss', au_loss * self.deca.config.au_loss.au_weight, self.deca.config.au_loss.use_aus and self.deca.config.au_loss.use_as_loss) # loss_or_metric(prefix + '_au_losses_L1', arousal_loss * self.deca.config.au_loss.au_weight, # self.deca.config.au_loss.use_emonet_arousal and self.deca.config.au_loss.use_as_loss) # loss_or_metric(prefix + 'emonet_expression_KL', expression_loss * self.deca.config.au_loss.au_weight) # KL seems to be causing NaN's # # Log also the VA # metric_dict[prefix + "_valence_input"] = self.emonet_loss.input_emotion['valence'].mean().detach() # metric_dict[prefix + "_valence_output"] = 
self.emonet_loss.output_emotion['valence'].mean().detach()
        # metric_dict[prefix + "_arousal_input"] = self.emonet_loss.input_emotion['arousal'].mean().detach()
        # metric_dict[prefix + "_arousal_output"] = self.emonet_loss.output_emotion['arousal'].mean().detach()
        # input_ex = self.emonet_loss.input_emotion['expression' if 'expression' in self.emonet_loss.input_emotion.keys() else 'expr_classification'].detach().cpu().numpy()
        # input_ex = np.argmax(input_ex, axis=1).mean()
        # output_ex = self.emonet_loss.output_emotion['expression' if 'expression' in self.emonet_loss.input_emotion.keys() else 'expr_classification'].detach().cpu().numpy()
        # output_ex = np.argmax(output_ex, axis=1).mean()
        # metric_dict[prefix + "_expression_input"] = torch.tensor(input_ex, device=self.device)
        # metric_dict[prefix + "_expression_output"] = torch.tensor(output_ex, device=self.device)

        # # GT emotion loss terms
        # if self.deca.config.use_gt_emotion_loss:
        #     d = loss_dict
        # else:
        #     d = metric_dict

    def _cut_mouth_vectorized(self, images, landmarks, convert_grayscale=True):
        # mouth_window_margin = 12
        mouth_window_margin = 1  # not temporal
        mouth_crop_height = 96
        mouth_crop_width = 96
        mouth_landmark_start_idx = 48
        mouth_landmark_stop_idx = 68

        B, T = images.shape[:2]
        landmarks = landmarks.to(torch.float32)

        with torch.no_grad():
            image_size = images.shape[-1] / 2
            landmarks = landmarks * image_size + image_size

            # #1) smooth the landmarks with temporal convolution
            # landmarks are of shape (T, 68, 2); reshape to (T, 136)
            landmarks_t = landmarks.reshape(*landmarks.shape[:2], -1)
            # make temporal dimension last
            landmarks_t = landmarks_t.permute(0, 2, 1)  # change shape to (N, 136, T)
            # landmarks_t = landmarks_t.unsqueeze(0)

            # smooth with temporal convolution
            temporal_filter = torch.ones(mouth_window_margin, device=images.device) / mouth_window_margin
            # pad the landmarks
            landmarks_t_padded = F.pad(landmarks_t, (mouth_window_margin // 2, mouth_window_margin // 2),
                                       mode='replicate')
            # convolve each channel separately with the temporal filter
            num_channels = landmarks_t.shape[1]
            if temporal_filter.numel() > 1:
                smooth_landmarks_t = F.conv1d(landmarks_t_padded,
                                              temporal_filter.unsqueeze(0).unsqueeze(0).expand(num_channels, 1, temporal_filter.numel()),
                                              groups=num_channels, padding='valid')
                smooth_landmarks_t = smooth_landmarks_t[..., 0:landmarks_t.shape[-1]]
            else:
                smooth_landmarks_t = landmarks_t

            # reshape back to the original shape
            smooth_landmarks_t = smooth_landmarks_t.permute(0, 2, 1).view(landmarks.shape)
            smooth_landmarks_t = smooth_landmarks_t + landmarks.mean(dim=2, keepdims=True) - smooth_landmarks_t.mean(dim=2, keepdims=True)

            # #2) get the mouth landmarks
            mouth_landmarks_t = smooth_landmarks_t[..., mouth_landmark_start_idx:mouth_landmark_stop_idx, :]

            # #3) get the mean of the mouth landmarks
            mouth_landmarks_mean_t = mouth_landmarks_t.mean(dim=-2, keepdims=True)

            # #4) get the center of the mouth
            center_x_t = mouth_landmarks_mean_t[..., 0]
            center_y_t = mouth_landmarks_mean_t[..., 1]

            # #5) use grid_sample to crop the mouth in every image
            # create the grid in the normalized [-1, 1] coordinates grid_sample expects
            height = mouth_crop_height // 2
            width = mouth_crop_width // 2

            # torch.arange(0, mouth_crop_width, device=images.device)  # (no-op leftover, commented out)
            grid = torch.stack(torch.meshgrid(torch.linspace(-height, height, mouth_crop_height).to(images.device) / (images.shape[-2] / 2),
                                              torch.linspace(-width, width, mouth_crop_width).to(images.device) / (images.shape[-1] / 2)),
                               dim=-1)
            grid = grid[..., [1, 0]]
            grid = grid.unsqueeze(0).unsqueeze(0).repeat(*images.shape[:2], 1, 1, 1)

            # shift the grid to the mouth center; an illustrative standalone sketch of this
            # kind of grid-based cropping follows below, then the computation continues
            center_x_t -= images.shape[-1] / 2
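            # --- Illustrative aside (hypothetical helper, not part of the codebase) ---
            # The surrounding code builds a sampling grid in the [-1, 1] coordinates that
            # F.grid_sample expects and shifts it to the mouth center. The minimal,
            # self-contained sketch below shows the same idea for a single square crop;
            # the name `crop_around_center` is an assumption for illustration only, and
            # meshgrid(indexing="ij") assumes a recent PyTorch.
            #
            # import torch
            # import torch.nn.functional as F
            #
            # def crop_around_center(images, center_xy, crop=96):
            #     """images: (B, C, H, W); center_xy: (B, 2) float pixel coordinates."""
            #     B, _, H, W = images.shape
            #     half = crop // 2
            #     ys = torch.linspace(-half, half, crop, device=images.device) / (H / 2)
            #     xs = torch.linspace(-half, half, crop, device=images.device) / (W / 2)
            #     g = torch.stack(torch.meshgrid(ys, xs, indexing="ij"), dim=-1)[..., [1, 0]]  # (x, y) order
            #     center = center_xy.clone()
            #     center[:, 0] = (center[:, 0] - W / 2) / (W / 2)  # normalize to [-1, 1]
            #     center[:, 1] = (center[:, 1] - H / 2) / (H / 2)
            #     g = g.unsqueeze(0) + center[:, None, None, :]
            #     return F.grid_sample(images, g, align_corners=True, padding_mode="zeros", mode="bicubic")
            # ---------------------------------------------------------------------------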
center_y_t -= images.shape[-2] / 2 center_x_t /= images.shape[-1] / 2 center_y_t /= images.shape[-2] / 2 grid = grid + torch.cat([center_x_t, center_y_t ], dim=-1).unsqueeze(-2).unsqueeze(-2) images = images.view(B*T, *images.shape[2:]) grid = grid.view(B*T, *grid.shape[2:]) if convert_grayscale: images = F_v.rgb_to_grayscale(images) image_crops = F.grid_sample( images, grid, align_corners=True, padding_mode='zeros', mode='bicubic' ) image_crops = image_crops.view(B, T, *image_crops.shape[1:]) if convert_grayscale: image_crops = image_crops#.squeeze(1) # import matplotlib.pyplot as plt # plt.figure() # plt.imshow(image_crops[0, 0].permute(1,2,0).cpu().numpy()) # plt.show() # plt.figure() # plt.imshow(image_crops[0, 10].permute(1,2,0).cpu().numpy()) # plt.show() # plt.figure() # plt.imshow(image_crops[0, 20].permute(1,2,0).cpu().numpy()) # plt.show() # plt.figure() # plt.imshow(image_crops[1, 0].permute(1,2,0).cpu().numpy()) # plt.show() # plt.figure() # plt.imshow(image_crops[1, 10].permute(1,2,0).cpu().numpy()) # plt.show() # plt.figure() # plt.imshow(image_crops[1, 20].permute(1,2,0).cpu().numpy()) # plt.show() return image_crops def _compute_lipread_loss(self, images, predicted_images, landmarks, predicted_landmarks, loss_dict, metric_dict, prefix, with_grad=True): def loss_or_metric(name, loss, is_loss): if not is_loss: metric_dict[name] = loss else: loss_dict[name] = loss # shape of images is: (B, R, C, H, W) # convert to (B * R, 1, H, W, C) images = images.unsqueeze(1) predicted_images = predicted_images.unsqueeze(1) landmarks = landmarks.unsqueeze(1) predicted_landmarks = predicted_landmarks.unsqueeze(1) # cut out the mouth region images_mouth = self._cut_mouth_vectorized(images, landmarks) predicted_images_mouth = self._cut_mouth_vectorized(predicted_images, predicted_landmarks) # make sure that the lip reading net interprests things with depth=1, # if self.deca.config.use_emonet_loss: if with_grad: d = loss_dict loss = self.lipread_loss.compute_loss(images_mouth, predicted_images_mouth) else: d = metric_dict with torch.no_grad(): loss = self.lipread_loss.compute_loss(images_mouth, predicted_images_mouth) d[prefix + '_lipread'] = loss * self.deca.config.lipread_loss.weight def _metric_or_loss(self, loss_dict, metric_dict, is_loss): if is_loss: d = loss_dict else: d = metric_dict return d def _compute_id_loss(self, codedict, batch, training, testing, losses, batch_size, ring_size): # if self.deca.config.idw > 1e-3: if self.deca.id_loss is not None: images = codedict["images"] ops = codedict["ops"] mask_face_eye = codedict["mask_face_eye"] shading_images = self.deca.render.add_SHlight(ops['normal_images'], codedict["lightcode"].detach()) albedo_images = F.grid_sample(codedict["albedo"].detach(), ops['grid'], align_corners=False) # TODO: get to the bottom of this weird overlay thing - why is it there? 
# answer: This renders the face and takes background from the image overlay = albedo_images * shading_images * mask_face_eye + images * (1 - mask_face_eye) if self.global_step >= self.deca.id_loss_start_step: if 'id_metric' in self.deca.config.keys() and 'barlow_twins' in self.deca.config.id_metric: assert ring_size == 1 or ring_size == 2 effective_bs = images.shape[0] # losses['identity'] = self.deca.id_loss(overlay, images, batch_size=batch_size, # ring_size=ring_size) * self.deca.config.idw if "ref_images_identity_idxs" in codedict.keys(): # in case there was shuffling, this ensures that the proper images are used for identity loss images_ = images[codedict["ref_images_identity_idxs"]] else: images_ = images losses['identity'] = self.deca.id_loss(overlay, images_, batch_size=effective_bs, ring_size=1) * self.deca.config.idw if 'id_contrastive' in self.deca.config.keys() and bool(self.deca.config.id_contrastive): if ring_size == 2: assert effective_bs % 2 == 0 assert self.deca.id_loss.trainable has_been_shuffled = 'new_order' in codedict.keys() idxs_a = torch.arange(0, images.shape[0], 2) # indices of first images within the ring idxs_b = torch.arange(1, images.shape[0], 2) # indices of second images within the ring # WARNING - this assumes the ring is identity-based if self.deca.config.id_contrastive in [True, "real", "both"]: # we are taking this from the original batch dict because we do not care about the # shuffled, duplicated samples (they don't have to be doubled) images_0 = batch["image"][:, 0, ...] images_1 = batch["image"][:, 1, ...] losses['identity_contrastive_real'] = self.deca.id_loss( images_0, # first images within the ring images_1, # second images within the ring batch_size=images_0.shape[0], ring_size=1) * self.deca.config.idw * 2 if self.deca.config.id_contrastive in [True, "synth", "both"]: if self.deca.config.shape_constrain_type in ['exchange', 'same']: # we can take all when identity has been exchange within rings overlay_0 = overlay[idxs_a] overlay_1 = overlay[idxs_b] else: #if the batch was double otherwise (global shuffling) we only take the first half # if batch_size * ring_size < effective_bs: overlay_0 = overlay[0:batch_size * ring_size:2] overlay_1 = overlay[1:batch_size * ring_size:2] losses['identity_contrastive_synthetic'] = self.deca.id_loss( overlay_0, # first images within the ring overlay_1, # second images within the ring batch_size=overlay_0.shape[0], ring_size=1) * self.deca.config.idw if has_been_shuffled: new_order = codedict['new_order'] # TODO: compare the idxs to these: # codedict["ref_images_identity_idxs"] if self.deca.config.shape_constrain_type == 'shuffle_expression': idxs_a_synth = np.arange(new_order.shape[0]) # first half of the batch idxs_b_synth = np.arange(new_order.shape[0], 2 * new_order.shape[0]) # second half of the batch elif self.deca.config.shape_constrain_type == 'shuffle_shape': idxs_a_synth = new_order # shuffled first half of the batch idxs_b_synth = np.arange(new_order.shape[0], 2 * new_order.shape[0]) # second half of the batch else: raise NotImplementedError("Unexpected shape consistency value ") # if this doesn't go through, something went wrong with the shuffling indexations assert codedict["shapecode"][idxs_a_synth].allclose(codedict["shapecode"][idxs_b_synth]) losses['identity_contrastive_synthetic_shuffled'] = self.deca.id_loss( overlay[idxs_a_synth], # synthetic images of identities with reconstructed expressions overlay[idxs_b_synth], # synthetic images of identities with shuffled expressions 
batch_size=idxs_a_synth.size, ring_size=1) * self.deca.config.idw
                        losses['identity_contrastive_synthetic2real_shuffled'] = self.deca.id_loss(
                            images[idxs_a_synth],   # synthetic images of identities with reconstructed expressions
                            overlay[idxs_b_synth],  # synthetic images of identities with shuffled expressions
                            batch_size=idxs_a_synth.size,
                            ring_size=1) * self.deca.config.idw
                elif ring_size > 2:
                    raise NotImplementedError("Contrastive loss does not support ring sizes > 2.")
        return losses

    def _compute_emonet_loss_wrapper(self, codedict, batch, training, testing, losses, metrics, prefix, image_key,
                                     with_grad, batch_size, ring_size):

        if self.emonet_loss is not None:
            if 'va' in codedict:
                va = codedict['va']
                va = va.view(-1, va.shape[-1])
            else:
                va = None

            if 'expr7' in codedict:
                expr7 = codedict['expr7']
                expr7 = expr7.view(-1, expr7.shape[-1])
            else:
                expr7 = None

            # with torch.no_grad():
            # TODO: if expression shuffled, this needs to be changed, the input images no longer correspond
            images = codedict["images"]
            predicted_images = codedict[image_key]
            effective_bs = images.shape[0]

            if "ref_images_expression_idxs" in codedict.keys():
                # in case there was shuffling, this ensures that the proper images are used for emotion loss
                images_ = images[codedict["ref_images_expression_idxs"]]
            else:
                images_ = images
            effective_bs = images.shape[0]

            self._compute_emotion_loss(images_, predicted_images, losses, metrics, f"{prefix}",
                                       va, expr7, with_grad=with_grad,
                                       batch_size=effective_bs, ring_size=1)

            codedict[f"{prefix}_valence_input"] = self.emonet_loss.input_emotion['valence']
            codedict[f"{prefix}_arousal_input"] = self.emonet_loss.input_emotion['arousal']
            codedict[f"{prefix}_expression_input"] = self.emonet_loss.input_emotion[
                'expression' if 'expression' in self.emonet_loss.input_emotion.keys() else 'expr_classification']
            codedict[f"{prefix}_valence_output"] = self.emonet_loss.output_emotion['valence']
            codedict[f"{prefix}_arousal_output"] = self.emonet_loss.output_emotion['arousal']
            codedict[f"{prefix}_expression_output"] = self.emonet_loss.output_emotion[
                'expression' if 'expression' in self.emonet_loss.input_emotion.keys() else 'expr_classification']

            if 'emo_contrastive' in self.deca.config.keys() and self.deca.config.emo_contrastive:
                assert ring_size == 2 or ring_size == 1
                assert self.emonet_loss.trainable or (
                        hasattr(self.emonet_loss, 'clone_is_trainable') and self.emonet_loss.clone_is_trainable)

                has_been_shuffled = 'new_order' in codedict.keys()

                # if self.deca.config.shape_constrain_type == 'shuffle_expression' and has_been_shuffled:
                #     new_order = codedict['new_order']

                # if self.deca.config.emo_contrastive in [True, "real", "both"]:
                if ring_size == 2:
                    assert effective_bs % 2 == 0

                    if not isinstance(self.deca, ExpDECA):
                        raise NotImplementedError("Cross-ring emotion contrast means the ring has to be "
                                                  "expression based, not identity based. This is not guaranteed "
                                                  "for vanilla EMOCA (or its datasets).")

                    # we are taking this from the original batch dict because we do not care about the
                    # shuffled, duplicated samples (they don't have to be doubled)
                    # (see the illustrative ring-indexing sketch below; the contrastive call follows)
                    images_0 = batch["image"][:, 0, ...]
                    images_1 = batch["image"][:, 1, ...]
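# --- Illustrative sketch (assumption-labeled, not part of the model) -------------
# Both the identity- and emotion-contrastive branches pair up ring members by taking
# even batch indices as the first element of each ring and odd indices as the second,
# i.e. the batch is laid out as [ring0_a, ring0_b, ring1_a, ring1_b, ...]. The helper
# `ring_pairs` below is hypothetical and only makes that indexing explicit.
import torch

def ring_pairs(batch_tensor: torch.Tensor) -> tuple:
    """Split a (B, ...) tensor with ring_size == 2 into (first, second) ring members."""
    assert batch_tensor.shape[0] % 2 == 0, "ring_size == 2 requires an even batch"
    idxs_a = torch.arange(0, batch_tensor.shape[0], 2)  # first image of every ring
    idxs_b = torch.arange(1, batch_tensor.shape[0], 2)  # second image of every ring
    return batch_tensor[idxs_a], batch_tensor[idxs_b]

# e.g. first, second = ring_pairs(torch.randn(8, 3, 224, 224))  # 4 rings of size 2
# ----------------------------------------------------------------------------------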
self._compute_emotion_loss(images_0, # real images of first expressions in the ring images_1, # real images of second expressions in the ring losses, metrics, f"{prefix}_contrastive_real", va, expr7, with_grad=self.deca.config.use_emonet_loss, batch_size=images_0.shape[0], ring_size=1) else: print("[WARNING] Cannot compute real contrastive emotion loss because there is no ring!") if self.deca.config.emo_contrastive in [True, "synth", "both"]: if ring_size == 2: assert effective_bs % 2 == 0 idxs_a = torch.arange(0, images.shape[0], 2) # indices of first expressions within a ring idxs_b = torch.arange(1, images.shape[0], 2) # indices of second expressions within a ring if 'expression_constrain_type' in self.deca.config.keys() and \ self.deca.config.expression_constrain_type in ['exchange', 'same']: # we can take all when identity has been exchange within rings predicted_images_0 = predicted_images[idxs_a] predicted_images_1 = predicted_images[idxs_b] raise RuntimeError("This should work but it was never tested or intended. Make sure this works.") else: # if the batch was double otherwise (global shuffling) we only take the first half # if batch_size * ring_size < effective_bs: predicted_images_0 = predicted_images[0:batch_size * ring_size:2] predicted_images_1 = predicted_images[1:batch_size * ring_size:2] if not isinstance(self.deca, ExpDECA): raise NotImplementedError("Cross-ring emotion contrast means the ring has to be " "expression based, not identity based. This is not guaranteed " "for vanilla EMOCA.") self._compute_emotion_loss(predicted_images_0, # rec images of first expressions in the ring predicted_images_1, # rec images of second expressions in the ring losses, metrics, f"{prefix}_contrastive_synth", va, expr7, with_grad=self.deca.config.use_emonet_loss, batch_size=predicted_images_1.shape[0], ring_size=1) else: print("[WARNING] Cannot compute synthetic contrastive emotion loss because there is no ring!") if has_been_shuffled: new_order = codedict['new_order'] if self.deca.config.shape_constrain_type == 'shuffle_expression': # this gets tricky, in this case the images are not duplicates -> we need all, but the second # half's order is shuffled, so we need to be careful here idxs_a_synth = new_order # shuffled first half of the batch idxs_b_synth = np.arange(new_order.shape[0], 2 * new_order.shape[0]) # second half of the batch elif self.deca.config.shape_constrain_type == 'shuffle_shape': idxs_a_synth = np.arange(new_order.shape[0]) # first half of the batch idxs_b_synth = np.arange(new_order.shape[0], 2 * new_order.shape[0]) # second half of the batch # if this doesn't go through, something went wrong with the shuffling indexations assert codedict["expcode"][idxs_a_synth].allclose(codedict["expcode"][idxs_b_synth]) # the expressions at corresponding index positions of idxs_a_synth and idxs_b_synth should match now self._compute_emotion_loss(predicted_images[idxs_a_synth], # synthetic images of reconstructed expressions and corresponding identities predicted_images[idxs_b_synth], # synthetic images of reconstructed expressions and shuffled identities losses, metrics, f"{prefix}_contrastive_synth_shuffled", va, expr7, with_grad=self.deca.config.use_emonet_loss and not self.deca._has_neural_rendering(), batch_size=idxs_a_synth.size, ring_size=1) self._compute_emotion_loss(images[idxs_a_synth], # synthetic images of reconstructed expressions and corresponding identities predicted_images[idxs_b_synth], # synthetic images of reconstructed expressions and shuffled identities losses, 
metrics, f"{prefix}_contrastive_synth2real_shuffled", va, expr7, with_grad=self.deca.config.use_emonet_loss and not self.deca._has_neural_rendering(), batch_size=idxs_a_synth.size, ring_size=1) if va is not None: codedict[f"{prefix}_valence_gt"] = va[:, 0] codedict[f"{prefix}_arousal_gt"] = va[:, 1] if expr7 is not None: codedict[f"{prefix}_expression_gt"] = expr7 if self.deca._has_neural_rendering(): assert 'emo_contrastive' not in self.deca.config.keys() or self.deca.config.emo_contrastive is False # TODO possible to make this more GPU efficient by not recomputing emotion for input image self._compute_emotion_loss(images, predicted_translated_image, losses, metrics, f"{prefix}_translated", va, expr7, with_grad=self.deca.config.use_emonet_loss and self.deca._has_neural_rendering(), batch_size=bs, ring_size=1) # codedict[f"{prefix}_valence_input"] = self.emonet_loss.input_emotion['valence'] # codedict[f"{prefix}_arousal_input"] = self.emonet_loss.input_emotion['arousal'] # codedict[f"{prefix}_expression_input"] = self.emonet_loss.input_emotion['expression'] codedict[f"{prefix}_translated_valence_output"] = self.emonet_loss.output_emotion['valence'] codedict[f"{prefix}_translated_arousal_output"] = self.emonet_loss.output_emotion['arousal'] codedict[f"{prefix}_translated_expression_output"] = self.emonet_loss.output_emotion[ 'expression' if 'expression' in self.emonet_loss.input_emotion.keys() else 'expr_classification'] return losses, metrics, codedict def _compute_loss(self, codedict, batch, training=True, testing=False): #### ----------------------- Losses losses = {} metrics = {} predicted_landmarks = codedict["predicted_landmarks"] predicted_landmarks_mediapipe = codedict.get("predicted_landmarks_mediapipe", None) if "lmk" in codedict.keys(): lmk = codedict["lmk"] else: lmk = None if "lmk_mp" in codedict.keys(): lmk_mp = codedict["lmk_mp"] else: lmk_mp = None if "masks" in codedict.keys(): masks = codedict["masks"] else: masks = None batch_size = codedict["predicted_images"].shape[0] use_geom_losses = 'use_geometric_losses_expression_exchange' in self.deca.config.keys() and \ self.deca.config.use_geometric_losses_expression_exchange if training and ('expression_constrain_type' in self.deca.config.keys() \ and ('expression_constrain_type' in self.deca.config.keys() and self.deca.config.expression_constrain_type == 'exchange') or ( 'shape_constrain_type' in self.deca.config.keys() and self.deca.config.shape_constrain_type in ['shuffle_expression', 'shuffle_shape'])) \ and (self.deca.mode == DecaMode.COARSE or self.deca.config.train_coarse) \ and (not use_geom_losses): if batch_size % 2 != 0: raise RuntimeError("The batch size should be even because it should have " f"got doubled in expression ring exchange. 
Instead it was odd: {batch_size}") # THIS IS DONE BECAUSE LANDMARK AND PHOTOMETRIC LOSSES MAKE NO SENSE FOR EXPRESSION EXCHANGE geom_losses_idxs = batch_size // 2 else: geom_losses_idxs = batch_size predicted_images = codedict["predicted_images"] images = codedict["images"] lightcode = codedict["lightcode"] albedo = codedict["albedo"] mask_face_eye = codedict["mask_face_eye"] shapecode = codedict["shapecode"] expcode = codedict["expcode"] texcode = codedict["texcode"] ops = codedict["ops"] if self.mode == DecaMode.DETAIL: uv_texture = codedict["uv_texture"] uv_texture_gt = codedict["uv_texture_gt"] # this determines the configured batch size that is currently used (training, validation or testing) # the reason why this is important is because of potential multi-gpu training and loss functions (such as Barlow Twins) # that might need the full size of the batch (not just the chunk of the current GPU). if training: bs = self.learning_params.batch_size_train rs = self.learning_params.train_K else: if not testing: bs = self.learning_params.batch_size_val rs = self.learning_params.val_K else: bs = self.learning_params.batch_size_test rs = self.learning_params.test_K ## COARSE loss only if self.mode == DecaMode.COARSE or (self.mode == DecaMode.DETAIL and self.deca.config.train_coarse): # landmark losses (only useful if coarse model is being trained # if training or lmk is not None: if lmk is not None: # if self.deca.config.use_landmarks: # d = losses # else: # d = metrics d = self._metric_or_loss(losses, metrics, self.deca.config.use_landmarks) if self.deca.config.useWlmk: d['landmark'] = \ lossfunc.weighted_landmark_loss(predicted_landmarks[:geom_losses_idxs, ...], lmk[:geom_losses_idxs, ...]) * self.deca.config.lmk_weight else: d['landmark'] = \ lossfunc.landmark_loss(predicted_landmarks[:geom_losses_idxs, ...], lmk[:geom_losses_idxs, ...]) * self.deca.config.lmk_weight d = self._metric_or_loss(losses, metrics, 'use_eye_distance' not in self.deca.config.keys() or self.deca.config.use_eye_distance) # losses['eye_distance'] = lossfunc.eyed_loss(predicted_landmarks, lmk) * self.deca.config.lmk_weight * 2 d['eye_distance'] = lossfunc.eyed_loss(predicted_landmarks[:geom_losses_idxs, ...], lmk[:geom_losses_idxs, ...]) * self.deca.config.eyed d = self._metric_or_loss(losses, metrics, 'use_lip_distance' not in self.deca.config.keys() or self.deca.config.use_lip_distance) d['lip_distance'] = lossfunc.lipd_loss(predicted_landmarks[:geom_losses_idxs, ...], lmk[:geom_losses_idxs, ...]) * self.deca.config.lipd d = self._metric_or_loss(losses, metrics, 'use_mouth_corner_distance' in self.deca.config.keys() and self.deca.config.use_mouth_corner_distance) d['mouth_corner_distance'] = lossfunc.mouth_corner_loss(predicted_landmarks[:geom_losses_idxs, ...], lmk[:geom_losses_idxs, ...]) * self.deca.config.lipd if predicted_landmarks_mediapipe is not None and lmk_mp is not None: use_mediapipe_landmarks = self.deca.config.get('use_mediapipe_landmarks', False) d = self._metric_or_loss(losses, metrics, use_mediapipe_landmarks) d['landmark_mediapipe'] =lossfunc_mp.landmark_loss(predicted_landmarks_mediapipe[:geom_losses_idxs, ...], lmk_mp[:geom_losses_idxs, ...]) * self.deca.config.lmk_weight_mp d = self._metric_or_loss(losses, metrics, self.deca.config.get('use_eye_distance_mediapipe', False) ) d['eye_distance_mediapipe'] = lossfunc_mp.eyed_loss(predicted_landmarks_mediapipe[:geom_losses_idxs, ...], lmk_mp[:geom_losses_idxs, ...]) * self.deca.config.eyed_mp d = self._metric_or_loss(losses, metrics, 
self.deca.config.get('use_lip_distance_mediapipe', False) ) d['lip_distance_mediapipe'] = lossfunc_mp.lipd_loss(predicted_landmarks_mediapipe[:geom_losses_idxs, ...], lmk_mp[:geom_losses_idxs, ...]) * self.deca.config.lipd_mp d = self._metric_or_loss(losses, metrics, self.deca.config.get('use_mouth_corner_distance_mediapipe', False)) d['mouth_corner_distance_mediapipe'] = lossfunc_mp.mouth_corner_loss(predicted_landmarks_mediapipe[:geom_losses_idxs, ...], lmk_mp[:geom_losses_idxs, ...]) * self.deca.config.lipd_mp #TODO: fix this on the next iteration lipd_loss # d['lip_distance'] = lossfunc.lipd_loss(predicted_landmarks, lmk) * self.deca.config.lipd # photometric loss # if training or masks is not None: if masks is not None: # if self.deca.config.use_photometric: # d = losses # else: # d = metrics # d['photometric_texture'] = (masks * (predicted_images - images).abs()).mean() * self.deca.config.photow photometric = masks[:geom_losses_idxs, ...] * ((predicted_images[:geom_losses_idxs, ...] - images[:geom_losses_idxs, ...]).abs()) if 'photometric_normalization' not in self.deca.config.keys() or self.deca.config.photometric_normalization == 'mean': photometric = photometric.mean() elif self.deca.config.photometric_normalization == 'rel_mask_value': photometric = photometric * masks[:geom_losses_idxs, ...].mean(dim=tuple(range(1,masks.ndim)), keepdim=True) photometric = photometric.mean() elif self.deca.config.photometric_normalization == 'neg_rel_mask_value': mu = 1. - masks[:geom_losses_idxs, ...].mean(dim=tuple(range(1,masks.ndim)), keepdim=True) photometric = photometric * mu photometric = photometric.mean() elif self.deca.config.photometric_normalization == 'inv_rel_mask_value': mu = 1./ masks[:geom_losses_idxs, ...].mean(dim=tuple(range(1,masks.ndim)), keepdim=True) photometric = photometric * mu photometric = photometric.mean() elif self.deca.config.photometric_normalization == 'abs_mask_value': photometric = photometric * masks[:geom_losses_idxs, ...].sum(dim=tuple(range(1,masks.ndim)), keepdim=True) photometric = photometric.mean() else: raise ValueError(f"Invalid photometric loss normalization: '{self.deca.config.photometric_normalization}'") self._metric_or_loss(losses, metrics, self.deca.config.use_photometric)['photometric_texture'] = \ photometric * self.deca.config.photow if self.deca.vgg_loss is not None: vggl, _ = self.deca.vgg_loss( masks[:geom_losses_idxs, ...] * images[:geom_losses_idxs, ...], # masked input image masks[:geom_losses_idxs, ...] * predicted_images[:geom_losses_idxs, ...], # masked output image ) self._metric_or_loss(losses, metrics, self.deca.config.use_vgg)['vgg'] = vggl * self.deca.config.vggw if self.deca._has_neural_rendering(): predicted_translated_image = codedict["predicted_translated_image"] photometric_translated = (masks[:geom_losses_idxs, ...] * ( predicted_translated_image[:geom_losses_idxs, ...] - images[:geom_losses_idxs, ...]).abs()).mean() * self.deca.config.photow if self.deca.config.use_photometric: losses['photometric_translated_texture'] = photometric_translated else: metrics['photometric_translated_texture'] = photometric_translated if self.deca.vgg_loss is not None: vggl, _ = self.deca.vgg_loss( masks[:geom_losses_idxs, ...] * images[:geom_losses_idxs, ...], # masked input image masks[:geom_losses_idxs, ...] 
* predicted_translated_image[:geom_losses_idxs, ...], # masked output image ) self._metric_or_loss(losses, metrics, self.deca.config.use_vgg)['vgg_translated'] = vggl * self.deca.config.vggw else: raise ValueError("Is this line ever reached?") losses = self._compute_id_loss(codedict, batch, training, testing, losses, batch_size=bs, ring_size=rs) losses['shape_reg'] = (torch.sum(shapecode ** 2) / 2) * self.deca.config.shape_reg losses['expression_reg'] = (torch.sum(expcode ** 2) / 2) * self.deca.config.exp_reg losses['tex_reg'] = (torch.sum(texcode ** 2) / 2) * self.deca.config.tex_reg losses['light_reg'] = ((torch.mean(lightcode, dim=2)[:, :, None] - lightcode) ** 2).mean() * self.deca.config.light_reg if 'original_code' in codedict.keys(): # original jaw pose regularization if self.deca.config.get('exp_deca_jaw_pose', False) and \ 'deca_jaw_reg' in self.deca.config.keys() and self.deca.config.deca_jaw_reg > 0: jaw_pose_orig = codedict['original_code']['pose'][:, 3:] jaw_pose = codedict['posecode'][..., 3:] deca_jaw_pose_reg = (torch.sum((jaw_pose - jaw_pose_orig) ** 2) / 2) * self.deca.config.deca_jaw_reg losses['deca_jaw_pose_reg'] = deca_jaw_pose_reg if self.deca.config.get('exp_deca_global_pose', False) and \ 'deca_global_reg' in self.deca.config.keys() and self.deca.config.deca_global_reg > 0: global_pose_orig = codedict['original_code']['pose'][:, :3] global_pose = codedict['posecode'][..., :3] global_pose_reg = (torch.sum((global_pose - global_pose_orig) ** 2) / 2) * self.deca.config.deca_global_reg losses['deca_global_pose_reg'] = global_pose_reg # original expression regularization if 'deca_expression_reg' in self.deca.config.keys() and self.deca.config.deca_expression_reg > 0: expression_orig = codedict['original_code']['exp'] expression = codedict['expcode'] deca_expression_reg = (torch.sum((expression - expression_orig) ** 2) / 2) * self.deca.config.deca_expression_reg losses['deca_expression_reg'] = deca_expression_reg losses, metrics, codedict = self._compute_emonet_loss_wrapper(codedict, batch, training, testing, losses, metrics, prefix="coarse", image_key="predicted_images", with_grad=self.deca.config.use_emonet_loss and not self.deca._has_neural_rendering(), batch_size=bs, ring_size=rs) if self.deca._has_neural_rendering(): losses, metrics, codedict = self._compute_emonet_loss_wrapper(codedict, batch, training, testing, losses, metrics, prefix="coarse_translated", image_key="predicted_translated_image", with_grad=self.deca.config.use_emonet_loss and self.deca._has_neural_rendering(), batch_size=bs, ring_size=rs ) if self.au_loss is not None: # with torch.no_grad(): self._compute_au_loss(images, predicted_images, losses, metrics, "coarse", au=None, with_grad=self.deca.config.au_loss.use_as_loss and not self.deca._has_neural_rendering()) if self.deca._has_neural_rendering(): self._compute_au_loss(images, predicted_translated_image, losses, metrics, "coarse", au=None, with_grad=self.deca.config.au_loss.use_as_loss and self.deca._has_neural_rendering()) if self.lipread_loss is not None: # with torch.no_grad(): self._compute_lipread_loss(images, predicted_images, lmk, predicted_landmarks, losses, metrics, "coarse", with_grad=self.deca.config.lipread_loss.use_as_loss and not self.deca._has_neural_rendering()) if self.deca._has_neural_rendering(): self._compute_lipread_loss(images, predicted_translated_image, lmk, predicted_landmarks, losses, metrics, "coarse", with_grad=self.deca.config.lipread_loss.use_as_loss and self.deca._has_neural_rendering()) ## DETAIL loss only if 
self.mode == DecaMode.DETAIL: predicted_detailed_image = codedict["predicted_detailed_image"] uv_z = codedict["uv_z"] # UV displacement map uv_shading = codedict["uv_shading"] uv_vis_mask = codedict["uv_vis_mask"] # uv_mask of what is visible photometric_detailed = (masks[:geom_losses_idxs, ...] * ( predicted_detailed_image[:geom_losses_idxs, ...] - images[:geom_losses_idxs, ...]).abs()).mean() * self.deca.config.photow if self.deca.config.use_detailed_photo: losses['photometric_detailed_texture'] = photometric_detailed else: metrics['photometric_detailed_texture'] = photometric_detailed if self.deca.vgg_loss is not None: vggl, _ = self.deca.vgg_loss( masks[:geom_losses_idxs, ...] * images[:geom_losses_idxs, ...], # masked input image masks[:geom_losses_idxs, ...] * predicted_detailed_image[:geom_losses_idxs, ...], # masked output image ) self._metric_or_loss(losses, metrics, self.deca.config.use_vgg)['vgg_detailed'] = vggl * self.deca.config.vggw if self.deca._has_neural_rendering(): predicted_detailed_translated_image = codedict["predicted_detailed_translated_image"] photometric_detailed_translated = (masks[:geom_losses_idxs, ...] * ( predicted_detailed_translated_image[:geom_losses_idxs, ...] - images[:geom_losses_idxs, ...]).abs()).mean() * self.deca.config.photow if self.deca.config.use_detailed_photo: losses['photometric_translated_detailed_texture'] = photometric_detailed_translated else: metrics['photometric_translated_detailed_texture'] = photometric_detailed_translated if self.deca.vgg_loss is not None: vggl, _ = self.deca.vgg_loss( masks[:geom_losses_idxs, ...] * images[:geom_losses_idxs, ...], # masked input image masks[:geom_losses_idxs, ...] * predicted_detailed_translated_image[:geom_losses_idxs, ...], # masked output image ) self._metric_or_loss(losses, metrics, self.deca.config.use_vgg)[ 'vgg_detailed_translated'] = vggl * self.deca.config.vggw losses, metrics, codedict = self._compute_emonet_loss_wrapper(codedict, batch, training, testing, losses, metrics, prefix="detail", image_key = "predicted_detailed_image", with_grad=self.deca.config.use_emonet_loss and not self.deca._has_neural_rendering(), batch_size=bs, ring_size=rs) if self.deca._has_neural_rendering(): losses, metrics, codedict = self._compute_emonet_loss_wrapper(codedict, batch, training, testing, losses, metrics, prefix="detail_translated", image_key="predicted_detailed_translated_image", with_grad=self.deca.config.use_emonet_loss and self.deca._has_neural_rendering(), batch_size=bs, ring_size=rs) # if self.emonet_loss is not None: # self._compute_emotion_loss(images, predicted_detailed_image, losses, metrics, "detail", # with_grad=self.deca.config.use_emonet_loss and not self.deca._has_neural_rendering(), # batch_size=bs, ring_size=rs) # codedict["detail_valence_input"] = self.emonet_loss.input_emotion['valence'] # codedict["detail_arousal_input"] = self.emonet_loss.input_emotion['arousal'] # codedict["detail_expression_input"] = self.emonet_loss.input_emotion['expression' if 'expression' in self.emonet_loss.input_emotion.keys() else 'expr_classification'] # codedict["detail_valence_output"] = self.emonet_loss.output_emotion['valence'] # codedict["detail_arousal_output"] = self.emonet_loss.output_emotion['arousal'] # codedict["detail_expression_output"] = self.emonet_loss.output_emotion['expression' if 'expression' in self.emonet_loss.input_emotion.keys() else 'expr_classification'] # # if va is not None: # codedict["detail_valence_gt"] = va[:,0] # codedict["detail_arousal_gt"] = va[:,1] # if expr7 is not 
None: # codedict["detail_expression_gt"] = expr7 # if self.deca._has_neural_rendering(): # #TODO possible to make this more GPU efficient by not recomputing emotion for input image # self._compute_emotion_loss(images, predicted_detailed_translated_image, # losses, metrics, "detail_translated", # va, expr7, # with_grad= self.deca.config.use_emonet_loss and self.deca._has_neural_rendering(), # batch_size=bs, ring_size=rs) # # # codedict["coarse_valence_input"] = self.emonet_loss.input_emotion['valence'] # # codedict["coarse_arousal_input"] = self.emonet_loss.input_emotion['arousal'] # # codedict["coarse_expression_input"] = self.emonet_loss.input_emotion['expression'] # codedict["detail_translated_valence_output"] = self.emonet_loss.output_emotion['valence'] # codedict["detail_translated_arousal_output"] = self.emonet_loss.output_emotion['arousal'] # codedict["detail_translated_expression_output"] = self.emonet_loss.output_emotion['expression' if 'expression' in self.emonet_loss.input_emotion.keys() else 'expr_classification'] if self.au_loss is not None: self._compute_au_loss(images, predicted_images, losses, metrics, "detail", au=None, with_grad=self.deca.config.au_loss.use_as_loss and not self.deca._has_neural_rendering()) if self.deca._has_neural_rendering(): self._compute_au_loss(images, predicted_detailed_translated_image, losses, metrics, "detail", au=None, with_grad=self.deca.config.au_loss.use_as_loss and self.deca._has_neural_rendering()) for pi in range(3): # self.deca.face_attr_mask.shape[0]): if self.deca.config.sfsw[pi] != 0: # if pi==0: new_size = 256 # else: # new_size = 128 # if self.deca.config.uv_size != 256: # new_size = 128 uv_texture_patch = F.interpolate( uv_texture[:geom_losses_idxs, :, self.deca.face_attr_mask[pi][2]:self.deca.face_attr_mask[pi][3], self.deca.face_attr_mask[pi][0]:self.deca.face_attr_mask[pi][1]], [new_size, new_size], mode='bilinear') uv_texture_gt_patch = F.interpolate( uv_texture_gt[:geom_losses_idxs, :, self.deca.face_attr_mask[pi][2]:self.deca.face_attr_mask[pi][3], self.deca.face_attr_mask[pi][0]:self.deca.face_attr_mask[pi][1]], [new_size, new_size], mode='bilinear') uv_vis_mask_patch = F.interpolate( uv_vis_mask[:geom_losses_idxs, :, self.deca.face_attr_mask[pi][2]:self.deca.face_attr_mask[pi][3], self.deca.face_attr_mask[pi][0]:self.deca.face_attr_mask[pi][1]], [new_size, new_size], mode='bilinear') detail_l1 = (uv_texture_patch * uv_vis_mask_patch - uv_texture_gt_patch * uv_vis_mask_patch).abs().mean() * \ self.deca.config.sfsw[pi] if self.deca.config.use_detail_l1 and not self.deca._has_neural_rendering(): losses['detail_l1_{}'.format(pi)] = detail_l1 else: metrics['detail_l1_{}'.format(pi)] = detail_l1 if self.deca.config.use_detail_mrf and not self.deca._has_neural_rendering(): mrf = self.deca.perceptual_loss(uv_texture_patch * uv_vis_mask_patch, uv_texture_gt_patch * uv_vis_mask_patch) * \ self.deca.config.sfsw[pi] * self.deca.config.mrfwr losses['detail_mrf_{}'.format(pi)] = mrf else: with torch.no_grad(): mrf = self.deca.perceptual_loss(uv_texture_patch * uv_vis_mask_patch, uv_texture_gt_patch * uv_vis_mask_patch) * \ self.deca.config.sfsw[pi] * self.deca.config.mrfwr metrics['detail_mrf_{}'.format(pi)] = mrf if self.deca._has_neural_rendering(): # raise NotImplementedError("Gotta implement the texture extraction first.") translated_uv_texture = codedict["translated_uv_texture"] translated_uv_texture_patch = F.interpolate( translated_uv_texture[:geom_losses_idxs, :, self.deca.face_attr_mask[pi][2]:self.deca.face_attr_mask[pi][3], 
self.deca.face_attr_mask[pi][0]:self.deca.face_attr_mask[pi][1]], [new_size, new_size], mode='bilinear') translated_detail_l1 = (translated_uv_texture_patch * uv_vis_mask_patch - uv_texture_gt_patch * uv_vis_mask_patch).abs().mean() * \ self.deca.config.sfsw[pi] if self.deca.config.use_detail_l1: losses['detail_translated_l1_{}'.format(pi)] = translated_detail_l1 else: metrics['detail_translated_l1_{}'.format(pi)] = translated_detail_l1 if self.deca.config.use_detail_mrf: translated_mrf = self.deca.perceptual_loss(translated_uv_texture_patch * uv_vis_mask_patch, uv_texture_gt_patch * uv_vis_mask_patch) * \ self.deca.config.sfsw[pi] * self.deca.config.mrfwr losses['detail_translated_mrf_{}'.format(pi)] = translated_mrf else: with torch.no_grad(): mrf = self.deca.perceptual_loss(translated_uv_texture_patch * uv_vis_mask_patch, uv_texture_gt_patch * uv_vis_mask_patch) * \ self.deca.config.sfsw[pi] * self.deca.config.mrfwr metrics['detail_translated_mrf_{}'.format(pi)] = mrf # Old piece of debug code. Good to delete. # if pi == 2: # uv_texture_gt_patch_ = uv_texture_gt_patch # uv_texture_patch_ = uv_texture_patch # uv_vis_mask_patch_ = uv_vis_mask_patch losses['z_reg'] = torch.mean(uv_z.abs()) * self.deca.config.zregw losses['z_diff'] = lossfunc.shading_smooth_loss(uv_shading) * self.deca.config.zdiffw nonvis_mask = (1 - util.binary_erosion(uv_vis_mask)) losses['z_sym'] = (nonvis_mask * (uv_z - torch.flip(uv_z, [-1]).detach()).abs()).sum() * self.deca.config.zsymw if self.emotion_mlp is not None:# and not testing: mlp_losses, mlp_metrics = self.emotion_mlp.compute_loss( codedict, batch, training=training, pred_prefix="emo_mlp_") for key in mlp_losses.keys(): if key in losses.keys(): raise RuntimeError(f"Duplicate loss label {key}") losses[key] = self.deca.config.mlp_emotion_predictor_weight * mlp_losses[key] for key in mlp_metrics.keys(): if key in metrics.keys(): raise RuntimeError(f"Duplicate metric label {key}") # let's report the metrics (which are a superset of losses when it comes to EmoMLP) without the weight, # it's hard to plot the metrics otherwise metrics[key] = mlp_metrics[key] # metrics[key] = self.deca.config.mlp_emotion_predictor_weight * mlp_metrics[key] # else: # uv_texture_gt_patch_ = None # uv_texture_patch_ = None # uv_vis_mask_patch_ = None return losses, metrics def compute_loss(self, values, batch, training=True, testing=False) -> dict: """ The function used to compute the loss on a training batch. : training should be set to true when calling from training_step only """ losses, metrics = self._compute_loss(values, batch, training=training, testing=testing) all_loss = 0. losses_key = losses.keys() for key in losses_key: all_loss = all_loss + losses[key] # losses['all_loss'] = all_loss losses = {'loss_' + key: value for key, value in losses.items()} # add prefix loss for better logging losses['loss'] = all_loss # add metrics that do not effect the loss function (if any) for key in metrics.keys(): losses['metric_' + key] = metrics[key] return losses def _val_to_be_logged(self, d): if not hasattr(self, 'val_dict_list'): self.val_dict_list = [] self.val_dict_list += [d] def _train_to_be_logged(self, d): if not hasattr(self, 'train_dict_list'): self.train_dict_list = [] self.train_dict_list += [d] def validation_step(self, batch, batch_idx, dataloader_idx=None): """ Training step override of pytorch lightning module. It makes the encoding, decoding passes, computes the loss and logs the losses/visualizations. :param batch: Batch of images to encode. 
batch['image'] [batch_size, ring_size, 3, image_size, image_size]. For a training forward pass, additional corresponding data are necessery such as 'landmarks' and 'masks'. :batch_idx batch index """ with torch.no_grad(): training = False values = self.encode(batch, training=training) values = self.decode(values, training=training) losses_and_metrics = self.compute_loss(values, batch, training=training) #### self.log_dict(losses_and_metrics, on_step=False, on_epoch=True) # prefix = str(self.mode.name).lower() prefix = self._get_logging_prefix() # if dataloader_idx is not None: # dataloader_str = str(dataloader_idx) + "_" # else: dataloader_str = '' stage_str = dataloader_str + 'val_' # losses_and_metrics_to_log = {prefix + dataloader_str +'_val_' + key: value.detach().cpu() for key, value in losses_and_metrics.items()} # losses_and_metrics_to_log = {prefix + '_' + stage_str + key: value.detach() for key, value in losses_and_metrics.items()} losses_and_metrics_to_log = {prefix + '_' + stage_str + key: value.detach().cpu().item() for key, value in losses_and_metrics.items()} losses_and_metrics_to_log[prefix + '_' + stage_str + 'epoch'] = self.current_epoch # losses_and_metrics_to_log[prefix + '_' + stage_str + 'epoch'] = torch.tensor(self.current_epoch, device=self.device) # log val_loss also without any prefix for a model checkpoint to track it losses_and_metrics_to_log[stage_str + 'loss'] = losses_and_metrics_to_log[prefix + '_' + stage_str + 'loss'] losses_and_metrics_to_log[prefix + '_' + stage_str + 'step'] = self.global_step losses_and_metrics_to_log[prefix + '_' + stage_str + 'batch_idx'] = batch_idx losses_and_metrics_to_log[stage_str + 'step'] = self.global_step losses_and_metrics_to_log[stage_str + 'batch_idx'] = batch_idx losses_and_metrics_to_log[prefix + '_' + stage_str + 'mem_usage'] = self.process.memory_info().rss losses_and_metrics_to_log[stage_str + 'mem_usage'] = self.process.memory_info().rss # self._val_to_be_logged(losses_and_metrics_to_log) if self.logger is not None: self.log_dict(losses_and_metrics_to_log, on_step=False, on_epoch=True, sync_dist=True) # log per epoch # recommended if self.trainer.is_global_zero: if self.deca.config.val_vis_frequency > 0: if batch_idx % self.deca.config.val_vis_frequency == 0: uv_detail_normals = None if 'uv_detail_normals' in values.keys(): uv_detail_normals = values['uv_detail_normals'] visualizations, grid_image = self._visualization_checkpoint(values['verts'], values['trans_verts'], values['ops'], uv_detail_normals, values, batch_idx, stage_str[:-1], prefix) vis_dict = self._create_visualizations_to_log(stage_str[:-1], visualizations, values, batch_idx, indices=0, dataloader_idx=dataloader_idx) # image = Image(grid_image, caption="full visualization") # vis_dict[prefix + '_val_' + "visualization"] = image if isinstance(self.logger, WandbLogger): self.logger.log_metrics(vis_dict) return None def _get_logging_prefix(self): prefix = self.stage_name + str(self.mode.name).lower() return prefix def test_step(self, batch, batch_idx, dataloader_idx=None): """ Testing step override of pytorch lightning module. It makes the encoding, decoding passes, computes the loss and logs the losses/visualizations without gradient :param batch: Batch of images to encode. batch['image'] [batch_size, ring_size, 3, image_size, image_size]. For a training forward pass, additional corresponding data are necessery such as 'landmarks' and 'masks'. 
:batch_idx batch index """ prefix = self._get_logging_prefix() losses_and_metrics_to_log = {} # if dataloader_idx is not None: # dataloader_str = str(dataloader_idx) + "_" # else: dataloader_str = '' stage_str = dataloader_str + 'test_' with torch.no_grad(): training = False testing = True values = self.encode(batch, training=training) values = self.decode(values, training=training) if 'mask' in batch.keys(): losses_and_metrics = self.compute_loss(values, batch, training=False, testing=testing) # losses_and_metrics_to_log = {prefix + '_' + stage_str + key: value.detach().cpu() for key, value in losses_and_metrics.items()} losses_and_metrics_to_log = {prefix + '_' + stage_str + key: value.detach().cpu().item() for key, value in losses_and_metrics.items()} else: losses_and_metric = None # losses_and_metrics_to_log[prefix + '_' + stage_str + 'epoch'] = self.current_epoch # losses_and_metrics_to_log[prefix + '_' + stage_str + 'epoch'] = torch.tensor(self.current_epoch, device=self.device) # losses_and_metrics_to_log[prefix + '_' + stage_str + 'step'] = torch.tensor(self.global_step, device=self.device) # losses_and_metrics_to_log[prefix + '_' + stage_str + 'batch_idx'] = torch.tensor(batch_idx, device=self.device) # losses_and_metrics_to_log[stage_str + 'epoch'] = torch.tensor(self.current_epoch, device=self.device) # losses_and_metrics_to_log[stage_str + 'step'] = torch.tensor(self.global_step, device=self.device) # losses_and_metrics_to_log[stage_str + 'batch_idx'] = torch.tensor(batch_idx, device=self.device) losses_and_metrics_to_log[prefix + '_' + stage_str + 'epoch'] = self.current_epoch losses_and_metrics_to_log[prefix + '_' + stage_str + 'step'] = self.global_step losses_and_metrics_to_log[prefix + '_' + stage_str + 'batch_idx'] = batch_idx losses_and_metrics_to_log[prefix + '_' + stage_str + 'mem_usage'] = self.process.memory_info().rss losses_and_metrics_to_log[stage_str + 'epoch'] = self.current_epoch losses_and_metrics_to_log[stage_str + 'step'] = self.global_step losses_and_metrics_to_log[stage_str + 'batch_idx'] = batch_idx losses_and_metrics_to_log[stage_str + 'mem_usage'] = self.process.memory_info().rss if self.logger is not None: # self.logger.log_metrics(losses_and_metrics_to_log) self.log_dict(losses_and_metrics_to_log, sync_dist=True, on_step=False, on_epoch=True) # if self.global_step % 200 == 0: uv_detail_normals = None if 'uv_detail_normals' in values.keys(): uv_detail_normals = values['uv_detail_normals'] if self.deca.config.test_vis_frequency > 0: # Log visualizations every once in a while if batch_idx % self.deca.config.test_vis_frequency == 0: # if self.trainer.is_global_zero: visualizations, grid_image = self._visualization_checkpoint(values['verts'], values['trans_verts'], values['ops'], uv_detail_normals, values, self.global_step, stage_str[:-1], prefix) visdict = self._create_visualizations_to_log(stage_str[:-1], visualizations, values, batch_idx, indices=0, dataloader_idx=dataloader_idx) self.logger.log_metrics(visdict) return None @property def process(self): if not hasattr(self,"process_"): self.process_ = psutil.Process(os.getpid()) return self.process_ def training_step(self, batch, batch_idx, *args, **kwargs): #, debug=True): """ Training step override of pytorch lightning module. It makes the encoding, decoding passes, computes the loss and logs the losses/visualizations. :param batch: Batch of images to encode. batch['image'] [batch_size, ring_size, 3, image_size, image_size]. 
For a training forward pass, additional corresponding data are necessery such as 'landmarks' and 'masks'. :batch_idx batch index """ values = self.encode(batch, training=True) values = self.decode(values, training=True) losses_and_metrics = self.compute_loss(values, batch, training=True) uv_detail_normals = None if 'uv_detail_normals' in values.keys(): uv_detail_normals = values['uv_detail_normals'] # prefix = str(self.mode.name).lower() prefix = self._get_logging_prefix() # losses_and_metrics_to_log = {prefix + '_train_' + key: value.detach().cpu() for key, value in losses_and_metrics.items()} # losses_and_metrics_to_log = {prefix + '_train_' + key: value.detach() for key, value in losses_and_metrics.items()} losses_and_metrics_to_log = {prefix + '_train_' + key: value.detach().cpu().item() for key, value in losses_and_metrics.items()} # losses_and_metrics_to_log[prefix + '_train_' + 'epoch'] = torch.tensor(self.current_epoch, device=self.device) losses_and_metrics_to_log[prefix + '_train_' + 'epoch'] = self.current_epoch losses_and_metrics_to_log[prefix + '_train_' + 'step'] = self.global_step losses_and_metrics_to_log[prefix + '_train_' + 'batch_idx'] = batch_idx losses_and_metrics_to_log[prefix + '_' + "train_" + 'mem_usage'] = self.process.memory_info().rss # losses_and_metrics_to_log['train_' + 'epoch'] = torch.tensor(self.current_epoch, device=self.device) losses_and_metrics_to_log['train_' + 'epoch'] = self.current_epoch losses_and_metrics_to_log['train_' + 'step'] = self.global_step losses_and_metrics_to_log['train_' + 'batch_idx'] = batch_idx losses_and_metrics_to_log["train_" + 'mem_usage'] = self.process.memory_info().rss # log loss also without any prefix for a model checkpoint to track it losses_and_metrics_to_log['loss'] = losses_and_metrics_to_log[prefix + '_train_loss'] if self.logger is not None: self.log_dict(losses_and_metrics_to_log, on_step=False, on_epoch=True, sync_dist=True) # log per epoch, # recommended if self.deca.config.train_vis_frequency > 0: if self.global_step % self.deca.config.train_vis_frequency == 0: if self.trainer.is_global_zero: visualizations, grid_image = self._visualization_checkpoint(values['verts'], values['trans_verts'], values['ops'], uv_detail_normals, values, batch_idx, "train", prefix) visdict = self._create_visualizations_to_log('train', visualizations, values, batch_idx, indices=0) if isinstance(self.logger, WandbLogger): self.logger.log_metrics(visdict)#, step=self.global_step) # self.log_dict(visdict, sync_dist=True) # self.log_dict(losses_and_metrics_to_log, on_step=True, on_epoch=False) # log per step # self.log_dict(losses_and_metrics_to_log, on_step=True, on_epoch=True) # log per both # return losses_and_metrics return losses_and_metrics['loss'] ### STEP ENDS ARE PROBABLY NOT NECESSARY BUT KEEP AN EYE ON THEM IF MULI-GPU TRAINING DOESN'T WORK # def training_step_end(self, batch_parts): # return self._step_end(batch_parts) # # def validation_step_end(self, batch_parts): # return self._step_end(batch_parts) # # def _step_end(self, batch_parts): # # gpu_0_prediction = batch_parts.pred[0]['pred'] # # gpu_1_prediction = batch_parts.pred[1]['pred'] # N = len(batch_parts) # loss_dict = {} # for key in batch_parts[0]: # for i in range(N): # if key not in loss_dict.keys(): # loss_dict[key] = batch_parts[i] # else: # loss_dict[key] = batch_parts[i] # loss_dict[key] = loss_dict[key] / N # return loss_dict def vae_2_str(self, valence=None, arousal=None, affnet_expr=None, expr7=None, prefix=""): caption = "" if len(prefix) > 0: prefix += "_" if 
valence is not None and not np.isnan(valence).any():
            caption += prefix + "valence= %.03f\n" % valence
        if arousal is not None and not np.isnan(arousal).any():
            caption += prefix + "arousal= %.03f\n" % arousal
        if affnet_expr is not None and not np.isnan(affnet_expr).any():
caption += prefix + "expression= %s \n" % AffectNetExpressions(affnet_expr).name
14
2023-11-07 20:13:32+00:00
24k
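A note on the record above: its training/validation/test steps all build metric names as "<prefix>_<stage>_<key>" and log them once per epoch through log_dict. Below is a minimal, self-contained sketch of that logging scheme; the module, its loss, and the tensors are hypothetical, and only the name prefixing and the log_dict(..., on_step=False, on_epoch=True, sync_dist=True) call mirror the record.

import torch
import pytorch_lightning as pl

class ToyModule(pl.LightningModule):
    """Hypothetical module; only the metric-naming scheme follows the record."""

    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(4, 1)
        self.stage_name = "detail"  # stands in for self._get_logging_prefix()

    def _log_prefixed(self, losses, stage):
        # "<prefix>_<stage>_<key>", detached to plain floats before logging.
        to_log = {f"{self.stage_name}_{stage}_{k}": v.detach().cpu().item()
                  for k, v in losses.items()}
        # Also expose an unprefixed copy so checkpoint callbacks can track it.
        to_log[f"{stage}_loss"] = to_log[f"{self.stage_name}_{stage}_loss"]
        self.log_dict(to_log, on_step=False, on_epoch=True, sync_dist=True)

    def training_step(self, batch, batch_idx):
        loss = self.layer(batch).pow(2).mean()
        self._log_prefixed({"loss": loss}, stage="train")
        return loss

    def validation_step(self, batch, batch_idx):
        with torch.no_grad():
            loss = self.layer(batch).pow(2).mean()
        self._log_prefixed({"loss": loss}, stage="val")

    def configure_optimizers(self):
        return torch.optim.AdamW(self.parameters(), lr=1e-4)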
codefuse-ai/Collinear-Constrained-Attention
model/build_model.py
[ { "identifier": "get_model_params_num", "path": "utils/common_utils.py", "snippet": "def get_model_params_num(model):\n \"\"\"\n Get params number of the model\n Args:\n model: model(required)\n Returns:\n the number of parameters of model\n \"\"\"\n num = 0\n for _, p...
import os
import torch
import sys
import peft
import model.peft.modeling_peft  # noqa
import bitsandbytes as bnb  # noqa
import accelerate  # noqa
from utils.common_utils import get_model_params_num
from transformers import (  # noqa: E402
    CONFIG_MAPPING,
    AutoConfig,
    AutoModelForCausalLM,
    AutoTokenizer,
    PreTrainedTokenizerFast
)
from .gpt_neox.configuration_gpt_neox import GPTNeoXConfig
from .gpt_neox.modeling_gpt_neox import GPTNeoXForCausalLM
from .gpt_neox.tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
from .llama.configuration_llama import LlamaConfig
from .llama.modeling_llama import LlamaForCausalLM
from .llama.tokenization_llama import LlamaTokenizer
from .llama.tokenization_llama_fast import LlamaTokenizerFast
from torch.distributed.fsdp import (
    FullyShardedDataParallel as FSDP,
    StateDictType,
)
from utils.common_utils import print_rank_0, is_old_version
from tokenizer import build_tokenizer
from tokenizer.tokenizer import HFTokenizer
from peft.tuners.lora import LoraLayer
from model.peft.utils import prepare_model_for_kbit_training
from peft import (  # noqa
    LoraConfig,
    PrefixTuningConfig,
    PromptEncoderConfig,
    PromptEncoderReparameterizationType,
    PromptTuningConfig,
    PromptTuningInit,
    TaskType,
    get_peft_model
)
from model.peft.tuner import AdaLoraConfig
from transformers import BitsAndBytesConfig
from packaging import version
from .glm.tokenization_glm_deprecated import GLMChineseTokenizer
18,604
tokenizer.pad_token = "<|pad|>" tokenizer.sop_token = "<|endoftext|>" # 适配multi task dataset tokenizer.eop_token = "<|endoftext|>" tokenizer.eod_id = tokenizer.convert_tokens_to_ids(tokenizer.eod_token) tokenizer.pad_id = tokenizer.convert_tokens_to_ids(tokenizer.pad_token) print_rank_0(f'tokenizer {tokenizer.eod_token} id: {tokenizer.eod_id}') print_rank_0(f'tokenizer {tokenizer.pad_token} id: {tokenizer.pad_id}') else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script." "You can do it from another script, save it, and load it from here, using --tokenizer_path." ) if args.model_type == 'gpt_neox': auto_config = GPTNeoXConfig auto_model_class = GPTNeoXForCausalLM elif args.model_type == 'llama': auto_config = LlamaConfig auto_model_class = LlamaForCausalLM elif args.model_type == 'glm': auto_config = GLMConfig auto_model_class = GLMForConditionalGeneration # else: # auto_config = AutoConfig # auto_model_class = AutoModelForCausalLM # with init_empty_weights_with_disk_offload(ignore_tie_weights=False): if args.pretrained_model_path: logger.info("Training model from checkpoint") config = auto_config.from_pretrained(args.pretrained_model_path) if args.peft_type != "qlora": # config = auto_config.from_pretrained(args.pretrained_model_path) # model = auto_model_class.from_pretrained(args.pretrained_model_path, trust_remote_code=True, device_map='auto').cuda() model = auto_model_class.from_pretrained(args.pretrained_model_path, trust_remote_code=True).cuda() else: if BitsAndBytesConfig is None: raise ImportError( "To use qlora, please upgrade transformers to 4.30.1 by `pip install -U transformers==4.30.1`" ) if bnb is None: raise ImportError("To use qlora, please install bitsandbytes by `pip install -U bitsandbytes==0.39.0`") try: except ImportError: raise ImportError("To use qlora, please install accelerate by `pip install -U accelerate==0.20.3`") peft_version = version.parse(peft.__version__) if peft_version < version.parse("0.4.0"): raise RuntimeError(f"Qlora needs peft>=0.4.0 but current peft version is {peft_version}") if args.bits not in [4, 8]: raise ValueError(f"Qlora only support 4 bits or 8 bits but got {args.bits} bits.") if args.bf16: torch_dtype = torch.bfloat16 else: torch_dtype = torch.float32 if args.fp16: compute_dtype = torch.float16 elif args.bf16: compute_dtype = torch.bfloat16 else: compute_dtype = torch.float32 model = auto_model_class.from_pretrained( # noqa args.pretrained_model_path, trust_remote_code=True, load_in_4bit=args.bits == 4, load_in_8bit=args.bits == 8, torch_dtype=torch_dtype, quantization_config=BitsAndBytesConfig( load_in_4bit=args.bits == 4, load_in_8bit=args.bits == 8, llm_int8_threshold=6.0, llm_int8_has_fp16_weight=False, bnb_4bit_compute_dtype=compute_dtype, bnb_4bit_use_double_quant=True, bnb_4bit_quant_type="nf4", ) ) else: logger.info("Training model from scratch") if args.model_type == 'gpt_neox': config = GPTNeoXConfig.from_json_file(args.config_path + '/config.json') # model = AutoModelForCausalLM.from_config(config, trust_remote_code=args.trust_remote_code) model = GPTNeoXForCausalLM._from_config(config) elif args.model_type == 'llama': config = LlamaConfig.from_json_file(args.config_path + '/config.json') # llama use xformers if args.use_xformers: config.use_xformers = True model = LlamaForCausalLM._from_config(config) elif args.model_type == 'glm': config = GLMConfig.from_json_file(args.config_path + '/config.json') model = GLMForConditionalGeneration._from_config(config) else: config = 
AutoConfig.from_json_file(args.config_path + '/config.json') model = AutoModelForCausalLM.from_config(config, trust_remote_code=args.trust_remote_code) # We resize the embeddings only when necessary to avoid index errors. If you are creating a model from scratch # on a small vocab and want a smaller embedding size, remove this test. if args.model_type not in ['glm']: embedding_size = model.get_input_embeddings().weight.shape[0] print_rank_0('embedding size: ' + str(embedding_size)) print_rank_0('vocab size: ' + str(tokenizer.vocab_size)) if tokenizer.vocab_size > embedding_size: model.resize_token_embeddings(tokenizer.vocab_size) print_rank_0('resize embedding size: ' + str(model.get_input_embeddings().weight.shape[0])) print_rank_0(config) num_params = get_model_params_num(model) print_rank_0("num_params of this model:", num_params) args.total_model_param = num_params args.hidden_size = config.hidden_size args.num_hidden_layers = config.num_hidden_layers args.vocab_size = tokenizer.vocab_size print_rank_0(f'hidden size: {args.hidden_size}') print_rank_0(f'num hidden layers: {args.num_hidden_layers}') print_rank_0(f'vocab size: {args.vocab_size}') if args.peft_type: if args.peft_type in ['lora', 'qlora']: target_modules = None if args.peft_type == "qlora":
# coding=utf-8 # Copyright (c) 2023 Ant Group. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. sys.path.append("..") # from .glm.modeling_glm import GLMForConditionalGeneration # from .glm.configuration_glm import GLMConfig # from .glm.tokenization_glm import GLMTokenizer try: except ImportError: BitsAndBytesConfig = None try: except ImportError: bnb = None def find_all_linear_names(args, model): cls = bnb.nn.Linear4bit if args.bits == 4 else (bnb.nn.Linear8bitLt if args.bits == 8 else torch.nn.Linear) lora_module_names = set() for name, module in model.named_modules(): if isinstance(module, cls): names = name.split('.') lora_module_names.add(names[0] if len(names) == 1 else names[-1]) if 'lm_head' in lora_module_names: # needed for 16-bit lora_module_names.remove('lm_head') return list(lora_module_names) def setup_model(args, logger, use_cache=False): # Load pretrained model and tokenizer if args.pretrained_model_path: # TODO: 实现from pretrained读tokenizer if args.model_type == 'gpt_neox': # if args.tokenizer_type: # tokenizer = build_tokenizer(args) # tokenizer.eod_token = "<|endoftext|>" # tokenizer.pad_token = "<|pad|>" # # tokenizer.sop_token = "<|endoftext|>" # 适配multi task dataset # # tokenizer.eop_token = "<|endoftext|>" # tokenizer.eod_id = tokenizer.tokenize(tokenizer.eod_token)[0] # tokenizer.pad_id = tokenizer.tokenize(tokenizer.pad_token)[0] # else: tokenizer = GPTNeoXTokenizerFast.from_pretrained(args.pretrained_model_path) # tokenizer = PreTrainedTokenizerFast(tokenizer_file=args.vocab_file) tokenizer.eod_token = "<|endoftext|>" tokenizer.pad_token = "<|pad|>" tokenizer.sop_token = "<|endoftext|>" # 适配multi task dataset tokenizer.eop_token = "<|endoftext|>" tokenizer.eod_id = tokenizer.convert_tokens_to_ids(tokenizer.eod_token) tokenizer.pad_id = tokenizer.convert_tokens_to_ids(tokenizer.pad_token) print_rank_0(f'tokenizer {tokenizer.eod_token} id: {tokenizer.eod_id}') print_rank_0(f'tokenizer {tokenizer.pad_token} id: {tokenizer.pad_id}') elif args.model_type == 'llama': tokenizer = LlamaTokenizerFast.from_pretrained(args.pretrained_model_path) # tokenizer = AutoTokenizer.from_pretrained( # args.pretrained_model_path, # trust_remote_code=True, # ) tokenizer.eod_token = "</s>" tokenizer.eos_token = "</s>" tokenizer.bos_token = "<s>" tokenizer.pad_token = "[PAD]" tokenizer.unk_token = "<unk>" tokenizer.sop_token = "</s>" # 适配multi task dataset tokenizer.eop_token = "</s>" tokenizer.eod_id = tokenizer.convert_tokens_to_ids(tokenizer.eod_token) tokenizer.eos_id = tokenizer.convert_tokens_to_ids(tokenizer.eos_token) tokenizer.bos_id = tokenizer.convert_tokens_to_ids(tokenizer.bos_token) tokenizer.pad_id = tokenizer.convert_tokens_to_ids(tokenizer.pad_token) tokenizer.unk_id = tokenizer.convert_tokens_to_ids(tokenizer.unk_token) print_rank_0(f'tokenizer {tokenizer.eod_token} id: {tokenizer.eod_id}') print_rank_0(f'tokenizer {tokenizer.eos_token} id: {tokenizer.eos_id}') print_rank_0(f'tokenizer {tokenizer.bos_token} id: {tokenizer.bos_id}') print_rank_0(f'tokenizer 
{tokenizer.pad_token} id: {tokenizer.pad_id}') print_rank_0(f'tokenizer {tokenizer.unk_token} id: {tokenizer.unk_id}') elif args.model_type == 'glm': if is_old_version(args.pretrained_model_path): tokenizer = GLMChineseTokenizer.from_pretrained(args.pretrained_model_path) else: tokenizer = GLMTokenizer.from_pretrained(args.pretrained_model_path) elif args.train_mode == 'sst': # tokenizer = build_tokenizer(args) tokenizer = PreTrainedTokenizerFast(tokenizer_file=args.vocab_file) tokenizer.eod_token = "<|endoftext|>" tokenizer.pad_token = "<|pad|>" tokenizer.sop_token = "<|endoftext|>" # 适配multi task dataset tokenizer.eop_token = "<|endoftext|>" tokenizer.eod_id = tokenizer.convert_tokens_to_ids(tokenizer.eod_token) tokenizer.pad_id = tokenizer.convert_tokens_to_ids(tokenizer.pad_token) print_rank_0(f'tokenizer {tokenizer.eod_token} id: {tokenizer.eod_id}') print_rank_0(f'tokenizer {tokenizer.pad_token} id: {tokenizer.pad_id}') else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script." "You can do it from another script, save it, and load it from here, using --tokenizer_path." ) if args.model_type == 'gpt_neox': auto_config = GPTNeoXConfig auto_model_class = GPTNeoXForCausalLM elif args.model_type == 'llama': auto_config = LlamaConfig auto_model_class = LlamaForCausalLM elif args.model_type == 'glm': auto_config = GLMConfig auto_model_class = GLMForConditionalGeneration # else: # auto_config = AutoConfig # auto_model_class = AutoModelForCausalLM # with init_empty_weights_with_disk_offload(ignore_tie_weights=False): if args.pretrained_model_path: logger.info("Training model from checkpoint") config = auto_config.from_pretrained(args.pretrained_model_path) if args.peft_type != "qlora": # config = auto_config.from_pretrained(args.pretrained_model_path) # model = auto_model_class.from_pretrained(args.pretrained_model_path, trust_remote_code=True, device_map='auto').cuda() model = auto_model_class.from_pretrained(args.pretrained_model_path, trust_remote_code=True).cuda() else: if BitsAndBytesConfig is None: raise ImportError( "To use qlora, please upgrade transformers to 4.30.1 by `pip install -U transformers==4.30.1`" ) if bnb is None: raise ImportError("To use qlora, please install bitsandbytes by `pip install -U bitsandbytes==0.39.0`") try: except ImportError: raise ImportError("To use qlora, please install accelerate by `pip install -U accelerate==0.20.3`") peft_version = version.parse(peft.__version__) if peft_version < version.parse("0.4.0"): raise RuntimeError(f"Qlora needs peft>=0.4.0 but current peft version is {peft_version}") if args.bits not in [4, 8]: raise ValueError(f"Qlora only support 4 bits or 8 bits but got {args.bits} bits.") if args.bf16: torch_dtype = torch.bfloat16 else: torch_dtype = torch.float32 if args.fp16: compute_dtype = torch.float16 elif args.bf16: compute_dtype = torch.bfloat16 else: compute_dtype = torch.float32 model = auto_model_class.from_pretrained( # noqa args.pretrained_model_path, trust_remote_code=True, load_in_4bit=args.bits == 4, load_in_8bit=args.bits == 8, torch_dtype=torch_dtype, quantization_config=BitsAndBytesConfig( load_in_4bit=args.bits == 4, load_in_8bit=args.bits == 8, llm_int8_threshold=6.0, llm_int8_has_fp16_weight=False, bnb_4bit_compute_dtype=compute_dtype, bnb_4bit_use_double_quant=True, bnb_4bit_quant_type="nf4", ) ) else: logger.info("Training model from scratch") if args.model_type == 'gpt_neox': config = GPTNeoXConfig.from_json_file(args.config_path + '/config.json') # model = 
AutoModelForCausalLM.from_config(config, trust_remote_code=args.trust_remote_code) model = GPTNeoXForCausalLM._from_config(config) elif args.model_type == 'llama': config = LlamaConfig.from_json_file(args.config_path + '/config.json') # llama use xformers if args.use_xformers: config.use_xformers = True model = LlamaForCausalLM._from_config(config) elif args.model_type == 'glm': config = GLMConfig.from_json_file(args.config_path + '/config.json') model = GLMForConditionalGeneration._from_config(config) else: config = AutoConfig.from_json_file(args.config_path + '/config.json') model = AutoModelForCausalLM.from_config(config, trust_remote_code=args.trust_remote_code) # We resize the embeddings only when necessary to avoid index errors. If you are creating a model from scratch # on a small vocab and want a smaller embedding size, remove this test. if args.model_type not in ['glm']: embedding_size = model.get_input_embeddings().weight.shape[0] print_rank_0('embedding size: ' + str(embedding_size)) print_rank_0('vocab size: ' + str(tokenizer.vocab_size)) if tokenizer.vocab_size > embedding_size: model.resize_token_embeddings(tokenizer.vocab_size) print_rank_0('resize embedding size: ' + str(model.get_input_embeddings().weight.shape[0])) print_rank_0(config) num_params = get_model_params_num(model) print_rank_0("num_params of this model:", num_params) args.total_model_param = num_params args.hidden_size = config.hidden_size args.num_hidden_layers = config.num_hidden_layers args.vocab_size = tokenizer.vocab_size print_rank_0(f'hidden size: {args.hidden_size}') print_rank_0(f'num hidden layers: {args.num_hidden_layers}') print_rank_0(f'vocab size: {args.vocab_size}') if args.peft_type: if args.peft_type in ['lora', 'qlora']: target_modules = None if args.peft_type == "qlora":
model = prepare_model_for_kbit_training(model, False)
12
2023-11-02 01:37:01+00:00
24k
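The qlora branch in setup_model above loads the base model in 4-bit through BitsAndBytesConfig before attaching LoRA adapters. A minimal sketch of that loading pattern, assuming a placeholder model id ("some-org/some-causal-lm") and the stock transformers/peft APIs rather than the repo's local copies:

import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training

# 4-bit NF4 quantization with double quantization, as in the record.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
)
model = AutoModelForCausalLM.from_pretrained(
    "some-org/some-causal-lm",  # placeholder id, not from the record
    quantization_config=bnb_config,
)
model = prepare_model_for_kbit_training(model)
model = get_peft_model(model, LoraConfig(r=16, lora_alpha=32,
                                         lora_dropout=0.05,
                                         task_type="CAUSAL_LM"))
model.print_trainable_parameters()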
bytedance/cryostar
projects/star/train_atom.py
[ { "identifier": "SpatialGridTranslate", "path": "cryostar/utils/transforms.py", "snippet": "class SpatialGridTranslate(torch.nn.Module):\n\n def __init__(self, D, device=None) -> None:\n super().__init__()\n self.D = D\n # yapf: disable\n coords = torch.stack(torch.meshgri...
import os.path as osp
import warnings
import collections
import einops
import numpy as np
import biotite.structure as struc
import torch
import lightning.pytorch as pl
from pathlib import Path
from copy import deepcopy
from torch import nn
from torch import optim
from torch.utils.data import DataLoader
from torchinfo import summary
from lightning.fabric.utilities.warnings import PossibleUserWarning
from lightning.pytorch.utilities import rank_zero_only
from lightning.pytorch.strategies import DDPStrategy
from mmengine import mkdir_or_exist
from cryostar.utils.transforms import SpatialGridTranslate
from cryostar.utils.dataio import StarfileDataSet, StarfileDatasetConfig, Mask
from cryostar.utils.ctf_utils import CTFRelion, CTFCryoDRGN
from cryostar.utils.losses import calc_cor_loss, calc_kl_loss
from cryostar.utils.misc import log_to_current, \
    pl_init_exp, pretty_dict, set_seed, warmup
from cryostar.utils.pdb_tools import bt_save_pdb
from cryostar.gmm.gmm import EMAN2Grid, batch_projection, Gaussian
from cryostar.gmm.deformer import E3Deformer, NMADeformer
from cryostar.utils.fft_utils import primal_to_fourier_2d, fourier_to_primal_2d
from cryostar.utils.polymer import Polymer, NT_ATOMS, AA_ATOMS
from cryostar.utils.dist_loss import (find_quaint_cutoff_pairs, find_range_cutoff_pairs,
                                      find_continuous_pairs, calc_dist_by_pair_indices,
                                      remove_duplicate_pairs, filter_same_chain_pairs,
                                      DistLoss)
from cryostar.utils.latent_space_utils import get_nearest_point, cluster_kmeans, run_pca, get_pc_traj, run_umap
from cryostar.utils.vis_utils import plot_z_dist, save_tensor_image
from cryostar.utils.pl_utils import merge_step_outputs, squeeze_dict_outputs_1st_dim, \
    filter_outputs_by_indices, get_1st_unique_indices
from miscs import calc_pair_dist_loss, calc_clash_loss, low_pass_mask2d, VAE, infer_ctf_params_from_config
16,265
# nt tmp_mask = np.isin(meta.atom_name, NT_ATOMS) indices_in_pdb = np.nonzero(tmp_mask)[0] nt_cutoff_pairs = find_quaint_cutoff_pairs(meta.coord[tmp_mask], meta.chain_id[tmp_mask], meta.res_id[tmp_mask], cfg.loss.nt_intra_chain_cutoff, cfg.loss.nt_inter_chain_cutoff, cfg.loss.nt_intra_chain_res_bound) nt_cutoff_pairs = indices_in_pdb[nt_cutoff_pairs] log_to_current(f"{len(nt_cutoff_pairs)} NT pairs") cutoff_pairs = np.vstack((aa_cutoff_pairs, nt_cutoff_pairs)) else: cutoff_pairs = find_quaint_cutoff_pairs(meta.coord, meta.chain_id, meta.res_id, cfg.loss.intra_chain_cutoff, cfg.loss.inter_chain_cutoff, cfg.loss.intra_chain_res_bound) cutoff_pairs = remove_duplicate_pairs(cutoff_pairs, connect_pairs) if cfg.loss.sse_weight != 0.0: log_to_current("use pseduo `sse` by building spatial/sequential edges") sse_pairs = find_quaint_cutoff_pairs(meta.coord, meta.chain_id, meta.res_id, cfg.loss.intra_chain_cutoff, 0, 20) cutoff_pairs = remove_duplicate_pairs(cutoff_pairs, sse_pairs) clash_pairs = find_range_cutoff_pairs(meta.coord, cfg.loss.clash_min_cutoff) clash_pairs = remove_duplicate_pairs(clash_pairs, connect_pairs) if len(connect_pairs) > 0: self.register_buffer("connect_pairs", torch.from_numpy(connect_pairs).long()) dists = calc_dist_by_pair_indices(meta.coord, connect_pairs) self.register_buffer("connect_dists", torch.from_numpy(dists).float()) log_to_current(f"found {len(connect_pairs)} connect_pairs") else: log_to_current("connect_pairs is empty") if cfg.loss.sse_weight != 0.0: self.register_buffer("sse_pairs", torch.from_numpy(sse_pairs).long()) dists = calc_dist_by_pair_indices(meta.coord, sse_pairs) self.register_buffer("sse_dists", torch.from_numpy(dists).float()) log_to_current(f"found {len(sse_pairs)} sse_pairs") if len(cutoff_pairs) > 0: dists = calc_dist_by_pair_indices(meta.coord, cutoff_pairs) log_to_current(f"found {len(cutoff_pairs)} cutoff_pairs") self.dist_loss_fn = DistLoss(cutoff_pairs, dists, reduction=None) # for chain-wise dropout cutoff_chain_mask = filter_same_chain_pairs(cutoff_pairs, meta.chain_id) self.register_buffer("cutoff_chain_mask", torch.from_numpy(cutoff_chain_mask)) else: log_to_current("cutoff_pairs is empty") if len(clash_pairs) > 0: self.register_buffer("clash_pairs", torch.from_numpy(clash_pairs).long()) log_to_current(f"found {len(clash_pairs)} clash_pairs") else: log_to_current("clash_pairs is empty") # low-pass filtering if hasattr(cfg.data_process, "low_pass_bandwidth"): log_to_current(f"Use low-pass filtering w/ {cfg.data_process.low_pass_bandwidth} A") lp_mask2d = low_pass_mask2d(cfg.data_process.down_side_shape, cfg.data_process.down_apix, cfg.data_process.low_pass_bandwidth) self.register_buffer("lp_mask2d", torch.from_numpy(lp_mask2d).float()) else: self.lp_mask2d = None # self.mask = Mask(cfg.data_process.down_side_shape, rad=cfg.loss.mask_rad_for_image_loss) # for projection grid = EMAN2Grid(side_shape=cfg.data_process.down_side_shape, voxel_size=cfg.data_process.down_apix) self.grid = grid ctf_params = infer_ctf_params_from_config(cfg) if cfg.model.ctf == "v1": self.ctf = CTFRelion(**ctf_params, num_particles=len(dataset)) log_to_current("We will deprecate `model.ctf=v1` in a future version, use `model.ctf=v2` instead.") elif cfg.model.ctf == "v2": self.ctf = CTFCryoDRGN(**ctf_params, num_particles=len(dataset)) else: raise NotImplementedError log_to_current(ctf_params) # translate image helper self.translator = SpatialGridTranslate(D=cfg.data_process.down_side_shape, device=self.device) self.apix = self.cfg.data_process.down_apix # cache 
self.validation_step_outputs = [] self.stored_metrics = {} self.history_saved_dirs = [] if getattr(self.cfg.extra_input_data_attr, "ckpt_path", None) is not None: log_to_current(f"load checkpoint from {self.cfg.extra_input_data_attr.ckpt_path}") self._load_ckpt(self.cfg.extra_input_data_attr.ckpt_path) def _save_ckpt(self, ckpt_path): torch.save( { "model": self.model.state_dict(), "gmm_sigmas": self.gmm_sigmas.data, "gmm_amps": self.gmm_amps.data }, ckpt_path) def _load_ckpt(self, ckpt_path): state_dict = torch.load(ckpt_path, map_location=self.device) self.model.load_state_dict(state_dict["model"]) if self.cfg.gmm.tunable: self.gmm_sigmas.data = state_dict["gmm_sigmas"] self.gmm_amps.data = state_dict["gmm_amps"] def _get_save_dir(self): save_dir = osp.join(self.cfg.work_dir, f"{self.current_epoch:04d}_{self.global_step:07d}") mkdir_or_exist(save_dir) return save_dir def low_pass_images(self, images): f_images = primal_to_fourier_2d(images) f_images = f_images * self.lp_mask2d
# other # avoid num_workers set as cpu_count warning warnings.simplefilter("ignore", PossibleUserWarning) # only log to rank_zero, comment this for debugging log_to_current = rank_zero_only(log_to_current) TASK_NAME = "atom" def prepare_images(images: torch.FloatTensor, space: str): assert space in ("real", "fourier") if space == "real": model_input = einops.rearrange(images, "b 1 ny nx -> b (1 ny nx)") else: fimages = primal_to_fourier_2d(images) model_input = einops.rearrange(torch.view_as_real(fimages), "b 1 ny nx c2 -> b (1 ny nx c2)", c2=2) return model_input class InitTask(pl.LightningModule): def __init__(self, em_module): super().__init__() self.cfg = em_module.cfg self.em_module = em_module self.loss_deque = collections.deque([ 10, ], maxlen=20) def on_train_batch_end(self, outputs, batch, batch_idx): self.loss_deque.append(outputs['loss'].item()) if np.mean(self.loss_deque) < 1e-3: self.trainer.should_stop = True # update all process status self.trainer.should_stop = self.trainer.strategy.broadcast(self.trainer.should_stop) def training_step(self, batch, batch_idx): images = batch["proj"] idxes = batch["idx"] rot_mats, trans_mats = self.em_module.get_batch_pose(batch) pred_deformation, mu, log_var = self.em_module.model(prepare_images(images, self.cfg.model.input_space), idxes, rot_mats) shift_loss = torch.mean(torch.pow(pred_deformation.flatten(start_dim=-2), 2)) loss = shift_loss if self.global_step % self.cfg.runner.log_every_n_step == 0: log_to_current(f"loss {loss.item()}") return loss def configure_optimizers(self): return optim.AdamW(self.em_module.model.parameters(), lr=1e-4) def on_fit_end(self): log_to_current(f"Init finished with loss {np.mean(self.loss_deque)}") class CryoEMTask(pl.LightningModule): def __init__(self, cfg, dataset): super().__init__() cfg = deepcopy(cfg) self.cfg = cfg # Define GMM meta = Polymer.from_pdb(cfg.dataset_attr.ref_pdb_path) log_to_current(f"Load reference structure from {cfg.dataset_attr.ref_pdb_path}") # for save self.template_pdb = meta.to_atom_arr() log_to_current(f"Protein contains {len(meta)} atoms, " f"{meta.num_amino_acids} amino acids, " f"{meta.num_nucleotides} nucleotides, " f"{meta.num_chains} chains.") # ref ref_centers = torch.from_numpy(meta.coord).float() ref_amps = torch.from_numpy(meta.num_electron).float() ref_sigmas = torch.ones_like(ref_amps) ref_sigmas.fill_(2.) 
log_to_current(f"1st GMM blob amplitude {ref_amps[0].item()}, sigma {ref_sigmas[0].item()}") num_pts = len(meta) log_to_current(f"Reference structure has {num_pts} atom coordinates") # tunable params # gmm self.register_buffer("gmm_centers", ref_centers) if cfg.gmm.tunable: log_to_current("Set GMM sigmas, amplitudes tunable") self.register_parameter("gmm_sigmas", nn.Parameter(ref_sigmas)) self.register_parameter("gmm_amps", nn.Parameter(ref_amps)) else: self.register_buffer("gmm_sigmas", ref_sigmas) self.register_buffer("gmm_amps", ref_amps) nma_modes = None if (hasattr(self.cfg.extra_input_data_attr, "nma_path") and self.cfg.extra_input_data_attr.nma_path not in ["", None]): nma_modes = torch.tensor(np.load(self.cfg.extra_input_data_attr.nma_path), dtype=torch.float32) log_to_current(f"Load NMA coefficients from {self.cfg.extra_input_data_attr.nma_path}, " f"whose shape is {nma_modes.shape}") # model if cfg.model.input_space == "fourier": in_dim = 2 * cfg.data_process.down_side_shape ** 2 elif cfg.model.input_space == "real": in_dim = cfg.data_process.down_side_shape ** 2 else: raise NotImplementedError self.model = VAE(in_dim=in_dim, out_dim=num_pts * 3 if nma_modes is None else 6 + nma_modes.shape[1], **cfg.model.model_cfg) log_to_current('Model summary:\n' + str(summary(self.model, input_size=[(1, in_dim), (1,)], verbose=0))) if nma_modes is None: self.deformer = E3Deformer() else: self.deformer = NMADeformer(nma_modes) # loss or regularization's preparation # dist loss connect_pairs = find_continuous_pairs(meta.chain_id, meta.res_id, meta.atom_name) if cfg.extra_input_data_attr.use_domain: log_to_current("use domain instead of chain!") domain_id = np.load(cfg.extra_input_data_attr.domain_path) cutoff_pairs = find_quaint_cutoff_pairs(meta.coord, domain_id, meta.res_id, cfg.loss.intra_chain_cutoff, cfg.loss.inter_chain_cutoff, cfg.loss.intra_chain_res_bound) else: # deal with RNA/DNA if np.sum(np.isin(meta.atom_name, NT_ATOMS)): # aa tmp_mask = np.isin(meta.atom_name, AA_ATOMS) indices_in_pdb = np.nonzero(tmp_mask)[0] aa_cutoff_pairs = find_quaint_cutoff_pairs(meta.coord[tmp_mask], meta.chain_id[tmp_mask], meta.res_id[tmp_mask], cfg.loss.intra_chain_cutoff, cfg.loss.inter_chain_cutoff, cfg.loss.intra_chain_res_bound) aa_cutoff_pairs = indices_in_pdb[aa_cutoff_pairs] log_to_current(f"{len(aa_cutoff_pairs)} AA pairs") # nt tmp_mask = np.isin(meta.atom_name, NT_ATOMS) indices_in_pdb = np.nonzero(tmp_mask)[0] nt_cutoff_pairs = find_quaint_cutoff_pairs(meta.coord[tmp_mask], meta.chain_id[tmp_mask], meta.res_id[tmp_mask], cfg.loss.nt_intra_chain_cutoff, cfg.loss.nt_inter_chain_cutoff, cfg.loss.nt_intra_chain_res_bound) nt_cutoff_pairs = indices_in_pdb[nt_cutoff_pairs] log_to_current(f"{len(nt_cutoff_pairs)} NT pairs") cutoff_pairs = np.vstack((aa_cutoff_pairs, nt_cutoff_pairs)) else: cutoff_pairs = find_quaint_cutoff_pairs(meta.coord, meta.chain_id, meta.res_id, cfg.loss.intra_chain_cutoff, cfg.loss.inter_chain_cutoff, cfg.loss.intra_chain_res_bound) cutoff_pairs = remove_duplicate_pairs(cutoff_pairs, connect_pairs) if cfg.loss.sse_weight != 0.0: log_to_current("use pseduo `sse` by building spatial/sequential edges") sse_pairs = find_quaint_cutoff_pairs(meta.coord, meta.chain_id, meta.res_id, cfg.loss.intra_chain_cutoff, 0, 20) cutoff_pairs = remove_duplicate_pairs(cutoff_pairs, sse_pairs) clash_pairs = find_range_cutoff_pairs(meta.coord, cfg.loss.clash_min_cutoff) clash_pairs = remove_duplicate_pairs(clash_pairs, connect_pairs) if len(connect_pairs) > 0: self.register_buffer("connect_pairs", 
torch.from_numpy(connect_pairs).long()) dists = calc_dist_by_pair_indices(meta.coord, connect_pairs) self.register_buffer("connect_dists", torch.from_numpy(dists).float()) log_to_current(f"found {len(connect_pairs)} connect_pairs") else: log_to_current("connect_pairs is empty") if cfg.loss.sse_weight != 0.0: self.register_buffer("sse_pairs", torch.from_numpy(sse_pairs).long()) dists = calc_dist_by_pair_indices(meta.coord, sse_pairs) self.register_buffer("sse_dists", torch.from_numpy(dists).float()) log_to_current(f"found {len(sse_pairs)} sse_pairs") if len(cutoff_pairs) > 0: dists = calc_dist_by_pair_indices(meta.coord, cutoff_pairs) log_to_current(f"found {len(cutoff_pairs)} cutoff_pairs") self.dist_loss_fn = DistLoss(cutoff_pairs, dists, reduction=None) # for chain-wise dropout cutoff_chain_mask = filter_same_chain_pairs(cutoff_pairs, meta.chain_id) self.register_buffer("cutoff_chain_mask", torch.from_numpy(cutoff_chain_mask)) else: log_to_current("cutoff_pairs is empty") if len(clash_pairs) > 0: self.register_buffer("clash_pairs", torch.from_numpy(clash_pairs).long()) log_to_current(f"found {len(clash_pairs)} clash_pairs") else: log_to_current("clash_pairs is empty") # low-pass filtering if hasattr(cfg.data_process, "low_pass_bandwidth"): log_to_current(f"Use low-pass filtering w/ {cfg.data_process.low_pass_bandwidth} A") lp_mask2d = low_pass_mask2d(cfg.data_process.down_side_shape, cfg.data_process.down_apix, cfg.data_process.low_pass_bandwidth) self.register_buffer("lp_mask2d", torch.from_numpy(lp_mask2d).float()) else: self.lp_mask2d = None # self.mask = Mask(cfg.data_process.down_side_shape, rad=cfg.loss.mask_rad_for_image_loss) # for projection grid = EMAN2Grid(side_shape=cfg.data_process.down_side_shape, voxel_size=cfg.data_process.down_apix) self.grid = grid ctf_params = infer_ctf_params_from_config(cfg) if cfg.model.ctf == "v1": self.ctf = CTFRelion(**ctf_params, num_particles=len(dataset)) log_to_current("We will deprecate `model.ctf=v1` in a future version, use `model.ctf=v2` instead.") elif cfg.model.ctf == "v2": self.ctf = CTFCryoDRGN(**ctf_params, num_particles=len(dataset)) else: raise NotImplementedError log_to_current(ctf_params) # translate image helper self.translator = SpatialGridTranslate(D=cfg.data_process.down_side_shape, device=self.device) self.apix = self.cfg.data_process.down_apix # cache self.validation_step_outputs = [] self.stored_metrics = {} self.history_saved_dirs = [] if getattr(self.cfg.extra_input_data_attr, "ckpt_path", None) is not None: log_to_current(f"load checkpoint from {self.cfg.extra_input_data_attr.ckpt_path}") self._load_ckpt(self.cfg.extra_input_data_attr.ckpt_path) def _save_ckpt(self, ckpt_path): torch.save( { "model": self.model.state_dict(), "gmm_sigmas": self.gmm_sigmas.data, "gmm_amps": self.gmm_amps.data }, ckpt_path) def _load_ckpt(self, ckpt_path): state_dict = torch.load(ckpt_path, map_location=self.device) self.model.load_state_dict(state_dict["model"]) if self.cfg.gmm.tunable: self.gmm_sigmas.data = state_dict["gmm_sigmas"] self.gmm_amps.data = state_dict["gmm_amps"] def _get_save_dir(self): save_dir = osp.join(self.cfg.work_dir, f"{self.current_epoch:04d}_{self.global_step:07d}") mkdir_or_exist(save_dir) return save_dir def low_pass_images(self, images): f_images = primal_to_fourier_2d(images) f_images = f_images * self.lp_mask2d
images = fourier_to_primal_2d(f_images).real
16
2023-11-06 07:15:26+00:00
24k
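CryoEMTask.__init__ in the record above switches between register_parameter and register_buffer depending on cfg.gmm.tunable, so the GMM sigmas and amplitudes are either optimized or held fixed. A minimal sketch of that choice, assuming a hypothetical module name and sizes:

import torch
from torch import nn

class GaussianBlobs(nn.Module):
    """Hypothetical module; mirrors the tunable-vs-fixed switch in the record."""

    def __init__(self, num_pts, tunable):
        super().__init__()
        sigmas = torch.full((num_pts,), 2.0)
        amps = torch.ones(num_pts)
        if tunable:
            # Trainable: appears in parameters() and receives gradients.
            self.register_parameter("sigmas", nn.Parameter(sigmas))
            self.register_parameter("amps", nn.Parameter(amps))
        else:
            # Fixed: follows .to(device) and is saved in state_dict,
            # but the optimizer never touches it.
            self.register_buffer("sigmas", sigmas)
            self.register_buffer("amps", amps)

blobs = GaussianBlobs(num_pts=8, tunable=True)
print([name for name, _ in blobs.named_parameters()])  # ['sigmas', 'amps']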
KAIST-AILab/palr
train.py
[ { "identifier": "BC", "path": "imitation/bc.py", "snippet": "class BC(nn.Module):\n def __init__(self, policy, env, best_policy=None,\n replay_buffer=None, replay_buffer_valid=None, seed=0, \n device='cpu', lr=3e-4, envname=None, wandb=None, save_policy_path=None, \n ...
import os
import wandb
import envs
import d4rl
import gym
import torch
from imitation.bc import BC
from imitation.rap import RAP
from imitation.fca import FCA
from imitation.mine import MINE_BC
from imitation.palr import PALR
from argparse import ArgumentParser
from itertools import product
from core.policy import TanhGaussianPolicyWithEmbedding, TanhGaussianRAPPolicy
from core.replay_buffer import EnvReplayBuffer
from core.preprocess import preprocess_dataset_with_prev_actions, data_select_num_transitions
from rlkit.envs.wrappers import NormalizedBoxEnv
20,108
replay_buffer_valid.add_path(valid_data) if configs['standardize']: obs_mean, obs_std, act_mean, act_std = replay_buffer.calculate_statistics() replay_buffer_valid.set_statistics(obs_mean, obs_std, act_mean, act_std) # to use wandb, initialize here, e.g. # wandb.init(project='palr', dir=wandb_dir, config=configs) wandb = None if 'BC' in configs['algorithm']: embedding_dim = configs['layer_sizes'][1] policy = TanhGaussianPolicyWithEmbedding( obs_dim=obs_dim * stacksize, action_dim=action_dim, embedding_hidden_size=configs['layer_sizes'][0], embedding_dim=embedding_dim, policy_hidden_size=configs['layer_sizes'][2], device=device ) best_policy = TanhGaussianPolicyWithEmbedding( obs_dim=obs_dim * stacksize, action_dim=action_dim, embedding_hidden_size=configs['layer_sizes'][0], embedding_dim=embedding_dim, policy_hidden_size=configs['layer_sizes'][2], device=device ) trainer = BC( policy = policy, best_policy = best_policy, env = env, replay_buffer = replay_buffer, replay_buffer_valid = replay_buffer_valid, seed = configs['seed'], device = device, envname = envname, lr = configs['lr'], save_policy_path = configs['save_policy_path'], obs_dim = obs_dim, action_dim = action_dim, stacksize = stacksize, wandb = wandb, standardize=configs['standardize'] ) trainer.train(total_iteration=configs['total_iteration'], eval_freq = configs['eval_freq'], batch_size = configs['batch_size'], num_valid = configs['valid_data_num']) elif 'RAP' in configs['algorithm']: embedding_dim = configs['layer_sizes'][1] policy = TanhGaussianRAPPolicy( obs_dim=obs_dim, stack_size=stacksize, action_dim=action_dim, embedding_hidden_size=configs['layer_sizes'][0], embedding_dim=embedding_dim, policy_hidden_size=configs['layer_sizes'][2], residual_hidden_size=configs['additional_network_size'], device=device, ) best_policy = TanhGaussianRAPPolicy( obs_dim=obs_dim, stack_size=stacksize, action_dim=action_dim, embedding_hidden_size=configs['layer_sizes'][0], embedding_dim=embedding_dim, policy_hidden_size=configs['layer_sizes'][2], residual_hidden_size=configs['additional_network_size'], device=device, ) trainer = RAP( policy = policy, best_policy = best_policy, env = env, replay_buffer = replay_buffer, replay_buffer_valid = replay_buffer_valid, seed = configs['seed'], device = device, lr = configs['lr'], save_policy_path = configs['save_policy_path'], obs_dim = obs_dim, action_dim = action_dim, embedding_dim = embedding_dim, stacksize = stacksize, wandb = wandb, standardize=configs['standardize'] ) trainer.train(total_iteration = configs['total_iteration'], eval_freq = configs['eval_freq'], batch_size = configs['batch_size'], num_valid = configs['valid_data_num']) elif 'FCA' in configs['algorithm']: embedding_dim = configs['layer_sizes'][1] policy = TanhGaussianPolicyWithEmbedding( obs_dim=obs_dim * stacksize, action_dim=action_dim, embedding_hidden_size=configs['layer_sizes'][0], embedding_dim=embedding_dim, policy_hidden_size=configs['layer_sizes'][2], device=device, ) best_policy = TanhGaussianPolicyWithEmbedding( obs_dim=obs_dim * stacksize, action_dim=action_dim, embedding_hidden_size=configs['layer_sizes'][0], embedding_dim=embedding_dim, policy_hidden_size=configs['layer_sizes'][2], device=device, )
wandb_dir = '.' os.environ['WANDB_DIR'] = wandb_dir os.environ['D4RL_DATASET_DIR'] = './dataset/' def train(configs): env = NormalizedBoxEnv(gym.make(configs['envname'])) obs_dim = env.observation_space.low.size action_dim = env.action_space.low.size d4rl_env = gym.make(configs['d4rl_env_name']) stacksize = configs['stacksize'] if stacksize == 0: stacksize = 1 device = 'cuda' if torch.cuda.is_available() else 'cpu' envname, envtype = configs['envname'], configs['envtype'] traj_load_path = configs['traj_load_path'] print(f'-- Loading dataset from {traj_load_path}...') dataset = d4rl_env.get_dataset() print(f'-- Done!') print(f'-- Preprocessing dataset... ({envtype}, {stacksize})') path = preprocess_dataset_with_prev_actions(dataset, envtype, stacksize, configs['partially_observable'], action_history_len=2) train_data = data_select_num_transitions(path, configs['train_data_num']) valid_data = data_select_num_transitions(path, configs['valid_data_num'], start_idx=900000) replay_buffer = EnvReplayBuffer( configs['replay_buffer_size'], env, stacksize, action_history_len=2 ) replay_buffer.add_path(train_data) replay_buffer_valid = EnvReplayBuffer( configs['replay_buffer_size'], env, stacksize, action_history_len=2 ) replay_buffer_valid.add_path(valid_data) if configs['standardize']: obs_mean, obs_std, act_mean, act_std = replay_buffer.calculate_statistics() replay_buffer_valid.set_statistics(obs_mean, obs_std, act_mean, act_std) # to use wandb, initialize here, e.g. # wandb.init(project='palr', dir=wandb_dir, config=configs) wandb = None if 'BC' in configs['algorithm']: embedding_dim = configs['layer_sizes'][1] policy = TanhGaussianPolicyWithEmbedding( obs_dim=obs_dim * stacksize, action_dim=action_dim, embedding_hidden_size=configs['layer_sizes'][0], embedding_dim=embedding_dim, policy_hidden_size=configs['layer_sizes'][2], device=device ) best_policy = TanhGaussianPolicyWithEmbedding( obs_dim=obs_dim * stacksize, action_dim=action_dim, embedding_hidden_size=configs['layer_sizes'][0], embedding_dim=embedding_dim, policy_hidden_size=configs['layer_sizes'][2], device=device ) trainer = BC( policy = policy, best_policy = best_policy, env = env, replay_buffer = replay_buffer, replay_buffer_valid = replay_buffer_valid, seed = configs['seed'], device = device, envname = envname, lr = configs['lr'], save_policy_path = configs['save_policy_path'], obs_dim = obs_dim, action_dim = action_dim, stacksize = stacksize, wandb = wandb, standardize=configs['standardize'] ) trainer.train(total_iteration=configs['total_iteration'], eval_freq = configs['eval_freq'], batch_size = configs['batch_size'], num_valid = configs['valid_data_num']) elif 'RAP' in configs['algorithm']: embedding_dim = configs['layer_sizes'][1] policy = TanhGaussianRAPPolicy( obs_dim=obs_dim, stack_size=stacksize, action_dim=action_dim, embedding_hidden_size=configs['layer_sizes'][0], embedding_dim=embedding_dim, policy_hidden_size=configs['layer_sizes'][2], residual_hidden_size=configs['additional_network_size'], device=device, ) best_policy = TanhGaussianRAPPolicy( obs_dim=obs_dim, stack_size=stacksize, action_dim=action_dim, embedding_hidden_size=configs['layer_sizes'][0], embedding_dim=embedding_dim, policy_hidden_size=configs['layer_sizes'][2], residual_hidden_size=configs['additional_network_size'], device=device, ) trainer = RAP( policy = policy, best_policy = best_policy, env = env, replay_buffer = replay_buffer, replay_buffer_valid = replay_buffer_valid, seed = configs['seed'], device = device, lr = configs['lr'], save_policy_path = 
configs['save_policy_path'], obs_dim = obs_dim, action_dim = action_dim, embedding_dim = embedding_dim, stacksize = stacksize, wandb = wandb, standardize=configs['standardize'] ) trainer.train(total_iteration = configs['total_iteration'], eval_freq = configs['eval_freq'], batch_size = configs['batch_size'], num_valid = configs['valid_data_num']) elif 'FCA' in configs['algorithm']: embedding_dim = configs['layer_sizes'][1] policy = TanhGaussianPolicyWithEmbedding( obs_dim=obs_dim * stacksize, action_dim=action_dim, embedding_hidden_size=configs['layer_sizes'][0], embedding_dim=embedding_dim, policy_hidden_size=configs['layer_sizes'][2], device=device, ) best_policy = TanhGaussianPolicyWithEmbedding( obs_dim=obs_dim * stacksize, action_dim=action_dim, embedding_hidden_size=configs['layer_sizes'][0], embedding_dim=embedding_dim, policy_hidden_size=configs['layer_sizes'][2], device=device, )
trainer = FCA(
2
2023-11-06 08:35:34+00:00
24k
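train() in the record above pulls its demonstrations from D4RL via d4rl_env.get_dataset() before any preprocessing or buffer filling. A minimal sketch of that accessor, assuming a placeholder environment id ("hopper-medium-v2"); get_dataset() and its keys are the standard D4RL interface:

import gym
import d4rl  # noqa: F401  (importing d4rl registers its envs with gym)

env = gym.make("hopper-medium-v2")  # placeholder id, not from the record
dataset = env.get_dataset()

# Standard D4RL keys; arrays are aligned along the first (transition) axis.
observations = dataset["observations"]  # (N, obs_dim)
actions = dataset["actions"]            # (N, act_dim)
rewards = dataset["rewards"]            # (N,)
terminals = dataset["terminals"]        # (N,)
print(observations.shape, actions.shape)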
tylerlight071/Project-Cipher
main.py
[ { "identifier": "clear_terminal", "path": "components/common_functions.py", "snippet": "def clear_terminal():\n os.system('cls' if os.name == 'nt' else 'clear')" }, { "identifier": "print_slow", "path": "components/common_functions.py", "snippet": "def print_slow(text, delay=0.00): #...
import msvcrt
import os
import pickle
import sys
import time
import colorama
import pygame
from colorama import Fore, Style
from components.common_functions import clear_terminal, print_slow, shop_help, help_user, connect_help, mail_help, \
    system_help
from conversations.calls import intro_call, first_call, second_call, third_call, fourth_call, fifth_call, sixth_call, \
    markus_seen_call
from conversations.minigame_calls import code_shatter_call
from minigames.code_shatter_minigame import code_shatter_minigame
from minigames.eye_spy_minigame import port_scanning
from systems.level_1.amy.amy_system import AmySystem
from systems.level_1.billy.billy_system import BillySystem
from systems.level_1.cameras.camera_1 import camera_first
from systems.level_1.markus.markus_system import MarkusSystem
16,334
def getpass_star(prompt="Password: "): print(prompt, end='', flush=True) password = [] while True: char = msvcrt.getch().decode('utf-8') if char == '\r' or char == '\n': break elif char == '\b': # Backspace if password: password.pop() print('\b \b', end='', flush=True) else: password.append(char) print('*', end='', flush=True) print() # Move to the next line return ''.join(password) def hack(system_name): global seen_markus # Find the system in the all_systems list system = next((s for s in all_systems if s['name'].lower() == system_name.lower()), None) if system: if system['level'] == player_level: # Check for CodeShatter before prompting for password if system['name'] == 'Markus' and has_item("CodeShatter"): clear_terminal() code_shatter_minigame() print_slow("Password Cracked: 735@&!//") input("Press [Enter] to continue") clear_terminal() markus_system_command_loop(markus_system) add_level(player_level) remove_from_inventory(item="CodeShatter") seen_markus = True elif system['name'] == 'Lobby Camera' and has_item("EyeSpy"): port_scanning() add_level(player_level) camera_first() else: # Prompt the user for the password print_slow("") password = getpass_star("Enter password: ") print_slow("") if password == system['password']: print_slow("") print_slow(Fore.GREEN + "Access granted!" + Style.RESET_ALL) if system['name'] == 'Amy': amy_system_command_loop(amy_system) elif system['name'] == 'Billy': billy_system_command_loop(billy_system) elif system['name'] == 'Markus': markus_system_command_loop(markus_system) add_level(player_level) seen_markus = True elif system['name'] == 'Lobby Camera': camera_first() elif system['name'] == 'Kyle': # Implement Kyle System else: # Add more conditions for other systems pass else: print_slow("") print_slow(Fore.RED + "Access denied! Incorrect password." + Style.RESET_ALL) else: print_slow("") print_slow(Fore.RED + "System not found! Please try again." + Style.RESET_ALL) else: print_slow("") print_slow(Fore.RED + "System not found! Please try again." + Style.RESET_ALL) def list_emails(emails): print_slow(Fore.LIGHTBLUE_EX + "\nEmails:" + Style.RESET_ALL) for i, email in enumerate(emails): print_slow(Fore.LIGHTBLUE_EX + f"\n{email['subject']} - From: {email['sender']}" + Style.RESET_ALL) def read_email(emails, subject): global has_read_email, evidence global balance email_found = False for email in emails: if email['subject'].lower() == subject.lower(): email_found = True print_slow( Fore.LIGHTBLUE_EX + f"\nFrom: {email['sender']}\nSubject: {email['subject']}\n\n{email['body']}" + Style.RESET_ALL) # Check if the email is one of the specific emails that increases evidence count if email['subject'].lower() in ["project update"]: evidence_item = 3 if not has_evidence(evidence_item): print_slow("Adding evidence to the list...") print_slow("") print_slow(Fore.GREEN + "Evidence Secured" + Style.RESET_ALL) add_evidence(evidence_item) print_slow("") print_slow("") time.sleep(3) print_slow(Fore.GREEN + "Incoming Call..." + Style.RESET_ALL) input(Fore.GREEN + "> " + Style.RESET_ALL) third_call() if email['subject'].lower() in ["professional development"]: evidence_item = 2 if not has_evidence(evidence_item): print_slow("Adding evidence to the list...") print_slow("") print_slow(Fore.GREEN + "Evidence Secured" + Style.RESET_ALL) add_evidence(evidence_item) print_slow("") print_slow("") time.sleep(3) print_slow(Fore.GREEN + "Incoming Call..." + Style.RESET_ALL) input(Fore.GREEN + "> " + Style.RESET_ALL)
# Set the PYGAME_HIDE_SUPPORT_PROMPT environment variable os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = "1" # Initialize pygame mixer pygame.mixer.init() # Load the bg music file and loop it pygame.mixer.music.load('bg_music.mp3') pygame.mixer.music.play(-1) # sets the volume to 20% (change value to adjust) pygame.mixer.music.set_volume(0.2) # Define the global variables at the module level inventory = [] balance = 300 emails = [] has_read_email = False has_read_file = False has_intro_call = False seen_markus = False evidence = [] amy_system = AmySystem() billy_system = BillySystem() markus_system = MarkusSystem() bg_music_enabled = True player_level = 1 has_started_game = False # Save the game state to a file def save_game(): global inventory, balance, emails, has_read_email, evidence, player_level, has_intro_call, has_started_game, seen_markus with open('savegame.pkl', 'wb') as f: pickle.dump( (inventory, balance, emails, has_read_email, evidence, player_level, has_intro_call, has_started_game, seen_markus), f) # Load the game state from a file def load_game(): global inventory, balance, emails, has_read_email, evidence, player_level, has_intro_call, has_started_game, seen_markus if os.path.exists('savegame.pkl'): with open('savegame.pkl', 'rb') as f: inventory, balance, emails, has_read_email, evidence, player_level, has_intro_call, has_started_game, seen_markus = pickle.load( f) else: # If the savegame file doesn't exist, set the default values inventory = [] player_level = 1 evidence = [] has_intro_call = False has_started_game = False seen_markus = False balance = 30000 emails = [ { "sender": "Hacker's Digest", "subject": "Weekly Hacker's Digest", "body": ( "Issue #143\n\n" "Cipher,\n\n" "Welcome to the latest edition of Hacker's Digest! In this issue: \n\n" "- Unveiling the Latest Exploits\n" "- Spotlight on Cryptocurrency Security\n" "- Interview with a Grey Hat Hacker\n" "- Tool of the Week: EnigmaLink\n\n" "Don't miss out on the latest in the world of hacking and cybersecurity. Stay informed and stay secure!\n\n" "Best regards,\n" "Hacker's Digest Team" ) }, { "sender": "The Cyber Mythbuster", "subject": "Busting Cybersecurity Myths", "body": ( "Cipher,\n\n" "Heard any wild cybersecurity myths lately? This week, we're busting the craziest ones, including:\n\n" "- Using 'Password123' for Maximum Security\n" "- Cyber Ninjas and Their Stealthy VPNs\n" "- USB Drives: The Fountain of Eternal Data\n\n" "Stay myth-free and keep on hacking (responsibly)!\n\n" "Mythbustingly,\n" "The Cyber Mythbuster" ) }, { "sender": "CyberSilliness", "subject": "Where Cyber Meets Comedy", "body": ( "Welcome to the CyberSilliness Gazette\n" "Where we believe that a good laugh is the ultimate antivirus! In this week's hilarity-packed issue:\n\n" "- Cyber Jokes to Crack You Up (Without Cracking Your Passwords)\n" "- Tech Support Horror Stories: A Comedy of Errors\n" "- Chuckle Challenge: Share Your Funniest Cybersecurity Anecdote\n" "- Meet the Cyber Clowns: Our Team's Silly Security Habits Revealed\n\n" "Laughter is contagious, and so is good cybersecurity. Dive into the giggles and stay safe!\n\n" "Silly Regards,\n" "The CyberSilliness Team" ) }, { "sender": "Security Insight Weekly", "subject": "Navigating the Cybersecurity Landscape", "body": ( "Hello Cipher,\n\n" "Welcome to Security Insight Weekly, your reliable source for navigating the ever-evolving cybersecurity landscape. 
In this week's issue:\n\n" "- Threat Analysis: Understanding Recent Cybersecurity Incidents\n" "- Best Practices for Endpoint Security\n" "- Industry Spotlight: Healthcare Cybersecurity Challenges\n" "- Security Compliance Update: Staying Aligned with Regulations\n\n" "Stay informed and empowered as we delve into the serious aspects of cybersecurity. Your security is our priority.\n\n" "Best regards,\n" "The Security Insight Team" ) }, ] # New function for game settings def game_settings(): global bg_music_enabled print_slow(Fore.GREEN + "░██████╗███████╗████████╗████████╗██╗███╗░░██╗░██████╗░░██████╗") print_slow(Fore.GREEN + "██╔════╝██╔════╝╚══██╔══╝╚══██╔══╝██║████╗░██║██╔════╝░██╔════╝") print_slow(Fore.GREEN + "╚█████╗░█████╗░░░░░██║░░░░░░██║░░░██║██╔██╗██║██║░░██╗░╚█████╗░") print_slow(Fore.GREEN + "░╚═══██╗██╔══╝░░░░░██║░░░░░░██║░░░██║██║╚████║██║░░╚██╗░╚═══██╗") print_slow(Fore.GREEN + "██████╔╝███████╗░░░██║░░░░░░██║░░░██║██║░╚███║╚██████╔╝██████╔╝") print_slow(Fore.GREEN + "╚═════╝░╚══════╝░░░╚═╝░░░░░░╚═╝░░░╚═╝╚═╝░░╚══╝░╚═════╝░╚═════╝░" + Style.RESET_ALL) print_slow("") print_slow("") print_slow("") print_slow(Fore.GREEN + " --------------------------------------------" + Style.RESET_ALL) print_slow( Fore.GREEN + f"| [Background Music] {'Enabled |' if bg_music_enabled else 'Disabled |'}" + Style.RESET_ALL) print_slow(Fore.GREEN + "| |" + Style.RESET_ALL) print_slow(Fore.GREEN + "| [Delete Savegame] |" + Style.RESET_ALL) print_slow(Fore.GREEN + "| |" + Style.RESET_ALL) print_slow(Fore.GREEN + "| [Back to Main Menu] |" + Style.RESET_ALL) print_slow(Fore.GREEN + " --------------------------------------------" + Style.RESET_ALL) choice = input(Fore.GREEN + "\n> " + Style.RESET_ALL) if choice.lower() == "background music": # Toggle background music bg_music_enabled = not bg_music_enabled if bg_music_enabled: pygame.mixer.music.play(-1) print_slow(Fore.GREEN + "\nBackground Music Enabled" + Style.RESET_ALL) time.sleep(1) clear_terminal() game_settings() else: pygame.mixer.music.stop() print_slow(Fore.RED + "\nBackground Music Disabled" + Style.RESET_ALL) time.sleep(1) clear_terminal() game_settings() elif choice.lower() == "delete savegame": # Delete savegame confirm = input(Fore.RED + "\nAre you sure you want to delete the savegame? (yes/no): " + Style.RESET_ALL) if confirm.lower() == "yes": try: os.remove("savegame.pkl") print_slow(Fore.GREEN + "\nSavegame Deleted" + Style.RESET_ALL) time.sleep(1) clear_terminal() game_settings() except FileNotFoundError: print_slow(Fore.RED + "\nSavegame not found" + Style.RESET_ALL) time.sleep(1) clear_terminal() game_settings() elif choice.lower() == "back" or choice.lower() == "back to main menu": # Return to Main Menu print_slow(Fore.GREEN + "\nReturning to Main Menu..." + Style.RESET_ALL) time.sleep(1) clear_terminal() else: print_slow(Fore.RED + "\nInvalid choice, please try again." 
+ Style.RESET_ALL) time.sleep(1) clear_terminal() game_settings() # Function to add an item to the inventory def add_to_inventory(item): inventory.append(item) def remove_from_inventory(item): if item in inventory: inventory.remove(item) def add_evidence(evidence_item): evidence.append(evidence_item) def has_evidence(evidence_item): return evidence_item in evidence # Prints the games title def main(): clear_terminal() colorama.init() print_slow(Fore.GREEN + "██████╗░██╗░░░░░░█████╗░░█████╗░██╗░░██╗██╗░░██╗░█████╗░████████╗" + Style.RESET_ALL) print_slow(Fore.GREEN + "██╔══██╗██║░░░░░██╔══██╗██╔══██╗██║░██╔╝██║░░██║██╔══██╗╚══██╔══╝" + Style.RESET_ALL) print_slow(Fore.GREEN + "██████╦╝██║░░░░░███████║██║░░╚═╝█████═╝░███████║███████║░░░██║░░░" + Style.RESET_ALL) print_slow(Fore.GREEN + "██╔══██╗██║░░░░░██╔══██║██║░░██╗██╔═██╗░██╔══██║██╔══██║░░░██║░░░" + Style.RESET_ALL) print_slow(Fore.GREEN + "██████╦╝███████╗██║░░██║╚█████╔╝██║░╚██╗██║░░██║██║░░██║░░░██║░░░" + Style.RESET_ALL) print_slow(Fore.GREEN + "╚═════╝░╚══════╝╚═╝░░╚═╝░╚════╝░╚═╝░░╚═╝╚═╝░░╚═╝╚═╝░░╚═╝░░░╚═╝░░░" + Style.RESET_ALL) # Pause for 2 seconds before clearing the console time.sleep(5) # Clear the console clear_terminal() # Main menu loop while True: print_slow(Fore.GREEN + "███╗░░░███╗░█████╗░██╗███╗░░██╗  ███╗░░░███╗███████╗███╗░░██╗██╗░░░██╗") print_slow(Fore.GREEN + "████╗░████║██╔══██╗██║████╗░██║  ████╗░████║██╔════╝████╗░██║██║░░░██║") print_slow(Fore.GREEN + "██╔████╔██║███████║██║██╔██╗██║  ██╔████╔██║█████╗░░██╔██╗██║██║░░░██║") print_slow(Fore.GREEN + "██║╚██╔╝██║██╔══██║██║██║╚████║  ██║╚██╔╝██║██╔══╝░░██║╚████║██║░░░██║") print_slow(Fore.GREEN + "██║░╚═╝░██║██║░░██║██║██║░╚███║  ██║░╚═╝░██║███████╗██║░╚███║╚██████╔╝") print_slow( Fore.GREEN + "╚═╝░░░░░╚═╝╚═╝░░╚═╝╚═╝╚═╝░░╚══╝  ╚═╝░░░░░╚═╝╚══════╝╚═╝░░╚══╝░╚═════╝░" + Style.RESET_ALL) print_slow("") print_slow("") print_slow("") print_slow(Fore.GREEN + " --------------------------------------------" + Style.RESET_ALL) print_slow(Fore.GREEN + "| [Start] Start the game |" + Style.RESET_ALL) print_slow(Fore.GREEN + "| |" + Style.RESET_ALL) print_slow(Fore.GREEN + "| [Options] Change the settings |" + Style.RESET_ALL) print_slow(Fore.GREEN + "| |" + Style.RESET_ALL) print_slow(Fore.GREEN + "| [Exit] Exit the game |" + Style.RESET_ALL) print_slow(Fore.GREEN + " --------------------------------------------" + Style.RESET_ALL) choice = input(Fore.GREEN + "\n> " + Style.RESET_ALL) # Start the game if choice.lower() == "start": load_game() start_game() # Open game settings elif choice.lower() == "options": clear_terminal() game_settings() # Exit the game elif choice.lower() == "exit": print_slow(Fore.GREEN + "\nExiting..." + Style.RESET_ALL) pygame.mixer.music.stop() sys.exit() else: print_slow(Fore.RED + "\nInvalid choice, please try again." 
+ Style.RESET_ALL) time.sleep(2) clear_terminal() # Function to get the user's balance def get_balance(): return balance # Function to add money to the user's balance def add_money(amount): global balance balance += amount # Function to subtract money from the user's balance def subtract_money(amount): global balance balance -= amount def add_level(level): global player_level player_level += level # Function to print the user's balance def print_balance(): print_slow(f"Your current balance is: £{get_balance()}") # Function to read files and marks files as evidence def read_file(file_content, file_name): global has_read_file, evidence global balance # Print the file content print_slow(Fore.LIGHTBLUE_EX + f"\n{file_name}:\n\n{file_content}" + Style.RESET_ALL) print_slow("") # Check if the file is one of the specific files that increases evidence count if file_name.lower() in ["employee_performance_review.txt"]: evidence_item = 4 if not has_evidence(evidence_item): print_slow("Adding evidence to the list...") print_slow("") print_slow(Fore.GREEN + "Evidence Secured" + Style.RESET_ALL) add_evidence(evidence_item) print_slow("") print_slow("") time.sleep(3) print_slow(Fore.GREEN + "Incoming Call..." + Style.RESET_ALL) input(Fore.GREEN + "> " + Style.RESET_ALL) fourth_call() if file_name.lower() in ["meeting_minutes.txt"]: evidence_item = 5 if not has_evidence(evidence_item): print_slow("Adding evidence to the list...") print_slow("") print_slow(Fore.GREEN + "Evidence Secured" + Style.RESET_ALL) add_evidence(evidence_item) print_slow("") print_slow("") time.sleep(3) print_slow(Fore.GREEN + "Incoming Call..." + Style.RESET_ALL) input(Fore.GREEN + "> " + Style.RESET_ALL) fifth_call() # Add more file names here as needed # Add money to balance based on the file name if file_name.lower() == "employee_performance_review.txt": balance += 30 elif file_name.lower() == "meeting_minutes.txt": balance += 50 # List of available upgrades upgrades = [ {"name": "EnigmaLink", "description": "Application required to connect to Enigma Corps network.", "price": 100}, {"name": "CodeShatter", "description": "A powerful password breaker that can crack even the strongest passwords.", "price": 250}, {"name": "EyeSpy", "description": "A privacy breaker to gain access to the smallest of cameras.", "price": 500}, {"name": "Rift", "description": "Break the barrier between the Server and Network.", "price": 800} ] # Function to display the shop def shop(): clear_terminal() print_slow(Fore.YELLOW + r''' ██╗░░██╗░█████╗░░█████╗░██╗░░██╗███████╗██████╗░  ███╗░░░███╗░█████╗░██████╗░██╗░░██╗███████╗████████╗ ██║░░██║██╔══██╗██╔══██╗██║░██╔╝██╔════╝██╔══██╗  ████╗░████║██╔══██╗██╔══██╗██║░██╔╝██╔════╝╚══██╔══╝ ███████║███████║██║░░╚═╝█████═╝░█████╗░░██████╔╝  ██╔████╔██║███████║██████╔╝█████═╝░█████╗░░░░░██║░░░ ██╔══██║██╔══██║██║░░██╗██╔═██╗░██╔══╝░░██╔══██╗  ██║╚██╔╝██║██╔══██║██╔══██╗██╔═██╗░██╔══╝░░░░░██║░░░ ██║░░██║██║░░██║╚█████╔╝██║░╚██╗███████╗██║░░██║  ██║░╚═╝░██║██║░░██║██║░░██║██║░╚██╗███████╗░░░██║░░░ ╚═╝░░╚═╝╚═╝░░╚═╝░╚════╝░╚═╝░░╚═╝╚══════╝╚═╝░░╚═╝  ╚═╝░░░░░╚═╝╚═╝░░╚═╝╚═╝░░╚═╝╚═╝░░╚═╝╚══════╝░░░╚═╝░░░''' + Style.RESET_ALL) print_slow(Fore.YELLOW + "\nWelcome to the Hacker's Market!" 
+ Style.RESET_ALL) print_slow("") print_slow(Fore.YELLOW + "Here you can buy upgrades to improve your hacking abilities.\n" + Style.RESET_ALL) while True: # Display the list of available upgrades for i, upgrade in enumerate(upgrades): print_slow( Fore.YELLOW + f"\n{upgrade['name']} - {upgrade['description']} - £{upgrade['price']}" + Style.RESET_ALL) # Get the user's choice command = input(Fore.YELLOW + "\n> " + Style.RESET_ALL) # Buy the chosen upgrade if command.lower() == 'exit': print_slow(Fore.YELLOW + "\nExiting Hacker's Market" + Style.RESET_ALL) time.sleep(1) clear_terminal() start_game() elif command.lower() == 'help': shop_help() elif command.lower().startswith('buy '): upgrade_name = command[4:] # [4:] removes first 4 characters if has_item('EnigmaLink'): if upgrade_name.lower() == 'enigmalink': print_slow("") print_slow(Fore.RED + "Sold Out" + Style.RESET_ALL) time.sleep(1) clear_terminal() shop() else: for upgrade in upgrades: if upgrade_name.lower() == upgrade['name'].lower(): if get_balance() >= upgrade['price']: print_slow("") print_slow( Fore.GREEN + f"You have successfully purchased {upgrade['name']} for ${upgrade['price']}!" + Style.RESET_ALL) subtract_money(upgrade['price']) print_slow("") print_balance() add_to_inventory(upgrade['name']) time.sleep(2) clear_terminal() # Check if the purchased upgrade is CodeShatter if upgrade_name.lower() == 'codeshatter': print_slow("") print_slow(Fore.GREEN + "Incoming Call..." + Style.RESET_ALL) input(Fore.GREEN + "> " + Style.RESET_ALL) code_shatter_call() shop() else: clear_terminal() shop() else: print_slow( Fore.RED + "You don't have enough money to buy this upgrade." + Style.RESET_ALL) time.sleep(1) clear_terminal() shop() else: print_slow(Fore.RED + "Invalid choice, please try again." + Style.RESET_ALL) time.sleep(1) clear_terminal() shop() else: for upgrade in upgrades: if upgrade_name.lower() == upgrade['name'].lower(): if get_balance() >= upgrade['price']: print_slow("") print_slow( Fore.GREEN + f"You have successfully purchased {upgrade['name']} for ${upgrade['price']}!" + Style.RESET_ALL) subtract_money(upgrade['price']) print_slow("") print_balance() add_to_inventory(upgrade['name']) time.sleep(2) clear_terminal() shop() else: print_slow( Fore.RED + "You don't have enough money to buy this upgrade." + Style.RESET_ALL) shop() else: print_slow(Fore.RED + "Invalid choice, please try again." + Style.RESET_ALL) time.sleep(1) clear_terminal() shop() # Function to start the game def start_game(): global has_intro_call, has_started_game, seen_markus if has_intro_call: clear_terminal() pass else: print_slow("\nStarting game...") time.sleep(1) print_slow("\nLoading assets...") time.sleep(1) clear_terminal() print_slow(Fore.GREEN + "Incoming Call..." + Style.RESET_ALL) input(Fore.GREEN + "> " + Style.RESET_ALL) intro_call() has_intro_call = True has_started_game = True print_slow(Fore.MAGENTA + "\nHint: Type 'help' to get a list of available commands." + Style.RESET_ALL) pass if seen_markus: print_slow(Fore.GREEN + "Incoming Call..." 
+ Style.RESET_ALL) input(Fore.GREEN + "> " + Style.RESET_ALL) markus_seen_call() else: pass # Game command loop command = input(Fore.GREEN + "> " + Style.RESET_ALL) # Connect to the network if command.lower() == "connect": connect() # Access the mail system elif command.lower() == "mail": mail() # Display help message elif command.lower() == "help": help_user() # Check balance elif command.lower() == "balance": print_balance() # Enter shop elif command.lower() == "shop": shop() # Clear terminal elif command.lower() == "clear": clear_terminal() # Return to the main menu elif command.lower() == "exit": print_slow("Returning to Main Menu...") time.sleep(1) main() else: print_slow("Invalid command, please try again.") time.sleep(1) clear_terminal() start_game() # Save the game state save_game() # Function to check if an item is in the inventory def has_item(item): return item in inventory def scan(): print_slow("") print_slow(Fore.YELLOW + "Scanning network..." + Style.RESET_ALL) time.sleep(2) print_slow("") print_slow(Fore.YELLOW + "\nAvailable Systems:" + Style.RESET_ALL) print_slow("") for system in all_systems: if system['level'] == player_level: print_slow("") print_slow(f"{system['name']} ({system['type']})") print_slow("") def getpass_star(prompt="Password: "): print(prompt, end='', flush=True) password = [] while True: char = msvcrt.getch().decode('utf-8') if char == '\r' or char == '\n': break elif char == '\b': # Backspace if password: password.pop() print('\b \b', end='', flush=True) else: password.append(char) print('*', end='', flush=True) print() # Move to the next line return ''.join(password) def hack(system_name): global seen_markus # Find the system in the all_systems list system = next((s for s in all_systems if s['name'].lower() == system_name.lower()), None) if system: if system['level'] == player_level: # Check for CodeShatter before prompting for password if system['name'] == 'Markus' and has_item("CodeShatter"): clear_terminal() code_shatter_minigame() print_slow("Password Cracked: 735@&!//") input("Press [Enter] to continue") clear_terminal() markus_system_command_loop(markus_system) add_level(player_level) remove_from_inventory(item="CodeShatter") seen_markus = True elif system['name'] == 'Lobby Camera' and has_item("EyeSpy"): port_scanning() add_level(player_level) camera_first() else: # Prompt the user for the password print_slow("") password = getpass_star("Enter password: ") print_slow("") if password == system['password']: print_slow("") print_slow(Fore.GREEN + "Access granted!" + Style.RESET_ALL) if system['name'] == 'Amy': amy_system_command_loop(amy_system) elif system['name'] == 'Billy': billy_system_command_loop(billy_system) elif system['name'] == 'Markus': markus_system_command_loop(markus_system) add_level(player_level) seen_markus = True elif system['name'] == 'Lobby Camera': camera_first() elif system['name'] == 'Kyle': # Implement Kyle System else: # Add more conditions for other systems pass else: print_slow("") print_slow(Fore.RED + "Access denied! Incorrect password." + Style.RESET_ALL) else: print_slow("") print_slow(Fore.RED + "System not found! Please try again." + Style.RESET_ALL) else: print_slow("") print_slow(Fore.RED + "System not found! Please try again." 
+ Style.RESET_ALL) def list_emails(emails): print_slow(Fore.LIGHTBLUE_EX + "\nEmails:" + Style.RESET_ALL) for i, email in enumerate(emails): print_slow(Fore.LIGHTBLUE_EX + f"\n{email['subject']} - From: {email['sender']}" + Style.RESET_ALL) def read_email(emails, subject): global has_read_email, evidence global balance email_found = False for email in emails: if email['subject'].lower() == subject.lower(): email_found = True print_slow( Fore.LIGHTBLUE_EX + f"\nFrom: {email['sender']}\nSubject: {email['subject']}\n\n{email['body']}" + Style.RESET_ALL) # Check if the email is one of the specific emails that increases evidence count if email['subject'].lower() in ["project update"]: evidence_item = 3 if not has_evidence(evidence_item): print_slow("Adding evidence to the list...") print_slow("") print_slow(Fore.GREEN + "Evidence Secured" + Style.RESET_ALL) add_evidence(evidence_item) print_slow("") print_slow("") time.sleep(3) print_slow(Fore.GREEN + "Incoming Call..." + Style.RESET_ALL) input(Fore.GREEN + "> " + Style.RESET_ALL) third_call() if email['subject'].lower() in ["professional development"]: evidence_item = 2 if not has_evidence(evidence_item): print_slow("Adding evidence to the list...") print_slow("") print_slow(Fore.GREEN + "Evidence Secured" + Style.RESET_ALL) add_evidence(evidence_item) print_slow("") print_slow("") time.sleep(3) print_slow(Fore.GREEN + "Incoming Call..." + Style.RESET_ALL) input(Fore.GREEN + "> " + Style.RESET_ALL)
second_call()
9
2023-11-06 09:52:13+00:00
24k
ziqi-zhang/TAOISM
python/test/test_conv.py
[ { "identifier": "register_layer", "path": "python/common_net.py", "snippet": "def register_layer(layer, name):\n layer.register_forward_hook(hooking_layer(name))\n layer.register_backward_hook(hooking_layer_backward(name))\n layer_names.append(name)" }, { "identifier": "register_weight_...
import os import sys import numpy as np import torch import torch.distributed as dist import sys import pdb from pdb import set_trace as st from torch import optim, nn from python.common_net import register_layer, register_weight_layer, get_layer_weight, get_layer_input, \ get_layer_weight_grad, get_layer_output, get_layer_output_grad, get_layer_input_grad from python.enclave_interfaces import GlobalTensor from python.layers.batch_norm_2d import SecretBatchNorm2dLayer from python.layers.flatten import SecretFlattenLayer from python.layers.input import SecretInputLayer from python.layers.maxpool2d import SecretMaxpool2dLayer from python.layers.output import SecretOutputLayer from python.layers.relu import SecretReLULayer from python.sgx_net import init_communicate, warming_up_cuda, SecretNeuralNetwork, SgdOptimizer from python.layers.sgx_linear_base import SGXLinearBase from python.layers.sgx_conv_base import SGXConvBase from python.utils.basic_utils import ExecutionModeOptions from python.utils.logger_utils import Logger from python.quantize_net import NetQ from python.test_sgx_net import argparser_distributed, marshal_process, load_cifar10, seed_torch from python.utils.timer_utils import NamedTimerInstance, VerboseLevel, NamedTimer from python.utils.torch_utils import compare_expected_actual from pdb import set_trace as st
21,551
device_cuda = torch.device("cuda:0") torch.set_printoptions(precision=10) def compare_layer_member(layer: SGXLinearBase, layer_name: str, extract_func , member_name: str, save_path=None) -> None: print(member_name) layer.make_sure_cpu_is_latest(member_name) compare_expected_actual(extract_func(layer_name), layer.get_cpu(member_name), get_relative=True, verbose=True) if save_path is not None: if not os.path.exists(save_path): os.makedirs(save_path) print("Directory ", save_path, " Created ") else: print("Directory ", save_path, " already exists") torch.save(extract_func(layer_name), os.path.join(save_path, member_name + "_expected")) torch.save(layer.get_cpu(member_name), os.path.join(save_path, member_name + "_actual")) def compare_layer(layer: SGXLinearBase, layer_name: str, save_path=None) -> None: print("comparing with layer in expected NN :", layer_name) compare_name_function = [("input", get_layer_input), ("output", get_layer_output), ("DerOutput", get_layer_output_grad), ] if layer_name != "conv1": compare_name_function.append(("DerInput", get_layer_input_grad)) for member_name, extract_func in compare_name_function: compare_layer_member(layer, layer_name, extract_func, member_name, save_path=save_path) def compare_weight_layer(layer: SGXLinearBase, layer_name: str, save_path=None) -> None: compare_layer(layer, layer_name, save_path) compare_name_function = [("weight", get_layer_weight), ("DerWeight", get_layer_weight_grad) ] for member_name, extract_func in compare_name_function: compare_layer_member(layer, layer_name, extract_func, member_name, save_path=save_path) class ForkedPdb(pdb.Pdb): """A Pdb subclass that may be used from a forked multiprocessing child """ def interaction(self, *args, **kwargs): _stdin = sys.stdin try: sys.stdin = open('/dev/stdin') pdb.Pdb.interaction(self, *args, **kwargs) finally: sys.stdin = _stdin def test_conv( batch_size, img_hw, input_c, output_c, kernel, padding, stride, bias=False, set_values_to_one=False, sid=0 ): print("="*20, "TestConv", "="*20) print( f"batch {batch_size}, img_hw {img_hw}, input_c {input_c}, output_c {output_c}, " + f"kernel {kernel}, padding {padding}, stride {stride}" ) # def test_conv( # bias=False, set_values_to_one=True, # sid=0 # ): # batch_size = 128 # input_c = 3 # output_c = 64 # img_hw = 224 # kernel, padding, stride = 7, 3, 2 # batch_size = 128 # input_c = 512 # output_c = 512 # img_hw = 7 # kernel, padding, stride = 3, 1, 1 x_shape = [batch_size, input_c, img_hw, img_hw] GlobalTensor.init()
device_cuda = torch.device("cuda:0") torch.set_printoptions(precision=10) def compare_layer_member(layer: SGXLinearBase, layer_name: str, extract_func , member_name: str, save_path=None) -> None: print(member_name) layer.make_sure_cpu_is_latest(member_name) compare_expected_actual(extract_func(layer_name), layer.get_cpu(member_name), get_relative=True, verbose=True) if save_path is not None: if not os.path.exists(save_path): os.makedirs(save_path) print("Directory ", save_path, " Created ") else: print("Directory ", save_path, " already exists") torch.save(extract_func(layer_name), os.path.join(save_path, member_name + "_expected")) torch.save(layer.get_cpu(member_name), os.path.join(save_path, member_name + "_actual")) def compare_layer(layer: SGXLinearBase, layer_name: str, save_path=None) -> None: print("comparing with layer in expected NN :", layer_name) compare_name_function = [("input", get_layer_input), ("output", get_layer_output), ("DerOutput", get_layer_output_grad), ] if layer_name != "conv1": compare_name_function.append(("DerInput", get_layer_input_grad)) for member_name, extract_func in compare_name_function: compare_layer_member(layer, layer_name, extract_func, member_name, save_path=save_path) def compare_weight_layer(layer: SGXLinearBase, layer_name: str, save_path=None) -> None: compare_layer(layer, layer_name, save_path) compare_name_function = [("weight", get_layer_weight), ("DerWeight", get_layer_weight_grad) ] for member_name, extract_func in compare_name_function: compare_layer_member(layer, layer_name, extract_func, member_name, save_path=save_path) class ForkedPdb(pdb.Pdb): """A Pdb subclass that may be used from a forked multiprocessing child """ def interaction(self, *args, **kwargs): _stdin = sys.stdin try: sys.stdin = open('/dev/stdin') pdb.Pdb.interaction(self, *args, **kwargs) finally: sys.stdin = _stdin def test_conv( batch_size, img_hw, input_c, output_c, kernel, padding, stride, bias=False, set_values_to_one=False, sid=0 ): print("="*20, "TestConv", "="*20) print( f"batch {batch_size}, img_hw {img_hw}, input_c {input_c}, output_c {output_c}, " + f"kernel {kernel}, padding {padding}, stride {stride}" ) # def test_conv( # bias=False, set_values_to_one=True, # sid=0 # ): # batch_size = 128 # input_c = 3 # output_c = 64 # img_hw = 224 # kernel, padding, stride = 7, 3, 2 # batch_size = 128 # input_c = 512 # output_c = 512 # img_hw = 7 # kernel, padding, stride = 3, 1, 1 x_shape = [batch_size, input_c, img_hw, img_hw] GlobalTensor.init()
input_layer = SecretInputLayer(sid, "InputLayer", x_shape, ExecutionModeOptions.Enclave )
11
2023-11-01 10:37:37+00:00
24k
Codra-Ingenierie-Informatique/DataLab
cdl/core/gui/processor/image.py
[ { "identifier": "distance_matrix", "path": "cdl/algorithms/image.py", "snippet": "def distance_matrix(coords: list) -> np.ndarray:\n \"\"\"Return distance matrix from coords\n\n Args:\n coords (list): List of coordinates\n\n Returns:\n np.ndarray: Distance matrix\n \"\"\"\n ...
from collections.abc import Callable from guidata.qthelpers import exec_dialog from numpy import ma from plotpy.widgets.resizedialog import ResizeDialog from qtpy import QtWidgets as QW from cdl.algorithms.image import distance_matrix from cdl.config import APP_NAME, Conf, _ from cdl.core.gui.processor.base import BaseProcessor from cdl.core.model.base import ShapeTypes from cdl.core.model.image import ImageObj from cdl.utils.qthelpers import create_progress_bar, qt_try_except import numpy as np import cdl.core.computation.base as cpb import cdl.core.computation.image as cpi import cdl.core.computation.image.detection as cpi_det import cdl.core.computation.image.edges as cpi_edg import cdl.core.computation.image.exposure as cpi_exp import cdl.core.computation.image.morphology as cpi_mor import cdl.core.computation.image.restoration as cpi_res import cdl.param
15,896
"""Compute difference between two images""" self.compute_n1n( obj2, _("image to subtract"), cpi.compute_difference, title=_("Difference"), ) @qt_try_except() def compute_quadratic_difference(self, obj2: ImageObj | None = None) -> None: """Compute quadratic difference between two images""" self.compute_n1n( obj2, _("image to subtract"), cpi.compute_quadratic_difference, title=_("Quadratic difference"), ) @qt_try_except() def compute_division(self, obj2: ImageObj | None = None) -> None: """Compute division between two images""" self.compute_n1n( obj2, _("divider"), cpi.compute_division, title=_("Division"), ) @qt_try_except() def compute_flatfield( self, obj2: ImageObj | None = None, param: cdl.param.FlatFieldParam | None = None, ) -> None: """Compute flat field correction""" edit, param = self.init_param(param, cpi.FlatFieldParam, _("Flat field")) if edit: obj = self.panel.objview.get_sel_objects()[0] param.set_from_datatype(obj.data.dtype) self.compute_n1n( obj2, _("flat field image"), cpi.compute_flatfield, param=param, title=_("Flat field correction"), edit=edit, ) # ------Image Processing @qt_try_except() def compute_calibration( self, param: cdl.param.ZCalibrateParam | None = None ) -> None: """Compute data linear calibration""" self.compute_11( cpi.compute_calibration, param, cpi.ZCalibrateParam, _("Linear calibration"), "y = a.x + b", ) @qt_try_except() def compute_threshold(self, param: cpb.ThresholdParam | None = None) -> None: """Compute threshold clipping""" self.compute_11( cpi.compute_threshold, param, cpb.ThresholdParam, _("Thresholding"), ) @qt_try_except() def compute_clip(self, param: cpb.ClipParam | None = None) -> None: """Compute maximum data clipping""" self.compute_11( cpi.compute_clip, param, cpb.ClipParam, _("Clipping"), ) @qt_try_except() def compute_gaussian_filter(self, param: cpb.GaussianParam | None = None) -> None: """Compute gaussian filter""" self.compute_11( cpi.compute_gaussian_filter, param, cpb.GaussianParam, _("Gaussian filter") ) @qt_try_except() def compute_moving_average( self, param: cpb.MovingAverageParam | None = None ) -> None: """Compute moving average""" self.compute_11( cpi.compute_moving_average, param, cpb.MovingAverageParam, _("Moving average"), ) @qt_try_except() def compute_moving_median(self, param: cpb.MovingMedianParam | None = None) -> None: """Compute moving median""" self.compute_11( cpi.compute_moving_median, param, cpb.MovingMedianParam, _("Moving median"), ) @qt_try_except() def compute_wiener(self) -> None: """Compute Wiener filter""" self.compute_11(cpi.compute_wiener, title=_("Wiener filter")) @qt_try_except() def compute_fft(self, param: cdl.param.FFTParam | None = None) -> None: """Compute FFT""" if param is None:
# -*- coding: utf-8 -*- # # Licensed under the terms of the BSD 3-Clause # (see cdl/LICENSE for details) """ Image Processor --------------- """ # pylint: disable=invalid-name # Allows short reference names like x, y, ... from __future__ import annotations class ImageProcessor(BaseProcessor): """Object handling image processing: operations, processing, computing""" # pylint: disable=duplicate-code EDIT_ROI_PARAMS = True @qt_try_except() def compute_sum(self) -> None: """Compute sum""" self.compute_n1("Σ", cpi.compute_add, title=_("Sum")) @qt_try_except() def compute_average(self) -> None: """Compute average""" def func_objs(new_obj: ImageObj, old_objs: list[ImageObj]) -> None: """Finalize average computation""" new_obj.data = new_obj.data / float(len(old_objs)) self.compute_n1("μ", cpi.compute_add, func_objs=func_objs, title=_("Average")) @qt_try_except() def compute_product(self) -> None: """Compute product""" self.compute_n1("Π", cpi.compute_product, title=_("Product")) @qt_try_except() def compute_logp1(self, param: cdl.param.LogP1Param | None = None) -> None: """Compute base 10 logarithm""" self.compute_11(cpi.compute_logp1, param, cpi.LogP1Param, title="Log10") @qt_try_except() def compute_rotate(self, param: cdl.param.RotateParam | None = None) -> None: """Rotate data arbitrarily""" self.compute_11(cpi.compute_rotate, param, cpi.RotateParam, title="Rotate") @qt_try_except() def compute_rotate90(self) -> None: """Rotate data 90°""" self.compute_11(cpi.compute_rotate90, title="Rotate90") @qt_try_except() def compute_rotate270(self) -> None: """Rotate data 270°""" self.compute_11(cpi.compute_rotate270, title="Rotate270") @qt_try_except() def compute_fliph(self) -> None: """Flip data horizontally""" self.compute_11(cpi.compute_fliph, title="HFlip") @qt_try_except() def compute_flipv(self) -> None: """Flip data vertically""" self.compute_11(cpi.compute_flipv, title="VFlip") @qt_try_except() def distribute_on_grid(self, param: cdl.param.GridParam | None = None) -> None: """Distribute images on a grid""" title = _("Distribute on grid") edit, param = self.init_param(param, cpi.GridParam, title) if edit and not param.edit(parent=self.panel.parent()): return objs = self.panel.objview.get_sel_objects(include_groups=True) g_row, g_col, x0, y0, x0_0, y0_0 = 0, 0, 0.0, 0.0, 0.0, 0.0 delta_x0, delta_y0 = 0.0, 0.0 with create_progress_bar(self.panel, title, max_=len(objs)) as progress: for i_row, obj in enumerate(objs): progress.setValue(i_row + 1) QW.QApplication.processEvents() if progress.wasCanceled(): break if i_row == 0: x0_0, y0_0 = x0, y0 = obj.x0, obj.y0 else: delta_x0, delta_y0 = x0 - obj.x0, y0 - obj.y0 obj.x0 += delta_x0 obj.y0 += delta_y0 # pylint: disable=unused-argument def translate_coords(obj, orig, coords): """Apply translation to coords""" coords[:, ::2] += delta_x0 coords[:, 1::2] += delta_y0 obj.transform_shapes(None, translate_coords) if param.direction == "row": # Distributing images over rows sign = np.sign(param.rows) g_row = (g_row + sign) % param.rows y0 += (obj.dy * obj.data.shape[0] + param.rowspac) * sign if g_row == 0: g_col += 1 x0 += obj.dx * obj.data.shape[1] + param.colspac y0 = y0_0 else: # Distributing images over columns sign = np.sign(param.cols) g_col = (g_col + sign) % param.cols x0 += (obj.dx * obj.data.shape[1] + param.colspac) * sign if g_col == 0: g_row += 1 x0 = x0_0 y0 += obj.dy * obj.data.shape[0] + param.rowspac self.panel.SIG_REFRESH_PLOT.emit("selected", True) @qt_try_except() def reset_positions(self) -> None: """Reset image positions""" x0_0, y0_0 
= 0.0, 0.0 delta_x0, delta_y0 = 0.0, 0.0 objs = self.panel.objview.get_sel_objects(include_groups=True) for i_row, obj in enumerate(objs): if i_row == 0: x0_0, y0_0 = obj.x0, obj.y0 else: delta_x0, delta_y0 = x0_0 - obj.x0, y0_0 - obj.y0 obj.x0 += delta_x0 obj.y0 += delta_y0 # pylint: disable=unused-argument def translate_coords(obj, orig, coords): """Apply translation to coords""" coords[:, ::2] += delta_x0 coords[:, 1::2] += delta_y0 obj.transform_shapes(None, translate_coords) self.panel.SIG_REFRESH_PLOT.emit("selected", True) @qt_try_except() def compute_resize(self, param: cdl.param.ResizeParam | None = None) -> None: """Resize image""" obj0 = self.panel.objview.get_sel_objects()[0] for obj in self.panel.objview.get_sel_objects(): if obj.size != obj0.size: QW.QMessageBox.warning( self.panel.parent(), APP_NAME, _("Warning:") + "\n" + _("Selected images do not have the same size"), ) edit, param = self.init_param(param, cpi.ResizeParam, _("Resize")) if edit: original_size = obj0.size dlg = ResizeDialog( self.plotwidget, new_size=original_size, old_size=original_size, text=_("Destination size:"), ) if not exec_dialog(dlg): return param.zoom = dlg.get_zoom() self.compute_11(cpi.compute_resize, param, title=_("Resize"), edit=edit) @qt_try_except() def compute_binning(self, param: cdl.param.BinningParam | None = None) -> None: """Binning image""" edit = param is None obj0 = self.panel.objview.get_sel_objects(include_groups=True)[0] input_dtype_str = str(obj0.data.dtype) title = _("Binning") edit, param = self.init_param(param, cpi.BinningParam, title) if edit: param.dtype_str = input_dtype_str if param.dtype_str is None: param.dtype_str = input_dtype_str self.compute_11(cpi.compute_binning, param, title=title, edit=edit) @qt_try_except() def compute_roi_extraction( self, param: cdl.param.ROIDataParam | None = None ) -> None: """Extract Region Of Interest (ROI) from data""" param = self._get_roidataparam(param) if param is None or param.is_empty: return obj = self.panel.objview.get_sel_objects()[0] group = obj.roidata_to_params(param.roidata) if param.singleobj: self.compute_11(cpi.extract_multiple_roi, group, title=_("Extract ROI")) else: self.compute_1n(cpi.extract_single_roi, group.datasets, "ROI", edit=False) @qt_try_except() def compute_profile(self, param: cdl.param.ProfileParam | None = None) -> None: """Compute profile""" self.compute_11( cpi.compute_profile, param, cdl.param.ProfileParam, title=_("Profile") ) @qt_try_except() def compute_average_profile( self, param: cdl.param.AverageProfileParam | None = None ) -> None: """Compute average profile""" self.compute_11( cpi.compute_average_profile, param, cdl.param.AverageProfileParam, title=_("Average profile"), ) @qt_try_except() def compute_swap_axes(self) -> None: """Swap data axes""" self.compute_11(cpi.compute_swap_axes, title=_("Swap axes")) @qt_try_except() def compute_abs(self) -> None: """Compute absolute value""" self.compute_11(cpi.compute_abs, title=_("Absolute value")) @qt_try_except() def compute_re(self) -> None: """Compute real part""" self.compute_11(cpi.compute_re, title=_("Real part")) @qt_try_except() def compute_im(self) -> None: """Compute imaginary part""" self.compute_11(cpi.compute_im, title=_("Imaginary part")) @qt_try_except() def compute_astype(self, param: cdl.param.DataTypeIParam | None = None) -> None: """Convert data type""" self.compute_11( cpi.compute_astype, param, cpi.DataTypeIParam, title=_("Convert data type") ) @qt_try_except() def compute_log10(self) -> None: """Compute Log10""" 
self.compute_11(cpi.compute_log10, title="Log10") @qt_try_except() def compute_difference(self, obj2: ImageObj | None = None) -> None: """Compute difference between two images""" self.compute_n1n( obj2, _("image to subtract"), cpi.compute_difference, title=_("Difference"), ) @qt_try_except() def compute_quadratic_difference(self, obj2: ImageObj | None = None) -> None: """Compute quadratic difference between two images""" self.compute_n1n( obj2, _("image to subtract"), cpi.compute_quadratic_difference, title=_("Quadratic difference"), ) @qt_try_except() def compute_division(self, obj2: ImageObj | None = None) -> None: """Compute division between two images""" self.compute_n1n( obj2, _("divider"), cpi.compute_division, title=_("Division"), ) @qt_try_except() def compute_flatfield( self, obj2: ImageObj | None = None, param: cdl.param.FlatFieldParam | None = None, ) -> None: """Compute flat field correction""" edit, param = self.init_param(param, cpi.FlatFieldParam, _("Flat field")) if edit: obj = self.panel.objview.get_sel_objects()[0] param.set_from_datatype(obj.data.dtype) self.compute_n1n( obj2, _("flat field image"), cpi.compute_flatfield, param=param, title=_("Flat field correction"), edit=edit, ) # ------Image Processing @qt_try_except() def compute_calibration( self, param: cdl.param.ZCalibrateParam | None = None ) -> None: """Compute data linear calibration""" self.compute_11( cpi.compute_calibration, param, cpi.ZCalibrateParam, _("Linear calibration"), "y = a.x + b", ) @qt_try_except() def compute_threshold(self, param: cpb.ThresholdParam | None = None) -> None: """Compute threshold clipping""" self.compute_11( cpi.compute_threshold, param, cpb.ThresholdParam, _("Thresholding"), ) @qt_try_except() def compute_clip(self, param: cpb.ClipParam | None = None) -> None: """Compute maximum data clipping""" self.compute_11( cpi.compute_clip, param, cpb.ClipParam, _("Clipping"), ) @qt_try_except() def compute_gaussian_filter(self, param: cpb.GaussianParam | None = None) -> None: """Compute gaussian filter""" self.compute_11( cpi.compute_gaussian_filter, param, cpb.GaussianParam, _("Gaussian filter") ) @qt_try_except() def compute_moving_average( self, param: cpb.MovingAverageParam | None = None ) -> None: """Compute moving average""" self.compute_11( cpi.compute_moving_average, param, cpb.MovingAverageParam, _("Moving average"), ) @qt_try_except() def compute_moving_median(self, param: cpb.MovingMedianParam | None = None) -> None: """Compute moving median""" self.compute_11( cpi.compute_moving_median, param, cpb.MovingMedianParam, _("Moving median"), ) @qt_try_except() def compute_wiener(self) -> None: """Compute Wiener filter""" self.compute_11(cpi.compute_wiener, title=_("Wiener filter")) @qt_try_except() def compute_fft(self, param: cdl.param.FFTParam | None = None) -> None: """Compute FFT""" if param is None:
param = cpb.FFTParam.create(shift=Conf.proc.fft_shift_enabled.get())
1
2023-11-09 16:56:03+00:00
24k
ingra14m/Tensor4D-DNeRF
exp_runner.py
[ { "identifier": "Dataset", "path": "models/dataset.py", "snippet": "class Dataset:\n def __init__(self, conf):\n super(Dataset, self).__init__()\n print('Load data: Begin')\n self.device = torch.device('cuda')\n self.conf = conf\n\n self.data_dir = conf.get_string('...
import os import time import logging import argparse import numpy as np import cv2 as cv import torch import torch.nn.functional as F from torch.utils.tensorboard import SummaryWriter from shutil import copyfile from tqdm import tqdm from pyhocon import ConfigFactory from models.dataset import Dataset, BlenderDataset from models.fields import RenderingNetwork, FieldNetwork, SingleVarianceNetwork from models.tensor4d import Tensor4D from models.renderer import NeuSRenderer from models.mask import Mask3D from metrics import *
16,297
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True' class Runner: def __init__(self, conf_path, mode='train', case='CASE_NAME', is_continue=False): self.device = torch.device('cuda') # Configuration self.conf_path = conf_path f = open(self.conf_path) conf_text = f.read() conf_text = conf_text.replace('CASE_NAME', case) f.close() self.conf = ConfigFactory.parse_string(conf_text) self.conf['dataset.data_dir'] = self.conf['dataset.data_dir'].replace('CASE_NAME', case) self.base_exp_dir = self.conf['general.base_exp_dir'] os.makedirs(self.base_exp_dir, exist_ok=True) self.is_blender = self.conf['dataset'].get_bool('is_blender', default=False) self.dataset = BlenderDataset(self.conf['dataset']) if self.is_blender else Dataset(self.conf['dataset']) self.g_nums = self.conf['dataset']['g_nums'] self.iter_step = 0 self.flow = self.conf.get_bool('model.flow', default=False) # Training parameters self.end_iter = self.conf.get_int('train.end_iter') self.save_freq = self.conf.get_int('train.save_freq') self.report_freq = self.conf.get_int('train.report_freq') self.val_freq = self.conf.get_int('train.val_freq') self.batch_size = self.conf.get_int('train.batch_size') self.fine_level_iter = self.conf.get_int('train.fine_level_iter') self.downsample_iter = self.conf.get_int('train.downsample_iter') self.validate_resolution_level = self.conf.get_int('train.validate_resolution_level') self.learning_rate = self.conf.get_float('train.learning_rate') self.learning_rate_alpha = self.conf.get_float('train.learning_rate_alpha') self.use_white_bkgd = self.conf.get_bool('train.use_white_bkgd') self.warm_up_end = self.conf.get_float('train.warm_up_end', default=0.0) self.warm_up_imgs = self.conf.get_int('train.warm_up_imgs', default=50) self.anneal_end = self.conf.get_float('train.anneal_end', default=0.0) self.mask_color_loss = self.conf.get_bool('train.mask_color_loss') self.weighted_sample = self.conf.get_bool('train.weighted_sample') # Weights self.igr_weight = self.conf.get_float('train.igr_weight') self.tgr_weight = self.conf.get_float('train.tgr_weight') self.mask_weight = self.conf.get_float('train.mask_weight') self.tv_weight = self.conf.get_float('train.tv_weight') if self.tv_weight > 0: self.reg_l2 = True else: self.reg_l2 = False self.is_continue = is_continue self.mode = mode self.model_list = [] self.writer = None # Masks self.mask3d = Mask3D(**self.conf['model.mask3d'], num_frames=self.dataset.n_images // self.g_nums, device=self.device) # Networks self.tensor4d = Tensor4D(**self.conf['model.tensor4d']).to(self.device) self.sdf_network = FieldNetwork(d_t4d=self.tensor4d.dims, **self.conf['model.sdf_network']).to(self.device) if self.flow: self.flow_tensor4d = Tensor4D(**self.conf['model.flow_tensor4d']).to(self.device) self.flow_network = FieldNetwork(d_t4d=self.flow_tensor4d.dims, **self.conf['model.flow_network']).to(self.device) else: self.flow_network = None
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True' class Runner: def __init__(self, conf_path, mode='train', case='CASE_NAME', is_continue=False): self.device = torch.device('cuda') # Configuration self.conf_path = conf_path f = open(self.conf_path) conf_text = f.read() conf_text = conf_text.replace('CASE_NAME', case) f.close() self.conf = ConfigFactory.parse_string(conf_text) self.conf['dataset.data_dir'] = self.conf['dataset.data_dir'].replace('CASE_NAME', case) self.base_exp_dir = self.conf['general.base_exp_dir'] os.makedirs(self.base_exp_dir, exist_ok=True) self.is_blender = self.conf['dataset'].get_bool('is_blender', default=False) self.dataset = BlenderDataset(self.conf['dataset']) if self.is_blender else Dataset(self.conf['dataset']) self.g_nums = self.conf['dataset']['g_nums'] self.iter_step = 0 self.flow = self.conf.get_bool('model.flow', default=False) # Training parameters self.end_iter = self.conf.get_int('train.end_iter') self.save_freq = self.conf.get_int('train.save_freq') self.report_freq = self.conf.get_int('train.report_freq') self.val_freq = self.conf.get_int('train.val_freq') self.batch_size = self.conf.get_int('train.batch_size') self.fine_level_iter = self.conf.get_int('train.fine_level_iter') self.downsample_iter = self.conf.get_int('train.downsample_iter') self.validate_resolution_level = self.conf.get_int('train.validate_resolution_level') self.learning_rate = self.conf.get_float('train.learning_rate') self.learning_rate_alpha = self.conf.get_float('train.learning_rate_alpha') self.use_white_bkgd = self.conf.get_bool('train.use_white_bkgd') self.warm_up_end = self.conf.get_float('train.warm_up_end', default=0.0) self.warm_up_imgs = self.conf.get_int('train.warm_up_imgs', default=50) self.anneal_end = self.conf.get_float('train.anneal_end', default=0.0) self.mask_color_loss = self.conf.get_bool('train.mask_color_loss') self.weighted_sample = self.conf.get_bool('train.weighted_sample') # Weights self.igr_weight = self.conf.get_float('train.igr_weight') self.tgr_weight = self.conf.get_float('train.tgr_weight') self.mask_weight = self.conf.get_float('train.mask_weight') self.tv_weight = self.conf.get_float('train.tv_weight') if self.tv_weight > 0: self.reg_l2 = True else: self.reg_l2 = False self.is_continue = is_continue self.mode = mode self.model_list = [] self.writer = None # Masks self.mask3d = Mask3D(**self.conf['model.mask3d'], num_frames=self.dataset.n_images // self.g_nums, device=self.device) # Networks self.tensor4d = Tensor4D(**self.conf['model.tensor4d']).to(self.device) self.sdf_network = FieldNetwork(d_t4d=self.tensor4d.dims, **self.conf['model.sdf_network']).to(self.device) if self.flow: self.flow_tensor4d = Tensor4D(**self.conf['model.flow_tensor4d']).to(self.device) self.flow_network = FieldNetwork(d_t4d=self.flow_tensor4d.dims, **self.conf['model.flow_network']).to(self.device) else: self.flow_network = None
self.deviation_network = SingleVarianceNetwork(**self.conf['model.variance_network']).to(self.device)
4
2023-11-07 10:16:33+00:00
24k
Kushalhk/AutoFilter
plugins/p_ttishow.py
[ { "identifier": "ADMINS", "path": "info.py", "snippet": "ADMINS = [int(admin) if id_pattern.search(admin) else admin for admin in environ.get('ADMINS', '').split()]" }, { "identifier": "LOG_CHANNEL", "path": "info.py", "snippet": "LOG_CHANNEL = int(environ.get('LOG_CHANNEL', ''))" }, ...
from pyrogram import Client, filters, enums from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup, CallbackQuery from pyrogram.errors.exceptions.bad_request_400 import MessageTooLong, PeerIdInvalid from info import ADMINS, LOG_CHANNEL, SUPPORT_CHAT, MELCOW_NEW_USERS, MELCOW_VID, CHNL_LNK, GRP_LNK from database.users_chats_db import db from database.ia_filterdb import Media from utils import get_size, temp, get_settings from Script import script from pyrogram.errors import ChatAdminRequired import asyncio
20,080
"""-----------------------------------------https://t.me/TG_LINKS_CHANNEL--------------------------------------""" @Client.on_message(filters.new_chat_members & filters.group) async def save_group(bot, message): r_j_check = [u.id for u in message.new_chat_members] if temp.ME in r_j_check: if not await db.get_chat(message.chat.id): total=await bot.get_chat_members_count(message.chat.id) r_j = message.from_user.mention if message.from_user else "Anonymous" await bot.send_message(LOG_CHANNEL, script.LOG_TEXT_G.format(message.chat.title, message.chat.id, total, r_j)) await db.add_chat(message.chat.id, message.chat.title) if message.chat.id in temp.BANNED_CHATS: # Inspired from a boat of a banana tree buttons = [[ InlineKeyboardButton('Support', url=f'https://t.me/{SUPPORT_CHAT}') ]] reply_markup=InlineKeyboardMarkup(buttons) k = await message.reply( text='<b>CHAT NOT ALLOWED 🐞\n\nMy admins has restricted me from working here ! If you want to know more about it contact support..</b>', reply_markup=reply_markup, ) try: await k.pin() except: pass await bot.leave_chat(message.chat.id) return buttons = [[ InlineKeyboardButton('🔸 ᴍᴇꜱꜱᴀɢᴇ ʜᴇʀᴇ 🔹', url="https://t.me/TG_Bots_Supporter") ],[ InlineKeyboardButton('ᴄʜᴀɴɴᴇʟ', url=CHNL_LNK), InlineKeyboardButton('ɢʀᴏᴜᴘ', url=GRP_LNK) ]] reply_markup=InlineKeyboardMarkup(buttons) await message.reply_text( text=f"<b>ᴛʜᴀɴᴋ ʏᴏᴜ ꜰᴏʀ ᴀᴅᴅɪɴɢ ᴍᴇ ɪɴ {message.chat.title} ❣️\n\nᴅᴏɴ'ᴛ ꜰᴏʀɢᴇᴛ ᴛᴏ ᴍᴀᴋᴇ ᴍᴇ ᴀᴅᴍɪɴ. ɪꜰ ʏᴏᴜ ʜᴀᴠᴇ ᴀɴʏ ǫᴜᴇꜱᴛɪᴏɴꜱ & ᴅᴏᴜʙᴛꜱ ᴀʙᴏᴜᴛ ᴜꜱɪɴɢ ᴍᴇ ᴄᴏɴᴛᴀᴄᴛ ꜰʀᴏᴍ ᴀᴅᴍɪɴ & ᴍᴇꜱꜱᴀɢᴇ ʜᴇʀᴇ 👇</b>", reply_markup=reply_markup) else: settings = await get_settings(message.chat.id) if settings["welcome"]: for u in message.new_chat_members: if (temp.MELCOW).get('welcome') is not None: try: await (temp.MELCOW['welcome']).delete() except: pass temp.MELCOW['welcome'] = await message.reply_video(
"""-----------------------------------------https://t.me/TG_LINKS_CHANNEL--------------------------------------""" @Client.on_message(filters.new_chat_members & filters.group) async def save_group(bot, message): r_j_check = [u.id for u in message.new_chat_members] if temp.ME in r_j_check: if not await db.get_chat(message.chat.id): total=await bot.get_chat_members_count(message.chat.id) r_j = message.from_user.mention if message.from_user else "Anonymous" await bot.send_message(LOG_CHANNEL, script.LOG_TEXT_G.format(message.chat.title, message.chat.id, total, r_j)) await db.add_chat(message.chat.id, message.chat.title) if message.chat.id in temp.BANNED_CHATS: # Inspired from a boat of a banana tree buttons = [[ InlineKeyboardButton('Support', url=f'https://t.me/{SUPPORT_CHAT}') ]] reply_markup=InlineKeyboardMarkup(buttons) k = await message.reply( text='<b>CHAT NOT ALLOWED 🐞\n\nMy admins has restricted me from working here ! If you want to know more about it contact support..</b>', reply_markup=reply_markup, ) try: await k.pin() except: pass await bot.leave_chat(message.chat.id) return buttons = [[ InlineKeyboardButton('🔸 ᴍᴇꜱꜱᴀɢᴇ ʜᴇʀᴇ 🔹', url="https://t.me/TG_Bots_Supporter") ],[ InlineKeyboardButton('ᴄʜᴀɴɴᴇʟ', url=CHNL_LNK), InlineKeyboardButton('ɢʀᴏᴜᴘ', url=GRP_LNK) ]] reply_markup=InlineKeyboardMarkup(buttons) await message.reply_text( text=f"<b>ᴛʜᴀɴᴋ ʏᴏᴜ ꜰᴏʀ ᴀᴅᴅɪɴɢ ᴍᴇ ɪɴ {message.chat.title} ❣️\n\nᴅᴏɴ'ᴛ ꜰᴏʀɢᴇᴛ ᴛᴏ ᴍᴀᴋᴇ ᴍᴇ ᴀᴅᴍɪɴ. ɪꜰ ʏᴏᴜ ʜᴀᴠᴇ ᴀɴʏ ǫᴜᴇꜱᴛɪᴏɴꜱ & ᴅᴏᴜʙᴛꜱ ᴀʙᴏᴜᴛ ᴜꜱɪɴɢ ᴍᴇ ᴄᴏɴᴛᴀᴄᴛ ꜰʀᴏᴍ ᴀᴅᴍɪɴ & ᴍᴇꜱꜱᴀɢᴇ ʜᴇʀᴇ 👇</b>", reply_markup=reply_markup) else: settings = await get_settings(message.chat.id) if settings["welcome"]: for u in message.new_chat_members: if (temp.MELCOW).get('welcome') is not None: try: await (temp.MELCOW['welcome']).delete() except: pass temp.MELCOW['welcome'] = await message.reply_video(
video=(MELCOW_VID),
4
2023-11-03 12:21:26+00:00
24k
apple/ml-reed
reed/algorithms/pebble.py
[ { "identifier": "utils", "path": "BPref/utils.py", "snippet": "def make_env(cfg):\ndef ppo_make_env(env_id, seed):\ndef tie_weights(src, trg):\ndef make_metaworld_env(cfg):\ndef ppo_make_metaworld_env(env_id, seed):\n def __init__(self, *models):\n def __enter__(self):\n def __exit__(self, *arg...
import typing as t import time import numpy as np import torch import hydra from pathlib import Path from omegaconf import dictconfig, OmegaConf from BPref import utils from BPref.logger import Logger from BPref.replay_buffer import TrajectoryReplayBuffer from collections import deque from reed.models.reward_model import StateActionRewardModel from reed.data.preference_dataset import PreferenceDataset from reed.data.preference_data_loader import PreferenceTripletEnsembleDataLoader from reed.data.preprocess_images import PreProcessInference
19,294
# # For licensing see accompanying LICENSE file. # Copyright (C) 2023 Apple Inc. All Rights Reserved. # class PEBBLE: """ Train a reward model in conjunction with policy training following the PEBBLE algorithm from (Lee et al. 2021) """ def __init__(self, experiment_config: dictconfig.DictConfig): """ Args: experiment_config: contains the configuration for the experiment to be run. Access like a dictionry """ # track the experimental configuration self.experiment_config = experiment_config # create the logger to track policy learning progress self.logger = Logger( self.experiment_config.out_dir, save_tb=self.experiment_config.log_save_tb, log_frequency=self.experiment_config.log_frequency, agent=self.experiment_config.agent.name) # used to track where we are in training # total amount of feedback the reward model has solicited self.total_feedback = 0 # total amount of feedback given to the reward model self.labeled_feedback = 0 # policy train step self.step = 0 # we need to set the random seed for replication purposes utils.set_seed_everywhere(self.experiment_config.seed) # the device on which models will be trained self.device = torch.device(self.experiment_config.device) # flag to make sure we are handling multi-gpu training where we need to self.multi_gpu = torch.cuda.device_count() > 1 print("----------------------------------------") print("----------------------------------------") print("----------------------------------------") print("----------------------------------------") print(f"There is {torch.cuda.device_count()} GPU, so models will be trained with torch.nn.DataParallel.") print("----------------------------------------") print("----------------------------------------") print("----------------------------------------") print("----------------------------------------") # make the environment if 'metaworld' in self.experiment_config.env: self.env = utils.make_metaworld_env(self.experiment_config) # we are not evaluating a domain where we need to log whether an agent has reached a goal state self.log_success = True else: self.env = utils.make_env(self.experiment_config) # we are not evaluating a domain where we need to log whether an agent has reached a goal state self.log_success = False print('----------------------') print('----------------------') print('----------------------') print('----------------------') print("observation space ", self.env.observation_space.shape[0]) print("action space ", self.env.action_space.shape[0]) print('----------------------') print('----------------------') print('----------------------') print('----------------------') # we need to set the policy's observation and action space self.experiment_config.agent.params.obs_dim = self.env.observation_space.shape[0] self.experiment_config.agent.params.action_dim = self.env.action_space.shape[0] self.experiment_config.agent.params.action_range = [ float(self.env.action_space.low.min()), float(self.env.action_space.high.max()) ] # create the agent specified in the configuration self.agent = hydra.utils.instantiate(self.experiment_config.agent) # the class that will format the observations and observation action pairs for consumption by the reward model
# # For licensing see accompanying LICENSE file. # Copyright (C) 2023 Apple Inc. All Rights Reserved. # class PEBBLE: """ Train a reward model in conjunction with policy training following the PEBBLE algorithm from (Lee et al. 2021) """ def __init__(self, experiment_config: dictconfig.DictConfig): """ Args: experiment_config: contains the configuration for the experiment to be run. Access like a dictionry """ # track the experimental configuration self.experiment_config = experiment_config # create the logger to track policy learning progress self.logger = Logger( self.experiment_config.out_dir, save_tb=self.experiment_config.log_save_tb, log_frequency=self.experiment_config.log_frequency, agent=self.experiment_config.agent.name) # used to track where we are in training # total amount of feedback the reward model has solicited self.total_feedback = 0 # total amount of feedback given to the reward model self.labeled_feedback = 0 # policy train step self.step = 0 # we need to set the random seed for replication purposes utils.set_seed_everywhere(self.experiment_config.seed) # the device on which models will be trained self.device = torch.device(self.experiment_config.device) # flag to make sure we are handling multi-gpu training where we need to self.multi_gpu = torch.cuda.device_count() > 1 print("----------------------------------------") print("----------------------------------------") print("----------------------------------------") print("----------------------------------------") print(f"There is {torch.cuda.device_count()} GPU, so models will be trained with torch.nn.DataParallel.") print("----------------------------------------") print("----------------------------------------") print("----------------------------------------") print("----------------------------------------") # make the environment if 'metaworld' in self.experiment_config.env: self.env = utils.make_metaworld_env(self.experiment_config) # we are not evaluating a domain where we need to log whether an agent has reached a goal state self.log_success = True else: self.env = utils.make_env(self.experiment_config) # we are not evaluating a domain where we need to log whether an agent has reached a goal state self.log_success = False print('----------------------') print('----------------------') print('----------------------') print('----------------------') print("observation space ", self.env.observation_space.shape[0]) print("action space ", self.env.action_space.shape[0]) print('----------------------') print('----------------------') print('----------------------') print('----------------------') # we need to set the policy's observation and action space self.experiment_config.agent.params.obs_dim = self.env.observation_space.shape[0] self.experiment_config.agent.params.action_dim = self.env.action_space.shape[0] self.experiment_config.agent.params.action_range = [ float(self.env.action_space.low.min()), float(self.env.action_space.high.max()) ] # create the agent specified in the configuration self.agent = hydra.utils.instantiate(self.experiment_config.agent) # the class that will format the observations and observation action pairs for consumption by the reward model
self._reward_input_preprocessor = PreProcessInference(
6
2023-11-06 23:14:20+00:00
24k
alibaba/animate-anything
train_svd.py
[ { "identifier": "VideoJsonDataset", "path": "utils/dataset.py", "snippet": "class VideoJsonDataset(Dataset):\n def __init__(\n self,\n tokenizer=None,\n width: int = 256,\n height: int = 256,\n n_sample_frames: int = 16,\n fps: int = 8,\n video_dir: st...
import argparse import datetime import logging import inspect import math import os import random import gc import copy import json import numpy as np import cv2 import torch import torch.nn.functional as F import torch.utils.checkpoint import torchvision.transforms as T import diffusers import transformers import imageio import itertools import bitsandbytes as bnb from typing import Dict, Optional, Tuple from omegaconf import OmegaConf from tqdm.auto import tqdm from PIL import Image from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from diffusers.models import AutoencoderKL, UNetSpatioTemporalConditionModel from diffusers import DPMSolverMultistepScheduler, DDPMScheduler, EulerDiscreteScheduler from diffusers.image_processor import VaeImageProcessor from diffusers.optimization import get_scheduler from diffusers.utils import check_min_version, export_to_video from diffusers.utils.import_utils import is_xformers_available from diffusers.models.attention_processor import AttnProcessor2_0, Attention from diffusers.models.attention import BasicTransformerBlock from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_synth import tensor2vid from diffusers import StableVideoDiffusionPipeline from diffusers.pipelines.stable_video_diffusion.pipeline_stable_video_diffusion import _resize_with_antialiasing from transformers import CLIPTextModel, CLIPTokenizer, CLIPVisionModel, CLIPImageProcessor, CLIPTextConfig from transformers.models.clip.modeling_clip import CLIPEncoder from utils.dataset import VideoJsonDataset, SingleVideoDataset, \ ImageDataset, VideoFolderDataset, CachedDataset, VideoBLIPDataset from einops import rearrange, repeat from models.unet_3d_condition_mask import UNet3DConditionModel from models.pipeline import MaskStableVideoDiffusionPipeline from utils.lora_handler import LoraHandler, LORA_VERSIONS from utils.common import read_mask, generate_random_mask, slerp, calculate_motion_score, \ read_video, calculate_motion_precision, calculate_latent_motion_score, \ DDPM_forward, DDPM_forward_timesteps, motion_mask_loss from xformers.ops import MemoryEfficientAttentionFlashAttentionOp
20,491
pixel_values = batch["pixel_values"] bsz, num_frames = pixel_values.shape[:2] frames = rearrange(pixel_values, 'b f c h w-> (b f) c h w') latents = vae.encode(frames).latent_dist.mode() latents = rearrange(latents, '(b f) c h w-> b f c h w', b=bsz) latents = latents * vae.config.scaling_factor if motion_mask: mask = batch["mask"] mask = mask.div(255).to(latents.device) h, w = latents.shape[-2:] mask = T.Resize((h, w), antialias=False)(mask) mask[mask<0.5] = 0 mask[mask>=0.5] = 1 mask = rearrange(mask, 'b h w -> b 1 1 h w') freeze = repeat(latents[:,0], 'b c h w -> b f c h w', f=num_frames) latents = freeze * (1-mask) + latents * mask # enocde image latent image = pixel_values[:,0] image = image + noise_aug_strength * torch.randn_like(image) image_latents = vae.encode(image).latent_dist.mode() image_latents = repeat(image_latents, 'b c h w->b f c h w',f=num_frames) # vae image to clip image images = _resize_with_antialiasing(pixel_values[:,0], (224, 224)) images = (images + 1.0) / 2.0 # [-1, 1] -> [0, 1] images = pipeline.feature_extractor( images=images, do_normalize=True, do_center_crop=False, do_resize=False, do_rescale=False, return_tensors="pt", ).pixel_values image_embeddings = pipeline._encode_image(images, device, 1, False) negative_image_embeddings = torch.zeros_like(image_embeddings) # Add noise to the latents according to the noise magnitude at each timestep # (this is the forward diffusion process) #[bsz, f, c, h , w] rnd_normal = torch.randn([bsz, 1, 1, 1, 1], device=device) sigma = (rnd_normal * P_std + P_mean).exp() c_skip = 1 / (sigma**2 + 1) c_out = -sigma / (sigma**2 + 1) ** 0.5 c_in = 1 / (sigma**2 + 1) ** 0.5 c_noise = sigma.log() / 4 loss_weight = (sigma ** 2 + 1) / sigma ** 2 noisy_latents = latents + torch.randn_like(latents) * sigma input_latents = c_in * noisy_latents input_latents = torch.cat([input_latents, image_latents], dim=2) if motion_mask: mask = repeat(mask, 'b 1 1 h w -> b f 1 h w', f=num_frames) input_latents = torch.cat([mask, input_latents], dim=2) motion_bucket_id = 127 fps = 7 added_time_ids = pipeline._get_add_time_ids(fps, motion_bucket_id, noise_aug_strength, image_embeddings.dtype, bsz, 1, False) added_time_ids = added_time_ids.to(device) losses = [] for i in range(2): encoder_hidden_states = ( negative_image_embeddings if i==0 else image_embeddings ) model_pred = unet(input_latents, c_noise.reshape([bsz]), encoder_hidden_states=encoder_hidden_states, added_time_ids=added_time_ids,).sample predict_x0 = c_out * model_pred + c_skip * noisy_latents loss = ((predict_x0 - latents)**2 * loss_weight).mean() ''' if motion_mask: loss += F.mse_loss(predict_x0*(1-mask), freeze*(1-mask)) ''' losses.append(loss) loss = losses[0] if len(losses) == 1 else losses[0] + losses[1] return loss def main( pretrained_model_path: str, output_dir: str, train_data: Dict, validation_data: Dict, extra_train_data: list = [], dataset_types: Tuple[str] = ('json'), shuffle: bool = True, validation_steps: int = 100, trainable_modules: Tuple[str] = None, # Eg: ("attn1", "attn2") trainable_text_modules: Tuple[str] = None, # Eg: ("all"), this also applies to trainable_modules extra_unet_params = None, extra_text_encoder_params = None, train_batch_size: int = 1, max_train_steps: int = 500, learning_rate: float = 5e-5, scale_lr: bool = False, lr_scheduler: str = "constant", lr_warmup_steps: int = 0, adam_beta1: float = 0.9, adam_beta2: float = 0.999, adam_weight_decay: float = 1e-2, adam_epsilon: float = 1e-08, max_grad_norm: float = 1.0, gradient_accumulation_steps: int = 1, 
gradient_checkpointing: bool = False, text_encoder_gradient_checkpointing: bool = False, checkpointing_steps: int = 500, resume_from_checkpoint: Optional[str] = None, resume_step: Optional[int] = None, mixed_precision: Optional[str] = "fp16", use_8bit_adam: bool = False, enable_xformers_memory_efficient_attention: bool = True, enable_torch_2_attn: bool = False, seed: Optional[int] = None, train_text_encoder: bool = False, use_offset_noise: bool = False, rescale_schedule: bool = False, offset_noise_strength: float = 0.1, extend_dataset: bool = False, cache_latents: bool = False, cached_latent_dir = None,
already_printed_trainables = False logger = get_logger(__name__, log_level="INFO") def create_logging(logging, logger, accelerator): logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) def accelerate_set_verbose(accelerator): if accelerator.is_local_main_process: transformers.utils.logging.set_verbosity_warning() diffusers.utils.logging.set_verbosity_info() else: transformers.utils.logging.set_verbosity_error() diffusers.utils.logging.set_verbosity_error() def get_train_dataset(dataset_types, train_data, tokenizer): train_datasets = [] dataset_cls = [VideoJsonDataset, SingleVideoDataset, ImageDataset, VideoFolderDataset, VideoBLIPDataset] dataset_map = {d.__getname__(): d for d in dataset_cls} # Loop through all available datasets, get the name, then add to list of data to process. for dataset in dataset_types: if dataset in dataset_map: train_datasets.append(dataset_map[dataset](**train_data, tokenizer=tokenizer)) else: raise ValueError(f"Dataset type not found: {dataset} not in {dataset_map.keys()}") return train_datasets def extend_datasets(datasets, dataset_items, extend=False): biggest_data_len = max(x.__len__() for x in datasets) extended = [] for dataset in datasets: if dataset.__len__() == 0: del dataset continue if dataset.__len__() < biggest_data_len: for item in dataset_items: if extend and item not in extended and hasattr(dataset, item): print(f"Extending {item}") value = getattr(dataset, item) value *= biggest_data_len value = value[:biggest_data_len] setattr(dataset, item, value) print(f"New {item} dataset length: {dataset.__len__()}") extended.append(item) def export_to_video(video_frames, output_video_path, fps): fourcc = cv2.VideoWriter_fourcc(*"mp4v") h, w, _ = video_frames[0].shape video_writer = cv2.VideoWriter(output_video_path, fourcc, fps=fps, frameSize=(w, h)) for i in range(len(video_frames)): img = cv2.cvtColor(video_frames[i], cv2.COLOR_RGB2BGR) video_writer.write(img) def create_output_folders(output_dir, config): now = datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S") out_dir = os.path.join(output_dir, f"train_{now}") os.makedirs(out_dir, exist_ok=True) os.makedirs(f"{out_dir}/samples", exist_ok=True) OmegaConf.save(config, os.path.join(out_dir, 'config.yaml')) return out_dir def load_primary_models(pretrained_model_path, eval=False): if eval: pipeline = StableVideoDiffusionPipeline.from_pretrained(pretrained_model_path, torch_dtype=torch.float16, variant='fp16') else: pipeline = StableVideoDiffusionPipeline.from_pretrained(pretrained_model_path) return pipeline, None, pipeline.feature_extractor, pipeline.scheduler, pipeline.image_processor, \ pipeline.image_encoder, pipeline.vae, pipeline.unet def convert_svd(pretrained_model_path, out_path): pipeline = StableVideoDiffusionPipeline.from_pretrained(pretrained_model_path) unet = UNetSpatioTemporalConditionModel.from_pretrained( pretrained_model_path, subfolder="unet_mask", low_cpu_mem_usage=False, ignore_mismatched_sizes=True) unet.conv_in.bias.data = copy.deepcopy(pipeline.unet.conv_in.bias) torch.nn.init.zeros_(unet.conv_in.weight) unet.conv_in.weight.data[:,1:]= copy.deepcopy(pipeline.unet.conv_in.weight) new_pipeline = StableVideoDiffusionPipeline.from_pretrained( pretrained_model_path, unet=unet) new_pipeline.save_pretrained(out_path) def handle_cache_latents( should_cache, output_dir, train_dataloader, train_batch_size, pipeline, device, cached_latent_dir=None, 
shuffle=False, ): # Cache latents by storing them in VRAM. # Speeds up training and saves memory by not encoding during the train loop. if not should_cache: return None pipeline.to(device, dtype=torch.half) cached_latent_dir = ( os.path.abspath(cached_latent_dir) if cached_latent_dir is not None else None ) if cached_latent_dir is None: cache_save_dir = f"{output_dir}/cached_latents" os.makedirs(cache_save_dir, exist_ok=True) for i, batch in enumerate(tqdm(train_dataloader, desc="Caching Latents.")): save_name = f"cached_{i}" full_out_path = f"{cache_save_dir}/{save_name}.pt" pixel_values = batch['pixel_values'].to(device) bsz, num_frames = pixel_values.shape[:2] frames = rearrange(pixel_values, 'b f c h w-> (b f) c h w').to(torch.half) latents = pipeline.vae.encode(frames).latent_dist.mode() latents = rearrange(latents, '(b f) c h w-> b f c h w', b=bsz) latents = latents * pipeline.vae.config.scaling_factor batch['latent'] = latents # vae image to clip image images = _resize_with_antialiasing(pixel_values[:,0], (224, 224)).to(torch.half) images = (images + 1.0) / 2.0 # [-1, 1] -> [0, 1] images = pipeline.feature_extractor( images=images, do_normalize=True, do_center_crop=False, do_resize=False, do_rescale=False, return_tensors="pt", ).pixel_values image_embeddings = pipeline._encode_image(images, device, 1, False) batch['image_embeddings'] = image_embeddings for k, v in batch.items(): batch[k] = v[0] torch.save(batch, full_out_path) del pixel_values del batch # We do this to avoid fragmentation from casting latents between devices. torch.cuda.empty_cache() else: cache_save_dir = cached_latent_dir return torch.utils.data.DataLoader( CachedDataset(cache_dir=cache_save_dir), batch_size=train_batch_size, shuffle=shuffle, num_workers=0 ) def _set_gradient_checkpointing(self, value=False): self.gradient_checkpointing = value self.mid_block.gradient_checkpointing = value for module in self.down_blocks + self.up_blocks: module.gradient_checkpointing = value def unet_and_text_g_c(unet, text_encoder, unet_enable, text_enable): _set_gradient_checkpointing(unet, value=unet_enable) if text_enable: text_encoder.gradient_checkpointing_enable() else: text_encoder.gradient_checkpointing_disable() def freeze_models(models_to_freeze): for model in models_to_freeze: if model is not None: model.requires_grad_(False) def is_attn(name): return ('attn1' or 'attn2' == name.split('.')[-1]) def set_processors(attentions): for attn in attentions: attn.set_processor(AttnProcessor2_0()) def set_torch_2_attn(unet): optim_count = 0 for name, module in unet.named_modules(): if is_attn(name): if isinstance(module, torch.nn.ModuleList): for m in module: if isinstance(m, BasicTransformerBlock): set_processors([m.attn1, m.attn2]) optim_count += 1 if optim_count > 0: print(f"{optim_count} Attention layers using Scaled Dot Product Attention.") def handle_memory_attention(enable_xformers_memory_efficient_attention, enable_torch_2_attn, unet): try: is_torch_2 = hasattr(F, 'scaled_dot_product_attention') enable_torch_2 = is_torch_2 and enable_torch_2_attn if enable_xformers_memory_efficient_attention and not enable_torch_2: if is_xformers_available(): unet.enable_xformers_memory_efficient_attention(attention_op=MemoryEfficientAttentionFlashAttentionOp) else: raise ValueError("xformers is not available. 
Make sure it is installed correctly") if enable_torch_2: set_torch_2_attn(unet) except: print("Could not enable memory efficient attention for xformers or Torch 2.0.") def param_optim(model, condition, extra_params=None, is_lora=False, negation=None): extra_params = extra_params if len(extra_params.keys()) > 0 else None return { "model": model, "condition": condition, 'extra_params': extra_params, 'is_lora': is_lora, "negation": negation } def create_optim_params(name='param', params=None, lr=5e-6, extra_params=None): params = { "name": name, "params": params, "lr": lr } if extra_params is not None: for k, v in extra_params.items(): params[k] = v return params def negate_params(name, negation): # We have to do this if we are co-training with LoRA. # This ensures that parameter groups aren't duplicated. if negation is None: return False for n in negation: if n in name and 'temp' not in name: return True return False def create_optimizer_params(model_list, lr): optimizer_params = [] for optim in model_list: model, condition, extra_params, is_lora, negation = optim.values() # Check if we are doing LoRA training. if is_lora and condition and isinstance(model, list): params = create_optim_params( params=itertools.chain(*model), extra_params=extra_params ) optimizer_params.append(params) continue if is_lora and condition and not isinstance(model, list): for n, p in model.named_parameters(): if 'lora' in n: params = create_optim_params(n, p, lr, extra_params) optimizer_params.append(params) continue # If this is true, we can train it. if condition: for n, p in model.named_parameters(): should_negate = 'lora' in n and not is_lora if should_negate: continue params = create_optim_params(n, p, lr, extra_params) optimizer_params.append(params) return optimizer_params def get_optimizer(use_8bit_adam): if use_8bit_adam: try: except ImportError: raise ImportError( "Please install bitsandbytes to use 8-bit Adam. You can do so by running `pip install bitsandbytes`" ) return bnb.optim.AdamW8bit else: return torch.optim.AdamW def is_mixed_precision(accelerator): weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 return weight_dtype def cast_to_gpu_and_type(model_list, device, weight_dtype): for model in model_list: if model is not None: model.to(device, dtype=weight_dtype) def handle_trainable_modules(model, trainable_modules=None, is_enabled=True, negation=None): global already_printed_trainables # This can most definitely be refactored :-) unfrozen_params = 0 if trainable_modules is not None: for name, module in model.named_modules(): for tm in tuple(trainable_modules): if tm == 'all': model.requires_grad_(is_enabled) unfrozen_params =len(list(model.parameters())) break if tm in name and 'lora' not in name: for m in module.parameters(): m.requires_grad_(is_enabled) if is_enabled: unfrozen_params +=1 if unfrozen_params > 0 and not already_printed_trainables: already_printed_trainables = True print(f"{unfrozen_params} params have been unfrozen for training.") def sample_noise(latents, noise_strength, use_offset_noise=False): b ,c, f, *_ = latents.shape noise_latents = torch.randn_like(latents, device=latents.device) offset_noise = None if use_offset_noise: offset_noise = torch.randn(b, c, f, 1, 1, device=latents.device) noise_latents = noise_latents + noise_strength * offset_noise return noise_latents def enforce_zero_terminal_snr(betas): """ Corrects noise in diffusion schedulers. 
From: Common Diffusion Noise Schedules and Sample Steps are Flawed https://arxiv.org/pdf/2305.08891.pdf """ # Convert betas to alphas_bar_sqrt alphas = 1 - betas alphas_bar = alphas.cumprod(0) alphas_bar_sqrt = alphas_bar.sqrt() # Store old values. alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() # Shift so the last timestep is zero. alphas_bar_sqrt -= alphas_bar_sqrt_T # Scale so the first timestep is back to the old value. alphas_bar_sqrt *= alphas_bar_sqrt_0 / ( alphas_bar_sqrt_0 - alphas_bar_sqrt_T ) # Convert alphas_bar_sqrt to betas alphas_bar = alphas_bar_sqrt ** 2 alphas = alphas_bar[1:] / alphas_bar[:-1] alphas = torch.cat([alphas_bar[0:1], alphas]) betas = 1 - alphas return betas def should_sample(global_step, validation_steps, validation_data): return (global_step % validation_steps == 0 or global_step == 5) \ and validation_data.sample_preview def save_pipe( path, global_step, accelerator, unet, text_encoder, vae, output_dir, lora_manager: LoraHandler, unet_target_replace_module=None, text_target_replace_module=None, is_checkpoint=False, save_pretrained_model=True ): if is_checkpoint: save_path = os.path.join(output_dir, f"checkpoint-{global_step}") os.makedirs(save_path, exist_ok=True) else: save_path = output_dir # Save the dtypes so we can continue training at the same precision. u_dtype, t_dtype, v_dtype = unet.dtype, text_encoder.dtype, vae.dtype # Copy the model without creating a reference to it. This allows keeping the state of our lora training if enabled. unet_save = copy.deepcopy(unet.cpu()) text_encoder_save = copy.deepcopy(text_encoder.cpu()) unet_out = copy.deepcopy(accelerator.unwrap_model(unet_save, keep_fp32_wrapper=False)) text_encoder_out = copy.deepcopy(accelerator.unwrap_model(text_encoder_save, keep_fp32_wrapper=False)) pipeline = StableVideoDiffusionPipeline.from_pretrained( path, unet=unet_out).to(torch_dtype=torch.float32) if save_pretrained_model: pipeline.save_pretrained(save_path) if is_checkpoint: unet, text_encoder = accelerator.prepare(unet, text_encoder) models_to_cast_back = [(unet, u_dtype), (text_encoder, t_dtype), (vae, v_dtype)] [x[0].to(accelerator.device, dtype=x[1]) for x in models_to_cast_back] logger.info(f"Saved model at {save_path} on step {global_step}") del pipeline del unet_out del text_encoder_out torch.cuda.empty_cache() gc.collect() def replace_prompt(prompt, token, wlist): for w in wlist: if w in prompt: return prompt.replace(w, token) return prompt def prompt_image(image, processor, encoder): if type(image) == str: image = Image.open(image) image = processor(images=image, return_tensors="pt")['pixel_values'] image = image.to(encoder.device).to(encoder.dtype) inputs = encoder(image).pooler_output.to(encoder.dtype).unsqueeze(1) #inputs = encoder(image).last_hidden_state.to(encoder.dtype) return inputs def finetune_unet(pipeline, batch, use_offset_noise, rescale_schedule, offset_noise_strength, unet, motion_mask, P_mean=0.7, P_std=1.6, noise_aug_strength=0.02): pipeline.vae.eval() pipeline.image_encoder.eval() vae = pipeline.vae device = vae.device # Convert videos to latent space pixel_values = batch["pixel_values"] bsz, num_frames = pixel_values.shape[:2] frames = rearrange(pixel_values, 'b f c h w-> (b f) c h w') latents = vae.encode(frames).latent_dist.mode() latents = rearrange(latents, '(b f) c h w-> b f c h w', b=bsz) latents = latents * vae.config.scaling_factor if motion_mask: mask = batch["mask"] mask = mask.div(255).to(latents.device) h, w = latents.shape[-2:] mask = 
T.Resize((h, w), antialias=False)(mask) mask[mask<0.5] = 0 mask[mask>=0.5] = 1 mask = rearrange(mask, 'b h w -> b 1 1 h w') freeze = repeat(latents[:,0], 'b c h w -> b f c h w', f=num_frames) latents = freeze * (1-mask) + latents * mask # enocde image latent image = pixel_values[:,0] image = image + noise_aug_strength * torch.randn_like(image) image_latents = vae.encode(image).latent_dist.mode() image_latents = repeat(image_latents, 'b c h w->b f c h w',f=num_frames) # vae image to clip image images = _resize_with_antialiasing(pixel_values[:,0], (224, 224)) images = (images + 1.0) / 2.0 # [-1, 1] -> [0, 1] images = pipeline.feature_extractor( images=images, do_normalize=True, do_center_crop=False, do_resize=False, do_rescale=False, return_tensors="pt", ).pixel_values image_embeddings = pipeline._encode_image(images, device, 1, False) negative_image_embeddings = torch.zeros_like(image_embeddings) # Add noise to the latents according to the noise magnitude at each timestep # (this is the forward diffusion process) #[bsz, f, c, h , w] rnd_normal = torch.randn([bsz, 1, 1, 1, 1], device=device) sigma = (rnd_normal * P_std + P_mean).exp() c_skip = 1 / (sigma**2 + 1) c_out = -sigma / (sigma**2 + 1) ** 0.5 c_in = 1 / (sigma**2 + 1) ** 0.5 c_noise = sigma.log() / 4 loss_weight = (sigma ** 2 + 1) / sigma ** 2 noisy_latents = latents + torch.randn_like(latents) * sigma input_latents = c_in * noisy_latents input_latents = torch.cat([input_latents, image_latents], dim=2) if motion_mask: mask = repeat(mask, 'b 1 1 h w -> b f 1 h w', f=num_frames) input_latents = torch.cat([mask, input_latents], dim=2) motion_bucket_id = 127 fps = 7 added_time_ids = pipeline._get_add_time_ids(fps, motion_bucket_id, noise_aug_strength, image_embeddings.dtype, bsz, 1, False) added_time_ids = added_time_ids.to(device) losses = [] for i in range(2): encoder_hidden_states = ( negative_image_embeddings if i==0 else image_embeddings ) model_pred = unet(input_latents, c_noise.reshape([bsz]), encoder_hidden_states=encoder_hidden_states, added_time_ids=added_time_ids,).sample predict_x0 = c_out * model_pred + c_skip * noisy_latents loss = ((predict_x0 - latents)**2 * loss_weight).mean() ''' if motion_mask: loss += F.mse_loss(predict_x0*(1-mask), freeze*(1-mask)) ''' losses.append(loss) loss = losses[0] if len(losses) == 1 else losses[0] + losses[1] return loss def main( pretrained_model_path: str, output_dir: str, train_data: Dict, validation_data: Dict, extra_train_data: list = [], dataset_types: Tuple[str] = ('json'), shuffle: bool = True, validation_steps: int = 100, trainable_modules: Tuple[str] = None, # Eg: ("attn1", "attn2") trainable_text_modules: Tuple[str] = None, # Eg: ("all"), this also applies to trainable_modules extra_unet_params = None, extra_text_encoder_params = None, train_batch_size: int = 1, max_train_steps: int = 500, learning_rate: float = 5e-5, scale_lr: bool = False, lr_scheduler: str = "constant", lr_warmup_steps: int = 0, adam_beta1: float = 0.9, adam_beta2: float = 0.999, adam_weight_decay: float = 1e-2, adam_epsilon: float = 1e-08, max_grad_norm: float = 1.0, gradient_accumulation_steps: int = 1, gradient_checkpointing: bool = False, text_encoder_gradient_checkpointing: bool = False, checkpointing_steps: int = 500, resume_from_checkpoint: Optional[str] = None, resume_step: Optional[int] = None, mixed_precision: Optional[str] = "fp16", use_8bit_adam: bool = False, enable_xformers_memory_efficient_attention: bool = True, enable_torch_2_attn: bool = False, seed: Optional[int] = None, train_text_encoder: 
bool = False, use_offset_noise: bool = False, rescale_schedule: bool = False, offset_noise_strength: float = 0.1, extend_dataset: bool = False, cache_latents: bool = False, cached_latent_dir = None,
lora_version: LORA_VERSIONS = LORA_VERSIONS[0],
9
2023-12-07 08:26:29+00:00
24k
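Aside: the finetune_unet snippet in this record applies EDM-style preconditioning (Karras et al. 2022) before calling the UNet. A minimal standalone sketch of just those coefficients, with the formulas copied verbatim from the snippet; the function names and the defaults P_mean=0.7, P_std=1.6 mirror the record and are otherwise illustrative.

import torch

def edm_precondition(sigma: torch.Tensor):
    # Preconditioning coefficients as used above:
    # predict_x0 = c_out * F(c_in * noisy; c_noise) + c_skip * noisy
    c_skip = 1 / (sigma**2 + 1)
    c_out = -sigma / (sigma**2 + 1) ** 0.5
    c_in = 1 / (sigma**2 + 1) ** 0.5
    c_noise = sigma.log() / 4
    loss_weight = (sigma**2 + 1) / sigma**2
    return c_skip, c_out, c_in, c_noise, loss_weight

def sample_sigma(batch_size: int, P_mean=0.7, P_std=1.6, device="cpu"):
    # Log-normal noise-level sampling, as in finetune_unet.
    rnd = torch.randn([batch_size, 1, 1, 1, 1], device=device)
    return (rnd * P_std + P_mean).exp()

The model output is then recombined as predict_x0 = c_out * model_pred + c_skip * noisy_latents, and the squared error against the clean latents is scaled by loss_weight.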
rehg-lab/RAVE
annotator/oneformer/detectron2/modeling/meta_arch/retinanet.py
[ { "identifier": "configurable", "path": "annotator/oneformer/detectron2/config/config.py", "snippet": "def configurable(init_func=None, *, from_config=None):\r\n \"\"\"\r\n Decorate a function or a class's __init__ method so that it can be called\r\n with a :class:`CfgNode` object using a :func...
import logging import math import torch from typing import List, Tuple from fvcore.nn import sigmoid_focal_loss_jit from torch import Tensor, nn from torch.nn import functional as F from annotator.oneformer.detectron2.config import configurable from annotator.oneformer.detectron2.layers import CycleBatchNormList, ShapeSpec, batched_nms, cat, get_norm from annotator.oneformer.detectron2.structures import Boxes, ImageList, Instances, pairwise_iou from annotator.oneformer.detectron2.utils.events import get_event_storage from ..anchor_generator import build_anchor_generator from ..backbone import Backbone, build_backbone from ..box_regression import Box2BoxTransform, _dense_box_regression_loss from ..matcher import Matcher from .build import META_ARCH_REGISTRY from .dense_detector import DenseDetector, permute_to_N_HWA_K # noqa
17,658
match_quality_matrix = pairwise_iou(gt_per_image.gt_boxes, anchors) matched_idxs, anchor_labels = self.anchor_matcher(match_quality_matrix) del match_quality_matrix if len(gt_per_image) > 0: matched_gt_boxes_i = gt_per_image.gt_boxes.tensor[matched_idxs] gt_labels_i = gt_per_image.gt_classes[matched_idxs] # Anchors with label 0 are treated as background. gt_labels_i[anchor_labels == 0] = self.num_classes # Anchors with label -1 are ignored. gt_labels_i[anchor_labels == -1] = -1 else: matched_gt_boxes_i = torch.zeros_like(anchors.tensor) gt_labels_i = torch.zeros_like(matched_idxs) + self.num_classes gt_labels.append(gt_labels_i) matched_gt_boxes.append(matched_gt_boxes_i) return gt_labels, matched_gt_boxes def forward_inference( self, images: ImageList, features: List[Tensor], predictions: List[List[Tensor]] ): pred_logits, pred_anchor_deltas = self._transpose_dense_predictions( predictions, [self.num_classes, 4] ) anchors = self.anchor_generator(features) results: List[Instances] = [] for img_idx, image_size in enumerate(images.image_sizes): scores_per_image = [x[img_idx].sigmoid_() for x in pred_logits] deltas_per_image = [x[img_idx] for x in pred_anchor_deltas] results_per_image = self.inference_single_image( anchors, scores_per_image, deltas_per_image, image_size ) results.append(results_per_image) return results def inference_single_image( self, anchors: List[Boxes], box_cls: List[Tensor], box_delta: List[Tensor], image_size: Tuple[int, int], ): """ Single-image inference. Return bounding-box detection results by thresholding on scores and applying non-maximum suppression (NMS). Arguments: anchors (list[Boxes]): list of #feature levels. Each entry contains a Boxes object, which contains all the anchors in that feature level. box_cls (list[Tensor]): list of #feature levels. Each entry contains tensor of size (H x W x A, K) box_delta (list[Tensor]): Same shape as 'box_cls' except that K becomes 4. image_size (tuple(H, W)): a tuple of the image height and width. Returns: Same as `inference`, but for only one image. """ pred = self._decode_multi_level_predictions( anchors, box_cls, box_delta, self.test_score_thresh, self.test_topk_candidates, image_size, ) keep = batched_nms( # per-class NMS pred.pred_boxes.tensor, pred.scores, pred.pred_classes, self.test_nms_thresh ) return pred[keep[: self.max_detections_per_image]] class RetinaNetHead(nn.Module): """ The head used in RetinaNet for object classification and box regression. It has two subnets for the two tasks, with a common structure but separate parameters. """ @configurable def __init__( self, *, input_shape: List[ShapeSpec], num_classes, num_anchors, conv_dims: List[int], norm="", prior_prob=0.01, ): """ NOTE: this interface is experimental. Args: input_shape (List[ShapeSpec]): input shape num_classes (int): number of classes. Used to label background proposals. num_anchors (int): number of generated anchors conv_dims (List[int]): dimensions for each convolution layer norm (str or callable): Normalization for conv layers except for the two output layers. See :func:`detectron2.layers.get_norm` for supported types. prior_prob (float): Prior weight for computing bias """ super().__init__() self._num_features = len(input_shape) if norm == "BN" or norm == "SyncBN": logger.info( f"Using domain-specific {norm} in RetinaNetHead with len={self._num_features}." ) bn_class = nn.BatchNorm2d if norm == "BN" else nn.SyncBatchNorm def norm(c): return CycleBatchNormList( length=self._num_features, bn_class=bn_class, num_features=c ) else:
# Copyright (c) Facebook, Inc. and its affiliates. __all__ = ["RetinaNet"] logger = logging.getLogger(__name__) @META_ARCH_REGISTRY.register() class RetinaNet(DenseDetector): """ Implement RetinaNet in :paper:`RetinaNet`. """ @configurable def __init__( self, *, backbone: Backbone, head: nn.Module, head_in_features, anchor_generator, box2box_transform, anchor_matcher, num_classes, focal_loss_alpha=0.25, focal_loss_gamma=2.0, smooth_l1_beta=0.0, box_reg_loss_type="smooth_l1", test_score_thresh=0.05, test_topk_candidates=1000, test_nms_thresh=0.5, max_detections_per_image=100, pixel_mean, pixel_std, vis_period=0, input_format="BGR", ): """ NOTE: this interface is experimental. Args: backbone: a backbone module, must follow detectron2's backbone interface head (nn.Module): a module that predicts logits and regression deltas for each level from a list of per-level features head_in_features (Tuple[str]): Names of the input feature maps to be used in head anchor_generator (nn.Module): a module that creates anchors from a list of features. Usually an instance of :class:`AnchorGenerator` box2box_transform (Box2BoxTransform): defines the transform from anchors boxes to instance boxes anchor_matcher (Matcher): label the anchors by matching them with ground truth. num_classes (int): number of classes. Used to label background proposals. # Loss parameters: focal_loss_alpha (float): focal_loss_alpha focal_loss_gamma (float): focal_loss_gamma smooth_l1_beta (float): smooth_l1_beta box_reg_loss_type (str): Options are "smooth_l1", "giou", "diou", "ciou" # Inference parameters: test_score_thresh (float): Inference cls score threshold, only anchors with score > INFERENCE_TH are considered for inference (to improve speed) test_topk_candidates (int): Select topk candidates before NMS test_nms_thresh (float): Overlap threshold used for non-maximum suppression (suppress boxes with IoU >= this threshold) max_detections_per_image (int): Maximum number of detections to return per image during inference (100 is based on the limit established for the COCO dataset). pixel_mean, pixel_std: see :class:`DenseDetector`. 
""" super().__init__( backbone, head, head_in_features, pixel_mean=pixel_mean, pixel_std=pixel_std ) self.num_classes = num_classes # Anchors self.anchor_generator = anchor_generator self.box2box_transform = box2box_transform self.anchor_matcher = anchor_matcher # Loss parameters: self.focal_loss_alpha = focal_loss_alpha self.focal_loss_gamma = focal_loss_gamma self.smooth_l1_beta = smooth_l1_beta self.box_reg_loss_type = box_reg_loss_type # Inference parameters: self.test_score_thresh = test_score_thresh self.test_topk_candidates = test_topk_candidates self.test_nms_thresh = test_nms_thresh self.max_detections_per_image = max_detections_per_image # Vis parameters self.vis_period = vis_period self.input_format = input_format @classmethod def from_config(cls, cfg): backbone = build_backbone(cfg) backbone_shape = backbone.output_shape() feature_shapes = [backbone_shape[f] for f in cfg.MODEL.RETINANET.IN_FEATURES] head = RetinaNetHead(cfg, feature_shapes) anchor_generator = build_anchor_generator(cfg, feature_shapes) return { "backbone": backbone, "head": head, "anchor_generator": anchor_generator, "box2box_transform": Box2BoxTransform(weights=cfg.MODEL.RETINANET.BBOX_REG_WEIGHTS), "anchor_matcher": Matcher( cfg.MODEL.RETINANET.IOU_THRESHOLDS, cfg.MODEL.RETINANET.IOU_LABELS, allow_low_quality_matches=True, ), "pixel_mean": cfg.MODEL.PIXEL_MEAN, "pixel_std": cfg.MODEL.PIXEL_STD, "num_classes": cfg.MODEL.RETINANET.NUM_CLASSES, "head_in_features": cfg.MODEL.RETINANET.IN_FEATURES, # Loss parameters: "focal_loss_alpha": cfg.MODEL.RETINANET.FOCAL_LOSS_ALPHA, "focal_loss_gamma": cfg.MODEL.RETINANET.FOCAL_LOSS_GAMMA, "smooth_l1_beta": cfg.MODEL.RETINANET.SMOOTH_L1_LOSS_BETA, "box_reg_loss_type": cfg.MODEL.RETINANET.BBOX_REG_LOSS_TYPE, # Inference parameters: "test_score_thresh": cfg.MODEL.RETINANET.SCORE_THRESH_TEST, "test_topk_candidates": cfg.MODEL.RETINANET.TOPK_CANDIDATES_TEST, "test_nms_thresh": cfg.MODEL.RETINANET.NMS_THRESH_TEST, "max_detections_per_image": cfg.TEST.DETECTIONS_PER_IMAGE, # Vis parameters "vis_period": cfg.VIS_PERIOD, "input_format": cfg.INPUT.FORMAT, } def forward_training(self, images, features, predictions, gt_instances): # Transpose the Hi*Wi*A dimension to the middle: pred_logits, pred_anchor_deltas = self._transpose_dense_predictions( predictions, [self.num_classes, 4] ) anchors = self.anchor_generator(features) gt_labels, gt_boxes = self.label_anchors(anchors, gt_instances) return self.losses(anchors, pred_logits, gt_labels, pred_anchor_deltas, gt_boxes) def losses(self, anchors, pred_logits, gt_labels, pred_anchor_deltas, gt_boxes): """ Args: anchors (list[Boxes]): a list of #feature level Boxes gt_labels, gt_boxes: see output of :meth:`RetinaNet.label_anchors`. Their shapes are (N, R) and (N, R, 4), respectively, where R is the total number of anchors across levels, i.e. sum(Hi x Wi x Ai) pred_logits, pred_anchor_deltas: both are list[Tensor]. Each element in the list corresponds to one level and has shape (N, Hi * Wi * Ai, K or 4). Where K is the number of classes used in `pred_logits`. Returns: dict[str, Tensor]: mapping from a named loss to a scalar tensor storing the loss. Used during training only. 
The dict keys are: "loss_cls" and "loss_box_reg" """ num_images = len(gt_labels) gt_labels = torch.stack(gt_labels) # (N, R) valid_mask = gt_labels >= 0 pos_mask = (gt_labels >= 0) & (gt_labels != self.num_classes) num_pos_anchors = pos_mask.sum().item() get_event_storage().put_scalar("num_pos_anchors", num_pos_anchors / num_images) normalizer = self._ema_update("loss_normalizer", max(num_pos_anchors, 1), 100) # classification and regression loss gt_labels_target = F.one_hot(gt_labels[valid_mask], num_classes=self.num_classes + 1)[ :, :-1 ] # no loss for the last (background) class loss_cls = sigmoid_focal_loss_jit( cat(pred_logits, dim=1)[valid_mask], gt_labels_target.to(pred_logits[0].dtype), alpha=self.focal_loss_alpha, gamma=self.focal_loss_gamma, reduction="sum", ) loss_box_reg = _dense_box_regression_loss( anchors, self.box2box_transform, pred_anchor_deltas, gt_boxes, pos_mask, box_reg_loss_type=self.box_reg_loss_type, smooth_l1_beta=self.smooth_l1_beta, ) return { "loss_cls": loss_cls / normalizer, "loss_box_reg": loss_box_reg / normalizer, } @torch.no_grad() def label_anchors(self, anchors, gt_instances): """ Args: anchors (list[Boxes]): A list of #feature level Boxes. The Boxes contains anchors of this image on the specific feature level. gt_instances (list[Instances]): a list of N `Instances`s. The i-th `Instances` contains the ground-truth per-instance annotations for the i-th input image. Returns: list[Tensor]: List of #img tensors. i-th element is a vector of labels whose length is the total number of anchors across all feature maps (sum(Hi * Wi * A)). Label values are in {-1, 0, ..., K}, with -1 means ignore, and K means background. list[Tensor]: i-th element is a Rx4 tensor, where R is the total number of anchors across feature maps. The values are the matched gt boxes for each anchor. Values are undefined for those anchors not labeled as foreground. """ anchors = Boxes.cat(anchors) # Rx4 gt_labels = [] matched_gt_boxes = [] for gt_per_image in gt_instances: match_quality_matrix = pairwise_iou(gt_per_image.gt_boxes, anchors) matched_idxs, anchor_labels = self.anchor_matcher(match_quality_matrix) del match_quality_matrix if len(gt_per_image) > 0: matched_gt_boxes_i = gt_per_image.gt_boxes.tensor[matched_idxs] gt_labels_i = gt_per_image.gt_classes[matched_idxs] # Anchors with label 0 are treated as background. gt_labels_i[anchor_labels == 0] = self.num_classes # Anchors with label -1 are ignored. gt_labels_i[anchor_labels == -1] = -1 else: matched_gt_boxes_i = torch.zeros_like(anchors.tensor) gt_labels_i = torch.zeros_like(matched_idxs) + self.num_classes gt_labels.append(gt_labels_i) matched_gt_boxes.append(matched_gt_boxes_i) return gt_labels, matched_gt_boxes def forward_inference( self, images: ImageList, features: List[Tensor], predictions: List[List[Tensor]] ): pred_logits, pred_anchor_deltas = self._transpose_dense_predictions( predictions, [self.num_classes, 4] ) anchors = self.anchor_generator(features) results: List[Instances] = [] for img_idx, image_size in enumerate(images.image_sizes): scores_per_image = [x[img_idx].sigmoid_() for x in pred_logits] deltas_per_image = [x[img_idx] for x in pred_anchor_deltas] results_per_image = self.inference_single_image( anchors, scores_per_image, deltas_per_image, image_size ) results.append(results_per_image) return results def inference_single_image( self, anchors: List[Boxes], box_cls: List[Tensor], box_delta: List[Tensor], image_size: Tuple[int, int], ): """ Single-image inference. 
Return bounding-box detection results by thresholding on scores and applying non-maximum suppression (NMS). Arguments: anchors (list[Boxes]): list of #feature levels. Each entry contains a Boxes object, which contains all the anchors in that feature level. box_cls (list[Tensor]): list of #feature levels. Each entry contains tensor of size (H x W x A, K) box_delta (list[Tensor]): Same shape as 'box_cls' except that K becomes 4. image_size (tuple(H, W)): a tuple of the image height and width. Returns: Same as `inference`, but for only one image. """ pred = self._decode_multi_level_predictions( anchors, box_cls, box_delta, self.test_score_thresh, self.test_topk_candidates, image_size, ) keep = batched_nms( # per-class NMS pred.pred_boxes.tensor, pred.scores, pred.pred_classes, self.test_nms_thresh ) return pred[keep[: self.max_detections_per_image]] class RetinaNetHead(nn.Module): """ The head used in RetinaNet for object classification and box regression. It has two subnets for the two tasks, with a common structure but separate parameters. """ @configurable def __init__( self, *, input_shape: List[ShapeSpec], num_classes, num_anchors, conv_dims: List[int], norm="", prior_prob=0.01, ): """ NOTE: this interface is experimental. Args: input_shape (List[ShapeSpec]): input shape num_classes (int): number of classes. Used to label background proposals. num_anchors (int): number of generated anchors conv_dims (List[int]): dimensions for each convolution layer norm (str or callable): Normalization for conv layers except for the two output layers. See :func:`detectron2.layers.get_norm` for supported types. prior_prob (float): Prior weight for computing bias """ super().__init__() self._num_features = len(input_shape) if norm == "BN" or norm == "SyncBN": logger.info( f"Using domain-specific {norm} in RetinaNetHead with len={self._num_features}." ) bn_class = nn.BatchNorm2d if norm == "BN" else nn.SyncBatchNorm def norm(c): return CycleBatchNormList( length=self._num_features, bn_class=bn_class, num_features=c ) else:
norm_name = str(type(get_norm(norm, 32)))
1
2023-12-05 02:51:53+00:00
24k
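Aside: in the RetinaNet losses above, classification targets are built by one-hot encoding labels over K+1 classes and dropping the last (background) column, so background anchors contribute all-zero target rows to the sigmoid focal loss. A minimal sketch of that step in isolation; the function name is illustrative.

import torch
import torch.nn.functional as F

def one_hot_targets(gt_labels: torch.Tensor, num_classes: int) -> torch.Tensor:
    # gt_labels holds int64 values in {-1, 0, ..., K}: -1 means "ignore",
    # K means background. Ignored anchors are masked out first; the
    # background column is then dropped so those rows become all zeros.
    valid_mask = gt_labels >= 0
    return F.one_hot(gt_labels[valid_mask], num_classes=num_classes + 1)[:, :-1]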
DiffusionLight/DiffusionLight
relighting/inpainter.py
[ { "identifier": "CustomStableDiffusionControlNetInpaintPipeline", "path": "relighting/pipeline.py", "snippet": "class CustomStableDiffusionControlNetInpaintPipeline(StableDiffusionControlNetInpaintPipeline):\n @torch.no_grad()\n def __call__(\n self,\n prompt: Union[str, List[str]] =...
import torch import numpy as np import os import pickle from diffusers import ControlNetModel, AutoencoderKL from PIL import Image from tqdm.auto import tqdm from transformers import pipeline as transformers_pipeline from relighting.pipeline import CustomStableDiffusionControlNetInpaintPipeline from relighting.pipeline_inpaintonly import CustomStableDiffusionInpaintPipeline, CustomStableDiffusionXLInpaintPipeline from relighting.argument import SAMPLERS, VAE_MODELS, DEPTH_ESTIMATOR, get_control_signal_type from relighting.image_processor import ( estimate_scene_depth, estimate_scene_normal, merge_normal_map, fill_depth_circular ) from relighting.ball_processor import get_ideal_normal_ball, crop_ball from relighting.pipeline_xl import CustomStableDiffusionXLControlNetInpaintPipeline
17,552
class NoWaterMark:
    def apply_watermark(self, *args, **kwargs):
        return args[0]

class ControlSignalGenerator():
    def __init__(self, sd_arch, control_signal_type, device):
        self.sd_arch = sd_arch
        self.control_signal_type = control_signal_type
        self.device = device

    def process_sd_depth(self, input_image, normal_ball=None, mask_ball=None, x=None, y=None, r=None):
        if getattr(self, 'depth_estimator', None) is None:
            self.depth_estimator = transformers_pipeline("depth-estimation", device=self.device.index)

        control_image = self.depth_estimator(input_image)['depth']
        control_image = np.array(control_image)
        control_image = control_image[:, :, None]
        control_image = np.concatenate([control_image, control_image, control_image], axis=2)
        control_image = Image.fromarray(control_image)

        control_image = fill_depth_circular(control_image, x, y, r)
        return control_image

    def process_sdxl_depth(self, input_image, normal_ball=None, mask_ball=None, x=None, y=None, r=None):
        if getattr(self, 'depth_estimator', None) is None:
class NoWaterMark:
    def apply_watermark(self, *args, **kwargs):
        return args[0]

class ControlSignalGenerator():
    def __init__(self, sd_arch, control_signal_type, device):
        self.sd_arch = sd_arch
        self.control_signal_type = control_signal_type
        self.device = device

    def process_sd_depth(self, input_image, normal_ball=None, mask_ball=None, x=None, y=None, r=None):
        if getattr(self, 'depth_estimator', None) is None:
            self.depth_estimator = transformers_pipeline("depth-estimation", device=self.device.index)

        control_image = self.depth_estimator(input_image)['depth']
        control_image = np.array(control_image)
        control_image = control_image[:, :, None]
        control_image = np.concatenate([control_image, control_image, control_image], axis=2)
        control_image = Image.fromarray(control_image)

        control_image = fill_depth_circular(control_image, x, y, r)
        return control_image

    def process_sdxl_depth(self, input_image, normal_ball=None, mask_ball=None, x=None, y=None, r=None):
        if getattr(self, 'depth_estimator', None) is None:
self.depth_estimator = transformers_pipeline("depth-estimation", model=DEPTH_ESTIMATOR, device=self.device.index)
5
2023-12-07 14:03:31+00:00
24k
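Aside: process_sd_depth above converts a single-channel monocular depth estimate into a three-channel control image for the depth ControlNet. A minimal sketch of that conversion, assuming the depth map is already scaled to 0-255; the function name is illustrative.

import numpy as np
from PIL import Image

def depth_to_control_image(depth: np.ndarray) -> Image.Image:
    # Replicate the single depth channel across three channels so the
    # result has the RGB shape a ControlNet conditioning image expects.
    depth = depth.astype(np.uint8)[:, :, None]
    return Image.fromarray(np.concatenate([depth, depth, depth], axis=2))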
modelscope/normal-depth-diffusion
ldm/models/diffusion/ddpm.py
[ { "identifier": "AutoencoderKL", "path": "ldm/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n\n def __init__(self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\...
import pdb import numpy as np import pytorch_lightning as pl import torch import torch.nn as nn import torch.nn.functional as F from contextlib import contextmanager from functools import partial from einops import rearrange, repeat from ldm.models.autoencoder import (AutoencoderKL, IdentityFirstStage, VQModelInterface) from ldm.models.diffusion.ddim import DDIMSampler from ldm.models.diffusion.dpm_solver import DPMSolverSampler from ldm.models.diffusion.plms import PLMSSampler from ldm.modules.attention import CrossAttention from ldm.modules.diffusionmodules.util import (extract_into_tensor, make_beta_schedule, noise_like) from ldm.modules.distributions.distributions import ( DiagonalGaussianDistribution, normal_kl) from ldm.modules.ema import LitEma from ldm.util import (count_params, default, exists, filter_nan_loss, instantiate_from_config, isimage, ismap, log_txt_as_img, mean_flat) from torch.optim.lr_scheduler import LambdaLR from torchvision.utils import make_grid from tqdm import tqdm from pytorch_lightning.utilities.distributed import rank_zero_only from pytorch_lightning.utilities.rank_zero import rank_zero_only
14,901
class anneal_identity(): def __call__(self, x, global_step): return x def upper_bound(arr, key): left = 0 right = len(arr) while left < right: mid = (left + right) >> 1 if arr[mid] < key: left = mid + 1 else: right = mid return left class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__( self, unet_config, timesteps=1000, beta_schedule='linear', loss_type='l2', ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor='val/loss', use_ema=True, first_stage_key='image', image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization='eps', # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., anneal_t=False, # we find at the begining, smaller t, larger denoise mse loss. anneal_global_step=[], anneal_ratio=0.9, prior_model=None, prior_normal=None, input_keys=['rgb'], ): super().__init__() assert parameterization in [ 'eps', 'x0' ], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print( f'{self.__class__.__name__}: Running in {self.parameterization}-prediction mode' ) self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f'Keeping EMAs of {len(list(self.model_ema.buffers()))}.') self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight self.input_keys = input_keys if monitor is not None: self.monitor = monitor if ckpt_path is not None: self.init_from_ckpt( ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) self.register_schedule( given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full( fill_value=logvar_init, size=(self.num_timesteps, )) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) ### anneal t function if not anneal_t: self.anneal_func = anneal_identity() else: self.anneal_func = anneal_warmup(anneal_ratio, anneal_global_step, self.num_timesteps) if prior_model is not None:
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ try: except: __conditioning_keys__ = { 'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y' } def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class anneal_identity(): def __call__(self, x, global_step): return x def upper_bound(arr, key): left = 0 right = len(arr) while left < right: mid = (left + right) >> 1 if arr[mid] < key: left = mid + 1 else: right = mid return left class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__( self, unet_config, timesteps=1000, beta_schedule='linear', loss_type='l2', ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor='val/loss', use_ema=True, first_stage_key='image', image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization='eps', # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., anneal_t=False, # we find at the begining, smaller t, larger denoise mse loss. anneal_global_step=[], anneal_ratio=0.9, prior_model=None, prior_normal=None, input_keys=['rgb'], ): super().__init__() assert parameterization in [ 'eps', 'x0' ], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print( f'{self.__class__.__name__}: Running in {self.parameterization}-prediction mode' ) self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? 
self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f'Keeping EMAs of {len(list(self.model_ema.buffers()))}.') self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight self.input_keys = input_keys if monitor is not None: self.monitor = monitor if ckpt_path is not None: self.init_from_ckpt( ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) self.register_schedule( given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full( fill_value=logvar_init, size=(self.num_timesteps, )) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) ### anneal t function if not anneal_t: self.anneal_func = anneal_identity() else: self.anneal_func = anneal_warmup(anneal_ratio, anneal_global_step, self.num_timesteps) if prior_model is not None:
self.prior_model = instantiate_from_config(prior_model)
17
2023-12-06 07:29:34+00:00
24k
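Aside: despite its name, the upper_bound helper in this record implements lower-bound semantics: it returns the first index whose value is >= key, which is exactly Python's bisect.bisect_left. A quick equivalence check:

import bisect

# upper_bound(arr, key) from the snippet returns the first index i with
# arr[i] >= key, the same result as bisect.bisect_left:
arr = [1, 3, 3, 7]
assert bisect.bisect_left(arr, 3) == 1  # upper_bound(arr, 3) also returns 1
assert bisect.bisect_left(arr, 8) == 4  # key beyond the last element -> len(arr)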
RobertCsordas/moe_attention
tasks/simple/language_model/transformer_lm_mixin.py
[ { "identifier": "TransformerLanguageModel", "path": "models/transformer_language_model.py", "snippet": "class TransformerLanguageModel(LoggingLayer, torch.nn.Module):\n def __init__(self, voc_size: int, embedding_size: Optional[int], state_size: int, dropout: float,\n tied_embedding: ...
import framework import torch import torch.nn import torch.nn.functional as F import torch.utils.data import math from typing import List, Tuple, Dict, Any from models import TransformerLanguageModel from ... import task, args from layers.transformer import RelativeTransformerEncoderLayer, PrelnRelativeTransformerEncoderLayer from layers.transformer.relative_moe_transformer import RelativeMoeTransformerEncoderLayer from layers.transformer.fast_rope_transformer import FastRopeTransformerEncoderLayer from layers.transformer.moe_attention_relative_transformer import MoeAttentionRelativeTransformerEncoderLayer from layers.moe_layer import MoE from interfaces import Result from layers import LayerVisualizer from layers.transformer.full_moe_relative_attention import FullMoeRelativeAttentionCore
21,372
v_projection_size=self.helper.args.moe.att.v_size, same_sel=self.helper.args.moe.att.same_sel, moe_k=self.helper.args.moe.att.k, qside_n_experts=self.helper.args.moe.att.qside_n_experts, shared_experts=self.helper.args.moe.att.shared_experts, kq_n_experts=self.helper.args.moe.att.kq_n_experts, separate_kq_sel=self.helper.args.moe.att.separate_kq_sel, moa_mode=self.helper.args.moa.mode, cvloss=self.helper.args.moa.cvloss, switchloss=self.helper.args.moa.switchloss, zloss=self.helper.args.moa.zloss, rotate_fraction=self.helper.args.rope.rotate_fraction, rope_base=self.helper.args.rope.base, moeatt_norm_init=self.helper.args.moe.att.norm_init) elif self.helper.args.transformer.variant in {"preln_rope", "rope"}: mklayer = lambda: FastRopeTransformerEncoderLayer( **base_args, **extra_args, n_layers=self.helper.args.transformer.encoder_n_layers, head_projection_size=self.helper.args.transformer.head_projection_size, preln=self.is_preln(), rotate_fraction = self.helper.args.rope.rotate_fraction, rope_base=self.helper.args.rope.base) elif self.helper.args.transformer.variant in {"preln_moe", "moe"}: # def __init__(self, d_model, nhead, n_bins: int, bin_size: int, n_layers: int, dim_feedforward=2048, mklayer = lambda: RelativeMoeTransformerEncoderLayer( **base_args, **extra_args, preln=self.is_preln(), test_pos_clamp=self.helper.args.lm.trafo.test_pos_clamp, n_layers=self.helper.args.transformer.encoder_n_layers, n_experts=self.helper.args.moe.n_experts, expert_size=self.helper.args.moe.expert_size, dropout_mode=self.helper.args.kvmem.dropout, selection_mode=self.helper.args.moe.selection_mode, perplexity_reg=self.helper.args.moe.perplexity_reg, n_heads=self.helper.args.pkm.n_heads, norm_keys=self.helper.args.moe.norm_keys, perplexity_reg_mode=self.helper.args.moe.perplexity_reg_mode, n_random=self.helper.args.moe.n_random, reg_type=self.helper.args.moe.reg_type, topk_mode=self.helper.args.moe.topk_mode, head_projection_size=self.helper.args.transformer.head_projection_size, activation_after_topk=self.helper.args.moe.activation_after_topk, drop_parallel=self.helper.args.moe.drop_parallel, norm_key_init=self.helper.args.moe.norm_key_init, norm_value_init=self.helper.args.moe.norm_value_init, normalize_expert_sel_init=self.helper.args.moe.norm_expert_sel_init, identical_init=self.helper.args.moe.identical_init, sel_norm=self.helper.args.moe.sel_norm, ln_affine=self.helper.args.transformer.ln_affine, moe_dropout_factor=self.helper.args.moe.dropout_factor, drop_expert=self.helper.args.moe.drop_expert, sync_distributed=self.helper.args.moe.sync_distributed, modulation_amplitude=self.helper.args.moe.modulation_amplitude, moe_init_scale=self.helper.args.moe.init_scale, moe_attention=self.helper.args.moe.att.enable, moe_att_n_experts=self.helper.args.moe.att.n_experts, moe_att_expert_dropout=self.helper.args.moe.drop_expert if self.helper.args.moe.att.drop_expert is None else self.helper.args.moe.att.drop_expert, moe_att_selection_mode=self.helper.args.moe.att.selection_mode, moe_att_variant=self.helper.args.moe.att.variant, moe_att_ppl_reg=self.helper.args.moe.perplexity_reg if self.helper.args.moe.att.perplexity_reg is None else self.helper.args.moe.att.perplexity_reg, moe_att_k=self.helper.args.moe.att.k, q_expert=self.helper.args.moe.att.q_expert, k_expert=self.helper.args.moe.att.k_expert, v_expert=self.helper.args.moe.att.v_expert, o_expert=self.helper.args.moe.att.o_expert, v_projection_size=self.helper.args.moe.att.v_size, qside_n_experts=self.helper.args.moe.att.qside_n_experts, 
moe_att_shared_experts=self.helper.args.moe.att.shared_experts, moe_att_kq_n_experts=self.helper.args.moe.att.kq_n_experts, moe_att_separate_kq_sel=self.helper.args.moe.att.separate_kq_sel, rotate_fraction=self.helper.args.rope.rotate_fraction, rope_base=self.helper.args.rope.base, moe_att_norm_init=self.helper.args.moe.att.norm_init, moe_att_same_sel=self.helper.args.moe.att.same_sel, moe_att_norm_retrieval=self.helper.args.moe.att.norm_ret) else: assert False, f"Invalid variant \"{self.helper.args.transformer.variant}\"" layers = [mklayer() for _ in range(self.helper.args.transformer.encoder_n_layers)] return layers def fix_init(self, model): init_std = 0.02 torch.nn.init.normal_(model.embedding.weight, 0.0, init_std) # torch.nn.init.normal_(model.embedding_adapter.weight, 0.0, init_std) initialized = 0 for m in model.modules(): if isinstance(m, (torch.nn.Linear, torch.nn.Embedding)) and hasattr(m, "weight"): torch.nn.init.normal_(m.weight, 0.0, init_std) initialized += m.weight.numel() if isinstance(m, (torch.nn.Linear, torch.nn.LayerNorm)) and m.bias is not None: torch.nn.init.constant_(m.bias, 0) initialized += m.bias.numel() if isinstance(m, (torch.nn.LayerNorm)) and m.weight is not None: torch.nn.init.normal_(m.weight, 1.0, init_std) initialized += m.weight.numel() if isinstance(m, MoE): torch.nn.init.normal_(m.keys, 0.0, init_std) torch.nn.init.normal_(m.values, 0.0, init_std) if m.expert_sel is not None: torch.nn.init.normal_(m.expert_sel, 0.0, init_std) m.renorm_keep_std(m.expert_sel) initialized += m.expert_sel.numel() initialized += m.keys.numel() + m.values.numel() if isinstance(m, (FullMoeRelativeAttentionCore)): for p in m.parameters(): torch.nn.init.normal_(p, 0.0, init_std) initialized += p.numel() for s in m.selections.values(): m.renorm_keep_std(s) print(f"Reinitialized {initialized/self.n_weights*100:.3f}% weights") def create_model(self) -> torch.nn.Module: self.validation_started_on = None # pyright: reportOptionalMemberAccess=false tlayers = self.get_layers()
@args def a(parser: framework.helpers.ArgumentParser): parser.add_argument("-lm.trafo.context_blocks", default=1) parser.add_argument("-lm.trafo.test_context_blocks", default="none", parser=parser.int_or_none_parser) parser.add_argument("-lm.trafo.test_pos_clamp", default="none", parser=parser.int_or_none_parser) parser.add_argument("-lm.trafo.same_length_eval", default=False) parser.add_argument("-lm.trafo.same_length", default=False) parser.add_argument("-lm.trafo.last_layer_context", default=False) parser.add_argument("-lm.trafo.xl_init", default=False) parser.add_argument("-lm.trafo.embedding_mode_init", default="default", choice=["default", "scale_to_sqrt_dmodel", "init_to_sqrt_dmodel", "one_and_scale_to_sqrt_dmodel", "like_preln"]) parser.add_argument("-pkm.n_heads", default=1) parser.add_argument("-moe.n_experts", default=128) parser.add_argument("-moe.expert_size", default=128) parser.add_argument("-moe.selection_mode", default="sigmoid", choice=["gate", "sigmoid", "mul"]) parser.add_argument("-moe.perplexity_reg", default=0.0) parser.add_argument("-moe.perplexity_reg_mode", default="step", choice=["step", "global", "time", "global_time"]) parser.add_argument("-moe.reg_type", default="entropy", choice=["perplexity", "variance", "entropy", "l2", "switch", "normal"]) parser.add_argument("-moe.norm_keys", default=False) parser.add_argument("-moe.n_random", default=0) parser.add_argument("-moe.topk_mode", default="full", choice=["full", "l1_approx", "approx"]) parser.add_argument("-moe.activation_after_topk", default=False) parser.add_argument("-moe.drop_parallel", default=True) parser.add_argument("-moe.norm_key_init", default=False) parser.add_argument("-moe.norm_value_init", default=False) parser.add_argument("-moe.identical_init", default=False) parser.add_argument("-moe.sel_lr_multipler", default=1.0) parser.add_argument("-moe.expert_lr_multipler", default=1.0) parser.add_argument("-moe.sel_norm", default="none", choice=["none", "cos", "input", "weights"]) parser.add_argument("-moe.dropout_factor", default=1.0) parser.add_argument("-moe.drop_expert", default=0.0) parser.add_argument("-moe.sync_distributed", default=True) parser.add_argument("-moe.modulation_amplitude", default=0.5) parser.add_argument("-moe.init_scale", default=1.0) parser.add_argument("-moe.norm_expert_sel_init", default=False) parser.add_argument("-kvmem.dropout", default="none", choice=["none", "early", "late", "weight", "score"]) parser.add_argument("-kvmem.norm_values", default=False) parser.add_argument("-transformer.topk_value", default=32) parser.add_argument("-transformer.activation", default="relu", choice=["relu", "topk", "gelu", "identity", "sigmoid", "softmax"]) parser.add_argument("-transformer.p_drop_layer", default=0.0) parser.add_argument("-transformer.head_projection_size", default="none", parser=parser.int_or_none_parser) parser.add_argument("-transformer.ln_affine", default=True) parser.add_argument("-transformer.ln_after_attention", default=True) parser.add_argument("-moe.att.n_experts", default=4) parser.add_argument("-moe.att.variant", default="moa", choice=["moa", "simple", "qside", "full", "full_rope", "seq", "target"]) parser.add_argument("-moe.att.enable", default=False) parser.add_argument("-moe.att.q_expert", default=True) parser.add_argument("-moe.att.k_expert", default=True) parser.add_argument("-moe.att.v_expert", default=True) parser.add_argument("-moe.att.o_expert", default=True) parser.add_argument("-moe.att.k", default=2) parser.add_argument("-moe.att.norm_qk", default=False) 
parser.add_argument("-moe.att.v_size", default="none", parser=parser.int_or_none_parser) parser.add_argument("-moe.att.same_sel", default=False) parser.add_argument("-moe.att.expert_dropout", default="none", parser=parser.float_or_none_parser) parser.add_argument("-moe.att.selection_mode", default="sigmoid", choice=["sigmoid", "softmax"]) parser.add_argument("-moe.att.perplexity_reg", default="none", parser=parser.float_or_none_parser) parser.add_argument("-moe.att.qside_n_experts", default="none", parser=parser.int_or_none_parser) parser.add_argument("-moe.att.k", default=2) parser.add_argument("-moe.att.norm_ret", default=False) parser.add_argument("-moe.att.shared_experts", default=False) parser.add_argument("-moe.att.drop_expert", default="none", parser=parser.float_or_none_parser) parser.add_argument("-moe.att.kq_n_experts", default="none", parser=parser.int_or_none_parser) parser.add_argument("-moe.att.separate_kq_sel", default=False) parser.add_argument("-moe.att.norm_init", default=False) parser.add_argument("-rope.rotate_fraction", default=0.5) parser.add_argument("-rope.base", default=10000.0) parser.add_argument("-moa.mode", default="my", choice=["my", "moa"]) parser.add_argument("-moa.cvloss", default=0.0) parser.add_argument("-moa.switchloss", default=0.0) parser.add_argument("-moa.zloss", default=0.0) parser.add_argument("-debug_plot_interval", default="none", parser=parser.int_or_none_parser) parser.add_argument("-transformer.plot_head_details", default=False) parser.add_argument("-plot.n_steps", default=-128) @task() class TransformerLMMixin: helper: framework.helpers.TrainingHelper def is_preln(self) -> bool: return "preln" in self.helper.args.transformer.variant def topk_activation(self, x: torch.Tensor) -> torch.Tensor: nx = -x return torch.masked_fill(x, nx <= nx.kthvalue(self.helper.args.transformer.topk_value, keepdim=True)[0], 0) def get_layers(self) -> List[torch.nn.Module]: # pyright: reportOptionalMemberAccess=false if self.helper.args.transformer.activation == "relu": activation = F.relu elif self.helper.args.transformer.activation == "topk": activation = self.topk_activation elif self.helper.args.transformer.activation == "identity": activation = lambda x: x elif self.helper.args.transformer.activation == "sigmoid": activation = torch.sigmoid elif self.helper.args.transformer.activation == "gelu": activation = F.gelu elif self.helper.args.transformer.activation == "softmax": activation = lambda x: F.softmax(x, dim=-1) else: raise ValueError(f"Invalid activation: {self.helper.args.transformer.activation}") base_args = dict( d_model=self.helper.args.state_size, nhead=self.helper.args.transformer.n_heads, dropout=self.helper.args.dropout, activation=activation ) if self.helper.args.transformer.variant not in {"preln_moe", "moe"}: base_args["dim_feedforward"]=int(self.helper.args.state_size * self.helper.args.transformer.ff_multiplier) extra_args = {} if not self.helper.args.transformer.variant.endswith("_gelu") else { "activation": F.gelu, "drop_expand": False } if self.helper.args.transformer.variant in {"preln_relative"}: mklayer = lambda: PrelnRelativeTransformerEncoderLayer( **base_args, **extra_args, test_pos_clamp=self.helper.args.lm.trafo.test_pos_clamp, n_layers=self.helper.args.transformer.encoder_n_layers, head_projection_size=self.helper.args.transformer.head_projection_size,) elif self.helper.args.transformer.variant in {"preln_moeatt"}: mklayer = lambda: MoeAttentionRelativeTransformerEncoderLayer( **base_args, **extra_args, 
moe_att_n_experts=self.helper.args.moe.att.n_experts, n_layers=self.helper.args.transformer.encoder_n_layers, head_projection_size=self.helper.args.transformer.head_projection_size, att_perplexity_reg=self.helper.args.moe.perplexity_reg if self.helper.args.moe.att.perplexity_reg is None else self.helper.args.moe.att.perplexity_reg, expert_dropout=self.helper.args.moe.drop_expert if self.helper.args.moe.att.drop_expert is None else self.helper.args.moe.att.drop_expert, att_selection_mode=self.helper.args.moe.att.selection_mode, preln=self.is_preln(), attention_variant=self.helper.args.moe.att.variant, q_expert=self.helper.args.moe.att.q_expert, k_expert=self.helper.args.moe.att.k_expert, v_expert=self.helper.args.moe.att.v_expert, o_expert=self.helper.args.moe.att.o_expert, norm_qk_score=self.helper.args.moe.att.norm_qk, v_projection_size=self.helper.args.moe.att.v_size, same_sel=self.helper.args.moe.att.same_sel, moe_k=self.helper.args.moe.att.k, qside_n_experts=self.helper.args.moe.att.qside_n_experts, shared_experts=self.helper.args.moe.att.shared_experts, kq_n_experts=self.helper.args.moe.att.kq_n_experts, separate_kq_sel=self.helper.args.moe.att.separate_kq_sel, moa_mode=self.helper.args.moa.mode, cvloss=self.helper.args.moa.cvloss, switchloss=self.helper.args.moa.switchloss, zloss=self.helper.args.moa.zloss, rotate_fraction=self.helper.args.rope.rotate_fraction, rope_base=self.helper.args.rope.base, moeatt_norm_init=self.helper.args.moe.att.norm_init) elif self.helper.args.transformer.variant in {"preln_rope", "rope"}: mklayer = lambda: FastRopeTransformerEncoderLayer( **base_args, **extra_args, n_layers=self.helper.args.transformer.encoder_n_layers, head_projection_size=self.helper.args.transformer.head_projection_size, preln=self.is_preln(), rotate_fraction = self.helper.args.rope.rotate_fraction, rope_base=self.helper.args.rope.base) elif self.helper.args.transformer.variant in {"preln_moe", "moe"}: # def __init__(self, d_model, nhead, n_bins: int, bin_size: int, n_layers: int, dim_feedforward=2048, mklayer = lambda: RelativeMoeTransformerEncoderLayer( **base_args, **extra_args, preln=self.is_preln(), test_pos_clamp=self.helper.args.lm.trafo.test_pos_clamp, n_layers=self.helper.args.transformer.encoder_n_layers, n_experts=self.helper.args.moe.n_experts, expert_size=self.helper.args.moe.expert_size, dropout_mode=self.helper.args.kvmem.dropout, selection_mode=self.helper.args.moe.selection_mode, perplexity_reg=self.helper.args.moe.perplexity_reg, n_heads=self.helper.args.pkm.n_heads, norm_keys=self.helper.args.moe.norm_keys, perplexity_reg_mode=self.helper.args.moe.perplexity_reg_mode, n_random=self.helper.args.moe.n_random, reg_type=self.helper.args.moe.reg_type, topk_mode=self.helper.args.moe.topk_mode, head_projection_size=self.helper.args.transformer.head_projection_size, activation_after_topk=self.helper.args.moe.activation_after_topk, drop_parallel=self.helper.args.moe.drop_parallel, norm_key_init=self.helper.args.moe.norm_key_init, norm_value_init=self.helper.args.moe.norm_value_init, normalize_expert_sel_init=self.helper.args.moe.norm_expert_sel_init, identical_init=self.helper.args.moe.identical_init, sel_norm=self.helper.args.moe.sel_norm, ln_affine=self.helper.args.transformer.ln_affine, moe_dropout_factor=self.helper.args.moe.dropout_factor, drop_expert=self.helper.args.moe.drop_expert, sync_distributed=self.helper.args.moe.sync_distributed, modulation_amplitude=self.helper.args.moe.modulation_amplitude, moe_init_scale=self.helper.args.moe.init_scale, 
moe_attention=self.helper.args.moe.att.enable, moe_att_n_experts=self.helper.args.moe.att.n_experts, moe_att_expert_dropout=self.helper.args.moe.drop_expert if self.helper.args.moe.att.drop_expert is None else self.helper.args.moe.att.drop_expert, moe_att_selection_mode=self.helper.args.moe.att.selection_mode, moe_att_variant=self.helper.args.moe.att.variant, moe_att_ppl_reg=self.helper.args.moe.perplexity_reg if self.helper.args.moe.att.perplexity_reg is None else self.helper.args.moe.att.perplexity_reg, moe_att_k=self.helper.args.moe.att.k, q_expert=self.helper.args.moe.att.q_expert, k_expert=self.helper.args.moe.att.k_expert, v_expert=self.helper.args.moe.att.v_expert, o_expert=self.helper.args.moe.att.o_expert, v_projection_size=self.helper.args.moe.att.v_size, qside_n_experts=self.helper.args.moe.att.qside_n_experts, moe_att_shared_experts=self.helper.args.moe.att.shared_experts, moe_att_kq_n_experts=self.helper.args.moe.att.kq_n_experts, moe_att_separate_kq_sel=self.helper.args.moe.att.separate_kq_sel, rotate_fraction=self.helper.args.rope.rotate_fraction, rope_base=self.helper.args.rope.base, moe_att_norm_init=self.helper.args.moe.att.norm_init, moe_att_same_sel=self.helper.args.moe.att.same_sel, moe_att_norm_retrieval=self.helper.args.moe.att.norm_ret) else: assert False, f"Invalid variant \"{self.helper.args.transformer.variant}\"" layers = [mklayer() for _ in range(self.helper.args.transformer.encoder_n_layers)] return layers def fix_init(self, model): init_std = 0.02 torch.nn.init.normal_(model.embedding.weight, 0.0, init_std) # torch.nn.init.normal_(model.embedding_adapter.weight, 0.0, init_std) initialized = 0 for m in model.modules(): if isinstance(m, (torch.nn.Linear, torch.nn.Embedding)) and hasattr(m, "weight"): torch.nn.init.normal_(m.weight, 0.0, init_std) initialized += m.weight.numel() if isinstance(m, (torch.nn.Linear, torch.nn.LayerNorm)) and m.bias is not None: torch.nn.init.constant_(m.bias, 0) initialized += m.bias.numel() if isinstance(m, (torch.nn.LayerNorm)) and m.weight is not None: torch.nn.init.normal_(m.weight, 1.0, init_std) initialized += m.weight.numel() if isinstance(m, MoE): torch.nn.init.normal_(m.keys, 0.0, init_std) torch.nn.init.normal_(m.values, 0.0, init_std) if m.expert_sel is not None: torch.nn.init.normal_(m.expert_sel, 0.0, init_std) m.renorm_keep_std(m.expert_sel) initialized += m.expert_sel.numel() initialized += m.keys.numel() + m.values.numel() if isinstance(m, (FullMoeRelativeAttentionCore)): for p in m.parameters(): torch.nn.init.normal_(p, 0.0, init_std) initialized += p.numel() for s in m.selections.values(): m.renorm_keep_std(s) print(f"Reinitialized {initialized/self.n_weights*100:.3f}% weights") def create_model(self) -> torch.nn.Module: self.validation_started_on = None # pyright: reportOptionalMemberAccess=false tlayers = self.get_layers()
model = TransformerLanguageModel(
0
2023-12-13 08:45:02+00:00
24k
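The row above samples a mixture-of-experts transformer whose flags select sigmoid gating and top-k routing (moe.selection_mode='sigmoid', moe.att.k=2, per-expert keys and values). The following is a minimal sketch of that routing pattern only; the function name, tensor shapes, and ReLU activation are illustrative assumptions, not the row's actual MoE class.

import torch
import torch.nn.functional as F

def topk_moe_forward(x, expert_sel, keys, values, k=2):
    # x: (batch, d_model); expert_sel: (n_experts, d_model)
    # keys: (n_experts, d_model, expert_size); values: (n_experts, expert_size, d_model)
    sel = torch.sigmoid(x @ expert_sel.t())      # per-token expert scores
    gate, idx = sel.topk(k, dim=-1)              # keep the k best-scoring experts
    out = torch.zeros_like(x)
    for j in range(k):
        e = idx[:, j]                            # expert chosen per token
        h = F.relu(torch.einsum('bd,bde->be', x, keys[e]))
        out = out + gate[:, j, None] * torch.einsum('be,bed->bd', h, values[e])
    return out

# Hypothetical usage with 8 experts of size 32 on a 16-dim model:
x = torch.randn(4, 16)
y = topk_moe_forward(x, torch.randn(8, 16), torch.randn(8, 16, 32), torch.randn(8, 32, 16))

The real layer in the row additionally applies perplexity regularization, expert dropout, and distributed selection synchronization, which this sketch omits.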
AIFSH/NativeDancer
nativedancer/third_part/magicanimate/pipelines/pipeline_animation.py
[ { "identifier": "UNet3DConditionModel", "path": "nativedancer/third_part/magicanimate/models/unet_controlnet.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_siz...
import inspect, math import numpy as np import torch import torch.distributed as dist from typing import Callable, List, Optional, Union from dataclasses import dataclass from PIL import Image from tqdm import tqdm from diffusers.utils import is_accelerate_available from packaging import version from transformers import CLIPTextModel, CLIPTokenizer from diffusers.configuration_utils import FrozenDict from diffusers.models import AutoencoderKL from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from diffusers.utils import deprecate, logging, BaseOutput from einops import rearrange from ..models.unet_controlnet import UNet3DConditionModel from ..models.controlnet import ControlNetModel from ..models.mutual_self_attention import ReferenceAttentionControl from ..pipelines.context import ( get_context_scheduler, get_total_steps ) from ..utils.util import get_tensor_interpolation_method from accelerate import cpu_offload
16,791
# ************************************************************************* # This file may have been modified by Bytedance Inc. (“Bytedance Inc.'s Mo- # difications”). All Bytedance Inc.'s Modifications are Copyright (2023) B- # ytedance Inc.. # ************************************************************************* # Adapted from https://github.com/showlab/Tune-A-Video/blob/main/tuneavideo/pipelines/pipeline_tuneavideo.py # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TODO: 1. support multi-controlnet 2. [DONE] support DDIM inversion 3. support Prompt-to-prompt """ logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class AnimationPipelineOutput(BaseOutput): videos: Union[torch.Tensor, np.ndarray] class AnimationPipeline(DiffusionPipeline): _optional_components = [] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet3DConditionModel,
# ************************************************************************* # This file may have been modified by Bytedance Inc. (“Bytedance Inc.'s Mo- # difications”). All Bytedance Inc.'s Modifications are Copyright (2023) B- # ytedance Inc.. # ************************************************************************* # Adapted from https://github.com/showlab/Tune-A-Video/blob/main/tuneavideo/pipelines/pipeline_tuneavideo.py # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TODO: 1. support multi-controlnet 2. [DONE] support DDIM inversion 3. support Prompt-to-prompt """ logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class AnimationPipelineOutput(BaseOutput): videos: Union[torch.Tensor, np.ndarray] class AnimationPipeline(DiffusionPipeline): _optional_components = [] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet3DConditionModel,
controlnet: ControlNetModel,
1
2023-12-10 20:14:00+00:00
24k
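The AnimationPipeline row imports rearrange from einops. Video pipelines of this shape typically fold the frame axis into the batch axis before running per-frame 2D modules such as the VAE decoder, then unfold it afterwards. A small self-contained sketch of that reshaping, with made-up tensor sizes:

import torch
from einops import rearrange

# Video latents arrive as (batch, channels, frames, height, width); most 2D
# modules expect 4D input, so frames are folded into the batch dimension.
latents = torch.randn(2, 4, 16, 32, 32)                 # (b, c, f, h, w)
flat = rearrange(latents, "b c f h w -> (b f) c h w")   # (32, 4, 32, 32)
# ... run the per-frame 2D module on `flat` ...
video = rearrange(flat, "(b f) c h w -> b c f h w", f=16)
assert video.shape == latents.shape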
mkang315/ASF-YOLO
segment/val.py
[ { "identifier": "DetectMultiBackend", "path": "models/common.py", "snippet": "class DetectMultiBackend(nn.Module):\n # YOLOv5 MultiBackend class for python inference on various backends\n def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, data=None, fp16=False, fuse=Tr...
import argparse import json import os import sys import numpy as np import torch import torch.nn.functional as F import time from multiprocessing.pool import ThreadPool from pathlib import Path from tqdm import tqdm from models.common import DetectMultiBackend from models.yolo import SegmentationModel from utils.callbacks import Callbacks from utils.general import (LOGGER, NUM_THREADS, TQDM_BAR_FORMAT, Profile, check_dataset, check_img_size, check_requirements, check_yaml, coco80_to_coco91_class, colorstr, increment_path, non_max_suppression, print_args, scale_boxes, xywh2xyxy, xyxy2xywh) from utils.metrics import ConfusionMatrix, box_iou from utils.plots import output_to_target, plot_val_study from utils.segment.dataloaders import create_dataloader from utils.segment.general import mask_iou, process_mask, process_mask_upsample, scale_image from utils.segment.metrics import Metrics, ap_per_class_box_and_mask from utils.segment.plots import plot_images_and_masks from utils.torch_utils import de_parallel, select_device, smart_inference_mode from pycocotools.mask import encode from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval
21,552
# Print results pf = '%22s' + '%11i' * 2 + '%11.3g' * 8 # print format LOGGER.info(pf % ("all", seen, nt.sum(), *metrics.mean_results())) if nt.sum() == 0: LOGGER.warning(f'WARNING ⚠️ no labels found in {task} set, can not compute metrics without labels') # Print results per class if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats): for i, c in enumerate(metrics.ap_class_index): LOGGER.info(pf % (names[c], seen, nt[c], *metrics.class_result(i))) # Print speeds t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image if not training: shape = (batch_size, 3, imgsz, imgsz) LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t) # Plots if plots: confusion_matrix.plot(save_dir=save_dir, names=list(names.values())) # callbacks.run('on_val_end') mp_bbox, mr_bbox, map50_bbox, map_bbox, mp_mask, mr_mask, map50_mask, map_mask = metrics.mean_results() # Save JSON if save_json and len(jdict): w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights anno_json = str(Path(data.get('path', '../coco')) / 'annotations/instances_val2017.json') # annotations json pred_json = str(save_dir / f"{w}_predictions.json") # predictions json LOGGER.info(f'\nEvaluating pycocotools mAP... saving {pred_json}...') with open(pred_json, 'w') as f: json.dump(jdict, f) try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb anno = COCO(anno_json) # init annotations api pred = anno.loadRes(pred_json) # init predictions api results = [] for eval in COCOeval(anno, pred, 'bbox'), COCOeval(anno, pred, 'segm'): if is_coco: eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.im_files] # img ID to evaluate eval.evaluate() eval.accumulate() eval.summarize() results.extend(eval.stats[:2]) # update results (mAP@0.5:0.95, mAP@0.5) map_bbox, map50_bbox, map_mask, map50_mask = results except Exception as e: LOGGER.info(f'pycocotools unable to run: {e}') # Return results model.float() # for training if not training: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") final_metric = mp_bbox, mr_bbox, map50_bbox, map_bbox, mp_mask, mr_mask, map50_mask, map_mask return (*final_metric, *(loss.cpu() / len(dataloader)).tolist()), metrics.get_maps(nc), t def parse_opt(): parser = argparse.ArgumentParser() parser.add_argument('--data', type=str, default=ROOT / 'data/BCC.yaml', help='dataset.yaml path') parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'runs_2/train-seg/base/weights/best.pt', help='model path(s)') parser.add_argument('--batch-size', type=int, default=1, help='batch size') parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)') parser.add_argument('--conf-thres', type=float, default=0.01, help='confidence threshold') parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold') parser.add_argument('--max-det', type=int, default=300, help='maximum detections per image') parser.add_argument('--task', default='val', help='train, val, test, speed or study') parser.add_argument('--device', default='6', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset') parser.add_argument('--augment', action='store_true', help='augmented inference') parser.add_argument('--verbose', action='store_true', help='report mAP by class') parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt') parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') parser.add_argument('--save-json', action='store_true', help='save a COCO-JSON results file') parser.add_argument('--project', default=ROOT / 'runs_2/val_test', help='save results to project/name') parser.add_argument('--name', default='base', help='save to project/name') parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') opt = parser.parse_args() opt.data = check_yaml(opt.data) # check YAML # opt.save_json |= opt.data.endswith('coco.yaml') opt.save_txt |= opt.save_hybrid print_args(vars(opt)) return opt def main(opt): check_requirements(requirements=ROOT / 'requirements.txt', exclude=('tensorboard', 'thop')) if opt.task in ('train', 'val', 'test'): # run normally if opt.conf_thres > 0.001: # https://github.com/ultralytics/yolov5/issues/1466 LOGGER.warning(f'WARNING ⚠️ confidence threshold {opt.conf_thres} > 0.001 produces invalid results') if opt.save_hybrid: LOGGER.warning('WARNING ⚠️ --save-hybrid returns high mAP from hybrid labels, not from predictions alone') run(**vars(opt)) else: weights = opt.weights if isinstance(opt.weights, list) else [opt.weights] opt.half = torch.cuda.is_available() and opt.device != 'cpu' # FP16 for fastest results if opt.task == 'speed': # speed benchmarks # python val.py --task speed --data coco.yaml --batch 1 --weights yolov5n.pt yolov5s.pt... opt.conf_thres, opt.iou_thres, opt.save_json = 0.25, 0.45, False for opt.weights in weights: run(**vars(opt), plots=False) elif opt.task == 'study': # speed vs mAP benchmarks # python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n.pt yolov5s.pt... for opt.weights in weights: f = f'study_{Path(opt.data).stem}_{Path(opt.weights).stem}.txt' # filename to save to x, y = list(range(256, 1536 + 128, 128)), [] # x axis (image sizes), y axis for opt.imgsz in x: # img-size LOGGER.info(f'\nRunning {f} --imgsz {opt.imgsz}...') r, _, t = run(**vars(opt), plots=False) y.append(r + t) # results and times np.savetxt(f, y, fmt='%10.4g') # save os.system('zip -r study.zip study_*.txt')
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ Validate a trained YOLOv5 segment model on a segment dataset Usage: $ bash data/scripts/get_coco.sh --val --segments # download COCO-segments val split (1G, 5000 images) $ python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 # validate COCO-segments Usage - formats: $ python segment/val.py --weights yolov5s-seg.pt # PyTorch yolov5s-seg.torchscript # TorchScript yolov5s-seg.onnx # ONNX Runtime or OpenCV DNN with --dnn yolov5s-seg_openvino_label # OpenVINO yolov5s-seg.engine # TensorRT yolov5s-seg.mlmodel # CoreML (macOS-only) yolov5s-seg_saved_model # TensorFlow SavedModel yolov5s-seg.pb # TensorFlow GraphDef yolov5s-seg.tflite # TensorFlow Lite yolov5s-seg_edgetpu.tflite # TensorFlow Edge TPU yolov5s-seg_paddle_model # PaddlePaddle """ FILE = Path(__file__).resolve() ROOT = FILE.parents[1] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative def save_one_txt(predn, save_conf, shape, file): # Save one txt result gn = torch.tensor(shape)[[1, 0, 1, 0]] # normalization gain whwh for *xyxy, conf, cls in predn.tolist(): xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format with open(file, 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') def save_one_json(predn, jdict, path, class_map, pred_masks): # Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236} def single_encode(x): rle = encode(np.asarray(x[:, :, None], order="F", dtype="uint8"))[0] rle["counts"] = rle["counts"].decode("utf-8") return rle image_id = int(path.stem) if path.stem.isnumeric() else path.stem box = xyxy2xywh(predn[:, :4]) # xywh box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner pred_masks = np.transpose(pred_masks, (2, 0, 1)) with ThreadPool(NUM_THREADS) as pool: rles = pool.map(single_encode, pred_masks) for i, (p, b) in enumerate(zip(predn.tolist(), box.tolist())): jdict.append({ 'image_id': image_id, 'category_id': class_map[int(p[5])], 'bbox': [round(x, 3) for x in b], 'score': round(p[4], 5), 'segmentation': rles[i]}) def process_batch(detections, labels, iouv, pred_masks=None, gt_masks=None, overlap=False, masks=False): """ Return correct prediction matrix Arguments: detections (array[N, 6]), x1, y1, x2, y2, conf, class labels (array[M, 5]), class, x1, y1, x2, y2 Returns: correct (array[N, 10]), for 10 IoU levels """ if masks: if overlap: nl = len(labels) index = torch.arange(nl, device=gt_masks.device).view(nl, 1, 1) + 1 gt_masks = gt_masks.repeat(nl, 1, 1) # shape(1,640,640) -> (n,640,640) gt_masks = torch.where(gt_masks == index, 1.0, 0.0) if gt_masks.shape[1:] != pred_masks.shape[1:]: gt_masks = F.interpolate(gt_masks[None], pred_masks.shape[1:], mode="bilinear", align_corners=False)[0] gt_masks = gt_masks.gt_(0.5) iou = mask_iou(gt_masks.view(gt_masks.shape[0], -1), pred_masks.view(pred_masks.shape[0], -1)) else: # boxes iou = box_iou(labels[:, 1:], detections[:, :4]) correct = np.zeros((detections.shape[0], iouv.shape[0])).astype(bool) correct_class = labels[:, 0:1] == detections[:, 5] for i in range(len(iouv)): x = torch.where((iou >= iouv[i]) & correct_class) # IoU > threshold and classes match if x[0].shape[0]: matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() # [label, detect, iou] if x[0].shape[0] > 1: matches = 
matches[matches[:, 2].argsort()[::-1]] matches = matches[np.unique(matches[:, 1], return_index=True)[1]] # matches = matches[matches[:, 2].argsort()[::-1]] matches = matches[np.unique(matches[:, 0], return_index=True)[1]] correct[matches[:, 1].astype(int), i] = True return torch.tensor(correct, dtype=torch.bool, device=iouv.device) @smart_inference_mode() def run( data, weights=None, # model.pt path(s) batch_size=32, # batch size imgsz=640, # inference size (pixels) conf_thres=0.001, # confidence threshold iou_thres=0.6, # NMS IoU threshold max_det=300, # maximum detections per image task='val', # train, val, test, speed or study device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu workers=8, # max dataloader workers (per RANK in DDP mode) single_cls=False, # treat as single-class dataset augment=False, # augmented inference verbose=False, # verbose output save_txt=False, # save results to *.txt save_hybrid=False, # save label+prediction hybrid results to *.txt save_conf=False, # save confidences in --save-txt labels save_json=False, # save a COCO-JSON results file project=ROOT / 'runs/val-seg', # save to project/name name='exp', # save to project/name exist_ok=False, # existing project/name ok, do not increment half=True, # use FP16 half-precision inference dnn=False, # use OpenCV DNN for ONNX inference model=None, dataloader=None, save_dir=Path(''), plots=True, overlap=False, mask_downsample_ratio=1, compute_loss=None, callbacks=Callbacks(), ): if save_json: check_requirements(['pycocotools']) process = process_mask_upsample # more accurate else: process = process_mask # faster # Initialize/load model and set device training = model is not None if training: # called by train.py device, pt, jit, engine = next(model.parameters()).device, True, False, False # get model device, PyTorch model half &= device.type != 'cpu' # half precision only supported on CUDA model.half() if half else model.float() nm = de_parallel(model).model[-1].nm # number of masks else: # called directly device = select_device(device, batch_size=batch_size) # Directories save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine imgsz = check_img_size(imgsz, s=stride) # check image size half = model.fp16 # FP16 supported on limited backends with CUDA nm = de_parallel(model).model.model[-1].nm if isinstance(model, SegmentationModel) else 32 # number of masks if engine: batch_size = model.batch_size else: device = model.device if not (pt or jit): batch_size = 1 # export.py models default to batch-size 1 LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models') # Data data = check_dataset(data) # check # Configure model.eval() cuda = device.type != 'cpu' is_coco = isinstance(data.get('val'), str) and data['val'].endswith(f'coco{os.sep}val2017.txt') # COCO dataset nc = 1 if single_cls else int(data['nc']) # number of classes iouv = torch.linspace(0.5, 0.95, 10, device=device) # iou vector for mAP@0.5:0.95 niou = iouv.numel() # Dataloader if not training: if pt and not single_cls: # check --weights are trained on --data ncm = model.model.nc assert ncm == nc, f'{weights} ({ncm} classes) trained on different --data than what you passed ({nc} ' \ f'classes). 
Pass correct combination of --weights and --data that are trained together.' model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz)) # warmup pad, rect = (0.0, False) if task == 'speed' else (0.5, pt) # square inference for benchmarks task = task if task in ('train', 'val', 'test') else 'val' # path to train/val/test images dataloader = create_dataloader(data[task], imgsz, batch_size, stride, single_cls, pad=pad, rect=rect, workers=workers, prefix=colorstr(f'{task}: '), overlap_mask=overlap, mask_downsample_ratio=mask_downsample_ratio)[0] seen = 0 confusion_matrix = ConfusionMatrix(nc=nc) names = model.names if hasattr(model, 'names') else model.module.names # get class names if isinstance(names, (list, tuple)): # old format names = dict(enumerate(names)) class_map = coco80_to_coco91_class() if is_coco else list(range(1000)) s = ('%22s' + '%11s' * 10) % ('Class', 'Images', 'Instances', 'Box(P', "R", "mAP50", "mAP50-95)", "Mask(P", "R", "mAP50", "mAP50-95)") dt = Profile(), Profile(), Profile() metrics = Metrics() loss = torch.zeros(4, device=device) jdict, stats = [], [] # callbacks.run('on_val_start') pbar = tqdm(dataloader, desc=s, bar_format=TQDM_BAR_FORMAT) # progress bar for batch_i, (im, targets, paths, shapes, masks) in enumerate(pbar): # callbacks.run('on_val_batch_start') with dt[0]: if cuda: im = im.to(device, non_blocking=True) targets = targets.to(device) masks = masks.to(device) masks = masks.float() im = im.half() if half else im.float() # uint8 to fp16/32 im /= 255 # 0 - 255 to 0.0 - 1.0 nb, _, height, width = im.shape # batch size, channels, height, width # Inference with dt[1]: act = time.time() preds, protos, train_out = model(im) if compute_loss else (*model(im, augment=augment)[:2], None) #print('time.time():',time.time()-act) # Loss if compute_loss: loss += compute_loss((train_out, protos), targets, masks)[1] # box, obj, cls # NMS targets[:, 2:] *= torch.tensor((width, height, width, height), device=device) # to pixels lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling with dt[2]: preds = non_max_suppression(preds, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls, max_det=max_det, nm=nm) # Metrics plot_masks = [] # masks for plotting for si, (pred, proto) in enumerate(zip(preds, protos)): labels = targets[targets[:, 0] == si, 1:] nl, npr = labels.shape[0], pred.shape[0] # number of labels, predictions path, shape = Path(paths[si]), shapes[si][0] correct_masks = torch.zeros(npr, niou, dtype=torch.bool, device=device) # init correct_bboxes = torch.zeros(npr, niou, dtype=torch.bool, device=device) # init seen += 1 if npr == 0: if nl: stats.append((correct_masks, correct_bboxes, *torch.zeros((2, 0), device=device), labels[:, 0])) if plots: confusion_matrix.process_batch(detections=None, labels=labels[:, 0]) continue # Masks midx = [si] if overlap else targets[:, 0] == si gt_masks = masks[midx] pred_masks = process(proto, pred[:, 6:], pred[:, :4], shape=im[si].shape[1:]) # Predictions if single_cls: pred[:, 5] = 0 predn = pred.clone() scale_boxes(im[si].shape[1:], predn[:, :4], shape, shapes[si][1]) # native-space pred # Evaluate if nl: tbox = xywh2xyxy(labels[:, 1:5]) # target boxes scale_boxes(im[si].shape[1:], tbox, shape, shapes[si][1]) # native-space labels labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels correct_bboxes = process_batch(predn, labelsn, iouv) correct_masks = process_batch(predn, labelsn, iouv, pred_masks, gt_masks, overlap=overlap, masks=True) if plots: 
confusion_matrix.process_batch(predn, labelsn) stats.append((correct_masks, correct_bboxes, pred[:, 4], pred[:, 5], labels[:, 0])) # (conf, pcls, tcls) pred_masks = torch.as_tensor(pred_masks, dtype=torch.uint8) if plots and batch_i < 3: plot_masks.append(pred_masks[:15].cpu()) # filter top 15 to plot # Save/log if save_txt: save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt') if save_json: pred_masks = scale_image(im[si].shape[1:], pred_masks.permute(1, 2, 0).contiguous().cpu().numpy(), shape, shapes[si][1]) save_one_json(predn, jdict, path, class_map, pred_masks) # append to COCO-JSON dictionary # callbacks.run('on_val_image_end', pred, predn, path, names, im[si]) # Plot images if plots and batch_i < 3: if len(plot_masks): plot_masks = torch.cat(plot_masks, dim=0) plot_images_and_masks(im, targets, masks, paths, save_dir / f'val_batch{batch_i}_labels.jpg', names) plot_images_and_masks(im, output_to_target(preds, max_det=15), plot_masks, paths, save_dir / f'val_batch{batch_i}_pred.jpg', names) # pred # callbacks.run('on_val_batch_end') # Compute metrics stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*stats)] # to numpy if len(stats) and stats[0].any(): results = ap_per_class_box_and_mask(*stats, plot=plots, save_dir=save_dir, names=names) metrics.update(results) nt = np.bincount(stats[4].astype(int), minlength=nc) # number of targets per class # Print results pf = '%22s' + '%11i' * 2 + '%11.3g' * 8 # print format LOGGER.info(pf % ("all", seen, nt.sum(), *metrics.mean_results())) if nt.sum() == 0: LOGGER.warning(f'WARNING ⚠️ no labels found in {task} set, can not compute metrics without labels') # Print results per class if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats): for i, c in enumerate(metrics.ap_class_index): LOGGER.info(pf % (names[c], seen, nt[c], *metrics.class_result(i))) # Print speeds t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image if not training: shape = (batch_size, 3, imgsz, imgsz) LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t) # Plots if plots: confusion_matrix.plot(save_dir=save_dir, names=list(names.values())) # callbacks.run('on_val_end') mp_bbox, mr_bbox, map50_bbox, map_bbox, mp_mask, mr_mask, map50_mask, map_mask = metrics.mean_results() # Save JSON if save_json and len(jdict): w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights anno_json = str(Path(data.get('path', '../coco')) / 'annotations/instances_val2017.json') # annotations json pred_json = str(save_dir / f"{w}_predictions.json") # predictions json LOGGER.info(f'\nEvaluating pycocotools mAP... 
saving {pred_json}...') with open(pred_json, 'w') as f: json.dump(jdict, f) try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb anno = COCO(anno_json) # init annotations api pred = anno.loadRes(pred_json) # init predictions api results = [] for eval in COCOeval(anno, pred, 'bbox'), COCOeval(anno, pred, 'segm'): if is_coco: eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.im_files] # img ID to evaluate eval.evaluate() eval.accumulate() eval.summarize() results.extend(eval.stats[:2]) # update results (mAP@0.5:0.95, mAP@0.5) map_bbox, map50_bbox, map_mask, map50_mask = results except Exception as e: LOGGER.info(f'pycocotools unable to run: {e}') # Return results model.float() # for training if not training: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") final_metric = mp_bbox, mr_bbox, map50_bbox, map_bbox, mp_mask, mr_mask, map50_mask, map_mask return (*final_metric, *(loss.cpu() / len(dataloader)).tolist()), metrics.get_maps(nc), t def parse_opt(): parser = argparse.ArgumentParser() parser.add_argument('--data', type=str, default=ROOT / 'data/BCC.yaml', help='dataset.yaml path') parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'runs_2/train-seg/base/weights/best.pt', help='model path(s)') parser.add_argument('--batch-size', type=int, default=1, help='batch size') parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)') parser.add_argument('--conf-thres', type=float, default=0.01, help='confidence threshold') parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold') parser.add_argument('--max-det', type=int, default=300, help='maximum detections per image') parser.add_argument('--task', default='val', help='train, val, test, speed or study') parser.add_argument('--device', default='6', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset') parser.add_argument('--augment', action='store_true', help='augmented inference') parser.add_argument('--verbose', action='store_true', help='report mAP by class') parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt') parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') parser.add_argument('--save-json', action='store_true', help='save a COCO-JSON results file') parser.add_argument('--project', default=ROOT / 'runs_2/val_test', help='save results to project/name') parser.add_argument('--name', default='base', help='save to project/name') parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') opt = parser.parse_args() opt.data = check_yaml(opt.data) # check YAML # opt.save_json |= opt.data.endswith('coco.yaml') opt.save_txt |= opt.save_hybrid print_args(vars(opt)) return opt def main(opt): check_requirements(requirements=ROOT / 'requirements.txt', exclude=('tensorboard', 'thop')) if opt.task in ('train', 'val', 'test'): # run normally if opt.conf_thres > 0.001: # https://github.com/ultralytics/yolov5/issues/1466 LOGGER.warning(f'WARNING ⚠️ confidence threshold {opt.conf_thres} > 0.001 produces invalid results') if opt.save_hybrid: LOGGER.warning('WARNING ⚠️ --save-hybrid returns high mAP from hybrid labels, not from predictions alone') run(**vars(opt)) else: weights = opt.weights if isinstance(opt.weights, list) else [opt.weights] opt.half = torch.cuda.is_available() and opt.device != 'cpu' # FP16 for fastest results if opt.task == 'speed': # speed benchmarks # python val.py --task speed --data coco.yaml --batch 1 --weights yolov5n.pt yolov5s.pt... opt.conf_thres, opt.iou_thres, opt.save_json = 0.25, 0.45, False for opt.weights in weights: run(**vars(opt), plots=False) elif opt.task == 'study': # speed vs mAP benchmarks # python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n.pt yolov5s.pt... for opt.weights in weights: f = f'study_{Path(opt.data).stem}_{Path(opt.weights).stem}.txt' # filename to save to x, y = list(range(256, 1536 + 128, 128)), [] # x axis (image sizes), y axis for opt.imgsz in x: # img-size LOGGER.info(f'\nRunning {f} --imgsz {opt.imgsz}...') r, _, t = run(**vars(opt), plots=False) y.append(r + t) # results and times np.savetxt(f, y, fmt='%10.4g') # save os.system('zip -r study.zip study_*.txt')
plot_val_study(x=x) # plot
22
2023-12-10 14:18:29+00:00
24k
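The segmentation-validation row's process_batch builds a correct-prediction matrix over ten IoU thresholds. Below is a compact sketch of the underlying box-IoU computation and thresholding; it deliberately omits the class-match check and duplicate-match resolution that the real function performs.

import torch

def box_iou(a, b):
    # IoU between two sets of xyxy boxes: a (N,4), b (M,4) -> (N,M)
    area_a = (a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1])
    area_b = (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])
    lt = torch.max(a[:, None, :2], b[None, :, :2])   # intersection top-left
    rb = torch.min(a[:, None, 2:], b[None, :, 2:])   # intersection bottom-right
    wh = (rb - lt).clamp(min=0)
    inter = wh[..., 0] * wh[..., 1]
    return inter / (area_a[:, None] + area_b[None, :] - inter)

# correct[i, j] marks detection i as a true positive at IoU threshold j,
# mirroring the 10-level iouv vector used in the row's process_batch().
iouv = torch.linspace(0.5, 0.95, 10)
labels = torch.tensor([[0., 0., 10., 10.]])
dets = torch.tensor([[1., 1., 9., 9.]])
correct = box_iou(labels, dets).T >= iouv            # (n_det, 10) boolean matrix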
youngskkim/CRN
exps/base_exp.py
[ { "identifier": "NuscDatasetRadarDet", "path": "datasets/nusc_det_dataset.py", "snippet": "class NuscDatasetRadarDet(Dataset):\n def __init__(self,\n ida_aug_conf,\n bda_aug_conf,\n rda_aug_conf,\n classes,\n data_root,\n...
from functools import partial from pytorch_lightning.core import LightningModule from torch.cuda.amp.autocast_mode import autocast from torch.optim.lr_scheduler import MultiStepLR from mmcv.runner import build_optimizer from datasets.nusc_det_dataset import NuscDatasetRadarDet, collate_fn from evaluators.det_evaluators import DetNuscEvaluator from models.base_bev_depth import BaseBEVDepth from utils.torch_dist import all_gather_object, synchronize import mmcv import torch import torch.nn.functional as F import torch.nn.parallel import torch.utils.data import torch.utils.data.distributed import torchvision.models as models
16,010
-1, self.depth_channels + 1)[:, 1:] return gt_depths.float() def eval_step(self, batch, batch_idx, prefix: str): (sweep_imgs, mats, img_metas, _, _, _, _, pts_pv) = batch if torch.cuda.is_available(): if self.return_image: sweep_imgs = sweep_imgs.cuda() for key, value in mats.items(): mats[key] = value.cuda() if self.return_radar_pv: pts_pv = pts_pv.cuda() preds = self(sweep_imgs, mats, pts_pv=pts_pv, is_train=False) if isinstance(self.model, torch.nn.parallel.DistributedDataParallel): results = self.model.module.get_bboxes(preds, img_metas) else: results = self.model.get_bboxes(preds, img_metas) for i in range(len(results)): results[i][0] = results[i][0].tensor.detach().cpu().numpy() results[i][1] = results[i][1].detach().cpu().numpy() results[i][2] = results[i][2].detach().cpu().numpy() results[i].append(img_metas[i]) return results def validation_epoch_end(self, validation_step_outputs): detection_losses = list() heatmap_losses = list() bbox_losses = list() depth_losses = list() for validation_step_output in validation_step_outputs: detection_losses.append(validation_step_output[0]) heatmap_losses.append(validation_step_output[1]) bbox_losses.append(validation_step_output[2]) depth_losses.append(validation_step_output[3]) synchronize() self.log('val/detection', torch.mean(torch.stack(detection_losses)), on_epoch=True) self.log('val/heatmap', torch.mean(torch.stack(heatmap_losses)), on_epoch=True) self.log('val/bbox', torch.mean(torch.stack(bbox_losses)), on_epoch=True) self.log('val/depth', torch.mean(torch.stack(depth_losses)), on_epoch=True) def validation_step(self, batch, batch_idx): (sweep_imgs, mats, _, gt_boxes_3d, gt_labels_3d, _, depth_labels, pts_pv) = batch if torch.cuda.is_available(): if self.return_image: sweep_imgs = sweep_imgs.cuda() for key, value in mats.items(): mats[key] = value.cuda() if self.return_radar_pv: pts_pv = pts_pv.cuda() gt_boxes_3d = [gt_box.cuda() for gt_box in gt_boxes_3d] gt_labels_3d = [gt_label.cuda() for gt_label in gt_labels_3d] with torch.no_grad(): preds, depth_preds = self(sweep_imgs, mats, pts_pv=pts_pv, is_train=True) targets = self.model.get_targets(gt_boxes_3d, gt_labels_3d) loss_detection, loss_heatmap, loss_bbox = self.model.loss(targets, preds) if len(depth_labels.shape) == 5: # only key-frame will calculate depth loss depth_labels = depth_labels[:, 0, ...].contiguous() loss_depth = self.get_depth_loss(depth_labels.cuda(), depth_preds, weight=3.) return loss_detection, loss_heatmap, loss_bbox, loss_depth def test_epoch_end(self, test_step_outputs): all_pred_results = list() all_img_metas = list() for test_step_output in test_step_outputs: for i in range(len(test_step_output)): all_pred_results.append(test_step_output[i][:3]) all_img_metas.append(test_step_output[i][3]) synchronize() # TODO: Change another way. 
dataset_length = len(self.val_dataloader().dataset) all_pred_results = sum( map(list, zip(*all_gather_object(all_pred_results))), [])[:dataset_length] all_img_metas = sum(map(list, zip(*all_gather_object(all_img_metas))), [])[:dataset_length] if self.global_rank == 0: self.evaluator.evaluate(all_pred_results, all_img_metas) def configure_optimizers(self): optimizer = build_optimizer(self.model, self.optimizer_config) scheduler = MultiStepLR(optimizer, [19, 23]) return [[optimizer], [scheduler]] def train_dataloader(self): train_dataset = NuscDatasetRadarDet( ida_aug_conf=self.ida_aug_conf, bda_aug_conf=self.bda_aug_conf, rda_aug_conf=self.rda_aug_conf, img_backbone_conf=self.backbone_img_conf, classes=self.class_names, data_root=self.data_root, info_paths=self.train_info_paths, is_train=True, use_cbgs=self.data_use_cbgs, img_conf=self.img_conf, load_interval=self.load_interval, num_sweeps=self.num_sweeps, sweep_idxes=self.sweep_idxes, key_idxes=self.key_idxes, return_image=self.return_image, return_depth=self.return_depth, return_radar_pv=self.return_radar_pv, remove_z_axis=self.remove_z_axis, depth_path='depth_gt', radar_pv_path='radar_pv_filter' ) train_loader = torch.utils.data.DataLoader( train_dataset, batch_size=self.batch_size_per_device, num_workers=4, drop_last=True, shuffle=False,
# Copyright (c) Megvii Inc. All rights reserved. pretrain_config = dict( img_model_path=None, img_load_key=[], img_freeze_key=None, pts_model_path=None, pts_load_key=[]) optimizer_config = dict( type='AdamW', lr=2e-4, weight_decay=1e-2) H = 900 W = 1600 final_dim = (256, 704) img_conf = dict(img_mean=[123.675, 116.28, 103.53], img_std=[58.395, 57.12, 57.375], to_rgb=True) ida_aug_conf = { 'resize_lim': (0.386, 0.55), 'final_dim': final_dim, 'rot_lim': (-5.4, 5.4), 'H': 900, 'W': 1600, 'rand_flip': True, 'bot_pct_lim': (0.0, 0.0), 'cams': ['CAM_FRONT_LEFT', 'CAM_FRONT', 'CAM_FRONT_RIGHT', 'CAM_BACK_LEFT', 'CAM_BACK', 'CAM_BACK_RIGHT'], 'Ncams': 6, } bda_aug_conf = { 'rot_ratio': 1.0, 'rot_lim': (-22.5, 22.5), 'scale_lim': (0.95, 1.05), 'flip_dx_ratio': 0.5, 'flip_dy_ratio': 0.5 } rda_aug_conf = { 'N_sweeps': 6, 'N_use': 5, 'drop_ratio': 0.1, } backbone_img_conf = { 'x_bound': [-51.2, 51.2, 0.8], 'y_bound': [-51.2, 51.2, 0.8], 'z_bound': [-5, 3, 8], 'd_bound': [2.0, 58.0, 0.8], 'final_dim': final_dim, 'output_channels': 80, 'downsample_factor': 16, 'img_backbone_conf': dict( type='ResNet', depth=50, frozen_stages=0, out_indices=[0, 1, 2, 3], norm_eval=False, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), ), 'img_neck_conf': dict( type='SECONDFPN', in_channels=[256, 512, 1024, 2048], upsample_strides=[0.25, 0.5, 1, 2], out_channels=[128, 128, 128, 128], ), 'depth_net_conf': dict(in_channels=512, mid_channels=512), 'camera_aware': True } CLASSES = [ 'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier', 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone', ] head_conf = { 'bev_backbone_conf': dict( type='ResNet', in_channels=80, depth=18, num_stages=3, strides=(1, 2, 2), dilations=(1, 1, 1), out_indices=[0, 1, 2], norm_eval=False, base_channels=160), 'bev_neck_conf': dict( type='SECONDFPN', in_channels=[80, 160, 320, 640], upsample_strides=[1, 2, 4, 8], out_channels=[64, 64, 64, 64]), 'tasks': [ dict(num_class=1, class_names=['car']), dict(num_class=2, class_names=['truck', 'construction_vehicle']), dict(num_class=2, class_names=['bus', 'trailer']), dict(num_class=1, class_names=['barrier']), dict(num_class=2, class_names=['motorcycle', 'bicycle']), dict(num_class=2, class_names=['pedestrian', 'traffic_cone']),], 'common_heads': dict( reg=(2, 2), height=(1, 2), dim=(3, 2), rot=(2, 2), vel=(2, 2)), 'bbox_coder': dict( type='CenterPointBBoxCoder', post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], max_num=500, score_threshold=0.1, out_size_factor=4, voxel_size=[0.2, 0.2, 8], pc_range=[-51.2, -51.2, -5, 51.2, 51.2, 3], code_size=9), 'train_cfg': dict( point_cloud_range=[-51.2, -51.2, -5, 51.2, 51.2, 3], grid_size=[512, 512, 1], voxel_size=[0.2, 0.2, 8], out_size_factor=4, dense_reg=1, gaussian_overlap=0.1, max_objs=500, min_radius=2, code_weights=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.5, 0.5]), 'test_cfg': dict( post_center_limit_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], max_per_img=500, max_pool_nms=False, min_radius=[4, 12, 10, 1, 0.85, 0.175], score_threshold=0.1, out_size_factor=4, voxel_size=[0.2, 0.2, 8], nms_type='circle', pre_max_size=1000, post_max_size=83, nms_thr=0.2), 'in_channels': 256, # Equal to bev_neck output_channels. 
'loss_cls': dict(type='GaussianFocalLoss', reduction='mean'), 'loss_bbox': dict(type='L1Loss', reduction='mean', loss_weight=0.25), 'gaussian_overlap': 0.1, 'min_radius': 2, } class BEVDepthLightningModel(LightningModule): MODEL_NAMES = sorted(name for name in models.__dict__ if name.islower() and not name.startswith('__') and callable(models.__dict__[name])) def __init__(self, gpus: int = 1, data_root='data/nuScenes', eval_interval=1, batch_size_per_device=8, class_names=CLASSES, backbone_img_conf=backbone_img_conf, head_conf=head_conf, ida_aug_conf=ida_aug_conf, bda_aug_conf=bda_aug_conf, rda_aug_conf=rda_aug_conf, default_root_dir='./outputs/', **kwargs): super().__init__() self.save_hyperparameters() self.gpus = gpus self.optimizer_config = optimizer_config self.pretrain_config = pretrain_config self.eval_interval = eval_interval self.batch_size_per_device = batch_size_per_device self.data_root = data_root self.class_names = class_names self.backbone_img_conf = backbone_img_conf self.head_conf = head_conf self.ida_aug_conf = ida_aug_conf self.bda_aug_conf = bda_aug_conf self.rda_aug_conf = rda_aug_conf mmcv.mkdir_or_exist(default_root_dir) self.default_root_dir = default_root_dir self.evaluator = DetNuscEvaluator(class_names=self.class_names, output_dir=self.default_root_dir) self.model = BaseBEVDepth(self.backbone_img_conf, self.head_conf) self.mode = 'valid' self.img_conf = img_conf self.data_use_cbgs = False self.load_interval = 1 self.num_sweeps = 1 self.sweep_idxes = list() self.key_idxes = list() self.data_return_depth = True self.downsample_factor = self.backbone_img_conf['downsample_factor'] self.dbound = self.backbone_img_conf['d_bound'] self.depth_channels = int( (self.dbound[1] - self.dbound[0]) / self.dbound[2]) self.use_fusion = False self.train_info_paths = 'data/nuScenes/nuscenes_infos_train.pkl' self.val_info_paths = 'data/nuScenes/nuscenes_infos_val.pkl' self.predict_info_paths = 'data/nuScenes/nuscenes_infos_test.pkl' self.return_image = True self.return_depth = True self.return_radar_pv = False self.remove_z_axis = True def forward(self, sweep_imgs, mats, is_train=False, **inputs): return self.model(sweep_imgs, mats, is_train=is_train) def training_step(self, batch): if self.global_rank == 0: for pg in self.trainer.optimizers[0].param_groups: self.log('learning_rate', pg["lr"]) (sweep_imgs, mats, _, gt_boxes_3d, gt_labels_3d, _, depth_labels, pts_pv) = batch if torch.cuda.is_available(): if self.return_image: sweep_imgs = sweep_imgs.cuda() for key, value in mats.items(): mats[key] = value.cuda() if self.return_radar_pv: pts_pv = pts_pv.cuda() gt_boxes_3d = [gt_box.cuda() for gt_box in gt_boxes_3d] gt_labels_3d = [gt_label.cuda() for gt_label in gt_labels_3d] preds, depth_preds = self(sweep_imgs, mats, pts_pv=pts_pv, is_train=True) targets = self.model.get_targets(gt_boxes_3d, gt_labels_3d) loss_detection, loss_heatmap, loss_bbox = self.model.loss(targets, preds) if len(depth_labels.shape) == 5: # only key-frame will calculate depth loss depth_labels = depth_labels[:, 0, ...].contiguous() loss_depth = self.get_depth_loss(depth_labels.cuda(), depth_preds) self.log('train/detection', loss_detection) self.log('train/heatmap', loss_heatmap) self.log('train/bbox', loss_bbox) self.log('train/depth', loss_depth) return loss_detection + loss_depth def get_depth_loss(self, depth_labels, depth_preds, weight=3.): depth_labels = self.get_downsampled_gt_depth(depth_labels) depth_preds = depth_preds.permute(0, 2, 3, 1).contiguous().view( -1, self.depth_channels) fg_mask = 
torch.max(depth_labels, dim=1).values > 0.0 with autocast(enabled=False): loss_depth = (F.binary_cross_entropy( depth_preds[fg_mask], depth_labels[fg_mask], reduction='none', ).sum() / max(1.0, fg_mask.sum())) return weight * loss_depth def get_downsampled_gt_depth(self, gt_depths): """ Input: gt_depths: [B, N, H, W] Output: gt_depths: [B*N*h*w, d] """ B, N, H, W = gt_depths.shape gt_depths = gt_depths.view( B * N, H // self.downsample_factor, self.downsample_factor, W // self.downsample_factor, self.downsample_factor, 1, ) gt_depths = gt_depths.permute(0, 1, 3, 5, 2, 4).contiguous() gt_depths = gt_depths.view( -1, self.downsample_factor * self.downsample_factor) gt_depths_tmp = torch.where(gt_depths == 0.0, 1e5 * torch.ones_like(gt_depths), gt_depths) gt_depths = torch.min(gt_depths_tmp, dim=-1).values gt_depths = gt_depths.view(B * N, H // self.downsample_factor, W // self.downsample_factor) gt_depths = (gt_depths - (self.dbound[0] - self.dbound[2])) / self.dbound[2] gt_depths = torch.where( (gt_depths < self.depth_channels + 1) & (gt_depths > 0.), gt_depths, torch.zeros_like(gt_depths)) gt_depths = F.one_hot(gt_depths.long(), num_classes=self.depth_channels + 1).view( -1, self.depth_channels + 1)[:, 1:] return gt_depths.float() def eval_step(self, batch, batch_idx, prefix: str): (sweep_imgs, mats, img_metas, _, _, _, _, pts_pv) = batch if torch.cuda.is_available(): if self.return_image: sweep_imgs = sweep_imgs.cuda() for key, value in mats.items(): mats[key] = value.cuda() if self.return_radar_pv: pts_pv = pts_pv.cuda() preds = self(sweep_imgs, mats, pts_pv=pts_pv, is_train=False) if isinstance(self.model, torch.nn.parallel.DistributedDataParallel): results = self.model.module.get_bboxes(preds, img_metas) else: results = self.model.get_bboxes(preds, img_metas) for i in range(len(results)): results[i][0] = results[i][0].tensor.detach().cpu().numpy() results[i][1] = results[i][1].detach().cpu().numpy() results[i][2] = results[i][2].detach().cpu().numpy() results[i].append(img_metas[i]) return results def validation_epoch_end(self, validation_step_outputs): detection_losses = list() heatmap_losses = list() bbox_losses = list() depth_losses = list() for validation_step_output in validation_step_outputs: detection_losses.append(validation_step_output[0]) heatmap_losses.append(validation_step_output[1]) bbox_losses.append(validation_step_output[2]) depth_losses.append(validation_step_output[3]) synchronize() self.log('val/detection', torch.mean(torch.stack(detection_losses)), on_epoch=True) self.log('val/heatmap', torch.mean(torch.stack(heatmap_losses)), on_epoch=True) self.log('val/bbox', torch.mean(torch.stack(bbox_losses)), on_epoch=True) self.log('val/depth', torch.mean(torch.stack(depth_losses)), on_epoch=True) def validation_step(self, batch, batch_idx): (sweep_imgs, mats, _, gt_boxes_3d, gt_labels_3d, _, depth_labels, pts_pv) = batch if torch.cuda.is_available(): if self.return_image: sweep_imgs = sweep_imgs.cuda() for key, value in mats.items(): mats[key] = value.cuda() if self.return_radar_pv: pts_pv = pts_pv.cuda() gt_boxes_3d = [gt_box.cuda() for gt_box in gt_boxes_3d] gt_labels_3d = [gt_label.cuda() for gt_label in gt_labels_3d] with torch.no_grad(): preds, depth_preds = self(sweep_imgs, mats, pts_pv=pts_pv, is_train=True) targets = self.model.get_targets(gt_boxes_3d, gt_labels_3d) loss_detection, loss_heatmap, loss_bbox = self.model.loss(targets, preds) if len(depth_labels.shape) == 5: # only key-frame will calculate depth loss depth_labels = depth_labels[:, 0, ...].contiguous() 
loss_depth = self.get_depth_loss(depth_labels.cuda(), depth_preds, weight=3.) return loss_detection, loss_heatmap, loss_bbox, loss_depth def test_epoch_end(self, test_step_outputs): all_pred_results = list() all_img_metas = list() for test_step_output in test_step_outputs: for i in range(len(test_step_output)): all_pred_results.append(test_step_output[i][:3]) all_img_metas.append(test_step_output[i][3]) synchronize() # TODO: Change another way. dataset_length = len(self.val_dataloader().dataset) all_pred_results = sum( map(list, zip(*all_gather_object(all_pred_results))), [])[:dataset_length] all_img_metas = sum(map(list, zip(*all_gather_object(all_img_metas))), [])[:dataset_length] if self.global_rank == 0: self.evaluator.evaluate(all_pred_results, all_img_metas) def configure_optimizers(self): optimizer = build_optimizer(self.model, self.optimizer_config) scheduler = MultiStepLR(optimizer, [19, 23]) return [[optimizer], [scheduler]] def train_dataloader(self): train_dataset = NuscDatasetRadarDet( ida_aug_conf=self.ida_aug_conf, bda_aug_conf=self.bda_aug_conf, rda_aug_conf=self.rda_aug_conf, img_backbone_conf=self.backbone_img_conf, classes=self.class_names, data_root=self.data_root, info_paths=self.train_info_paths, is_train=True, use_cbgs=self.data_use_cbgs, img_conf=self.img_conf, load_interval=self.load_interval, num_sweeps=self.num_sweeps, sweep_idxes=self.sweep_idxes, key_idxes=self.key_idxes, return_image=self.return_image, return_depth=self.return_depth, return_radar_pv=self.return_radar_pv, remove_z_axis=self.remove_z_axis, depth_path='depth_gt', radar_pv_path='radar_pv_filter' ) train_loader = torch.utils.data.DataLoader( train_dataset, batch_size=self.batch_size_per_device, num_workers=4, drop_last=True, shuffle=False,
collate_fn=partial(collate_fn,
1
2023-12-06 14:57:49+00:00
24k
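The get_downsampled_gt_depth code in the row above packs three steps into one pass: patch-wise min-pooling of the sparse lidar depth map, metric-to-bin conversion, and a one-hot encoding whose first class is an "invalid" bin that gets sliced away. Below is a minimal, self-contained sketch of the same steps on dummy tensors; the dbound values, downsample factor, and tensor shapes are illustrative assumptions, not the repository's configuration.

import torch
import torch.nn.functional as F

# Assumed config: dbound = (min_depth, max_depth, bin_size), 4x downsampling.
dbound = (2.0, 58.0, 0.5)
depth_channels = int((dbound[1] - dbound[0]) / dbound[2])  # 112 bins
downsample = 4

# Dummy depth map for B=1 batch, N=1 camera; 0 means "no lidar return here".
gt = torch.zeros(1, 1, 8, 8)
gt[0, 0, 2, 3] = 10.0

B, N, H, W = gt.shape
g = gt.view(B * N, H // downsample, downsample, W // downsample, downsample, 1)
g = g.permute(0, 1, 3, 5, 2, 4).contiguous().view(-1, downsample * downsample)

# Nearest valid depth per patch: mask zeros with a huge value before the min.
g = torch.where(g == 0.0, 1e5 * torch.ones_like(g), g).min(dim=-1).values

# Metric depth -> 1-based bin index; anything out of range collapses to bin 0.
idx = (g - (dbound[0] - dbound[2])) / dbound[2]
idx = torch.where((idx < depth_channels + 1) & (idx > 0.0), idx, torch.zeros_like(idx))

# One-hot over depth_channels + 1 classes, then drop class 0 (the invalid bin).
one_hot = F.one_hot(idx.long(), num_classes=depth_channels + 1)[:, 1:]
print(one_hot.shape, one_hot.sum().item())  # torch.Size([4, 112]) 1

Because empty patches keep the 1e5 sentinel through the min, their bin index falls outside the valid range and is zeroed, so they become all-zero rows that the depth loss's fg_mask later skips.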
qitan/devops-backend-lite
common/ext_fun.py
[ { "identifier": "generate_docu", "path": "common/utils/ElasticSearchAPI.py", "snippet": "def generate_docu(table, index_version=None):\n index_name = f\"{table.name}-{index_version}\" if index_version else table.name\n _tbindex = Index(index_name)\n _tbindex.analyzer(my_normalizer)\n _tbinde...
from gitlab.exceptions import GitlabGetError from functools import reduce from common.utils.ElasticSearchAPI import generate_docu, Search from common.utils.GitLabAPI import GitLabAPI from common.utils.HarborAPI import HarborAPI from common.utils.JenkinsAPI import GlueJenkins from common.custom_format import convert_xml_to_str_with_pipeline from common.variables import DASHBOARD_TIME_FORMAT, DASHBOARD_TIME_FORMAT_T, DASHBOARD_TIME_FREQNAMES, \ DASHBOARD_TIME_FREQNAMES_T, SENSITIVE_KEYS, JENKINS_CALLBACK_KEY, \ JENKINS_STATUS_MAP, DEV_LANGUAGE_KEY from dbapp.models import AppInfo, Product, KubernetesCluster, KubernetesDeploy, MicroApp, Project, ProjectConfig, DevLanguage, BuildJob, UserProfile, SystemConfig, Role, Permission, Menu, DataDict from django.conf import settings from django.core.cache import cache from django.utils import timezone from django.db.models import Q from social_django.utils import load_strategy from rest_framework.utils.serializer_helpers import ReturnDict from config import SOCIAL_AUTH_GITLAB_API_URL, GITLAB_ADMIN_TOKEN from common.utils.K8sAPI import K8sAPI from urllib.parse import urlparse, quote_plus from dateutil.relativedelta import relativedelta from dateutil.rrule import rrule from ruamel import yaml from datetime import datetime, timedelta from celery import current_app import copy import operator import re import time import pytz import os import json import requests import math import shortuuid import logging
14,828
#!/usr/bin/env python # -*- coding: utf-8 -*- """ @Author : Charles Lai @Contact : qqing_lai@hotmail.com @Time : 2020/12/21 10:00 AM @FileName: ext_fun.py @Blog :https://imaojia.com """ logger = logging.getLogger('drf') class ThirdPartyUser(object): def get_user(self):
#!/usr/bin/env python # -*- coding: utf-8 -*- """ @Author : Charles Lai @Contact : qqing_lai@hotmail.com @Time : 2020/12/21 10:00 AM @FileName: ext_fun.py @Blog :https://imaojia.com """ logger = logging.getLogger('drf') class ThirdPartyUser(object): def get_user(self):
user = UserProfile.objects.get_or_create(username='thirdparty')[0]
14
2023-12-13 03:09:32+00:00
24k
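The gold next_line of this row indexes the result of get_or_create with [0] because Django's QuerySet.get_or_create returns an (instance, created) tuple rather than the instance alone. A plain-Python stand-in sketching that contract, with no real Django model involved:

# Stand-in for Django's get_or_create: returns (object, created) like the ORM
# does, which is why the gold line ends in [0].
_users = {}

def get_or_create(username):
    if username in _users:
        return _users[username], False     # existing row, created=False
    _users[username] = {"username": username}
    return _users[username], True          # new row, created=True

user = get_or_create("thirdparty")[0]        # mirrors UserProfile.objects.get_or_create(...)[0]
print(user, get_or_create("thirdparty")[1])  # {'username': 'thirdparty'} False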
MarilynKeller/aitviewer-skel
aitviewer/scene/scene.py
[ { "identifier": "CONFIG", "path": "aitviewer/configuration.py", "snippet": "CONFIG = Configuration()" }, { "identifier": "CoordinateSystem", "path": "aitviewer/renderables/coordinate_system.py", "snippet": "class CoordinateSystem(RigidBodies):\n \"\"\"\n Render a coordinate system ...
import moderngl import numpy as np from aitviewer.configuration import CONFIG as C from aitviewer.renderables.coordinate_system import CoordinateSystem from aitviewer.renderables.lines import Lines2D from aitviewer.renderables.plane import ChessboardPlane from aitviewer.scene.camera import ViewerCamera from aitviewer.scene.light import Light from aitviewer.scene.node import Node from aitviewer.utils.utils import ( compute_union_of_bounds, compute_union_of_current_bounds, )
17,064
# Copyright (C) 2023 ETH Zurich, Manuel Kaufmann, Velko Vechev, Dario Mylonopoulos class Scene(Node): """Generic scene node""" def __init__(self, **kwargs): """Create a scene with a name.""" kwargs["gui_material"] = False super(Scene, self).__init__(**kwargs) # References resources in the scene self.lights = [] self.camera = None # Scene has a reference to ctx self.ctx = None self.backface_culling = True
# Copyright (C) 2023 ETH Zurich, Manuel Kaufmann, Velko Vechev, Dario Mylonopoulos class Scene(Node): """Generic scene node""" def __init__(self, **kwargs): """Create a scene with a name.""" kwargs["gui_material"] = False super(Scene, self).__init__(**kwargs) # References resources in the scene self.lights = [] self.camera = None # Scene has a reference to ctx self.ctx = None self.backface_culling = True
self.fps = C.scene_fps
3
2023-12-07 16:13:50+00:00
24k
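The gold line here (self.fps = C.scene_fps) pulls a default from aitviewer's module-level CONFIG object. A minimal sketch of that configuration-singleton pattern; the attribute names and default values below are hypothetical, not aitviewer's actual configuration keys:

# Hypothetical config singleton in the spirit of `from aitviewer.configuration
# import CONFIG as C`; attribute names and defaults are made up for illustration.
class Configuration:
    _defaults = {"scene_fps": 60, "window_width": 1280}

    def __getattr__(self, name):
        try:
            return self._defaults[name]
        except KeyError:
            raise AttributeError(name)

CONFIG = Configuration()

class Scene:
    def __init__(self):
        self.lights = []
        self.camera = None
        self.fps = CONFIG.scene_fps   # default read from the shared config

print(Scene().fps)  # 60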
nexB/dejacode
reporting/admin.py
[ { "identifier": "DataspacedAdmin", "path": "dje/admin.py", "snippet": "class DataspacedAdmin(\n DataspacedFKMixin,\n ProtectedFieldsMixin,\n AdvancedSearchAdminMixin,\n HistoryAdminMixin,\n admin.ModelAdmin,\n):\n formfield_overrides = {\n models.DateField: {\"widget\": AdminDat...
from django.contrib import admin from django.contrib.admin.views.main import ORDER_VAR from django.contrib.contenttypes.models import ContentType from django.core.exceptions import NON_FIELD_ERRORS from django.db.models import PositiveSmallIntegerField from django.forms import HiddenInput from django.utils.html import format_html from django.utils.http import urlencode from django.utils.translation import gettext_lazy as _ from dje.admin import DataspacedAdmin from dje.admin import DataspacedFKMixin from dje.admin import dejacode_site from dje.admin import get_additional_information_fieldset from dje.client_data import add_client_data from dje.filters import MissingInFilter from dje.list_display import AsJoinList from dje.list_display import AsLink from dje.utils import CHANGELIST_LINK_TEMPLATE from dje.utils import queryset_to_html_list from reporting.filters import ReportingQueryListFilter from reporting.forms import ColumnTemplateForm from reporting.forms import QueryForm from reporting.forms import ReportForm from reporting.forms import ReportMassUpdateForm from reporting.forms import get_model_data_for_column_template from reporting.forms import get_model_data_for_order_field from reporting.forms import get_model_data_for_query from reporting.inlines import ColumnTemplateAssignedFieldInline from reporting.inlines import FilterInline from reporting.inlines import OrderFieldInline from reporting.introspection import get_model_label from reporting.models import Card from reporting.models import CardLayout from reporting.models import ColumnTemplate from reporting.models import Filter from reporting.models import LayoutAssignedCard from reporting.models import OrderField from reporting.models import Query from reporting.models import Report
18,290
data = form.cleaned_data # The 'id' field is a ModelChoiceField which validates to a model instance id = form.cleaned_data.get("id") if id: pk = id.pk d = { "pk": pk, "field_name": data.get("field_name", ""), "lookup": data.get("lookup", ""), "value": data.get("value", ""), "runtime_parameter": data.get("runtime_parameter", ""), "negate": data.get("negate", ""), } filters.append(d) return filters @staticmethod def get_order_fields_as_dicts(request, forms): order_fields = [] for form in forms: pk = None if request.method == "GET": data = form.initial pk = form.instance.pk if request.method == "POST": data = form.cleaned_data # The 'id' field is a ModelChoiceField which validates to a # model instance id = form.cleaned_data.get("id") if id: pk = id.pk d = { "pk": pk, "field_name": data.get("field_name", ""), "seq": data.get("seq", ""), "sort": data.get("sort", ""), } order_fields.append(d) return order_fields def _changeform_view(self, request, object_id, form_url, extra_context): response = super()._changeform_view(request, object_id, form_url, extra_context) if response.status_code == 200: if request.method == "POST" and "_popup" in request.GET: return response add_client_data(request, **self.get_client_data(request)) inline_admin_formsets = response.context_data["inline_admin_formsets"] filter_admin_formsets = [ formset for formset in inline_admin_formsets if formset.opts.__class__ == FilterInline ] if not filter_admin_formsets: return response filter_formset = filter_admin_formsets[0].formset order_field_admin_formsets = [ formset for formset in inline_admin_formsets if formset.opts.__class__ == OrderFieldInline ] if not order_field_admin_formsets: return response order_field_formset = order_field_admin_formsets[0].formset add_client_data( request, filter_formset_prefix=filter_formset.prefix, filter_formset_initial_form_count=filter_formset.initial_form_count(), filter_formset_has_errors=any(filter_formset.errors), filter_formset_all_errors=flatten_errors(filter_formset.errors), order_field_formset_prefix=order_field_formset.prefix, order_field_formset_initial_form_count=order_field_formset.initial_form_count(), filters=self.get_filters_as_dicts(request, filter_formset.forms), order_fields=self.get_order_fields_as_dicts(request, order_field_formset.forms), ) return response @admin.register(ColumnTemplate, site=dejacode_site) class ColumnTemplateAdmin(DataspacedAdmin): form = ColumnTemplateForm list_display = ("name", "description", "content_type", "get_field_names", "get_dataspace") list_filter = DataspacedAdmin.list_filter + ( "content_type", MissingInFilter, ) fieldsets = ( ("", {"fields": ("name", "description", "content_type")}), get_additional_information_fieldset(), ) search_fields = ("name",) inlines = (ColumnTemplateAssignedFieldInline,) change_form_template = "admin/reporting/columntemplate/change_form.html" long_description = ( "A Column template provides you with the ability to identify the data " "columns, with your own labels, to appear in a report using the column " "order that you specify. You can combine a column template with any " "query of the same object type (license, component, owner) to create " "your own Reports." 
) @admin.display(description=_("Assigned fields")) def get_field_names(self, instance): field_names = [assigned_field.field_name for assigned_field in instance.fields.all()] return format_html("<br>".join(field_names)) def get_queryset(self, request): qs = super().get_queryset(request) return qs.select_related("content_type").prefetch_related("fields") def get_client_data(self, request):
# # Copyright (c) nexB Inc. and others. All rights reserved. # DejaCode is a trademark of nexB Inc. # SPDX-License-Identifier: AGPL-3.0-only # See https://github.com/nexB/dejacode for support or download. # See https://aboutcode.org for more information about AboutCode FOSS projects. # def get_content_type_map(): return dict( [ (c.pk, get_model_label(c.model_class())) for c in ContentType.objects.all() if c.model_class() ] ) def flatten_errors(formset_errors): """ Convert a FormSet.errors, which is a list of dicts, into a flat list of error messages. """ flattened = [] for error_dict in formset_errors: for field_name, errors in error_dict.items(): for error in errors.as_data(): message = list(error)[0] if field_name != NON_FIELD_ERRORS: val = f'Field "{field_name}": {message}' else: val = message flattened.append(val) return flattened @admin.register(Query, site=dejacode_site) class QueryAdmin(DataspacedAdmin): form = QueryForm change_form_template = "admin/reporting/query/change_form.html" # We are using an inline for ``Filter``, but are not rendering the standard # Django inline UI in favor of a custom Ember-based inline UI. inlines = [FilterInline, OrderFieldInline] fieldsets = ( ("", {"fields": ("name", "description", "content_type", "operator")}), get_additional_information_fieldset(), ("Preview", {"fields": ("get_preview",)}), ) readonly_fields = DataspacedAdmin.readonly_fields + ("get_preview",) list_display = ("name", "description", "content_type", "operator", "get_dataspace") list_filter = DataspacedAdmin.list_filter + ( "content_type", MissingInFilter, ) search_fields = ("name",) long_description = ( "A Query provides you with the ability to select data from application " "licenses, components, and owners using the criteria that meet your " "business requirements. You can access a changelist of the data you " "select using the Preview feature, and you can also combine a query " "with a column template to create your own Report." ) def get_queryset(self, request): return ( super() .get_queryset(request) .select_related( "content_type", ) .prefetch_related( "filters", "order_fields", ) ) def get_object(self, request, object_id, from_field=None): """ Injects the `request` on the object instance. Required for `get_preview()`. """ obj = super().get_object(request, object_id, from_field) if obj: obj._request = request return obj @admin.display(description=_("Results")) def get_preview(self, query_instance): """ Return a preview of the Query results. The logic starts with a count and will not eval/render the QuerySet if over 100 results. Between 1 and 100 results, a 5 items list will be display, next to a link to the changelist for the QuerySet. See #9248. """ if query_instance.filters.count() == 0: return "No filters defined" request = getattr(query_instance, "_request", None) if not request: return try: qs = query_instance.get_qs(user=request.user) qs_count = qs.count() except Exception as e: return f"Error: {e}" if not qs_count: return "No results." model = query_instance.content_type.model_class() model_admin = dejacode_site._registry.get(model) if not model_admin: # Models like Request do not have a ModelAdmin. return f"{qs_count} results." 
params = {ReportingQueryListFilter.parameter_name: query_instance.id} order_list = query_instance.get_order_list_for_url(request, model_admin) if order_list: params[ORDER_VAR] = ".".join(order_list) if qs_count >= 100: url = query_instance.get_changelist_url() href = f"{url}?{urlencode(params)}" return format_html( CHANGELIST_LINK_TEMPLATE, href, qs_count, model._meta.verbose_name_plural ) return queryset_to_html_list(qs, params, qs_limit=5) def get_query_options(self, request): """ Return a dictionary of Query options to be injected in the client_data and user for the "Use a query as value" list of choices. The QuerySet is reused from self.get_queryset() and scoped to the current object dataspace on edition, or on the current user dataspace on addition. """ # The object instance is set on the request in the DataspacedAdmin.get_form() method if request._object: # Edition dataspace = request._object.dataspace else: # Addition dataspace = request.user.dataspace queryset = self.get_queryset(request).scope(dataspace) return [ { "id": query.pk, "name": query.name, "content_type": get_model_label(query.content_type.model_class()), } for query in queryset ] def get_client_data(self, request): return { "model_data": get_model_data_for_query(), "order_field_model_data": get_model_data_for_order_field(), "content_type_map": get_content_type_map(), "lookups": [ { "label": v, "value": k, } for k, v in Filter.LOOKUP_CHOICES ], "query_options": self.get_query_options(request), "sort_options": [v for k, v in OrderField.SORT_CHOICES], } @staticmethod def get_filters_as_dicts(request, forms): filters = [] for form in forms: pk = None if request.method == "GET": data = form.initial pk = form.instance.pk if request.method == "POST": data = form.cleaned_data # The 'id' field is a ModelChoiceField which validates to a model instance id = form.cleaned_data.get("id") if id: pk = id.pk d = { "pk": pk, "field_name": data.get("field_name", ""), "lookup": data.get("lookup", ""), "value": data.get("value", ""), "runtime_parameter": data.get("runtime_parameter", ""), "negate": data.get("negate", ""), } filters.append(d) return filters @staticmethod def get_order_fields_as_dicts(request, forms): order_fields = [] for form in forms: pk = None if request.method == "GET": data = form.initial pk = form.instance.pk if request.method == "POST": data = form.cleaned_data # The 'id' field is a ModelChoiceField which validates to a # model instance id = form.cleaned_data.get("id") if id: pk = id.pk d = { "pk": pk, "field_name": data.get("field_name", ""), "seq": data.get("seq", ""), "sort": data.get("sort", ""), } order_fields.append(d) return order_fields def _changeform_view(self, request, object_id, form_url, extra_context): response = super()._changeform_view(request, object_id, form_url, extra_context) if response.status_code == 200: if request.method == "POST" and "_popup" in request.GET: return response add_client_data(request, **self.get_client_data(request)) inline_admin_formsets = response.context_data["inline_admin_formsets"] filter_admin_formsets = [ formset for formset in inline_admin_formsets if formset.opts.__class__ == FilterInline ] if not filter_admin_formsets: return response filter_formset = filter_admin_formsets[0].formset order_field_admin_formsets = [ formset for formset in inline_admin_formsets if formset.opts.__class__ == OrderFieldInline ] if not order_field_admin_formsets: return response order_field_formset = order_field_admin_formsets[0].formset add_client_data( request, 
filter_formset_prefix=filter_formset.prefix, filter_formset_initial_form_count=filter_formset.initial_form_count(), filter_formset_has_errors=any(filter_formset.errors), filter_formset_all_errors=flatten_errors(filter_formset.errors), order_field_formset_prefix=order_field_formset.prefix, order_field_formset_initial_form_count=order_field_formset.initial_form_count(), filters=self.get_filters_as_dicts(request, filter_formset.forms), order_fields=self.get_order_fields_as_dicts(request, order_field_formset.forms), ) return response @admin.register(ColumnTemplate, site=dejacode_site) class ColumnTemplateAdmin(DataspacedAdmin): form = ColumnTemplateForm list_display = ("name", "description", "content_type", "get_field_names", "get_dataspace") list_filter = DataspacedAdmin.list_filter + ( "content_type", MissingInFilter, ) fieldsets = ( ("", {"fields": ("name", "description", "content_type")}), get_additional_information_fieldset(), ) search_fields = ("name",) inlines = (ColumnTemplateAssignedFieldInline,) change_form_template = "admin/reporting/columntemplate/change_form.html" long_description = ( "A Column template provides you with the ability to identify the data " "columns, with your own labels, to appear in a report using the column " "order that you specify. You can combine a column template with any " "query of the same object type (license, component, owner) to create " "your own Reports." ) @admin.display(description=_("Assigned fields")) def get_field_names(self, instance): field_names = [assigned_field.field_name for assigned_field in instance.fields.all()] return format_html("<br>".join(field_names)) def get_queryset(self, request): qs = super().get_queryset(request) return qs.select_related("content_type").prefetch_related("fields") def get_client_data(self, request):
model_data = get_model_data_for_column_template(request.user.dataspace)
15
2023-12-07 16:57:42+00:00
24k
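flatten_errors in this row turns a formset's errors (a list of {field: messages} dicts) into a flat list of strings, prefixing field-level messages with the field name. A pure-Python analogue of the same flattening; unlike the real code it uses plain message strings instead of unwrapping Django ValidationError objects through .as_data():

# Simplified flatten_errors: Django's NON_FIELD_ERRORS key is "__all__"; field
# errors get a 'Field "name": ...' prefix, non-field errors pass through as-is.
NON_FIELD_ERRORS = "__all__"

def flatten_errors(formset_errors):
    flattened = []
    for error_dict in formset_errors:
        for field_name, messages in error_dict.items():
            for message in messages:
                if field_name != NON_FIELD_ERRORS:
                    flattened.append(f'Field "{field_name}": {message}')
                else:
                    flattened.append(message)
    return flattened

errors = [
    {"value": ["This field is required."]},
    {NON_FIELD_ERRORS: ["Filters must be unique."]},
]
print(flatten_errors(errors))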
wusize/CLIM
src/open_clip/model.py
[ { "identifier": "HFTextEncoder", "path": "src/open_clip/hf_model.py", "snippet": "class HFTextEncoder(nn.Module):\n \"\"\"HuggingFace model adapter\"\"\"\n output_tokens: torch.jit.Final[bool]\n\n def __init__(\n self,\n model_name_or_path: str,\n output_dim: in...
from dataclasses import dataclass from typing import Optional, Tuple, Union from torch import nn from torch.utils.checkpoint import checkpoint from .hf_model import HFTextEncoder from .modified_resnet import ModifiedResNet from .timm_model import TimmModel from .transformer import LayerNormFp32, LayerNorm, QuickGELU, Attention, VisionTransformer, TextTransformer from .utils import to_2tuple import logging import math import numpy as np import torch import torch.nn.functional as F
17,623
l.bias.data = l.bias.data.to(dtype) if isinstance(l, (nn.MultiheadAttention, Attention)): for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]: tensor = getattr(l, attr) if tensor is not None: tensor.data = tensor.data.to(dtype) for name in ["text_projection", "proj"]: if hasattr(l, name): attr = getattr(l, name) if attr is not None: attr.data = attr.data.to(dtype) model.apply(_convert_weights) convert_weights_to_fp16 = convert_weights_to_lp # backwards compat # used to maintain checkpoint compatibility def convert_to_custom_text_state_dict(state_dict: dict): if 'text_projection' in state_dict: # old format state_dict, move text tower -> .text new_state_dict = {} for k, v in state_dict.items(): if any(k.startswith(p) for p in ( 'text_projection', 'positional_embedding', 'token_embedding', 'transformer', 'ln_final', )): k = 'text.' + k new_state_dict[k] = v return new_state_dict return state_dict def build_model_from_openai_state_dict( state_dict: dict, quick_gelu=True, cast_dtype=torch.float16, ): vit = "visual.proj" in state_dict if vit: vision_width = state_dict["visual.conv1.weight"].shape[0] vision_layers = len( [k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")]) vision_patch_size = state_dict["visual.conv1.weight"].shape[-1] grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5) image_size = vision_patch_size * grid_size else: counts: list = [ len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}"))) for b in [1, 2, 3, 4]] vision_layers = tuple(counts) vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0] output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5) vision_patch_size = None assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0] image_size = output_width * 32 embed_dim = state_dict["text_projection"].shape[1] context_length = state_dict["positional_embedding"].shape[0] vocab_size = state_dict["token_embedding.weight"].shape[0] transformer_width = state_dict["ln_final.weight"].shape[0] transformer_heads = transformer_width // 64 transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith(f"transformer.resblocks"))) vision_cfg = CLIPVisionCfg( layers=vision_layers, width=vision_width, patch_size=vision_patch_size, image_size=image_size, ) text_cfg = CLIPTextCfg( context_length=context_length, vocab_size=vocab_size, width=transformer_width, heads=transformer_heads, layers=transformer_layers, ) model = CLIP( embed_dim, vision_cfg=vision_cfg, text_cfg=text_cfg, quick_gelu=quick_gelu, # OpenAI models were trained with QuickGELU cast_dtype=cast_dtype, ) for key in ["input_resolution", "context_length", "vocab_size"]: state_dict.pop(key, None) convert_weights_to_fp16(model) # OpenAI state dicts are partially converted to float16 model.load_state_dict(state_dict) return model.eval() def trace_model(model, batch_size=256, device=torch.device('cpu')): model.eval() image_size = model.visual.image_size example_images = torch.ones((batch_size, 3, image_size, image_size), device=device) example_text = torch.zeros((batch_size, model.context_length), dtype=torch.int, device=device) model = torch.jit.trace_module( model, inputs=dict( forward=(example_images, example_text), encode_text=(example_text,), encode_image=(example_images,) )) model.visual.image_size = image_size return model def resize_pos_embed(state_dict, model, interpolation: 
str = 'bicubic', antialias: bool = True): # Rescale the grid of position embeddings when loading from state_dict old_pos_embed = state_dict.get('visual.positional_embedding', None) if old_pos_embed is None or not hasattr(model.visual, 'grid_size'): return
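resize_pos_embed, cut off at the end of the cropped code above, rescales a ViT's positional-embedding grid when the checkpoint and model resolutions differ (the full version also reads model.visual.grid_size and writes the result back into state_dict). A minimal sketch of the usual recipe under two assumptions: exactly one class token, and square old and new grids.

import torch
import torch.nn.functional as F

def resize_pos_embed_sketch(pos_embed, new_grid, mode="bicubic"):
    # pos_embed: (1 + old_grid**2, width) -- class token first, then the grid.
    cls_tok, grid_tok = pos_embed[:1], pos_embed[1:]
    old_grid = int(grid_tok.shape[0] ** 0.5)
    grid_tok = grid_tok.reshape(1, old_grid, old_grid, -1).permute(0, 3, 1, 2)
    grid_tok = F.interpolate(grid_tok, size=(new_grid, new_grid),
                             mode=mode, align_corners=False)
    grid_tok = grid_tok.permute(0, 2, 3, 1).reshape(new_grid * new_grid, -1)
    return torch.cat([cls_tok, grid_tok], dim=0)

old = torch.randn(1 + 7 * 7, 512)       # e.g. 224px image, 32px patches -> 7x7
new = resize_pos_embed_sketch(old, 14)  # e.g. 448px input -> 14x14 grid
print(new.shape)                        # torch.Size([197, 512])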
""" CLIP Model Adapted from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI. """ @dataclass class CLIPVisionCfg: layers: Union[Tuple[int, int, int, int], int] = 12 width: int = 768 head_width: int = 64 mlp_ratio: float = 4.0 patch_size: int = 16 image_size: Union[Tuple[int, int], int] = 224 ls_init_value: Optional[float] = None # layer scale initial value patch_dropout: float = 0. # what fraction of patches to dropout during training (0 would mean disabled and no patches dropped) - 0.5 to 0.75 recommended in the paper for optimal results input_patchnorm: bool = False # whether to use dual patchnorm - would only apply the input layernorm on each patch, as post-layernorm already exist in original clip vit design global_average_pool: bool = False # whether to global average pool the last embedding layer, instead of using CLS token (https://arxiv.org/abs/2205.01580) attentional_pool: bool = False # whether to use attentional pooler in the last embedding layer n_queries: int = 256 # n_queries for attentional pooler attn_pooler_heads: int = 8 # n heads for attentional_pooling timm_model_name: str = None # a valid model name overrides layers, width, patch_size timm_model_pretrained: bool = False # use (imagenet) pretrained weights for named model timm_pool: str = 'avg' # feature pooling for timm model ('abs_attn', 'rot_attn', 'avg', '') timm_proj: str = 'linear' # linear projection for timm model output ('linear', 'mlp', '') timm_proj_bias: bool = False # enable bias final projection timm_drop: float = 0. # head dropout timm_drop_path: Optional[float] = None # backbone stochastic depth output_tokens: bool = False freeze_output = True freeze_all_bns = True @dataclass class CLIPTextCfg: context_length: int = 77 vocab_size: int = 49408 width: int = 512 heads: int = 8 layers: int = 12 ls_init_value: Optional[float] = None # layer scale initial value hf_model_name: str = None hf_tokenizer_name: str = None hf_model_pretrained: bool = True proj: str = 'mlp' pooler_type: str = 'mean_pooler' embed_cls: bool = False pad_id: int = 0 output_tokens: bool = False def get_cast_dtype(precision: str): cast_dtype = None if precision == 'bf16': cast_dtype = torch.bfloat16 elif precision == 'fp16': cast_dtype = torch.float16 return cast_dtype def _build_vision_tower( embed_dim: int, vision_cfg: CLIPVisionCfg, quick_gelu: bool = False, cast_dtype: Optional[torch.dtype] = None ): if isinstance(vision_cfg, dict): vision_cfg = CLIPVisionCfg(**vision_cfg) # OpenAI models are pretrained w/ QuickGELU but native nn.GELU is both faster and more # memory efficient in recent PyTorch releases (>= 1.10). # NOTE: timm models always use native GELU regardless of quick_gelu flag. 
act_layer = QuickGELU if quick_gelu else nn.GELU if vision_cfg.timm_model_name: visual = TimmModel( vision_cfg.timm_model_name, pretrained=vision_cfg.timm_model_pretrained, pool=vision_cfg.timm_pool, proj=vision_cfg.timm_proj, proj_bias=vision_cfg.timm_proj_bias, drop=vision_cfg.timm_drop, drop_path=vision_cfg.timm_drop_path, patch_drop=vision_cfg.patch_dropout if vision_cfg.patch_dropout > 0 else None, embed_dim=embed_dim, image_size=vision_cfg.image_size, ) act_layer = nn.GELU # so that text transformer doesn't use QuickGELU w/ timm models elif isinstance(vision_cfg.layers, (tuple, list)): vision_heads = vision_cfg.width * 32 // vision_cfg.head_width visual = ModifiedResNet( layers=vision_cfg.layers, output_dim=embed_dim, heads=vision_heads, image_size=vision_cfg.image_size, width=vision_cfg.width, freeze_output=vision_cfg.freeze_output, freeze_all_bns=vision_cfg.freeze_all_bns ) else: vision_heads = vision_cfg.width // vision_cfg.head_width norm_layer = LayerNormFp32 if cast_dtype in (torch.float16, torch.bfloat16) else LayerNorm visual = VisionTransformer( image_size=vision_cfg.image_size, patch_size=vision_cfg.patch_size, width=vision_cfg.width, layers=vision_cfg.layers, heads=vision_heads, mlp_ratio=vision_cfg.mlp_ratio, ls_init_value=vision_cfg.ls_init_value, patch_dropout=vision_cfg.patch_dropout, input_patchnorm=vision_cfg.input_patchnorm, global_average_pool=vision_cfg.global_average_pool, attentional_pool=vision_cfg.attentional_pool, n_queries=vision_cfg.n_queries, attn_pooler_heads=vision_cfg.attn_pooler_heads, output_tokens=vision_cfg.output_tokens, output_dim=embed_dim, act_layer=act_layer, norm_layer=norm_layer, ) return visual def _build_text_tower( embed_dim: int, text_cfg: CLIPTextCfg, quick_gelu: bool = False, cast_dtype: Optional[torch.dtype] = None, ): if isinstance(text_cfg, dict): text_cfg = CLIPTextCfg(**text_cfg) if text_cfg.hf_model_name: text = HFTextEncoder( text_cfg.hf_model_name, output_dim=embed_dim, proj=text_cfg.proj, pooler_type=text_cfg.pooler_type, pretrained=text_cfg.hf_model_pretrained, output_tokens=text_cfg.output_tokens, ) else: act_layer = QuickGELU if quick_gelu else nn.GELU norm_layer = LayerNormFp32 if cast_dtype in (torch.float16, torch.bfloat16) else LayerNorm text = TextTransformer( context_length=text_cfg.context_length, vocab_size=text_cfg.vocab_size, width=text_cfg.width, heads=text_cfg.heads, layers=text_cfg.layers, ls_init_value=text_cfg.ls_init_value, output_dim=embed_dim, embed_cls=text_cfg.embed_cls, output_tokens=text_cfg.output_tokens, pad_id=text_cfg.pad_id, act_layer=act_layer, norm_layer=norm_layer, ) return text class CLIP(nn.Module): output_dict: torch.jit.Final[bool] def __init__( self, embed_dim: int, vision_cfg: CLIPVisionCfg, text_cfg: CLIPTextCfg, quick_gelu: bool = False, cast_dtype: Optional[torch.dtype] = None, output_dict: bool = False, freeze_text=True, ): assert freeze_text, 'For now we must freeze text' super().__init__() self.output_dict = output_dict self.visual = _build_vision_tower(embed_dim, vision_cfg, quick_gelu, cast_dtype) text = _build_text_tower(embed_dim, text_cfg, quick_gelu, cast_dtype) if freeze_text: print(f'Freeze text encoder parameters', flush=True) for param in text.parameters(): param.requires_grad = False text.eval() self.transformer = text.transformer self.vocab_size = text.vocab_size self.embed_dim = embed_dim self.token_embedding = text.token_embedding self.positional_embedding = text.positional_embedding self.ln_final = text.ln_final self.text_projection = text.text_projection 
self.register_buffer('attn_mask', text.attn_mask, persistent=False) self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07)) def lock_image_tower(self, unlocked_groups=0, freeze_bn_stats=False, **kwargs): self.visual.lock(unlocked_groups=unlocked_groups, freeze_bn_stats=freeze_bn_stats) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.visual.set_grad_checkpointing(enable) self.transformer.grad_checkpointing = enable def encode_image(self, image, normalize: bool = False): features = self.visual(image) return F.normalize(features, dim=-1) if normalize else features def encode_dense(self, image, normalize: bool = False, keep_shape=False): features = self.visual.encode_dense(image, keep_shape=keep_shape) if normalize: if keep_shape: features = F.normalize(features, dim=1) else: features = F.normalize(features, dim=-1) return features def encode_pseudo_boxes(self, image, normed_boxes, normalize: bool = False, extract_type='v1'): features = self.visual.extract_roi_features(image, normed_boxes, extract_type=extract_type) if normalize: features = F.normalize(features, dim=-1) return features def _pool_masks(self, image, masks, normalize, mask_attn=False): if mask_attn: mask_pooled = self.visual.mask_attn_pool(image, masks) else: mask_pooled = self.visual.mask_pool(image, masks) if normalize: mask_pooled = F.normalize(mask_pooled, dim=-1) return mask_pooled def _pool_masks_v3(self, image, masks, normalize): mask_pooled_v1, x_dense = self.visual.mask_attn_pool(image, masks, return_dense=True) x_dense = F.normalize(x_dense, dim=-1).flatten(1, 2) # bs, h*w, c x_dense = torch.repeat_interleave( x_dense, torch.tensor([len(m) for m in masks], device=x_dense.device), dim=0) masks = torch.cat(masks).float().flatten(-2, -1) # bs, h*w mask_pooled_v2 = (x_dense * masks.unsqueeze(-1)).sum(1) / masks.sum(1, keepdim=True) if normalize: mask_pooled_v1 = F.normalize(mask_pooled_v1, dim=-1) mask_pooled_v2 = F.normalize(mask_pooled_v2, dim=-1) return mask_pooled_v1, mask_pooled_v2 def encode_masks(self, image, masks, normalize=True, mask_attn=False): return self._pool_masks(image, masks, normalize, mask_attn) def encode_text(self, text, normalize: bool = False): cast_dtype = self.transformer.get_cast_dtype() x = self.token_embedding(text).to(cast_dtype) # [batch_size, n_ctx, d_model] x = x + self.positional_embedding.to(cast_dtype) x = x.permute(1, 0, 2) # NLD -> LND x = self.transformer(x, attn_mask=self.attn_mask) x = x.permute(1, 0, 2) # LND -> NLD x = self.ln_final(x) # [batch_size, n_ctx, transformer.width] # take features from the eot embedding (eot_token is the highest number in each sequence) x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection return F.normalize(x, dim=-1) if normalize else x def forward(self, image, text=None): image_features = self.encode_image(image, normalize=True) if text is None: text_features = None else: text_features = self.encode_text(text, normalize=True) if self.output_dict: return { "image_features": image_features, "text_features": text_features, "logit_scale": self.logit_scale.exp() } return image_features, text_features, self.logit_scale.exp() def train(self, mode: bool = True): if not isinstance(mode, bool): raise ValueError("training mode is expected to be boolean") self.training = mode for name, module in self.named_children(): if name == 'visual': if mode: logging.info(f'========Set module {name} as train mode========') else: logging.info(f'========Set module {name} as eval mode========') module.train(mode) else: 
logging.info(f'========Set module {name} as eval mode========') module.train(mode=False) return self class CustomTextCLIP(nn.Module): output_dict: torch.jit.Final[bool] def __init__( self, embed_dim: int, vision_cfg: CLIPVisionCfg, text_cfg: CLIPTextCfg, quick_gelu: bool = False, cast_dtype: Optional[torch.dtype] = None, output_dict: bool = False, ): super().__init__() self.output_dict = output_dict self.visual = _build_vision_tower(embed_dim, vision_cfg, quick_gelu, cast_dtype) self.text = _build_text_tower(embed_dim, text_cfg, quick_gelu, cast_dtype) self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07)) def lock_image_tower(self, unlocked_groups=0, freeze_bn_stats=False): # lock image tower as per LiT - https://arxiv.org/abs/2111.07991 self.visual.lock(unlocked_groups=unlocked_groups, freeze_bn_stats=freeze_bn_stats) def lock_text_tower(self, unlocked_layers: int = 0, freeze_layer_norm: bool = True): self.text.lock(unlocked_layers, freeze_layer_norm) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.visual.set_grad_checkpointing(enable) self.text.set_grad_checkpointing(enable) def encode_pseudo_boxes(self, image, normed_boxes, normalize: bool = False): features = self.visual.extract_roi_features(image, normed_boxes) return F.normalize(features, dim=-1) if normalize else features def encode_image(self, image, normalize: bool = False): features = self.visual(image) return F.normalize(features, dim=-1) if normalize else features def encode_text(self, text, normalize: bool = False): features = self.text(text) return F.normalize(features, dim=-1) if normalize else features def forward(self, image, text): image_features = self.encode_image(image, normalize=True) if text is None: text_features = None else: text_features = self.encode_text(text, normalize=True) if self.output_dict: return { "image_features": image_features, "text_features": text_features, "logit_scale": self.logit_scale.exp() } return image_features, text_features, self.logit_scale.exp() def convert_weights_to_lp(model: nn.Module, dtype=torch.float16): """Convert applicable model parameters to low-precision (bf16 or fp16)""" def _convert_weights(l): if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)): l.weight.data = l.weight.data.to(dtype) if l.bias is not None: l.bias.data = l.bias.data.to(dtype) if isinstance(l, (nn.MultiheadAttention, Attention)): for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]: tensor = getattr(l, attr) if tensor is not None: tensor.data = tensor.data.to(dtype) for name in ["text_projection", "proj"]: if hasattr(l, name): attr = getattr(l, name) if attr is not None: attr.data = attr.data.to(dtype) model.apply(_convert_weights) convert_weights_to_fp16 = convert_weights_to_lp # backwards compat # used to maintain checkpoint compatibility def convert_to_custom_text_state_dict(state_dict: dict): if 'text_projection' in state_dict: # old format state_dict, move text tower -> .text new_state_dict = {} for k, v in state_dict.items(): if any(k.startswith(p) for p in ( 'text_projection', 'positional_embedding', 'token_embedding', 'transformer', 'ln_final', )): k = 'text.' 
+ k new_state_dict[k] = v return new_state_dict return state_dict def build_model_from_openai_state_dict( state_dict: dict, quick_gelu=True, cast_dtype=torch.float16, ): vit = "visual.proj" in state_dict if vit: vision_width = state_dict["visual.conv1.weight"].shape[0] vision_layers = len( [k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")]) vision_patch_size = state_dict["visual.conv1.weight"].shape[-1] grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5) image_size = vision_patch_size * grid_size else: counts: list = [ len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}"))) for b in [1, 2, 3, 4]] vision_layers = tuple(counts) vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0] output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5) vision_patch_size = None assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0] image_size = output_width * 32 embed_dim = state_dict["text_projection"].shape[1] context_length = state_dict["positional_embedding"].shape[0] vocab_size = state_dict["token_embedding.weight"].shape[0] transformer_width = state_dict["ln_final.weight"].shape[0] transformer_heads = transformer_width // 64 transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith(f"transformer.resblocks"))) vision_cfg = CLIPVisionCfg( layers=vision_layers, width=vision_width, patch_size=vision_patch_size, image_size=image_size, ) text_cfg = CLIPTextCfg( context_length=context_length, vocab_size=vocab_size, width=transformer_width, heads=transformer_heads, layers=transformer_layers, ) model = CLIP( embed_dim, vision_cfg=vision_cfg, text_cfg=text_cfg, quick_gelu=quick_gelu, # OpenAI models were trained with QuickGELU cast_dtype=cast_dtype, ) for key in ["input_resolution", "context_length", "vocab_size"]: state_dict.pop(key, None) convert_weights_to_fp16(model) # OpenAI state dicts are partially converted to float16 model.load_state_dict(state_dict) return model.eval() def trace_model(model, batch_size=256, device=torch.device('cpu')): model.eval() image_size = model.visual.image_size example_images = torch.ones((batch_size, 3, image_size, image_size), device=device) example_text = torch.zeros((batch_size, model.context_length), dtype=torch.int, device=device) model = torch.jit.trace_module( model, inputs=dict( forward=(example_images, example_text), encode_text=(example_text,), encode_image=(example_images,) )) model.visual.image_size = image_size return model def resize_pos_embed(state_dict, model, interpolation: str = 'bicubic', antialias: bool = True): # Rescale the grid of position embeddings when loading from state_dict old_pos_embed = state_dict.get('visual.positional_embedding', None) if old_pos_embed is None or not hasattr(model.visual, 'grid_size'): return
grid_size = to_2tuple(model.visual.grid_size)
9
2023-12-09 05:43:08+00:00
24k
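build_model_from_openai_state_dict in this row recovers the whole vision config from tensor shapes alone. The grid-size arithmetic is worth checking in isolation; the numbers below assume a ViT-B/16 checkpoint at 224px input.

# Shape arithmetic from build_model_from_openai_state_dict, on assumed ViT-B/16
# values: image size falls out of the positional-embedding length and patch size.
pos_embed_len = 197   # visual.positional_embedding.shape[0] == 1 + 14 * 14
patch_size = 16       # visual.conv1.weight.shape[-1]

grid_size = round((pos_embed_len - 1) ** 0.5)  # 14
image_size = patch_size * grid_size            # 224
print(grid_size, image_size)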
LkPrtctrd/BSL-V53
Heart/Logic/LogicLaserMessageFactory.py
[ { "identifier": "ClientHelloMessage", "path": "Heart/Packets/Client/Authentification/ClientHelloMessage.py", "snippet": "class ClientHelloMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields)...
from Heart.Packets.Client.Authentification.ClientHelloMessage import ClientHelloMessage from Heart.Packets.Client.Authentification.LoginMessage import LoginMessage from Heart.Packets.Client.Battle.AskForBattleEndMessage import AskForBattleEndMessage from Heart.Packets.Client.Home.ChangeAvatarNameMessage import ChangeAvatarNameMessage from Heart.Packets.Client.Home.EndClientTurnMessage import EndClientTurnMessage from Heart.Packets.Client.Home.GoHomeFromOfflinePractiseMessage import GoHomeFromOfflinePractiseMessage from Heart.Packets.Client.Home.GoHomeMessage import GoHomeMessage from Heart.Packets.Client.Home.GetPlayerProfileMessage import GetPlayerProfileMessage from Heart.Packets.Client.Home.AskForAllianceDataMessage import AskForAllianceDataMessage from Heart.Packets.Client.Socket.KeepAliveMessage import KeepAliveMessage from Heart.Packets.Server.Authentification.LoginFailedMessage import LoginFailedMessage from Heart.Packets.Server.Authentification.LoginOkMessage import LoginOkMessage from Heart.Packets.Server.Authentification.OutOfSyncMessage import OutOfSyncMessage from Heart.Packets.Server.Authentification.ServerHelloMessage import ServerHelloMessage from Heart.Packets.Server.Battle.BattleEndMessage import BattleEndMessage from Heart.Packets.Server.Home.AvailableServerCommandMessage import AvailableServerCommandMessage from Heart.Packets.Server.Home.LobbyInfoMessage import LobbyInfoMessage from Heart.Packets.Server.Home.OwnHomeDataMessage import OwnHomeDataMessage from Heart.Packets.Server.Socket.KeepAliveServerMessage import KeepAliveServerMessage from Heart.Packets.Server.Home.PlayerProfileMessage import PlayerProfileMessage from Heart.Packets.Server.Home.MyAllianceMessage import MyAllianceMessage from Heart.Packets.Server.Home.AllianceDataMessage import AllianceDataMessage
17,267
16939: 'AskApiTokenMessage', 17000: 'LogicAccountTransferCodeRequestMessage', 17190: 'JoinAllianceUsingTokenMessage', 17337: 'UnbotifyReportMessage', 17338: 'AdjustPackageMessage', 17750: GoHomeFromOfflinePractiseMessage, #v50 18686: 'SetSupportedCreatorMessage', 19001: 'LatencyTestResultMessage', 19002: 'UdpLatencyTestRequestMessage', 19003: 'TriggerStartLatencyTestMessage', 19004: 'RequestLatencyTestStatusMessage', 20000: 'SetEncryptionMessage', 20100: ServerHelloMessage, 20101: 'CreateAccountOkMessage', 20103: LoginFailedMessage, 20104: LoginOkMessage, 20105: 'FriendListMessage', 20106: 'FriendListUpdateMessage', 20107: 'AddableFriendsMessage', 20108: KeepAliveServerMessage, 20109: 'FriendOnlineStatusMessage', 20110: 'FriendLoggedInMessage', 20111: 'FriendLoggedOutMessage', 20112: 'AddFriendFailedMessage', 20117: 'ReportUserStatusMessage', 20118: 'ChatAccountBanStatusMessage', 20121: 'BillingRequestFailedMessage', 20132: 'UnlockAccountOkMessage', 20133: 'UnlockAccountFailedMessage', 20151: 'AppleBillingProcessedByServerMessage', 20152: 'GoogleBillingProcessedByServerMessage', 20153: 'TencentBillingProcessedByServerMessage', 20154: 'CafeBazaarBillingProcessedByServerMessage', 20156: 'KunlunBillingProcessedByServerMessage', 20161: 'ShutdownStartedMessage', 20171: 'PersonalBreakStartedMessage', 20173: 'YoozooBillingProcessedByServerMessage', 20199: 'FriendSuggestionsMessage', 20205: 'AvatarNameChangeFailedMessage', 20206: 'AvatarOnlineStatusUpdated', 20207: 'AllianceOnlineStatusUpdatedMessage', 20300: 'AvatarNameCheckResponseMessage', 20402: 'CreateGameFailedMessage', 20405: 'MatchMakingStatusMessage', 20406: 'MatchMakingCancelledMessage', 20501: 'AcceptFriendFailedMessage', 20523: 'YoozooOrderAvailableMessage', 20545: 'YoozooOrderDeliveryFailedMessage', 20559: 'StartLoadingMessage', 20801: 'NotificationMessage', 20802: 'OpponentRejoinsMatchNotificationMessage', 20931: 'AntiAddictionDataUpdatedMessage', 22089: 'GetTokenFriendResultMessage', 22100: 'CreatePlayerMapResponseMessage', 22101: 'DeletePlayerMapResponseMessage', 22102: 'PlayerMapsMessage', 22103: 'UpdatePlayerMapResponseMessage', 22104: 'SubmitPlayerMapResponseMessage', 22105: 'PublishPlayerMapResponseMessage', 22106: 'ChangePlayerMapNameMResponseMessage', 22107: 'PlayerMapInfoUpdatedMessage', 22109: 'DebugPlayerMapReviewResultOverrideSetMessage', 22111: 'PlayerMapGreenlightedMessage', 22125: 'ReportPlayerMapResponseMessage', 22150: 'RankedMatchStartMessage', 22151: 'RankedMatchBanStartedMessage', 22152: 'RankedMatchBanHeroResponseMessage', 22153: 'RankedMatchBanEndedMessage', 22154: 'RankedMatchPickStartedMessage', 22155: 'RankedMatchPickHeroFailedMessage', 22156: 'RankedMatchHeroPickedMessage', 22157: 'RankedMatchHeroDataUpdatedMessage', 22158: 'RankedMatchFinalPreparationStartedMessage', 22159: 'RankedMatchTerminatedMessage', 22202: 'MapPreviewMessage', 22377: 'GoogleServiceAccountBoundMessage', 22687: 'GamecenterAccountAlreadyBoundMessage', 22957: 'PvpMatchmakeNotificationMessage', 23067: 'SCIDLogoutAllDevicesResultMessage', 23302: 'GetAllianceInviteTokenResultMessage', 23456: BattleEndMessage, 23457: LobbyInfoMessage, 23458: 'BattleLogMessage', 23459: 'BattleLogReplayAvailableMessage', 23494: 'GoogleServiceAccountAlreadyBoundMessage', 23774: 'PlayerJWTokenMessage', 24101: OwnHomeDataMessage, 24104: OutOfSyncMessage, 24105: 'SpectacleFailedMessage', 24106: 'StopHomeLogicMessage', 24108: 'MatchmakeFailedMessage', 24109: 'VisionUpdateMessage', 24111: AvailableServerCommandMessage, 24112: 'UdpConnectionInfoMessage', 24113: 
PlayerProfileMessage, 24114: 'HomeBattleReplayDataMessage', 24115: 'ServerErrorMessage', 24116: 'HomeBattleReplayFailedMessage', 24117: 'HomeBattleReplayViewedMessage', 24123: 'SeasonRewardsMessage', 24124: 'TeamMessage', 24125: 'TeamLeftMessage', 24129: 'TeamErrorMessage', 24130: 'TeamGameStartingMessage', 24131: 'TeamStreamMessage', 24177: 'SetRegionResponseMessage', 24178: 'SetCountryResponseMessage', 24199: 'LookForGameRoomRequestMessage', 24201: 'FacebookAccountBoundMessage', 24202: 'FacebookAccountAlreadyBoundMessage', 24203: 'KakaoAccountBoundMessage', 24204: 'KakaoAccountAlreadyBoundMessage', 24205: 'LineAccountAlreadyBoundMessage', 24206: 'LineAccountBoundMessage', 24214: 'FacebookAccountUnboundMessage', 24215: 'KakaoAccountUnboundMessage', 24216: 'LineAccountUnboundMessage', 24220: 'TencentAccountBoundMessage', 24221: 'TencentAccountAlreadyBoundMessage', 24223: 'tencentCheckCanPayResponseMessage',
class LogicLaserMessageFactory: messagesList = { 10055: 'AskPlayerJWTokenMessage', 10099: 'ClientCryptoErrorMessage', 10100: ClientHelloMessage, 10101: LoginMessage, 10102: 'LoginUsingSessionMessage', 10103: 'CreateAccountMessage', 10107: 'ClientCapabilitiesMessage', 10108: KeepAliveMessage, 10109: 'UdpCheckConnectionMessage', 10110: 'AnalyticEventMessage', 10111: 'AccountIdentifiersMessage', 10112: 'AuthenticationCheckMessage', 10113: 'SetDeviceTokenMessage', 10116: 'ResetAccountMessage', 10117: 'ReportUserMessage', 10118: 'AccountSwitchedMessage', 10119: 'ReportAllianceStreamMessage', 10121: 'UnlockAccountMessage', 10150: 'AppleBillingRequestMessage', 10151: 'GoogleBillingRequestMessage', 10152: 'TencentBillingRequestMessage', 10153: 'CafeBazaarBillingRequestMessage', 10159: 'KunlunBillingRequestMessage', 10160: 'BillingCancelledByClientMessage', 10177: 'ClientInfoMessage', 10212: ChangeAvatarNameMessage, 10309: 'GetAllianceInviteTokenMessage', 10321: 'AttributionEventMessage', 10401: 'CreateGameMessage', 10501: 'AcceptFriendMessage', 10502: 'AddFriendMessage', 10503: 'AskForAddableFriendsMessage', 10504: 'AskForFriendListMessage', 10506: 'RemoveFriendMessage', 10507: 'AddFriendByEmailMessage', 10509: 'AddFriendByAvatarNameAndCodeMessage', 10512: 'AskForPlayingGamecenterFriendsMessage', 10513: 'AskForPlayingFacebookFriendsMessage', 10514: 'AskForPlayingKakaoFriendsMessage', 10515: 'AskForPlayingTencentFriendsMessage', 10516: 'AskForPlayingLineFriendsMessage', 10517: 'AskForPlayingSupercellFriendsMessage', 10523: 'YoozooBillingRequestMessage', 10555: 'ClientInputMessage', 10576: 'SetBlockFriendRequestsMessage', 10599: 'AskForFriendSuggestionsMessage', 10636: 'SCIDBindAccountMessage', 11736: 'SCIDLogoutAllDevicesMessage', 12100: 'CreatePlayerMapMessage', 12101: 'DeletePlayerMapMessage', 12102: 'GetPlayerMapsMessage', 12103: 'UpdatePlayerMapMessage', 12104: 'SubmitPlayerMapMessage', 12105: 'PublishPlayerMapMessage', 12106: 'ChangePlayerMapNameMessage', 12107: 'EnterMapEditorMessage', 12108: 'GoHomeFromMapEditorMessage', 12110: 'TeamSetPlayerMapMessage', 12111: 'SignoffPlayerMapMessage', 12125: 'ReportPlayerMapMessage', 12152: 'RankedMatchBanHeroMessage', 12155: 'RankedMatchPickHeroMessage', 12157: 'RankedMatchUpdateHeroDataMessage', 12905: 'GetCurrentBattleReplayDataMessage', 12998: 'SetCountryMessage', 13922: 'AcceptTokenFriendMessage', 14101: GoHomeMessage, 14102: EndClientTurnMessage, 14103: 'StartGameMessage', 14104: 'StartSpectateMessage', 14105: 'HomeLogicStoppedMessage', 14106: 'CancelMatchmakingMessage', 14107: 'StopSpectateMessage', 14108: 'GoHomeFromSpectateMessage', #14109: GoHomeFromOfflinePractiseMessage, //before v50 14110: AskForBattleEndMessage, #14113: GetPlayerProfileMessage, //before v50 14114: 'GetBattleLogMessage', 14115: 'BattleLogViewReplayMessage', 14116: 'ViewReplayByStringMessage', 14117: 'RequestMatchCancelMessage', 14118: 'SinglePlayerMatchRequestMessage', 14166: 'ChronosEventSeenMessage', 14167: 'ChronosEventSeenMessage', 14177: 'PlayAgainMessage', 14178: 'DebugCommandMessage', 14199: 'LookForGameRoomRequestMessage', 14211: 'UnbindFacebookAccountMessage', 14201: 'BindFacebookAccountMessage', 14202: 'BindKakaoAccountMessage', 14203: 'BingLineAccountMessage', 14212: 'BindGamecenterAccountMessage', 14213: 'UnbindKakaoAccountMessage', 14214: 'UnbindLineAccountMessage', 14262: 'BindGoogleServiceAccountMessage', 14266: 'BindTencentAccountMessage', 14268: 'TencentCheckCanPayMessage', 14276: 'TencentAntiAddictionInstructionExecutedMessage', 14277: 
'GetSeasonRewardsMessage', 14299: 'SetAllianceCountryMessage', 14301: 'CreateAllianceMessage', 14302: AskForAllianceDataMessage, 14303: 'AskForJoinableAlliancesListMessage', 14304: 'AskForAllianceStreamMessage', 14305: 'JoinAllianceMessage', 14306: 'ChangeAllianceMemberRoleMessage', 14307: 'KickAllianceMemberMessage', 14308: 'LeaveAllianceMessage', 14315: 'ChatToAllianceStreamMessage', 14316: 'ChangeAllianceSettingsMessage', 14317: 'RequestJoinAllianceMessage', 14321: 'RespondToAllianceJoinRequestMessage', 14322: 'SendAllianceInvitationMessage', 14323: 'JoinAllianceUsingInvitationMessage', 14324: 'SearchAlliancesMessage', 14326: 'SendAllianceInvitationToFriendMessage', 14330: 'SendAllianceMailMessage', 14350: 'TeamCreateMessage', 14351: 'TeamJoinMessage', 14352: 'TeamKickMessage', 14353: 'TeamLeaveMessage', 14354: 'TeamChangeMemberSettingsMessage', 14355: 'TeamSetMemberReadyMessage', 14356: 'TeamTogglePractiseMessage', 14357: 'TeamToggleMemberSideMessage', 14358: 'TeamSpectateMessage', 14359: 'TeamChatMessage', 14360: 'TeamPostAdMessage', 14361: 'TeamMemberStatusMessage', 14362: 'TeamSetEventMessage', 14363: 'TeamSetLocationMessage', 14364: 'TeamReportChatMessage', 14365: 'TeamInviteMessage', 14366: 'PlayerStatusMessage', 14367: 'TeamClearInviteMessage', 14368: 'TeamInviteResponseMessage', 14369: 'TeamPremadeChatMessage', 14370: 'TeamAllianceMemberInviteMessage', 14371: 'TeamJoinOrCreateGameRoomMessage', 14372: 'TeamToggleSettingsMessage', 14373: 'TeamBotSlotDisableMessage', 14403: 'GetLeaderboardMessage', 14405: 'AskForAvatarStreamMessage', 14406: 'AskForBattleReplayStreamMessage', 14418: 'RemoveAvatarStreamEntryMessage', 14469: 'AlliancePremadeChatMessage', 14479: 'TeamInvitationResponseMessage', 14600: 'AvatarNameCheckRequestMessage', 14700: 'ListBrawlTvChannelsMessage', 14701: 'TuneBrawlTvChannelMessage', 14715: 'SendGlobalChatLineMessage', 14777: 'SetInvitesBlockedMessage', 14778: 'SetTeamChatMutedMessage', 14867: 'SetRegionMessage', 14880: 'TeamRequestJoinCancelMessage', 14881: 'TeamRequestJoinMessage', 14882: 'TeamRequestJoinApproveMessage', 15081: GetPlayerProfileMessage, #v50 15793: 'GetTokenFriendMessage', 16000: 'LogicDeviceLinkCodeRequestMessage', 16001: 'LogicDeviceLinkMenuClosedMessage', 16002: 'LogicDeviceLinkEnterCodeMessage', 16003: 'LogicDeviceLinkConfirmYesMessage', 16939: 'AskApiTokenMessage', 17000: 'LogicAccountTransferCodeRequestMessage', 17190: 'JoinAllianceUsingTokenMessage', 17337: 'UnbotifyReportMessage', 17338: 'AdjustPackageMessage', 17750: GoHomeFromOfflinePractiseMessage, #v50 18686: 'SetSupportedCreatorMessage', 19001: 'LatencyTestResultMessage', 19002: 'UdpLatencyTestRequestMessage', 19003: 'TriggerStartLatencyTestMessage', 19004: 'RequestLatencyTestStatusMessage', 20000: 'SetEncryptionMessage', 20100: ServerHelloMessage, 20101: 'CreateAccountOkMessage', 20103: LoginFailedMessage, 20104: LoginOkMessage, 20105: 'FriendListMessage', 20106: 'FriendListUpdateMessage', 20107: 'AddableFriendsMessage', 20108: KeepAliveServerMessage, 20109: 'FriendOnlineStatusMessage', 20110: 'FriendLoggedInMessage', 20111: 'FriendLoggedOutMessage', 20112: 'AddFriendFailedMessage', 20117: 'ReportUserStatusMessage', 20118: 'ChatAccountBanStatusMessage', 20121: 'BillingRequestFailedMessage', 20132: 'UnlockAccountOkMessage', 20133: 'UnlockAccountFailedMessage', 20151: 'AppleBillingProcessedByServerMessage', 20152: 'GoogleBillingProcessedByServerMessage', 20153: 'TencentBillingProcessedByServerMessage', 20154: 'CafeBazaarBillingProcessedByServerMessage', 20156: 
'KunlunBillingProcessedByServerMessage', 20161: 'ShutdownStartedMessage', 20171: 'PersonalBreakStartedMessage', 20173: 'YoozooBillingProcessedByServerMessage', 20199: 'FriendSuggestionsMessage', 20205: 'AvatarNameChangeFailedMessage', 20206: 'AvatarOnlineStatusUpdated', 20207: 'AllianceOnlineStatusUpdatedMessage', 20300: 'AvatarNameCheckResponseMessage', 20402: 'CreateGameFailedMessage', 20405: 'MatchMakingStatusMessage', 20406: 'MatchMakingCancelledMessage', 20501: 'AcceptFriendFailedMessage', 20523: 'YoozooOrderAvailableMessage', 20545: 'YoozooOrderDeliveryFailedMessage', 20559: 'StartLoadingMessage', 20801: 'NotificationMessage', 20802: 'OpponentRejoinsMatchNotificationMessage', 20931: 'AntiAddictionDataUpdatedMessage', 22089: 'GetTokenFriendResultMessage', 22100: 'CreatePlayerMapResponseMessage', 22101: 'DeletePlayerMapResponseMessage', 22102: 'PlayerMapsMessage', 22103: 'UpdatePlayerMapResponseMessage', 22104: 'SubmitPlayerMapResponseMessage', 22105: 'PublishPlayerMapResponseMessage', 22106: 'ChangePlayerMapNameMResponseMessage', 22107: 'PlayerMapInfoUpdatedMessage', 22109: 'DebugPlayerMapReviewResultOverrideSetMessage', 22111: 'PlayerMapGreenlightedMessage', 22125: 'ReportPlayerMapResponseMessage', 22150: 'RankedMatchStartMessage', 22151: 'RankedMatchBanStartedMessage', 22152: 'RankedMatchBanHeroResponseMessage', 22153: 'RankedMatchBanEndedMessage', 22154: 'RankedMatchPickStartedMessage', 22155: 'RankedMatchPickHeroFailedMessage', 22156: 'RankedMatchHeroPickedMessage', 22157: 'RankedMatchHeroDataUpdatedMessage', 22158: 'RankedMatchFinalPreparationStartedMessage', 22159: 'RankedMatchTerminatedMessage', 22202: 'MapPreviewMessage', 22377: 'GoogleServiceAccountBoundMessage', 22687: 'GamecenterAccountAlreadyBoundMessage', 22957: 'PvpMatchmakeNotificationMessage', 23067: 'SCIDLogoutAllDevicesResultMessage', 23302: 'GetAllianceInviteTokenResultMessage', 23456: BattleEndMessage, 23457: LobbyInfoMessage, 23458: 'BattleLogMessage', 23459: 'BattleLogReplayAvailableMessage', 23494: 'GoogleServiceAccountAlreadyBoundMessage', 23774: 'PlayerJWTokenMessage', 24101: OwnHomeDataMessage, 24104: OutOfSyncMessage, 24105: 'SpectacleFailedMessage', 24106: 'StopHomeLogicMessage', 24108: 'MatchmakeFailedMessage', 24109: 'VisionUpdateMessage', 24111: AvailableServerCommandMessage, 24112: 'UdpConnectionInfoMessage', 24113: PlayerProfileMessage, 24114: 'HomeBattleReplayDataMessage', 24115: 'ServerErrorMessage', 24116: 'HomeBattleReplayFailedMessage', 24117: 'HomeBattleReplayViewedMessage', 24123: 'SeasonRewardsMessage', 24124: 'TeamMessage', 24125: 'TeamLeftMessage', 24129: 'TeamErrorMessage', 24130: 'TeamGameStartingMessage', 24131: 'TeamStreamMessage', 24177: 'SetRegionResponseMessage', 24178: 'SetCountryResponseMessage', 24199: 'LookForGameRoomRequestMessage', 24201: 'FacebookAccountBoundMessage', 24202: 'FacebookAccountAlreadyBoundMessage', 24203: 'KakaoAccountBoundMessage', 24204: 'KakaoAccountAlreadyBoundMessage', 24205: 'LineAccountAlreadyBoundMessage', 24206: 'LineAccountBoundMessage', 24214: 'FacebookAccountUnboundMessage', 24215: 'KakaoAccountUnboundMessage', 24216: 'LineAccountUnboundMessage', 24220: 'TencentAccountBoundMessage', 24221: 'TencentAccountAlreadyBoundMessage', 24223: 'tencentCheckCanPayResponseMessage',
24301: AllianceDataMessage,
21
2023-12-14 18:57:56+00:00
24k
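messagesList above maps packet ids either to an imported class (an implemented handler) or to a bare string (a known but unimplemented message). The factory's lookup method is not included in this row, so the sketch below is only a hypothetical reading of how such a table is typically consumed; create_message and the stub class are illustrative names, not the repository's API.

# Hypothetical lookup over a {packet_id: class-or-name} table like messagesList:
# class entries are instantiated, string entries are recognized but unhandled.
class KeepAliveMessage:
    def __init__(self, data):
        self.data = data

messages = {10108: KeepAliveMessage, 10110: "AnalyticEventMessage"}

def create_message(packet_id, data=b""):
    entry = messages.get(packet_id)
    if entry is None:
        return None                  # unknown packet id
    if isinstance(entry, str):
        return None                  # known id, no handler class yet
    return entry(data)               # implemented: build the message object

print(type(create_message(10108)).__name__)  # KeepAliveMessage
print(create_message(10110))                 # None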
GXNU-ZhongLab/ODTrack
lib/train/base_functions.py
[ { "identifier": "Lasot", "path": "lib/train/dataset/lasot.py", "snippet": "class Lasot(BaseVideoDataset):\n \"\"\" LaSOT dataset.\n\n Publication:\n LaSOT: A High-quality Benchmark for Large-scale Single Object Tracking\n Heng Fan, Liting Lin, Fan Yang, Peng Chu, Ge Deng, Sijia Yu, H...
import os import torch import lib.train.data.transforms as tfm from torch.utils.data.distributed import DistributedSampler from lib.train.dataset import Lasot, Got10k, MSCOCOSeq, ImagenetVID, TrackingNet from lib.train.dataset import Lasot_lmdb, Got10k_lmdb, MSCOCOSeq_lmdb, ImagenetVID_lmdb, TrackingNet_lmdb from lib.train.data import sampler, opencv_loader, processing, LTRLoader from lib.utils.misc import is_main_process
18,050
# datasets related def update_settings(settings, cfg): settings.print_interval = cfg.TRAIN.PRINT_INTERVAL settings.search_area_factor = {'template': cfg.DATA.TEMPLATE.FACTOR, 'search': cfg.DATA.SEARCH.FACTOR} settings.output_sz = {'template': cfg.DATA.TEMPLATE.SIZE, 'search': cfg.DATA.SEARCH.SIZE} settings.center_jitter_factor = {'template': cfg.DATA.TEMPLATE.CENTER_JITTER, 'search': cfg.DATA.SEARCH.CENTER_JITTER} settings.scale_jitter_factor = {'template': cfg.DATA.TEMPLATE.SCALE_JITTER, 'search': cfg.DATA.SEARCH.SCALE_JITTER} settings.grad_clip_norm = cfg.TRAIN.GRAD_CLIP_NORM settings.print_stats = None settings.batchsize = cfg.TRAIN.BATCH_SIZE settings.scheduler_type = cfg.TRAIN.SCHEDULER.TYPE def names2datasets(name_list: list, settings, image_loader): assert isinstance(name_list, list) datasets = [] for name in name_list: assert name in ["LASOT", "GOT10K_vottrain", "GOT10K_votval", "GOT10K_train_full", "GOT10K_official_val", "COCO17", "VID", "TRACKINGNET", ] # Tracking Task if name == "LASOT": if settings.use_lmdb: print("Building lasot dataset from lmdb") datasets.append(Lasot_lmdb(settings.env.lasot_lmdb_dir, split='train', image_loader=image_loader)) else: datasets.append(Lasot(settings.env.lasot_dir, split='train', image_loader=image_loader)) if name == "GOT10K_vottrain": if settings.use_lmdb: print("Building got10k from lmdb") datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='vottrain', image_loader=image_loader)) else: datasets.append(Got10k(settings.env.got10k_dir, split='vottrain', image_loader=image_loader)) if name == "GOT10K_train_full": if settings.use_lmdb: print("Building got10k_train_full from lmdb") datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='train_full', image_loader=image_loader)) else: datasets.append(Got10k(settings.env.got10k_dir, split='train_full', image_loader=image_loader)) if name == "GOT10K_votval": if settings.use_lmdb: print("Building got10k from lmdb") datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='votval', image_loader=image_loader)) else: datasets.append(Got10k(settings.env.got10k_dir, split='votval', image_loader=image_loader)) if name == "GOT10K_official_val": if settings.use_lmdb: raise ValueError("Not implement") else: datasets.append(Got10k(settings.env.got10k_val_dir, split=None, image_loader=image_loader)) if name == "COCO17": if settings.use_lmdb: print("Building COCO2017 from lmdb")
# datasets related def update_settings(settings, cfg): settings.print_interval = cfg.TRAIN.PRINT_INTERVAL settings.search_area_factor = {'template': cfg.DATA.TEMPLATE.FACTOR, 'search': cfg.DATA.SEARCH.FACTOR} settings.output_sz = {'template': cfg.DATA.TEMPLATE.SIZE, 'search': cfg.DATA.SEARCH.SIZE} settings.center_jitter_factor = {'template': cfg.DATA.TEMPLATE.CENTER_JITTER, 'search': cfg.DATA.SEARCH.CENTER_JITTER} settings.scale_jitter_factor = {'template': cfg.DATA.TEMPLATE.SCALE_JITTER, 'search': cfg.DATA.SEARCH.SCALE_JITTER} settings.grad_clip_norm = cfg.TRAIN.GRAD_CLIP_NORM settings.print_stats = None settings.batchsize = cfg.TRAIN.BATCH_SIZE settings.scheduler_type = cfg.TRAIN.SCHEDULER.TYPE def names2datasets(name_list: list, settings, image_loader): assert isinstance(name_list, list) datasets = [] for name in name_list: assert name in ["LASOT", "GOT10K_vottrain", "GOT10K_votval", "GOT10K_train_full", "GOT10K_official_val", "COCO17", "VID", "TRACKINGNET", ] # Tracking Task if name == "LASOT": if settings.use_lmdb: print("Building lasot dataset from lmdb") datasets.append(Lasot_lmdb(settings.env.lasot_lmdb_dir, split='train', image_loader=image_loader)) else: datasets.append(Lasot(settings.env.lasot_dir, split='train', image_loader=image_loader)) if name == "GOT10K_vottrain": if settings.use_lmdb: print("Building got10k from lmdb") datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='vottrain', image_loader=image_loader)) else: datasets.append(Got10k(settings.env.got10k_dir, split='vottrain', image_loader=image_loader)) if name == "GOT10K_train_full": if settings.use_lmdb: print("Building got10k_train_full from lmdb") datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='train_full', image_loader=image_loader)) else: datasets.append(Got10k(settings.env.got10k_dir, split='train_full', image_loader=image_loader)) if name == "GOT10K_votval": if settings.use_lmdb: print("Building got10k from lmdb") datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='votval', image_loader=image_loader)) else: datasets.append(Got10k(settings.env.got10k_dir, split='votval', image_loader=image_loader)) if name == "GOT10K_official_val": if settings.use_lmdb: raise ValueError("Not implement") else: datasets.append(Got10k(settings.env.got10k_val_dir, split=None, image_loader=image_loader)) if name == "COCO17": if settings.use_lmdb: print("Building COCO2017 from lmdb")
datasets.append(MSCOCOSeq_lmdb(settings.env.coco_lmdb_dir, version="2017", image_loader=image_loader))
8
2023-12-10 03:57:19+00:00
24k
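The names2datasets helper in the ODTrack record above is essentially a string-to-constructor dispatch over an allow-list of dataset names. Below is a minimal, self-contained sketch of that pattern under stated assumptions: the Lasot/Got10k stubs and the env dict of paths are placeholders, not the real lib.train.dataset classes.

# Sketch of name-to-dataset dispatch (stubs stand in for real dataset classes).
class Lasot:
    def __init__(self, root, split, image_loader=None):
        self.root, self.split = root, split

class Got10k:
    def __init__(self, root, split, image_loader=None):
        self.root, self.split = root, split

DATASET_BUILDERS = {
    'LASOT': lambda env, loader: Lasot(env['lasot_dir'], split='train', image_loader=loader),
    'GOT10K_vottrain': lambda env, loader: Got10k(env['got10k_dir'], split='vottrain', image_loader=loader),
}

def names2datasets(name_list, env, image_loader=None):
    assert isinstance(name_list, list)
    unknown = [n for n in name_list if n not in DATASET_BUILDERS]
    assert not unknown, f'unsupported dataset names: {unknown}'
    return [DATASET_BUILDERS[n](env, image_loader) for n in name_list]

env = {'lasot_dir': '/data/lasot', 'got10k_dir': '/data/got10k'}  # placeholder paths
print([type(d).__name__ for d in names2datasets(['LASOT', 'GOT10K_vottrain'], env)])

Keeping the allow-list assertion up front, as the original does, turns a config typo into an immediate failure instead of a silently empty dataset list.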
lumina-test/lumina
lumina/e2e_test/test_gbn.py
[ { "identifier": "get_qp_info_list", "path": "lumina/analyzer/main.py", "snippet": "def get_qp_info_list(switch_msg_snapshot):\n \"\"\" Get the list of QP info from the switch message snapshot\n\n Args:\n switch_msg_snapshot (str): The path to the switch message snapshot\n\n Returns:\n ...
import argparse, os, math, glob, logging, time, sys  # sys added: run_traffic() below calls sys.exit()
import lumina.analyzer.checker.integrity_check as integrity_check
import lumina.analyzer.checker.host_check as host_check
import lumina.analyzer.checker.gbn_check as gbn_check
import lumina.analyzer.checker.read_gbn_check as read_gbn_check
import lumina.orchestrator.host as host
import lumina.orchestrator.switch as switch
from lumina.analyzer.main import get_qp_info_list
from lumina.orchestrator.main import Orchestrator
from lumina.analyzer.counter.switch_counter import SwitchCounter
from lumina.analyzer.counter.host_counter import MLNXHostCounter, IntelHostCounter
from lumina.analyzer.pcap_processor.pcap_process import get_packet_list
from lumina.analyzer.measurer.latency_measure import LatencyMeasure
from lumina.utils.config_loggers import config_stream_handler, config_file_handler
from lumina.analyzer.packet_parser.roce_packet import TRIGGER_OOS, TRIGGER_TIMEOUT
15,233
trigger = nack.get_trigger() if trigger == TRIGGER_OOS: logger.info("\t\t Out of sequence (OOS) triggered retransmission") logger.info("\t\t But the NACK READ request indicates a loss (%d) before this packet (%d)" %\ (nack.get_roce_pkt_seq(), pkt.get_roce_pkt_seq())) logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) elif trigger == TRIGGER_TIMEOUT: logger.info("\t\t Timeout triggered retransmission") logger.info("\t\t But the NACK READ request indicates a loss (%d) before this packet (%d)" %\ (nack.get_roce_pkt_seq(), pkt.get_roce_pkt_seq())) logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) else: logger.error("\t\t NACK READ request should be triggered by either OOS or timeout") else: # For other verbs, we can only find a NACK in case of out of sequence arriving packets if latency_measurement.get_nack(pkt) != None: # Out of sequence/NACK triggered retransmission next_delivered_pkt_delay = latency_measurement.get_qp_next_delivered_pkt_latency(pkt) nack_gen_latency = latency_measurement.get_nack_gen_latency(pkt) nack_resp_latency = latency_measurement.get_nack_resp_latency(pkt) logger.info("\t\t Out of sequence (OOS) triggered retransmission") logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) logger.info('\t\t Next delivered packet delay: %fus' % (next_delivered_pkt_delay * 1e6)) logger.info("\t\t NACK generation latency: %fus" % (nack_gen_latency * 1e6)) logger.info('\t\t NACK response latency: %fus' % (nack_resp_latency * 1e6)) elif latency_measurement.get_qp_first_nack_before_retrans(pkt) != None: logger.info("\t\t Out of sequence (OOS) triggered retransmission") logger.info("\t\t But the NACK indicates a loss (%d) before this packet (%d)" % (latency_measurement.get_qp_first_nack_before_retrans(pkt).get_roce_pkt_seq(), pkt.get_roce_pkt_seq())) logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) else: logger.info("\t\t Timeout triggered retransmission") logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) def verify_results(orchestrator): """ Verify the experiment results Args: orchestrator (Orchestrator object): Orchestrator object that contains all the configurations Returns: N/A """ result_dir = orchestrator.result_path num_repeats = orchestrator.num_repeats mtu = orchestrator.traffic_conf['mtu'] msg_size = orchestrator.traffic_conf['message-size'] num_msgs_per_qp = orchestrator.traffic_conf['num-msgs-per-qp'] aggregate_pcap_filename = orchestrator.aggregate_pcap_filename port_map = {'requester': orchestrator.requester.conf['nic']['switch-port'], 'responder': orchestrator.responder.conf['nic']['switch-port'], 'requester-mirror': orchestrator.requester_mirror.conf['nic']['switch-port'], 'responder-mirror': orchestrator.responder_mirror.conf['nic']['switch-port']} requester_ip_list = orchestrator.get_requester_ip_list() responder_ip_list = orchestrator.get_responder_ip_list() for iter in range(num_repeats): iter = str(iter) result_logger = logging.getLogger('Analysis iter %s' % (iter)) result_logger.handlers.clear() config_file_handler(logger=result_logger, log_file=os.path.join(result_dir, iter, RESULT_FILENAME), no_format=True) result_logger.info("=" * 100) result_logger.info("Iteration %s" % iter) switch_msg_snapshot = os.path.join(result_dir, iter, switch.SWITCH_RESULT_DIR, switch.SWITCH_MESSAGE_SNAPSHOT) switch_state_snapshot = os.path.join(result_dir, iter, switch.SWITCH_RESULT_DIR, switch.SWITCH_STATE_SNAPSHOT) pcap_filename = os.path.join(result_dir, iter, host.PCAP_RESULT_DIR, aggregate_pcap_filename) requester_counter_start = os.path.join(result_dir, iter,
host.RDMA_RESULT_DIR, host.REQ_START_COUNTER_FILE_NAME) requester_counter_finish = os.path.join(result_dir, iter, host.RDMA_RESULT_DIR, host.REQ_FINISH_COUNTER_FILE_NAME) responder_counter_start = os.path.join(result_dir, iter, host.RDMA_RESULT_DIR, host.RSP_START_COUNTER_FILE_NAME) responder_counter_finish = os.path.join(result_dir, iter, host.RDMA_RESULT_DIR, host.RSP_FINISH_COUNTER_FILE_NAME) switch_counter = SwitchCounter(switch_state_snapshot, port_map) if orchestrator.requester.is_mlnx_nic(): requester_counter = MLNXHostCounter(requester_counter_start, requester_counter_finish) elif orchestrator.requester.is_intel_nic(): requester_counter = IntelHostCounter(requester_counter_start, requester_counter_finish) else: logging.error("Unknown NIC Vendor for rdma requester.") requester_counter = None if orchestrator.responder.is_mlnx_nic(): responder_counter = MLNXHostCounter(responder_counter_start, responder_counter_finish) elif orchestrator.responder.is_intel_nic(): responder_counter = IntelHostCounter(responder_counter_start, responder_counter_finish) else: logging.error("Unknown NIC Vendor for rdma responder.") responder_counter = None
## All logs will be logged into file LOG_FILENAME LOG_FILENAME = "test_gbn.log" ## Results (checkers and measurements) will also be dumped into file RESULT_FILENAME RESULT_FILENAME = "result.log" ## Max # of retries for each experiment iteration MAX_NB_EXP_RETRIES = 3 def setup_root_logger(orchestrator): """ Setup the root logger for the test Args: orchestrator (Orchestrator object): Orchestrator object that contains all the configurations Returns: N/A """ root_logger = logging.getLogger() root_logger.handlers.clear() config_stream_handler(root_logger) config_file_handler(logger=root_logger, log_file=os.path.join(orchestrator.result_path, LOG_FILENAME), no_format=False) def run_traffic(orchestrator): """ Run the traffic and collect the results Args: orchestrator (Orchestrator object): Orchestrator object that contains all the configurations Returns: bool: True if the experiment is successful, False otherwise """ orchestrator.rm_old_files() if orchestrator.sync_and_compile() == False: logging.error("Failed to sync and compile the code") sys.exit(-1) logging.info("Sync and compile completed") if orchestrator.generate_switch_config_file() == False: logging.error("Failed to generate switch configuration file") sys.exit(-1) num_repeats = orchestrator.get_num_repeats() for i in range(num_repeats): logging.info("=" * 100) nb_retry = 0 iter_result = False while nb_retry < MAX_NB_EXP_RETRIES: if orchestrator.run_experiment() == False: logging.error("Iteration %d: Failed to complete experiment" % i) logging.error("Iteration %d: Rerun experiment (retry: %d)" % (i, nb_retry)) nb_retry += 1 orchestrator.clean_up() time.sleep(5) continue logging.info("Iteration %d: Completed experiment" % i) try: orchestrator.clean_up() orchestrator.fetch_results(i) logging.info("Iteration %d: Fetch experiment results" % i) orchestrator.merge_traces(i) logging.info("Iteration %d: Merge the pcap files" % i) except: logging.error("Iteration %d: Result collection failed" % (i)) logging.error("Iteration %d: Rerun experiment (retry: %d)" % (i, nb_retry)) nb_retry += 1 time.sleep(5) continue if orchestrator.check_integrity(i) == False: logging.error("Iteration %d: Integrity check failed" % (i)) logging.error("Iteration %d: Rerun experiment (retry: %d)" % (i, nb_retry)) nb_retry += 1 time.sleep(5) continue iter_result = True break if iter_result is False: logging.error("Iteration %d: Still failed after %d retries" % (i, nb_retry)) return False return True def analyze_retrans_latency(pkt, latency_measurement, is_read, logger): """ Analyze the retransmission latency breakdown for an undelivered packet Args: pkt (Packet object): The undelivered packet latency_measurement (LatencyMeasure object): A LatencyMeasure object that can compute latency breakdown is_read (bool): If we use RDMA READ in this experiment logger (logging.Logger): A logger object Returns: N/A """ # All the undelivered packets should be retransmitted in our test cases if latency_measurement.get_retransmit_pkt(pkt) == None: logger.error("\t\t No retransmit packet found for this packet") logger.error("\t\t It is possible that this undelivered packet is a redundant transmission") return retrans_latency = latency_measurement.get_retransmit_latency(pkt) if is_read == True: # For RDMA READ, we should always find a NACK READ request that triggers retransmission nack = latency_measurement.get_nack(pkt) if nack is not None: trigger = nack.get_trigger() if trigger == TRIGGER_OOS: next_delivered_pkt_delay = latency_measurement.get_qp_next_delivered_pkt_latency(pkt)
nack_gen_latency = latency_measurement.get_nack_gen_latency(pkt) nack_resp_latency = latency_measurement.get_nack_resp_latency(pkt) logger.info("\t\t Out of sequence (OOS) triggered retransmission") logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) logger.info('\t\t Next delivered packet delay: %fus' % (next_delivered_pkt_delay * 1e6)) logger.info("\t\t NACK READ request generation latency: %fus" % (nack_gen_latency * 1e6)) logger.info('\t\t NACK READ request response latency: %fus' % (nack_resp_latency * 1e6)) elif trigger == TRIGGER_TIMEOUT: nack_resp_latency = latency_measurement.get_nack_resp_latency(pkt) logger.info("\t\t Timeout triggered retransmission") logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) logger.info('\t\t NACK READ request response latency: %fus' % (nack_resp_latency * 1e6)) else: logger.error("\t\t NACK READ request should be triggered by either OOS or timeout") else: nack = latency_measurement.get_qp_first_nack_before_retrans(pkt) if nack is None: logger.error("\t\t Cannot find the NACK READ request to recover this lost packet") return trigger = nack.get_trigger() if trigger == TRIGGER_OOS: logger.info("\t\t Out of sequence (OOS) triggered retransmission") logger.info("\t\t But the NACK READ request indicates a loss (%d) before this packet (%d)" %\ (nack.get_roce_pkt_seq(), pkt.get_roce_pkt_seq())) logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) elif trigger == TRIGGER_TIMEOUT: logger.info("\t\t Timeout triggered retransmission") logger.info("\t\t But the NACK READ request indicates a loss (%d) before this packet (%d)" %\ (nack.get_roce_pkt_seq(), pkt.get_roce_pkt_seq())) logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) else: logger.error("\t\t NACK READ request should be triggered by either OOS or timeout") else: # For other verbs, we can only find a NACK in case of out of sequence arriving packets if latency_measurement.get_nack(pkt) != None: # Out of sequence/NACK triggered retransmission next_delivered_pkt_delay = latency_measurement.get_qp_next_delivered_pkt_latency(pkt) nack_gen_latency = latency_measurement.get_nack_gen_latency(pkt) nack_resp_latency = latency_measurement.get_nack_resp_latency(pkt) logger.info("\t\t Out of sequence (OOS) triggered retransmission") logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) logger.info('\t\t Next delivered packet delay: %fus' % (next_delivered_pkt_delay * 1e6)) logger.info("\t\t NACK generation latency: %fus" % (nack_gen_latency * 1e6)) logger.info('\t\t NACK response latency: %fus' % (nack_resp_latency * 1e6)) elif latency_measurement.get_qp_first_nack_before_retrans(pkt) != None: logger.info("\t\t Out of sequence (OOS) triggered retransmission") logger.info("\t\t But the NACK indicates a loss (%d) before this packet (%d)" % (latency_measurement.get_qp_first_nack_before_retrans(pkt).get_roce_pkt_seq(), pkt.get_roce_pkt_seq())) logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) else: logger.info("\t\t Timeout triggered retransmission") logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) def verify_results(orchestrator): """ Verify the experiment results Args: orchestrator (Orchestrator object): Orchestrator object that contains all the configurations Returns: N/A """ result_dir = orchestrator.result_path num_repeats = orchestrator.num_repeats mtu = orchestrator.traffic_conf['mtu'] msg_size = orchestrator.traffic_conf['message-size'] num_msgs_per_qp = orchestrator.traffic_conf['num-msgs-per-qp'] aggregate_pcap_filename =
orchestrator.aggregate_pcap_filename port_map = {'requester': orchestrator.requester.conf['nic']['switch-port'], 'responder': orchestrator.responder.conf['nic']['switch-port'], 'requester-mirror': orchestrator.requester_mirror.conf['nic']['switch-port'], 'responder-mirror': orchestrator.responder_mirror.conf['nic']['switch-port']} requester_ip_list = orchestrator.get_requester_ip_list() responder_ip_list = orchestrator.get_responder_ip_list() for iter in range(num_repeats): iter = str(iter) result_logger = logging.getLogger('Analysis iter %s' % (iter)) result_logger.handlers.clear() config_file_handler(logger=result_logger, log_file=os.path.join(result_dir, iter, RESULT_FILENAME), no_format=True) result_logger.info("=" * 100) result_logger.info("Iteration %s" % iter) switch_msg_snapshot = os.path.join(result_dir, iter, switch.SWITCH_RESULT_DIR, switch.SWITCH_MESSAGE_SNAPSHOT) switch_state_snapshot = os.path.join(result_dir, iter, switch.SWITCH_RESULT_DIR, switch.SWITCH_STATE_SNAPSHOT) pcap_filename = os.path.join(result_dir, iter, host.PCAP_RESULT_DIR, aggregate_pcap_filename) requester_counter_start = os.path.join(result_dir, iter, host.RDMA_RESULT_DIR, host.REQ_START_COUNTER_FILE_NAME) requester_counter_finish = os.path.join(result_dir, iter, host.RDMA_RESULT_DIR, host.REQ_FINISH_COUNTER_FILE_NAME) responder_counter_start = os.path.join(result_dir, iter, host.RDMA_RESULT_DIR, host.RSP_START_COUNTER_FILE_NAME) responder_counter_finish = os.path.join(result_dir, iter, host.RDMA_RESULT_DIR, host.RSP_FINISH_COUNTER_FILE_NAME) switch_counter = SwitchCounter(switch_state_snapshot, port_map) if orchestrator.requester.is_mlnx_nic(): requester_counter = MLNXHostCounter(requester_counter_start, requester_counter_finish) elif orchestrator.requester.is_intel_nic(): requester_counter = IntelHostCounter(requester_counter_start, requester_counter_finish) else: logging.error("Unknown NIC Vendor for rdma requester.") requester_counter = None if orchestrator.responder.is_mlnx_nic(): responder_counter = MLNXHostCounter(responder_counter_start, responder_counter_finish) elif orchestrator.responder.is_intel_nic(): responder_counter = IntelHostCounter(responder_counter_start, responder_counter_finish) else: logging.error("Unknown NIC Vendor for rdma responder.") responder_counter = None
qp_info_list = get_qp_info_list(switch_msg_snapshot)
0
2023-12-09 08:21:14+00:00
24k
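analyze_retrans_latency() in the lumina record above breaks a retransmission down into trigger classification (a NACK implies out-of-sequence arrival, no NACK implies timeout) plus three intervals: lost packet to retransmission, loss detection to NACK, and NACK to retransmission. The sketch below restates that arithmetic on plain timestamps; the timestamp names and sample values are assumptions, since the real values come from LatencyMeasure over the mirrored pcap traces.

# Standalone model of the latency breakdown; all timestamps are made up.
OOS, TIMEOUT = 'out-of-sequence', 'timeout'

def retrans_breakdown(t_lost_pkt, t_retrans_pkt, t_nack=None, t_oos_arrival=None):
    """Classify the trigger and report the latency components in microseconds."""
    report = {'retrans_latency_us': (t_retrans_pkt - t_lost_pkt) * 1e6}
    if t_nack is None:
        report['trigger'] = TIMEOUT  # no NACK observed: retransmission was timer-driven
        return report
    report['trigger'] = OOS
    report['nack_gen_latency_us'] = (t_nack - t_oos_arrival) * 1e6   # OOS arrival -> NACK sent
    report['nack_resp_latency_us'] = (t_retrans_pkt - t_nack) * 1e6  # NACK -> retransmission
    return report

# Hypothetical timeline in seconds: packet lost at t=0, OOS packet arrives
# 40us later, NACK at 60us, retransmission observed at 120us.
print(retrans_breakdown(0.0, 120e-6, t_nack=60e-6, t_oos_arrival=40e-6))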
ebb-earl-co/tidal-wave
tidal_wave/main.py
[ { "identifier": "login", "path": "tidal_wave/login.py", "snippet": "def login(\n audio_format: AudioFormat,\n) -> Tuple[Optional[requests.Session], Optional[AudioFormat]]:\n \"\"\"Given a selected audio_format, either log in \"automatically\"\n via the Fire TV OAuth 2.0 flow, or ask for an Andr...
from contextlib import closing
from pathlib import Path
from typing import Optional, Union
from .login import login, AudioFormat, LogLevel
from .album import Album
from .artist import Artist
from .mix import Mix
from .playlist import Playlist
from .track import Track
from .video import Video
from .models import (
    match_tidal_url,
    TidalAlbum,
    TidalArtist,
    TidalMix,
    TidalPlaylist,
    TidalTrack,
    TidalVideo,
)
from platformdirs import user_music_path
from typing_extensions import Annotated
import logging
import typer
17,086
app = typer.Typer() @app.command() def main( tidal_url: Annotated[ str, typer.Argument( help="The Tidal album or artist or mix or playlist or track or video to download" ), ], audio_format: Annotated[ AudioFormat, typer.Option(case_sensitive=False) ] = AudioFormat.lossless.value, output_directory: Annotated[ Path, typer.Argument( help="The parent directory under which directory(ies) of files will be written" ), ] = user_music_path(), loglevel: Annotated[ LogLevel, typer.Option(case_sensitive=False) ] = LogLevel.info.value, include_eps_singles: Annotated[ bool, typer.Option( "--include-eps-singles", help="No-op unless passing TIDAL artist. Whether to include artist's EPs and singles with albums", ), ] = False, ): logging.basicConfig( format="%(asctime)s,%(msecs)03d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s", datefmt="%Y-%m-%d:%H:%M:%S", level=logging.getLevelName(loglevel.value), ) logger = logging.getLogger(__name__) tidal_resource: Optional[
app = typer.Typer() @app.command() def main( tidal_url: Annotated[ str, typer.Argument( help="The Tidal album or artist or mix or playlist or track or video to download" ), ], audio_format: Annotated[ AudioFormat, typer.Option(case_sensitive=False) ] = AudioFormat.lossless.value, output_directory: Annotated[ Path, typer.Argument( help="The parent directory under which directory(ies) of files will be written" ), ] = user_music_path(), loglevel: Annotated[ LogLevel, typer.Option(case_sensitive=False) ] = LogLevel.info.value, include_eps_singles: Annotated[ bool, typer.Option( "--include-eps-singles", help="No-op unless passing TIDAL artist. Whether to include artist's EPs and singles with albums", ), ] = False, ): logging.basicConfig( format="%(asctime)s,%(msecs)03d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s", datefmt="%Y-%m-%d:%H:%M:%S", level=logging.getLevelName(loglevel.value), ) logger = logging.getLogger(__name__) tidal_resource: Optional[
Union[TidalAlbum, TidalMix, TidalPlaylist, TidalTrack, TidalVideo]
12
2023-12-12 21:50:25+00:00
24k
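main() in the tidal-wave record above leans on typer's Annotated-style declarations to turn typed parameters into CLI arguments and options. The sketch below isolates that pattern; the LogLevel enum and the command body are stand-ins rather than the package's real login module, but typer.Typer, typer.Argument, and typer.Option are used exactly as in the record.

# Minimal typer app using the Annotated argument/option pattern from above.
import logging
from enum import Enum
from pathlib import Path
from typing_extensions import Annotated
import typer

class LogLevel(str, Enum):  # stand-in for tidal_wave.login.LogLevel
    info = 'INFO'
    debug = 'DEBUG'

app = typer.Typer()

@app.command()
def main(
    url: Annotated[str, typer.Argument(help='Resource URL to process')],
    output_directory: Annotated[Path, typer.Argument()] = Path('.'),
    loglevel: Annotated[LogLevel, typer.Option(case_sensitive=False)] = LogLevel.info.value,
):
    # typer converts the CLI string to the LogLevel enum; .value recovers the name
    logging.basicConfig(level=logging.getLevelName(loglevel.value))
    logging.getLogger(__name__).info('would process %s into %s', url, output_directory)

if __name__ == '__main__':
    app()

Declaring defaults as plain Python values, as the record does, lets the same function be called directly from tests without going through the CLI parser.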
ZS-YANG/FemtoDet-v3
mmdet/datasets/transforms/transforms.py
[ { "identifier": "TRANSFORMS", "path": "mmdet/registry.py", "snippet": "TRANSFORMS = Registry(\n 'transform',\n parent=MMENGINE_TRANSFORMS,\n locations=['mmdet.datasets.transforms'])" }, { "identifier": "autocast_box_type", "path": "mmdet/structures/bbox/box_type.py", "snippet": ...
import copy
import inspect
import math
import warnings
import cv2
import mmcv
import numpy as np
import albumentations
from typing import List, Optional, Sequence, Tuple, Union
from mmcv.image import imresize
from mmcv.image.geometric import _scale_size
from mmcv.transforms import BaseTransform
from mmcv.transforms import Pad as MMCV_Pad
from mmcv.transforms import RandomFlip as MMCV_RandomFlip
from mmcv.transforms import Resize as MMCV_Resize
from mmcv.transforms.utils import avoid_cache_randomness, cache_randomness
from mmengine.dataset import BaseDataset
from mmengine.utils import is_str
from numpy import random
from mmdet.registry import TRANSFORMS
from mmdet.structures.bbox import HorizontalBoxes, autocast_box_type
from mmdet.structures.mask import BitmapMasks, PolygonMasks
from mmdet.utils import log_img_scale
from imagecorruptions import corrupt
from albumentations import Compose
17,900
assert (cutout_shape is None) ^ (cutout_ratio is None), \ 'Either cutout_shape or cutout_ratio should be specified.' assert (isinstance(cutout_shape, (list, tuple)) or isinstance(cutout_ratio, (list, tuple))) if isinstance(n_holes, tuple): assert len(n_holes) == 2 and 0 <= n_holes[0] < n_holes[1] else: n_holes = (n_holes, n_holes) self.n_holes = n_holes self.fill_in = fill_in self.with_ratio = cutout_ratio is not None self.candidates = cutout_ratio if self.with_ratio else cutout_shape if not isinstance(self.candidates, list): self.candidates = [self.candidates] @autocast_box_type() def transform(self, results: dict) -> dict: """Call function to drop some regions of image.""" h, w, c = results['img'].shape n_holes = np.random.randint(self.n_holes[0], self.n_holes[1] + 1) for _ in range(n_holes): x1 = np.random.randint(0, w) y1 = np.random.randint(0, h) index = np.random.randint(0, len(self.candidates)) if not self.with_ratio: cutout_w, cutout_h = self.candidates[index] else: cutout_w = int(self.candidates[index][0] * w) cutout_h = int(self.candidates[index][1] * h) x2 = np.clip(x1 + cutout_w, 0, w) y2 = np.clip(y1 + cutout_h, 0, h) results['img'][y1:y2, x1:x2, :] = self.fill_in return results def __repr__(self): repr_str = self.__class__.__name__ repr_str += f'(n_holes={self.n_holes}, ' repr_str += (f'cutout_ratio={self.candidates}, ' if self.with_ratio else f'cutout_shape={self.candidates}, ') repr_str += f'fill_in={self.fill_in})' return repr_str @TRANSFORMS.register_module() class Mosaic(BaseTransform): """Mosaic augmentation. Given 4 images, mosaic transform combines them into one output image. The output image is composed of the parts from each sub- image. .. code:: text mosaic transform center_x +------------------------------+ | pad | pad | | +-----------+ | | | | | | | image1 |--------+ | | | | | | | | | image2 | | center_y |----+-------------+-----------| | | cropped | | |pad | image3 | image4 | | | | | +----|-------------+-----------+ | | +-------------+ The mosaic transform steps are as follows: 1. Choose the mosaic center as the intersections of 4 images 2. Get the left top image according to the index, and randomly sample another 3 images from the custom dataset. 3. Sub image will be cropped if image is larger than mosaic patch Required Keys: - img - gt_bboxes (BaseBoxes[torch.float32]) (optional) - gt_bboxes_labels (np.int64) (optional) - gt_ignore_flags (bool) (optional) - mix_results (List[dict]) Modified Keys: - img - img_shape - gt_bboxes (optional) - gt_bboxes_labels (optional) - gt_ignore_flags (optional) Args: img_scale (Sequence[int]): Image size before mosaic pipeline of single image. The shape order should be (width, height). Defaults to (640, 640). center_ratio_range (Sequence[float]): Center ratio range of mosaic output. Defaults to (0.5, 1.5). bbox_clip_border (bool, optional): Whether to clip the objects outside the border of the image. In some dataset like MOT17, the gt bboxes are allowed to cross the border of images. Therefore, we don't need to clip the gt bboxes in these cases. Defaults to True. pad_val (int): Pad value. Defaults to 114. prob (float): Probability of applying this transformation. Defaults to 1.0. """ def __init__(self, img_scale: Tuple[int, int] = (640, 640), center_ratio_range: Tuple[float, float] = (0.5, 1.5), bbox_clip_border: bool = True, pad_val: float = 114.0, prob: float = 1.0) -> None: assert isinstance(img_scale, tuple) assert 0 <= prob <= 1.0, 'The probability should be in range [0,1]. ' \ f'got {prob}.'
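The Mosaic docstring in the record above lays out the 2x2 geometry: four images pasted around a sampled center on a padded canvas, with each sub-image cropped to its quadrant. This numpy-only sketch reproduces just that layout step (no bboxes, dataset sampling, or registry); the sizes and the anchoring-toward-center rule follow the docstring, while everything else is simplified.

# Sketch of the mosaic paste: 4 images anchored at a random center.
import numpy as np

def simple_mosaic(imgs, size=(640, 640), pad_val=114, center_ratio_range=(0.5, 1.5)):
    """Paste 4 HxWx3 images into the quadrants around a random center."""
    w, h = size
    canvas = np.full((2 * h, 2 * w, 3), pad_val, dtype=np.uint8)
    cx = int(np.random.uniform(*center_ratio_range) * w)
    cy = int(np.random.uniform(*center_ratio_range) * h)
    for loc, img in zip(('tl', 'tr', 'bl', 'br'), imgs):
        ih, iw = img.shape[:2]
        # destination rectangle on the canvas, clipped at the borders
        if loc == 'tl':
            x1, y1, x2, y2 = max(cx - iw, 0), max(cy - ih, 0), cx, cy
        elif loc == 'tr':
            x1, y1, x2, y2 = cx, max(cy - ih, 0), min(cx + iw, 2 * w), cy
        elif loc == 'bl':
            x1, y1, x2, y2 = max(cx - iw, 0), cy, cx, min(cy + ih, 2 * h)
        else:
            x1, y1, x2, y2 = cx, cy, min(cx + iw, 2 * w), min(cy + ih, 2 * h)
        # crop each sub-image to its slot, keeping the corner nearest the center
        ph, pw = y2 - y1, x2 - x1
        src = img[-ph:, -pw:] if loc == 'tl' else \
              img[-ph:, :pw] if loc == 'tr' else \
              img[:ph, -pw:] if loc == 'bl' else img[:ph, :pw]
        canvas[y1:y2, x1:x2] = src
    return canvas

imgs = [np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8) for _ in range(4)]
print(simple_mosaic(imgs).shape)  # (1280, 1280, 3)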
# Copyright (c) OpenMMLab. All rights reserved. try: except ImportError: corrupt = None try: except ImportError: albumentations = None Compose = None Number = Union[int, float] def _fixed_scale_size( size: Tuple[int, int], scale: Union[float, int, tuple], ) -> Tuple[int, int]: """Rescale a size by a ratio. Args: size (tuple[int]): (w, h). scale (float | tuple(float)): Scaling factor. Returns: tuple[int]: scaled size. """ if isinstance(scale, (float, int)): scale = (scale, scale) w, h = size # don't need o.5 offset return int(w * float(scale[0])), int(h * float(scale[1])) def rescale_size(old_size: tuple, scale: Union[float, int, tuple], return_scale: bool = False) -> tuple: """Calculate the new size to be rescaled to. Args: old_size (tuple[int]): The old size (w, h) of image. scale (float | tuple[int]): The scaling factor or maximum size. If it is a float number, then the image will be rescaled by this factor, else if it is a tuple of 2 integers, then the image will be rescaled as large as possible within the scale. return_scale (bool): Whether to return the scaling factor besides the rescaled image size. Returns: tuple[int]: The new rescaled image size. """ w, h = old_size if isinstance(scale, (float, int)): if scale <= 0: raise ValueError(f'Invalid scale {scale}, must be positive.') scale_factor = scale elif isinstance(scale, tuple): max_long_edge = max(scale) max_short_edge = min(scale) scale_factor = min(max_long_edge / max(h, w), max_short_edge / min(h, w)) else: raise TypeError( f'Scale must be a number or tuple of int, but got {type(scale)}') # only change this new_size = _fixed_scale_size((w, h), scale_factor) if return_scale: return new_size, scale_factor else: return new_size def imrescale( img: np.ndarray, scale: Union[float, Tuple[int, int]], return_scale: bool = False, interpolation: str = 'bilinear', backend: Optional[str] = None ) -> Union[np.ndarray, Tuple[np.ndarray, float]]: """Resize image while keeping the aspect ratio. Args: img (ndarray): The input image. scale (float | tuple[int]): The scaling factor or maximum size. If it is a float number, then the image will be rescaled by this factor, else if it is a tuple of 2 integers, then the image will be rescaled as large as possible within the scale. return_scale (bool): Whether to return the scaling factor besides the rescaled image. interpolation (str): Same as :func:`resize`. backend (str | None): Same as :func:`resize`. Returns: ndarray: The rescaled image. """ h, w = img.shape[:2] new_size, scale_factor = rescale_size((w, h), scale, return_scale=True) rescaled_img = imresize( img, new_size, interpolation=interpolation, backend=backend) if return_scale: return rescaled_img, scale_factor else: return rescaled_img @TRANSFORMS.register_module() class Resize(MMCV_Resize): """Resize images & bbox & seg. This transform resizes the input image according to ``scale`` or ``scale_factor``. Bboxes, masks, and seg map are then resized with the same scale factor. if ``scale`` and ``scale_factor`` are both set, it will use ``scale`` to resize. Required Keys: - img - gt_bboxes (BaseBoxes[torch.float32]) (optional) - gt_masks (BitmapMasks | PolygonMasks) (optional) - gt_seg_map (np.uint8) (optional) Modified Keys: - img - img_shape - gt_bboxes - gt_masks - gt_seg_map Added Keys: - scale - scale_factor - keep_ratio - homography_matrix Args: scale (int or tuple): Images scales for resizing. Defaults to None scale_factor (float or tuple[float]): Scale factors for resizing. Defaults to None. 
keep_ratio (bool): Whether to keep the aspect ratio when resizing the image. Defaults to False. clip_object_border (bool): Whether to clip the objects outside the border of the image. In some dataset like MOT17, the gt bboxes are allowed to cross the border of images. Therefore, we don't need to clip the gt bboxes in these cases. Defaults to True. backend (str): Image resize backend, choices are 'cv2' and 'pillow'. These two backends generates slightly different results. Defaults to 'cv2'. interpolation (str): Interpolation method, accepted values are "nearest", "bilinear", "bicubic", "area", "lanczos" for 'cv2' backend, "nearest", "bilinear" for 'pillow' backend. Defaults to 'bilinear'. """ def _resize_masks(self, results: dict) -> None: """Resize masks with ``results['scale']``""" if results.get('gt_masks', None) is not None: if self.keep_ratio: results['gt_masks'] = results['gt_masks'].rescale( results['scale']) else: results['gt_masks'] = results['gt_masks'].resize( results['img_shape']) def _resize_bboxes(self, results: dict) -> None: """Resize bounding boxes with ``results['scale_factor']``.""" if results.get('gt_bboxes', None) is not None: results['gt_bboxes'].rescale_(results['scale_factor']) if self.clip_object_border: results['gt_bboxes'].clip_(results['img_shape']) def _record_homography_matrix(self, results: dict) -> None: """Record the homography matrix for the Resize.""" w_scale, h_scale = results['scale_factor'] homography_matrix = np.array( [[w_scale, 0, 0], [0, h_scale, 0], [0, 0, 1]], dtype=np.float32) if results.get('homography_matrix', None) is None: results['homography_matrix'] = homography_matrix else: results['homography_matrix'] = homography_matrix @ results[ 'homography_matrix'] @autocast_box_type() def transform(self, results: dict) -> dict: """Transform function to resize images, bounding boxes and semantic segmentation map. Args: results (dict): Result dict from loading pipeline. Returns: dict: Resized results, 'img', 'gt_bboxes', 'gt_seg_map', 'scale', 'scale_factor', 'height', 'width', and 'keep_ratio' keys are updated in result dict. 
""" if self.scale: results['scale'] = self.scale else: img_shape = results['img'].shape[:2] results['scale'] = _scale_size(img_shape[::-1], self.scale_factor) self._resize_img(results) self._resize_bboxes(results) self._resize_masks(results) self._resize_seg(results) self._record_homography_matrix(results) return results def __repr__(self) -> str: repr_str = self.__class__.__name__ repr_str += f'(scale={self.scale}, ' repr_str += f'scale_factor={self.scale_factor}, ' repr_str += f'keep_ratio={self.keep_ratio}, ' repr_str += f'clip_object_border={self.clip_object_border}), ' repr_str += f'backend={self.backend}), ' repr_str += f'interpolation={self.interpolation})' return repr_str @TRANSFORMS.register_module() class FixScaleResize(Resize): """Compared to Resize, FixScaleResize fixes the scaling issue when `keep_ratio=true`.""" def _resize_img(self, results): """Resize images with ``results['scale']``.""" if results.get('img', None) is not None: if self.keep_ratio: img, scale_factor = imrescale( results['img'], results['scale'], interpolation=self.interpolation, return_scale=True, backend=self.backend) new_h, new_w = img.shape[:2] h, w = results['img'].shape[:2] w_scale = new_w / w h_scale = new_h / h else: img, w_scale, h_scale = mmcv.imresize( results['img'], results['scale'], interpolation=self.interpolation, return_scale=True, backend=self.backend) results['img'] = img results['img_shape'] = img.shape[:2] results['scale_factor'] = (w_scale, h_scale) results['keep_ratio'] = self.keep_ratio @TRANSFORMS.register_module() class ResizeShortestEdge(BaseTransform): """Resize the image and mask while keeping the aspect ratio unchanged. Modified from https://github.com/facebookresearch/detectron2/blob/main/detectron2/data/transforms/augmentation_impl.py#L130 # noqa:E501 This transform attempts to scale the shorter edge to the given `scale`, as long as the longer edge does not exceed `max_size`. If `max_size` is reached, then downscale so that the longer edge does not exceed `max_size`. Required Keys: - img - gt_seg_map (optional) Modified Keys: - img - img_shape - gt_seg_map (optional)) Added Keys: - scale - scale_factor - keep_ratio Args: scale (Union[int, Tuple[int, int]]): The target short edge length. If it's tuple, will select the min value as the short edge length. max_size (int): The maximum allowed longest edge length. """ def __init__(self, scale: Union[int, Tuple[int, int]], max_size: Optional[int] = None, resize_type: str = 'Resize', **resize_kwargs) -> None: super().__init__() self.scale = scale self.max_size = max_size self.resize_cfg = dict(type=resize_type, **resize_kwargs) self.resize = TRANSFORMS.build({'scale': 0, **self.resize_cfg}) def _get_output_shape( self, img: np.ndarray, short_edge_length: Union[int, Tuple[int, int]]) -> Tuple[int, int]: """Compute the target image shape with the given `short_edge_length`. Args: img (np.ndarray): The input image. short_edge_length (Union[int, Tuple[int, int]]): The target short edge length. If it's tuple, will select the min value as the short edge length. 
""" h, w = img.shape[:2] if isinstance(short_edge_length, int): size = short_edge_length * 1.0 elif isinstance(short_edge_length, tuple): size = min(short_edge_length) * 1.0 scale = size / min(h, w) if h < w: new_h, new_w = size, scale * w else: new_h, new_w = scale * h, size if self.max_size and max(new_h, new_w) > self.max_size: scale = self.max_size * 1.0 / max(new_h, new_w) new_h *= scale new_w *= scale new_h = int(new_h + 0.5) new_w = int(new_w + 0.5) return new_w, new_h def transform(self, results: dict) -> dict: self.resize.scale = self._get_output_shape(results['img'], self.scale) return self.resize(results) @TRANSFORMS.register_module() class FixShapeResize(Resize): """Resize images & bbox & seg to the specified size. This transform resizes the input image according to ``width`` and ``height``. Bboxes, masks, and seg map are then resized with the same parameters. Required Keys: - img - gt_bboxes (BaseBoxes[torch.float32]) (optional) - gt_masks (BitmapMasks | PolygonMasks) (optional) - gt_seg_map (np.uint8) (optional) Modified Keys: - img - img_shape - gt_bboxes - gt_masks - gt_seg_map Added Keys: - scale - scale_factor - keep_ratio - homography_matrix Args: width (int): width for resizing. height (int): height for resizing. Defaults to None. pad_val (Number | dict[str, Number], optional): Padding value for if the pad_mode is "constant". If it is a single number, the value to pad the image is the number and to pad the semantic segmentation map is 255. If it is a dict, it should have the following keys: - img: The value to pad the image. - seg: The value to pad the semantic segmentation map. Defaults to dict(img=0, seg=255). keep_ratio (bool): Whether to keep the aspect ratio when resizing the image. Defaults to False. clip_object_border (bool): Whether to clip the objects outside the border of the image. In some dataset like MOT17, the gt bboxes are allowed to cross the border of images. Therefore, we don't need to clip the gt bboxes in these cases. Defaults to True. backend (str): Image resize backend, choices are 'cv2' and 'pillow'. These two backends generates slightly different results. Defaults to 'cv2'. interpolation (str): Interpolation method, accepted values are "nearest", "bilinear", "bicubic", "area", "lanczos" for 'cv2' backend, "nearest", "bilinear" for 'pillow' backend. Defaults to 'bilinear'. """ def __init__(self, width: int, height: int, pad_val: Union[Number, dict] = dict(img=0, seg=255), keep_ratio: bool = False, clip_object_border: bool = True, backend: str = 'cv2', interpolation: str = 'bilinear') -> None: assert width is not None and height is not None, ( '`width` and' '`height` can not be `None`') self.width = width self.height = height self.scale = (width, height) self.backend = backend self.interpolation = interpolation self.keep_ratio = keep_ratio self.clip_object_border = clip_object_border if keep_ratio is True: # padding to the fixed size when keep_ratio=True self.pad_transform = Pad(size=self.scale, pad_val=pad_val) @autocast_box_type() def transform(self, results: dict) -> dict: """Transform function to resize images, bounding boxes and semantic segmentation map. Args: results (dict): Result dict from loading pipeline. Returns: dict: Resized results, 'img', 'gt_bboxes', 'gt_seg_map', 'scale', 'scale_factor', 'height', 'width', and 'keep_ratio' keys are updated in result dict. 
""" img = results['img'] h, w = img.shape[:2] if self.keep_ratio: scale_factor = min(self.width / w, self.height / h) results['scale_factor'] = (scale_factor, scale_factor) real_w, real_h = int(w * float(scale_factor) + 0.5), int(h * float(scale_factor) + 0.5) img, scale_factor = mmcv.imrescale( results['img'], (real_w, real_h), interpolation=self.interpolation, return_scale=True, backend=self.backend) # the w_scale and h_scale has minor difference # a real fix should be done in the mmcv.imrescale in the future results['img'] = img results['img_shape'] = img.shape[:2] results['keep_ratio'] = self.keep_ratio results['scale'] = (real_w, real_h) else: results['scale'] = (self.width, self.height) results['scale_factor'] = (self.width / w, self.height / h) super()._resize_img(results) self._resize_bboxes(results) self._resize_masks(results) self._resize_seg(results) self._record_homography_matrix(results) if self.keep_ratio: self.pad_transform(results) return results def __repr__(self) -> str: repr_str = self.__class__.__name__ repr_str += f'(width={self.width}, height={self.height}, ' repr_str += f'keep_ratio={self.keep_ratio}, ' repr_str += f'clip_object_border={self.clip_object_border}), ' repr_str += f'backend={self.backend}), ' repr_str += f'interpolation={self.interpolation})' return repr_str @TRANSFORMS.register_module() class RandomFlip(MMCV_RandomFlip): """Flip the image & bbox & mask & segmentation map. Added or Updated keys: flip, flip_direction, img, gt_bboxes, and gt_seg_map. There are 3 flip modes: - ``prob`` is float, ``direction`` is string: the image will be ``direction``ly flipped with probability of ``prob`` . E.g., ``prob=0.5``, ``direction='horizontal'``, then image will be horizontally flipped with probability of 0.5. - ``prob`` is float, ``direction`` is list of string: the image will be ``direction[i]``ly flipped with probability of ``prob/len(direction)``. E.g., ``prob=0.5``, ``direction=['horizontal', 'vertical']``, then image will be horizontally flipped with probability of 0.25, vertically with probability of 0.25. - ``prob`` is list of float, ``direction`` is list of string: given ``len(prob) == len(direction)``, the image will be ``direction[i]``ly flipped with probability of ``prob[i]``. E.g., ``prob=[0.3, 0.5]``, ``direction=['horizontal', 'vertical']``, then image will be horizontally flipped with probability of 0.3, vertically with probability of 0.5. Required Keys: - img - gt_bboxes (BaseBoxes[torch.float32]) (optional) - gt_masks (BitmapMasks | PolygonMasks) (optional) - gt_seg_map (np.uint8) (optional) Modified Keys: - img - gt_bboxes - gt_masks - gt_seg_map Added Keys: - flip - flip_direction - homography_matrix Args: prob (float | list[float], optional): The flipping probability. Defaults to None. direction(str | list[str]): The flipping direction. Options If input is a list, the length must equal ``prob``. Each element in ``prob`` indicates the flip probability of corresponding direction. Defaults to 'horizontal'. 
""" def _record_homography_matrix(self, results: dict) -> None: """Record the homography matrix for the RandomFlip.""" cur_dir = results['flip_direction'] h, w = results['img'].shape[:2] if cur_dir == 'horizontal': homography_matrix = np.array([[-1, 0, w], [0, 1, 0], [0, 0, 1]], dtype=np.float32) elif cur_dir == 'vertical': homography_matrix = np.array([[1, 0, 0], [0, -1, h], [0, 0, 1]], dtype=np.float32) elif cur_dir == 'diagonal': homography_matrix = np.array([[-1, 0, w], [0, -1, h], [0, 0, 1]], dtype=np.float32) else: homography_matrix = np.eye(3, dtype=np.float32) if results.get('homography_matrix', None) is None: results['homography_matrix'] = homography_matrix else: results['homography_matrix'] = homography_matrix @ results[ 'homography_matrix'] @autocast_box_type() def _flip(self, results: dict) -> None: """Flip images, bounding boxes, and semantic segmentation map.""" # flip image results['img'] = mmcv.imflip( results['img'], direction=results['flip_direction']) img_shape = results['img'].shape[:2] # flip bboxes if results.get('gt_bboxes', None) is not None: results['gt_bboxes'].flip_(img_shape, results['flip_direction']) # flip masks if results.get('gt_masks', None) is not None: results['gt_masks'] = results['gt_masks'].flip( results['flip_direction']) # flip segs if results.get('gt_seg_map', None) is not None: results['gt_seg_map'] = mmcv.imflip( results['gt_seg_map'], direction=results['flip_direction']) # record homography matrix for flip self._record_homography_matrix(results) @TRANSFORMS.register_module() class RandomShift(BaseTransform): """Shift the image and box given shift pixels and probability. Required Keys: - img - gt_bboxes (BaseBoxes[torch.float32]) - gt_bboxes_labels (np.int64) - gt_ignore_flags (bool) (optional) Modified Keys: - img - gt_bboxes - gt_bboxes_labels - gt_ignore_flags (bool) (optional) Args: prob (float): Probability of shifts. Defaults to 0.5. max_shift_px (int): The max pixels for shifting. Defaults to 32. filter_thr_px (int): The width and height threshold for filtering. The bbox and the rest of the targets below the width and height threshold will be filtered. Defaults to 1. """ def __init__(self, prob: float = 0.5, max_shift_px: int = 32, filter_thr_px: int = 1) -> None: assert 0 <= prob <= 1 assert max_shift_px >= 0 self.prob = prob self.max_shift_px = max_shift_px self.filter_thr_px = int(filter_thr_px) @cache_randomness def _random_prob(self) -> float: return random.uniform(0, 1) @autocast_box_type() def transform(self, results: dict) -> dict: """Transform function to random shift images, bounding boxes. Args: results (dict): Result dict from loading pipeline. Returns: dict: Shift results. """ if self._random_prob() < self.prob: img_shape = results['img'].shape[:2] random_shift_x = random.randint(-self.max_shift_px, self.max_shift_px) random_shift_y = random.randint(-self.max_shift_px, self.max_shift_px) new_x = max(0, random_shift_x) ori_x = max(0, -random_shift_x) new_y = max(0, random_shift_y) ori_y = max(0, -random_shift_y) # TODO: support mask and semantic segmentation maps. bboxes = results['gt_bboxes'].clone() bboxes.translate_([random_shift_x, random_shift_y]) # clip border bboxes.clip_(img_shape) # remove invalid bboxes valid_inds = (bboxes.widths > self.filter_thr_px).numpy() & ( bboxes.heights > self.filter_thr_px).numpy() # If the shift does not contain any gt-bbox area, skip this # image. 
if not valid_inds.any(): return results bboxes = bboxes[valid_inds] results['gt_bboxes'] = bboxes results['gt_bboxes_labels'] = results['gt_bboxes_labels'][ valid_inds] if results.get('gt_ignore_flags', None) is not None: results['gt_ignore_flags'] = \ results['gt_ignore_flags'][valid_inds] # shift img img = results['img'] new_img = np.zeros_like(img) img_h, img_w = img.shape[:2] new_h = img_h - np.abs(random_shift_y) new_w = img_w - np.abs(random_shift_x) new_img[new_y:new_y + new_h, new_x:new_x + new_w] \ = img[ori_y:ori_y + new_h, ori_x:ori_x + new_w] results['img'] = new_img return results def __repr__(self): repr_str = self.__class__.__name__ repr_str += f'(prob={self.prob}, ' repr_str += f'max_shift_px={self.max_shift_px}, ' repr_str += f'filter_thr_px={self.filter_thr_px})' return repr_str @TRANSFORMS.register_module() class Pad(MMCV_Pad): """Pad the image & segmentation map. There are three padding modes: (1) pad to a fixed size and (2) pad to the minimum size that is divisible by some number. and (3)pad to square. Also, pad to square and pad to the minimum size can be used as the same time. Required Keys: - img - gt_bboxes (BaseBoxes[torch.float32]) (optional) - gt_masks (BitmapMasks | PolygonMasks) (optional) - gt_seg_map (np.uint8) (optional) Modified Keys: - img - img_shape - gt_masks - gt_seg_map Added Keys: - pad_shape - pad_fixed_size - pad_size_divisor Args: size (tuple, optional): Fixed padding size. Expected padding shape (width, height). Defaults to None. size_divisor (int, optional): The divisor of padded size. Defaults to None. pad_to_square (bool): Whether to pad the image into a square. Currently only used for YOLOX. Defaults to False. pad_val (Number | dict[str, Number], optional) - Padding value for if the pad_mode is "constant". If it is a single number, the value to pad the image is the number and to pad the semantic segmentation map is 255. If it is a dict, it should have the following keys: - img: The value to pad the image. - seg: The value to pad the semantic segmentation map. Defaults to dict(img=0, seg=255). padding_mode (str): Type of padding. Should be: constant, edge, reflect or symmetric. Defaults to 'constant'. - constant: pads with a constant value, this value is specified with pad_val. - edge: pads with the last value at the edge of the image. - reflect: pads with reflection of image without repeating the last value on the edge. For example, padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode will result in [3, 2, 1, 2, 3, 4, 3, 2]. - symmetric: pads with reflection of image repeating the last value on the edge. For example, padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode will result in [2, 1, 1, 2, 3, 4, 4, 3] """ def _pad_masks(self, results: dict) -> None: """Pad masks according to ``results['pad_shape']``.""" if results.get('gt_masks', None) is not None: pad_val = self.pad_val.get('masks', 0) pad_shape = results['pad_shape'][:2] results['gt_masks'] = results['gt_masks'].pad( pad_shape, pad_val=pad_val) def transform(self, results: dict) -> dict: """Call function to pad images, masks, semantic segmentation maps. Args: results (dict): Result dict from loading pipeline. Returns: dict: Updated result dict. """ self._pad_img(results) self._pad_seg(results) self._pad_masks(results) return results @TRANSFORMS.register_module() class RandomCrop(BaseTransform): """Random crop the image & bboxes & masks. 
The absolute ``crop_size`` is sampled based on ``crop_type`` and ``image_size``, then the cropped results are generated. Required Keys: - img - gt_bboxes (BaseBoxes[torch.float32]) (optional) - gt_bboxes_labels (np.int64) (optional) - gt_masks (BitmapMasks | PolygonMasks) (optional) - gt_ignore_flags (bool) (optional) - gt_seg_map (np.uint8) (optional) Modified Keys: - img - img_shape - gt_bboxes (optional) - gt_bboxes_labels (optional) - gt_masks (optional) - gt_ignore_flags (optional) - gt_seg_map (optional) - gt_instances_ids (options, only used in MOT/VIS) Added Keys: - homography_matrix Args: crop_size (tuple): The relative ratio or absolute pixels of (width, height). crop_type (str, optional): One of "relative_range", "relative", "absolute", "absolute_range". "relative" randomly crops (h * crop_size[0], w * crop_size[1]) part from an input of size (h, w). "relative_range" uniformly samples relative crop size from range [crop_size[0], 1] and [crop_size[1], 1] for height and width respectively. "absolute" crops from an input with absolute size (crop_size[0], crop_size[1]). "absolute_range" uniformly samples crop_h in range [crop_size[0], min(h, crop_size[1])] and crop_w in range [crop_size[0], min(w, crop_size[1])]. Defaults to "absolute". allow_negative_crop (bool, optional): Whether to allow a crop that does not contain any bbox area. Defaults to False. recompute_bbox (bool, optional): Whether to re-compute the boxes based on cropped instance masks. Defaults to False. bbox_clip_border (bool, optional): Whether clip the objects outside the border of the image. Defaults to True. Note: - If the image is smaller than the absolute crop size, return the original image. - The keys for bboxes, labels and masks must be aligned. That is, ``gt_bboxes`` corresponds to ``gt_labels`` and ``gt_masks``, and ``gt_bboxes_ignore`` corresponds to ``gt_labels_ignore`` and ``gt_masks_ignore``. - If the crop does not contain any gt-bbox region and ``allow_negative_crop`` is set to False, skip this image. """ def __init__(self, crop_size: tuple, crop_type: str = 'absolute', allow_negative_crop: bool = False, recompute_bbox: bool = False, bbox_clip_border: bool = True) -> None: if crop_type not in [ 'relative_range', 'relative', 'absolute', 'absolute_range' ]: raise ValueError(f'Invalid crop_type {crop_type}.') if crop_type in ['absolute', 'absolute_range']: assert crop_size[0] > 0 and crop_size[1] > 0 assert isinstance(crop_size[0], int) and isinstance( crop_size[1], int) if crop_type == 'absolute_range': assert crop_size[0] <= crop_size[1] else: assert 0 < crop_size[0] <= 1 and 0 < crop_size[1] <= 1 self.crop_size = crop_size self.crop_type = crop_type self.allow_negative_crop = allow_negative_crop self.bbox_clip_border = bbox_clip_border self.recompute_bbox = recompute_bbox def _crop_data(self, results: dict, crop_size: Tuple[int, int], allow_negative_crop: bool) -> Union[dict, None]: """Function to randomly crop images, bounding boxes, masks, semantic segmentation maps. Args: results (dict): Result dict from loading pipeline. crop_size (Tuple[int, int]): Expected absolute size after cropping, (h, w). allow_negative_crop (bool): Whether to allow a crop that does not contain any bbox area. Returns: results (Union[dict, None]): Randomly cropped results, 'img_shape' key in result dict is updated according to crop size. None will be returned when there is no valid bbox after cropping. 
""" assert crop_size[0] > 0 and crop_size[1] > 0 img = results['img'] margin_h = max(img.shape[0] - crop_size[0], 0) margin_w = max(img.shape[1] - crop_size[1], 0) offset_h, offset_w = self._rand_offset((margin_h, margin_w)) crop_y1, crop_y2 = offset_h, offset_h + crop_size[0] crop_x1, crop_x2 = offset_w, offset_w + crop_size[1] # Record the homography matrix for the RandomCrop homography_matrix = np.array( [[1, 0, -offset_w], [0, 1, -offset_h], [0, 0, 1]], dtype=np.float32) if results.get('homography_matrix', None) is None: results['homography_matrix'] = homography_matrix else: results['homography_matrix'] = homography_matrix @ results[ 'homography_matrix'] # crop the image img = img[crop_y1:crop_y2, crop_x1:crop_x2, ...] img_shape = img.shape results['img'] = img results['img_shape'] = img_shape[:2] # crop bboxes accordingly and clip to the image boundary if results.get('gt_bboxes', None) is not None: bboxes = results['gt_bboxes'] bboxes.translate_([-offset_w, -offset_h]) if self.bbox_clip_border: bboxes.clip_(img_shape[:2]) valid_inds = bboxes.is_inside(img_shape[:2]).numpy() # If the crop does not contain any gt-bbox area and # allow_negative_crop is False, skip this image. if (not valid_inds.any() and not allow_negative_crop): return None results['gt_bboxes'] = bboxes[valid_inds] if results.get('gt_ignore_flags', None) is not None: results['gt_ignore_flags'] = \ results['gt_ignore_flags'][valid_inds] if results.get('gt_bboxes_labels', None) is not None: results['gt_bboxes_labels'] = \ results['gt_bboxes_labels'][valid_inds] if results.get('gt_masks', None) is not None: results['gt_masks'] = results['gt_masks'][ valid_inds.nonzero()[0]].crop( np.asarray([crop_x1, crop_y1, crop_x2, crop_y2])) if self.recompute_bbox: results['gt_bboxes'] = results['gt_masks'].get_bboxes( type(results['gt_bboxes'])) # We should remove the instance ids corresponding to invalid boxes. if results.get('gt_instances_ids', None) is not None: results['gt_instances_ids'] = \ results['gt_instances_ids'][valid_inds] # crop semantic seg if results.get('gt_seg_map', None) is not None: results['gt_seg_map'] = results['gt_seg_map'][crop_y1:crop_y2, crop_x1:crop_x2] return results @cache_randomness def _rand_offset(self, margin: Tuple[int, int]) -> Tuple[int, int]: """Randomly generate crop offset. Args: margin (Tuple[int, int]): The upper bound for the offset generated randomly. Returns: Tuple[int, int]: The random offset for the crop. """ margin_h, margin_w = margin offset_h = np.random.randint(0, margin_h + 1) offset_w = np.random.randint(0, margin_w + 1) return offset_h, offset_w @cache_randomness def _get_crop_size(self, image_size: Tuple[int, int]) -> Tuple[int, int]: """Randomly generates the absolute crop size based on `crop_type` and `image_size`. Args: image_size (Tuple[int, int]): (h, w). Returns: crop_size (Tuple[int, int]): (crop_h, crop_w) in absolute pixels. 
""" h, w = image_size if self.crop_type == 'absolute': return min(self.crop_size[1], h), min(self.crop_size[0], w) elif self.crop_type == 'absolute_range': crop_h = np.random.randint( min(h, self.crop_size[0]), min(h, self.crop_size[1]) + 1) crop_w = np.random.randint( min(w, self.crop_size[0]), min(w, self.crop_size[1]) + 1) return crop_h, crop_w elif self.crop_type == 'relative': crop_w, crop_h = self.crop_size return int(h * crop_h + 0.5), int(w * crop_w + 0.5) else: # 'relative_range' crop_size = np.asarray(self.crop_size, dtype=np.float32) crop_h, crop_w = crop_size + np.random.rand(2) * (1 - crop_size) return int(h * crop_h + 0.5), int(w * crop_w + 0.5) @autocast_box_type() def transform(self, results: dict) -> Union[dict, None]: """Transform function to randomly crop images, bounding boxes, masks, semantic segmentation maps. Args: results (dict): Result dict from loading pipeline. Returns: results (Union[dict, None]): Randomly cropped results, 'img_shape' key in result dict is updated according to crop size. None will be returned when there is no valid bbox after cropping. """ image_size = results['img'].shape[:2] crop_size = self._get_crop_size(image_size) results = self._crop_data(results, crop_size, self.allow_negative_crop) return results def __repr__(self) -> str: repr_str = self.__class__.__name__ repr_str += f'(crop_size={self.crop_size}, ' repr_str += f'crop_type={self.crop_type}, ' repr_str += f'allow_negative_crop={self.allow_negative_crop}, ' repr_str += f'recompute_bbox={self.recompute_bbox}, ' repr_str += f'bbox_clip_border={self.bbox_clip_border})' return repr_str @TRANSFORMS.register_module() class SegRescale(BaseTransform): """Rescale semantic segmentation maps. This transform rescale the ``gt_seg_map`` according to ``scale_factor``. Required Keys: - gt_seg_map Modified Keys: - gt_seg_map Args: scale_factor (float): The scale factor of the final output. Defaults to 1. backend (str): Image rescale backend, choices are 'cv2' and 'pillow'. These two backends generates slightly different results. Defaults to 'cv2'. """ def __init__(self, scale_factor: float = 1, backend: str = 'cv2') -> None: self.scale_factor = scale_factor self.backend = backend def transform(self, results: dict) -> dict: """Transform function to scale the semantic segmentation map. Args: results (dict): Result dict from loading pipeline. Returns: dict: Result dict with semantic segmentation map scaled. """ if self.scale_factor != 1: results['gt_seg_map'] = mmcv.imrescale( results['gt_seg_map'], self.scale_factor, interpolation='nearest', backend=self.backend) return results def __repr__(self) -> str: repr_str = self.__class__.__name__ repr_str += f'(scale_factor={self.scale_factor}, ' repr_str += f'backend={self.backend})' return repr_str @TRANSFORMS.register_module() class PhotoMetricDistortion(BaseTransform): """Apply photometric distortion to image sequentially, every transformation is applied with a probability of 0.5. The position of random contrast is in second or second to last. 1. random brightness 2. random contrast (mode 0) 3. convert color from BGR to HSV 4. random saturation 5. random hue 6. convert color from HSV to BGR 7. random contrast (mode 1) 8. randomly swap channels Required Keys: - img (np.uint8) Modified Keys: - img (np.float32) Args: brightness_delta (int): delta of brightness. contrast_range (sequence): range of contrast. saturation_range (sequence): range of saturation. hue_delta (int): delta of hue. 
""" def __init__(self, brightness_delta: int = 32, contrast_range: Sequence[Number] = (0.5, 1.5), saturation_range: Sequence[Number] = (0.5, 1.5), hue_delta: int = 18) -> None: self.brightness_delta = brightness_delta self.contrast_lower, self.contrast_upper = contrast_range self.saturation_lower, self.saturation_upper = saturation_range self.hue_delta = hue_delta @cache_randomness def _random_flags(self) -> Sequence[Number]: mode = random.randint(2) brightness_flag = random.randint(2) contrast_flag = random.randint(2) saturation_flag = random.randint(2) hue_flag = random.randint(2) swap_flag = random.randint(2) delta_value = random.uniform(-self.brightness_delta, self.brightness_delta) alpha_value = random.uniform(self.contrast_lower, self.contrast_upper) saturation_value = random.uniform(self.saturation_lower, self.saturation_upper) hue_value = random.uniform(-self.hue_delta, self.hue_delta) swap_value = random.permutation(3) return (mode, brightness_flag, contrast_flag, saturation_flag, hue_flag, swap_flag, delta_value, alpha_value, saturation_value, hue_value, swap_value) def transform(self, results: dict) -> dict: """Transform function to perform photometric distortion on images. Args: results (dict): Result dict from loading pipeline. Returns: dict: Result dict with images distorted. """ assert 'img' in results, '`img` is not found in results' img = results['img'] img = img.astype(np.float32) (mode, brightness_flag, contrast_flag, saturation_flag, hue_flag, swap_flag, delta_value, alpha_value, saturation_value, hue_value, swap_value) = self._random_flags() # random brightness if brightness_flag: img += delta_value # mode == 0 --> do random contrast first # mode == 1 --> do random contrast last if mode == 1: if contrast_flag: img *= alpha_value # convert color from BGR to HSV img = mmcv.bgr2hsv(img) # random saturation if saturation_flag: img[..., 1] *= saturation_value # For image(type=float32), after convert bgr to hsv by opencv, # valid saturation value range is [0, 1] if saturation_value > 1: img[..., 1] = img[..., 1].clip(0, 1) # random hue if hue_flag: img[..., 0] += hue_value img[..., 0][img[..., 0] > 360] -= 360 img[..., 0][img[..., 0] < 0] += 360 # convert color from HSV to BGR img = mmcv.hsv2bgr(img) # random contrast if mode == 0: if contrast_flag: img *= alpha_value # randomly swap channels if swap_flag: img = img[..., swap_value] results['img'] = img return results def __repr__(self) -> str: repr_str = self.__class__.__name__ repr_str += f'(brightness_delta={self.brightness_delta}, ' repr_str += 'contrast_range=' repr_str += f'{(self.contrast_lower, self.contrast_upper)}, ' repr_str += 'saturation_range=' repr_str += f'{(self.saturation_lower, self.saturation_upper)}, ' repr_str += f'hue_delta={self.hue_delta})' return repr_str @TRANSFORMS.register_module() class Expand(BaseTransform): """Random expand the image & bboxes & masks & segmentation map. Randomly place the original image on a canvas of ``ratio`` x original image size filled with mean values. The ratio is in the range of ratio_range. Required Keys: - img - img_shape - gt_bboxes (BaseBoxes[torch.float32]) (optional) - gt_masks (BitmapMasks | PolygonMasks) (optional) - gt_seg_map (np.uint8) (optional) Modified Keys: - img - img_shape - gt_bboxes - gt_masks - gt_seg_map Args: mean (sequence): mean value of dataset. to_rgb (bool): if need to convert the order of mean to align with RGB. ratio_range (sequence)): range of expand ratio. seg_ignore_label (int): label of ignore segmentation map. 
prob (float): probability of applying this transformation """ def __init__(self, mean: Sequence[Number] = (0, 0, 0), to_rgb: bool = True, ratio_range: Sequence[Number] = (1, 4), seg_ignore_label: int = None, prob: float = 0.5) -> None: self.to_rgb = to_rgb self.ratio_range = ratio_range if to_rgb: self.mean = mean[::-1] else: self.mean = mean self.min_ratio, self.max_ratio = ratio_range self.seg_ignore_label = seg_ignore_label self.prob = prob @cache_randomness def _random_prob(self) -> float: return random.uniform(0, 1) @cache_randomness def _random_ratio(self) -> float: return random.uniform(self.min_ratio, self.max_ratio) @cache_randomness def _random_left_top(self, ratio: float, h: int, w: int) -> Tuple[int, int]: left = int(random.uniform(0, w * ratio - w)) top = int(random.uniform(0, h * ratio - h)) return left, top @autocast_box_type() def transform(self, results: dict) -> dict: """Transform function to expand images, bounding boxes, masks, segmentation map. Args: results (dict): Result dict from loading pipeline. Returns: dict: Result dict with images, bounding boxes, masks, segmentation map expanded. """ if self._random_prob() > self.prob: return results assert 'img' in results, '`img` is not found in results' img = results['img'] h, w, c = img.shape ratio = self._random_ratio() # speedup expand when meets large image if np.all(self.mean == self.mean[0]): expand_img = np.empty((int(h * ratio), int(w * ratio), c), img.dtype) expand_img.fill(self.mean[0]) else: expand_img = np.full((int(h * ratio), int(w * ratio), c), self.mean, dtype=img.dtype) left, top = self._random_left_top(ratio, h, w) expand_img[top:top + h, left:left + w] = img results['img'] = expand_img results['img_shape'] = expand_img.shape[:2] # expand bboxes if results.get('gt_bboxes', None) is not None: results['gt_bboxes'].translate_([left, top]) # expand masks if results.get('gt_masks', None) is not None: results['gt_masks'] = results['gt_masks'].expand( int(h * ratio), int(w * ratio), top, left) # expand segmentation map if results.get('gt_seg_map', None) is not None: gt_seg = results['gt_seg_map'] expand_gt_seg = np.full((int(h * ratio), int(w * ratio)), self.seg_ignore_label, dtype=gt_seg.dtype) expand_gt_seg[top:top + h, left:left + w] = gt_seg results['gt_seg_map'] = expand_gt_seg return results def __repr__(self) -> str: repr_str = self.__class__.__name__ repr_str += f'(mean={self.mean}, to_rgb={self.to_rgb}, ' repr_str += f'ratio_range={self.ratio_range}, ' repr_str += f'seg_ignore_label={self.seg_ignore_label}, ' repr_str += f'prob={self.prob})' return repr_str @TRANSFORMS.register_module() class MinIoURandomCrop(BaseTransform): """Random crop the image & bboxes & masks & segmentation map, the cropped patches have minimum IoU requirement with original image & bboxes & masks. & segmentation map, the IoU threshold is randomly selected from min_ious. Required Keys: - img - img_shape - gt_bboxes (BaseBoxes[torch.float32]) (optional) - gt_bboxes_labels (np.int64) (optional) - gt_masks (BitmapMasks | PolygonMasks) (optional) - gt_ignore_flags (bool) (optional) - gt_seg_map (np.uint8) (optional) Modified Keys: - img - img_shape - gt_bboxes - gt_bboxes_labels - gt_masks - gt_ignore_flags - gt_seg_map Args: min_ious (Sequence[float]): minimum IoU threshold for all intersections with bounding boxes. min_crop_size (float): minimum crop's size (i.e. h,w := a*h, a*w, where a >= min_crop_size). bbox_clip_border (bool, optional): Whether clip the objects outside the border of the image. Defaults to True. 
""" def __init__(self, min_ious: Sequence[float] = (0.1, 0.3, 0.5, 0.7, 0.9), min_crop_size: float = 0.3, bbox_clip_border: bool = True) -> None: self.min_ious = min_ious self.sample_mode = (1, *min_ious, 0) self.min_crop_size = min_crop_size self.bbox_clip_border = bbox_clip_border @cache_randomness def _random_mode(self) -> Number: return random.choice(self.sample_mode) @autocast_box_type() def transform(self, results: dict) -> dict: """Transform function to crop images and bounding boxes with minimum IoU constraint. Args: results (dict): Result dict from loading pipeline. Returns: dict: Result dict with images and bounding boxes cropped, \ 'img_shape' key is updated. """ assert 'img' in results, '`img` is not found in results' assert 'gt_bboxes' in results, '`gt_bboxes` is not found in results' img = results['img'] boxes = results['gt_bboxes'] h, w, c = img.shape while True: mode = self._random_mode() self.mode = mode if mode == 1: return results min_iou = self.mode for i in range(50): new_w = random.uniform(self.min_crop_size * w, w) new_h = random.uniform(self.min_crop_size * h, h) # h / w in [0.5, 2] if new_h / new_w < 0.5 or new_h / new_w > 2: continue left = random.uniform(w - new_w) top = random.uniform(h - new_h) patch = np.array( (int(left), int(top), int(left + new_w), int(top + new_h))) # Line or point crop is not allowed if patch[2] == patch[0] or patch[3] == patch[1]: continue overlaps = boxes.overlaps( HorizontalBoxes(patch.reshape(-1, 4).astype(np.float32)), boxes).numpy().reshape(-1) if len(overlaps) > 0 and overlaps.min() < min_iou: continue # center of boxes should inside the crop img # only adjust boxes and instance masks when the gt is not empty if len(overlaps) > 0: # adjust boxes def is_center_of_bboxes_in_patch(boxes, patch): centers = boxes.centers.numpy() mask = ((centers[:, 0] > patch[0]) * (centers[:, 1] > patch[1]) * (centers[:, 0] < patch[2]) * (centers[:, 1] < patch[3])) return mask mask = is_center_of_bboxes_in_patch(boxes, patch) if not mask.any(): continue if results.get('gt_bboxes', None) is not None: boxes = results['gt_bboxes'] mask = is_center_of_bboxes_in_patch(boxes, patch) boxes = boxes[mask] boxes.translate_([-patch[0], -patch[1]]) if self.bbox_clip_border: boxes.clip_( [patch[3] - patch[1], patch[2] - patch[0]]) results['gt_bboxes'] = boxes # ignore_flags if results.get('gt_ignore_flags', None) is not None: results['gt_ignore_flags'] = \ results['gt_ignore_flags'][mask] # labels if results.get('gt_bboxes_labels', None) is not None: results['gt_bboxes_labels'] = results[ 'gt_bboxes_labels'][mask] # mask fields if results.get('gt_masks', None) is not None: results['gt_masks'] = results['gt_masks'][ mask.nonzero()[0]].crop(patch) # adjust the img no matter whether the gt is empty before crop img = img[patch[1]:patch[3], patch[0]:patch[2]] results['img'] = img results['img_shape'] = img.shape[:2] # seg fields if results.get('gt_seg_map', None) is not None: results['gt_seg_map'] = results['gt_seg_map'][ patch[1]:patch[3], patch[0]:patch[2]] return results def __repr__(self) -> str: repr_str = self.__class__.__name__ repr_str += f'(min_ious={self.min_ious}, ' repr_str += f'min_crop_size={self.min_crop_size}, ' repr_str += f'bbox_clip_border={self.bbox_clip_border})' return repr_str @TRANSFORMS.register_module() class Corrupt(BaseTransform): """Corruption augmentation. Corruption transforms implemented based on `imagecorruptions <https://github.com/bethgelab/imagecorruptions>`_. 
Required Keys: - img (np.uint8) Modified Keys: - img (np.uint8) Args: corruption (str): Corruption name. severity (int): The severity of corruption. Defaults to 1. """ def __init__(self, corruption: str, severity: int = 1) -> None: self.corruption = corruption self.severity = severity def transform(self, results: dict) -> dict: """Call function to corrupt image. Args: results (dict): Result dict from loading pipeline. Returns: dict: Result dict with images corrupted. """ if corrupt is None: raise RuntimeError('imagecorruptions is not installed') results['img'] = corrupt( results['img'].astype(np.uint8), corruption_name=self.corruption, severity=self.severity) return results def __repr__(self) -> str: repr_str = self.__class__.__name__ repr_str += f'(corruption={self.corruption}, ' repr_str += f'severity={self.severity})' return repr_str @TRANSFORMS.register_module() @avoid_cache_randomness class Albu(BaseTransform): """Albumentation augmentation. Adds custom transformations from Albumentations library. Please, visit `https://albumentations.readthedocs.io` to get more information. Required Keys: - img (np.uint8) - gt_bboxes (HorizontalBoxes[torch.float32]) (optional) - gt_masks (BitmapMasks | PolygonMasks) (optional) Modified Keys: - img (np.uint8) - gt_bboxes (HorizontalBoxes[torch.float32]) (optional) - gt_masks (BitmapMasks | PolygonMasks) (optional) - img_shape (tuple) An example of ``transforms`` is as followed: .. code-block:: [ dict( type='ShiftScaleRotate', shift_limit=0.0625, scale_limit=0.0, rotate_limit=0, interpolation=1, p=0.5), dict( type='RandomBrightnessContrast', brightness_limit=[0.1, 0.3], contrast_limit=[0.1, 0.3], p=0.2), dict(type='ChannelShuffle', p=0.1), dict( type='OneOf', transforms=[ dict(type='Blur', blur_limit=3, p=1.0), dict(type='MedianBlur', blur_limit=3, p=1.0) ], p=0.1), ] Args: transforms (list[dict]): A list of albu transformations bbox_params (dict, optional): Bbox_params for albumentation `Compose` keymap (dict, optional): Contains {'input key':'albumentation-style key'} skip_img_without_anno (bool): Whether to skip the image if no ann left after aug. Defaults to False. """ def __init__(self, transforms: List[dict], bbox_params: Optional[dict] = None, keymap: Optional[dict] = None, skip_img_without_anno: bool = False) -> None: if Compose is None: raise RuntimeError('albumentations is not installed') # Args will be modified later, copying it will be safer transforms = copy.deepcopy(transforms) if bbox_params is not None: bbox_params = copy.deepcopy(bbox_params) if keymap is not None: keymap = copy.deepcopy(keymap) self.transforms = transforms self.filter_lost_elements = False self.skip_img_without_anno = skip_img_without_anno # A simple workaround to remove masks without boxes if (isinstance(bbox_params, dict) and 'label_fields' in bbox_params and 'filter_lost_elements' in bbox_params): self.filter_lost_elements = True self.origin_label_fields = bbox_params['label_fields'] bbox_params['label_fields'] = ['idx_mapper'] del bbox_params['filter_lost_elements'] self.bbox_params = ( self.albu_builder(bbox_params) if bbox_params else None) self.aug = Compose([self.albu_builder(t) for t in self.transforms], bbox_params=self.bbox_params) if not keymap: self.keymap_to_albu = { 'img': 'image', 'gt_masks': 'masks', 'gt_bboxes': 'bboxes' } else: self.keymap_to_albu = keymap self.keymap_back = {v: k for k, v in self.keymap_to_albu.items()} def albu_builder(self, cfg: dict) -> albumentations: """Import a module from albumentations. 
It inherits some of :func:`build_from_cfg` logic. Args: cfg (dict): Config dict. It should at least contain the key "type". Returns: obj: The constructed object. """ assert isinstance(cfg, dict) and 'type' in cfg args = cfg.copy() obj_type = args.pop('type') if is_str(obj_type): if albumentations is None: raise RuntimeError('albumentations is not installed') obj_cls = getattr(albumentations, obj_type) elif inspect.isclass(obj_type): obj_cls = obj_type else: raise TypeError( f'type must be a str or valid type, but got {type(obj_type)}') if 'transforms' in args: args['transforms'] = [ self.albu_builder(transform) for transform in args['transforms'] ] return obj_cls(**args) @staticmethod def mapper(d: dict, keymap: dict) -> dict: """Dictionary mapper. Renames keys according to keymap provided. Args: d (dict): old dict keymap (dict): {'old_key':'new_key'} Returns: dict: new dict. """ updated_dict = {} for k, v in zip(d.keys(), d.values()): new_k = keymap.get(k, k) updated_dict[new_k] = d[k] return updated_dict @autocast_box_type() def transform(self, results: dict) -> Union[dict, None]: """Transform function of Albu.""" # TODO: gt_seg_map is not currently supported # dict to albumentations format results = self.mapper(results, self.keymap_to_albu) results, ori_masks = self._preprocess_results(results) results = self.aug(**results) results = self._postprocess_results(results, ori_masks) if results is None: return None # back to the original format results = self.mapper(results, self.keymap_back) results['img_shape'] = results['img'].shape[:2] return results def _preprocess_results(self, results: dict) -> tuple: """Pre-processing results to facilitate the use of Albu.""" if 'bboxes' in results: # to list of boxes if not isinstance(results['bboxes'], HorizontalBoxes): raise NotImplementedError( 'Albu only supports horizontal boxes now') bboxes = results['bboxes'].numpy() results['bboxes'] = [x for x in bboxes] # add pseudo-field for filtration if self.filter_lost_elements: results['idx_mapper'] = np.arange(len(results['bboxes'])) # TODO: Support mask structure in albu ori_masks = None if 'masks' in results: if isinstance(results['masks'], PolygonMasks): raise NotImplementedError( 'Albu only supports BitMap masks now') ori_masks = results['masks'] if albumentations.__version__ < '0.5': results['masks'] = results['masks'].masks else: results['masks'] = [mask for mask in results['masks'].masks] return results, ori_masks def _postprocess_results( self, results: dict, ori_masks: Optional[Union[BitmapMasks, PolygonMasks]] = None) -> dict: """Post-processing Albu output.""" # albumentations may return np.array or list on different versions if 'gt_bboxes_labels' in results and isinstance( results['gt_bboxes_labels'], list): results['gt_bboxes_labels'] = np.array( results['gt_bboxes_labels'], dtype=np.int64) if 'gt_ignore_flags' in results and isinstance( results['gt_ignore_flags'], list): results['gt_ignore_flags'] = np.array( results['gt_ignore_flags'], dtype=bool) if 'bboxes' in results: if isinstance(results['bboxes'], list): results['bboxes'] = np.array( results['bboxes'], dtype=np.float32) results['bboxes'] = results['bboxes'].reshape(-1, 4) results['bboxes'] = HorizontalBoxes(results['bboxes']) # filter label_fields if self.filter_lost_elements: for label in self.origin_label_fields: results[label] = np.array( [results[label][i] for i in results['idx_mapper']]) if 'masks' in results: assert ori_masks is not None results['masks'] = np.array( [results['masks'][i] for i in results['idx_mapper']]) 
results['masks'] = ori_masks.__class__( results['masks'], ori_masks.height, ori_masks.width) if (not len(results['idx_mapper']) and self.skip_img_without_anno): return None elif 'masks' in results: results['masks'] = ori_masks.__class__(results['masks'], ori_masks.height, ori_masks.width) return results def __repr__(self) -> str: repr_str = self.__class__.__name__ + f'(transforms={self.transforms})' return repr_str @TRANSFORMS.register_module() @avoid_cache_randomness class RandomCenterCropPad(BaseTransform): """Random center crop and random around padding for CornerNet. This operation generates randomly cropped image from the original image and pads it simultaneously. Different from :class:`RandomCrop`, the output shape may not equal to ``crop_size`` strictly. We choose a random value from ``ratios`` and the output shape could be larger or smaller than ``crop_size``. The padding operation is also different from :class:`Pad`, here we use around padding instead of right-bottom padding. The relation between output image (padding image) and original image: .. code:: text output image +----------------------------+ | padded area | +------|----------------------------|----------+ | | cropped area | | | | +---------------+ | | | | | . center | | | original image | | | range | | | | | +---------------+ | | +------|----------------------------|----------+ | padded area | +----------------------------+ There are 5 main areas in the figure: - output image: output image of this operation, also called padding image in following instruction. - original image: input image of this operation. - padded area: non-intersect area of output image and original image. - cropped area: the overlap of output image and original image. - center range: a smaller area where random center chosen from. center range is computed by ``border`` and original image's shape to avoid our random center is too close to original image's border. Also this operation act differently in train and test mode, the summary pipeline is listed below. Train pipeline: 1. Choose a ``random_ratio`` from ``ratios``, the shape of padding image will be ``random_ratio * crop_size``. 2. Choose a ``random_center`` in center range. 3. Generate padding image with center matches the ``random_center``. 4. Initialize the padding image with pixel value equals to ``mean``. 5. Copy the cropped area to padding image. 6. Refine annotations. Test pipeline: 1. Compute output shape according to ``test_pad_mode``. 2. Generate padding image with center matches the original image center. 3. Initialize the padding image with pixel value equals to ``mean``. 4. Copy the ``cropped area`` to padding image. Required Keys: - img (np.float32) - img_shape (tuple) - gt_bboxes (BaseBoxes[torch.float32]) (optional) - gt_bboxes_labels (np.int64) (optional) - gt_ignore_flags (bool) (optional) Modified Keys: - img (np.float32) - img_shape (tuple) - gt_bboxes (BaseBoxes[torch.float32]) (optional) - gt_bboxes_labels (np.int64) (optional) - gt_ignore_flags (bool) (optional) Args: crop_size (tuple, optional): expected size after crop, final size will computed according to ratio. Requires (width, height) in train mode, and None in test mode. ratios (tuple, optional): random select a ratio from tuple and crop image to (crop_size[0] * ratio) * (crop_size[1] * ratio). Only available in train mode. Defaults to (0.9, 1.0, 1.1). border (int, optional): max distance from center select area to image border. Only available in train mode. Defaults to 128. 
mean (sequence, optional): Mean values of 3 channels. std (sequence, optional): Std values of 3 channels. to_rgb (bool, optional): Whether to convert the image from BGR to RGB. test_mode (bool): whether involve random variables in transform. In train mode, crop_size is fixed, center coords and ratio is random selected from predefined lists. In test mode, crop_size is image's original shape, center coords and ratio is fixed. Defaults to False. test_pad_mode (tuple, optional): padding method and padding shape value, only available in test mode. Default is using 'logical_or' with 127 as padding shape value. - 'logical_or': final_shape = input_shape | padding_shape_value - 'size_divisor': final_shape = int( ceil(input_shape / padding_shape_value) * padding_shape_value) Defaults to ('logical_or', 127). test_pad_add_pix (int): Extra padding pixel in test mode. Defaults to 0. bbox_clip_border (bool): Whether clip the objects outside the border of the image. Defaults to True. """ def __init__(self, crop_size: Optional[tuple] = None, ratios: Optional[tuple] = (0.9, 1.0, 1.1), border: Optional[int] = 128, mean: Optional[Sequence] = None, std: Optional[Sequence] = None, to_rgb: Optional[bool] = None, test_mode: bool = False, test_pad_mode: Optional[tuple] = ('logical_or', 127), test_pad_add_pix: int = 0, bbox_clip_border: bool = True) -> None: if test_mode: assert crop_size is None, 'crop_size must be None in test mode' assert ratios is None, 'ratios must be None in test mode' assert border is None, 'border must be None in test mode' assert isinstance(test_pad_mode, (list, tuple)) assert test_pad_mode[0] in ['logical_or', 'size_divisor'] else: assert isinstance(crop_size, (list, tuple)) assert crop_size[0] > 0 and crop_size[1] > 0, ( 'crop_size must > 0 in train mode') assert isinstance(ratios, (list, tuple)) assert test_pad_mode is None, ( 'test_pad_mode must be None in train mode') self.crop_size = crop_size self.ratios = ratios self.border = border # We do not set default value to mean, std and to_rgb because these # hyper-parameters are easy to forget but could affect the performance. # Please use the same setting as Normalize for performance assurance. assert mean is not None and std is not None and to_rgb is not None self.to_rgb = to_rgb self.input_mean = mean self.input_std = std if to_rgb: self.mean = mean[::-1] self.std = std[::-1] else: self.mean = mean self.std = std self.test_mode = test_mode self.test_pad_mode = test_pad_mode self.test_pad_add_pix = test_pad_add_pix self.bbox_clip_border = bbox_clip_border def _get_border(self, border, size): """Get final border for the target size. This function generates a ``final_border`` according to image's shape. The area between ``final_border`` and ``size - final_border`` is the ``center range``. We randomly choose center from the ``center range`` to avoid our random center is too close to original image's border. Also ``center range`` should be larger than 0. Args: border (int): The initial border, default is 128. size (int): The width or height of original image. Returns: int: The final border. """ k = 2 * border / size i = pow(2, np.ceil(np.log2(np.ceil(k))) + (k == int(k))) return border // i def _filter_boxes(self, patch, boxes): """Check whether the center of each box is in the patch. Args: patch (list[int]): The cropped area, [left, top, right, bottom]. boxes (numpy array, (N x 4)): Ground truth boxes. Returns: mask (numpy array, (N,)): Each box is inside or outside the patch. 
""" center = boxes.centers.numpy() mask = (center[:, 0] > patch[0]) * (center[:, 1] > patch[1]) * ( center[:, 0] < patch[2]) * ( center[:, 1] < patch[3]) return mask def _crop_image_and_paste(self, image, center, size): """Crop image with a given center and size, then paste the cropped image to a blank image with two centers align. This function is equivalent to generating a blank image with ``size`` as its shape. Then cover it on the original image with two centers ( the center of blank image and the random center of original image) aligned. The overlap area is paste from the original image and the outside area is filled with ``mean pixel``. Args: image (np array, H x W x C): Original image. center (list[int]): Target crop center coord. size (list[int]): Target crop size. [target_h, target_w] Returns: cropped_img (np array, target_h x target_w x C): Cropped image. border (np array, 4): The distance of four border of ``cropped_img`` to the original image area, [top, bottom, left, right] patch (list[int]): The cropped area, [left, top, right, bottom]. """ center_y, center_x = center target_h, target_w = size img_h, img_w, img_c = image.shape x0 = max(0, center_x - target_w // 2) x1 = min(center_x + target_w // 2, img_w) y0 = max(0, center_y - target_h // 2) y1 = min(center_y + target_h // 2, img_h) patch = np.array((int(x0), int(y0), int(x1), int(y1))) left, right = center_x - x0, x1 - center_x top, bottom = center_y - y0, y1 - center_y cropped_center_y, cropped_center_x = target_h // 2, target_w // 2 cropped_img = np.zeros((target_h, target_w, img_c), dtype=image.dtype) for i in range(img_c): cropped_img[:, :, i] += self.mean[i] y_slice = slice(cropped_center_y - top, cropped_center_y + bottom) x_slice = slice(cropped_center_x - left, cropped_center_x + right) cropped_img[y_slice, x_slice, :] = image[y0:y1, x0:x1, :] border = np.array([ cropped_center_y - top, cropped_center_y + bottom, cropped_center_x - left, cropped_center_x + right ], dtype=np.float32) return cropped_img, border, patch def _train_aug(self, results): """Random crop and around padding the original image. Args: results (dict): Image infomations in the augment pipeline. Returns: results (dict): The updated dict. """ img = results['img'] h, w, c = img.shape gt_bboxes = results['gt_bboxes'] while True: scale = random.choice(self.ratios) new_h = int(self.crop_size[1] * scale) new_w = int(self.crop_size[0] * scale) h_border = self._get_border(self.border, h) w_border = self._get_border(self.border, w) for i in range(50): center_x = random.randint(low=w_border, high=w - w_border) center_y = random.randint(low=h_border, high=h - h_border) cropped_img, border, patch = self._crop_image_and_paste( img, [center_y, center_x], [new_h, new_w]) if len(gt_bboxes) == 0: results['img'] = cropped_img results['img_shape'] = cropped_img.shape[:2] return results # if image do not have valid bbox, any crop patch is valid. 
mask = self._filter_boxes(patch, gt_bboxes) if not mask.any(): continue results['img'] = cropped_img results['img_shape'] = cropped_img.shape[:2] x0, y0, x1, y1 = patch left_w, top_h = center_x - x0, center_y - y0 cropped_center_x, cropped_center_y = new_w // 2, new_h // 2 # crop bboxes accordingly and clip to the image boundary gt_bboxes = gt_bboxes[mask] gt_bboxes.translate_([ cropped_center_x - left_w - x0, cropped_center_y - top_h - y0 ]) if self.bbox_clip_border: gt_bboxes.clip_([new_h, new_w]) keep = gt_bboxes.is_inside([new_h, new_w]).numpy() gt_bboxes = gt_bboxes[keep] results['gt_bboxes'] = gt_bboxes # ignore_flags if results.get('gt_ignore_flags', None) is not None: gt_ignore_flags = results['gt_ignore_flags'][mask] results['gt_ignore_flags'] = \ gt_ignore_flags[keep] # labels if results.get('gt_bboxes_labels', None) is not None: gt_labels = results['gt_bboxes_labels'][mask] results['gt_bboxes_labels'] = gt_labels[keep] if 'gt_masks' in results or 'gt_seg_map' in results: raise NotImplementedError( 'RandomCenterCropPad only supports bbox.') return results def _test_aug(self, results): """Around padding the original image without cropping. The padding mode and value are from ``test_pad_mode``. Args: results (dict): Image infomations in the augment pipeline. Returns: results (dict): The updated dict. """ img = results['img'] h, w, c = img.shape if self.test_pad_mode[0] in ['logical_or']: # self.test_pad_add_pix is only used for centernet target_h = (h | self.test_pad_mode[1]) + self.test_pad_add_pix target_w = (w | self.test_pad_mode[1]) + self.test_pad_add_pix elif self.test_pad_mode[0] in ['size_divisor']: divisor = self.test_pad_mode[1] target_h = int(np.ceil(h / divisor)) * divisor target_w = int(np.ceil(w / divisor)) * divisor else: raise NotImplementedError( 'RandomCenterCropPad only support two testing pad mode:' 'logical-or and size_divisor.') cropped_img, border, _ = self._crop_image_and_paste( img, [h // 2, w // 2], [target_h, target_w]) results['img'] = cropped_img results['img_shape'] = cropped_img.shape[:2] results['border'] = border return results @autocast_box_type() def transform(self, results: dict) -> dict: img = results['img'] assert img.dtype == np.float32, ( 'RandomCenterCropPad needs the input image of dtype np.float32,' ' please set "to_float32=True" in "LoadImageFromFile" pipeline') h, w, c = img.shape assert c == len(self.mean) if self.test_mode: return self._test_aug(results) else: return self._train_aug(results) def __repr__(self): repr_str = self.__class__.__name__ repr_str += f'(crop_size={self.crop_size}, ' repr_str += f'ratios={self.ratios}, ' repr_str += f'border={self.border}, ' repr_str += f'mean={self.input_mean}, ' repr_str += f'std={self.input_std}, ' repr_str += f'to_rgb={self.to_rgb}, ' repr_str += f'test_mode={self.test_mode}, ' repr_str += f'test_pad_mode={self.test_pad_mode}, ' repr_str += f'bbox_clip_border={self.bbox_clip_border})' return repr_str @TRANSFORMS.register_module() class CutOut(BaseTransform): """CutOut operation. Randomly drop some regions of image used in `Cutout <https://arxiv.org/abs/1708.04552>`_. Required Keys: - img Modified Keys: - img Args: n_holes (int or tuple[int, int]): Number of regions to be dropped. If it is given as a list, number of holes will be randomly selected from the closed interval [``n_holes[0]``, ``n_holes[1]``]. cutout_shape (tuple[int, int] or list[tuple[int, int]], optional): The candidate shape of dropped regions. 
It can be ``tuple[int, int]`` to use a fixed cutout shape, or ``list[tuple[int, int]]`` to randomly choose shape from the list. Defaults to None. cutout_ratio (tuple[float, float] or list[tuple[float, float]], optional): The candidate ratio of dropped regions. It can be ``tuple[float, float]`` to use a fixed ratio or ``list[tuple[float, float]]`` to randomly choose ratio from the list. Please note that ``cutout_shape`` and ``cutout_ratio`` cannot be both given at the same time. Defaults to None. fill_in (tuple[float, float, float] or tuple[int, int, int]): The value of pixel to fill in the dropped regions. Defaults to (0, 0, 0). """ def __init__( self, n_holes: Union[int, Tuple[int, int]], cutout_shape: Optional[Union[Tuple[int, int], List[Tuple[int, int]]]] = None, cutout_ratio: Optional[Union[Tuple[float, float], List[Tuple[float, float]]]] = None, fill_in: Union[Tuple[float, float, float], Tuple[int, int, int]] = (0, 0, 0) ) -> None: assert (cutout_shape is None) ^ (cutout_ratio is None), \ 'Either cutout_shape or cutout_ratio should be specified.' assert (isinstance(cutout_shape, (list, tuple)) or isinstance(cutout_ratio, (list, tuple))) if isinstance(n_holes, tuple): assert len(n_holes) == 2 and 0 <= n_holes[0] < n_holes[1] else: n_holes = (n_holes, n_holes) self.n_holes = n_holes self.fill_in = fill_in self.with_ratio = cutout_ratio is not None self.candidates = cutout_ratio if self.with_ratio else cutout_shape if not isinstance(self.candidates, list): self.candidates = [self.candidates] @autocast_box_type() def transform(self, results: dict) -> dict: """Call function to drop some regions of image.""" h, w, c = results['img'].shape n_holes = np.random.randint(self.n_holes[0], self.n_holes[1] + 1) for _ in range(n_holes): x1 = np.random.randint(0, w) y1 = np.random.randint(0, h) index = np.random.randint(0, len(self.candidates)) if not self.with_ratio: cutout_w, cutout_h = self.candidates[index] else: cutout_w = int(self.candidates[index][0] * w) cutout_h = int(self.candidates[index][1] * h) x2 = np.clip(x1 + cutout_w, 0, w) y2 = np.clip(y1 + cutout_h, 0, h) results['img'][y1:y2, x1:x2, :] = self.fill_in return results def __repr__(self): repr_str = self.__class__.__name__ repr_str += f'(n_holes={self.n_holes}, ' repr_str += (f'cutout_ratio={self.candidates}, ' if self.with_ratio else f'cutout_shape={self.candidates}, ') repr_str += f'fill_in={self.fill_in})' return repr_str @TRANSFORMS.register_module() class Mosaic(BaseTransform): """Mosaic augmentation. Given 4 images, mosaic transform combines them into one output image. The output image is composed of the parts from each sub- image. .. code:: text mosaic transform center_x +------------------------------+ | pad | pad | | +-----------+ | | | | | | | image1 |--------+ | | | | | | | | | image2 | | center_y |----+-------------+-----------| | | cropped | | |pad | image3 | image4 | | | | | +----|-------------+-----------+ | | +-------------+ The mosaic transform steps are as follows: 1. Choose the mosaic center as the intersections of 4 images 2. Get the left top image according to the index, and randomly sample another 3 images from the custom dataset. 3. 
Sub image will be cropped if image is larger than mosaic patch Required Keys: - img - gt_bboxes (BaseBoxes[torch.float32]) (optional) - gt_bboxes_labels (np.int64) (optional) - gt_ignore_flags (bool) (optional) - mix_results (List[dict]) Modified Keys: - img - img_shape - gt_bboxes (optional) - gt_bboxes_labels (optional) - gt_ignore_flags (optional) Args: img_scale (Sequence[int]): Image size before mosaic pipeline of single image. The shape order should be (width, height). Defaults to (640, 640). center_ratio_range (Sequence[float]): Center ratio range of mosaic output. Defaults to (0.5, 1.5). bbox_clip_border (bool, optional): Whether to clip the objects outside the border of the image. In some dataset like MOT17, the gt bboxes are allowed to cross the border of images. Therefore, we don't need to clip the gt bboxes in these cases. Defaults to True. pad_val (int): Pad value. Defaults to 114. prob (float): Probability of applying this transformation. Defaults to 1.0. """ def __init__(self, img_scale: Tuple[int, int] = (640, 640), center_ratio_range: Tuple[float, float] = (0.5, 1.5), bbox_clip_border: bool = True, pad_val: float = 114.0, prob: float = 1.0) -> None: assert isinstance(img_scale, tuple) assert 0 <= prob <= 1.0, 'The probability should be in range [0,1]. ' \ f'got {prob}.'
log_img_scale(img_scale, skip_square=True, shape_order='wh')
5
2023-12-11 15:23:03+00:00
24k
chinhsuanwu/ifusion
model/zero123.py
[ { "identifier": "inject_trainable_lora_extended", "path": "ldm/lora.py", "snippet": "def inject_trainable_lora_extended(\n model: nn.Module,\n target_replace_module: Set[str] = UNET_EXTENDED_TARGET_REPLACE,\n r: int = 4,\n loras=None, # path to lora .pt\n eval=True,\n):\n \"\"\"\n ...
import itertools import torch import torch.nn as nn from dataclasses import dataclass from diffusers import DDIMScheduler from einops import rearrange from omegaconf import OmegaConf from ldm.lora import ( inject_trainable_lora_extended, monkeypatch_remove_lora, save_lora_weight, ) from ldm.models.diffusion.ddpm import LatentDiffusion from ldm.util import load_model_from_config from util.pose import make_T from util.typing import * from util.util import default
15,893
self.config.pretrained_model_name_or_path, device=self.device, vram_O=self.config.vram_O, ) for p in self.model.parameters(): p.requires_grad_(False) self.num_train_timesteps = self.pretrained_config.model.params.timesteps self.scheduler = DDIMScheduler( self.num_train_timesteps, self.pretrained_config.model.params.linear_start, self.pretrained_config.model.params.linear_end, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, steps_offset=1, ) self.num_train_timesteps = self.scheduler.config.num_train_timesteps self.set_min_max_steps( min_step_percent=self.config.min_step_percent, max_step_percent=self.config.max_step_percent, ) print("[INFO] Loaded Zero123") @torch.cuda.amp.autocast(enabled=False) def set_min_max_steps( self, min_step_percent: float = 0.02, max_step_percent: float = 0.98 ): self.min_step = int(self.num_train_timesteps * min_step_percent) self.max_step = int(self.num_train_timesteps * max_step_percent) @torch.cuda.amp.autocast(enabled=False) @torch.no_grad() def get_image_embeds( self, image: Float[Tensor, "B 3 256 256"] ) -> Tuple[Float[Tensor, "B 1 768"], Float[Tensor, "B 4 32 32"]]: c_crossattn = self.model.get_learned_conditioning(image.to(self.weights_dtype)) c_concat = self.model.encode_first_stage(image.to(self.weights_dtype)).mode() return c_crossattn, c_concat @torch.cuda.amp.autocast(enabled=False) def encode_image( self, image: Float[Tensor, "B 3 256 256"] ) -> Float[Tensor, "B 4 32 32"]: input_dtype = image.dtype latent = self.model.get_first_stage_encoding( self.model.encode_first_stage(image.to(self.weights_dtype)) ) return latent.to(input_dtype) # [B, 4, 32, 32] Latent space image @torch.cuda.amp.autocast(enabled=False) def decode_latent( self, latent: Float[Tensor, "B 4 H W"], ) -> Float[Tensor, "B 3 512 512"]: input_dtype = latent.dtype image = self.model.decode_first_stage(latent) image = (image * 0.5 + 0.5).clamp(0, 1) return image.to(input_dtype) @staticmethod @torch.no_grad() def make_cond(cond): """Add zeros to the beginning of cond""" return {k: [torch.cat([torch.zeros_like(v), v])] for k, v in cond.items()} @torch.cuda.amp.autocast(enabled=False) @torch.no_grad() def clip_camera_projection( self, theta: Float[Tensor, "B"], azimuth: Float[Tensor, "B"], distance: Float[Tensor, "B"], c_crossattn: Float[Tensor, "B 1 768"], in_deg: bool = False, ): T = make_T(theta, azimuth, distance, in_deg=in_deg).T[:, None, :] clip_emb = self.model.cc_projection(torch.cat([c_crossattn, T], dim=-1)) return clip_emb def inject_lora( self, ckpt_fp: str = None, rank: int = 12, target_replace_module: List[str] = ["CrossAttention", "GEGLU"], eval: bool = False, ): print( f"[INFO] Injecting LoRA from " + (str(ckpt_fp) if ckpt_fp is not None else "scratch"), ) lora_params, _ = inject_trainable_lora_extended( self.model.model, target_replace_module=set(target_replace_module), r=rank, loras=ckpt_fp, eval=eval, ) if not eval: self.require_grad_params += itertools.chain(*lora_params) return self def save_lora( self, ckpt_fp: str, target_replace_module: List[str] = ["CrossAttention", "GEGLU"], ): save_lora_weight( self.model.model, ckpt_fp, target_replace_module=set(target_replace_module), ) print(f"[INFO] Saved LoRA to {ckpt_fp}") def remove_lora(self): print("[INFO] Removing LoRA")
class Zero123(nn.Module): @dataclass class Config: pretrained_model_name_or_path: str = "ldm/ckpt/zero123-xl.ckpt" pretrained_config: str = "ldm/ckpt/sd-objaverse-finetune-c_concat-256.yaml" vram_O: bool = False min_step_percent: float = 0.02 max_step_percent: float = 0.98 config: Config def __init__(self, **kwargs) -> None: super().__init__() self.config = OmegaConf.structured(self.Config(**kwargs)) self.device = "cuda" self.require_grad_params = [] self.configure() def configure(self) -> None: print("[INFO] Loading Zero123...") self.pretrained_config = OmegaConf.load(self.config.pretrained_config) self.weights_dtype = torch.float32 self.model: LatentDiffusion = load_model_from_config( self.pretrained_config, self.config.pretrained_model_name_or_path, device=self.device, vram_O=self.config.vram_O, ) for p in self.model.parameters(): p.requires_grad_(False) self.num_train_timesteps = self.pretrained_config.model.params.timesteps self.scheduler = DDIMScheduler( self.num_train_timesteps, self.pretrained_config.model.params.linear_start, self.pretrained_config.model.params.linear_end, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, steps_offset=1, ) self.num_train_timesteps = self.scheduler.config.num_train_timesteps self.set_min_max_steps( min_step_percent=self.config.min_step_percent, max_step_percent=self.config.max_step_percent, ) print("[INFO] Loaded Zero123") @torch.cuda.amp.autocast(enabled=False) def set_min_max_steps( self, min_step_percent: float = 0.02, max_step_percent: float = 0.98 ): self.min_step = int(self.num_train_timesteps * min_step_percent) self.max_step = int(self.num_train_timesteps * max_step_percent) @torch.cuda.amp.autocast(enabled=False) @torch.no_grad() def get_image_embeds( self, image: Float[Tensor, "B 3 256 256"] ) -> Tuple[Float[Tensor, "B 1 768"], Float[Tensor, "B 4 32 32"]]: c_crossattn = self.model.get_learned_conditioning(image.to(self.weights_dtype)) c_concat = self.model.encode_first_stage(image.to(self.weights_dtype)).mode() return c_crossattn, c_concat @torch.cuda.amp.autocast(enabled=False) def encode_image( self, image: Float[Tensor, "B 3 256 256"] ) -> Float[Tensor, "B 4 32 32"]: input_dtype = image.dtype latent = self.model.get_first_stage_encoding( self.model.encode_first_stage(image.to(self.weights_dtype)) ) return latent.to(input_dtype) # [B, 4, 32, 32] Latent space image @torch.cuda.amp.autocast(enabled=False) def decode_latent( self, latent: Float[Tensor, "B 4 H W"], ) -> Float[Tensor, "B 3 512 512"]: input_dtype = latent.dtype image = self.model.decode_first_stage(latent) image = (image * 0.5 + 0.5).clamp(0, 1) return image.to(input_dtype) @staticmethod @torch.no_grad() def make_cond(cond): """Add zeros to the beginning of cond""" return {k: [torch.cat([torch.zeros_like(v), v])] for k, v in cond.items()} @torch.cuda.amp.autocast(enabled=False) @torch.no_grad() def clip_camera_projection( self, theta: Float[Tensor, "B"], azimuth: Float[Tensor, "B"], distance: Float[Tensor, "B"], c_crossattn: Float[Tensor, "B 1 768"], in_deg: bool = False, ): T = make_T(theta, azimuth, distance, in_deg=in_deg).T[:, None, :] clip_emb = self.model.cc_projection(torch.cat([c_crossattn, T], dim=-1)) return clip_emb def inject_lora( self, ckpt_fp: str = None, rank: int = 12, target_replace_module: List[str] = ["CrossAttention", "GEGLU"], eval: bool = False, ): print( f"[INFO] Injecting LoRA from " + (str(ckpt_fp) if ckpt_fp is not None else "scratch"), ) lora_params, _ = inject_trainable_lora_extended( self.model.model, 
target_replace_module=set(target_replace_module), r=rank, loras=ckpt_fp, eval=eval, ) if not eval: self.require_grad_params += itertools.chain(*lora_params) return self def save_lora( self, ckpt_fp: str, target_replace_module: List[str] = ["CrossAttention", "GEGLU"], ): save_lora_weight( self.model.model, ckpt_fp, target_replace_module=set(target_replace_module), ) print(f"[INFO] Saved LoRA to {ckpt_fp}") def remove_lora(self): print("[INFO] Removing LoRA")
monkeypatch_remove_lora(self.model.model)
1
2023-12-17 12:45:38+00:00
24k
penghao-wu/vstar
VisualSearch/utils/dataset.py
[ { "identifier": "conversation", "path": "VisualSearch/model/llava/conversation.py", "snippet": "class SeparatorStyle(Enum):\nclass Conversation:\n SINGLE = auto()\n TWO = auto()\n MPT = auto()\n PLAIN = auto()\n LLAMA_2 = auto()\n W, H = image.size\n ...
import glob import os import random import cv2 import numpy as np import torch import torch.nn.functional as F from PIL import Image from pycocotools import mask from transformers import CLIPImageProcessor from transformers import OwlViTProcessor from VisualSearch.model.llava import conversation as conversation_lib from VisualSearch.model.llava.constants import (DEFAULT_IMAGE_TOKEN, IGNORE_INDEX, IMAGE_TOKEN_INDEX) from VisualSearch.model.llava.mm_utils import tokenizer_image_token from VisualSearch.utils.data_processing import get_mask_from_json from VisualSearch.utils.refer import REFER from VisualSearch.utils.refer_seg_dataset import ReferSegDataset from VisualSearch.utils.general_segdet_dataset import SegDetDataset from VisualSearch.utils.mixed_grounding_dataset import MixedGroundingDataset from VisualSearch.utils.vqa_dataset import VQADataset from VisualSearch.utils.utils import (DEFAULT_IM_END_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IMAGE_TOKEN) from VisualSearch.utils.utils import box_xyxy_to_cxcywh, expand2square
15,725
print( "conversation: ", conversation, "tokenizer.decode(z): ", tokenizer.decode(z), ) if cur_len < tokenizer.model_max_length: assert cur_len == total_len if inferences[0] == False: truncate_len = tokenizer.model_max_length - 255 if input_ids.shape[1] > truncate_len: input_ids = input_ids[:, :truncate_len] targets = targets[:, :truncate_len] attention_masks = attention_masks[:, :truncate_len] return { "image_paths": image_path_list, "images": torch.stack(images_list, dim=0), "images_clip": torch.stack(images_clip_list, dim=0), "input_ids": input_ids, "labels": targets, "bboxes_labels_list": bboxes_labels_list, "bboxes_valid_list": torch.tensor(bboxes_valid_list), "masks_valid_list": masks_valid_list, "attention_masks": attention_masks, "masks_list": masks_list, "label_list": label_list, "resize_list": resize_list, "offset": torch.LongTensor(offset_list), "questions_list": questions_list, "sampled_classes_list": sampled_classes_list, "inference": inferences[0], "conversation_list": conversation_list, } class HybridDataset(torch.utils.data.Dataset): pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1) pixel_std = torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1) img_size = 1024 ignore_label = 255 def __init__( self, base_dir, tokenizer, vision_tower, samples_per_epoch=500 * 8 * 2 * 10, precision: str = "fp32", num_classes_per_sample: int = 3, exclude_val=False, dataset="general_segdet||refer_seg||vqa||reason_seg", sample_rate=[9, 3, 3, 1], general_segdet_data="objects365||cocostuff||paco_lvis", general_segdet_sample_rate=[2,1,1], refer_seg_data="refclef||refcoco||refcoco+||refcocog", vqa_data="possible_locations_conv_86k||llava_instruct_80k", vqa_sample_rate=[2,1], ): self.exclude_val = exclude_val self.dataset = dataset self.samples_per_epoch = samples_per_epoch self.num_classes_per_sample = num_classes_per_sample sample_rate = np.array(sample_rate) self.sample_rate = sample_rate / sample_rate.sum() self.base_dir = base_dir self.tokenizer = tokenizer self.precision = precision self.datasets = dataset.split("||") self.all_datasets = [] for dataset in self.datasets: if dataset == "general_segdet": self.all_datasets.append( SegDetDataset( base_dir, tokenizer, vision_tower, samples_per_epoch, precision, num_classes_per_sample, exclude_val, general_segdet_data, general_segdet_sample_rate, ) ) elif dataset == "refer_seg": self.all_datasets.append( ReferSegDataset( base_dir, tokenizer, vision_tower, samples_per_epoch, precision, num_classes_per_sample, exclude_val, refer_seg_data, ) ) elif dataset == "vqa": self.all_datasets.append( VQADataset( base_dir, tokenizer, vision_tower, samples_per_epoch, precision, num_classes_per_sample, exclude_val, vqa_data, vqa_sample_rate, ) ) elif dataset == "mixed_grounding": self.all_datasets.append(
cv2.setNumThreads(1) def collate_fn( batch, tokenizer=None, conv_type="llava_v1", use_mm_start_end=True, local_rank=-1 ): image_path_list = [] images_list = [] images_clip_list = [] conversation_list = [] masks_list = [] label_list = [] bboxes_labels_list = [] bboxes_valid_list = [] masks_valid_list = [] resize_list = [] questions_list = [] sampled_classes_list = [] offset_list = [0] cnt = 0 inferences = [] for ( image_path, images, images_clip, conversations, masks, label, bboxes_labels, bboxes_valid, masks_valid, resize, questions, sampled_classes, inference, ) in batch: image_path_list.append(image_path) images_list.append(images) images_clip_list.append(images_clip) conversation_list.extend(conversations) label_list.append(label) masks_list.append(masks.float()) bboxes_labels_list.extend(bboxes_labels) bboxes_valid_list.extend(bboxes_valid) masks_valid_list.append(torch.tensor(masks_valid)) resize_list.append(resize) questions_list.append(questions) sampled_classes_list.append(sampled_classes) cnt += len(conversations) offset_list.append(cnt) inferences.append(inference) if use_mm_start_end: # replace <image> token for i in range(len(conversation_list)): replace_token = DEFAULT_IMAGE_TOKEN replace_token = ( DEFAULT_IM_START_TOKEN + replace_token + DEFAULT_IM_END_TOKEN ) conversation_list[i] = conversation_list[i].replace( DEFAULT_IMAGE_TOKEN, replace_token ) input_ids = [ tokenizer_image_token(prompt, tokenizer, return_tensors="pt") for prompt in conversation_list ] input_ids = torch.nn.utils.rnn.pad_sequence( input_ids, batch_first=True, padding_value=tokenizer.pad_token_id ) attention_masks = input_ids.ne(tokenizer.pad_token_id) for i in range(len(bboxes_valid_list)): bboxes_valid = bboxes_valid_list[i] attention_mask = attention_masks[i] if not bboxes_valid: attention_mask = attention_mask & input_ids[i].ne(tokenizer("[LOC]", add_special_tokens=False).input_ids[0]) attention_masks[i] = attention_mask conv = conversation_lib.default_conversation.copy() targets = input_ids.clone() if conv_type == "llava_v1": sep = conv.sep + conv.roles[1] + ": " else: sep = "[/INST] " for conversation, target in zip(conversation_list, targets): total_len = int(target.ne(tokenizer.pad_token_id).sum()) rounds = conversation.split(conv.sep2) cur_len = 1 target[:cur_len] = IGNORE_INDEX for i, rou in enumerate(rounds): if rou == "": break parts = rou.split(sep) # if len(parts) != 2: # break assert len(parts) == 2, (len(parts), rou) parts[0] += sep if DEFAULT_IMAGE_TOKEN in conversation: round_len = len(tokenizer_image_token(rou, tokenizer)) instruction_len = len(tokenizer_image_token(parts[0], tokenizer)) - 2 else: round_len = len(tokenizer(rou).input_ids) instruction_len = len(tokenizer(parts[0]).input_ids) - 2 target[cur_len : cur_len + instruction_len] = IGNORE_INDEX cur_len += round_len target[cur_len:] = IGNORE_INDEX if False: z = target.clone() z = torch.where(z == IGNORE_INDEX, tokenizer.unk_token_id, z) if local_rank == 0: print( "conversation: ", conversation, "tokenizer.decode(z): ", tokenizer.decode(z), ) if cur_len < tokenizer.model_max_length: assert cur_len == total_len if inferences[0] == False: truncate_len = tokenizer.model_max_length - 255 if input_ids.shape[1] > truncate_len: input_ids = input_ids[:, :truncate_len] targets = targets[:, :truncate_len] attention_masks = attention_masks[:, :truncate_len] return { "image_paths": image_path_list, "images": torch.stack(images_list, dim=0), "images_clip": torch.stack(images_clip_list, dim=0), "input_ids": input_ids, "labels": targets, 
"bboxes_labels_list": bboxes_labels_list, "bboxes_valid_list": torch.tensor(bboxes_valid_list), "masks_valid_list": masks_valid_list, "attention_masks": attention_masks, "masks_list": masks_list, "label_list": label_list, "resize_list": resize_list, "offset": torch.LongTensor(offset_list), "questions_list": questions_list, "sampled_classes_list": sampled_classes_list, "inference": inferences[0], "conversation_list": conversation_list, } class HybridDataset(torch.utils.data.Dataset): pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1) pixel_std = torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1) img_size = 1024 ignore_label = 255 def __init__( self, base_dir, tokenizer, vision_tower, samples_per_epoch=500 * 8 * 2 * 10, precision: str = "fp32", num_classes_per_sample: int = 3, exclude_val=False, dataset="general_segdet||refer_seg||vqa||reason_seg", sample_rate=[9, 3, 3, 1], general_segdet_data="objects365||cocostuff||paco_lvis", general_segdet_sample_rate=[2,1,1], refer_seg_data="refclef||refcoco||refcoco+||refcocog", vqa_data="possible_locations_conv_86k||llava_instruct_80k", vqa_sample_rate=[2,1], ): self.exclude_val = exclude_val self.dataset = dataset self.samples_per_epoch = samples_per_epoch self.num_classes_per_sample = num_classes_per_sample sample_rate = np.array(sample_rate) self.sample_rate = sample_rate / sample_rate.sum() self.base_dir = base_dir self.tokenizer = tokenizer self.precision = precision self.datasets = dataset.split("||") self.all_datasets = [] for dataset in self.datasets: if dataset == "general_segdet": self.all_datasets.append( SegDetDataset( base_dir, tokenizer, vision_tower, samples_per_epoch, precision, num_classes_per_sample, exclude_val, general_segdet_data, general_segdet_sample_rate, ) ) elif dataset == "refer_seg": self.all_datasets.append( ReferSegDataset( base_dir, tokenizer, vision_tower, samples_per_epoch, precision, num_classes_per_sample, exclude_val, refer_seg_data, ) ) elif dataset == "vqa": self.all_datasets.append( VQADataset( base_dir, tokenizer, vision_tower, samples_per_epoch, precision, num_classes_per_sample, exclude_val, vqa_data, vqa_sample_rate, ) ) elif dataset == "mixed_grounding": self.all_datasets.append(
MixedGroundingDataset(
9
2023-12-15 14:58:24+00:00
24k
sinoyou/nelf-pro
nerfstudio/viewer/server/viewer_utils.py
[ { "identifier": "Cameras", "path": "nerfstudio/cameras/cameras.py", "snippet": "class Cameras(TensorDataclass):\n \"\"\"Dataparser outputs for the image dataset and the ray generator.\n\n Note: currently only supports cameras with the same principal points and types. The reason we type\n the fo...
import base64 import enum import os import sys import threading import time import warnings import cv2 import numpy as np import torch from pathlib import Path from typing import Any, Dict, Optional, Tuple from cryptography.utils import CryptographyDeprecationWarning from rich.console import Console from nerfstudio.cameras.cameras import Cameras from nerfstudio.cameras.rays import RayBundle from nerfstudio.configs import base_config as cfg from nerfstudio.data.datasets.base_dataset import InputDataset from nerfstudio.models.base_model import Model from nerfstudio.utils import colormaps, profiler, writer from nerfstudio.utils.decorators import check_main_thread, decorate_all from nerfstudio.utils.images import BasicImages from nerfstudio.utils.io import load_from_json, write_to_json from nerfstudio.utils.writer import GLOBAL_BUFFER, EventName, TimeWriter from nerfstudio.viewer.server.subprocess import run_viewer_bridge_server_as_subprocess from nerfstudio.viewer.server.utils import get_intrinsics_matrix_and_camera_to_world_h from nerfstudio.viewer.server.visualizer import Viewer
21,323
""" has_temporal_distortion = getattr(graph, "temporal_distortion", None) is not None self.vis["model/has_temporal_distortion"].write(str(has_temporal_distortion).lower()) is_training = self.vis["renderingState/isTraining"].read() self.step = step self._check_camera_path_payload(trainer, step) camera_object = self._get_camera_object() if camera_object is None: return if is_training is None or is_training: # in training mode if self.camera_moving: # if the camera is moving, then we pause training and update camera continuously while self.camera_moving: self._render_image_in_viewer(camera_object, graph, is_training) camera_object = self._get_camera_object() else: # if the camera is not moving, then we approximate how many training steps need to be taken # to render at a FPS defined by self.static_fps. if EventName.TRAIN_RAYS_PER_SEC.value in GLOBAL_BUFFER["events"]: train_rays_per_sec = GLOBAL_BUFFER["events"][EventName.TRAIN_RAYS_PER_SEC.value]["avg"] target_train_util = self.vis["renderingState/targetTrainUtil"].read() if target_train_util is None: target_train_util = 0.9 batches_per_sec = train_rays_per_sec / num_rays_per_batch num_steps = max(int(1 / self.static_fps * batches_per_sec), 1) else: num_steps = 1 if step % num_steps == 0: self._render_image_in_viewer(camera_object, graph, is_training) else: # in pause training mode, enter render loop with set graph local_step = step run_loop = not is_training while run_loop: # if self._is_render_step(local_step) and step > 0: if step > 0: self._render_image_in_viewer(camera_object, graph, is_training) camera_object = self._get_camera_object() is_training = self.vis["renderingState/isTraining"].read() self._check_camera_path_payload(trainer, step) run_loop = not is_training local_step += 1 def check_interrupt(self, frame, event, arg): # pylint: disable=unused-argument """Raises interrupt when flag has been set and not already on lowest resolution. Used in conjunction with SetTrace. """ if event == "line": if self.check_interrupt_vis and not self.camera_moving: raise IOChangeException return self.check_interrupt def _get_camera_object(self): """Gets the camera object from the viewer and updates the movement state if it has changed.""" data = self.vis["renderingState/camera"].read() if data is None: return None camera_object = data["object"] if self.prev_camera_matrix is not None and np.allclose(camera_object["matrix"], self.prev_camera_matrix): self.camera_moving = False else: self.prev_camera_matrix = camera_object["matrix"] self.camera_moving = True output_type = self.vis["renderingState/output_choice"].read() if output_type is None: output_type = OutputTypes.INIT if self.prev_output_type != output_type: self.camera_moving = True colormap_type = self.vis["renderingState/colormap_choice"].read() if colormap_type is None: colormap_type = ColormapTypes.INIT if self.prev_colormap_type != colormap_type: self.camera_moving = True return camera_object def _apply_colormap(self, outputs: Dict[str, Any], colors: torch.Tensor = None, eps=1e-6): """Determines which colormap to use based on set colormap type Args: outputs: the output tensors for which to apply colormaps on colors: is only set if colormap is for semantics. Defaults to None. 
eps: epsilon to handle floating point comparisons """ if self.output_list: reformatted_output = self._process_invalid_output(self.prev_output_type) # default for rgb images if self.prev_colormap_type == ColormapTypes.DEFAULT and outputs[reformatted_output].shape[-1] == 3: return outputs[reformatted_output] # rendering depth outputs if self.prev_colormap_type == ColormapTypes.DEPTH or ( self.prev_colormap_type == ColormapTypes.DEFAULT and outputs[reformatted_output].dtype == torch.float and (torch.max(outputs[reformatted_output]) - 1.0) > eps # handle floating point arithmetic ): accumulation_str = ( OutputTypes.ACCUMULATION if OutputTypes.ACCUMULATION in self.output_list else OutputTypes.ACCUMULATION_FINE )
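The static-camera branch of `update_scene` above throttles viewer renders by estimating how many training steps fit into one render interval. A minimal standalone sketch of that arithmetic, using hypothetical throughput numbers (in the viewer the real values come from the `GLOBAL_BUFFER` moving average):

```python
# Hypothetical numbers; in the viewer they come from the event buffer average.
train_rays_per_sec = 200_000.0  # measured training throughput
num_rays_per_batch = 4_096      # rays consumed per training step
static_fps = 1                  # desired render rate while the camera is still

batches_per_sec = train_rays_per_sec / num_rays_per_batch   # ~48.8 steps/sec
# Render once every `num_steps` training steps, so renders land at ~static_fps.
num_steps = max(int(1 / static_fps * batches_per_sec), 1)   # -> 48

for step in range(100):
    if step % num_steps == 0:
        pass  # placeholder for self._render_image_in_viewer(...)
```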
# Copyright 2022 The Nerfstudio Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Code to interface with the `vis/` (the JS viewer). """ from __future__ import annotations warnings.filterwarnings("ignore", category=CryptographyDeprecationWarning) CONSOLE = Console(width=120) def get_viewer_version() -> str: """Get the version of the viewer.""" json_filename = os.path.join(os.path.dirname(__file__), "../app/package.json") version = load_from_json(Path(json_filename))["version"] return version @check_main_thread def setup_viewer(config: cfg.ViewerConfig, log_filename: Path): """Sets up the viewer if enabled Args: config: the configuration to instantiate viewer """ viewer_state = ViewerState(config, log_filename=log_filename) banner_messages = [f"Viewer at: {viewer_state.viewer_url}"] return viewer_state, banner_messages class OutputTypes(str, enum.Enum): """Noncomprehsnive list of output render types""" INIT = "init" RGB = "rgb" RGB_FINE = "rgb_fine" ACCUMULATION = "accumulation" ACCUMULATION_FINE = "accumulation_fine" class ColormapTypes(str, enum.Enum): """Noncomprehsnive list of colormap render types""" INIT = "init" DEFAULT = "default" TURBO = "turbo" DEPTH = "depth" SEMANTIC = "semantic" BOOLEAN = "boolean" class IOChangeException(Exception): """Basic camera exception to interrupt viewer""" class SetTrace: """Basic trace function""" def __init__(self, func): self.func = func def __enter__(self): sys.settrace(self.func) return self def __exit__(self, ext_type, exc_value, traceback): sys.settrace(None) class RenderThread(threading.Thread): """Thread that does all the rendering calls while listening for interrupts Args: state: current viewer state object graph: current checkpoint of model camera_ray_bundle: input rays to pass through the graph to render out """ def __init__(self, state: "ViewerState", graph: Model, camera_ray_bundle: RayBundle): threading.Thread.__init__(self) self.state = state self.graph = graph self.camera_ray_bundle = camera_ray_bundle self.exc = None self.vis_outputs = None def run(self): """run function that renders out images given the current graph and ray bundles. Interlaced with a trace function that checks to see if any I/O changes were registered. Exits and continues program if IOChangeException thrown. 
""" outputs = None try: with SetTrace(self.state.check_interrupt): with torch.no_grad(): outputs = self.graph.get_outputs_for_camera_ray_bundle(self.camera_ray_bundle) except Exception as e: # pylint: disable=broad-except self.exc = e if outputs: self.vis_outputs = outputs self.state.check_done_render = True self.state.check_interrupt_vis = False def join(self, timeout=None): threading.Thread.join(self) if self.exc: raise self.exc class CheckThread(threading.Thread): """Thread the constantly checks for io changes and sets a flag indicating interrupt Args: state: current viewer state object """ def __init__(self, state): threading.Thread.__init__(self) self.state = state def run(self): """Run function that checks to see if any of the existing state has changed (e.g. camera pose/output type/resolutions). Sets the viewer state flag to true to signal to render thread that an interrupt was registered. """ self.state.check_done_render = False while not self.state.check_done_render: # check camera data = self.state.vis["renderingState/camera"].read() if data is not None: camera_object = data["object"] if self.state.prev_camera_matrix is None or ( not np.allclose(camera_object["matrix"], self.state.prev_camera_matrix) and not self.state.prev_moving ): self.state.check_interrupt_vis = True self.state.prev_moving = True return self.state.prev_moving = False # check output type output_type = self.state.vis["renderingState/output_choice"].read() if output_type is None: output_type = OutputTypes.INIT if self.state.prev_output_type != output_type: self.state.check_interrupt_vis = True return # check colormap type colormap_type = self.state.vis["renderingState/colormap_choice"].read() if colormap_type is None: colormap_type = ColormapTypes.INIT if self.state.prev_colormap_type != colormap_type: self.state.check_interrupt_vis = True return # check max render max_resolution = self.state.vis["renderingState/maxResolution"].read() if max_resolution is not None: if self.state.max_resolution != max_resolution: self.state.check_interrupt_vis = True return @decorate_all([check_main_thread]) class ViewerState: """Class to hold state for viewer variables Args: config: viewer setup configuration """ def __init__(self, config: cfg.ViewerConfig, log_filename: Path): self.config = config self.vis = None self.viewer_url = None self.log_filename = log_filename if self.config.launch_bridge_server: # start the viewer bridge server assert self.config.websocket_port is not None self.log_filename.parent.mkdir(exist_ok=True) zmq_port = run_viewer_bridge_server_as_subprocess( self.config.websocket_port, zmq_port=self.config.zmq_port, ip_address=self.config.ip_address, log_filename=str(self.log_filename), ) # TODO(ethan): log the output of the viewer bridge server in a file where the training logs go CONSOLE.line() version = get_viewer_version() websocket_url = f"ws://localhost:{self.config.websocket_port}" self.viewer_url = f"https://viewer.nerf.studio/versions/{version}/?websocket_url={websocket_url}" CONSOLE.rule(characters="=") CONSOLE.print(f"[Public] Open the viewer at {self.viewer_url}") CONSOLE.rule(characters="=") CONSOLE.line() self.vis = Viewer(zmq_port=zmq_port, ip_address=self.config.ip_address) else: assert self.config.zmq_port is not None self.vis = Viewer(zmq_port=self.config.zmq_port, ip_address=self.config.ip_address) # viewer specific variables self.prev_camera_matrix = None self.prev_output_type = OutputTypes.INIT self.prev_colormap_type = ColormapTypes.INIT self.prev_moving = False self.output_type_changed 
= True self.max_resolution = 1000 self.check_interrupt_vis = False self.check_done_render = True self.step = 0 self.static_fps = 1 self.moving_fps = 24 self.camera_moving = False self.prev_camera_timestamp = 0 self.probe_config = None self.output_list = None def _pick_drawn_image_idxs(self, total_num: int) -> list[int]: """Determine indicies of images to display in viewer. Args: total_num: total number of training images. Returns: List of indices from [0, total_num-1]. """ if self.config.max_num_display_images < 0: num_display_images = total_num else: num_display_images = min(self.config.max_num_display_images, total_num) # draw indices, roughly evenly spaced return np.linspace(0, total_num - 1, num_display_images, dtype=np.int32).tolist() def init_scene(self, dataset: InputDataset, start_train=True) -> None: """Draw some images and the scene aabb in the viewer. Args: dataset: dataset to render in the scene start_train: whether to start train when viewer init; if False, only displays dataset until resume train is toggled """ # set the config base dir self.vis["renderingState/config_base_dir"].write(str(self.log_filename.parents[0])) # clear the current scene self.vis["sceneState/sceneBox"].delete() self.vis["sceneState/cameras"].delete() # draw the training cameras and images image_indices = self._pick_drawn_image_idxs(len(dataset)) for idx in image_indices: image = dataset[idx]["image"] if isinstance(image, BasicImages): bgr = image.images[0][..., [2, 1, 0]] else: bgr = image[..., [2, 1, 0]] camera_json = dataset.cameras.to_json(camera_idx=idx, image=bgr, max_size=100) self.vis[f"sceneState/cameras/{idx:06d}"].write(camera_json) # draw the scene box (i.e., the bounding box) json_ = dataset.scene_box.to_json() self.vis["sceneState/sceneBox"].write(json_) # set the initial state whether to train or not self.vis["renderingState/isTraining"].write(start_train) # self.vis["renderingState/render_time"].write(str(0)) self.probe_config = dataset.cameras.probe_config # set the properties of the camera # self.vis["renderingState/camera"].write(json_) # set the main camera intrinsics to one from the dataset # K = camera.get_intrinsics_matrix() # set_persp_intrinsics_matrix(self.vis, K.double().numpy()) def _check_camera_path_payload(self, trainer, step: int): """Check to see if the camera path export button was pressed.""" # check if we should interrupt from a button press? 
camera_path_payload = self.vis["camera_path_payload"].read() if camera_path_payload: # save a model checkpoint trainer.save_checkpoint(step) # write to json file camera_path_filename = camera_path_payload["camera_path_filename"] + '.json' camera_path = camera_path_payload["camera_path"] write_to_json(Path(camera_path_filename), camera_path) self.vis["camera_path_payload"].delete() def update_scene(self, trainer, step: int, graph: Model, num_rays_per_batch: int) -> None: """updates the scene based on the graph weights Args: step: iteration step of training graph: the current checkpoint of the model """ has_temporal_distortion = getattr(graph, "temporal_distortion", None) is not None self.vis["model/has_temporal_distortion"].write(str(has_temporal_distortion).lower()) is_training = self.vis["renderingState/isTraining"].read() self.step = step self._check_camera_path_payload(trainer, step) camera_object = self._get_camera_object() if camera_object is None: return if is_training is None or is_training: # in training mode if self.camera_moving: # if the camera is moving, then we pause training and update camera continuously while self.camera_moving: self._render_image_in_viewer(camera_object, graph, is_training) camera_object = self._get_camera_object() else: # if the camera is not moving, then we approximate how many training steps need to be taken # to render at a FPS defined by self.static_fps. if EventName.TRAIN_RAYS_PER_SEC.value in GLOBAL_BUFFER["events"]: train_rays_per_sec = GLOBAL_BUFFER["events"][EventName.TRAIN_RAYS_PER_SEC.value]["avg"] target_train_util = self.vis["renderingState/targetTrainUtil"].read() if target_train_util is None: target_train_util = 0.9 batches_per_sec = train_rays_per_sec / num_rays_per_batch num_steps = max(int(1 / self.static_fps * batches_per_sec), 1) else: num_steps = 1 if step % num_steps == 0: self._render_image_in_viewer(camera_object, graph, is_training) else: # in pause training mode, enter render loop with set graph local_step = step run_loop = not is_training while run_loop: # if self._is_render_step(local_step) and step > 0: if step > 0: self._render_image_in_viewer(camera_object, graph, is_training) camera_object = self._get_camera_object() is_training = self.vis["renderingState/isTraining"].read() self._check_camera_path_payload(trainer, step) run_loop = not is_training local_step += 1 def check_interrupt(self, frame, event, arg): # pylint: disable=unused-argument """Raises interrupt when flag has been set and not already on lowest resolution. Used in conjunction with SetTrace. 
""" if event == "line": if self.check_interrupt_vis and not self.camera_moving: raise IOChangeException return self.check_interrupt def _get_camera_object(self): """Gets the camera object from the viewer and updates the movement state if it has changed.""" data = self.vis["renderingState/camera"].read() if data is None: return None camera_object = data["object"] if self.prev_camera_matrix is not None and np.allclose(camera_object["matrix"], self.prev_camera_matrix): self.camera_moving = False else: self.prev_camera_matrix = camera_object["matrix"] self.camera_moving = True output_type = self.vis["renderingState/output_choice"].read() if output_type is None: output_type = OutputTypes.INIT if self.prev_output_type != output_type: self.camera_moving = True colormap_type = self.vis["renderingState/colormap_choice"].read() if colormap_type is None: colormap_type = ColormapTypes.INIT if self.prev_colormap_type != colormap_type: self.camera_moving = True return camera_object def _apply_colormap(self, outputs: Dict[str, Any], colors: torch.Tensor = None, eps=1e-6): """Determines which colormap to use based on set colormap type Args: outputs: the output tensors for which to apply colormaps on colors: is only set if colormap is for semantics. Defaults to None. eps: epsilon to handle floating point comparisons """ if self.output_list: reformatted_output = self._process_invalid_output(self.prev_output_type) # default for rgb images if self.prev_colormap_type == ColormapTypes.DEFAULT and outputs[reformatted_output].shape[-1] == 3: return outputs[reformatted_output] # rendering depth outputs if self.prev_colormap_type == ColormapTypes.DEPTH or ( self.prev_colormap_type == ColormapTypes.DEFAULT and outputs[reformatted_output].dtype == torch.float and (torch.max(outputs[reformatted_output]) - 1.0) > eps # handle floating point arithmetic ): accumulation_str = ( OutputTypes.ACCUMULATION if OutputTypes.ACCUMULATION in self.output_list else OutputTypes.ACCUMULATION_FINE )
return colormaps.apply_depth_colormap(outputs[reformatted_output], accumulation=outputs[accumulation_str])
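The depth branch that leads to the `next_line` above fires when an output is a float map whose maximum exceeds 1.0 by more than `eps`. A toy check of that heuristic with a made-up tensor:

```python
import torch

eps = 1e-6
outputs = {"depth": torch.tensor([[0.5], [3.2]])}  # hypothetical render output
is_depth_like = (
    outputs["depth"].dtype == torch.float
    and (torch.max(outputs["depth"]) - 1.0) > eps  # values beyond [0, 1]
)
print(is_depth_like)  # True -> apply_depth_colormap would be used
```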
5
2023-12-15 20:07:22+00:00
24k
alipay/PainlessInferenceAcceleration
pia/lookahead/models/chatglm/modeling_chatglm.py
[ { "identifier": "ChatGLMConfig", "path": "pia/lookahead/models/chatglm/configuration_chatglm.py", "snippet": "class ChatGLMConfig(PretrainedConfig):\n model_type = \"chatglm\"\n\n def __init__(\n self,\n num_layers=28,\n padded_vocab_size=65024,\n hidden...
import copy import math import warnings import torch import torch.nn.functional as F import torch.utils.checkpoint from typing import Optional, Tuple, List, Callable, Dict, Any from pia.lookahead.models.chatglm.configuration_chatglm import ChatGLMConfig from torch import nn from torch.nn import CrossEntropyLoss, LayerNorm from torch.nn.utils import skip_init from transformers.generation.logits_process import LogitsProcessor from transformers.generation.utils import LogitsProcessorList, StoppingCriteriaList, GenerationConfig, ModelOutput from transformers.modeling_outputs import ( BaseModelOutputWithPast, CausalLMOutputWithPast, ) from transformers.utils import logging from pia.lookahead.common.pretrained_model import LookaheadPreTrainedModel
16,924
attention_output, kv_cache = self.self_attention( layernorm_output, attention_mask, rotary_pos_emb, kv_cache=kv_cache, use_cache=use_cache ) # Residual connection. if self.apply_residual_connection_post_layernorm: residual = layernorm_output else: residual = hidden_states # layernorm_input = torch.nn.functional.dropout(attention_output, p=self.hidden_dropout, training=self.training) layernorm_input = attention_output layernorm_input = residual + layernorm_input # Layer norm post the self attention. layernorm_output = self.post_attention_layernorm(layernorm_input) # MLP. mlp_output = self.mlp(layernorm_output) # Second residual connection. if self.apply_residual_connection_post_layernorm: residual = layernorm_output else: residual = layernorm_input # output = torch.nn.functional.dropout(mlp_output, p=self.hidden_dropout, training=self.training) output = mlp_output output = residual + output return output, kv_cache class GLMTransformer(torch.nn.Module): """Transformer class.""" def __init__(self, config: ChatGLMConfig, device=None): super(GLMTransformer, self).__init__() self.fp32_residual_connection = config.fp32_residual_connection self.post_layer_norm = config.post_layer_norm # Number of layers. self.num_layers = config.num_layers # Transformer layers. def build_layer(layer_number): return GLMBlock(config, layer_number, device=device) self.layers = torch.nn.ModuleList([build_layer(i + 1) for i in range(self.num_layers)]) if self.post_layer_norm: LayerNormFunc = RMSNorm if config.rmsnorm else LayerNorm # Final layer norm before output. self.final_layernorm = LayerNormFunc(config.hidden_size, eps=config.layernorm_epsilon, device=device, dtype=config.torch_dtype) self.gradient_checkpointing = False def _get_layer(self, layer_number): return self.layers[layer_number] def forward( self, hidden_states, attention_mask, rotary_pos_emb, kv_caches=None, use_cache: Optional[bool] = True, output_hidden_states: Optional[bool] = False, ): num_layers = len(self.layers) if not kv_caches: kv_caches = [None for _ in range(num_layers)] presents = () if use_cache else None if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False all_self_attentions = None all_hidden_states = () if output_hidden_states else None for index in range(num_layers): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer = self._get_layer(index) if self.gradient_checkpointing and self.training: layer_ret = torch.utils.checkpoint.checkpoint( layer, hidden_states, attention_mask, rotary_pos_emb, kv_caches[index], use_cache ) else: layer_ret = layer( hidden_states, attention_mask, rotary_pos_emb, kv_cache=kv_caches[index], use_cache=use_cache ) hidden_states, kv_cache = layer_ret if use_cache: presents = presents + (kv_cache,) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) # Final layer norm. if self.post_layer_norm: hidden_states = self.final_layernorm(hidden_states) return hidden_states, presents, all_hidden_states, all_self_attentions
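`GLMTransformer.forward` in the cropped snippet threads one cache per layer through the loop and rebuilds the `presents` tuple on every call. A toy reproduction of that bookkeeping with a fake layer (all shapes below are arbitrary, for illustration only):

```python
import torch

num_layers = 2

def fake_layer(hidden, kv_cache):
    # Append this step's "key/value" to whatever is already cached.
    new_kv = hidden.unsqueeze(0)
    if kv_cache is not None:
        new_kv = torch.cat([kv_cache, new_kv], dim=0)
    return hidden + 1.0, new_kv

hidden = torch.zeros(4)
kv_caches = [None] * num_layers
for step in range(3):                      # three decode steps
    presents = ()
    for index in range(num_layers):
        hidden, kv_cache = fake_layer(hidden, kv_caches[index])
        presents = presents + (kv_cache,)  # same accumulation as above
    kv_caches = list(presents)             # caches grow by one entry per step

print([kv.shape for kv in kv_caches])  # [torch.Size([3, 4]), torch.Size([3, 4])]
```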
""" PyTorch ChatGLM model. """ # from transformers.modeling_utils import PreTrainedModel # flags required to enable jit fusion kernels # if sys.platform != 'darwin': # torch._C._jit_set_profiling_mode(False) # torch._C._jit_set_profiling_executor(False) # torch._C._jit_override_can_fuse_on_cpu(True) # torch._C._jit_override_can_fuse_on_gpu(True) logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "THUDM/ChatGLM2-6B" _CONFIG_FOR_DOC = "ChatGLM6BConfig" CHATGLM_6B_PRETRAINED_MODEL_ARCHIVE_LIST = [ "THUDM/chatglm2-6b", # See all ChatGLM models at https://huggingface.co/models?filter=chatglm ] def default_init(cls, *args, **kwargs): return cls(*args, **kwargs) class InvalidScoreLogitsProcessor(LogitsProcessor): def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: if torch.isnan(scores).any() or torch.isinf(scores).any(): scores.zero_() scores[..., 5] = 5e4 return scores class PrefixEncoder(torch.nn.Module): """ The torch.nn model to encode the prefix Input shape: (batch-size, prefix-length) Output shape: (batch-size, prefix-length, 2*layers*hidden) """ def __init__(self, config: ChatGLMConfig): super().__init__() self.prefix_projection = config.prefix_projection if self.prefix_projection: # Use a two-layer MLP to encode the prefix self.embedding = torch.nn.Embedding(config.pre_seq_len, config.hidden_size) self.trans = torch.nn.Sequential( torch.nn.Linear(config.hidden_size, config.hidden_size), torch.nn.Tanh(), torch.nn.Linear(config.hidden_size, config.num_layers * config.hidden_size * 2) ) else: self.embedding = torch.nn.Embedding(config.pre_seq_len, config.num_layers * config.kv_channels * config.multi_query_group_num * 2) def forward(self, prefix: torch.Tensor): if self.prefix_projection: prefix_tokens = self.embedding(prefix) past_key_values = self.trans(prefix_tokens) else: past_key_values = self.embedding(prefix) return past_key_values def split_tensor_along_last_dim( tensor: torch.Tensor, num_partitions: int, contiguous_split_chunks: bool = False, ) -> List[torch.Tensor]: """Split a tensor along its last dimension. Arguments: tensor: input tensor. num_partitions: number of partitions to split the tensor contiguous_split_chunks: If True, make each chunk contiguous in memory. Returns: A list of Tensors """ # Get the size and dimension. last_dim = tensor.dim() - 1 last_dim_size = tensor.size()[last_dim] // num_partitions # Split. tensor_list = torch.split(tensor, last_dim_size, dim=last_dim) # Note: torch.split does not create contiguous tensors by default. if contiguous_split_chunks: return tuple(chunk.contiguous() for chunk in tensor_list) return tensor_list class RotaryEmbedding(nn.Module): def __init__(self, dim, original_impl=False, device=None, dtype=None): super().__init__() inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2, device=device).to(dtype=dtype) / dim)) self.register_buffer("inv_freq", inv_freq) self.dim = dim self.original_impl = original_impl def forward_impl( self, seq_len: int, n_elem: int, dtype: torch.dtype, device: torch.device, base: int = 10000 ): """Enhanced Transformer with Rotary Position Embedding. Derived from: https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/ transformers/rope/__init__.py. MIT License: https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/master/license. 
""" # $\Theta = {\theta_i = 10000^{\frac{2(i-1)}{d}}, i \in [1, 2, ..., \frac{d}{2}]}$ theta = 1.0 / (base ** (torch.arange(0, n_elem, 2, dtype=dtype, device=device) / n_elem)) # Create position indexes `[0, 1, ..., seq_len - 1]` seq_idx = torch.arange(seq_len, dtype=dtype, device=device) # Calculate the product of position index and $\theta_i$ idx_theta = torch.outer(seq_idx, theta).float() cache = torch.stack([torch.cos(idx_theta), torch.sin(idx_theta)], dim=-1) # this is to mimic the behaviour of complex32, else we will get different results if dtype in (torch.float16, torch.bfloat16, torch.int8): cache = cache.bfloat16() if dtype == torch.bfloat16 else cache.half() return cache def forward(self, max_seq_len, offset=0): return self.forward_impl( max_seq_len, self.dim, dtype=self.inv_freq.dtype, device=self.inv_freq.device ) # @torch.jit.script def apply_rotary_pos_emb(x: torch.Tensor, rope_cache: torch.Tensor) -> torch.Tensor: # x: [sq, b, np, hn] sq, b, np, hn = x.size(0), x.size(1), x.size(2), x.size(3) rot_dim = rope_cache.shape[-2] * 2 x, x_pass = x[..., :rot_dim], x[..., rot_dim:] # truncate to support variable sizes rope_cache = rope_cache[:sq] xshaped = x.reshape(sq, -1, np, rot_dim // 2, 2) rope_cache = rope_cache.view(sq, -1, 1, xshaped.size(3), 2) x_out2 = torch.stack( [ xshaped[..., 0] * rope_cache[..., 0] - xshaped[..., 1] * rope_cache[..., 1], xshaped[..., 1] * rope_cache[..., 0] + xshaped[..., 0] * rope_cache[..., 1], ], -1, ) x_out2 = x_out2.flatten(3) return torch.cat((x_out2, x_pass), dim=-1) class RMSNorm(torch.nn.Module): def __init__(self, normalized_shape, eps=1e-5, device=None, dtype=None, **kwargs): super().__init__() self.weight = torch.nn.Parameter(torch.empty(normalized_shape, device=device, dtype=dtype)) self.eps = eps def forward(self, hidden_states: torch.Tensor): input_dtype = hidden_states.dtype variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.eps) return (self.weight * hidden_states).to(input_dtype) class CoreAttention(torch.nn.Module): def __init__(self, config: ChatGLMConfig, layer_number): super(CoreAttention, self).__init__() self.apply_query_key_layer_scaling = config.apply_query_key_layer_scaling self.attention_softmax_in_fp32 = config.attention_softmax_in_fp32 if self.apply_query_key_layer_scaling: self.attention_softmax_in_fp32 = True self.layer_number = max(1, layer_number) projection_size = config.kv_channels * config.num_attention_heads # Per attention head and per partition values. 
self.hidden_size_per_partition = projection_size self.hidden_size_per_attention_head = projection_size // config.num_attention_heads self.num_attention_heads_per_partition = config.num_attention_heads coeff = None self.norm_factor = math.sqrt(self.hidden_size_per_attention_head) if self.apply_query_key_layer_scaling: coeff = self.layer_number self.norm_factor *= coeff self.coeff = coeff # self.attention_dropout = torch.nn.Dropout(config.attention_dropout) def forward(self, query_layer, key_layer, value_layer, attention_mask): pytorch_major_version = int(torch.__version__.split('.')[0]) if pytorch_major_version >= 2: query_layer, key_layer, value_layer = [k.permute(1, 2, 0, 3) for k in [query_layer, key_layer, value_layer]] if attention_mask is None and query_layer.shape[2] == key_layer.shape[2]: context_layer = torch.nn.functional.scaled_dot_product_attention(query_layer, key_layer, value_layer, is_causal=True) else: if attention_mask is not None: attention_mask = ~attention_mask context_layer = torch.nn.functional.scaled_dot_product_attention(query_layer, key_layer, value_layer, attention_mask) context_layer = context_layer.permute(2, 0, 1, 3) new_context_layer_shape = context_layer.size()[:-2] + (self.hidden_size_per_partition,) context_layer = context_layer.reshape(*new_context_layer_shape) else: # Raw attention scores # [b, np, sq, sk] output_size = (query_layer.size(1), query_layer.size(2), query_layer.size(0), key_layer.size(0)) # [sq, b, np, hn] -> [sq, b * np, hn] query_layer = query_layer.view(output_size[2], output_size[0] * output_size[1], -1) # [sk, b, np, hn] -> [sk, b * np, hn] key_layer = key_layer.view(output_size[3], output_size[0] * output_size[1], -1) # preallocting input tensor: [b * np, sq, sk] matmul_input_buffer = torch.empty( output_size[0] * output_size[1], output_size[2], output_size[3], dtype=query_layer.dtype, device=query_layer.device ) # Raw attention scores. [b * np, sq, sk] matmul_result = torch.baddbmm( matmul_input_buffer, query_layer.transpose(0, 1), # [b * np, sq, hn] key_layer.transpose(0, 1).transpose(1, 2), # [b * np, hn, sk] beta=0.0, alpha=(1.0 / self.norm_factor), ) # change view to [b, np, sq, sk] attention_scores = matmul_result.view(*output_size) # =========================== # Attention probs and dropout # =========================== # attention scores and attention mask [b, np, sq, sk] if self.attention_softmax_in_fp32: attention_scores = attention_scores.float() if self.coeff is not None: attention_scores = attention_scores * self.coeff if attention_mask is None and attention_scores.shape[2] == attention_scores.shape[3]: attention_mask = torch.ones(output_size[0], 1, output_size[2], output_size[3], device=attention_scores.device, dtype=torch.bool) attention_mask.tril_() attention_mask = ~attention_mask if attention_mask is not None: attention_scores = attention_scores.masked_fill(attention_mask, float("-inf")) attention_probs = F.softmax(attention_scores, dim=-1) attention_probs = attention_probs.type_as(value_layer) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. # attention_probs = self.attention_dropout(attention_probs) # ========================= # Context layer. [sq, b, hp] # ========================= # value_layer -> context layer. 
# [sk, b, np, hn] --> [b, np, sq, hn] # context layer shape: [b, np, sq, hn] output_size = (value_layer.size(1), value_layer.size(2), query_layer.size(0), value_layer.size(3)) # change view [sk, b * np, hn] value_layer = value_layer.view(value_layer.size(0), output_size[0] * output_size[1], -1) # change view [b * np, sq, sk] attention_probs = attention_probs.view(output_size[0] * output_size[1], output_size[2], -1) # matmul: [b * np, sq, hn] context_layer = torch.bmm(attention_probs, value_layer.transpose(0, 1)) # change view [b, np, sq, hn] context_layer = context_layer.view(*output_size) # [b, np, sq, hn] --> [sq, b, np, hn] context_layer = context_layer.permute(2, 0, 1, 3).contiguous() # [sq, b, np, hn] --> [sq, b, hp] new_context_layer_shape = context_layer.size()[:-2] + (self.hidden_size_per_partition,) context_layer = context_layer.view(*new_context_layer_shape) return context_layer class SelfAttention(torch.nn.Module): """Parallel self-attention layer abstract class. Self-attention layer takes input with size [s, b, h] and returns output of the same size. """ def __init__(self, config: ChatGLMConfig, layer_number, device=None): super(SelfAttention, self).__init__() self.layer_number = max(1, layer_number) self.projection_size = config.kv_channels * config.num_attention_heads # Per attention head and per partition values. self.hidden_size_per_attention_head = self.projection_size // config.num_attention_heads self.num_attention_heads_per_partition = config.num_attention_heads self.multi_query_attention = config.multi_query_attention self.qkv_hidden_size = 3 * self.projection_size if self.multi_query_attention: self.num_multi_query_groups_per_partition = config.multi_query_group_num self.qkv_hidden_size = ( self.projection_size + 2 * self.hidden_size_per_attention_head * config.multi_query_group_num ) self.query_key_value = nn.Linear(config.hidden_size, self.qkv_hidden_size, bias=config.add_bias_linear or config.add_qkv_bias, device=device, **_config_to_kwargs(config) ) self.core_attention = CoreAttention(config, self.layer_number) # Output. self.dense = nn.Linear(self.projection_size, config.hidden_size, bias=config.add_bias_linear, device=device, **_config_to_kwargs(config) ) def _allocate_memory(self, inference_max_sequence_len, batch_size, device=None, dtype=None): if self.multi_query_attention: num_attention_heads = self.num_multi_query_groups_per_partition else: num_attention_heads = self.num_attention_heads_per_partition return torch.empty( inference_max_sequence_len, batch_size, num_attention_heads, self.hidden_size_per_attention_head, dtype=dtype, device=device, ) def forward( self, hidden_states, attention_mask, rotary_pos_emb, kv_cache=None, use_cache=True ): # hidden_states: [sq, b, h] # ================================================= # Pre-allocate memory for key-values for inference. 
# ================================================= # ===================== # Query, Key, and Value # ===================== # Attention heads [sq, b, h] --> [sq, b, (np * 3 * hn)] mixed_x_layer = self.query_key_value(hidden_states) if self.multi_query_attention: (query_layer, key_layer, value_layer) = mixed_x_layer.split( [ self.num_attention_heads_per_partition * self.hidden_size_per_attention_head, self.num_multi_query_groups_per_partition * self.hidden_size_per_attention_head, self.num_multi_query_groups_per_partition * self.hidden_size_per_attention_head, ], dim=-1, ) query_layer = query_layer.view( query_layer.size()[:-1] + (self.num_attention_heads_per_partition, self.hidden_size_per_attention_head) ) key_layer = key_layer.view( key_layer.size()[:-1] + (self.num_multi_query_groups_per_partition, self.hidden_size_per_attention_head) ) value_layer = value_layer.view( value_layer.size()[:-1] + (self.num_multi_query_groups_per_partition, self.hidden_size_per_attention_head) ) else: new_tensor_shape = mixed_x_layer.size()[:-1] + \ (self.num_attention_heads_per_partition, 3 * self.hidden_size_per_attention_head) mixed_x_layer = mixed_x_layer.view(*new_tensor_shape) # [sq, b, np, 3 * hn] --> 3 [sq, b, np, hn] (query_layer, key_layer, value_layer) = split_tensor_along_last_dim(mixed_x_layer, 3) # apply relative positional encoding (rotary embedding) if rotary_pos_emb is not None: query_layer = apply_rotary_pos_emb(query_layer, rotary_pos_emb) key_layer = apply_rotary_pos_emb(key_layer, rotary_pos_emb) # adjust key and value for inference if kv_cache is not None: cache_k, cache_v = kv_cache key_layer = torch.cat((cache_k, key_layer), dim=0) value_layer = torch.cat((cache_v, value_layer), dim=0) if use_cache: kv_cache = (key_layer, value_layer) else: kv_cache = None if self.multi_query_attention: key_layer = key_layer.unsqueeze(-2) key_layer = key_layer.expand( -1, -1, -1, self.num_attention_heads_per_partition // self.num_multi_query_groups_per_partition, -1 ) key_layer = key_layer.contiguous().view( key_layer.size()[:2] + (self.num_attention_heads_per_partition, self.hidden_size_per_attention_head) ) value_layer = value_layer.unsqueeze(-2) value_layer = value_layer.expand( -1, -1, -1, self.num_attention_heads_per_partition // self.num_multi_query_groups_per_partition, -1 ) value_layer = value_layer.contiguous().view( value_layer.size()[:2] + (self.num_attention_heads_per_partition, self.hidden_size_per_attention_head) ) # ================================== # core attention computation # ================================== context_layer = self.core_attention(query_layer, key_layer, value_layer, attention_mask) # ================= # Output. [sq, b, h] # ================= output = self.dense(context_layer) return output, kv_cache def _config_to_kwargs(args): common_kwargs = { "dtype": args.torch_dtype, } return common_kwargs class MLP(torch.nn.Module): """MLP. MLP will take the input with h hidden state, project it to 4*h hidden dimension, perform nonlinear transformation, and project the state back into h hidden dimension. """ def __init__(self, config: ChatGLMConfig, device=None): super(MLP, self).__init__() self.add_bias = config.add_bias_linear # Project to 4h. 
If using swiglu double the output width, see https://arxiv.org/pdf/2002.05202.pdf self.dense_h_to_4h = nn.Linear( config.hidden_size, config.ffn_hidden_size * 2, bias=self.add_bias, device=device, **_config_to_kwargs(config) ) def swiglu(x): x = torch.chunk(x, 2, dim=-1) return F.silu(x[0]) * x[1] self.activation_func = swiglu # Project back to h. self.dense_4h_to_h = nn.Linear( config.ffn_hidden_size, config.hidden_size, bias=self.add_bias, device=device, **_config_to_kwargs(config) ) def forward(self, hidden_states): # [s, b, 4hp] intermediate_parallel = self.dense_h_to_4h(hidden_states) intermediate_parallel = self.activation_func(intermediate_parallel) # [s, b, h] output = self.dense_4h_to_h(intermediate_parallel) return output class GLMBlock(torch.nn.Module): """A single transformer layer. Transformer layer takes input with size [s, b, h] and returns an output of the same size. """ def __init__(self, config: ChatGLMConfig, layer_number, device=None): super(GLMBlock, self).__init__() self.layer_number = layer_number self.apply_residual_connection_post_layernorm = config.apply_residual_connection_post_layernorm self.fp32_residual_connection = config.fp32_residual_connection LayerNormFunc = RMSNorm if config.rmsnorm else LayerNorm # Layernorm on the input data. self.input_layernorm = LayerNormFunc(config.hidden_size, eps=config.layernorm_epsilon, device=device, dtype=config.torch_dtype) # Self attention. self.self_attention = SelfAttention(config, layer_number, device=device) # self.hidden_dropout = config.hidden_dropout # Layernorm on the attention output self.post_attention_layernorm = LayerNormFunc(config.hidden_size, eps=config.layernorm_epsilon, device=device, dtype=config.torch_dtype) # MLP self.mlp = MLP(config, device=device) def forward( self, hidden_states, attention_mask, rotary_pos_emb, kv_cache=None, use_cache=True, ): # hidden_states: [s, b, h] # Layer norm at the beginning of the transformer layer. layernorm_output = self.input_layernorm(hidden_states) # Self attention. attention_output, kv_cache = self.self_attention( layernorm_output, attention_mask, rotary_pos_emb, kv_cache=kv_cache, use_cache=use_cache ) # Residual connection. if self.apply_residual_connection_post_layernorm: residual = layernorm_output else: residual = hidden_states # layernorm_input = torch.nn.functional.dropout(attention_output, p=self.hidden_dropout, training=self.training) layernorm_input = attention_output layernorm_input = residual + layernorm_input # Layer norm post the self attention. layernorm_output = self.post_attention_layernorm(layernorm_input) # MLP. mlp_output = self.mlp(layernorm_output) # Second residual connection. if self.apply_residual_connection_post_layernorm: residual = layernorm_output else: residual = layernorm_input # output = torch.nn.functional.dropout(mlp_output, p=self.hidden_dropout, training=self.training) output = mlp_output output = residual + output return output, kv_cache class GLMTransformer(torch.nn.Module): """Transformer class.""" def __init__(self, config: ChatGLMConfig, device=None): super(GLMTransformer, self).__init__() self.fp32_residual_connection = config.fp32_residual_connection self.post_layer_norm = config.post_layer_norm # Number of layers. self.num_layers = config.num_layers # Transformer layers. 
def build_layer(layer_number): return GLMBlock(config, layer_number, device=device) self.layers = torch.nn.ModuleList([build_layer(i + 1) for i in range(self.num_layers)]) if self.post_layer_norm: LayerNormFunc = RMSNorm if config.rmsnorm else LayerNorm # Final layer norm before output. self.final_layernorm = LayerNormFunc(config.hidden_size, eps=config.layernorm_epsilon, device=device, dtype=config.torch_dtype) self.gradient_checkpointing = False def _get_layer(self, layer_number): return self.layers[layer_number] def forward( self, hidden_states, attention_mask, rotary_pos_emb, kv_caches=None, use_cache: Optional[bool] = True, output_hidden_states: Optional[bool] = False, ): num_layers = len(self.layers) if not kv_caches: kv_caches = [None for _ in range(num_layers)] presents = () if use_cache else None if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False all_self_attentions = None all_hidden_states = () if output_hidden_states else None for index in range(num_layers): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer = self._get_layer(index) if self.gradient_checkpointing and self.training: layer_ret = torch.utils.checkpoint.checkpoint( layer, hidden_states, attention_mask, rotary_pos_emb, kv_caches[index], use_cache ) else: layer_ret = layer( hidden_states, attention_mask, rotary_pos_emb, kv_cache=kv_caches[index], use_cache=use_cache ) hidden_states, kv_cache = layer_ret if use_cache: presents = presents + (kv_cache,) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) # Final layer norm. if self.post_layer_norm: hidden_states = self.final_layernorm(hidden_states) return hidden_states, presents, all_hidden_states, all_self_attentions
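`RotaryEmbedding.forward_impl` and `apply_rotary_pos_emb` above implement RoPE as a 2-D rotation of each channel pair. A small numerical check that mirrors both formulas on a toy tensor and verifies the rotation is norm-preserving:

```python
import torch

seq_len, n_elem, base = 5, 8, 10000
# Cache construction, as in forward_impl.
theta = 1.0 / (base ** (torch.arange(0, n_elem, 2).float() / n_elem))
idx_theta = torch.outer(torch.arange(seq_len).float(), theta)
cache = torch.stack([torch.cos(idx_theta), torch.sin(idx_theta)], dim=-1)

# Application, as in apply_rotary_pos_emb ([sq, b, np, hn] layout).
x = torch.randn(seq_len, 1, 1, n_elem)
xshaped = x.reshape(seq_len, 1, 1, n_elem // 2, 2)
rope = cache.view(seq_len, 1, 1, n_elem // 2, 2)
rotated = torch.stack(
    [
        xshaped[..., 0] * rope[..., 0] - xshaped[..., 1] * rope[..., 1],
        xshaped[..., 1] * rope[..., 0] + xshaped[..., 0] * rope[..., 1],
    ],
    dim=-1,
).flatten(3)

# Each channel pair is rotated, so the overall norm is unchanged.
print(torch.allclose(rotated.norm(), x.norm(), atol=1e-5))  # True
```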
class ChatGLMPreTrainedModel(LookaheadPreTrainedModel):
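The `MLP` above doubles the width of `dense_h_to_4h` because its `swiglu` activation halves it again: one chunk gates the other through SiLU. A standalone check of that shape contract:

```python
import torch
import torch.nn.functional as F

def swiglu(x):
    a, b = torch.chunk(x, 2, dim=-1)
    return F.silu(a) * b  # SiLU-gated half, as in MLP above

x = torch.randn(2, 8)    # hypothetical [batch, 2 * ffn_hidden_size]
print(swiglu(x).shape)   # torch.Size([2, 4]) -- width halved by the gate
```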
1
2023-12-19 13:11:38+00:00
24k
MingtaoGuo/AnimateAnyone_unofficial
aldm/aldm.py
[ { "identifier": "conv_nd", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def conv_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D convolution module.\n \"\"\"\n if dims == 1:\n return nn.Conv1d(*args, **kwargs)\n elif dims == 2:\n return nn.Conv2d(...
import einops import torch import torch as th import torch.nn as nn from ldm.modules.diffusionmodules.util import ( conv_nd, linear, zero_module, timestep_embedding, ) from einops import rearrange, repeat from torchvision.utils import make_grid from ldm.modules.attention import SpatialTransformer, SpatialTransformerPlus from ldm.modules.diffusionmodules.openaimodel import ResBlock, TimestepEmbedSequential, Downsample, AttentionBlock, Upsample, normalization, checkpoint, convert_module_to_f16, convert_module_to_f32 from ldm.models.diffusion.ddpm import LatentDiffusion from ldm.util import log_txt_as_img, exists, instantiate_from_config from ldm.models.diffusion.ddim import DDIMSampler from omegaconf.listconfig import ListConfig from omegaconf.listconfig import ListConfig
18,306
num_heads=-1, num_head_channels=-1, num_heads_upsample=-1, use_scale_shift_norm=False, resblock_updown=False, use_new_attention_order=False, use_spatial_transformer=False, # custom transformer support transformer_depth=1, # custom transformer support context_dim=None, # custom transformer support n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model legacy=True, disable_self_attentions=None, num_attention_blocks=None, disable_middle_self_attn=False, use_linear_in_transformer=False, ): super().__init__() if use_spatial_transformer: assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...' if context_dim is not None: assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...' if type(context_dim) == ListConfig: context_dim = list(context_dim) if num_heads_upsample == -1: num_heads_upsample = num_heads if num_heads == -1: assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set' if num_head_channels == -1: assert num_heads != -1, 'Either num_heads or num_head_channels has to be set' self.image_size = image_size self.in_channels = in_channels self.model_channels = model_channels if isinstance(num_res_blocks, int): self.num_res_blocks = len(channel_mult) * [num_res_blocks] else: if len(num_res_blocks) != len(channel_mult): raise ValueError("provide num_res_blocks either as an int (globally constant) or " "as a list/tuple (per-level) with the same length as channel_mult") self.num_res_blocks = num_res_blocks if disable_self_attentions is not None: # should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not assert len(disable_self_attentions) == len(channel_mult) if num_attention_blocks is not None: assert len(num_attention_blocks) == len(self.num_res_blocks) assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks)))) print(f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. 
" f"This option has LESS priority than attention_resolutions {attention_resolutions}, " f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, " f"attention will still not be set.") self.attention_resolutions = attention_resolutions self.dropout = dropout self.channel_mult = channel_mult self.conv_resample = conv_resample self.num_classes = num_classes self.use_checkpoint = use_checkpoint self.dtype = th.float16 if use_fp16 else th.float32 self.num_heads = num_heads self.num_head_channels = num_head_channels self.num_heads_upsample = num_heads_upsample self.predict_codebook_ids = n_embed is not None time_embed_dim = model_channels * 4 self.time_embed = nn.Sequential( linear(model_channels, time_embed_dim), nn.SiLU(), linear(time_embed_dim, time_embed_dim), ) self.input_blocks = nn.ModuleList( [ TimestepEmbedSequential( conv_nd(dims, in_channels, model_channels, 3, padding=1) ) ] ) self._feature_size = model_channels input_block_chans = [model_channels] ch = model_channels ds = 1 for level, mult in enumerate(channel_mult): for nr in range(self.num_res_blocks[level]): layers = [ ResBlock( ch, time_embed_dim, dropout, out_channels=mult * model_channels, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ) ] ch = mult * model_channels if ds in attention_resolutions: if num_head_channels == -1: dim_head = ch // num_heads else: num_heads = ch // num_head_channels dim_head = num_head_channels if legacy: #num_heads = 1 dim_head = ch // num_heads if use_spatial_transformer else num_head_channels if exists(disable_self_attentions): disabled_sa = disable_self_attentions[level] else: disabled_sa = False if not exists(num_attention_blocks) or nr < num_attention_blocks[level]: layers.append( AttentionBlock( ch, use_checkpoint=use_checkpoint, num_heads=num_heads, num_head_channels=dim_head, use_new_attention_order=use_new_attention_order,
class ReferenceNet(nn.Module): """ The full UNet model with attention and timestep embedding. :param in_channels: channels in the input Tensor. :param model_channels: base channel count for the model. :param out_channels: channels in the output Tensor. :param num_res_blocks: number of residual blocks per downsample. :param attention_resolutions: a collection of downsample rates at which attention will take place. May be a set, list, or tuple. For example, if this contains 4, then at 4x downsampling, attention will be used. :param dropout: the dropout probability. :param channel_mult: channel multiplier for each level of the UNet. :param conv_resample: if True, use learned convolutions for upsampling and downsampling. :param dims: determines if the signal is 1D, 2D, or 3D. :param num_classes: if specified (as an int), then this model will be class-conditional with `num_classes` classes. :param use_checkpoint: use gradient checkpointing to reduce memory usage. :param num_heads: the number of attention heads in each attention layer. :param num_heads_channels: if specified, ignore num_heads and instead use a fixed channel width per attention head. :param num_heads_upsample: works with num_heads to set a different number of heads for upsampling. Deprecated. :param use_scale_shift_norm: use a FiLM-like conditioning mechanism. :param resblock_updown: use residual blocks for up/downsampling. :param use_new_attention_order: use a different attention pattern for potentially increased efficiency. """ def __init__( self, image_size, in_channels, model_channels, num_res_blocks, attention_resolutions, dropout=0, channel_mult=(1, 2, 4, 8), conv_resample=True, dims=2, num_classes=None, use_checkpoint=False, use_fp16=False, num_heads=-1, num_head_channels=-1, num_heads_upsample=-1, use_scale_shift_norm=False, resblock_updown=False, use_new_attention_order=False, use_spatial_transformer=False, # custom transformer support transformer_depth=1, # custom transformer support context_dim=None, # custom transformer support n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model legacy=True, disable_self_attentions=None, num_attention_blocks=None, disable_middle_self_attn=False, use_linear_in_transformer=False, ): super().__init__() if use_spatial_transformer: assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...' if context_dim is not None: assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...' 
if type(context_dim) == ListConfig: context_dim = list(context_dim) if num_heads_upsample == -1: num_heads_upsample = num_heads if num_heads == -1: assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set' if num_head_channels == -1: assert num_heads != -1, 'Either num_heads or num_head_channels has to be set' self.image_size = image_size self.in_channels = in_channels self.model_channels = model_channels if isinstance(num_res_blocks, int): self.num_res_blocks = len(channel_mult) * [num_res_blocks] else: if len(num_res_blocks) != len(channel_mult): raise ValueError("provide num_res_blocks either as an int (globally constant) or " "as a list/tuple (per-level) with the same length as channel_mult") self.num_res_blocks = num_res_blocks if disable_self_attentions is not None: # should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not assert len(disable_self_attentions) == len(channel_mult) if num_attention_blocks is not None: assert len(num_attention_blocks) == len(self.num_res_blocks) assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks)))) print(f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. " f"This option has LESS priority than attention_resolutions {attention_resolutions}, " f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, " f"attention will still not be set.") self.attention_resolutions = attention_resolutions self.dropout = dropout self.channel_mult = channel_mult self.conv_resample = conv_resample self.num_classes = num_classes self.use_checkpoint = use_checkpoint self.dtype = th.float16 if use_fp16 else th.float32 self.num_heads = num_heads self.num_head_channels = num_head_channels self.num_heads_upsample = num_heads_upsample self.predict_codebook_ids = n_embed is not None time_embed_dim = model_channels * 4 self.time_embed = nn.Sequential( linear(model_channels, time_embed_dim), nn.SiLU(), linear(time_embed_dim, time_embed_dim), ) self.input_blocks = nn.ModuleList( [ TimestepEmbedSequential( conv_nd(dims, in_channels, model_channels, 3, padding=1) ) ] ) self._feature_size = model_channels input_block_chans = [model_channels] ch = model_channels ds = 1 for level, mult in enumerate(channel_mult): for nr in range(self.num_res_blocks[level]): layers = [ ResBlock( ch, time_embed_dim, dropout, out_channels=mult * model_channels, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ) ] ch = mult * model_channels if ds in attention_resolutions: if num_head_channels == -1: dim_head = ch // num_heads else: num_heads = ch // num_head_channels dim_head = num_head_channels if legacy: #num_heads = 1 dim_head = ch // num_heads if use_spatial_transformer else num_head_channels if exists(disable_self_attentions): disabled_sa = disable_self_attentions[level] else: disabled_sa = False if not exists(num_attention_blocks) or nr < num_attention_blocks[level]: layers.append( AttentionBlock( ch, use_checkpoint=use_checkpoint, num_heads=num_heads, num_head_channels=dim_head, use_new_attention_order=use_new_attention_order,
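`ReferenceNet` feeds `timestep_embedding(...)` through the `time_embed` MLP built above. A minimal sketch of the sinusoidal embedding such a helper conventionally produces (this mirrors the usual ldm/openaimodel utility and is shown here for illustration, not copied from this repo):

```python
import math
import torch

def timestep_embedding(timesteps, dim, max_period=10000):
    # Sinusoidal features: half cosines, half sines, log-spaced frequencies.
    half = dim // 2
    freqs = torch.exp(
        -math.log(max_period) * torch.arange(half, dtype=torch.float32) / half
    )
    args = timesteps[:, None].float() * freqs[None]
    return torch.cat([torch.cos(args), torch.sin(args)], dim=-1)

emb = timestep_embedding(torch.tensor([0, 10, 500]), dim=320)
print(emb.shape)  # torch.Size([3, 320]) -> input to the time_embed MLP
```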
) if not use_spatial_transformer else SpatialTransformer(
4
2023-12-16 03:31:33+00:00
24k
yasserben/CLOUDS
train_net.py
[ { "identifier": "add_maskformer2_config", "path": "clouds/config.py", "snippet": "def add_maskformer2_config(cfg):\n \"\"\"\n Add config for MASK_FORMER.\n \"\"\"\n # NOTE: configs from original maskformer\n # data config\n # select the dataset mapper\n cfg.INPUT.DATASET_MAPPER_NAME...
from shapely.errors import ShapelyDeprecationWarning from collections import OrderedDict from typing import Any, Dict, List, Set from detectron2.checkpoint import DetectionCheckpointer from detectron2.config import get_cfg from detectron2.data import ( MetadataCatalog, build_detection_train_loader, build_detection_test_loader, ) from detectron2.engine import ( DefaultTrainer, default_argument_parser, default_setup, launch, ) from detectron2.modeling import build_model from detectron2.evaluation import ( CityscapesInstanceEvaluator, CityscapesSemSegEvaluator, COCOEvaluator, COCOPanopticEvaluator, DatasetEvaluators, LVISEvaluator, SemSegEvaluator, verify_results, inference_on_dataset, print_csv_format, DatasetEvaluator, ) from detectron2.projects.deeplab import add_deeplab_config, build_lr_scheduler from detectron2.solver.build import maybe_add_gradient_clipping from detectron2.utils.logger import setup_logger from detectron2.engine import hooks from fvcore.nn.precise_bn import get_bn_modules from clouds import ( CityscapesSemSegEvaluator, ClassicalSemSegEvaluator, MapperTrain, MapperTest, add_maskformer2_config, add_clouds_config, add_wandb_config, add_prerocessing_training_set_config, PersoEvalHook, add_repeat_factors, ) from clouds.utils import setup_wandb, WandbWriter import warnings import copy import itertools import logging import os import ast import torch import detectron2.utils.comm as comm
14,540
def test(cls, cfg, model, output_folder=None, evaluators=None): """ Evaluate the given model. The given model is expected to already contain weights to evaluate. Args: cfg (CfgNode): model (nn.Module): evaluators (list[DatasetEvaluator] or None): if None, will call :meth:`build_evaluator`. Otherwise, must have the same length as ``cfg.DATASETS.TEST``. Returns: dict: a dict of result metrics """ logger = logging.getLogger(__name__) if isinstance(evaluators, DatasetEvaluator): evaluators = [evaluators] if evaluators is not None: assert len(cfg.DATASETS.TEST) == len(evaluators), "{} != {}".format( len(cfg.DATASETS.TEST), len(evaluators) ) results = OrderedDict() for idx, dataset_name in enumerate(cfg.DATASETS.TEST): data_loader = cls.build_test_loader(cfg, dataset_name) # When evaluators are passed in as arguments, # implicitly assume that evaluators can be created before data_loader. if evaluators is not None: evaluator = evaluators[idx] else: try: evaluator = cls.build_evaluator( cfg, dataset_name, output_folder=output_folder ) except NotImplementedError: logger.warn( "No evaluator found. Use `DefaultTrainer.test(evaluators=)`, " "or implement its `build_evaluator` method." ) results[dataset_name] = {} continue results_i = inference_on_dataset(model, data_loader, evaluator) results[dataset_name] = results_i if comm.is_main_process(): assert isinstance( results_i, dict ), "Evaluator must return a dict on the main process. Got {} instead.".format( results_i ) logger.info( "Evaluation results for {} in csv format:".format(dataset_name) ) print_csv_format(results_i) if len(results) == 1: results = list(results.values())[0] return results def build_hooks(self): """ Build a list of default hooks, including timing, evaluation, checkpointing, lr scheduling, precise BN, writing events. Returns: list[HookBase]: """ cfg = self.cfg.clone() cfg.defrost() cfg.DATALOADER.NUM_WORKERS = 0 # save some memory and time for PreciseBN ret = [ hooks.IterationTimer(), hooks.LRScheduler(), hooks.PreciseBN( # Run at the same freq as (but before) evaluation. cfg.TEST.EVAL_PERIOD, self.model, # Build a new data loader to not affect training self.build_train_loader(cfg), cfg.TEST.PRECISE_BN.NUM_ITER, ) if cfg.TEST.PRECISE_BN.ENABLED and get_bn_modules(self.model) else None, ] # Do PreciseBN before checkpointer, because it updates the model and need to # be saved by checkpointer. # This is not always the best: if checkpointing has a different frequency, # some checkpoints may have more precise statistics than others. if comm.is_main_process(): ret.append( hooks.PeriodicCheckpointer(self.checkpointer, cfg.TEST.EVAL_PERIOD * 5) ) def test_and_save_results(): self._last_eval_results = self.test(self.cfg, self.model) return self._last_eval_results # Do evaluation after checkpointer, because then if it fails, # we can use the saved checkpoint to debug. # ret.append(hooks.EvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results)) ret.append(PersoEvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results)) if comm.is_main_process(): # Here the default print/log frequency of each writer is used. # run writers in the end, so that evaluation metrics are written ret.append(hooks.PeriodicWriter(self.build_writers(), period=20)) return ret def setup(args): """ Create configs and perform basic setups. """ cfg = get_cfg() # for poly lr schedule add_deeplab_config(cfg) add_maskformer2_config(cfg) add_clouds_config(cfg)
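`Trainer.test` in the cropped snippet collects one metrics dict per entry in `cfg.DATASETS.TEST` and unwraps the mapping when only a single dataset is evaluated. A toy version of that aggregation with made-up metric values:

```python
from collections import OrderedDict

results = OrderedDict()
for dataset_name in ["acdc_sem_seg_val"]:   # hypothetical single test set
    results[dataset_name] = {"mIoU": 62.3}  # stand-in for inference_on_dataset

if len(results) == 1:
    results = list(results.values())[0]     # unwrap, as in test() above
print(results)  # {'mIoU': 62.3}
```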
""" Copyright 2023 Telecom Paris, Yasser BENIGMIM. All rights reserved. Licensed under the Apache License, Version 2.0 Reference: https://github.com/facebookresearch/Mask2Former/blob/main/train_net.py CLOUDS Training Script. This script is a simplified version of the training script in detectron2/tools. """ try: # ignore ShapelyDeprecationWarning from fvcore warnings.filterwarnings("ignore", category=ShapelyDeprecationWarning) except: pass class Trainer(DefaultTrainer): """ Extension of the Trainer class adapted to CLOUDS. """ def build_writers(self): writers = super().build_writers() # use wandb writer instead. writers[-1] = WandbWriter() return writers @classmethod def build_model(cls, cfg): """ Returns: torch.nn.Module: It now calls :func:`detectron2.modeling.build_model`. Overwrite it if you'd like a different model. """ model = build_model(cfg) # logger = logging.getLogger(__name__) # logger.info("Model:\n{}".format(model)) return model # @classmethod # def build_model(cls, cfg): # """ # Returns: # torch.nn.Module: # # It now calls :func:`detectron2.modeling.build_model`. # Overwrite it if you'd like a different model. # """ # model = build_model(cfg) # # logger = logging.getLogger(__name__) # # logger.info("Model:\n{}".format(model)) # return model @classmethod def build_evaluator(cls, cfg, dataset_name, output_folder=None): """ Create evaluator(s) for a given dataset. This uses the special metadata "evaluator_type" associated with each builtin dataset. For your own dataset, you can simply create an evaluator manually in your script and do not have to worry about the hacky if-else logic here. """ if output_folder is None: output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") else: output_folder = os.path.join(cfg.OUTPUT_DIR, output_folder, "inference") evaluator_list = [] evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type # semantic segmentation if ( evaluator_type == "bdd_sem_seg" or evaluator_type == "mapillary_sem_seg" or evaluator_type == "acdc_sem_seg" ): evaluator_list.append( ClassicalSemSegEvaluator( dataset_name, distributed=True, output_dir=output_folder, save_pl=cfg.MODEL.SAVE_PSEUDO_LABELS, ) ) # Cityscapes if evaluator_type == "cityscapes_sem_seg": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." # return CityscapesSemSegEvaluator(dataset_name) if cfg.MODEL.SAVE_PSEUDO_LABELS: return CityscapesSemSegEvaluator( dataset_name, save_pl=True, output_dir=output_folder ) else: return CityscapesSemSegEvaluator(dataset_name) if len(evaluator_list) == 0: raise NotImplementedError( "no Evaluator for the dataset {} with the type {}".format( dataset_name, evaluator_type ) ) elif len(evaluator_list) == 1: return evaluator_list[0] return DatasetEvaluators(evaluator_list) @classmethod def build_train_loader(cls, cfg): # Semantic segmentation dataset mapper mapper = MapperTrain(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) @classmethod def build_test_loader(cls, cfg, dataset_name): mapper = MapperTest(cfg, False) return build_detection_test_loader( cfg, dataset_name, batch_size=1, mapper=mapper ) @classmethod def build_lr_scheduler(cls, cfg, optimizer): """ It now calls :func:`detectron2.solver.build_lr_scheduler`. Overwrite it if you'd like a different scheduler. 
""" return build_lr_scheduler(cfg, optimizer) @classmethod def build_optimizer(cls, cfg, model): weight_decay_norm = cfg.SOLVER.WEIGHT_DECAY_NORM weight_decay_embed = cfg.SOLVER.WEIGHT_DECAY_EMBED defaults = {} defaults["lr"] = cfg.SOLVER.BASE_LR defaults["weight_decay"] = cfg.SOLVER.WEIGHT_DECAY norm_module_types = ( torch.nn.BatchNorm1d, torch.nn.BatchNorm2d, torch.nn.BatchNorm3d, torch.nn.SyncBatchNorm, # NaiveSyncBatchNorm inherits from BatchNorm2d torch.nn.GroupNorm, torch.nn.InstanceNorm1d, torch.nn.InstanceNorm2d, torch.nn.InstanceNorm3d, torch.nn.LayerNorm, torch.nn.LocalResponseNorm, ) params: List[Dict[str, Any]] = [] memo: Set[torch.nn.parameter.Parameter] = set() for module_name, module in model.named_modules(): for module_param_name, value in module.named_parameters(recurse=False): if not value.requires_grad: continue if cfg.MODEL.CLOUDS.OVERWRITING: if any( ignored_module in module_name for ignored_module in ["sem_seg_head_ema.", "sam.sam."] ): continue # Avoid duplicating parameters if value in memo: continue memo.add(value) hyperparams = copy.copy(defaults) if "backbone" in module_name: hyperparams["lr"] = ( hyperparams["lr"] * cfg.SOLVER.BACKBONE_MULTIPLIER ) if ( "relative_position_bias_table" in module_param_name or "absolute_pos_embed" in module_param_name ): print(module_param_name) hyperparams["weight_decay"] = 0.0 if isinstance(module, norm_module_types): hyperparams["weight_decay"] = weight_decay_norm if isinstance(module, torch.nn.Embedding): hyperparams["weight_decay"] = weight_decay_embed params.append({"params": [value], **hyperparams}) def maybe_add_full_model_gradient_clipping(optim): # detectron2 doesn't have full model gradient clipping now clip_norm_val = cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE enable = ( cfg.SOLVER.CLIP_GRADIENTS.ENABLED and cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model" and clip_norm_val > 0.0 ) class FullModelGradientClippingOptimizer(optim): def step(self, closure=None): all_params = itertools.chain( *[x["params"] for x in self.param_groups] ) torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val) super().step(closure=closure) return FullModelGradientClippingOptimizer if enable else optim optimizer_type = cfg.SOLVER.OPTIMIZER if optimizer_type == "SGD": optimizer = maybe_add_full_model_gradient_clipping(torch.optim.SGD)( params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM ) elif optimizer_type == "ADAMW": optimizer = maybe_add_full_model_gradient_clipping(torch.optim.AdamW)( params, cfg.SOLVER.BASE_LR ) else: raise NotImplementedError(f"no optimizer type {optimizer_type}") if not cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model": optimizer = maybe_add_gradient_clipping(cfg, optimizer) return optimizer @classmethod def test(cls, cfg, model, output_folder=None, evaluators=None): """ Evaluate the given model. The given model is expected to already contain weights to evaluate. Args: cfg (CfgNode): model (nn.Module): evaluators (list[DatasetEvaluator] or None): if None, will call :meth:`build_evaluator`. Otherwise, must have the same length as ``cfg.DATASETS.TEST``. 
Returns: dict: a dict of result metrics """ logger = logging.getLogger(__name__) if isinstance(evaluators, DatasetEvaluator): evaluators = [evaluators] if evaluators is not None: assert len(cfg.DATASETS.TEST) == len(evaluators), "{} != {}".format( len(cfg.DATASETS.TEST), len(evaluators) ) results = OrderedDict() for idx, dataset_name in enumerate(cfg.DATASETS.TEST): data_loader = cls.build_test_loader(cfg, dataset_name) # When evaluators are passed in as arguments, # implicitly assume that evaluators can be created before data_loader. if evaluators is not None: evaluator = evaluators[idx] else: try: evaluator = cls.build_evaluator( cfg, dataset_name, output_folder=output_folder ) except NotImplementedError: logger.warn( "No evaluator found. Use `DefaultTrainer.test(evaluators=)`, " "or implement its `build_evaluator` method." ) results[dataset_name] = {} continue results_i = inference_on_dataset(model, data_loader, evaluator) results[dataset_name] = results_i if comm.is_main_process(): assert isinstance( results_i, dict ), "Evaluator must return a dict on the main process. Got {} instead.".format( results_i ) logger.info( "Evaluation results for {} in csv format:".format(dataset_name) ) print_csv_format(results_i) if len(results) == 1: results = list(results.values())[0] return results def build_hooks(self): """ Build a list of default hooks, including timing, evaluation, checkpointing, lr scheduling, precise BN, writing events. Returns: list[HookBase]: """ cfg = self.cfg.clone() cfg.defrost() cfg.DATALOADER.NUM_WORKERS = 0 # save some memory and time for PreciseBN ret = [ hooks.IterationTimer(), hooks.LRScheduler(), hooks.PreciseBN( # Run at the same freq as (but before) evaluation. cfg.TEST.EVAL_PERIOD, self.model, # Build a new data loader to not affect training self.build_train_loader(cfg), cfg.TEST.PRECISE_BN.NUM_ITER, ) if cfg.TEST.PRECISE_BN.ENABLED and get_bn_modules(self.model) else None, ] # Do PreciseBN before checkpointer, because it updates the model and need to # be saved by checkpointer. # This is not always the best: if checkpointing has a different frequency, # some checkpoints may have more precise statistics than others. if comm.is_main_process(): ret.append( hooks.PeriodicCheckpointer(self.checkpointer, cfg.TEST.EVAL_PERIOD * 5) ) def test_and_save_results(): self._last_eval_results = self.test(self.cfg, self.model) return self._last_eval_results # Do evaluation after checkpointer, because then if it fails, # we can use the saved checkpoint to debug. # ret.append(hooks.EvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results)) ret.append(PersoEvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results)) if comm.is_main_process(): # Here the default print/log frequency of each writer is used. # run writers in the end, so that evaluation metrics are written ret.append(hooks.PeriodicWriter(self.build_writers(), period=20)) return ret def setup(args): """ Create configs and perform basic setups. """ cfg = get_cfg() # for poly lr schedule add_deeplab_config(cfg) add_maskformer2_config(cfg) add_clouds_config(cfg)
add_wandb_config(cfg)
2
2023-12-15 15:40:58+00:00
24k
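A minimal sketch of the full-model gradient clipping pattern used in Trainer.build_optimizer above, reduced to plain PyTorch. The Linear model, the SGD choice and the clip value of 1.0 are illustrative assumptions, not values taken from the record.

import itertools
import torch

def with_full_model_grad_clipping(optim_cls, clip_norm_val):
    # Wrap an optimizer class so step() first clips the global gradient norm,
    # mirroring maybe_add_full_model_gradient_clipping in the record above.
    class FullModelGradientClippingOptimizer(optim_cls):
        def step(self, closure=None):
            all_params = itertools.chain(*[g["params"] for g in self.param_groups])
            torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val)
            return super().step(closure=closure)
    return FullModelGradientClippingOptimizer

model = torch.nn.Linear(4, 2)                      # toy model, assumption for the example
optimizer = with_full_model_grad_clipping(torch.optim.SGD, clip_norm_val=1.0)(
    model.parameters(), lr=0.1)
loss = model(torch.randn(8, 4)).sum()
loss.backward()
optimizer.step()                                   # gradients are clipped before the update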
Ruiyuan-Zhang/CCS
multi_part_assembly/utils/wx_transformer_utilities/transformer_layer.py
[ { "identifier": "LayerNorm", "path": "multi_part_assembly/utils/wx_transformer_utilities/layer_norm.py", "snippet": "def LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True, export=False):\n if not export and torch.cuda.is_available() and has_fused_layernorm:\n return FusedLayerNorm(...
from typing import Dict, List, Optional from .layer_norm import LayerNorm from .multihead_attention import MultiheadAttention from .relational_memory import RelationalMemory from .group_linear_layer import GroupLinearLayer from .basic_mha import MemoryAttention from .quant_noise import quant_noise from .fairseq_dropout import FairseqDropout from torch import Tensor import torch import torch.nn as nn import multi_part_assembly.utils.wx_transformer_utilities.fairseq_utils as utils import random import torch.nn.functional as F
17,572
self.use_nfm = use_nfm #self.n_heads = n_heads self.n_heads = 12 self.n_blocks = n_blocks self.dim = dim self.block_dim = dim // self.n_blocks #self.head_dim = self.block_dim // self.n_heads self.head_dim = 64 self.scale = self.head_dim ** -0.5 self.query_net = GroupLinearLayer(self.block_dim, self.head_dim * self.n_heads, n_blocks) self.key_net = GroupLinearLayer(self.block_dim, self.head_dim * self.n_heads, n_blocks) self.value_net = GroupLinearLayer(self.block_dim, self.head_dim * self.n_heads, n_blocks) self.final = GroupLinearLayer(self.head_dim * self.n_heads, self.block_dim, n_blocks) def forward(self, x, qkv=None): use_exshare = False if qkv is not None: klst, vlst = qkv seq_len, bsz, _ = x.shape if use_exshare: x = x.view(seq_len, bsz, self.n_blocks * self.block_dim) q = self.query_net(x).view(seq_len, 1, bsz*self.n_blocks, self.n_heads, self.head_dim) k = self.key_net(x).view(seq_len, 1, bsz*self.n_blocks, self.n_heads, self.head_dim) v = self.value_net(x).view(seq_len, 1, bsz*self.n_blocks, self.n_heads, self.head_dim) else: x = x.view(seq_len, bsz, self.n_blocks * self.block_dim) q = self.query_net(x).view(seq_len, bsz, self.n_blocks, self.n_heads, self.head_dim) k = self.key_net(x).view(seq_len, bsz, self.n_blocks, self.n_heads, self.head_dim) v = self.value_net(x).view(seq_len, bsz, self.n_blocks, self.n_heads, self.head_dim) q = q.transpose(2,3) * self.scale k = k.transpose(2,3) v = v.transpose(2,3) if random.uniform(0,1) < 0.00001: print('use NFM?', self.use_nfm) if self.use_nfm: if qkv is not None: klst.append(k) vlst.append(v) #print('len qlst', len(qlst)) #for kval in klst: # print(kval.shape) k = torch.cat(klst, dim=3) v = torch.cat(vlst, dim=3) #should return these q,k,v and save to a big list. Also pull in from the list passed in and concat along dim=3, i.e. so that it's nblocks * nlayers. #print('running comm attention with shapes', q.shape, k.shape, v.shape) score = torch.matmul(q, k.transpose(3,4)) #print('score shape', score.shape) score = F.softmax(score, dim=-1) out = torch.matmul(score, v).transpose(2,3) #print('out shape', out.shape) score = score.mean(dim=2) out = out.reshape(seq_len, bsz, self.n_blocks * self.head_dim * self.n_heads) out = self.final(out) out = out.view(seq_len, bsz, self.dim) return out, score class NormLayer(nn.Module): def __init__(self, num_rims, dim, export=False): super(NormLayer, self).__init__() self.num_rims = num_rims self.dim = dim self.weight = nn.Parameter(torch.ones(1,1,dim*num_rims,)) self.bias = nn.Parameter(torch.zeros(1,1,dim*num_rims,)) self.norm = LayerNorm(dim, export=export, elementwise_affine=False) def forward(self, x): seq_len, bsz, _ = x.shape x = x.view(seq_len, bsz, self.num_rims, self.dim) x = self.norm(x) x = x.view(seq_len, bsz, self.num_rims * self.dim) weight_use = self.weight.repeat(seq_len, bsz, 1) bias_use = self.bias.repeat(seq_len, bsz, 1) x = x * weight_use + bias_use return x class TransformerEncoderLayer(nn.Module): """Encoder layer block. In the original paper each operation (multi-head attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.encoder_normalize_before* to ``True``. 
Args: args (argparse.Namespace): parsed command-line arguments """ def __init__(self, args, nb, blockatt, blockatt_memory, use_nfm, out_proj_dim=None): super().__init__() self.blockatt = blockatt self.blockatt_memory = blockatt_memory self.embed_dim = args.encoder_embed_dim
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. #from fairseq.modules.shared_group_linear_layer import SharedGroupLinearLayer class TransformerEncoderLayerVanilla(nn.Module): """Encoder layer block. In the original paper each operation (multi-head attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.encoder_normalize_before* to ``True``. Args: args (argparse.Namespace): parsed command-line arguments """ def __init__(self, args, out_proj = None): super().__init__() self.embed_dim = args.encoder_embed_dim self.self_attn = self.build_self_attention(self.embed_dim, args) self.self_attn_layer_norm = LayerNorm(self.embed_dim, eps=1e-5) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, "activation_fn", "relu") ) self.activation_dropout = getattr(args, "activation_dropout", 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, "relu_dropout", 0) self.normalize_before = args.encoder_normalize_before self.fc1 = self.build_fc1(self.embed_dim, args.encoder_ffn_embed_dim) self.fc2 = self.build_fc2(args.encoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim, eps=1e-5) if out_proj is not None: self.final_linear = nn.Linear(args.encoder_embed_dim, out_proj) else: self.final_linear = None def build_fc1(self, input_dim, output_dim): return nn.Linear(input_dim, output_dim) def build_fc2(self, input_dim, output_dim): return nn.Linear(input_dim, output_dim) def build_self_attention(self, embed_dim, args): return MultiheadAttention( embed_dim, args.encoder_attention_heads, dropout=args.attention_dropout, self_attention=args.self_attention, shared_memory_attention = args.shared_memory_attention, use_topk = args.use_topk, topk = args.topk, num_steps = args.num_steps, mem_slots = args.mem_slots, null_attention = args.null_attention, regressive = args.regressive ) def upgrade_state_dict_named(self, state_dict, name): """ Rename layer norm states from `...layer_norms.0.weight` to `...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to `...final_layer_norm.weight` """ layer_norm_map = {"0": "self_attn_layer_norm", "1": "final_layer_norm"} for old, new in layer_norm_map.items(): for m in ("weight", "bias"): k = "{}.layer_norms.{}.{}".format(name, old, m) if k in state_dict: state_dict["{}.{}.{}".format(name, new, m)] = state_dict[k] del state_dict[k] def forward(self, x, encoder_padding_mask, attn_mask: Optional[Tensor] = None, state = None, memory = None): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. 
attn_mask (ByteTensor): binary tensor of shape (T_tgt, T_src), where T_tgt is the length of query, while T_src is the length of key, though here both query and key is x here, attn_mask[t_tgt, t_src] = 1 means when calculating embedding for t_tgt, t_src is excluded (or masked out), =0 means it is included in attention Returns: encoded output of shape `(seq_len, batch, embed_dim)` """ residual = x if self.normalize_before: x = self.self_attn_layer_norm(x) if attn_mask is not None: attn_mask = attn_mask.masked_fill(attn_mask.to(torch.bool), -1e8) # anything in original attn_mask = 1, becomes -1e8 # anything in original attn_mask = 0, becomes 0 # Note that we cannot use -inf here, because at some edge cases, # the attention weight (before softmax) for some padded element in query # will become -inf, which results in NaN in model parameters # TODO: to formally solve this problem, we need to change fairseq's # MultiheadAttention. We will do this later on. #print(state is not None) x, memory, _ = self.self_attn( query=state if state is not None else x, key=x, value=x, key_padding_mask=encoder_padding_mask, attn_mask=attn_mask, memory = memory ) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x if not self.normalize_before: x = self.self_attn_layer_norm(x) residual = x if self.normalize_before: x = self.final_layer_norm(x) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=float(self.activation_dropout), training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x if not self.normalize_before: x = self.final_layer_norm(x) if self.final_linear is not None: x = self.final_linear(x) return x, memory class Attention(nn.Module): def __init__(self, n_heads, n_blocks, dim, use_nfm): super(Attention, self).__init__() self.use_nfm = use_nfm #self.n_heads = n_heads self.n_heads = 12 self.n_blocks = n_blocks self.dim = dim self.block_dim = dim // self.n_blocks #self.head_dim = self.block_dim // self.n_heads self.head_dim = 64 self.scale = self.head_dim ** -0.5 self.query_net = GroupLinearLayer(self.block_dim, self.head_dim * self.n_heads, n_blocks) self.key_net = GroupLinearLayer(self.block_dim, self.head_dim * self.n_heads, n_blocks) self.value_net = GroupLinearLayer(self.block_dim, self.head_dim * self.n_heads, n_blocks) self.final = GroupLinearLayer(self.head_dim * self.n_heads, self.block_dim, n_blocks) def forward(self, x, qkv=None): use_exshare = False if qkv is not None: klst, vlst = qkv seq_len, bsz, _ = x.shape if use_exshare: x = x.view(seq_len, bsz, self.n_blocks * self.block_dim) q = self.query_net(x).view(seq_len, 1, bsz*self.n_blocks, self.n_heads, self.head_dim) k = self.key_net(x).view(seq_len, 1, bsz*self.n_blocks, self.n_heads, self.head_dim) v = self.value_net(x).view(seq_len, 1, bsz*self.n_blocks, self.n_heads, self.head_dim) else: x = x.view(seq_len, bsz, self.n_blocks * self.block_dim) q = self.query_net(x).view(seq_len, bsz, self.n_blocks, self.n_heads, self.head_dim) k = self.key_net(x).view(seq_len, bsz, self.n_blocks, self.n_heads, self.head_dim) v = self.value_net(x).view(seq_len, bsz, self.n_blocks, self.n_heads, self.head_dim) q = q.transpose(2,3) * self.scale k = k.transpose(2,3) v = v.transpose(2,3) if random.uniform(0,1) < 0.00001: print('use NFM?', self.use_nfm) if self.use_nfm: if qkv is not None: klst.append(k) vlst.append(v) #print('len qlst', len(qlst)) #for kval in klst: # print(kval.shape) k = torch.cat(klst, dim=3) v = torch.cat(vlst, dim=3) #should return these q,k,v and save to a big 
list. Also pull in from the list passed in and concat along dim=3, i.e. so that it's nblocks * nlayers. #print('running comm attention with shapes', q.shape, k.shape, v.shape) score = torch.matmul(q, k.transpose(3,4)) #print('score shape', score.shape) score = F.softmax(score, dim=-1) out = torch.matmul(score, v).transpose(2,3) #print('out shape', out.shape) score = score.mean(dim=2) out = out.reshape(seq_len, bsz, self.n_blocks * self.head_dim * self.n_heads) out = self.final(out) out = out.view(seq_len, bsz, self.dim) return out, score class NormLayer(nn.Module): def __init__(self, num_rims, dim, export=False): super(NormLayer, self).__init__() self.num_rims = num_rims self.dim = dim self.weight = nn.Parameter(torch.ones(1,1,dim*num_rims,)) self.bias = nn.Parameter(torch.zeros(1,1,dim*num_rims,)) self.norm = LayerNorm(dim, export=export, elementwise_affine=False) def forward(self, x): seq_len, bsz, _ = x.shape x = x.view(seq_len, bsz, self.num_rims, self.dim) x = self.norm(x) x = x.view(seq_len, bsz, self.num_rims * self.dim) weight_use = self.weight.repeat(seq_len, bsz, 1) bias_use = self.bias.repeat(seq_len, bsz, 1) x = x * weight_use + bias_use return x class TransformerEncoderLayer(nn.Module): """Encoder layer block. In the original paper each operation (multi-head attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.encoder_normalize_before* to ``True``. Args: args (argparse.Namespace): parsed command-line arguments """ def __init__(self, args, nb, blockatt, blockatt_memory, use_nfm, out_proj_dim=None): super().__init__() self.blockatt = blockatt self.blockatt_memory = blockatt_memory self.embed_dim = args.encoder_embed_dim
self.quant_noise = getattr(args, "quant_noise_pq", 0)
5
2023-12-15 13:13:01+00:00
24k
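The Attention module in the record above projects each block with GroupLinearLayer and then applies ordinary scaled dot-product attention across heads. A minimal sketch of just that scoring step follows; the batch size, sequence length and the head_dim = 64 scale are assumptions chosen to echo the hard-coded values in the record.

import torch
import torch.nn.functional as F

def scaled_dot_product(q, k, v, scale):
    # q, k, v: (batch, heads, seq_len, head_dim); scale is typically head_dim ** -0.5
    score = torch.matmul(q * scale, k.transpose(-2, -1))   # (batch, heads, seq, seq)
    score = F.softmax(score, dim=-1)
    out = torch.matmul(score, v)                            # weighted sum of values
    return out, score

q = torch.randn(2, 12, 10, 64)
k = torch.randn(2, 12, 10, 64)
v = torch.randn(2, 12, 10, 64)
out, attn = scaled_dot_product(q, k, v, scale=64 ** -0.5)
print(out.shape, attn.shape)  # torch.Size([2, 12, 10, 64]) torch.Size([2, 12, 10, 10])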
m-abr/FCPCodebase
scripts/utils/Localization.py
[ { "identifier": "Agent", "path": "agent/Agent.py", "snippet": "class Agent(Base_Agent):\n def __init__(self, host:str, agent_port:int, monitor_port:int, unum:int,\n team_name:str, enable_log, enable_draw, wait_for_server=True, is_fat_proxy=False) -> None:\n \n # define r...
from agent.Agent import Agent as Agent from cpp.localization import localization from math_ops.Math_Ops import Math_Ops as M from scripts.commons.Script import Script from world.commons.Draw import Draw from world.commons.Other_Robot import Other_Robot
16,305
class Localization(): def __init__(self,script:Script) -> None: self.script = script def execute(self): a = self.script.args d = self.draw = Draw(True, 0, a.i, 32769) # using independent draw object so that the internal agent drawings can be disabled # Args: Server IP, Agent Port, Monitor Port, Uniform No., Team name, Enable Log, Enable Draw self.script.batch_create(Agent, ((a.i,a.p,a.m,1,a.t,False,False),)) # one teammate (dummy goalkeeper without communication) self.script.batch_create(Agent, ((a.i,a.p,a.m,5,"Opponent",False,False),)) # one opponent self.script.batch_create(Agent, ((a.i,a.p,a.m,9,a.t,False,False),)) # one main agent (the one who draws its world) # Beam dummy goalkeeper self.script.batch_unofficial_beam( ((-14,0,0.5,0),), slice(0,1)) p : Agent = self.script.players[-1] # p identifies the main agent p.scom.unofficial_set_play_mode("PlayOn") # Execute while True: self.script.batch_commit_and_send(slice(0,1)) # dummy agent does not think self.script.batch_execute_agent(slice(1,None)) # execute normal agents self.script.batch_receive(slice(0,1), False) # receive & don't update dummy's world state (to save cpu resources) self.script.batch_receive(slice(1,None)) # receive & update world state if p.world.vision_is_up_to_date: if p.world.robot.loc_is_up_to_date: # localization will draw the world of the last agent to be executed localization.print_python_data() # print data received by the localization module localization.draw_visible_elements(not p.world.team_side_is_left) # draw visible elements localization.print_report() # print report with stats print("\nPress ctrl+c to return.") d.circle( p.world.ball_abs_pos, 0.1,6,Draw.Color.purple_magenta,"world", False) else: d.annotation( p.world.robot.cheat_abs_pos, "Not enough visual data!", Draw.Color.red,"world", False) for o in p.world.teammates: if o.state_last_update != 0 and not o.is_self: # skip if other robot was not yet seen self._draw_other_robot(p, o, Draw.Color.white) for o in p.world.opponents: if o.state_last_update != 0: # skip if other robot was not yet seen self._draw_other_robot(p, o, Draw.Color.red) d.flush("world") def _draw_other_robot(self, p:Agent, o:Other_Robot, team_color): #p - player that sees #o - other robot (player that is seen) d = self.draw white = Draw.Color.white green = Draw.Color.green_light gray = Draw.Color.gray_20 time_diff = p.world.time_local_ms - o.state_last_update if time_diff > 0: white = Draw.Color.gray_40 green = Draw.Color.get(107, 139, 107) gray = Draw.Color.gray_50 #orientation if len(o.state_abs_pos)==3:
class Localization(): def __init__(self,script:Script) -> None: self.script = script def execute(self): a = self.script.args d = self.draw = Draw(True, 0, a.i, 32769) # using independent draw object so that the internal agent drawings can be disabled # Args: Server IP, Agent Port, Monitor Port, Uniform No., Team name, Enable Log, Enable Draw self.script.batch_create(Agent, ((a.i,a.p,a.m,1,a.t,False,False),)) # one teammate (dummy goalkeeper without communication) self.script.batch_create(Agent, ((a.i,a.p,a.m,5,"Opponent",False,False),)) # one opponent self.script.batch_create(Agent, ((a.i,a.p,a.m,9,a.t,False,False),)) # one main agent (the one who draws its world) # Beam dummy goalkeeper self.script.batch_unofficial_beam( ((-14,0,0.5,0),), slice(0,1)) p : Agent = self.script.players[-1] # p identifies the main agent p.scom.unofficial_set_play_mode("PlayOn") # Execute while True: self.script.batch_commit_and_send(slice(0,1)) # dummy agent does not think self.script.batch_execute_agent(slice(1,None)) # execute normal agents self.script.batch_receive(slice(0,1), False) # receive & don't update dummy's world state (to save cpu resources) self.script.batch_receive(slice(1,None)) # receive & update world state if p.world.vision_is_up_to_date: if p.world.robot.loc_is_up_to_date: # localization will draw the world of the last agent to be executed localization.print_python_data() # print data received by the localization module localization.draw_visible_elements(not p.world.team_side_is_left) # draw visible elements localization.print_report() # print report with stats print("\nPress ctrl+c to return.") d.circle( p.world.ball_abs_pos, 0.1,6,Draw.Color.purple_magenta,"world", False) else: d.annotation( p.world.robot.cheat_abs_pos, "Not enough visual data!", Draw.Color.red,"world", False) for o in p.world.teammates: if o.state_last_update != 0 and not o.is_self: # skip if other robot was not yet seen self._draw_other_robot(p, o, Draw.Color.white) for o in p.world.opponents: if o.state_last_update != 0: # skip if other robot was not yet seen self._draw_other_robot(p, o, Draw.Color.red) d.flush("world") def _draw_other_robot(self, p:Agent, o:Other_Robot, team_color): #p - player that sees #o - other robot (player that is seen) d = self.draw white = Draw.Color.white green = Draw.Color.green_light gray = Draw.Color.gray_20 time_diff = p.world.time_local_ms - o.state_last_update if time_diff > 0: white = Draw.Color.gray_40 green = Draw.Color.get(107, 139, 107) gray = Draw.Color.gray_50 #orientation if len(o.state_abs_pos)==3:
line_tip = o.state_abs_pos + (0.5*M.deg_cos(o.state_orientation),0.5*M.deg_sin(o.state_orientation),0)
0
2023-12-16 23:40:23+00:00
24k
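The record's next_line extends a seen robot's position by 0.5 m along its orientation to draw a heading arrow. A small numpy-only sketch of that geometry is given below; it assumes M.deg_cos and M.deg_sin are plain degree-based cosine and sine, and the sample position is made up for illustration.

import numpy as np

def heading_tip(pos_xyz, orientation_deg, length=0.5):
    # Point `length` metres ahead of pos_xyz along the given orientation (degrees).
    rad = np.deg2rad(orientation_deg)
    return np.asarray(pos_xyz, dtype=float) + np.array(
        [length * np.cos(rad), length * np.sin(rad), 0.0])

print(heading_tip((1.0, 2.0, 0.5), orientation_deg=90.0))  # [1.  2.5 0.5]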
Sam-Izdat/tinycio
src/tinycio/lut.py
[ { "identifier": "ColorSpace", "path": "src/tinycio/colorspace.py", "snippet": "class ColorSpace:\n \"\"\"\n Color space conversion. Applies OETFs and EOTFs as needed but omits tonemapping. Cylindrical transformations are \n treated as distinct color spaces. Example:\n\n .. highlight:: python...
import typing import os import torch import torch.optim as optim import torch.nn as nn import torch.nn.functional as F from typing import Union from enum import IntEnum from contextlib import nullcontext from .colorspace import ColorSpace from .fsio.lutfile import load_lut, save_lut, _infer_lut_file_format, _generate_linear_cube_lut from .fsio.format import LUTFormat from .util.colorutil import srgb_luminance from .util.miscutil import trilinear_interpolation from .loss import feature_moments_calculation
19,440
indices = (im * (self.lattice.size(0) - 1)).clamp(0, self.lattice.size(0) - 1) im_out = trilinear_interpolation(self.lattice, indices) return im_out @classmethod def get_linear(cls, size:int=32, lut_format:LUTFormat=LUTFormat.CUBE_3D) -> LookupTable: """ Returns linear LUT. Has no effect: when applied, output matches input ([0, 1] range). :param size: Size of the LUT. :param lut_format: Format of the LUT. """ if lut_format == LUTFormat.CUBE_3D: assert cls.__min_size <= size <= cls.__max_size, f"LUT size must be between {cls.__min_size} and {cls.__max_size}" variant = LUTFormat.CUBE_3D lattice = _generate_linear_cube_lut(size) else: raise Exception(f"Backpropagation not implemented for: {lut_format.name}") return cls(size, lattice, variant) @classmethod def get_negative(cls, size:int=32, lut_format:LUTFormat=LUTFormat.CUBE_3D) -> LookupTable: """ Returns negative LUT. Output is inverted ([0, 1] range). :param size: Size of the LUT. :param lut_format: Format of the LUT. """ lut = cls.get_linear(size, lut_format) lut.lattice = 1. - lut.lattice return lut @classmethod def get_random(cls, size:int=32, lut_format:LUTFormat=LUTFormat.CUBE_3D) -> LookupTable: """ Returns random LUT. Everything mapped to random values ([0, 1] range). :param size: Size of the LUT. :param lut_format: Format of the LUT. """ lut = cls.get_linear(size, lut_format) lut.lattice = torch.randn_like(lut.lattice) return lut @classmethod def get_empty(cls, size:int=32, lut_format:LUTFormat=LUTFormat.CUBE_3D) -> LookupTable: """ Returns empty LUT. All values mapped to 0. :param size: Size of the LUT. :param lut_format: Format of the LUT. """ lut = cls.get_linear(size, lut_format) lut.lattice = lut.lattice * 0. return lut def fit_to_image(self, im_source:Union[torch.Tensor, ColorImage], im_target:Union[torch.Tensor, ColorImage], steps:int=500, learning_rate:float=0.003, strength:float=1., fit_height:int=512, fit_width:int=512, device:str='cuda', context:callable=None ) -> bool: """ Perform gradient descent on the lattice, so that the appearance of the source image matches the target. :param im_source: Source image tensor. Values must be in range [0, 1]. :type im_source: torch.Tensor | ColorImage :param im_target: Target image tensor. :type im_target: torch.Tensor | ColorImage :param steps: Number of optimization steps. :param learning_rate: Learning rate for gradient descent. :param strength: Strength of the effect in range [0, 1]. :param fit_height: Image tensors will be interpolated to this height for evaluation. :param fit_width: Image tensors will be interpolated to this width for evaluation. :param device: Device for gradient descent (if None will use input tensor device). :return: True when completed """ assert 0. 
<= strength <= 1., "strength must be in range [0, 1]" im_source = im_source.clone() device = torch.device(device.strip().lower()) if device is not None else im_source.device im_source = F.interpolate( im_source.unsqueeze(0), size=[fit_height, fit_width], mode='bicubic', align_corners=False).squeeze(0).clamp(0.,1.).to(device) im_target = F.interpolate( im_target.unsqueeze(0), size=[fit_height, fit_width], mode='bicubic', align_corners=False).squeeze(0).clamp(0.,1.).to(device) __ctx = context if context is not None and callable(context) else nullcontext with __ctx() as ctx: cb_callable = hasattr(ctx, 'update_fit_status') and callable(ctx.update_fit_status) cb = ctx.update_fit_status if cb_callable else lambda a, b, c, d: None if self.lut_format == LUTFormat.CUBE_3D: lut = torch.nn.Parameter(self.lattice) lut.requires_grad_() optimizer = optim.Adam([lut], lr=learning_rate) indices = (im_source * (lut.size(0) - 1)).clamp(0, lut.size(0) - 1).to(device) area = fit_height * fit_height fm_mean_scale = area fm_p2_scale = area / 32. fm_p3_scale = area / 64. selfsim_scale = area sat_scale = area # lut optimization goes a bit wild with this for step in range(steps): t_source = trilinear_interpolation(lut.to(device), indices).to(device) loss = 0. # Main feature loss
from __future__ import annotations class LookupTable: """ Color lookup table. Example: .. highlight:: python .. code-block:: python lut = LookupTable.get_negative() im_negative = lut.apply(im) :param size: Size of the LUT. :param lattice: Lattice as tensor (defaults to linear). :param lut_format: Format of the LUT. """ size = 32 lattice = None lut_format= LUTFormat.UNKNOWN __min_size, __max_size = 4, 512 def __init__(self, size:int, lattice:torch.Tensor=None, lut_format:LUTFormat=LUTFormat.CUBE_3D): assert self.__min_size <= size <= self.__max_size, f"LUT size must be between {self.__min_size} and {self.__max_size}" self.size == size self.lattice = lattice if lattice is not None else _generate_linear_cube_lut(size) self.lut_format = lut_format @classmethod def load(cls, fp:str, lut_format:LUTFormat=LUTFormat.UNKNOWN) -> LookupTable: """ Load LUT from file. :param fp: File path. :param lut_format: Format of the LUT. """ fp = os.path.realpath(fp) fn, fnext = os.path.splitext(fp) variant = lut_format if lut_format > LUTFormat.UNKNOWN else _infer_lut_file_format(fnext) assert variant > LUTFormat.UNKNOWN, "Unrecognized LUT format" lattice = load_lut(fp, variant) return cls(lattice.size(0), lattice, variant) def save(self, fp:str, lut_format:LUTFormat=LUTFormat.UNKNOWN): """ Save LUT to file. .. warning:: This will overwrite existing files. :param fp: File path. :param lut_format: Format of the LUT. """ fp = os.path.realpath(fp) fn, fnext = os.path.splitext(fp) variant = lut_format if lut_format > LUTFormat.UNKNOWN else _infer_lut_file_format(fnext) or self.variant assert variant > LUTFormat.UNKNOWN, "Unrecognized LUT format" lattice = save_lut(self.lattice, fp, variant) return True def apply(self, im:Union[torch.Tensor, ColorImage]) -> torch.Tensor: """ Apply LUT to image tensor. :param im: Input image tensor :type im: torch.Tensor | ColorImage :return: Image tensor with LUT applied """ assert self.lut_format > LUTFormat.UNKNOWN and self.lattice != None, "No LUT has been loaded" assert im.size(0) == 3, "Image should have three color channels (RGB)" assert self.lattice.size(-1) == 3, "Cube LUT should have three color channels" indices = (im * (self.lattice.size(0) - 1)).clamp(0, self.lattice.size(0) - 1) im_out = trilinear_interpolation(self.lattice, indices) return im_out @classmethod def get_linear(cls, size:int=32, lut_format:LUTFormat=LUTFormat.CUBE_3D) -> LookupTable: """ Returns linear LUT. Has no effect: when applied, output matches input ([0, 1] range). :param size: Size of the LUT. :param lut_format: Format of the LUT. """ if lut_format == LUTFormat.CUBE_3D: assert cls.__min_size <= size <= cls.__max_size, f"LUT size must be between {cls.__min_size} and {cls.__max_size}" variant = LUTFormat.CUBE_3D lattice = _generate_linear_cube_lut(size) else: raise Exception(f"Backpropagation not implemented for: {lut_format.name}") return cls(size, lattice, variant) @classmethod def get_negative(cls, size:int=32, lut_format:LUTFormat=LUTFormat.CUBE_3D) -> LookupTable: """ Returns negative LUT. Output is inverted ([0, 1] range). :param size: Size of the LUT. :param lut_format: Format of the LUT. """ lut = cls.get_linear(size, lut_format) lut.lattice = 1. - lut.lattice return lut @classmethod def get_random(cls, size:int=32, lut_format:LUTFormat=LUTFormat.CUBE_3D) -> LookupTable: """ Returns random LUT. Everything mapped to random values ([0, 1] range). :param size: Size of the LUT. :param lut_format: Format of the LUT. 
""" lut = cls.get_linear(size, lut_format) lut.lattice = torch.randn_like(lut.lattice) return lut @classmethod def get_empty(cls, size:int=32, lut_format:LUTFormat=LUTFormat.CUBE_3D) -> LookupTable: """ Returns empty LUT. All values mapped to 0. :param size: Size of the LUT. :param lut_format: Format of the LUT. """ lut = cls.get_linear(size, lut_format) lut.lattice = lut.lattice * 0. return lut def fit_to_image(self, im_source:Union[torch.Tensor, ColorImage], im_target:Union[torch.Tensor, ColorImage], steps:int=500, learning_rate:float=0.003, strength:float=1., fit_height:int=512, fit_width:int=512, device:str='cuda', context:callable=None ) -> bool: """ Perform gradient descent on the lattice, so that the appearance of the source image matches the target. :param im_source: Source image tensor. Values must be in range [0, 1]. :type im_source: torch.Tensor | ColorImage :param im_target: Target image tensor. :type im_target: torch.Tensor | ColorImage :param steps: Number of optimization steps. :param learning_rate: Learning rate for gradient descent. :param strength: Strength of the effect in range [0, 1]. :param fit_height: Image tensors will be interpolated to this height for evaluation. :param fit_width: Image tensors will be interpolated to this width for evaluation. :param device: Device for gradient descent (if None will use input tensor device). :return: True when completed """ assert 0. <= strength <= 1., "strength must be in range [0, 1]" im_source = im_source.clone() device = torch.device(device.strip().lower()) if device is not None else im_source.device im_source = F.interpolate( im_source.unsqueeze(0), size=[fit_height, fit_width], mode='bicubic', align_corners=False).squeeze(0).clamp(0.,1.).to(device) im_target = F.interpolate( im_target.unsqueeze(0), size=[fit_height, fit_width], mode='bicubic', align_corners=False).squeeze(0).clamp(0.,1.).to(device) __ctx = context if context is not None and callable(context) else nullcontext with __ctx() as ctx: cb_callable = hasattr(ctx, 'update_fit_status') and callable(ctx.update_fit_status) cb = ctx.update_fit_status if cb_callable else lambda a, b, c, d: None if self.lut_format == LUTFormat.CUBE_3D: lut = torch.nn.Parameter(self.lattice) lut.requires_grad_() optimizer = optim.Adam([lut], lr=learning_rate) indices = (im_source * (lut.size(0) - 1)).clamp(0, lut.size(0) - 1).to(device) area = fit_height * fit_height fm_mean_scale = area fm_p2_scale = area / 32. fm_p3_scale = area / 64. selfsim_scale = area sat_scale = area # lut optimization goes a bit wild with this for step in range(steps): t_source = trilinear_interpolation(lut.to(device), indices).to(device) loss = 0. # Main feature loss
feat_source_mean, feat_source_p2, feat_source_p3 = feature_moments_calculation(t_source.view(1,3,-1))
8
2023-12-15 15:39:08+00:00
24k
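The LookupTable in the record above maps RGB values to lattice indices and interpolates the stored colors trilinearly. The sketch below shows the same idea with a nearest-neighbour lookup, an assumption made only to keep the example short; the inverted lattice reproduces the behaviour of get_negative.

import torch

def make_linear_lut(size=33):
    r = torch.linspace(0.0, 1.0, size)
    # lattice[i, j, k] = (r[i], r[j], r[k]): applying it reproduces the input.
    return torch.stack(torch.meshgrid(r, r, r, indexing="ij"), dim=-1)

def apply_lut_nearest(lut, im):                        # im: (3, H, W) in [0, 1]
    idx = (im * (lut.size(0) - 1)).round().long().clamp(0, lut.size(0) - 1)
    return lut[idx[0], idx[1], idx[2]].permute(2, 0, 1)

im = torch.rand(3, 8, 8)
negative = 1.0 - make_linear_lut(33)                   # invert every lattice entry
print(torch.allclose(apply_lut_nearest(negative, im), 1.0 - im, atol=0.02))  # True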
Azure-Samples/functions-python-web-crawler
.venv/Lib/site-packages/urllib3/connectionpool.py
[ { "identifier": "_TYPE_BODY", "path": ".venv/Lib/site-packages/urllib3/_base_connection.py", "snippet": "_TYPE_BODY = typing.Union[bytes, typing.IO[typing.Any], typing.Iterable[bytes], str]" }, { "identifier": "HTTPHeaderDict", "path": ".venv/Lib/site-packages/urllib3/_collections.py", "...
import errno import logging import queue import sys import typing import warnings import weakref import ssl from socket import timeout as SocketTimeout from types import TracebackType from ._base_connection import _TYPE_BODY from ._collections import HTTPHeaderDict from ._request_methods import RequestMethods from .connection import ( BaseSSLError, BrokenPipeError, DummyConnection, HTTPConnection, HTTPException, HTTPSConnection, ProxyConfig, _wrap_proxy_error, ) from .connection import port_by_scheme as port_by_scheme from .exceptions import ( ClosedPoolError, EmptyPoolError, FullPoolError, HostChangedError, InsecureRequestWarning, LocationValueError, MaxRetryError, NewConnectionError, ProtocolError, ProxyError, ReadTimeoutError, SSLError, TimeoutError, ) from .response import BaseHTTPResponse from .util.connection import is_connection_dropped from .util.proxy import connection_requires_http_tunnel from .util.request import _TYPE_BODY_POSITION, set_file_position from .util.retry import Retry from .util.ssl_match_hostname import CertificateError from .util.timeout import _DEFAULT_TIMEOUT, _TYPE_DEFAULT, Timeout from .util.url import Url, _encode_target from .util.url import _normalize_host as normalize_host from .util.url import parse_url from .util.util import to_str from typing import Literal from ._base_connection import BaseHTTPConnection, BaseHTTPSConnection
20,866
from __future__ import annotations if typing.TYPE_CHECKING: log = logging.getLogger(__name__) _TYPE_TIMEOUT = typing.Union[Timeout, float, _TYPE_DEFAULT, None] _SelfT = typing.TypeVar("_SelfT") # Pool objects class ConnectionPool: """ Base class for all connection pools, such as :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`. .. note:: ConnectionPool.urlopen() does not normalize or percent-encode target URIs which is useful if your target server doesn't support percent-encoded target URIs. """ scheme: str | None = None QueueCls = queue.LifoQueue def __init__(self, host: str, port: int | None = None) -> None: if not host: raise LocationValueError("No host specified.") self.host = _normalize_host(host, scheme=self.scheme) self.port = port # This property uses 'normalize_host()' (not '_normalize_host()') # to avoid removing square braces around IPv6 addresses. # This value is sent to `HTTPConnection.set_tunnel()` if called # because square braces are required for HTTP CONNECT tunneling. self._tunnel_host = normalize_host(host, scheme=self.scheme).lower() def __str__(self) -> str: return f"{type(self).__name__}(host={self.host!r}, port={self.port!r})" def __enter__(self: _SelfT) -> _SelfT: return self def __exit__( self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None, ) -> Literal[False]: self.close() # Return False to re-raise any potential exceptions return False def close(self) -> None: """ Close all pooled connections and disable the pool. """ # This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252 _blocking_errnos = {errno.EAGAIN, errno.EWOULDBLOCK}
from __future__ import annotations if typing.TYPE_CHECKING: log = logging.getLogger(__name__) _TYPE_TIMEOUT = typing.Union[Timeout, float, _TYPE_DEFAULT, None] _SelfT = typing.TypeVar("_SelfT") # Pool objects class ConnectionPool: """ Base class for all connection pools, such as :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`. .. note:: ConnectionPool.urlopen() does not normalize or percent-encode target URIs which is useful if your target server doesn't support percent-encoded target URIs. """ scheme: str | None = None QueueCls = queue.LifoQueue def __init__(self, host: str, port: int | None = None) -> None: if not host: raise LocationValueError("No host specified.") self.host = _normalize_host(host, scheme=self.scheme) self.port = port # This property uses 'normalize_host()' (not '_normalize_host()') # to avoid removing square braces around IPv6 addresses. # This value is sent to `HTTPConnection.set_tunnel()` if called # because square braces are required for HTTP CONNECT tunneling. self._tunnel_host = normalize_host(host, scheme=self.scheme).lower() def __str__(self) -> str: return f"{type(self).__name__}(host={self.host!r}, port={self.port!r})" def __enter__(self: _SelfT) -> _SelfT: return self def __exit__( self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None, ) -> Literal[False]: self.close() # Return False to re-raise any potential exceptions return False def close(self) -> None: """ Close all pooled connections and disable the pool. """ # This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252 _blocking_errnos = {errno.EAGAIN, errno.EWOULDBLOCK}
class HTTPConnectionPool(ConnectionPool, RequestMethods):
2
2023-12-16 04:12:01+00:00
24k
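ConnectionPool in the record above implements __enter__ and __exit__, so any pool can be used as a context manager that closes its pooled connections on exit. A short usage sketch follows; the host, port and maxsize values are placeholders.

import urllib3

with urllib3.HTTPConnectionPool("httpbin.org", port=80, maxsize=2) as pool:
    response = pool.request("GET", "/get")
    print(response.status)
# Leaving the block calls pool.close(), as in ConnectionPool.__exit__ above.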
YaoFANGUK/video-subtitle-remover
backend/scenedetect/scene_manager.py
[ { "identifier": "SimpleTableCell", "path": "backend/scenedetect/_thirdparty/simpletable.py", "snippet": "class SimpleTableCell(object):\n \"\"\"A table class to create table cells.\n\n Example:\n cell = SimpleTableCell('Hello, world!')\n \"\"\"\n\n def __init__(self, text, header=False):\...
import csv import threading import queue import logging import math import sys import cv2 import numpy as np from enum import Enum from typing import Iterable, List, Tuple, Optional, Dict, Callable, Union, TextIO from backend.scenedetect._thirdparty.simpletable import (SimpleTableCell, SimpleTableImage, SimpleTableRow, SimpleTable, HTMLPage) from backend.scenedetect.platform import (tqdm, get_and_create_path, get_cv2_imwrite_params, Template) from backend.scenedetect.frame_timecode import FrameTimecode from backend.scenedetect.video_stream import VideoStream from backend.scenedetect.scene_detector import SceneDetector, SparseSceneDetector from backend.scenedetect.stats_manager import StatsManager, FrameMetricRegistered
14,678
row = SimpleTableRow([ '%d' % (i + 1), '%d' % (start.get_frames() + 1), start.get_timecode(), '%.3f' % start.get_seconds(), '%d' % end.get_frames(), end.get_timecode(), '%.3f' % end.get_seconds(), '%d' % duration.get_frames(), duration.get_timecode(), '%.3f' % duration.get_seconds() ]) if image_filenames: for image in image_filenames[i]: row.add_cell( SimpleTableCell( SimpleTableImage(image, width=image_width, height=image_height))) if i == 0: scene_table = SimpleTable(rows=[row], header_row=header_row, css_class=css_class) else: scene_table.add_row(row=row) # Write html file page = HTMLPage() page.add_table(timecode_table) page.add_table(scene_table) page.css = css page.save(output_html_filename) # # TODO(v1.0): Refactor to take a SceneList object; consider moving this and save scene list # to a better spot, or just move them to scene_list.py. # def save_images(scene_list: List[Tuple[FrameTimecode, FrameTimecode]], video: VideoStream, num_images: int = 3, frame_margin: int = 1, image_extension: str = 'jpg', encoder_param: int = 95, image_name_template: str = '$VIDEO_NAME-Scene-$SCENE_NUMBER-$IMAGE_NUMBER', output_dir: Optional[str] = None, show_progress: Optional[bool] = False, scale: Optional[float] = None, height: Optional[int] = None, width: Optional[int] = None, interpolation: Interpolation = Interpolation.CUBIC, video_manager=None) -> Dict[int, List[str]]: """Save a set number of images from each scene, given a list of scenes and the associated video/frame source. Arguments: scene_list: A list of scenes (pairs of FrameTimecode objects) returned from calling a SceneManager's detect_scenes() method. video: A VideoStream object corresponding to the scene list. Note that the video will be closed/re-opened and seeked through. num_images: Number of images to generate for each scene. Minimum is 1. frame_margin: Number of frames to pad each scene around the beginning and end (e.g. moves the first/last image into the scene by N frames). Can set to 0, but will result in some video files failing to extract the very last frame. image_extension: Type of image to save (must be one of 'jpg', 'png', or 'webp'). encoder_param: Quality/compression efficiency, based on type of image: 'jpg' / 'webp': Quality 0-100, higher is better quality. 100 is lossless for webp. 'png': Compression from 1-9, where 9 achieves best filesize but is slower to encode. image_name_template: Template to use when creating the images on disk. Can use the macros $VIDEO_NAME, $SCENE_NUMBER, and $IMAGE_NUMBER. The image extension is applied automatically as per the argument image_extension. output_dir: Directory to output the images into. If not set, the output is created in the working directory. show_progress: If True, shows a progress bar if tqdm is installed. scale: Optional factor by which to rescale saved images. A scaling factor of 1 would not result in rescaling. A value < 1 results in a smaller saved image, while a value > 1 results in an image larger than the original. This value is ignored if either the height or width values are specified. height: Optional value for the height of the saved images. Specifying both the height and width will resize images to an exact size, regardless of aspect ratio. Specifying only height will rescale the image to that number of pixels in height while preserving the aspect ratio. width: Optional value for the width of the saved images. Specifying both the width and height will resize images to an exact size, regardless of aspect ratio. 
Specifying only width will rescale the image to that number of pixels wide while preserving the aspect ratio. interpolation: Type of interpolation to use when resizing images. video_manager: [DEPRECATED] DO NOT USE. For backwards compatibility only. Returns: Dictionary of the format { scene_num : [image_paths] }, where scene_num is the number of the scene in scene_list (starting from 1), and image_paths is a list of the paths to the newly saved/created images. Raises: ValueError: Raised if any arguments are invalid or out of range (e.g. if num_images is negative). """ # TODO(v0.7): Add DeprecationWarning that `video_manager` will be removed in v0.8. if video_manager is not None: logger.error('`video_manager` argument is deprecated, use `video` instead.') video = video_manager if not scene_list: return {} if num_images <= 0 or frame_margin < 0: raise ValueError() # TODO: Validate that encoder_param is within the proper range. # Should be between 0 and 100 (inclusive) for jpg/webp, and 1-9 for png. imwrite_param = [get_cv2_imwrite_params()[image_extension], encoder_param ] if encoder_param is not None else [] video.reset() # Setup flags and init progress bar if available. completed = True logger.info('Generating output images (%d per scene)...', num_images) progress_bar = None if show_progress:
# -*- coding: utf-8 -*- # # PySceneDetect: Python-Based Video Scene Detector # ------------------------------------------------------------------- # [ Site: https://scenedetect.com ] # [ Docs: https://scenedetect.com/docs/ ] # [ Github: https://github.com/Breakthrough/PySceneDetect/ ] # # Copyright (C) 2014-2023 Brandon Castellano <http://www.bcastell.com>. # PySceneDetect is licensed under the BSD 3-Clause License; see the # included LICENSE file, or visit one of the above pages for details. # """``scenedetect.scene_manager`` Module This module implements :class:`SceneManager`, coordinates running a :mod:`SceneDetector <scenedetect.detectors>` over the frames of a video (:mod:`VideoStream <scenedetect.video_stream>`). Video decoding is done in a separate thread to improve performance. This module also contains other helper functions (e.g. :func:`save_images`) which can be used to process the resulting scene list. =============================================================== Usage =============================================================== The following example shows basic usage of a :class:`SceneManager`: .. code:: python from scenedetect import open_video, SceneManager, ContentDetector video = open_video(video_path) scene_manager = SceneManager() scene_manager.add_detector(ContentDetector()) # Detect all scenes in video from current position to end. scene_manager.detect_scenes(video) # `get_scene_list` returns a list of start/end timecode pairs # for each scene that was found. scenes = scene_manager.get_scene_list() An optional callback can also be invoked on each detected scene, for example: .. code:: python from scenedetect import open_video, SceneManager, ContentDetector # Callback to invoke on the first frame of every new scene detection. def on_new_scene(frame_img: numpy.ndarray, frame_num: int): print("New scene found at frame %d." % frame_num) video = open_video(test_video_file) scene_manager = SceneManager() scene_manager.add_detector(ContentDetector()) scene_manager.detect_scenes(video=video, callback=on_new_scene) To use a `SceneManager` with a webcam/device or existing `cv2.VideoCapture` device, use the :class:`VideoCaptureAdapter <scenedetect.backends.opencv.VideoCaptureAdapter>` instead of `open_video`. ======================================================================= Storing Per-Frame Statistics ======================================================================= `SceneManager` can use an optional :class:`StatsManager <scenedetect.stats_manager.StatsManager>` to save frame statistics to disk: .. code:: python from scenedetect import open_video, ContentDetector, SceneManager, StatsManager video = open_video(test_video_file) scene_manager = SceneManager(stats_manager=StatsManager()) scene_manager.add_detector(ContentDetector()) scene_manager.detect_scenes(video=video) scene_list = scene_manager.get_scene_list() print_scenes(scene_list=scene_list) # Save per-frame statistics to disk. scene_manager.stats_manager.save_to_csv(csv_file=STATS_FILE_PATH) The statsfile can be used to find a better threshold for certain inputs, or perform statistical analysis of the video. """ logger = logging.getLogger('pyscenedetect') # TODO: This value can and should be tuned for performance improvements as much as possible, # until accuracy falls, on a large enough dataset. This has yet to be done, but the current # value doesn't seem to have caused any issues at least. 
DEFAULT_MIN_WIDTH: int = 256 """The default minimum width a frame will be downscaled to when calculating a downscale factor.""" MAX_FRAME_QUEUE_LENGTH: int = 4 """Maximum number of decoded frames which can be buffered while waiting to be processed.""" PROGRESS_BAR_DESCRIPTION = 'Detected: %d | Progress' """Template to use for progress bar.""" class Interpolation(Enum): """Interpolation method used for image resizing. Based on constants defined in OpenCV.""" NEAREST = cv2.INTER_NEAREST """Nearest neighbor interpolation.""" LINEAR = cv2.INTER_LINEAR """Bilinear interpolation.""" CUBIC = cv2.INTER_CUBIC """Bicubic interpolation.""" AREA = cv2.INTER_AREA """Pixel area relation resampling. Provides moire'-free downscaling.""" LANCZOS4 = cv2.INTER_LANCZOS4 """Lanczos interpolation over 8x8 neighborhood.""" def compute_downscale_factor(frame_width: int, effective_width: int = DEFAULT_MIN_WIDTH) -> int: """Get the optimal default downscale factor based on a video's resolution (currently only the width in pixels is considered). The resulting effective width of the video will be between frame_width and 1.5 * frame_width pixels (e.g. if frame_width is 200, the range of effective widths will be between 200 and 300). Arguments: frame_width: Actual width of the video frame in pixels. effective_width: Desired minimum width in pixels. Returns: int: The default downscale factor to use to achieve at least the target effective_width. """ assert not (frame_width < 1 or effective_width < 1) if frame_width < effective_width: return 1 return frame_width // effective_width def get_scenes_from_cuts( cut_list: Iterable[FrameTimecode], start_pos: Union[int, FrameTimecode], end_pos: Union[int, FrameTimecode], base_timecode: Optional[FrameTimecode] = None, ) -> List[Tuple[FrameTimecode, FrameTimecode]]: """Returns a list of tuples of start/end FrameTimecodes for each scene based on a list of detected scene cuts/breaks. This function is called when using the :meth:`SceneManager.get_scene_list` method. The scene list is generated from a cutting list (:meth:`SceneManager.get_cut_list`), noting that each scene is contiguous, starting from the first to last frame of the input. If `cut_list` is empty, the resulting scene will span from `start_pos` to `end_pos`. Arguments: cut_list: List of FrameTimecode objects where scene cuts/breaks occur. base_timecode: The base_timecode of which all FrameTimecodes in the cut_list are based on. num_frames: The number of frames, or FrameTimecode representing duration, of the video that was processed (used to generate last scene's end time). start_frame: The start frame or FrameTimecode of the cut list. Used to generate the first scene's start time. base_timecode: [DEPRECATED] DO NOT USE. For backwards compatibility only. Returns: List of tuples in the form (start_time, end_time), where both start_time and end_time are FrameTimecode objects representing the exact time/frame where each scene occupies based on the input cut_list. """ # TODO(v0.7): Use the warnings module to turn this into a warning. if base_timecode is not None: logger.error('`base_timecode` argument is deprecated has no effect.') # Scene list, where scenes are tuples of (Start FrameTimecode, End FrameTimecode). scene_list = [] if not cut_list: scene_list.append((start_pos, end_pos)) return scene_list # Initialize last_cut to the first frame we processed,as it will be # the start timecode for the first scene in the list. 
last_cut = start_pos for cut in cut_list: scene_list.append((last_cut, cut)) last_cut = cut # Last scene is from last cut to end of video. scene_list.append((last_cut, end_pos)) return scene_list def write_scene_list(output_csv_file: TextIO, scene_list: Iterable[Tuple[FrameTimecode, FrameTimecode]], include_cut_list: bool = True, cut_list: Optional[Iterable[FrameTimecode]] = None) -> None: """Writes the given list of scenes to an output file handle in CSV format. Arguments: output_csv_file: Handle to open file in write mode. scene_list: List of pairs of FrameTimecodes denoting each scene's start/end FrameTimecode. include_cut_list: Bool indicating if the first row should include the timecodes where each scene starts. Should be set to False if RFC 4180 compliant CSV output is required. cut_list: Optional list of FrameTimecode objects denoting the cut list (i.e. the frames in the video that need to be split to generate individual scenes). If not specified, the cut list is generated using the start times of each scene following the first one. """ csv_writer = csv.writer(output_csv_file, lineterminator='\n') # If required, output the cutting list as the first row (i.e. before the header row). if include_cut_list: csv_writer.writerow( ["Timecode List:"] + cut_list if cut_list else [start.get_timecode() for start, _ in scene_list[1:]]) csv_writer.writerow([ "Scene Number", "Start Frame", "Start Timecode", "Start Time (seconds)", "End Frame", "End Timecode", "End Time (seconds)", "Length (frames)", "Length (timecode)", "Length (seconds)" ]) for i, (start, end) in enumerate(scene_list): duration = end - start csv_writer.writerow([ '%d' % (i + 1), '%d' % (start.get_frames() + 1), start.get_timecode(), '%.3f' % start.get_seconds(), '%d' % end.get_frames(), end.get_timecode(), '%.3f' % end.get_seconds(), '%d' % duration.get_frames(), duration.get_timecode(), '%.3f' % duration.get_seconds() ]) def write_scene_list_html(output_html_filename, scene_list, cut_list=None, css=None, css_class='mytable', image_filenames=None, image_width=None, image_height=None): """Writes the given list of scenes to an output file handle in html format. Arguments: output_html_filename: filename of output html file scene_list: List of pairs of FrameTimecodes denoting each scene's start/end FrameTimecode. cut_list: Optional list of FrameTimecode objects denoting the cut list (i.e. the frames in the video that need to be split to generate individual scenes). If not passed, the start times of each scene (besides the 0th scene) is used instead. css: String containing all the css information for the resulting html page. 
css_class: String containing the named css class image_filenames: dict where key i contains a list with n elements (filenames of the n saved images from that scene) image_width: Optional desired width of images in table in pixels image_height: Optional desired height of images in table in pixels """ if not css: css = """ table.mytable { font-family: times; font-size:12px; color:#000000; border-width: 1px; border-color: #eeeeee; border-collapse: collapse; background-color: #ffffff; width=100%; max-width:550px; table-layout:fixed; } table.mytable th { border-width: 1px; padding: 8px; border-style: solid; border-color: #eeeeee; background-color: #e6eed6; color:#000000; } table.mytable td { border-width: 1px; padding: 8px; border-style: solid; border-color: #eeeeee; } #code { display:inline; font-family: courier; color: #3d9400; } #string { display:inline; font-weight: bold; } """ # Output Timecode list timecode_table = SimpleTable( [["Timecode List:"] + (cut_list if cut_list else [start.get_timecode() for start, _ in scene_list[1:]])], css_class=css_class) # Output list of scenes header_row = [ "Scene Number", "Start Frame", "Start Timecode", "Start Time (seconds)", "End Frame", "End Timecode", "End Time (seconds)", "Length (frames)", "Length (timecode)", "Length (seconds)" ] for i, (start, end) in enumerate(scene_list): duration = end - start row = SimpleTableRow([ '%d' % (i + 1), '%d' % (start.get_frames() + 1), start.get_timecode(), '%.3f' % start.get_seconds(), '%d' % end.get_frames(), end.get_timecode(), '%.3f' % end.get_seconds(), '%d' % duration.get_frames(), duration.get_timecode(), '%.3f' % duration.get_seconds() ]) if image_filenames: for image in image_filenames[i]: row.add_cell( SimpleTableCell( SimpleTableImage(image, width=image_width, height=image_height))) if i == 0: scene_table = SimpleTable(rows=[row], header_row=header_row, css_class=css_class) else: scene_table.add_row(row=row) # Write html file page = HTMLPage() page.add_table(timecode_table) page.add_table(scene_table) page.css = css page.save(output_html_filename) # # TODO(v1.0): Refactor to take a SceneList object; consider moving this and save scene list # to a better spot, or just move them to scene_list.py. # def save_images(scene_list: List[Tuple[FrameTimecode, FrameTimecode]], video: VideoStream, num_images: int = 3, frame_margin: int = 1, image_extension: str = 'jpg', encoder_param: int = 95, image_name_template: str = '$VIDEO_NAME-Scene-$SCENE_NUMBER-$IMAGE_NUMBER', output_dir: Optional[str] = None, show_progress: Optional[bool] = False, scale: Optional[float] = None, height: Optional[int] = None, width: Optional[int] = None, interpolation: Interpolation = Interpolation.CUBIC, video_manager=None) -> Dict[int, List[str]]: """Save a set number of images from each scene, given a list of scenes and the associated video/frame source. Arguments: scene_list: A list of scenes (pairs of FrameTimecode objects) returned from calling a SceneManager's detect_scenes() method. video: A VideoStream object corresponding to the scene list. Note that the video will be closed/re-opened and seeked through. num_images: Number of images to generate for each scene. Minimum is 1. frame_margin: Number of frames to pad each scene around the beginning and end (e.g. moves the first/last image into the scene by N frames). Can set to 0, but will result in some video files failing to extract the very last frame. image_extension: Type of image to save (must be one of 'jpg', 'png', or 'webp'). 
encoder_param: Quality/compression efficiency, based on type of image: 'jpg' / 'webp': Quality 0-100, higher is better quality. 100 is lossless for webp. 'png': Compression from 1-9, where 9 achieves best filesize but is slower to encode. image_name_template: Template to use when creating the images on disk. Can use the macros $VIDEO_NAME, $SCENE_NUMBER, and $IMAGE_NUMBER. The image extension is applied automatically as per the argument image_extension. output_dir: Directory to output the images into. If not set, the output is created in the working directory. show_progress: If True, shows a progress bar if tqdm is installed. scale: Optional factor by which to rescale saved images. A scaling factor of 1 would not result in rescaling. A value < 1 results in a smaller saved image, while a value > 1 results in an image larger than the original. This value is ignored if either the height or width values are specified. height: Optional value for the height of the saved images. Specifying both the height and width will resize images to an exact size, regardless of aspect ratio. Specifying only height will rescale the image to that number of pixels in height while preserving the aspect ratio. width: Optional value for the width of the saved images. Specifying both the width and height will resize images to an exact size, regardless of aspect ratio. Specifying only width will rescale the image to that number of pixels wide while preserving the aspect ratio. interpolation: Type of interpolation to use when resizing images. video_manager: [DEPRECATED] DO NOT USE. For backwards compatibility only. Returns: Dictionary of the format { scene_num : [image_paths] }, where scene_num is the number of the scene in scene_list (starting from 1), and image_paths is a list of the paths to the newly saved/created images. Raises: ValueError: Raised if any arguments are invalid or out of range (e.g. if num_images is negative). """ # TODO(v0.7): Add DeprecationWarning that `video_manager` will be removed in v0.8. if video_manager is not None: logger.error('`video_manager` argument is deprecated, use `video` instead.') video = video_manager if not scene_list: return {} if num_images <= 0 or frame_margin < 0: raise ValueError() # TODO: Validate that encoder_param is within the proper range. # Should be between 0 and 100 (inclusive) for jpg/webp, and 1-9 for png. imwrite_param = [get_cv2_imwrite_params()[image_extension], encoder_param ] if encoder_param is not None else [] video.reset() # Setup flags and init progress bar if available. completed = True logger.info('Generating output images (%d per scene)...', num_images) progress_bar = None if show_progress:
progress_bar = tqdm(total=len(scene_list) * num_images, unit='images', dynamic_ncols=True)
5
2023-10-25 02:50:01+00:00
24k
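The scene_manager helpers in the record above build scenes by pairing each cut with the previous one and pick a downscale factor by integer division of the frame width. A minimal standalone sketch of that logic follows; plain integers stand in for FrameTimecode objects and the function names are illustrative, not part of PySceneDetect's API.

# Standalone sketch of the cut-list -> scene-list pairing and the downscale rule.
def scenes_from_cuts(cuts, start, end):
    """Pair consecutive cuts so scenes tile [start, end) contiguously."""
    if not cuts:
        return [(start, end)]
    scenes, last = [], start
    for cut in cuts:
        scenes.append((last, cut))
        last = cut
    scenes.append((last, end))
    return scenes

def downscale_factor(frame_width, effective_width=256):
    """Integer factor keeping the downscaled width at or above effective_width."""
    return 1 if frame_width < effective_width else frame_width // effective_width

print(scenes_from_cuts([30, 90], 0, 150))  # [(0, 30), (30, 90), (90, 150)]
print(downscale_factor(1920))              # 7, giving an effective width of ~274 px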
EulerSearch/embedding_studio
embedding_studio/embeddings/training/embeddings_finetuner.py
[ { "identifier": "QueryRetriever", "path": "embedding_studio/embeddings/data/clickstream/query_retriever.py", "snippet": "class QueryRetriever(object):\n \"\"\"As we can't exactly predict a schema of storing queries:\n 1. As text exceptly in clickstream service\n 2. As ID of a record with a text...
import logging import numpy as np import pytorch_lightning as pl import torch from collections import defaultdict from typing import Callable, List, Optional, Tuple, Union from datasets import DatasetDict from torch import FloatTensor, Tensor from torch.optim import SGD, Optimizer from torch.optim.lr_scheduler import LRScheduler, StepLR from embedding_studio.embeddings.data.clickstream.query_retriever import ( QueryRetriever, ) from embedding_studio.embeddings.data.clickstream.raw_session import ( ClickstreamSession, ) from embedding_studio.embeddings.features.event_confidences import ( dummy_confidences, ) from embedding_studio.embeddings.features.extractor import ( COSINE_SIMILARITY, FeaturesExtractor, ) from embedding_studio.embeddings.features.session_features import ( SessionFeatures, ) from embedding_studio.embeddings.losses.ranking_loss_interface import ( RankingLossInterface, ) from embedding_studio.embeddings.metrics.distance_shift import DistanceShift from embedding_studio.embeddings.metrics.metric import MetricCalculator from embedding_studio.embeddings.models.interface import ( EmbeddingsModelInterface, ) from embedding_studio.workers.fine_tuning.experiments.experiments_tracker import ( ExperimentsManager, ) from embedding_studio.workers.fine_tuning.experiments.finetuning_params import ( FineTuningParams, ) from embedding_studio.workers.fine_tuning.experiments.finetuning_settings import ( FineTuningSettings, ) from embedding_studio.workers.fine_tuning.experiments.metrics_accumulator import ( MetricValue, )
16,773
if not isinstance(fine_tuning_params, FineTuningParams): raise TypeError( "fine_tuning_params must be an instance of FineTuningParams" ) if not isinstance(tracker, ExperimentsManager): raise TypeError( "tracker must be an instance of ExperimentsManager" ) if not isinstance(fine_tuning_params, FineTuningParams): raise TypeError( "fine_tuning_params must be an instance of FineTuningParams" ) super(EmbeddingsFineTuner, self).__init__() self.features_extractor = FeaturesExtractor( model, ranker, is_similarity, fine_tuning_params.not_irrelevant_only, fine_tuning_params.negative_downsampling, fine_tuning_params.min_abs_difference_threshold, fine_tuning_params.max_abs_difference_threshold, confidence_calculator, ) self.items_storages = items_storages self.query_retriever = query_retriever if not metric_calculators: logger.debug( "metric_calculators list is empty - DistanceShift metric will be used by default." ) self.calculators = ( metric_calculators if metric_calculators is not None else [DistanceShift()] ) self.loss_func = loss_func self.loss_func.set_margin(fine_tuning_params.margin) self.fine_tuning_params = fine_tuning_params self.tracker = tracker self.step_size = step_size self.gamma = gamma self._validation_metrics = defaultdict(list) # Fix layers self.features_extractor.model.fix_item_model( fine_tuning_params.num_fixed_layers ) self.features_extractor.model.fix_query_model( fine_tuning_params.num_fixed_layers ) self.automatic_optimization = False def preprocess_sessions(self, clickstream_dataset: DatasetDict): for key in clickstream_dataset.keys(): item_storage = self.items_storages[key] logger.info( f"Calculate ranks for {key} not irrelevant clickstream sessions" ) for session in clickstream_dataset[key].not_irrelevant: unique_values = set(session.ranks.values()) if len(unique_values) == 0 or None in unique_values: session.ranks = self.features_extractor.calculate_ranks( session, item_storage, self.query_retriever ) logger.info( f"Calculate ranks for {key} irrelevant clickstream sessions" ) for session in clickstream_dataset[key].irrelevant: unique_values = set(session.ranks.values()) if len(unique_values) == 0 or None in unique_values: session.ranks = self.features_extractor.calculate_ranks( session, item_storage, self.query_retriever ) # Standart LightningModule methods to be overrided to be used in PytorchLightning Trainer # 1. Configure optimizers and schedulers def configure_optimizers( self, ) -> Tuple[List[Optimizer], List[LRScheduler]]: if not (isinstance(self.step_size, int) and self.step_size > 0): raise ValueError("step_size must be a positive integer") if not (isinstance(self.gamma, float) and 0 < self.gamma < 1): raise ValueError("gamma must be a float in the range (0, 1)") items_optimizer: SGD = SGD( self.features_extractor.model.get_items_model_params(), lr=self.fine_tuning_params.items_lr, weight_decay=self.fine_tuning_params.items_weight_decay, ) items_scheduler: StepLR = StepLR( items_optimizer, step_size=self.step_size, gamma=self.gamma ) if self.features_extractor.model.same_query_and_items: return [items_optimizer], [items_scheduler] query_optimizer: SGD = SGD( self.features_extractor.model.get_query_model_params(), lr=self.fine_tuning_params.query_lr, weight_decay=self.fine_tuning_params.query_weight_decay, ) query_scheduler: StepLR = torch.optim.lr_scheduler.StepLR( query_optimizer, step_size=self.step_size, gamma=self.gamma ) return [items_optimizer, query_optimizer], [ items_scheduler, query_scheduler, ] # 2. 
Training step code with one batch def training_step( self,
logger = logging.getLogger(__name__) class EmbeddingsFineTuner(pl.LightningModule): def __init__( self, model: EmbeddingsModelInterface, items_storages: DatasetDict, query_retriever: QueryRetriever, loss_func: RankingLossInterface, fine_tuning_params: FineTuningParams, tracker: ExperimentsManager, metric_calculators: Optional[List[MetricCalculator]] = None, ranker: Callable[ [FloatTensor, FloatTensor], FloatTensor ] = COSINE_SIMILARITY, is_similarity: bool = True, confidence_calculator: Callable = dummy_confidences, step_size: int = 500, gamma: float = 0.9, ): """This is a class, that represents embeddings fine-tuning logic, designed in the way to be use PytorchLightning Trainer. :param model: embedding model itself :param items_storages: items storage related to a given iteration, as a datasetdict with train and test keys :param query_retriever: object to get item related to query, that can be used in "forward" :param loss_func: loss object for a ranking task :param fine_tuning_params: hyper params of fine-tuning task :param tracker: experiment management object :param metric_calculators: list of trackable metrics calculators (default: None) by default_params only DistanceShift metric :param ranker: ranking function (query, items) -> ranks (defult: cosine similarity) :param is_similarity: is ranking function similarity like or distance (default: True) :param confidence_calculator: function to calculate results confidences (default: dummy_confidences) :param step_size: optimizer steps (default: 500) :param gamma: optimizers gamma (default: 0.9) """ if not isinstance(model, EmbeddingsModelInterface): raise TypeError( "model must be an instance of EmbeddingsModelInterface" ) if not isinstance(items_storages, DatasetDict): raise TypeError("items_storages must be a DatasetDict") if not isinstance(query_retriever, QueryRetriever): raise TypeError( "query_retriever must be an instance of QueryRetriever" ) if not isinstance(loss_func, RankingLossInterface): raise TypeError( "loss_func must be an instance of RankingLossInterface" ) if not isinstance(fine_tuning_params, FineTuningParams): raise TypeError( "fine_tuning_params must be an instance of FineTuningParams" ) if not isinstance(tracker, ExperimentsManager): raise TypeError( "tracker must be an instance of ExperimentsManager" ) if not isinstance(fine_tuning_params, FineTuningParams): raise TypeError( "fine_tuning_params must be an instance of FineTuningParams" ) super(EmbeddingsFineTuner, self).__init__() self.features_extractor = FeaturesExtractor( model, ranker, is_similarity, fine_tuning_params.not_irrelevant_only, fine_tuning_params.negative_downsampling, fine_tuning_params.min_abs_difference_threshold, fine_tuning_params.max_abs_difference_threshold, confidence_calculator, ) self.items_storages = items_storages self.query_retriever = query_retriever if not metric_calculators: logger.debug( "metric_calculators list is empty - DistanceShift metric will be used by default." 
) self.calculators = ( metric_calculators if metric_calculators is not None else [DistanceShift()] ) self.loss_func = loss_func self.loss_func.set_margin(fine_tuning_params.margin) self.fine_tuning_params = fine_tuning_params self.tracker = tracker self.step_size = step_size self.gamma = gamma self._validation_metrics = defaultdict(list) # Fix layers self.features_extractor.model.fix_item_model( fine_tuning_params.num_fixed_layers ) self.features_extractor.model.fix_query_model( fine_tuning_params.num_fixed_layers ) self.automatic_optimization = False def preprocess_sessions(self, clickstream_dataset: DatasetDict): for key in clickstream_dataset.keys(): item_storage = self.items_storages[key] logger.info( f"Calculate ranks for {key} not irrelevant clickstream sessions" ) for session in clickstream_dataset[key].not_irrelevant: unique_values = set(session.ranks.values()) if len(unique_values) == 0 or None in unique_values: session.ranks = self.features_extractor.calculate_ranks( session, item_storage, self.query_retriever ) logger.info( f"Calculate ranks for {key} irrelevant clickstream sessions" ) for session in clickstream_dataset[key].irrelevant: unique_values = set(session.ranks.values()) if len(unique_values) == 0 or None in unique_values: session.ranks = self.features_extractor.calculate_ranks( session, item_storage, self.query_retriever ) # Standart LightningModule methods to be overrided to be used in PytorchLightning Trainer # 1. Configure optimizers and schedulers def configure_optimizers( self, ) -> Tuple[List[Optimizer], List[LRScheduler]]: if not (isinstance(self.step_size, int) and self.step_size > 0): raise ValueError("step_size must be a positive integer") if not (isinstance(self.gamma, float) and 0 < self.gamma < 1): raise ValueError("gamma must be a float in the range (0, 1)") items_optimizer: SGD = SGD( self.features_extractor.model.get_items_model_params(), lr=self.fine_tuning_params.items_lr, weight_decay=self.fine_tuning_params.items_weight_decay, ) items_scheduler: StepLR = StepLR( items_optimizer, step_size=self.step_size, gamma=self.gamma ) if self.features_extractor.model.same_query_and_items: return [items_optimizer], [items_scheduler] query_optimizer: SGD = SGD( self.features_extractor.model.get_query_model_params(), lr=self.fine_tuning_params.query_lr, weight_decay=self.fine_tuning_params.query_weight_decay, ) query_scheduler: StepLR = torch.optim.lr_scheduler.StepLR( query_optimizer, step_size=self.step_size, gamma=self.gamma ) return [items_optimizer, query_optimizer], [ items_scheduler, query_scheduler, ] # 2. Training step code with one batch def training_step( self,
batch: List[Tuple[ClickstreamSession, ClickstreamSession]],
1
2023-10-31 00:33:13+00:00
24k
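The configure_optimizers method in the fine-tuner record above returns one SGD optimizer plus one StepLR scheduler per sub-model (items, and optionally a separate query tower). A minimal sketch of that pairing follows; the nn.Linear modules and the hyperparameter values are placeholders, not the project's real models or FineTuningParams.

import torch
from torch import nn
from torch.optim import SGD
from torch.optim.lr_scheduler import StepLR

# Toy towers standing in for the items/query parts of the embedding model.
items_model = nn.Linear(16, 8)
query_model = nn.Linear(16, 8)

# One (optimizer, scheduler) pair per tower, mirroring configure_optimizers above.
items_opt = SGD(items_model.parameters(), lr=0.05, weight_decay=1e-4)
query_opt = SGD(query_model.parameters(), lr=0.01, weight_decay=1e-4)
items_sched = StepLR(items_opt, step_size=500, gamma=0.9)
query_sched = StepLR(query_opt, step_size=500, gamma=0.9)

optimizers = [items_opt, query_opt]
schedulers = [items_sched, query_sched]

# With automatic_optimization disabled, the training loop steps these manually.
for opt, sched in zip(optimizers, schedulers):
    opt.step()
    sched.step()
    opt.zero_grad()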
nv-tlabs/vid2player3d
poselib/poselib/skeleton/tests/test_skeleton.py
[ { "identifier": "SkeletonTree", "path": "poselib/poselib/skeleton/skeleton3d.py", "snippet": "class SkeletonTree(Serializable):\n \"\"\"\n A skeleton tree gives a complete description of a rigid skeleton. It describes a tree structure\n over a list of nodes with their names indicated by strings...
from ...core import * from ..skeleton3d import SkeletonTree, SkeletonState, SkeletonMotion from ...visualization.common import ( plot_skeleton_state, plot_skeleton_motion_interactive, ) from ...visualization.plt_plotter import Matplotlib3DPlotter from ...visualization.skeleton_plotter_tasks import ( Draw3DSkeletonMotion, Draw3DSkeletonState, ) import numpy as np import torch
17,997
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. def test_skel_tree(): skel_tree = SkeletonTree.from_mjcf( "/home/serfcx/DL_Animation/rl_mimic/data/skeletons/humanoid_mimic_mod_2_noind.xml", backend="pytorch", ) skel_tree_rec = SkeletonTree.from_dict(skel_tree.to_dict(), backend="pytorch") # assert skel_tree.to_str() == skel_tree_rec.to_str() print(skel_tree.node_names) print(skel_tree.local_translation) print(skel_tree.parent_indices) skel_state = SkeletonState.zero_pose(skeleton_tree=skel_tree) plot_skeleton_state(task_name="draw_skeleton", skeleton_state=skel_state) skel_state = skel_state.drop_nodes_by_names(["right_hip", "left_hip"]) plot_skeleton_state(task_name="draw_skeleton", skeleton_state=skel_state) def test_skel_motion():
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. def test_skel_tree(): skel_tree = SkeletonTree.from_mjcf( "/home/serfcx/DL_Animation/rl_mimic/data/skeletons/humanoid_mimic_mod_2_noind.xml", backend="pytorch", ) skel_tree_rec = SkeletonTree.from_dict(skel_tree.to_dict(), backend="pytorch") # assert skel_tree.to_str() == skel_tree_rec.to_str() print(skel_tree.node_names) print(skel_tree.local_translation) print(skel_tree.parent_indices) skel_state = SkeletonState.zero_pose(skeleton_tree=skel_tree) plot_skeleton_state(task_name="draw_skeleton", skeleton_state=skel_state) skel_state = skel_state.drop_nodes_by_names(["right_hip", "left_hip"]) plot_skeleton_state(task_name="draw_skeleton", skeleton_state=skel_state) def test_skel_motion():
skel_motion = SkeletonMotion.from_file(
2
2023-10-30 20:43:43+00:00
24k
masked-spacetime-hashing/msth
nerfstudio/models/tensorf.py
[ { "identifier": "RayBundle", "path": "nerfstudio/cameras/rays.py", "snippet": "class RayBundle(TensorDataclass):\n \"\"\"A bundle of ray parameters.\"\"\"\n\n # TODO(ethan): make sure the sizes with ... are correct\n origins: TensorType[..., 3]\n \"\"\"Ray origins (XYZ)\"\"\"\n directions...
from dataclasses import dataclass, field from typing import Dict, List, Tuple, Type from torch.nn import Parameter from torchmetrics import PeakSignalNoiseRatio from torchmetrics.functional import structural_similarity_index_measure from torchmetrics.image.lpip import LearnedPerceptualImagePatchSimilarity from typing_extensions import Literal from nerfstudio.cameras.rays import RayBundle from nerfstudio.configs.config_utils import to_immutable_dict from nerfstudio.engine.callbacks import ( TrainingCallback, TrainingCallbackAttributes, TrainingCallbackLocation, ) from nerfstudio.field_components.encodings import ( NeRFEncoding, TensorCPEncoding, TensorVMEncoding, TriplaneEncoding, ) from nerfstudio.field_components.field_heads import FieldHeadNames from nerfstudio.fields.tensorf_field import TensoRFField from nerfstudio.model_components.losses import MSELoss from nerfstudio.model_components.ray_samplers import PDFSampler, UniformSampler from nerfstudio.model_components.renderers import ( AccumulationRenderer, DepthRenderer, RGBRenderer, ) from nerfstudio.model_components.scene_colliders import AABBBoxCollider from nerfstudio.models.base_model import Model, ModelConfig from nerfstudio.utils import colormaps, colors, misc import numpy as np import torch
15,552
np.exp( np.linspace( np.log(config.init_resolution), np.log(config.final_resolution), len(config.upsampling_iters) + 1, ) ) ) .astype("int") .tolist()[1:] ) super().__init__(config=config, **kwargs) def get_training_callbacks( self, training_callback_attributes: TrainingCallbackAttributes ) -> List[TrainingCallback]: # the callback that we want to run every X iterations after the training iteration def reinitialize_optimizer( self, training_callback_attributes: TrainingCallbackAttributes, step: int # pylint: disable=unused-argument ): index = self.upsampling_iters.index(step) resolution = self.upsampling_steps[index] # upsample the position and direction grids self.field.density_encoding.upsample_grid(resolution) self.field.color_encoding.upsample_grid(resolution) # reinitialize the encodings optimizer optimizers_config = training_callback_attributes.optimizers.config enc = training_callback_attributes.pipeline.get_param_groups()["encodings"] lr_init = optimizers_config["encodings"]["optimizer"].lr training_callback_attributes.optimizers.optimizers["encodings"] = optimizers_config["encodings"][ "optimizer" ].setup(params=enc) if optimizers_config["encodings"]["scheduler"]: training_callback_attributes.optimizers.schedulers["encodings"] = ( optimizers_config["encodings"]["scheduler"] .setup() .get_scheduler( optimizer=training_callback_attributes.optimizers.optimizers["encodings"], lr_init=lr_init ) ) callbacks = [ TrainingCallback( where_to_run=[TrainingCallbackLocation.AFTER_TRAIN_ITERATION], iters=self.upsampling_iters, func=reinitialize_optimizer, args=[self, training_callback_attributes], ) ] return callbacks def update_to_step(self, step: int) -> None: if step < self.upsampling_iters[0]: return new_iters = list(self.upsampling_iters) + [step + 1] new_iters.sort() index = new_iters.index(step + 1) new_grid_resolution = self.upsampling_steps[index - 1] self.field.density_encoding.upsample_grid(new_grid_resolution) # type: ignore self.field.color_encoding.upsample_grid(new_grid_resolution) # type: ignore def populate_modules(self): """Set the fields and modules""" super().populate_modules() # setting up fields if self.config.tensorf_encoding == "vm": density_encoding = TensorVMEncoding( resolution=self.init_resolution, num_components=self.num_den_components, ) color_encoding = TensorVMEncoding( resolution=self.init_resolution, num_components=self.num_color_components, ) elif self.config.tensorf_encoding == "cp": density_encoding = TensorCPEncoding( resolution=self.init_resolution, num_components=self.num_den_components, ) color_encoding = TensorCPEncoding( resolution=self.init_resolution, num_components=self.num_color_components, ) elif self.config.tensorf_encoding == "triplane": density_encoding = TriplaneEncoding( resolution=self.init_resolution, num_components=self.num_den_components, ) color_encoding = TriplaneEncoding( resolution=self.init_resolution, num_components=self.num_color_components, ) else: raise ValueError(f"Encoding {self.config.tensorf_encoding} not supported") feature_encoding = NeRFEncoding(in_dim=self.appearance_dim, num_frequencies=2, min_freq_exp=0, max_freq_exp=2) direction_encoding = NeRFEncoding(in_dim=3, num_frequencies=2, min_freq_exp=0, max_freq_exp=2) self.field = TensoRFField( self.scene_box.aabb, feature_encoding=feature_encoding, direction_encoding=direction_encoding, density_encoding=density_encoding, color_encoding=color_encoding, appearance_dim=self.appearance_dim, head_mlp_num_layers=2, head_mlp_layer_width=128, use_sh=False, ) # samplers 
self.sampler_uniform = UniformSampler(num_samples=self.config.num_samples, single_jitter=True)
# Copyright 2022 The Nerfstudio Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TensorRF implementation. """ from __future__ import annotations @dataclass class TensoRFModelConfig(ModelConfig): """TensoRF model config""" _target: Type = field(default_factory=lambda: TensoRFModel) """target class to instantiate""" init_resolution: int = 128 """initial render resolution""" final_resolution: int = 300 """final render resolution""" upsampling_iters: Tuple[int, ...] = (2000, 3000, 4000, 5500, 7000) """specifies a list of iteration step numbers to perform upsampling""" loss_coefficients: Dict[str, float] = to_immutable_dict({"rgb_loss": 1.0}) """Loss specific weights.""" num_samples: int = 256 """Number of samples in field evaluation""" num_den_components: int = 16 """Number of components in density encoding""" num_color_components: int = 48 """Number of components in color encoding""" appearance_dim: int = 27 """Number of channels for color encoding""" tensorf_encoding: Literal["triplane", "vm", "cp"] = "vm" class TensoRFModel(Model): """TensoRF Model Args: config: TensoRF configuration to instantiate model """ def __init__( self, config: TensoRFModelConfig, **kwargs, ) -> None: self.init_resolution = config.init_resolution self.upsampling_iters = config.upsampling_iters self.num_den_components = config.num_den_components self.num_color_components = config.num_color_components self.appearance_dim = config.appearance_dim self.upsampling_steps = ( np.round( np.exp( np.linspace( np.log(config.init_resolution), np.log(config.final_resolution), len(config.upsampling_iters) + 1, ) ) ) .astype("int") .tolist()[1:] ) super().__init__(config=config, **kwargs) def get_training_callbacks( self, training_callback_attributes: TrainingCallbackAttributes ) -> List[TrainingCallback]: # the callback that we want to run every X iterations after the training iteration def reinitialize_optimizer( self, training_callback_attributes: TrainingCallbackAttributes, step: int # pylint: disable=unused-argument ): index = self.upsampling_iters.index(step) resolution = self.upsampling_steps[index] # upsample the position and direction grids self.field.density_encoding.upsample_grid(resolution) self.field.color_encoding.upsample_grid(resolution) # reinitialize the encodings optimizer optimizers_config = training_callback_attributes.optimizers.config enc = training_callback_attributes.pipeline.get_param_groups()["encodings"] lr_init = optimizers_config["encodings"]["optimizer"].lr training_callback_attributes.optimizers.optimizers["encodings"] = optimizers_config["encodings"][ "optimizer" ].setup(params=enc) if optimizers_config["encodings"]["scheduler"]: training_callback_attributes.optimizers.schedulers["encodings"] = ( optimizers_config["encodings"]["scheduler"] .setup() .get_scheduler( optimizer=training_callback_attributes.optimizers.optimizers["encodings"], lr_init=lr_init ) ) callbacks = [ TrainingCallback( where_to_run=[TrainingCallbackLocation.AFTER_TRAIN_ITERATION], iters=self.upsampling_iters, 
func=reinitialize_optimizer, args=[self, training_callback_attributes], ) ] return callbacks def update_to_step(self, step: int) -> None: if step < self.upsampling_iters[0]: return new_iters = list(self.upsampling_iters) + [step + 1] new_iters.sort() index = new_iters.index(step + 1) new_grid_resolution = self.upsampling_steps[index - 1] self.field.density_encoding.upsample_grid(new_grid_resolution) # type: ignore self.field.color_encoding.upsample_grid(new_grid_resolution) # type: ignore def populate_modules(self): """Set the fields and modules""" super().populate_modules() # setting up fields if self.config.tensorf_encoding == "vm": density_encoding = TensorVMEncoding( resolution=self.init_resolution, num_components=self.num_den_components, ) color_encoding = TensorVMEncoding( resolution=self.init_resolution, num_components=self.num_color_components, ) elif self.config.tensorf_encoding == "cp": density_encoding = TensorCPEncoding( resolution=self.init_resolution, num_components=self.num_den_components, ) color_encoding = TensorCPEncoding( resolution=self.init_resolution, num_components=self.num_color_components, ) elif self.config.tensorf_encoding == "triplane": density_encoding = TriplaneEncoding( resolution=self.init_resolution, num_components=self.num_den_components, ) color_encoding = TriplaneEncoding( resolution=self.init_resolution, num_components=self.num_color_components, ) else: raise ValueError(f"Encoding {self.config.tensorf_encoding} not supported") feature_encoding = NeRFEncoding(in_dim=self.appearance_dim, num_frequencies=2, min_freq_exp=0, max_freq_exp=2) direction_encoding = NeRFEncoding(in_dim=3, num_frequencies=2, min_freq_exp=0, max_freq_exp=2) self.field = TensoRFField( self.scene_box.aabb, feature_encoding=feature_encoding, direction_encoding=direction_encoding, density_encoding=density_encoding, color_encoding=color_encoding, appearance_dim=self.appearance_dim, head_mlp_num_layers=2, head_mlp_layer_width=128, use_sh=False, ) # samplers self.sampler_uniform = UniformSampler(num_samples=self.config.num_samples, single_jitter=True)
self.sampler_pdf = PDFSampler(num_samples=self.config.num_samples // 2, single_jitter=True)
12
2023-10-26 04:39:15+00:00
24k
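The TensoRF model in the record above spaces its grid-upsampling resolutions evenly in log space between the initial and final resolution, one value per upsampling iteration. A small sketch of just that schedule computation, using the config defaults shown in the record:

import numpy as np

init_resolution, final_resolution = 128, 300
upsampling_iters = (2000, 3000, 4000, 5500, 7000)

# Evenly spaced in log space; drop the first entry since it equals init_resolution.
upsampling_steps = (
    np.round(
        np.exp(
            np.linspace(
                np.log(init_resolution),
                np.log(final_resolution),
                len(upsampling_iters) + 1,
            )
        )
    )
    .astype(int)
    .tolist()[1:]
)
print(upsampling_steps)  # [152, 180, 213, 253, 300]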
chenruduan/OAReactDiff
oa_reactdiff/trainer/pl_trainer.py
[ { "identifier": "ProcessedQM9", "path": "oa_reactdiff/dataset/qm9.py", "snippet": "class ProcessedQM9(BaseQM9):\n def __init__(\n self,\n npz_path,\n center=True,\n pad_fragments=2,\n device=\"cpu\",\n zero_charge=False,\n remove_h=False,\n **kw...
from typing import Dict, List, Optional, Tuple from pathlib import Path from torch import nn from torch.utils.data import DataLoader from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts, StepLR from pytorch_lightning import LightningModule from torchmetrics.classification import ( BinaryAccuracy, BinaryAUROC, BinaryF1Score, BinaryPrecision, BinaryCohenKappa, ) from torchmetrics import PearsonCorrCoef, SpearmanCorrCoef, MeanAbsoluteError from oa_reactdiff.dataset import ( ProcessedQM9, ProcessedDoubleQM9, ProcessedTripleQM9, ProcessedTS1x, ) from oa_reactdiff.dynamics import EGNNDynamics, Confidence from oa_reactdiff.diffusion._schedule import DiffSchedule, PredefinedNoiseSchedule from oa_reactdiff.diffusion._normalizer import Normalizer, FEATURE_MAPPING from oa_reactdiff.diffusion.en_diffusion import EnVariationalDiffusion from oa_reactdiff.trainer._metrics import average_over_batch_metrics, pretty_print from oa_reactdiff.analyze.rmsd import batch_rmsd import torch import copy import torch.nn.functional as F import numpy as np import pandas as pd import oa_reactdiff.utils.training_tools as utils
19,488
PROCESS_FUNC = { "QM9": ProcessedQM9, "DoubleQM9": ProcessedDoubleQM9, "TripleQM9": ProcessedTripleQM9,
PROCESS_FUNC = { "QM9": ProcessedQM9, "DoubleQM9": ProcessedDoubleQM9, "TripleQM9": ProcessedTripleQM9,
"TS1x": ProcessedTS1x,
3
2023-10-30 02:53:38+00:00
24k
nv-tlabs/pacer
pacer/env/tasks/humanoid_amp.py
[ { "identifier": "MotionLib", "path": "pacer/utils/motion_lib.py", "snippet": "class MotionLib():\n def __init__(self, motion_file, dof_body_ids, dof_offsets, key_body_ids,\n device):\n self._dof_body_ids = dof_body_ids\n self._dof_offsets = dof_offsets\n self._num...
from ast import Try from enum import Enum from matplotlib.pyplot import flag from torch import Tensor from typing import Dict, Optional from isaacgym import gymapi from isaacgym import gymtorch from env.tasks.humanoid import Humanoid, dof_to_obs, remove_base_rot, dof_to_obs_smpl from env.util import gym_util from pacer.utils.motion_lib import MotionLib from pacer.utils.motion_lib_smpl import MotionLib as MotionLibSMPL from isaacgym.torch_utils import * from utils import torch_utils from uhc.smpllib.smpl_parser import ( SMPL_Parser, SMPLH_Parser, SMPLX_Parser, ) from pacer.utils.flags import flags from poselib.poselib.skeleton.skeleton3d import SkeletonMotion, SkeletonState from poselib.poselib.visualization.common import plot_skeleton_motion_interactive import glob import os import sys import pdb import os.path as osp import numpy as np import torch import gc
15,851
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. sys.path.append(os.getcwd()) HACK_MOTION_SYNC = False # HACK_MOTION_SYNC = True HACK_CONSISTENCY_TEST = False HACK_OUTPUT_MOTION = False HACK_OUTPUT_MOTION_ALL = False class HumanoidAMP(Humanoid): class StateInit(Enum): Default = 0 Start = 1 Random = 2 Hybrid = 3 def __init__(self, cfg, sim_params, physics_engine, device_type, device_id, headless): # jp hack if (HACK_MOTION_SYNC or HACK_CONSISTENCY_TEST): control_freq_inv = cfg["env"]["controlFrequencyInv"] self._motion_sync_dt = control_freq_inv * sim_params.dt cfg["env"]["controlFrequencyInv"] = 1 cfg["env"]["pdControl"] = False state_init = cfg["env"]["stateInit"] self._state_init = HumanoidAMP.StateInit[state_init] self._hybrid_init_prob = cfg["env"]["hybridInitProb"] self._num_amp_obs_steps = cfg["env"]["numAMPObsSteps"] self._amp_root_height_obs = cfg["env"].get("ampRootHeightObs", False) assert (self._num_amp_obs_steps >= 2) if ("enableHistObs" in cfg["env"]): self._enable_hist_obs = cfg["env"]["enableHistObs"] else: self._enable_hist_obs = False self._reset_default_env_ids = [] self._reset_ref_env_ids = [] self._state_reset_happened = False super().__init__(cfg=cfg, sim_params=sim_params, physics_engine=physics_engine, device_type=device_type, device_id=device_id, headless=headless) self._motion_start_times = torch.zeros(self.num_envs).to(self.device) self._sampled_motion_ids = torch.zeros(self.num_envs).long().to(self.device) motion_file = cfg['env']['motion_file'] self._load_motion(motion_file) self._amp_obs_buf = torch.zeros( (self.num_envs, self._num_amp_obs_steps, self._num_amp_obs_per_step), device=self.device, dtype=torch.float) self._curr_amp_obs_buf = self._amp_obs_buf[:, 0] self._hist_amp_obs_buf = self._amp_obs_buf[:, 1:] self._amp_obs_demo_buf = None data_dir = "data/smpl" self.smpl_parser_n = SMPL_Parser(model_path=data_dir, gender="neutral").to(self.device) self.smpl_parser_m = SMPL_Parser(model_path=data_dir, gender="male").to(self.device) self.smpl_parser_f = SMPL_Parser(model_path=data_dir, gender="female").to(self.device) self.start = True # camera flag self.ref_motion_cache = {} return def resample_motions(self): # self.gym.destroy_sim(self.sim) # del self.sim # if not self.headless: # self.gym.destroy_viewer(self.viewer) # self.create_sim() # self.gym.prepare_sim(self.sim) # self.create_viewer() # self._setup_tensors() print("Partial solution, only resample motions...") self._motion_lib.load_motions(skeleton_trees = self.skeleton_trees, limb_weights = self.humanoid_limb_and_weights.cpu(), gender_betas = self.humanoid_betas.cpu()) # For now, only need to sample motions since there are only 400 hmanoids # self.reset() # torch.cuda.empty_cache() # gc.collect() def pre_physics_step(self, actions): if (HACK_MOTION_SYNC or HACK_CONSISTENCY_TEST): actions *= 0
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. sys.path.append(os.getcwd()) HACK_MOTION_SYNC = False # HACK_MOTION_SYNC = True HACK_CONSISTENCY_TEST = False HACK_OUTPUT_MOTION = False HACK_OUTPUT_MOTION_ALL = False class HumanoidAMP(Humanoid): class StateInit(Enum): Default = 0 Start = 1 Random = 2 Hybrid = 3 def __init__(self, cfg, sim_params, physics_engine, device_type, device_id, headless): # jp hack if (HACK_MOTION_SYNC or HACK_CONSISTENCY_TEST): control_freq_inv = cfg["env"]["controlFrequencyInv"] self._motion_sync_dt = control_freq_inv * sim_params.dt cfg["env"]["controlFrequencyInv"] = 1 cfg["env"]["pdControl"] = False state_init = cfg["env"]["stateInit"] self._state_init = HumanoidAMP.StateInit[state_init] self._hybrid_init_prob = cfg["env"]["hybridInitProb"] self._num_amp_obs_steps = cfg["env"]["numAMPObsSteps"] self._amp_root_height_obs = cfg["env"].get("ampRootHeightObs", False) assert (self._num_amp_obs_steps >= 2) if ("enableHistObs" in cfg["env"]): self._enable_hist_obs = cfg["env"]["enableHistObs"] else: self._enable_hist_obs = False self._reset_default_env_ids = [] self._reset_ref_env_ids = [] self._state_reset_happened = False super().__init__(cfg=cfg, sim_params=sim_params, physics_engine=physics_engine, device_type=device_type, device_id=device_id, headless=headless) self._motion_start_times = torch.zeros(self.num_envs).to(self.device) self._sampled_motion_ids = torch.zeros(self.num_envs).long().to(self.device) motion_file = cfg['env']['motion_file'] self._load_motion(motion_file) self._amp_obs_buf = torch.zeros( (self.num_envs, self._num_amp_obs_steps, self._num_amp_obs_per_step), device=self.device, dtype=torch.float) self._curr_amp_obs_buf = self._amp_obs_buf[:, 0] self._hist_amp_obs_buf = self._amp_obs_buf[:, 1:] self._amp_obs_demo_buf = None data_dir = "data/smpl" self.smpl_parser_n = SMPL_Parser(model_path=data_dir, gender="neutral").to(self.device) self.smpl_parser_m = SMPL_Parser(model_path=data_dir, gender="male").to(self.device) self.smpl_parser_f = SMPL_Parser(model_path=data_dir, gender="female").to(self.device) self.start = True # camera flag self.ref_motion_cache = {} return def resample_motions(self): # self.gym.destroy_sim(self.sim) # del self.sim # if not self.headless: # self.gym.destroy_viewer(self.viewer) # self.create_sim() # self.gym.prepare_sim(self.sim) # self.create_viewer() # self._setup_tensors() print("Partial solution, only resample motions...") self._motion_lib.load_motions(skeleton_trees = self.skeleton_trees, limb_weights = self.humanoid_limb_and_weights.cpu(), gender_betas = self.humanoid_betas.cpu()) # For now, only need to sample motions since there are only 400 hmanoids # self.reset() # torch.cuda.empty_cache() # gc.collect() def pre_physics_step(self, actions): if (HACK_MOTION_SYNC or HACK_CONSISTENCY_TEST): actions *= 0
if flags.debug:
5
2023-10-31 20:47:12+00:00
24k
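The AMP task in the record above allocates one (num_envs, num_steps, obs_dim) buffer and exposes the current and historical observations as slices of it, so writes through the slices land in the shared tensor. A small sketch of that buffer/view pattern with toy sizes (the dimensions are illustrative, not the task's real ones):

import torch

num_envs, num_steps, obs_dim = 4, 3, 8  # toy sizes

amp_obs_buf = torch.zeros(num_envs, num_steps, obs_dim)
curr_amp_obs = amp_obs_buf[:, 0]   # view of the newest step
hist_amp_obs = amp_obs_buf[:, 1:]  # view of the older steps

curr_amp_obs[:] = 1.0              # writing through the view fills the shared buffer
assert torch.equal(amp_obs_buf[:, 0], torch.ones(num_envs, obs_dim))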
ai-safety-foundation/sparse_autoencoder
sparse_autoencoder/activation_resampler/tests/test_activation_resampler.py
[ { "identifier": "ActivationResampler", "path": "sparse_autoencoder/activation_resampler/activation_resampler.py", "snippet": "class ActivationResampler:\n \"\"\"Activation resampler.\n\n Collates the number of times each neuron fires over a set number of learned activation vectors,\n and then p...
from jaxtyping import Float, Int64 from torch import Tensor from torch.nn import Parameter from sparse_autoencoder.activation_resampler.activation_resampler import ActivationResampler from sparse_autoencoder.activation_store.base_store import ActivationStore from sparse_autoencoder.activation_store.tensor_store import TensorActivationStore from sparse_autoencoder.autoencoder.model import SparseAutoencoder, SparseAutoencoderConfig from sparse_autoencoder.loss.decoded_activations_l2 import L2ReconstructionLoss from sparse_autoencoder.loss.learned_activations_l1 import LearnedActivationsL1Loss from sparse_autoencoder.loss.reducer import LossReducer from sparse_autoencoder.tensor_types import Axis import pytest import torch
17,931
def test_gets_loss_and_correct_activations( self, full_activation_store: ActivationStore, autoencoder_model: SparseAutoencoder, ) -> None: """Test it gets loss and also returns the input activations.""" resampler = ActivationResampler( n_components=DEFAULT_N_COMPONENTS, n_learned_features=DEFAULT_N_LEARNED_FEATURES, resample_dataset_size=DEFAULT_N_ACTIVATIONS_STORE, ) loss, input_activations = resampler.compute_loss_and_get_activations( store=full_activation_store, autoencoder=autoencoder_model, loss_fn=L2ReconstructionLoss(), train_batch_size=DEFAULT_N_ACTIVATIONS_STORE, ) assert isinstance(loss, Tensor) assert isinstance(input_activations, Tensor) # Check that the activations are the same as the input data assert torch.equal(input_activations, full_activation_store._data) # type: ignore # noqa: SLF001 def test_more_items_than_in_store_error( self, full_activation_store: ActivationStore, autoencoder_model: SparseAutoencoder, ) -> None: """Test that an error is raised if there are more items than in the store.""" with pytest.raises( ValueError, match=r"Cannot get \d+ items from the store, as only \d+ were available.", ): ActivationResampler( resample_dataset_size=DEFAULT_N_ACTIVATIONS_STORE + 1, n_learned_features=DEFAULT_N_LEARNED_FEATURES, ).compute_loss_and_get_activations( store=full_activation_store, autoencoder=autoencoder_model, loss_fn=L2ReconstructionLoss(), train_batch_size=DEFAULT_N_ACTIVATIONS_STORE + 1, ) class TestAssignSamplingProbabilities: """Test the assign sampling probabilities method.""" @pytest.mark.parametrize( ("loss"), [ (torch.tensor([1.0, 2.0, 3.0])), (torch.tensor([2.0, 3.0, 5.0])), (torch.tensor([0.0, 100.0])), ], ) def test_assign_sampling_probabilities(self, loss: Tensor) -> None: """Test that sampling probabilities are correctly assigned based on loss.""" probabilities = ActivationResampler.assign_sampling_probabilities(loss) # Compare against non-vectorized implementation squared_loss = [batch_item_loss.item() ** 2 for batch_item_loss in loss] sum_squared = sum(squared_loss) proportions = [item / sum_squared for item in squared_loss] expected_probabilities = torch.tensor(proportions) assert torch.allclose( probabilities, expected_probabilities, atol=1e-4 ), f"Expected probabilities {expected_probabilities} but got {probabilities}" class TestSampleInput: """Tests for sample_input.""" def test_distribution(self) -> None: """Test that sample approximately matches a multinomial distribution.""" torch.manual_seed(0) probabilities = torch.tensor([0.1, 0.2, 0.7]) results = [0, 0, 0] for _ in range(10_000): input_activations = torch.tensor([[0.0, 0], [1, 1], [2, 2]]) sampled_input = ActivationResampler.sample_input(probabilities, input_activations, [1]) # Get the input activation index (the first element is also the index) sampled_activation_idx = sampled_input[0][0, 0].item() results[int(sampled_activation_idx)] += 1 resulting_probabilities = torch.tensor([item / sum(results) for item in results]) assert torch.allclose( resulting_probabilities, probabilities, atol=1e-2 ), f"Expected probabilities {probabilities} but got {resulting_probabilities}" def test_zero_probabilities(self) -> None: """Test where there are no dead neurons.""" probabilities = torch.tensor([[0.0], [0.0], [1.0]]) input_activations = torch.tensor([[[0.0, 0]], [[1, 1]], [[2, 2]]]) sampled_input = ActivationResampler.sample_input(probabilities, input_activations, [0]) assert sampled_input[0].shape == (0, 2), "Should return an empty tensor" def test_sample_input_raises_value_error(self) -> None: 
"""Test that ValueError is raised on length miss-match.""" probabilities = torch.tensor([0.1, 0.2, 0.7]) input_activations = torch.tensor([[1.0, 2], [3, 4], [5, 6]]) n_samples = [4] # More than the number of input activations with pytest.raises( ValueError, match=r"Cannot sample \d+ inputs from \d+ input activations." ): ActivationResampler.sample_input(probabilities, input_activations, n_samples) class TestRenormalizeAndScale: """Tests for renormalize_and_scale.""" @staticmethod def calculate_expected_output(
"""Tests for the resample_neurons module.""" DEFAULT_N_ACTIVATIONS_STORE: int = 100 DEFAULT_N_INPUT_FEATURES: int = 3 DEFAULT_N_LEARNED_FEATURES: int = 5 DEFAULT_N_COMPONENTS: int = 2 @pytest.fixture() def full_activation_store() -> ActivationStore: """Create a dummy activation store, pre-populated with data.""" store = TensorActivationStore( max_items=DEFAULT_N_ACTIVATIONS_STORE, n_components=DEFAULT_N_COMPONENTS, n_neurons=DEFAULT_N_INPUT_FEATURES, ) store.fill_with_test_data( batch_size=DEFAULT_N_ACTIVATIONS_STORE, input_features=DEFAULT_N_INPUT_FEATURES, n_batches=1, n_components=DEFAULT_N_COMPONENTS, ) return store @pytest.fixture() def autoencoder_model() -> SparseAutoencoder: """Create a dummy autoencoder model.""" return SparseAutoencoder( SparseAutoencoderConfig( n_input_features=DEFAULT_N_INPUT_FEATURES, n_learned_features=DEFAULT_N_LEARNED_FEATURES, n_components=DEFAULT_N_COMPONENTS, ) ) @pytest.fixture() def loss_fn() -> LossReducer: """Loss function fixture.""" return LossReducer(LearnedActivationsL1Loss(0.01), L2ReconstructionLoss()) @pytest.fixture() def activation_resampler_single_item_triggers() -> ActivationResampler: """Activation resampler where any call to step will result in resampling.""" return ActivationResampler( n_activations_activity_collate=1, n_learned_features=DEFAULT_N_LEARNED_FEATURES, resample_dataset_size=1, resample_interval=1, threshold_is_dead_portion_fires=0.0, ) class TestInit: """Tests for the activation resampler initialisation.""" @pytest.mark.parametrize( ("resample_interval", "n_steps_collate", "expected_window_start"), [(100, 50, 50), (100, 100, 0)], ) def test_neuron_activity_window_start( self, resample_interval: int, n_steps_collate: int, expected_window_start: int ) -> None: """Test the neuron activity window start is set correctly.""" resampler = ActivationResampler( n_learned_features=10, resample_interval=resample_interval, n_activations_activity_collate=n_steps_collate, ) assert resampler.neuron_activity_window_start == expected_window_start class TestComputeLossAndGetActivations: """Tests for compute_loss_and_get_activations.""" def test_gets_loss_and_correct_activations( self, full_activation_store: ActivationStore, autoencoder_model: SparseAutoencoder, ) -> None: """Test it gets loss and also returns the input activations.""" resampler = ActivationResampler( n_components=DEFAULT_N_COMPONENTS, n_learned_features=DEFAULT_N_LEARNED_FEATURES, resample_dataset_size=DEFAULT_N_ACTIVATIONS_STORE, ) loss, input_activations = resampler.compute_loss_and_get_activations( store=full_activation_store, autoencoder=autoencoder_model, loss_fn=L2ReconstructionLoss(), train_batch_size=DEFAULT_N_ACTIVATIONS_STORE, ) assert isinstance(loss, Tensor) assert isinstance(input_activations, Tensor) # Check that the activations are the same as the input data assert torch.equal(input_activations, full_activation_store._data) # type: ignore # noqa: SLF001 def test_more_items_than_in_store_error( self, full_activation_store: ActivationStore, autoencoder_model: SparseAutoencoder, ) -> None: """Test that an error is raised if there are more items than in the store.""" with pytest.raises( ValueError, match=r"Cannot get \d+ items from the store, as only \d+ were available.", ): ActivationResampler( resample_dataset_size=DEFAULT_N_ACTIVATIONS_STORE + 1, n_learned_features=DEFAULT_N_LEARNED_FEATURES, ).compute_loss_and_get_activations( store=full_activation_store, autoencoder=autoencoder_model, loss_fn=L2ReconstructionLoss(), train_batch_size=DEFAULT_N_ACTIVATIONS_STORE 
+ 1, ) class TestAssignSamplingProbabilities: """Test the assign sampling probabilities method.""" @pytest.mark.parametrize( ("loss"), [ (torch.tensor([1.0, 2.0, 3.0])), (torch.tensor([2.0, 3.0, 5.0])), (torch.tensor([0.0, 100.0])), ], ) def test_assign_sampling_probabilities(self, loss: Tensor) -> None: """Test that sampling probabilities are correctly assigned based on loss.""" probabilities = ActivationResampler.assign_sampling_probabilities(loss) # Compare against non-vectorized implementation squared_loss = [batch_item_loss.item() ** 2 for batch_item_loss in loss] sum_squared = sum(squared_loss) proportions = [item / sum_squared for item in squared_loss] expected_probabilities = torch.tensor(proportions) assert torch.allclose( probabilities, expected_probabilities, atol=1e-4 ), f"Expected probabilities {expected_probabilities} but got {probabilities}" class TestSampleInput: """Tests for sample_input.""" def test_distribution(self) -> None: """Test that sample approximately matches a multinomial distribution.""" torch.manual_seed(0) probabilities = torch.tensor([0.1, 0.2, 0.7]) results = [0, 0, 0] for _ in range(10_000): input_activations = torch.tensor([[0.0, 0], [1, 1], [2, 2]]) sampled_input = ActivationResampler.sample_input(probabilities, input_activations, [1]) # Get the input activation index (the first element is also the index) sampled_activation_idx = sampled_input[0][0, 0].item() results[int(sampled_activation_idx)] += 1 resulting_probabilities = torch.tensor([item / sum(results) for item in results]) assert torch.allclose( resulting_probabilities, probabilities, atol=1e-2 ), f"Expected probabilities {probabilities} but got {resulting_probabilities}" def test_zero_probabilities(self) -> None: """Test where there are no dead neurons.""" probabilities = torch.tensor([[0.0], [0.0], [1.0]]) input_activations = torch.tensor([[[0.0, 0]], [[1, 1]], [[2, 2]]]) sampled_input = ActivationResampler.sample_input(probabilities, input_activations, [0]) assert sampled_input[0].shape == (0, 2), "Should return an empty tensor" def test_sample_input_raises_value_error(self) -> None: """Test that ValueError is raised on length miss-match.""" probabilities = torch.tensor([0.1, 0.2, 0.7]) input_activations = torch.tensor([[1.0, 2], [3, 4], [5, 6]]) n_samples = [4] # More than the number of input activations with pytest.raises( ValueError, match=r"Cannot sample \d+ inputs from \d+ input activations." ): ActivationResampler.sample_input(probabilities, input_activations, n_samples) class TestRenormalizeAndScale: """Tests for renormalize_and_scale.""" @staticmethod def calculate_expected_output(
sampled_input: Float[Tensor, Axis.names(Axis.DEAD_FEATURE, Axis.INPUT_OUTPUT_FEATURE)],
8
2023-10-27 07:37:15+00:00
24k
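The resampler tests in the record above compare assign_sampling_probabilities against a plain reference: each item's probability is its squared loss divided by the sum of squared losses. That reference computation in isolation (a sketch of the expected behaviour, not the library implementation):

import torch

loss = torch.tensor([1.0, 2.0, 3.0])

# Probability proportional to squared loss, as in the non-vectorized reference.
squared = loss ** 2                      # tensor([1., 4., 9.])
probabilities = squared / squared.sum()  # tensor([0.0714, 0.2857, 0.6429])

print(probabilities)
assert torch.allclose(probabilities.sum(), torch.tensor(1.0))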