repo_name stringlengths 7 71 | file_path stringlengths 5 118 | context list | import_statement stringlengths 45 12.5k | token_num int64 641 99.4k | cropped_code stringlengths 44 17k | all_code stringlengths 43 754k | next_line stringlengths 2 330 | gold_snippet_index int64 0 68 | created_at stringlengths 25 25 | level stringclasses 9 values |
|---|---|---|---|---|---|---|---|---|---|---|
jkulhanek/nerfbaselines | nerfbaselines/registry.py | [
{
"identifier": "Method",
"path": "nerfbaselines/types.py",
"snippet": "class Method(Protocol):\n @classmethod\n def install(cls):\n \"\"\"\n Install the method.\n \"\"\"\n pass\n\n @abstractmethod\n def get_info(self) -> MethodInfo:\n \"\"\"\n Get m... | import inspect
import types
import typing
import os
import importlib
import dataclasses
import subprocess
from typing import Optional, Type, Any, Tuple, Dict, Set
from typing import Literal
from typing_extensions import Literal # type: ignore
from typing import FrozenSet
from typing_extensions import FrozenSet # type: ignore
from typing import get_args
from typing_extensions import get_args # type: ignore
from dataclasses import dataclass, field
from .types import Method
from .backends import DockerMethod, CondaMethod, ApptainerMethod
from .utils import partialclass
from . import methods
from . import methods | 6,114 |
try:
except ImportError:
try:
except ImportError:
try:
except ImportError:
DEFAULT_DOCKER_IMAGE = "kulhanek/nerfbaselines:v1"
Backend = Literal["conda", "docker", "apptainer", "python"]
ALL_BACKENDS = list(get_args(Backend))
registry = {}
# Auto register
_auto_register_completed = False
def auto_register(force=False):
global _auto_register_completed
if _auto_register_completed and not force:
return
# TODO: do this more robustly
for package in os.listdir(os.path.dirname(methods.__file__)):
if package.endswith(".py") and not package.startswith("_") and package != "__init__.py":
package = package[:-3]
importlib.import_module(f".methods.{package}", __package__)
_auto_register_completed = True
def register(spec: "MethodSpec", name: str, *args, metadata=None, **kwargs) -> "MethodSpec":
assert name not in registry, f"Method {name} already registered"
if metadata is None:
metadata = {}
metadata = {**spec.metadata, **metadata}
spec = dataclasses.replace(spec, args=spec.args + args, kwargs={**spec.kwargs, **kwargs}, metadata=metadata)
registry[name] = spec
return spec
class _LazyMethodMeta(type):
def __getitem__(cls, __name: Tuple[str, str]) -> Type[Method]:
module, name = __name
module_base = methods.__package__
def build(ns):
def new(ncls, *args, **kwargs):
old_init = ncls.__init__
# For partialclass
if hasattr(old_init, "__original_func__"):
args = old_init.__args__ + args
kwargs = {**old_init.__kwargs__, **kwargs}
mod = importlib.import_module(module, methods.__package__)
ncls = getattr(mod, name)
assert inspect.isclass(ncls)
return ncls(*args, **kwargs)
ns["__new__"] = new
ncls = types.new_class(name, exec_body=build, bases=(Method,))
ncls.__module__ = module_base + module if module.startswith(".") else module
ncls.__name__ = name
return typing.cast(Type[Method], ncls)
class LazyMethod(object, metaclass=_LazyMethodMeta):
def __class_getitem__(cls, __name: Tuple[str, str]) -> Type[Method]:
return _LazyMethodMeta.__getitem__(cls, __name)
@dataclass(frozen=True)
class MethodSpec:
method: Type[Method]
conda: Optional[Type[CondaMethod]] = None
docker: Optional[Type[DockerMethod]] = None
|
try:
except ImportError:
try:
except ImportError:
try:
except ImportError:
DEFAULT_DOCKER_IMAGE = "kulhanek/nerfbaselines:v1"
Backend = Literal["conda", "docker", "apptainer", "python"]
ALL_BACKENDS = list(get_args(Backend))
registry = {}
# Auto register
_auto_register_completed = False
def auto_register(force=False):
global _auto_register_completed
if _auto_register_completed and not force:
return
# TODO: do this more robustly
for package in os.listdir(os.path.dirname(methods.__file__)):
if package.endswith(".py") and not package.startswith("_") and package != "__init__.py":
package = package[:-3]
importlib.import_module(f".methods.{package}", __package__)
_auto_register_completed = True
def register(spec: "MethodSpec", name: str, *args, metadata=None, **kwargs) -> "MethodSpec":
assert name not in registry, f"Method {name} already registered"
if metadata is None:
metadata = {}
metadata = {**spec.metadata, **metadata}
spec = dataclasses.replace(spec, args=spec.args + args, kwargs={**spec.kwargs, **kwargs}, metadata=metadata)
registry[name] = spec
return spec
class _LazyMethodMeta(type):
def __getitem__(cls, __name: Tuple[str, str]) -> Type[Method]:
module, name = __name
module_base = methods.__package__
def build(ns):
def new(ncls, *args, **kwargs):
old_init = ncls.__init__
# For partialclass
if hasattr(old_init, "__original_func__"):
args = old_init.__args__ + args
kwargs = {**old_init.__kwargs__, **kwargs}
mod = importlib.import_module(module, methods.__package__)
ncls = getattr(mod, name)
assert inspect.isclass(ncls)
return ncls(*args, **kwargs)
ns["__new__"] = new
ncls = types.new_class(name, exec_body=build, bases=(Method,))
ncls.__module__ = module_base + module if module.startswith(".") else module
ncls.__name__ = name
return typing.cast(Type[Method], ncls)
class LazyMethod(object, metaclass=_LazyMethodMeta):
def __class_getitem__(cls, __name: Tuple[str, str]) -> Type[Method]:
return _LazyMethodMeta.__getitem__(cls, __name)
@dataclass(frozen=True)
class MethodSpec:
method: Type[Method]
conda: Optional[Type[CondaMethod]] = None
docker: Optional[Type[DockerMethod]] = None | apptainer: Optional[Type[ApptainerMethod]] = None | 2 | 2023-11-07 20:22:35+00:00 | 8k |
microsoft/Everything-of-Thoughts-XoT | xot_all_in_one/xot/controller/solver/xot_solver.py | [
{
"identifier": "MCTS",
"path": "xot_all_in_one/xot/controller/solver/MCTS.py",
"snippet": "class MCTS():\n \"\"\"\n This class handles the MCTS tree.\n \"\"\"\n\n def __init__(self, game, nnet, args, player=1):\n self.game = game\n self.player = player\n self.nnet = nne... | import os
import json
import itertools
import random
import ast
import re
import numpy as np
import pandas as pd
from collections import Counter
from .MCTS import MCTS
from .Coach import Coach
from .pytorch_game24.NNet import NNetWrapper as nn
from .pytorch_cube.NNet import NNetWrapper as nn
from .pytorch_npuzzle.NNet import NNetWrapper as nn | 5,334 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
class XoT_Solver:
"""
The XoT_Solver class is designed to solve a variety of games using a combination of Monte Carlo Tree Search (MCTS),
Neural Networks (NN), and a coaching mechanism. It supports both single and multiple solutions, and can revise its
solutions based on feedback.
Attributes:
args: A configuration object containing various parameters.
gpt: An instance of a GPT model for generating prompts.
game: An instance of the game to be solved.
prompter: An instance of a class for generating prompts.
parser: An instance of a class for parsing actions and thoughts.
nmcts: An instance of MCTS.
c: An instance of a Coach.
to_print: A boolean indicating whether to print debug information.
"""
def __init__(self, args, gpt, game, prompter, parser, to_print=False):
"""
Initializes the XoT_Solver with the given arguments, GPT model, game, prompter, parser, and print option.
"""
self.args = args
self.gpt = gpt
self.game = game
self.prompter = prompter
self.parser = parser
self.nmcts, self.c = self.initial_xot(args)
self.to_print = to_print
def initial_xot(self, args):
"""
Initializes the Neural Network and MCTS based on the game environment specified in the arguments.
"""
if args.env.lower() == 'game24':
elif args.env.lower() == 'cube':
elif args.env.lower() == 'npuzzle':
else:
raise ValueError
nnet = nn(self.game)
nnet.load_checkpoint(folder=self.args.model.checkpoint, filename=self.args.model.filename)
| # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
class XoT_Solver:
"""
The XoT_Solver class is designed to solve a variety of games using a combination of Monte Carlo Tree Search (MCTS),
Neural Networks (NN), and a coaching mechanism. It supports both single and multiple solutions, and can revise its
solutions based on feedback.
Attributes:
args: A configuration object containing various parameters.
gpt: An instance of a GPT model for generating prompts.
game: An instance of the game to be solved.
prompter: An instance of a class for generating prompts.
parser: An instance of a class for parsing actions and thoughts.
nmcts: An instance of MCTS.
c: An instance of a Coach.
to_print: A boolean indicating whether to print debug information.
"""
def __init__(self, args, gpt, game, prompter, parser, to_print=False):
"""
Initializes the XoT_Solver with the given arguments, GPT model, game, prompter, parser, and print option.
"""
self.args = args
self.gpt = gpt
self.game = game
self.prompter = prompter
self.parser = parser
self.nmcts, self.c = self.initial_xot(args)
self.to_print = to_print
def initial_xot(self, args):
"""
Initializes the Neural Network and MCTS based on the game environment specified in the arguments.
"""
if args.env.lower() == 'game24':
elif args.env.lower() == 'cube':
elif args.env.lower() == 'npuzzle':
else:
raise ValueError
nnet = nn(self.game)
nnet.load_checkpoint(folder=self.args.model.checkpoint, filename=self.args.model.filename) | nmcts = MCTS(self.game, nnet, args) | 0 | 2023-11-08 09:48:34+00:00 | 8k |
ultraleap/leapc-python-bindings | leapc-python-api/src/leap/connection.py | [
{
"identifier": "Device",
"path": "leapc-python-api/src/leap/device.py",
"snippet": "class Device:\n def __init__(self, device_ref=None, *, device=None, owner=None):\n \"\"\"A Device is usually constructed from a LEAP_DEVICE_REF object.\n\n Some functions require the device to be opened... | from contextlib import contextmanager
from typing import Dict, Optional, List, Callable
from timeit import default_timer as timer
from leapc_cffi import ffi, libleapc
from .device import Device
from .enums import (
ConnectionStatus,
EventType,
RS as LeapRS,
ConnectionConfig as ConnectionConfigEnum,
TrackingMode,
PolicyFlag,
)
from .event_listener import LatestEventListener, Listener
from .events import create_event, Event
from .exceptions import (
create_exception,
success_or_raise,
LeapError,
LeapConnectionAlreadyOpen,
LeapConcurrentPollError,
LeapNotConnectedError,
LeapTimeoutError,
)
import sys
import threading
import time
import json | 3,911 |
class ConnectionConfig:
"""Configuration for a Connection
Allows a user to enable multi device functionality prior to connection.
"""
def __init__(
self,
*,
server_namespace: Optional[Dict[str, str]] = None,
multi_device_aware: bool = False,
):
self._data_ptr = ffi.new("LEAP_CONNECTION_CONFIG*")
self._data_ptr.server_namespace = server_namespace
self._data_ptr.flags = 0
self._data_ptr.size = ffi.sizeof(self._data_ptr[0])
if multi_device_aware:
self._data_ptr.flags |= ConnectionConfigEnum.MultiDeviceAware.value
class Connection:
"""Connection to a Leap Server
:param listeners: A List of event listeners. Defaults to None
:param poll_timeout: A timeout of poll messages, in seconds. Defaults to 1 second.
:param response_timeout: A timeout to wait for specific events in response to events.
Defaults to 10 seconds.
"""
def __init__(
self,
*,
server_namespace: Optional[Dict[str, str]] = None,
multi_device_aware: bool = False,
listeners: Optional[List[Listener]] = None,
poll_timeout: float = 1,
response_timeout: float = 10,
):
if listeners is None:
listeners = []
self._listeners = listeners
self._connection_ptr = self._create_connection(server_namespace, multi_device_aware)
self._poll_timeout = int(poll_timeout * 1000) # Seconds to milliseconds
self._response_timeout = int(response_timeout)
self._stop_poll_flag = False
self._is_open = False
self._poll_thread = None
def __del__(self):
# Since 'destroy_connection' only tells C to free the memory that it allocated
# for our connection, it is appropriate to leave the deletion of this to the garbage
# collector.
if hasattr(self, "_connection_ptr"):
# We have this 'if' statement to deal with the possibility that an Exception
# could be raised in the __init__ method, before this has been assigned.
self._destroy_connection(self._connection_ptr)
def add_listener(self, listener: Listener):
self._listeners.append(listener)
def remove_listener(self, listener: Listener):
self._listeners.remove(listener)
def poll(self, timeout: Optional[float] = None) -> Event:
"""Manually poll the connection from this thread
Do not notify listeners about the result of this poll.
:param timeout: The timeout of the poll, in seconds.
Defaults to the number the Connection was initialised with.
"""
if self._poll_thread is not None:
raise LeapConcurrentPollError
if timeout is None:
timeout = self._poll_timeout
else:
timeout = int(timeout * 1000) # Seconds to milliseconds
event_ptr = ffi.new("LEAP_CONNECTION_MESSAGE*")
success_or_raise(libleapc.LeapPollConnection, self._connection_ptr[0], timeout, event_ptr)
|
class ConnectionConfig:
"""Configuration for a Connection
Allows a user to enable multi device functionality prior to connection.
"""
def __init__(
self,
*,
server_namespace: Optional[Dict[str, str]] = None,
multi_device_aware: bool = False,
):
self._data_ptr = ffi.new("LEAP_CONNECTION_CONFIG*")
self._data_ptr.server_namespace = server_namespace
self._data_ptr.flags = 0
self._data_ptr.size = ffi.sizeof(self._data_ptr[0])
if multi_device_aware:
self._data_ptr.flags |= ConnectionConfigEnum.MultiDeviceAware.value
class Connection:
"""Connection to a Leap Server
:param listeners: A List of event listeners. Defaults to None
:param poll_timeout: A timeout of poll messages, in seconds. Defaults to 1 second.
:param response_timeout: A timeout to wait for specific events in response to events.
Defaults to 10 seconds.
"""
def __init__(
self,
*,
server_namespace: Optional[Dict[str, str]] = None,
multi_device_aware: bool = False,
listeners: Optional[List[Listener]] = None,
poll_timeout: float = 1,
response_timeout: float = 10,
):
if listeners is None:
listeners = []
self._listeners = listeners
self._connection_ptr = self._create_connection(server_namespace, multi_device_aware)
self._poll_timeout = int(poll_timeout * 1000) # Seconds to milliseconds
self._response_timeout = int(response_timeout)
self._stop_poll_flag = False
self._is_open = False
self._poll_thread = None
def __del__(self):
# Since 'destroy_connection' only tells C to free the memory that it allocated
# for our connection, it is appropriate to leave the deletion of this to the garbage
# collector.
if hasattr(self, "_connection_ptr"):
# We have this 'if' statement to deal with the possibility that an Exception
# could be raised in the __init__ method, before this has been assigned.
self._destroy_connection(self._connection_ptr)
def add_listener(self, listener: Listener):
self._listeners.append(listener)
def remove_listener(self, listener: Listener):
self._listeners.remove(listener)
def poll(self, timeout: Optional[float] = None) -> Event:
"""Manually poll the connection from this thread
Do not notify listeners about the result of this poll.
:param timeout: The timeout of the poll, in seconds.
Defaults to the number the Connection was initialised with.
"""
if self._poll_thread is not None:
raise LeapConcurrentPollError
if timeout is None:
timeout = self._poll_timeout
else:
timeout = int(timeout * 1000) # Seconds to milliseconds
event_ptr = ffi.new("LEAP_CONNECTION_MESSAGE*")
success_or_raise(libleapc.LeapPollConnection, self._connection_ptr[0], timeout, event_ptr) | return create_event(event_ptr) | 9 | 2023-11-08 13:35:40+00:00 | 8k |
UMass-Foundation-Model/CoVLM | transformers/src/transformers/trainer_utils.py | [
{
"identifier": "ExplicitEnum",
"path": "transformers/src/transformers/utils/generic.py",
"snippet": "class ExplicitEnum(str, Enum):\n \"\"\"\n Enum with more explicit error message for missing values.\n \"\"\"\n\n @classmethod\n def _missing_(cls, value):\n raise ValueError(\n ... | import copy
import functools
import gc
import inspect
import os
import random
import re
import threading
import time
import numpy as np
import torch
import tensorflow as tf
import torch_xla.core.xla_model as xm
import torch_xla.core.xla_model as xm
import torch
import psutil # noqa
import torch
import torch
import torch
from typing import Any, Dict, List, NamedTuple, Optional, Tuple, Union
from .utils import (
ExplicitEnum,
is_psutil_available,
is_tf_available,
is_torch_available,
is_torch_cuda_available,
is_torch_mps_available,
is_torch_npu_available,
is_torch_tpu_available,
is_torch_xpu_available,
requires_backends,
)
from .integrations import is_optuna_available
from .integrations import is_ray_tune_available
from ray import tune
from .integrations import is_wandb_available
from accelerate.utils import find_executable_batch_size as accelerate_find_executable_batch_size | 3,676 | raise ImportError("This function needs wandb installed: `pip install wandb`")
return {
"method": "random",
"metric": {"name": "objective", "goal": "minimize"},
"parameters": {
"learning_rate": {"distribution": "uniform", "min": 1e-6, "max": 1e-4},
"num_train_epochs": {"distribution": "int_uniform", "min": 1, "max": 6},
"seed": {"distribution": "int_uniform", "min": 1, "max": 40},
"per_device_train_batch_size": {"values": [4, 8, 16, 32, 64]},
},
}
class HPSearchBackend(ExplicitEnum):
OPTUNA = "optuna"
RAY = "ray"
SIGOPT = "sigopt"
WANDB = "wandb"
def is_main_process(local_rank):
"""
Whether or not the current process is the local process, based on `xm.get_ordinal()` (for TPUs) first, then on
`local_rank`.
"""
if is_torch_tpu_available(check_device=True):
return xm.get_ordinal() == 0
return local_rank in [-1, 0]
def total_processes_number(local_rank):
"""
Return the number of processes launched in parallel. Works with `torch.distributed` and TPUs.
"""
if is_torch_tpu_available(check_device=True):
return xm.xrt_world_size()
elif local_rank != -1 and is_torch_available():
return torch.distributed.get_world_size()
return 1
def speed_metrics(split, start_time, num_samples=None, num_steps=None, num_tokens=None):
"""
Measure and return speed performance metrics.
This function requires a time snapshot `start_time` before the operation to be measured starts and this function
should be run immediately after the operation to be measured has completed.
Args:
- split: name to prefix metric (like train, eval, test...)
- start_time: operation start time
- num_samples: number of samples processed
- num_tokens: number of tokens processed
"""
runtime = time.time() - start_time
result = {f"{split}_runtime": round(runtime, 4)}
if runtime == 0:
return result
if num_samples is not None:
samples_per_second = num_samples / runtime
result[f"{split}_samples_per_second"] = round(samples_per_second, 3)
if num_steps is not None:
steps_per_second = num_steps / runtime
result[f"{split}_steps_per_second"] = round(steps_per_second, 3)
if num_tokens is not None:
tokens_per_second = num_tokens / runtime
result[f"{split}_tokens_per_second"] = round(tokens_per_second, 3)
return result
class SchedulerType(ExplicitEnum):
LINEAR = "linear"
COSINE = "cosine"
COSINE_WITH_RESTARTS = "cosine_with_restarts"
POLYNOMIAL = "polynomial"
CONSTANT = "constant"
CONSTANT_WITH_WARMUP = "constant_with_warmup"
INVERSE_SQRT = "inverse_sqrt"
REDUCE_ON_PLATEAU = "reduce_lr_on_plateau"
class TrainerMemoryTracker:
"""
A helper class that tracks cpu and gpu memory.
This class will silently skip unless `psutil` is available. Install with `pip install psutil`.
When a stage completes, it can pass metrics dict to update with the memory metrics gathered during this stage.
Example :
```python
self._memory_tracker = TrainerMemoryTracker(self.args.skip_memory_metrics)
self._memory_tracker.start()
# code ...
metrics = {"train_runtime": 10.5}
self._memory_tracker.stop_and_update_metrics(metrics)
```
At the moment GPU tracking is only for `pytorch`, but can be extended to support `tensorflow`.
To understand this class' intricacies please read the documentation of [`~Trainer.log_metrics`].
"""
# map trainer methods to metrics prefix
stages = {
"__init__": "init",
"train": "train",
"_inner_training_loop": "train",
"evaluate": "eval",
"predict": "test",
}
def __init__(self, skip_memory_metrics=False):
self.skip_memory_metrics = skip_memory_metrics
| # coding=utf-8
# Copyright 2020-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities for the Trainer and TFTrainer class. Should be independent from PyTorch and TensorFlow.
"""
if is_torch_available():
if is_tf_available():
def seed_worker(_):
"""
Helper function to set worker seed during Dataloader initialization.
"""
worker_seed = torch.initial_seed() % 2**32
set_seed(worker_seed)
def enable_full_determinism(seed: int, warn_only: bool = False):
"""
Helper function for reproducible behavior during distributed training. See
- https://pytorch.org/docs/stable/notes/randomness.html for pytorch
- https://www.tensorflow.org/api_docs/python/tf/config/experimental/enable_op_determinism for tensorflow
"""
# set seed first
set_seed(seed)
if is_torch_available():
# Enable PyTorch deterministic mode. This potentially requires either the environment
# variable 'CUDA_LAUNCH_BLOCKING' or 'CUBLAS_WORKSPACE_CONFIG' to be set,
# depending on the CUDA version, so we set them both here
os.environ["CUDA_LAUNCH_BLOCKING"] = "1"
os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":16:8"
torch.use_deterministic_algorithms(True, warn_only=warn_only)
# Enable CUDNN deterministic mode
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
if is_tf_available():
tf.config.experimental.enable_op_determinism()
def set_seed(seed: int):
"""
Helper function for reproducible behavior to set the seed in `random`, `numpy`, `torch` and/or `tf` (if installed).
Args:
seed (`int`): The seed to set.
"""
random.seed(seed)
np.random.seed(seed)
if is_torch_available():
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# ^^ safe to call this function even if cuda is not available
if is_torch_npu_available():
torch.npu.manual_seed_all(seed)
if is_torch_xpu_available():
torch.xpu.manual_seed_all(seed)
if is_tf_available():
tf.random.set_seed(seed)
class EvalPrediction:
"""
Evaluation output (always contains labels), to be used to compute metrics.
Parameters:
predictions (`np.ndarray`): Predictions of the model.
label_ids (`np.ndarray`): Targets to be matched.
inputs (`np.ndarray`, *optional*)
"""
def __init__(
self,
predictions: Union[np.ndarray, Tuple[np.ndarray]],
label_ids: Union[np.ndarray, Tuple[np.ndarray]],
inputs: Optional[Union[np.ndarray, Tuple[np.ndarray]]] = None,
):
self.predictions = predictions
self.label_ids = label_ids
self.inputs = inputs
def __iter__(self):
if self.inputs is not None:
return iter((self.predictions, self.label_ids, self.inputs))
else:
return iter((self.predictions, self.label_ids))
def __getitem__(self, idx):
if idx < 0 or idx > 2:
raise IndexError("tuple index out of range")
if idx == 2 and self.inputs is None:
raise IndexError("tuple index out of range")
if idx == 0:
return self.predictions
elif idx == 1:
return self.label_ids
elif idx == 2:
return self.inputs
class EvalLoopOutput(NamedTuple):
predictions: Union[np.ndarray, Tuple[np.ndarray]]
label_ids: Optional[Union[np.ndarray, Tuple[np.ndarray]]]
metrics: Optional[Dict[str, float]]
num_samples: Optional[int]
class PredictionOutput(NamedTuple):
predictions: Union[np.ndarray, Tuple[np.ndarray]]
label_ids: Optional[Union[np.ndarray, Tuple[np.ndarray]]]
metrics: Optional[Dict[str, float]]
class TrainOutput(NamedTuple):
global_step: int
training_loss: float
metrics: Dict[str, float]
PREFIX_CHECKPOINT_DIR = "checkpoint"
_re_checkpoint = re.compile(r"^" + PREFIX_CHECKPOINT_DIR + r"\-(\d+)$")
def get_last_checkpoint(folder):
content = os.listdir(folder)
checkpoints = [
path
for path in content
if _re_checkpoint.search(path) is not None and os.path.isdir(os.path.join(folder, path))
]
if len(checkpoints) == 0:
return
return os.path.join(folder, max(checkpoints, key=lambda x: int(_re_checkpoint.search(x).groups()[0])))
class IntervalStrategy(ExplicitEnum):
NO = "no"
STEPS = "steps"
EPOCH = "epoch"
class EvaluationStrategy(ExplicitEnum):
NO = "no"
STEPS = "steps"
EPOCH = "epoch"
class HubStrategy(ExplicitEnum):
END = "end"
EVERY_SAVE = "every_save"
CHECKPOINT = "checkpoint"
ALL_CHECKPOINTS = "all_checkpoints"
class BestRun(NamedTuple):
"""
The best run found by a hyperparameter search (see [`~Trainer.hyperparameter_search`]).
Parameters:
run_id (`str`):
The id of the best run (if models were saved, the corresponding checkpoint will be in the folder ending
with run-{run_id}).
objective (`float`):
The objective that was obtained for this run.
hyperparameters (`Dict[str, Any]`):
The hyperparameters picked to get this run.
run_summary (`Optional[Any]`):
A summary of tuning experiments. `ray.tune.ExperimentAnalysis` object for Ray backend.
"""
run_id: str
objective: float
hyperparameters: Dict[str, Any]
run_summary: Optional[Any] = None
def default_compute_objective(metrics: Dict[str, float]) -> float:
"""
The default objective to maximize/minimize when doing an hyperparameter search. It is the evaluation loss if no
metrics are provided to the [`Trainer`], the sum of all metrics otherwise.
Args:
metrics (`Dict[str, float]`): The metrics returned by the evaluate method.
Return:
`float`: The objective to minimize or maximize
"""
metrics = copy.deepcopy(metrics)
loss = metrics.pop("eval_loss", None)
_ = metrics.pop("epoch", None)
# Remove speed metrics
speed_metrics = [
m
for m in metrics.keys()
if m.endswith("_runtime") or m.endswith("_per_second") or m.endswith("_compilation_time")
]
for sm in speed_metrics:
_ = metrics.pop(sm, None)
return loss if len(metrics) == 0 else sum(metrics.values())
def default_hp_space_optuna(trial) -> Dict[str, float]:
assert is_optuna_available(), "This function needs Optuna installed: `pip install optuna`"
return {
"learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True),
"num_train_epochs": trial.suggest_int("num_train_epochs", 1, 5),
"seed": trial.suggest_int("seed", 1, 40),
"per_device_train_batch_size": trial.suggest_categorical("per_device_train_batch_size", [4, 8, 16, 32, 64]),
}
def default_hp_space_ray(trial) -> Dict[str, float]:
assert is_ray_tune_available(), "This function needs ray installed: `pip install ray[tune]`"
return {
"learning_rate": tune.loguniform(1e-6, 1e-4),
"num_train_epochs": tune.choice(list(range(1, 6))),
"seed": tune.uniform(1, 40),
"per_device_train_batch_size": tune.choice([4, 8, 16, 32, 64]),
}
def default_hp_space_sigopt(trial):
return [
{"bounds": {"min": 1e-6, "max": 1e-4}, "name": "learning_rate", "type": "double", "transformamtion": "log"},
{"bounds": {"min": 1, "max": 6}, "name": "num_train_epochs", "type": "int"},
{"bounds": {"min": 1, "max": 40}, "name": "seed", "type": "int"},
{
"categorical_values": ["4", "8", "16", "32", "64"],
"name": "per_device_train_batch_size",
"type": "categorical",
},
]
def default_hp_space_wandb(trial) -> Dict[str, float]:
if not is_wandb_available():
raise ImportError("This function needs wandb installed: `pip install wandb`")
return {
"method": "random",
"metric": {"name": "objective", "goal": "minimize"},
"parameters": {
"learning_rate": {"distribution": "uniform", "min": 1e-6, "max": 1e-4},
"num_train_epochs": {"distribution": "int_uniform", "min": 1, "max": 6},
"seed": {"distribution": "int_uniform", "min": 1, "max": 40},
"per_device_train_batch_size": {"values": [4, 8, 16, 32, 64]},
},
}
class HPSearchBackend(ExplicitEnum):
OPTUNA = "optuna"
RAY = "ray"
SIGOPT = "sigopt"
WANDB = "wandb"
def is_main_process(local_rank):
"""
Whether or not the current process is the local process, based on `xm.get_ordinal()` (for TPUs) first, then on
`local_rank`.
"""
if is_torch_tpu_available(check_device=True):
return xm.get_ordinal() == 0
return local_rank in [-1, 0]
def total_processes_number(local_rank):
"""
Return the number of processes launched in parallel. Works with `torch.distributed` and TPUs.
"""
if is_torch_tpu_available(check_device=True):
return xm.xrt_world_size()
elif local_rank != -1 and is_torch_available():
return torch.distributed.get_world_size()
return 1
def speed_metrics(split, start_time, num_samples=None, num_steps=None, num_tokens=None):
"""
Measure and return speed performance metrics.
This function requires a time snapshot `start_time` before the operation to be measured starts and this function
should be run immediately after the operation to be measured has completed.
Args:
- split: name to prefix metric (like train, eval, test...)
- start_time: operation start time
- num_samples: number of samples processed
- num_tokens: number of tokens processed
"""
runtime = time.time() - start_time
result = {f"{split}_runtime": round(runtime, 4)}
if runtime == 0:
return result
if num_samples is not None:
samples_per_second = num_samples / runtime
result[f"{split}_samples_per_second"] = round(samples_per_second, 3)
if num_steps is not None:
steps_per_second = num_steps / runtime
result[f"{split}_steps_per_second"] = round(steps_per_second, 3)
if num_tokens is not None:
tokens_per_second = num_tokens / runtime
result[f"{split}_tokens_per_second"] = round(tokens_per_second, 3)
return result
class SchedulerType(ExplicitEnum):
LINEAR = "linear"
COSINE = "cosine"
COSINE_WITH_RESTARTS = "cosine_with_restarts"
POLYNOMIAL = "polynomial"
CONSTANT = "constant"
CONSTANT_WITH_WARMUP = "constant_with_warmup"
INVERSE_SQRT = "inverse_sqrt"
REDUCE_ON_PLATEAU = "reduce_lr_on_plateau"
class TrainerMemoryTracker:
"""
A helper class that tracks cpu and gpu memory.
This class will silently skip unless `psutil` is available. Install with `pip install psutil`.
When a stage completes, it can pass metrics dict to update with the memory metrics gathered during this stage.
Example :
```python
self._memory_tracker = TrainerMemoryTracker(self.args.skip_memory_metrics)
self._memory_tracker.start()
# code ...
metrics = {"train_runtime": 10.5}
self._memory_tracker.stop_and_update_metrics(metrics)
```
At the moment GPU tracking is only for `pytorch`, but can be extended to support `tensorflow`.
To understand this class' intricacies please read the documentation of [`~Trainer.log_metrics`].
"""
# map trainer methods to metrics prefix
stages = {
"__init__": "init",
"train": "train",
"_inner_training_loop": "train",
"evaluate": "eval",
"predict": "test",
}
def __init__(self, skip_memory_metrics=False):
self.skip_memory_metrics = skip_memory_metrics
| if not is_psutil_available(): | 1 | 2023-11-07 04:23:57+00:00 | 8k |
HKU-BAL/ClairS-TO | src/compare_vcf.py | [
{
"identifier": "str2bool",
"path": "shared/utils.py",
"snippet": "def str2bool(v):\n if v is None:\n return v\n if isinstance(v, bool):\n return v\n if v.lower() in ('yes', 'ture', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'flase', 'false', 'f', 'n... | import os
import sys
import subprocess
from argparse import ArgumentParser, SUPPRESS
from collections import defaultdict
from shared.utils import str2bool, str_none
from shared.vcf import VcfReader, VcfWriter
from shared.interval_tree import bed_tree_from, is_region_in
from shared.utils import file_path_from
from src.cal_af_distribution import cal_af | 6,130 | # BSD 3-Clause License
#
# Copyright 2023 The University of Hong Kong, Department of Computer Science
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Canonical ordering of the human major contigs (1-22, X, Y), listed both with
# and without the "chr" prefix; list position defines sort precedence.
major_contigs_order = ["chr" + str(a) for a in list(range(1, 23)) + ["X", "Y"]] + [str(a) for a in
                                                                                  list(range(1, 23)) + ["X", "Y"]]
# Same contig names as a set, for O(1) membership tests.
major_contigs = {"chr" + str(a) for a in list(range(1, 23)) + ["X", "Y"]}.union(
    {str(a) for a in list(range(1, 23)) + ["X", "Y"]})
def sort_key(item):
    """Sort key for ``(contig, position)`` tuples.

    Ranks the contig by its index in ``major_contigs_order`` (so "chr"-prefixed
    names sort before unprefixed ones), then orders by position. Raises
    ``KeyError`` for contigs outside ``major_contigs_order``.
    """
    rank_of = {name: rank for rank, name in enumerate(major_contigs_order)}
    contig_rank = rank_of[item[0]]
    return (contig_rank, item[1])
def cal_metrics(tp, fp, fn):
    """Compute (precision, recall, F1-score), each rounded to 4 decimals.

    A ratio whose denominator is zero is reported as 0.0 instead of raising.
    """
    def _safe_ratio(numerator, denominator):
        # Guards against empty call sets / truth sets.
        return numerator / denominator if denominator > 0 else 0.0

    precision = _safe_ratio(tp, tp + fp)
    recall = _safe_ratio(tp, tp + fn)
    f1_score = _safe_ratio(2 * precision * recall, precision + recall)
    return round(precision, 4), round(recall, 4), round(f1_score, 4)
def output_best_cut_off(fp_qual_dict, tp_qual_dict, fn_count, use_int_cut_off=True, add_tp_fn=False):
    """Sweep quality cut-offs and report per-threshold metrics, best F1 first.

    Parameters
    ----------
    fp_qual_dict : dict
        Maps false-positive call keys to their quality scores.
    tp_qual_dict : dict
        Maps true-positive call keys to their quality scores.
    fn_count : int
        Number of false negatives that no cut-off can recover.
    use_int_cut_off : bool
        If True, candidate thresholds are the observed qualities truncated to
        int; otherwise a fixed grid of 0.00, 0.01, ..., 1.00 is used.
    add_tp_fn : bool
        Currently unused; kept for interface compatibility with callers.

    Returns
    -------
    list of [qual, precision, recall, f1, tp, fp, fn, tp + fn] rows, sorted by
    F1 in descending order.
    """
    if use_int_cut_off:
        candidate_cutoffs = set(int(q) for q in list(fp_qual_dict.values()) + list(tp_qual_dict.values()))
    else:
        candidate_cutoffs = [step / 100.0 for step in range(0, 101)]

    rows = []
    for cutoff in candidate_cutoffs:
        fp_kept = sum(1 for score in fp_qual_dict.values() if score >= cutoff)
        tp_kept = sum(1 for score in tp_qual_dict.values() if score >= cutoff)
        # TPs filtered out by the cut-off become additional false negatives.
        fn_total = fn_count + len(tp_qual_dict) - tp_kept
        precision, recall, f1 = cal_metrics(tp=tp_kept, fp=fp_kept, fn=fn_total)
        rows.append([cutoff, precision, recall, f1, tp_kept, fp_kept, fn_total, tp_kept + fn_total])
    rows.sort(key=lambda row: row[3], reverse=True)
    return rows
def compare_vcf(args):
"""
Follow how som.py works
## https://github.com/Illumina/hap.py/blob/master/doc/sompy.md
"""
output_fn = args.output_fn
output_dir = args.output_dir
truth_vcf_fn = args.truth_vcf_fn
input_vcf_fn = args.input_vcf_fn
bed_fn = args.bed_fn
high_confident_only = args.high_confident_only
ctg_name = args.ctg_name
skip_genotyping = args.skip_genotyping
input_filter_tag = args.input_filter_tag
truth_filter_tag = args.truth_filter_tag
discard_fn_out_of_fp_bed = args.discard_fn_out_of_fp_bed
benchmark_indel = args.benchmark_indel
fp_bed_tree = bed_tree_from(bed_file_path=bed_fn, contig_name=ctg_name)
strat_bed_tree_list = []
if args.strat_bed_fn is not None and ',' in args.strat_bed_fn:
for strat_bed_fn in args.strat_bed_fn.split(','):
strat_bed_tree_list.append(bed_tree_from(bed_file_path=strat_bed_fn, contig_name=ctg_name))
elif args.strat_bed_fn is not None:
strat_bed_tree_list = [bed_tree_from(bed_file_path=args.strat_bed_fn, contig_name=ctg_name)]
truth_vcf_fn = file_path_from(file_name=truth_vcf_fn, exit_on_not_found=True, allow_none=False)
input_vcf_fn = file_path_from(file_name=input_vcf_fn, exit_on_not_found=True, allow_none=False)
| # BSD 3-Clause License
#
# Copyright 2023 The University of Hong Kong, Department of Computer Science
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Canonical ordering of the human major contigs (1-22, X, Y), listed both with
# and without the "chr" prefix; list position defines sort precedence.
major_contigs_order = ["chr" + str(a) for a in list(range(1, 23)) + ["X", "Y"]] + [str(a) for a in
                                                                                  list(range(1, 23)) + ["X", "Y"]]
# Same contig names as a set, for O(1) membership tests.
major_contigs = {"chr" + str(a) for a in list(range(1, 23)) + ["X", "Y"]}.union(
    {str(a) for a in list(range(1, 23)) + ["X", "Y"]})
def sort_key(item):
    """Sort key for (contig, position) tuples: canonical contig order, then position."""
    # NOTE(review): this lookup dict is rebuilt on every call; consider hoisting
    # it to module level for repeated sorts.
    order_map = {value: index for index, value in enumerate(major_contigs_order)}
    # Raises KeyError for contigs outside major_contigs_order.
    chr = order_map[item[0]]  # NOTE(review): local name shadows the builtin chr()
    pos = item[1]
    return (chr, pos)
def cal_metrics(tp, fp, fn):
    """Return (precision, recall, F1) rounded to 4 decimals; 0.0 when a denominator is zero."""
    precision = tp / (tp + fp) if (tp + fp) > 0 else 0.0
    recall = tp / (tp + fn) if (tp + fn) > 0 else 0.0
    f1_score = 2 * precision * recall / (precision + recall) if precision + recall > 0 else 0.0
    return round(precision, 4), round(recall, 4), round(f1_score, 4)
def output_best_cut_off(fp_qual_dict, tp_qual_dict, fn_count, use_int_cut_off=True, add_tp_fn=False):
    """Sweep quality cut-offs over FP/TP calls and return metric rows sorted by F1 desc.

    Each row is [qual, precision, recall, f1, tp, fp, fn, tp + fn].
    NOTE(review): the ``add_tp_fn`` parameter is currently unused.
    """
    results = []
    if use_int_cut_off:
        # Candidate thresholds: every observed quality, truncated to int.
        qual_list = set([int(q) for q in list(fp_qual_dict.values()) + list(tp_qual_dict.values())])
    else:
        # Fixed grid 0.00, 0.01, ..., 1.00.
        qual_list = [item / 100.0 for item in range(0, 101)]
    for qual in qual_list:
        fp_snv = sum([1 for k, v in fp_qual_dict.items() if v >= qual])
        tp_snv = sum([1 for k, v in tp_qual_dict.items() if v >= qual])
        # TPs filtered out by the cut-off count as additional false negatives.
        fn_snv = fn_count + len(tp_qual_dict) - tp_snv
        snv_pre, snv_rec, snv_f1 = cal_metrics(tp=tp_snv, fp=fp_snv, fn=fn_snv)
        tp_fn = tp_snv + fn_snv
        results.append([qual, snv_pre, snv_rec, snv_f1, tp_snv, fp_snv, fn_snv, tp_fn])
    results = sorted(results, key=lambda x: x[3], reverse=True)
    return results
def compare_vcf(args):
"""
Follow how som.py works
## https://github.com/Illumina/hap.py/blob/master/doc/sompy.md
"""
output_fn = args.output_fn
output_dir = args.output_dir
truth_vcf_fn = args.truth_vcf_fn
input_vcf_fn = args.input_vcf_fn
bed_fn = args.bed_fn
high_confident_only = args.high_confident_only
ctg_name = args.ctg_name
skip_genotyping = args.skip_genotyping
input_filter_tag = args.input_filter_tag
truth_filter_tag = args.truth_filter_tag
discard_fn_out_of_fp_bed = args.discard_fn_out_of_fp_bed
benchmark_indel = args.benchmark_indel
fp_bed_tree = bed_tree_from(bed_file_path=bed_fn, contig_name=ctg_name)
strat_bed_tree_list = []
if args.strat_bed_fn is not None and ',' in args.strat_bed_fn:
for strat_bed_fn in args.strat_bed_fn.split(','):
strat_bed_tree_list.append(bed_tree_from(bed_file_path=strat_bed_fn, contig_name=ctg_name))
elif args.strat_bed_fn is not None:
strat_bed_tree_list = [bed_tree_from(bed_file_path=args.strat_bed_fn, contig_name=ctg_name)]
truth_vcf_fn = file_path_from(file_name=truth_vcf_fn, exit_on_not_found=True, allow_none=False)
input_vcf_fn = file_path_from(file_name=input_vcf_fn, exit_on_not_found=True, allow_none=False)
| truth_vcf_reader = VcfReader(vcf_fn=truth_vcf_fn, | 2 | 2023-11-07 04:39:16+00:00 | 8k |
the-siesta-group/edfio | edfio/edf.py | [
{
"identifier": "RawHeaderFieldDate",
"path": "edfio/_header_field.py",
"snippet": "class RawHeaderFieldDate(RawHeaderField[datetime.date]):\n def __init__(self, length: int, *, is_settable: bool = False) -> None:\n super().__init__(length, is_settable=is_settable)\n\n def decode(self, fiel... | import contextlib
import copy
import datetime
import io
import math
import re
import warnings
import numpy as np
import numpy.typing as npt
from collections.abc import Iterable, Sequence
from dataclasses import dataclass
from decimal import Decimal
from fractions import Fraction
from functools import singledispatch
from math import ceil, floor
from pathlib import Path
from typing import Any, Literal, NamedTuple
from edfio._header_field import (
RawHeaderFieldDate,
RawHeaderFieldFloat,
RawHeaderFieldInt,
RawHeaderFieldStr,
RawHeaderFieldTime,
encode_str,
get_header_fields,
)
from edfio._utils import (
FloatRange,
IntRange,
calculate_gain_and_offset,
decode_edfplus_date,
encode_annotation_duration,
encode_annotation_onset,
encode_edfplus_date,
repr_from_init,
round_float_to_8_characters,
validate_subfields,
) | 4,179 | index = seconds * signal.sampling_frequency
if index != int(index):
raise ValueError(
f"{seconds}s is not a sample time of signal {i} ({signal.label}) with fs={signal.sampling_frequency}Hz"
)
def _shift_startdatetime(self, seconds: float) -> None:
    """Shift the recording start date/time by `seconds` (may be negative).

    If the startdate is anonymized ('X'), only the starttime is written back;
    the date used for the intermediate arithmetic is just a placeholder.
    """
    timedelta = datetime.timedelta(seconds=seconds)
    try:
        startdate = self.startdate
        startdate_anonymized = False
    except AnonymizedDateError:
        # Placeholder date solely to allow the datetime arithmetic below.
        startdate = datetime.date.fromtimestamp(0)
        startdate_anonymized = True
    startdatetime = datetime.datetime.combine(startdate, self.starttime)
    startdatetime += timedelta
    if not startdate_anonymized:
        self.startdate = startdatetime.date()
    self.starttime = startdatetime.time()
def copy(self) -> Edf:
    """
    Create a deep copy of the Edf.

    Returns
    -------
    Edf
        The copied Edf object. Header fields and signals are duplicated
        recursively via `copy.deepcopy`, so mutating the copy never affects
        the original.
    """
    return copy.deepcopy(self)
def _slice_annotations_signal(
    self,
    signal: EdfSignal,
    *,
    start: float,
    stop: float,
    keep_all_annotations: bool,
) -> EdfSignal:
    """Rebuild an "EDF Annotations" signal for the window [start, stop).

    Annotation onsets are re-expressed relative to `start`. Unless
    `keep_all_annotations` is set, annotations outside the window are dropped.
    For the timekeeping signal, each data record's first TAL (its timestamp)
    is skipped during collection and timestamps are regenerated on rebuild.
    """
    is_timekeeping_signal = signal == self._timekeeping_signal
    annotations: list[EdfAnnotation] = []
    for data_record in signal._digital.reshape(
        (-1, signal.samples_per_data_record)
    ):
        annot_dr = _EdfAnnotationsDataRecord.from_bytes(data_record.tobytes())
        if is_timekeeping_signal:
            # First TAL of every record is the record timestamp; drop it.
            annotations.extend(annot_dr.annotations[1:])
        else:
            annotations.extend(annot_dr.annotations)
    annotations = [
        EdfAnnotation(round(a.onset - start, 12), a.duration, a.text)
        for a in annotations
        if keep_all_annotations or start <= a.onset < stop
    ]
    return _create_annotations_signal(
        annotations,
        num_data_records=self.num_data_records,
        data_record_duration=self.data_record_duration,
        with_timestamps=is_timekeeping_signal,
        # Carry the fractional-second part of `start` into the new offset.
        subsecond_offset=self._subsecond_offset + start - int(start),
    )
def _create_annotations_signal(
    annotations: Iterable[EdfAnnotation],
    *,
    num_data_records: int,
    data_record_duration: float,
    with_timestamps: bool = True,
    subsecond_offset: float = 0,
) -> EdfSignal:
    """Pack annotations into an "EDF Annotations" EdfSignal.

    Annotations are sorted (NamedTuple ordering, i.e. by onset first) and
    distributed to the data record covering their onset; onsets before 0 go to
    the first record and onsets at/after the last record's end go to the last
    record. Records are NUL-padded to a common even byte length so the raw
    bytes reinterpret cleanly as int16 samples.
    """
    data_record_starts = np.arange(num_data_records) * data_record_duration
    annotations = sorted(annotations)
    data_records = []
    for i, start in enumerate(data_record_starts):
        end = start + data_record_duration
        tals: list[_EdfTAL] = []
        if with_timestamps:
            # Timestamp TAL: onset only, empty text, no duration.
            tals.append(_EdfTAL(np.round(start + subsecond_offset, 12), None, [""]))
        for ann in annotations:
            if (
                (i == 0 and ann.onset < 0)
                or (i == (num_data_records - 1) and end <= ann.onset)
                or (start <= ann.onset < end)
            ):
                tals.append(
                    _EdfTAL(
                        np.round(ann.onset + subsecond_offset, 12),
                        ann.duration,
                        [ann.text],
                    )
                )
        data_records.append(_EdfAnnotationsDataRecord(tals).to_bytes())
    maxlen = max(len(data_record) for data_record in data_records)
    if maxlen % 2:
        maxlen += 1
    raw = b"".join(dr.ljust(maxlen, b"\x00") for dr in data_records)
    # Avoid division by zero for zero-length data records.
    divisor = data_record_duration if data_record_duration else 1
    signal = EdfSignal(
        np.arange(1.0),  # placeholder signal, as argument `data` is non-optional
        sampling_frequency=maxlen // 2 / divisor,
        physical_range=(-32768, 32767),
    )
    # Bypass the EdfSignal.label setter, which rejects "EDF Annotations".
    signal._label = "EDF Annotations"
    signal._samples_per_data_record = EdfSignal.samples_per_data_record.encode(  # type: ignore[attr-defined]
        maxlen // 2
    )
    signal._digital = np.frombuffer(raw, dtype=np.int16).copy()
    return signal
@dataclass
class _EdfTAL:
onset: float
duration: float | None
texts: list[str]
def to_bytes(self) -> bytes:
timing = encode_annotation_onset(self.onset)
if self.duration is not None:
| from __future__ import annotations
_ANNOTATIONS_PATTERN = re.compile(
"""
([+-]\\d+(?:\\.?\\d+)?) # onset
(?:\x15(\\d+(?:\\.?\\d+)?))? # duration, optional
(?:\x14(.*?)) # annotation texts
\x14\x00 # terminator
""",
re.VERBOSE,
)
class AnonymizedDateError(ValueError):
    """Raised when trying to access an anonymized (stored as ``"X"``) startdate or birthdate."""
class EdfAnnotation(NamedTuple):
    """A single EDF+ annotation.

    As a NamedTuple, instances compare field by field, so sorting a list of
    annotations orders them by onset first.

    Parameters
    ----------
    onset : float
        The annotation onset in seconds from recording start.
    duration : float | None
        The annotation duration in seconds (`None` if annotation has no duration).
    text : str
        The annotation text, can be empty.
    """

    onset: float
    duration: float | None
    text: str
class EdfSignal:
    """A single EDF signal.

    Attributes that might break the signal or file on modification (i.e.,
    `sampling_frequency`, `physical_range`, `digital_range`, `samples_per_data_record`,
    and `reserved`) can not be set after instantiation.

    To reduce memory consumption, signal data is always stored as a 16-bit integer array
    containing the digital values that would be written to the corresponding EDF file.
    Therefore, it is expected that `EdfSignal.data` does not match the physical
    values passed during instantiation exactly.

    Parameters
    ----------
    data : npt.NDArray[np.float64]
        The signal data (physical values).
    sampling_frequency : float
        The sampling frequency in Hz.
    label : str, default: `""`
        The signal's label, e.g., `"EEG Fpz-Cz"` or `"Body temp"`.
    transducer_type : str, default: `""`
        The transducer type, e.g., `"AgAgCl electrode"`.
    physical_dimension : str, default: `""`
        The physical dimension, e.g., `"uV"` or `"degreeC"`
    physical_range : tuple[float, float] | None, default: None
        The physical range given as a tuple of `(physical_min, physical_max)`. If
        `None`, this is determined from the data.
    digital_range : tuple[int, int], default: `(-32768, 32767)`
        The digital range given as a tuple of `(digital_min, digital_max)`. Uses the
        maximum resolution of 16-bit integers by default.
    prefiltering : str, default: `""`
        The signal prefiltering, e.g., `"HP:0.1Hz LP:75Hz"`.
    """

    _label = RawHeaderFieldStr(16, is_settable=True)
    transducer_type = RawHeaderFieldStr(80, is_settable=True)
    """Transducer type, e.g., `"AgAgCl electrode"`."""
    physical_dimension = RawHeaderFieldStr(8, is_settable=True)
    """Physical dimension, e.g., `"uV"` or `"degreeC"`."""
    physical_min = RawHeaderFieldFloat(8)
    """Physical minimum, e.g., `-500` or `34`."""
    physical_max = RawHeaderFieldFloat(8)
    """Physical maximum, e.g., `500` or `40`."""
    digital_min = RawHeaderFieldInt(8)
    """Digital minimum, e.g., `-2048`."""
    digital_max = RawHeaderFieldInt(8)
    """Digital maximum, e.g., `2047`."""
    prefiltering = RawHeaderFieldStr(80, is_settable=True)
    """Signal prefiltering, e.g., `"HP:0.1Hz LP:75Hz"`."""
    samples_per_data_record = RawHeaderFieldInt(8)
    """
    Number of samples in each data record.

    For newly instantiated :class:`EdfSignal` objects, this is only set once
    :meth:`Edf.write` is called.
    """
    reserved = RawHeaderFieldStr(32)
    """Reserved signal header field, always `""`"""

    def __init__(
        self,
        data: npt.NDArray[np.float64],
        sampling_frequency: float,
        *,
        label: str = "",
        transducer_type: str = "",
        physical_dimension: str = "",
        physical_range: tuple[float, float] | None = None,
        digital_range: tuple[int, int] = (-32768, 32767),
        prefiltering: str = "",
    ):
        self._sampling_frequency = sampling_frequency
        self.label = label
        self.transducer_type = transducer_type
        self.physical_dimension = physical_dimension
        self.prefiltering = prefiltering
        self._reserved = EdfSignal.reserved.encode("")
        if not np.all(np.isfinite(data)):
            raise ValueError("Signal data must contain only finite values")
        self._set_physical_range(physical_range, data)
        self._set_digital_range(digital_range)
        self._set_data(data)

    def __repr__(self) -> str:
        info = f"{self.sampling_frequency:g}Hz"
        if self.label:
            info = f"{self.label} " + info
        return f"<EdfSignal {info}>"

    @classmethod
    def _from_raw_header(
        cls,
        sampling_frequency: float,
        *,
        _label: bytes,
        transducer_type: bytes,
        physical_dimension: bytes,
        physical_min: bytes,
        physical_max: bytes,
        digital_min: bytes,
        digital_max: bytes,
        prefiltering: bytes,
        samples_per_data_record: bytes,
        reserved: bytes,
    ) -> EdfSignal:
        """Create an EdfSignal directly from raw (undecoded) header bytes.

        Bypasses `__init__` and its validation; used when reading files.
        """
        sig = object.__new__(cls)
        sig._sampling_frequency = sampling_frequency
        sig._label = EdfSignal._label.decode(_label)  # type: ignore[attr-defined]
        sig._transducer_type = transducer_type  # type: ignore[attr-defined]
        sig._physical_dimension = physical_dimension  # type: ignore[attr-defined]
        sig._physical_min = physical_min  # type: ignore[attr-defined]
        sig._physical_max = physical_max  # type: ignore[attr-defined]
        sig._digital_min = digital_min  # type: ignore[attr-defined]
        sig._digital_max = digital_max  # type: ignore[attr-defined]
        sig._prefiltering = prefiltering  # type: ignore[attr-defined]
        sig._samples_per_data_record = samples_per_data_record  # type: ignore[attr-defined]
        sig._reserved = reserved  # type: ignore[attr-defined]
        return sig

    @classmethod
    def from_hypnogram(
        cls,
        stages: npt.NDArray[np.float64],
        stage_duration: float = 30,
        *,
        label: str = "",
    ) -> EdfSignal:
        """Create an EDF signal from a hypnogram, with scaling according to EDF specs.

        According to the EDF FAQ [1]_, use integer numbers 0, 1, 2, 3, 4, 5, 6, and 9
        for sleep stages W, 1, 2, 3, 4, R, MT, und unscored, respectively. The digital
        range is set to `(0, 9)`.

        Parameters
        ----------
        stages : npt.NDArray[np.float64]
            The sleep stages, coded as integer numbers.
        stage_duration : float, default: `30`
            The duration of each sleep stage in seconds, used to set the sampling
            frequency to its inverse.
        label : str, default: `""`
            The signal's label.

        Returns
        -------
        EdfSignal
            The resulting :class:`EdfSignal` object.

        References
        ----------
        .. [1] EDF FAQ, https://www.edfplus.info/specs/edffaq.html
        """
        allowed_stages = {0, 1, 2, 3, 4, 5, 6, 9}
        if invalid_stages := set(stages) - allowed_stages:
            raise ValueError(f"stages contains invalid values: {invalid_stages}")
        return EdfSignal(
            data=stages,
            sampling_frequency=1 / stage_duration,
            label=label,
            physical_range=(0, 9),
            digital_range=(0, 9),
        )

    @property
    def label(self) -> str:
        """Signal label, e.g., `"EEG Fpz-Cz"` or `"Body temp"`."""
        return self._label

    @label.setter
    def label(self, label: str) -> None:
        # "EDF Annotations" is reserved for annotation signals created internally.
        if label == "EDF Annotations":
            raise ValueError("Ordinary signal label must not be 'EDF Annotations'.")
        self._label = label

    @property
    def physical_range(self) -> FloatRange:
        """The physical range as a tuple of `(physical_min, physical_max)`."""
        return FloatRange(self.physical_min, self.physical_max)

    @property
    def digital_range(self) -> IntRange:
        """The digital range as a tuple of `(digital_min, digital_max)`."""
        return IntRange(self.digital_min, self.digital_max)

    @property
    def sampling_frequency(self) -> float:
        """The sampling frequency in Hz."""
        return self._sampling_frequency

    @property
    def data(self) -> npt.NDArray[np.float64]:
        """
        Numpy array containing the physical signal values as floats.

        To simplify avoiding inconsistencies between signal data and header fields,
        individual values in the returned array can not be modified. Use
        :meth:`EdfSignal.update_data` to overwrite with new physical data.
        """
        try:
            gain, offset = calculate_gain_and_offset(
                self.digital_min,
                self.digital_max,
                self.physical_min,
                self.physical_max,
            )
        except ZeroDivisionError:
            data = self._digital.astype(np.float64)
            warnings.warn(
                f"Digital minimum equals digital maximum ({self.digital_min}) for {self.label}, returning uncalibrated signal."
            )
        except ValueError:
            # Header fields could not be interpreted; fall back to raw digital values.
            data = self._digital.astype(np.float64)
        else:
            data = (self._digital + offset) * gain
        data.setflags(write=False)
        return data

    def update_data(
        self,
        data: npt.NDArray[np.float64],
        *,
        keep_physical_range: bool = False,
        sampling_frequency: float | None = None,
    ) -> None:
        """
        Overwrite physical signal values with an array of equal length.

        Parameters
        ----------
        data : npt.NDArray[np.float64]
            The new physical data.
        keep_physical_range : bool, default: False
            If `True`, the `physical_range` is not modified to accomodate the new data.
        sampling_frequency : float | None, default: None
            If not `None`, the `sampling_frequency` is updated to the new value. The new
            data must match the expected length for the new sampling frequency.

        Raises
        ------
        ValueError
            If the length of `data` does not match the expected sample count.
        """
        expected_length = len(self._digital)
        if (
            sampling_frequency is not None
            and sampling_frequency != self._sampling_frequency
        ):
            expected_length = self._get_expected_new_length(sampling_frequency)
        if len(data) != expected_length:
            # Bug fix: report the actual expected length, which differs from
            # len(self._digital) when a new sampling_frequency was requested.
            raise ValueError(
                f"Signal lengths must match: got {len(data)}, expected {expected_length}."
            )
        physical_range = self.physical_range if keep_physical_range else None
        self._set_physical_range(physical_range, data)
        if sampling_frequency is not None:
            self._sampling_frequency = sampling_frequency
        self._set_data(data)

    def _get_expected_new_length(self, sampling_frequency: float) -> int:
        """Return the sample count implied by `sampling_frequency`, validating it."""
        if sampling_frequency <= 0:
            raise ValueError(
                f"Sampling frequency must be positive, got {sampling_frequency}"
            )
        current_length = len(self._digital)
        expected_length_f = (
            sampling_frequency / self._sampling_frequency * current_length
        )
        if not math.isclose(expected_length_f, round(expected_length_f), rel_tol=1e-10):
            raise ValueError(
                f"Sampling frequency of {sampling_frequency} results in non-integer number of samples ({expected_length_f})"
            )
        return round(expected_length_f)

    def _set_digital_range(self, digital_range: tuple[int, int]) -> None:
        """Validate and store the digital range header fields."""
        digital_range = IntRange(*digital_range)
        if digital_range.min == digital_range.max:
            raise ValueError(
                f"Digital minimum ({digital_range.min}) must differ from digital maximum ({digital_range.max})."
            )
        self._digital_min = EdfSignal.digital_min.encode(digital_range.min)
        self._digital_max = EdfSignal.digital_max.encode(digital_range.max)

    def _set_physical_range(
        self,
        physical_range: tuple[float, float] | None,
        data: npt.NDArray[np.float64],
    ) -> None:
        """Validate and store the physical range, deriving it from `data` if `None`.

        An auto-derived degenerate range (min == max, e.g. for constant data) is
        silently widened by 1; an explicitly passed degenerate range raises.
        """
        if physical_range is None:
            physical_range = FloatRange(data.min(), data.max())
            if physical_range.min == physical_range.max:
                physical_range = FloatRange(physical_range.min, physical_range.max + 1)
        else:
            physical_range = FloatRange(*physical_range)
            if physical_range.min == physical_range.max:
                raise ValueError(
                    f"Physical minimum ({physical_range.min}) must differ from physical maximum ({physical_range.max})."
                )
            data_min = data.min()
            data_max = data.max()
            if data_min < physical_range.min or data_max > physical_range.max:
                raise ValueError(
                    f"Signal range [{data_min}, {data_max}] out of physical range: [{physical_range.min}, {physical_range.max}]"
                )
        # Round outward so the stored 8-character fields still cover the range.
        self._physical_min = EdfSignal.physical_min.encode(
            round_float_to_8_characters(physical_range.min, math.floor)
        )
        self._physical_max = EdfSignal.physical_max.encode(
            round_float_to_8_characters(physical_range.max, math.ceil)
        )

    def _set_data(self, data: npt.NDArray[np.float64]) -> None:
        """Convert physical values to digital int16 samples using the stored ranges."""
        gain, offset = calculate_gain_and_offset(
            self.digital_min,
            self.digital_max,
            self.physical_min,
            self.physical_max,
        )
        self._digital = np.round(data / gain - offset).astype(np.int16)
class Patient:
    """
    Object representation of the local patient identification.

    Parsing from/to the string containing the local_patient_identification header field
    is done according to the EDF+ specs. Subfields must be ASCII (32..126) and may not
    contain spaces.

    Parameters
    ----------
    code : str, default: `"X"`
        The code by which the patient is known in the hospital administration.
    sex : `{"X", "F", "M"}`, default: `"X"`
        Sex, `F` for female, `M` for male, `X` if anonymized.
    birthdate : datetime.date | None, default: None
        Patient birthdate, stored as `X` if `None`.
    name : str, default: `"X"`
        The patient's name, stored as `X` if `None`.
    additional : Sequence[str], default: `()`
        Optional additional subfields. Will be stored in the header field separated by
        spaces.
    """

    def __init__(
        self,
        *,
        code: str = "X",
        sex: Literal["F", "M", "X"] = "X",
        birthdate: datetime.date | None = None,
        name: str = "X",
        additional: Sequence[str] = (),
    ) -> None:
        if sex not in ("F", "M", "X"):
            raise ValueError(f"Invalid sex: {sex}, must be one of F, M, X")
        if birthdate is None:
            birthdate_field = "X"
        else:
            birthdate_field = encode_edfplus_date(birthdate)
        subfields = {
            "code": code,
            "sex": sex,
            "birthdate": birthdate_field,
            "name": name,
            **{f"additional[{i}]": v for i, v in enumerate(additional)},
        }
        validate_subfields(subfields)
        local_patient_identification = " ".join(subfields.values())
        # Result discarded — presumably called to validate that the joined field
        # fits the 80-byte header slot; TODO confirm against encode_str.
        encode_str(local_patient_identification, 80)
        self._local_patient_identification = local_patient_identification

    def __repr__(self) -> str:
        try:
            return repr_from_init(self)
        except Exception:
            # Fall back to the raw field, e.g. for non-EDF+-conformant strings.
            return repr(self._local_patient_identification)

    @classmethod
    def _from_str(cls, string: str) -> Patient:
        """Wrap a raw header string without the subfield validation done by `__init__`."""
        encode_str(string, 80)
        obj = object.__new__(cls)
        obj._local_patient_identification = string
        return obj

    def _to_str(self) -> str:
        """Return the raw 'local patient identification' header string."""
        return self._local_patient_identification

    @property
    def code(self) -> str:
        """The code by which the patient is known in the hospital administration."""
        return self.get_subfield(0)

    @property
    def sex(self) -> str:
        """Sex, `F` for female, `M` for male, `X` if anonymized."""
        return self.get_subfield(1)

    @property
    def birthdate(self) -> datetime.date:
        """Patient birthdate."""
        birthdate_field = self.get_subfield(2)
        if birthdate_field == "X":
            raise AnonymizedDateError("Patient birthdate is not available ('X').")
        return decode_edfplus_date(birthdate_field)

    @property
    def name(self) -> str:
        """The patient's name."""
        return self.get_subfield(3)

    @property
    def additional(self) -> tuple[str, ...]:
        """Optional additional subfields."""
        return tuple(self._local_patient_identification.split()[4:])

    def get_subfield(self, idx: int) -> str:
        """
        Access a subfield of the local patient identification field by index.

        Parameters
        ----------
        idx : int
            The index of the subfield to access.

        Returns
        -------
        str
            The subfield at the specified index. If the index exceeds the actually
            available number of subfields, the return value is `"X"`.
        """
        subfields = self._local_patient_identification.split()
        if len(subfields) <= idx:
            return "X"
        return subfields[idx]
class Recording:
    """
    Object representation of the local recording identification.

    Parsing from/to the string containing the local_recording_identification header
    field is done according to the EDF+ specs. Subfields must be ASCII (32..126) and may
    not contain spaces.

    Parameters
    ----------
    startdate : datetime.date | None, default: None
        The recording startdate.
    hospital_administration_code : str, default: `"X"`
        The hospital administration code of the investigation, e.g., EEG number or PSG
        number.
    investigator_technician_code : str, default: `"X"`
        A code specifying the responsible investigator or technician.
    equipment_code : str, default: `"X"`
        A code specifying the used equipment.
    additional : Sequence[str], default: `()`
        Optional additional subfields. Will be stored in the header field separated by
        spaces.
    """

    def __init__(
        self,
        *,
        startdate: datetime.date | None = None,
        hospital_administration_code: str = "X",
        investigator_technician_code: str = "X",
        equipment_code: str = "X",
        additional: Sequence[str] = (),
    ) -> None:
        if startdate is None:
            startdate_field = "X"
        else:
            startdate_field = encode_edfplus_date(startdate)
        subfields = {
            "startdate": startdate_field,
            "hospital_administration_code": hospital_administration_code,
            "investigator_technician_code": investigator_technician_code,
            "equipment_code": equipment_code,
            **{f"additional[{i}]": v for i, v in enumerate(additional)},
        }
        validate_subfields(subfields)
        # EDF+ requires the literal "Startdate" as the first subfield.
        local_recording_identification = " ".join(("Startdate", *subfields.values()))
        # Result discarded — presumably called to validate that the joined field
        # fits the 80-byte header slot; TODO confirm against encode_str.
        encode_str(local_recording_identification, 80)
        self._local_recording_identification = local_recording_identification

    def __repr__(self) -> str:
        try:
            return repr_from_init(self)
        except Exception:
            # Fall back to the raw field, e.g. for non-EDF+-conformant strings.
            return repr(self._local_recording_identification)

    @classmethod
    def _from_str(cls, string: str) -> Recording:
        """Wrap a raw header string without the subfield validation done by `__init__`."""
        encode_str(string, 80)
        obj = object.__new__(cls)
        obj._local_recording_identification = string
        return obj

    def _to_str(self) -> str:
        """Return the raw 'local recording identification' header string."""
        return self._local_recording_identification

    @property
    def startdate(self) -> datetime.date:
        """The recording startdate."""
        if not self._local_recording_identification.startswith("Startdate "):
            raise ValueError(
                f"Local recording identification field {self._local_recording_identification!r} does not follow EDF+ standard."
            )
        startdate_field = self.get_subfield(1)
        if startdate_field == "X":
            raise AnonymizedDateError("Recording startdate is not available ('X').")
        return decode_edfplus_date(startdate_field)

    @property
    def hospital_administration_code(self) -> str:
        """The hospital administration code of the investigation."""
        return self.get_subfield(2)

    @property
    def investigator_technician_code(self) -> str:
        """A code specifying the responsible investigator or technician."""
        return self.get_subfield(3)

    @property
    def equipment_code(self) -> str:
        """A code specifying the used equipment."""
        return self.get_subfield(4)

    @property
    def additional(self) -> tuple[str, ...]:
        """Optional additional subfields."""
        return tuple(self._local_recording_identification.split()[5:])

    def get_subfield(self, idx: int) -> str:
        """
        Access a subfield of the local recording identification field by index.

        Parameters
        ----------
        idx : int
            The index of the subfield to access. The first subfield (starting at
            index 0) should always be "Startdate" according to the EDF+ spedification.

        Returns
        -------
        str
            The subfield at the specified index. If the index exceeds the actually
            available number of subfields, the return value is `"X"`.
        """
        subfields = self._local_recording_identification.split()
        if len(subfields) <= idx:
            return "X"
        return subfields[idx]
class Edf:
    """Python representation of an EDF file.

    EDF header fields are exposed as properties with appropriate data types (i.e.,
    string, numeric, date, or time objects). Fields that might break the file on
    modification (i.e., `version`, `bytes_in_header_record`, `reserved`,
    `num_data_records`, `data_record_duration`, and `num_signals`) can not be set after
    instantiation.

    Note that the startdate has to be set via the parameter `recording`.

    For writing an EDF file with a non-integer seconds duration, currently an
    appropriate value for `data_record_duration` has to be provided manually.

    Parameters
    ----------
    signals : Sequence[EdfSignal]
        The (non-annotation) signals to be contained in the EDF file.
    patient : Patient | None, default: None
        The "local patient identification", containing patient code, sex, birthdate,
        name, and optional additional fields. If `None`, the field is set to `X X X X`
        in accordance with EDF+ specs.
    recording : Recording | None, default: None
        The "local recording identification", containing recording startdate, hospital
        administration code, investigator/technical code, equipment code, and optional
        additional fields. If `None`, the field is set to `Startdate X X X X` in
        accordance with EDF+ specs.
    starttime : datetime.time | None, default: None
        The starttime of the recording. If `None`, `00.00.00` is used. If `starttime`
        contains microseconds, an EDF+C file is created.
    data_record_duration : float | None, default: None
        The duration of each data record in seconds. If `None`, an appropriate value is
        chosen automatically.
    annotations : Iterable[EdfAnnotation] | None, default: None
        The annotations, consisting of onset, duration (optional), and text. If not
        `None`, an EDF+C file is created.
    """

    # Raw header descriptors: each maps to a fixed-width field of the EDF
    # header record; the integer argument is the field width in bytes.
    version = RawHeaderFieldInt(8)
    """EDF version, always `0`"""
    local_patient_identification = RawHeaderFieldStr(80, is_settable=True)
    """
    Unparsed string representation of the legacy local patient identification.

    See also
    --------
    patient: Parsed representation, as a :class:`Patient` object.
    """
    local_recording_identification = RawHeaderFieldStr(80, is_settable=True)
    """
    Unparsed string representation of the legacy local recording identification.

    See also
    --------
    recording: Parsed representation, as a :class:`Recording` object.
    """
    # Legacy date/time fields have second resolution only; sub-second offsets
    # live in the timekeeping annotation signal.
    _startdate = RawHeaderFieldDate(8, is_settable=True)
    _starttime = RawHeaderFieldTime(8, is_settable=True)
    bytes_in_header_record = RawHeaderFieldInt(8)
    """Number of bytes in the header record."""
    reserved = RawHeaderFieldStr(44)
    """`"EDF+C"` for an EDF+C file, else `""`."""
    num_data_records = RawHeaderFieldInt(8)
    """Number of data records in the recording."""
    _data_record_duration = RawHeaderFieldFloat(8, is_settable=True)
    _num_signals = RawHeaderFieldInt(4, is_settable=True)
def __init__(
    self,
    signals: Sequence[EdfSignal],
    *,
    patient: Patient | None = None,
    recording: Recording | None = None,
    starttime: datetime.time | None = None,
    data_record_duration: float | None = None,
    annotations: Iterable[EdfAnnotation] | None = None,
):
    """Build the in-memory EDF representation; see the class docstring for parameters."""
    if not signals and not annotations:
        raise ValueError("Edf must contain either signals or annotations")
    if patient is None:
        patient = Patient()
    if recording is None:
        recording = Recording()
    if starttime is None:
        starttime = datetime.time(0, 0, 0)
    if data_record_duration is None:
        data_record_duration = _calculate_data_record_duration(signals)
    elif len(signals) == 0 and data_record_duration != 0:
        raise ValueError(
            "Data record duration must be zero for annotation-only files"
        )
    self._data_record_duration = data_record_duration
    self._set_num_data_records_with_signals(signals)
    self._version = Edf.version.encode(0)
    self.local_patient_identification = patient._to_str()
    self.local_recording_identification = recording._to_str()
    self._set_startdate_with_recording(recording)
    # The legacy starttime field has second resolution; the sub-second part
    # is stored via the annotations signal created below.
    self._starttime = starttime.replace(microsecond=0)
    self._reserved = Edf.reserved.encode("")
    if starttime.microsecond and annotations is None:
        warnings.warn("Creating EDF+C to store microsecond starttime.")
    if annotations is not None or starttime.microsecond:
        # Appending an annotations signal turns the file into EDF+C.
        signals = (
            *signals,
            _create_annotations_signal(
                annotations if annotations is not None else (),
                num_data_records=self.num_data_records,
                data_record_duration=self.data_record_duration,
                subsecond_offset=starttime.microsecond / 1_000_000,
            ),
        )
        self._reserved = Edf.reserved.encode("EDF+C")
    self._set_signals(signals)
def __repr__(self) -> str:
    """Return a short summary such as ``<Edf 2 signals 1 annotation>``."""
    def _pluralized(count: int, noun: str) -> str:
        suffix = "" if count == 1 else "s"
        return f"{count} {noun}{suffix}"

    signals_part = _pluralized(len(self.signals), "signal")
    annotations_part = _pluralized(len(self.annotations), "annotation")
    return f"<Edf {signals_part} {annotations_part}>"
def _load_data(self, file: Path | io.BufferedReader | io.BytesIO) -> None:
    """Read all data records and distribute the samples to the signals."""
    lens = [signal.samples_per_data_record for signal in self._signals]
    datarecord_len = sum(lens)
    if not isinstance(file, Path):
        # File-like object: the header has already been consumed, so the
        # stream position is at the first data record.
        datarecords = np.frombuffer(file.read(), dtype=np.int16)
    else:
        # Real file: memory-map past the header instead of reading eagerly.
        datarecords = np.memmap(
            file,
            dtype=np.int16,
            mode="r",
            offset=self.bytes_in_header_record,
        )
    datarecords.shape = (self.num_data_records, datarecord_len)
    # Signals are interleaved per data record; slice each signal's columns
    # out and flatten them back into one contiguous sample stream.
    ends = np.cumsum(lens)
    starts = ends - lens
    for signal, start, end in zip(self._signals, starts, ends):
        signal._digital = datarecords[:, start:end].flatten()
def _read_header(self, buffer: io.BufferedReader | io.BytesIO) -> None:
    """Parse the static header record and the per-signal headers."""
    for header_name, length in get_header_fields(Edf):
        setattr(self, "_" + header_name, buffer.read(length))
    # The signal header block occupies 256 bytes per signal.
    self._signals = self._parse_signal_headers(buffer.read(256 * self._num_signals))
@property
def signals(self) -> tuple[EdfSignal, ...]:
    """
    Ordinary signals contained in the recording.

    Annotation signals are excluded. Individual signals can not be removed, added,
    or replaced by modifying this property. Use :meth:`Edf.append_signals`,
    :meth:`Edf.drop_signals`, or :attr:`EdfSignal.data`, respectively.
    """
    return tuple(s for s in self._signals if s.label != "EDF Annotations")

def _set_signals(self, signals: Sequence[EdfSignal]) -> None:
    """Replace all signals and update the dependent header fields."""
    signals = tuple(signals)
    self._set_num_data_records_with_signals(signals)
    self._signals = signals
    # Header size: 256 bytes static part plus 256 bytes per signal header.
    self._bytes_in_header_record = Edf.bytes_in_header_record.encode(
        256 * (len(signals) + 1)
    )
    self._num_signals = len(signals)
    if all(s.label == "EDF Annotations" for s in signals):
        # Annotation-only file: the data record duration must be zero.
        self._data_record_duration = 0
def _set_num_data_records_with_signals(
    self,
    signals: Sequence[EdfSignal],
) -> None:
    """Derive and store `num_data_records` from the given signals.

    Raises
    ------
    ValueError
        If the signals have inconsistent durations or their lengths cannot be
        split evenly into the computed number of data records.
    """
    if not signals:
        num_data_records = 1
    else:
        # Round to avoid spurious mismatches from float division noise.
        signal_durations = [
            round(len(s._digital) / s.sampling_frequency, 12) for s in signals
        ]
        if any(v != signal_durations[0] for v in signal_durations[1:]):
            raise ValueError(
                f"Inconsistent signal durations (in seconds): {signal_durations}"
            )
        num_data_records = _calculate_num_data_records(
            signal_durations[0],
            self.data_record_duration,
        )
        signal_lengths = [len(s._digital) for s in signals]
        if any(l % num_data_records for l in signal_lengths):
            raise ValueError(
                f"Not all signal lengths can be split into {num_data_records} data records: {signal_lengths}"
            )
    self._num_data_records = Edf.num_data_records.encode(num_data_records)
def _parse_signal_headers(self, raw_signal_headers: bytes) -> tuple[EdfSignal, ...]:
    """
    Parse the concatenated signal header block into EdfSignal objects.

    The signal header stores each field contiguously for all signals
    (field-major layout), so the raw bytes are first split per field and then
    regrouped per signal.

    Raises
    ------
    ValueError
        If `data_record_duration` is zero for an ordinary (non-annotation)
        signal, in which case no sampling frequency can be derived.
    """
    raw_headers_split: dict[str, list[bytes]] = {}
    start = 0
    for header_name, length in get_header_fields(EdfSignal):
        end = start + length * self._num_signals
        raw_header = raw_signal_headers[start:end]
        raw_headers_split[header_name] = [
            raw_header[i : length + i] for i in range(0, len(raw_header), length)
        ]
        start = end
    signals = []
    for i in range(self._num_signals):
        raw_signal_header = {
            key: raw_headers_split[key][i] for key in raw_headers_split
        }
        try:
            sampling_frequency = (
                int(raw_signal_header["samples_per_data_record"])
                / self.data_record_duration
            )
        except ZeroDivisionError as e:
            if raw_signal_header["_label"].rstrip() == b"EDF Annotations":
                # Annotation signals carry no samples in the ordinary sense.
                sampling_frequency = 0
            else:
                # Bug fix: previously `sampling_frequency` was left unbound
                # here, causing a confusing NameError on the call below.
                raise ValueError(
                    f"Cannot derive sampling frequency of signal {i}: "
                    "data_record_duration is 0 for a non-annotation signal"
                ) from e
        signals.append(
            EdfSignal._from_raw_header(sampling_frequency, **raw_signal_header)
        )
    return tuple(signals)
def write(self, target: Path | str | io.BufferedWriter | io.BytesIO) -> None:
    """
    Write an Edf to a file or file-like object.

    Parameters
    ----------
    target : Path | str | io.BufferedWriter | io.BytesIO
        The file location (path object or string) or file-like object to write to.
    """
    if self.num_data_records == -1:
        warnings.warn("num_data_records=-1, determining correct value from data")
        num_data_records = _calculate_num_data_records(
            len(self._signals[0]._digital) * self._signals[0].sampling_frequency,
            self.data_record_duration,
        )
    else:
        num_data_records = self.num_data_records
    for signal in self._signals:
        signal._samples_per_data_record = EdfSignal.samples_per_data_record.encode(  # type: ignore[attr-defined]
            len(signal._digital) // num_data_records
        )
    # Assemble the header: static fields first, then each signal header
    # field contiguously for all signals (field-major layout).
    header_records = []
    for header_name, _ in get_header_fields(Edf):
        header_records.append(getattr(self, "_" + header_name))
    for header_name, _ in get_header_fields(EdfSignal):
        for signal in self._signals:
            header_records.append(getattr(signal, "_" + header_name))
    header_record = b"".join(header_records)
    # Interleave all signals' samples into the data records.
    lens = [signal.samples_per_data_record for signal in self._signals]
    ends = np.cumsum(lens)
    starts = ends - lens
    data_record = np.empty((num_data_records, sum(lens)), dtype=np.int16)
    for signal, start, end in zip(self._signals, starts, ends):
        data_record[:, start:end] = signal._digital.reshape((-1, end - start))
    if isinstance(target, str):
        target = Path(target)
    if isinstance(target, io.BufferedWriter):
        target.write(header_record)
        data_record.tofile(target)
    elif isinstance(target, io.BytesIO):
        # ndarray.tofile needs a real file, so serialize via tobytes here.
        target.write(header_record)
        target.write(data_record.tobytes())
    else:
        with target.expanduser().open("wb") as file:
            file.write(header_record)
            data_record.tofile(file)
@property
def labels(self) -> tuple[str, ...]:
    """
    The labels of all signals contained in the Edf.

    Returns
    -------
    tuple[str, ...]
        The labels, in order of the signals.
    """
    # Built from `signals`, so annotation signals are excluded.
    return tuple(s.label for s in self.signals)

def get_signal(self, label: str) -> EdfSignal:
    """
    Retrieve a single signal by its label.

    The label has to be unique - a ValueError is raised if it is ambiguous or does
    not exist.

    Parameters
    ----------
    label : str
        A label identifying a single signal

    Returns
    -------
    EdfSignal
        The signal corresponding to the given label.
    """
    count = self.labels.count(label)
    if count == 0:
        raise ValueError(
            f"No signal with label {label!r}, possible options: {self.labels}"
        )
    if count > 1:
        indices = [i for i, l in enumerate(self.labels) if l == label]
        raise ValueError(f"Ambiguous label {label!r} identifies indices {indices}")
    return self.signals[self.labels.index(label)]
@property
def patient(self) -> Patient:
    """
    Parsed object representation of the local patient identification.

    See :class:`Patient` for information on its attributes.
    """
    return Patient._from_str(self.local_patient_identification)

@patient.setter
def patient(self, patient: Patient) -> None:
    # Only the raw string form is stored; parsing happens on access.
    self.local_patient_identification = patient._to_str()

@property
def recording(self) -> Recording:
    """
    Parsed object representation of the local recording identification.

    See :class:`Recording` for information on its attributes.
    """
    return Recording._from_str(self.local_recording_identification)

@recording.setter
def recording(self, recording: Recording) -> None:
    # Keep the legacy startdate header field in sync with the new recording.
    self._set_startdate_with_recording(recording)
    self.local_recording_identification = recording._to_str()
@property
def startdate(self) -> datetime.date:
    """
    Recording startdate.

    If the :attr:`local_recording_identification` conforms to the EDF+ standard, the
    startdate provided there is used. If not, this falls back to the legacy
    :attr:`startdate` field. If both differ, a warning is issued and the EDF+ field
    is preferred. Raises an `AnonymizedDateError` if the EDF+ field is anonymized
    (i.e., begins with `Startdate X`).
    """
    # The comparison may raise for anonymized or non-EDF+ fields; that is
    # expected and only the warning is suppressed, not the result below.
    with contextlib.suppress(Exception):
        if self._startdate != self.recording.startdate:
            warnings.warn(
                f"Different values in startdate fields: {self._startdate}, {self.recording.startdate}"
            )
    try:
        return self.recording.startdate
    except AnonymizedDateError:
        raise
    except ValueError:
        # Non-EDF+ recording field: fall back to the legacy header startdate.
        return self._startdate

@startdate.setter
def startdate(self, startdate: datetime.date) -> None:
    self._startdate = startdate
    try:
        self.recording.startdate  # noqa: B018
    except AnonymizedDateError:
        # Anonymized EDF+ field: still rewrite the date subfield below.
        pass
    except Exception:
        # Recording field is not EDF+ conformant; leave it untouched.
        return
    recording_subfields = self.local_recording_identification.split()
    recording_subfields[1] = encode_edfplus_date(startdate)
    self.local_recording_identification = " ".join(recording_subfields)
@property
def _subsecond_offset(self) -> float:
    """Sub-second starttime offset, read from the first timekeeping TAL."""
    try:
        timekeeping_raw = self._timekeeping_signal._digital.tobytes()
        # The first TAL ends at the first NUL byte; its onset encodes the
        # sub-second offset of the recording start.
        first_data_record = timekeeping_raw[: timekeeping_raw.find(b"\x00") + 1]
        return _EdfAnnotationsDataRecord.from_bytes(first_data_record).tals[0].onset
    except StopIteration:
        # No timekeeping/annotation signal (plain EDF): no sub-second offset.
        return 0
@property
def starttime(self) -> datetime.time:
    """
    Recording starttime.

    In EDF+ files, microsecond accuracy is supported.
    """
    subsecond_offset = self._subsecond_offset
    try:
        return self._starttime.replace(
            microsecond=round(subsecond_offset * 1000000)
        )
    except ValueError as e:
        raise ValueError(
            f"Subsecond offset in first annotation must be 0.X, is {subsecond_offset}"
        ) from e

@starttime.setter
def starttime(self, starttime: datetime.time) -> None:
    onset_change = starttime.microsecond / 1000000 - self._subsecond_offset
    self._starttime = starttime.replace(microsecond=0)
    if starttime.microsecond != self.starttime.microsecond:
        # The sub-second part changed: shift all TAL onsets in the
        # timekeeping signal and re-serialize its data records.
        timekeeping_signal = self._timekeeping_signal
        data_records = []
        for data_record in timekeeping_signal._digital.reshape(
            (-1, timekeeping_signal.samples_per_data_record)
        ):
            annot_dr = _EdfAnnotationsDataRecord.from_bytes(data_record.tobytes())
            for tal in annot_dr.tals:
                tal.onset = round(tal.onset + onset_change, 12)
            data_records.append(annot_dr.to_bytes())
        maxlen = max(len(data_record) for data_record in data_records)
        if maxlen % 2:
            maxlen += 1  # int16 samples require an even byte count per record
        raw = b"".join(dr.ljust(maxlen, b"\x00") for dr in data_records)
        timekeeping_signal._samples_per_data_record = EdfSignal.samples_per_data_record.encode(  # type: ignore[attr-defined]
            maxlen // 2
        )
        # Bug fix: sampling frequency is samples per record DIVIDED by the
        # record duration (cf. `_parse_signal_headers` and
        # `_create_annotations_signal`); the previous code multiplied.
        # The zero-duration guard mirrors `_create_annotations_signal`.
        divisor = self.data_record_duration if self.data_record_duration else 1
        timekeeping_signal._sampling_frequency = maxlen // 2 / divisor
        timekeeping_signal._digital = np.frombuffer(raw, dtype=np.int16)
def _set_startdate_with_recording(self, recording: Recording) -> None:
    """Sync the legacy startdate field from `recording`."""
    try:
        self._startdate = recording.startdate
    except AnonymizedDateError:
        # 1985-01-01 is the placeholder date also used by `anonymize`.
        self._startdate = datetime.date(1985, 1, 1)

@property
def data_record_duration(self) -> float:
    """Duration of each data record in seconds."""
    return self._data_record_duration
def update_data_record_duration(
    self,
    data_record_duration: float,
    method: Literal["strict", "pad", "truncate"] = "strict",
) -> None:
    """
    Update the data record duration.

    This operation will fail if the new duration is incompatible with the current
    sampling frequencies.

    Parameters
    ----------
    data_record_duration : float
        The new data record duration in seconds.
    method : `{"strict", "pad", "truncate"}`, default: `"strict"`
        How to handle the case where the new duration does not divide the Edf
        duration evenly

        - "strict": Raise a ValueError
        - "pad": Pad the data with zeros to the next compatible duration. If zero
          is outside the physical range, data is padded with the physical minimum.
        - "truncate": Truncate the data to the previous compatible duration (might
          lead to loss of data)
    """
    if data_record_duration == self.data_record_duration:
        return
    if data_record_duration <= 0:
        raise ValueError(
            f"Data record duration must be positive, got {data_record_duration}"
        )
    if not self.signals:
        # Annotation-only files must keep their zero record duration.
        raise ValueError(
            "Data record duration must be zero for annotation-only files"
        )
    for signal in self.signals:
        # Every signal must have an integer number of samples per record.
        spr = signal.sampling_frequency * data_record_duration
        if spr % 1:
            raise ValueError(
                f"Cannot set data record duration to {data_record_duration}: Incompatible sampling frequency {signal.sampling_frequency} Hz"
            )
    num_data_records = self._pad_or_truncate_signals(data_record_duration, method)
    self._update_record_duration_in_annotation_signals(
        data_record_duration, num_data_records
    )
    self._data_record_duration = data_record_duration
    self._num_data_records = Edf.num_data_records.encode(num_data_records)
@property
def num_signals(self) -> int:
    """Return the number of signals, excluding annotation signals for EDF+."""
    return len(self.signals)

def _pad_or_truncate_signals(
    self, data_record_duration: float, method: Literal["strict", "pad", "truncate"]
) -> int:
    """Adjust signal data for the new record duration; return the record count."""
    if method == "pad":
        new_duration = (
            ceil(self.duration / data_record_duration) * data_record_duration
        )
        self._pad_or_truncate_data(new_duration)
        return round(new_duration / data_record_duration)
    if method == "truncate":
        new_duration = (
            floor(self.duration / data_record_duration) * data_record_duration
        )
        self._pad_or_truncate_data(new_duration)
        return round(new_duration / data_record_duration)
    # "strict": delegate to _calculate_num_data_records, which is expected to
    # reject durations that do not divide the recording evenly.
    return _calculate_num_data_records(self.duration, data_record_duration)
def _update_record_duration_in_annotation_signals(
    self, data_record_duration: float, num_data_records: int
) -> None:
    """Rebuild all annotation signals for the changed data record layout."""
    signals = list(self._signals)
    for idx, signal in enumerate(self._signals):
        if signal not in self._annotation_signals:
            continue
        annotations = []
        for data_record in signal._digital.reshape(
            (-1, signal.samples_per_data_record)
        ):
            annot_dr = _EdfAnnotationsDataRecord.from_bytes(data_record.tobytes())
            if signal is self._timekeeping_signal:
                # Drop the timekeeping annotation; it is regenerated by
                # _create_annotations_signal (with_timestamps=True).
                annotations.extend(annot_dr.annotations[1:])
            else:
                annotations.extend(annot_dr.annotations)
        signals[idx] = _create_annotations_signal(
            [
                EdfAnnotation(a.onset - self._subsecond_offset, a.duration, a.text)
                for a in annotations
            ],
            num_data_records=num_data_records,
            data_record_duration=data_record_duration,
            with_timestamps=signal is self._timekeeping_signal,
            subsecond_offset=self._subsecond_offset,
        )
    self._signals = tuple(signals)
def _pad_or_truncate_data(self, new_duration: float) -> None:
    """Pad (with zero or the physical minimum) or truncate all signals."""
    for signal in self.signals:
        n_samples = round(new_duration * signal.sampling_frequency)
        diff = n_samples - len(signal._digital)
        if diff > 0:
            physical_pad_value = 0.0
            if signal.physical_min > 0 or signal.physical_max < 0:
                # Zero lies outside the physical range; pad with the minimum.
                physical_pad_value = signal.physical_min
            signal._set_data(
                np.pad(signal.data, (0, diff), constant_values=physical_pad_value)
            )
        elif diff < 0:
            signal._set_data(signal.data[:diff])
def anonymize(self) -> None:
    """
    Anonymize a recording.

    Header fields are modified as follows:
      - local patient identification is set to `X X X X`
      - local recording identification is set to `Startdate X X X X`
      - startdate is set to `01.01.85`
      - starttime is set to `00.00.00`

    For EDF+ files, subsecond starttimes specified via an annotations signal are
    removed.
    """
    self.patient = Patient()
    # The recording setter also resets the legacy startdate (to 1985-01-01,
    # via _set_startdate_with_recording).
    self.recording = Recording()
    self.starttime = datetime.time(0, 0, 0)
def drop_signals(self, drop: Iterable[int | str]) -> None:
    """
    Drop signals by index or label.

    Signal indices (int) and labels (str) can be provided in the same iterable. For
    ambiguous labels, all corresponding signals are dropped. Raises a ValueError if
    at least one of the provided identifiers does not correspond to a signal.

    Parameters
    ----------
    drop : Iterable[int | str]
        The signals to drop, identified by index or label.
    """
    if isinstance(drop, str):
        # A bare string would otherwise be iterated character by character.
        drop = [drop]
    selected: list[EdfSignal] = []
    dropped: list[int | str] = []
    i = 0  # counts ordinary signals only; annotation signals are always kept
    for signal in self._signals:
        if signal.label == "EDF Annotations":
            selected.append(signal)
            continue
        if i in drop or signal.label in drop:
            # Record both identifiers so the check below accepts either form.
            dropped.append(i)
            dropped.append(signal.label)
        else:
            selected.append(signal)
        i += 1
    if not_dropped := set(drop) - set(dropped):
        raise ValueError(f"No signal found with index/label {not_dropped}")
    self._signals = tuple(selected)
    self._bytes_in_header_record = Edf.bytes_in_header_record.encode(
        256 * (len(selected) + 1)
    )
    self._num_signals = len(selected)
def append_signals(self, new_signals: EdfSignal | Iterable[EdfSignal]) -> None:
    """
    Append one or more signal(s) to the Edf recording.

    Every signal must be compatible with the current `data_record_duration` and all
    signal durations must match the overall recording duration. For recordings
    containing EDF+ annotation signals, the new signals are inserted after the last
    ordinary (i.e. non-annotation) signal.

    Parameters
    ----------
    new_signals : EdfSignal | Iterable[EdfSignal]
        The signal(s) to add.
    """
    if isinstance(new_signals, EdfSignal):
        new_signals = [new_signals]
    # Find the insertion point: just after the last non-annotation signal.
    last_ordinary_index = 0
    for i, signal in enumerate(self._signals):
        if signal.label != "EDF Annotations":
            last_ordinary_index = i
    self._set_signals(
        [
            *self._signals[: last_ordinary_index + 1],
            *new_signals,
            *self._signals[last_ordinary_index + 1 :],
        ]
    )
@property
def _annotation_signals(self) -> Iterable[EdfSignal]:
    """All EDF+ annotation signals, as a lazy generator."""
    return (signal for signal in self._signals if signal.label == "EDF Annotations")

@property
def _timekeeping_signal(self) -> EdfSignal:
    """The first annotation signal; raises StopIteration if there is none."""
    return next(iter(self._annotation_signals))

@property
def duration(self) -> float:
    """Recording duration in seconds."""
    return self.num_data_records * self.data_record_duration
@property
def annotations(self) -> tuple[EdfAnnotation, ...]:
    """
    All annotations contained in the Edf, sorted chronologically.

    Does not include timekeeping annotations.
    """
    annotations: list[EdfAnnotation] = []
    for i, signal in enumerate(self._annotation_signals):
        for data_record in signal._digital.reshape(
            (-1, signal.samples_per_data_record)
        ):
            annot_dr = _EdfAnnotationsDataRecord.from_bytes(data_record.tobytes())
            if i == 0:
                # from https://www.edfplus.info/specs/edfplus.html#timekeeping:
                # The first annotation of the first 'EDF Annotations' signal in each
                # data record is empty, but its timestamp specifies how many seconds
                # after the file startdate/time that data record starts.
                annotations.extend(annot_dr.annotations[1:])
            else:
                annotations.extend(annot_dr.annotations)
    subsecond_offset = self._subsecond_offset
    # Report onsets relative to the recording starttime (which includes the
    # sub-second offset), rounded to avoid float noise.
    annotations = [
        EdfAnnotation(
            round(ann.onset - subsecond_offset, 12), ann.duration, ann.text
        )
        for ann in annotations
    ]
    return tuple(sorted(annotations))
def drop_annotations(self, text: str) -> None:
    """
    Drop annotations with a given text.

    Parameters
    ----------
    text : str
        All annotations whose text exactly matches this parameter are removed.
    """
    for signal in self._annotation_signals:
        for data_record in signal._digital.reshape(
            (-1, signal.samples_per_data_record)
        ):
            annotations = _EdfAnnotationsDataRecord.from_bytes(
                data_record.tobytes()
            )
            annotations.drop_annotations_with_text(text)
            # Rewrite the record in place, NUL-padded to its original byte
            # size (2 bytes per int16 sample).
            data_record[:] = np.frombuffer(
                annotations.to_bytes().ljust(len(data_record) * 2, b"\x00"),
                dtype=np.int16,
            )
def to_bytes(self) -> bytes:
    """
    Convert an Edf to a `bytes` object.

    Returns
    -------
    bytes
        The binary representation of the Edf object (i.e., what a file created
        with `Edf.write` would contain).
    """
    buffer = io.BytesIO()
    self.write(buffer)
    return buffer.getvalue()
def slice_between_seconds(
    self,
    start: float,
    stop: float,
    *,
    keep_all_annotations: bool = False,
) -> None:
    """
    Slice to the interval between two times.

    The sample point corresponding to `stop` is excluded. `start` and `stop` are
    given in seconds from recording start and have to correspond exactly to a sample
    time in all non-annotation signals.

    Parameters
    ----------
    start : float
        Start time in seconds from recording start.
    stop : float
        Stop time in seconds from recording start.
    keep_all_annotations : bool, default: False
        If set to `True`, annotations outside the selected time interval are kept.
    """
    signals: list[EdfSignal] = []
    self._verify_seconds_inside_recording_time(start)
    self._verify_seconds_inside_recording_time(stop)
    self._verify_seconds_coincide_with_sample_time(start)
    self._verify_seconds_coincide_with_sample_time(stop)
    self._num_data_records = Edf.num_data_records.encode(
        int((stop - start) / self.data_record_duration)
    )
    for signal in self._signals:
        if signal.label == "EDF Annotations":
            signals.append(
                self._slice_annotations_signal(
                    signal,
                    start=start,
                    stop=stop,
                    keep_all_annotations=keep_all_annotations,
                )
            )
        else:
            start_index = start * signal.sampling_frequency
            stop_index = stop * signal.sampling_frequency
            signal._digital = signal._digital[int(start_index) : int(stop_index)]
            signals.append(signal)
    self._set_signals(signals)
    # The slice start becomes the new time zero, so shift the recording
    # start forward accordingly (whole seconds only).
    self._shift_startdatetime(int(start))
def slice_between_annotations(
    self,
    start_text: str,
    stop_text: str,
    *,
    keep_all_annotations: bool = False,
) -> None:
    """
    Slice to the interval between two EDF+ annotations.

    The sample point corresponding to the onset of the annotation identified by
    `stop_text` is excluded. `start_text` and `stop_text` each have to uniquely
    identify a single annotation, whose onset corresponds exactly to a sample time
    in all non-annotation signals.

    Parameters
    ----------
    start_text : str
        Text identifying the start annotation.
    stop_text : str
        Text identifying the stop annotation.
    keep_all_annotations : bool, default: False
        If set to `True`, annotations outside the selected time interval are kept.
    """
    # Delegates to slice_between_seconds using the matched annotations' onsets.
    self.slice_between_seconds(
        self._get_annotation_by_text(start_text).onset,
        self._get_annotation_by_text(stop_text).onset,
        keep_all_annotations=keep_all_annotations,
    )
def _get_annotation_by_text(self, text: str) -> EdfAnnotation:
    """Return the single annotation with the given text; raise if 0 or >1 match."""
    matches = []
    for annotation in self.annotations:
        if annotation.text == text:
            matches.append(annotation)
    if len(matches) == 1:
        return matches[0]
    if len(matches) > 1:
        raise ValueError(
            f"Ambiguous annotation text {text!r}, found {len(matches)} matches"
        )
    raise ValueError(f"No annotation found with text {text!r}")

def _verify_seconds_inside_recording_time(self, seconds: float) -> None:
    """Raise ValueError if `seconds` lies outside [0, duration]."""
    if not 0 <= seconds <= self.duration:
        raise ValueError(
            f"{seconds} is an invalid slice time for recording duration {self.duration}"
        )

def _verify_seconds_coincide_with_sample_time(self, seconds: float) -> None:
    """Raise ValueError if `seconds` is not an exact sample time of every ordinary signal."""
    for i, signal in enumerate(self.signals):
        index = seconds * signal.sampling_frequency
        if index != int(index):
            raise ValueError(
                f"{seconds}s is not a sample time of signal {i} ({signal.label}) with fs={signal.sampling_frequency}Hz"
            )
def _shift_startdatetime(self, seconds: float) -> None:
    """Shift the recording start date/time forward by `seconds`."""
    timedelta = datetime.timedelta(seconds=seconds)
    try:
        startdate = self.startdate
        startdate_anonymized = False
    except AnonymizedDateError:
        # Use the epoch date only for the datetime arithmetic below; the
        # anonymized stored startdate itself is left unchanged.
        startdate = datetime.date.fromtimestamp(0)
        startdate_anonymized = True
    startdatetime = datetime.datetime.combine(startdate, self.starttime)
    startdatetime += timedelta
    if not startdate_anonymized:
        self.startdate = startdatetime.date()
    self.starttime = startdatetime.time()

def copy(self) -> Edf:
    """
    Create a deep copy of the Edf.

    Returns
    -------
    Edf
        The copied Edf object.
    """
    return copy.deepcopy(self)
def _slice_annotations_signal(
    self,
    signal: EdfSignal,
    *,
    start: float,
    stop: float,
    keep_all_annotations: bool,
) -> EdfSignal:
    """Rebuild an annotation signal for the sliced interval [start, stop)."""
    is_timekeeping_signal = signal == self._timekeeping_signal
    annotations: list[EdfAnnotation] = []
    for data_record in signal._digital.reshape(
        (-1, signal.samples_per_data_record)
    ):
        annot_dr = _EdfAnnotationsDataRecord.from_bytes(data_record.tobytes())
        if is_timekeeping_signal:
            # Skip the timekeeping annotation; it is regenerated below.
            annotations.extend(annot_dr.annotations[1:])
        else:
            annotations.extend(annot_dr.annotations)
    annotations = [
        # Re-base onsets so the slice start becomes time zero.
        EdfAnnotation(round(a.onset - start, 12), a.duration, a.text)
        for a in annotations
        if keep_all_annotations or start <= a.onset < stop
    ]
    return _create_annotations_signal(
        annotations,
        num_data_records=self.num_data_records,
        data_record_duration=self.data_record_duration,
        with_timestamps=is_timekeeping_signal,
        subsecond_offset=self._subsecond_offset + start - int(start),
    )
def _create_annotations_signal(
    annotations: Iterable[EdfAnnotation],
    *,
    num_data_records: int,
    data_record_duration: float,
    with_timestamps: bool = True,
    subsecond_offset: float = 0,
) -> EdfSignal:
    """Build an "EDF Annotations" signal holding the given annotations."""
    data_record_starts = np.arange(num_data_records) * data_record_duration
    annotations = sorted(annotations)
    data_records = []
    for i, start in enumerate(data_record_starts):
        end = start + data_record_duration
        tals: list[_EdfTAL] = []
        if with_timestamps:
            # Timekeeping TAL: empty annotation whose onset marks the record start.
            tals.append(_EdfTAL(np.round(start + subsecond_offset, 12), None, [""]))
        for ann in annotations:
            # Annotations before time zero go into the first record, those at
            # or after the recording end into the last one.
            if (
                (i == 0 and ann.onset < 0)
                or (i == (num_data_records - 1) and end <= ann.onset)
                or (start <= ann.onset < end)
            ):
                tals.append(
                    _EdfTAL(
                        np.round(ann.onset + subsecond_offset, 12),
                        ann.duration,
                        [ann.text],
                    )
                )
        data_records.append(_EdfAnnotationsDataRecord(tals).to_bytes())
    maxlen = max(len(data_record) for data_record in data_records)
    if maxlen % 2:
        maxlen += 1  # int16 samples require an even byte count per record
    raw = b"".join(dr.ljust(maxlen, b"\x00") for dr in data_records)
    divisor = data_record_duration if data_record_duration else 1
    signal = EdfSignal(
        np.arange(1.0),  # placeholder signal, as argument `data` is non-optional
        sampling_frequency=maxlen // 2 / divisor,
        physical_range=(-32768, 32767),
    )
    signal._label = "EDF Annotations"
    signal._samples_per_data_record = EdfSignal.samples_per_data_record.encode(  # type: ignore[attr-defined]
        maxlen // 2
    )
    signal._digital = np.frombuffer(raw, dtype=np.int16).copy()
    return signal
@dataclass
class _EdfTAL:
onset: float
duration: float | None
texts: list[str]
def to_bytes(self) -> bytes:
timing = encode_annotation_onset(self.onset)
if self.duration is not None: | timing += f"\x15{encode_annotation_duration(self.duration)}" | 11 | 2023-11-09 09:53:27+00:00 | 8k |
microsoft/folx | folx/hessian.py | [
{
"identifier": "JAC_DIM",
"path": "folx/api.py",
"snippet": "T = TypeVar(\"T\", bound=PyTree[Array])\nR = TypeVar(\"R\", bound=PyTree[Array])\nJAC_DIM = 0 # should be either 0 or -1. TODO: switching is not support.\n GENERAL = 0\n LINEAR_IN_FIRST = 1\n LINEAR_IN_ONE = 2 | LINEAR_IN_FIRST\n ... | import functools
import logging
import jax
import jax.flatten_util as jfu
import jax.numpy as jnp
import jax.tree_util as jtu
import jaxlib.xla_extension
import numpy as np
from typing import Callable, Sequence
from jax import core
from .api import (
JAC_DIM,
Array,
Axes,
CustomTraceJacHessianJac,
ExtraArgs,
ForwardFn,
FunctionFlags,
FwdJacobian,
FwdLaplArgs,
FwdLaplArray,
MergeFn,
PyTree,
)
from .utils import (
add_vmap_jacobian_dim,
array_wise_flat_wrap,
flat_wrap,
get_reduced_jacobians,
jac_jacT,
trace_jac_jacT,
trace_of_product,
vmap_sequences_and_squeeze,
) | 4,414 | return arrs
if arrs[0].ndim >= 1:
return [remove_fill(x, find_unique=find_unique) for x in arrs]
if find_unique:
arrs = np.unique(arrs)
return arrs[arrs >= 0] # type: ignore
def merge_and_populate(arrs: Sequence[np.ndarray], operation: Callable[[np.ndarray, np.ndarray], np.ndarray]):
    """
    Combine index arrays leaf-wise with `operation` and pad to a common size.

    All pytrees in `arrs` must share the same structure. Corresponding leaves
    are folded left-to-right with `operation` (e.g. `np.intersect1d` or
    `np.union1d`). Every combined leaf is then right-padded with the fill
    value -1 to the size of the largest combined leaf.

    Args:
        arrs: sequence of pytrees of numpy index arrays (same structure).
        operation: binary function merging two index arrays.

    Returns:
        An integer `np.ndarray` stacking the padded leaves.
    """
    def _is_leaf(leaf):
        return isinstance(leaf, np.ndarray)

    def _combine(*leaves):
        merged = leaves[0]
        for leaf in leaves[1:]:
            merged = operation(merged, leaf)
        return merged

    combined = jtu.tree_map(_combine, *arrs, is_leaf=_is_leaf)
    leaf_sizes = [leaf.size for leaf in jtu.tree_leaves(combined, is_leaf=_is_leaf)]
    target_size = np.max(leaf_sizes)

    def _pad(leaf):
        fill = np.full(target_size - leaf.size, -1, dtype=leaf.dtype)
        return np.concatenate([leaf, fill])

    padded = jtu.tree_map(_pad, combined, is_leaf=_is_leaf)
    return np.asarray(padded, dtype=int)
def find_materialization_idx(lapl_args: FwdLaplArgs, in_axes, flags: FunctionFlags, threshold: int):
    """
    Find the set of x0 indices at which sparse Jacobians must be materialized.

    Returns an integer index array (Jacobian axis moved to `JAC_DIM`, fill
    value -1), or `None` when no Jacobian is weak or the index set is too
    large (>= the dense size, or > `threshold`), signalling densification.
    """
    if not lapl_args.any_jacobian_weak:
        return None
    # TODO: Rewrite this!! This is quite messy and inefficient.
    # it assumes that we're only interested in the last dimension.
    with core.new_main(core.EvalTrace, dynamic=True):
        vmap_seq, (inp,) = vmap_sequences_and_squeeze(
            ([j.mask for j in lapl_args.jacobian],),
            ([j for j in add_vmap_jacobian_dim(lapl_args, FwdLaplArgs(in_axes)).jacobian],),
        )
        max_size = np.max([np.sum(j.unique_idx >= 0, dtype=int) for j in lapl_args.jacobian])
        # This can be quite memory intensive, so we try to do it on the GPU and
        # if that fails we just use the CPU. On the CPU this takes quite some time.
        # TODO: work on a more memory efficient implementation!
        unique_fn = functools.partial(jnp.unique, size=max_size + 1, fill_value=-1)

        def idx_fn(x):
            return jtu.tree_map(unique_fn, x)

        # Vmap over all batch dimensions so uniques are found per element.
        for s in vmap_seq[::-1]:
            idx_fn = jax.vmap(idx_fn, in_axes=s)
        try:
            # This path is more memory intensive by using the GPU to find uniques but
            # potentially fails if the arrays are too large.
            # +1 because we need to accomodate the -1.
            arrs = np.asarray(idx_fn(inp), dtype=int)
        except jaxlib.xla_extension.XlaRuntimeError:
            logging.info(
                "Failed to find unique elements on GPU, falling back to CPU. This will be slow."
            )
            with jax.default_device(jax.devices("cpu")[0]):
                arrs = np.asarray(idx_fn(inp), dtype=int)

    filtered_arrs = remove_fill(arrs, False)
    if FunctionFlags.LINEAR_IN_ONE in flags:
        # For off diagonal Hessians we only need to look at the intersection between
        # all arrays rather than their union.
        idx = merge_and_populate(filtered_arrs, np.intersect1d)  # type: ignore
    else:
        idx = merge_and_populate(filtered_arrs, np.union1d)  # type: ignore
    idx = np.moveaxis(idx, -1, JAC_DIM)

    if idx.shape[JAC_DIM] >= max_size or idx.shape[JAC_DIM] > threshold:
        # Materialization would not be sparser than the dense Jacobian (or
        # exceeds the caller's threshold): signal densification instead.
        idx = None
    return idx
def remove_zero_entries(lapl_args: FwdLaplArgs, materialize_idx: np.ndarray | None):
    """
    Drop output positions whose materialization indices are all -1 (i.e. that
    depend on no input) before the expensive Hessian computation.

    Returns:
        (args, materialize_idx, mask) — the possibly reduced args, the reduced
        materialization index, and the boolean keep-mask. The mask (and, for
        dense inputs, the reduction) is ``None`` when nothing was removed.
    """
    if materialize_idx is None:
        return lapl_args, None, None
    mask = (materialize_idx != -1).any(0)
    if mask.sum() > 0.5 * mask.size:
        # this is a heuristic to avoid having unnecessary indexing overhead for
        # insufficiently sparse masks.
        return lapl_args, materialize_idx, None
    indices = np.where(mask)
    new_mat_idx = materialize_idx[(slice(None), *indices)]
    new_arrs = []
    for arg in lapl_args.arrays:
        # Axes of size 1 are broadcasted; index them with 0 instead of the mask
        # coordinates so the broadcast semantics are preserved.
        brdcast_dims = np.where(np.array(arg.x.shape) == 1)[0]
        idx = tuple(
            0 if i in brdcast_dims else x
            for i, x in enumerate(indices)
        )
        new_arrs.append(FwdLaplArray(
            x=arg.x[idx],
            jacobian=FwdJacobian(
                data=arg.jacobian.data[(slice(None), *idx)],
                x0_idx=arg.jacobian.x0_idx[(slice(None), *idx)],  # type: ignore
            ),
            laplacian=arg.laplacian[idx],
        ))
    new_args = FwdLaplArgs(tuple(new_arrs))
    return new_args, new_mat_idx, mask
def vmapped_jac_hessian_jac(
fwd: ForwardFn,
flags: FunctionFlags,
custom_jac_hessian_jac: CustomTraceJacHessianJac | None,
extra_args: ExtraArgs,
in_axes: Axes,
extra_in_axes: Axes,
merge: MergeFn,
sparsity_threshold: int,
lapl_args: FwdLaplArgs,
|
def general_jac_hessian_jac(fn: ForwardFn, args: FwdLaplArgs, materialize_idx: Array | None):
    """
    Compute tr(J^T H J) for a general function with a dense Hessian.

    The contraction order is chosen based on the relative sizes of the x0
    dimension D and the flattened input dimension K (see comments below).
    """
    # It's conceptually easier to work with the flattened version of the
    # Hessian, since we can then use einsum to compute the trace.
    flat_fn = flat_wrap(fn, *args.x)
    flat_x = jfu.ravel_pytree(args.x)[0]
    out, unravel = jfu.ravel_pytree(fn(*args.x))
    # We have to decide on an order in which we execute tr(HJJ^T).
    # H will be of shape NxDxD, J is DxK where N could potentially be D.
    # We will do the following:
    # if K >= D, we compute
    # JJ^T first and then the trace.
    # if D < K, we compute HJ first and then the trace.
    # We should also flatten our gradient tensor to a 2D matrix where the first dimension
    # is the x0 dim and the second dim is the input dim.
    grads_2d = get_reduced_jacobians(*args.jacobian, idx=materialize_idx)
    grad_2d = jnp.concatenate([x.T for x in grads_2d], axis=0)
    D, K = grad_2d.shape
    if K > D:
        # jax.hessian uses Fwd on Reverse AD
        flat_hessian = jax.hessian(flat_fn)(flat_x)
        flat_out = trace_of_product(flat_hessian, grad_2d @ grad_2d.T)
    elif D > K:
        # Directly compute the trace of tr(HJJ^T)=tr(J^THJ) via nested JVPs
        # (Hessian-vector products), never materializing H.
        @functools.partial(jax.vmap, in_axes=-1, out_axes=-1)
        def vhvp(tangent):
            def vjp(x):
                @functools.partial(jax.vmap, in_axes=(None, -1), out_axes=-1)
                def jvp(x, tangent):
                    return jax.jvp(flat_fn, (x,), (tangent,))[1]
                return jvp(x, grad_2d)
            return jax.jvp(vjp, (flat_x,), (tangent,))[1]
        flat_out = jnp.trace(vhvp(grad_2d), axis1=-2, axis2=-1)
    else:
        # Implementation where we compute HJ and then the trace via
        # the sum of hadamard product
        @functools.partial(jax.vmap, in_axes=-1, out_axes=-1)
        def hvp(tangent):
            def jacobian(x):
                return jax.jacrev(flat_fn)(x)
            return jax.jvp(jacobian, (flat_x,), (tangent,))[1]
        HJ = hvp(grad_2d)  # N x D x K
        flat_out = trace_of_product(HJ, grad_2d)  # N x D x K and D x K
    return unravel(flat_out)
def off_diag_jac_hessian_jac(fn: ForwardFn, args: FwdLaplArgs, materialize_idx: Array | None):
    """
    Compute tr(J^T H J) for a function whose Hessian is purely off-diagonal.

    If a function is linear in one of its two arguments, the diagonal Hessian
    blocks vanish; only the mixed block contributes, so the trace reduces to
    2 * tr(H_xy J_y J_x^T).
    """
    # if we know that a function is linear in one arguments, it's hessian must be off diagonal
    # thus we can safe some computation by only computing the off diagonal part of the hessian.
    assert len(args) == 2, "Off diag hessian only supports 2 args at the moment."

    def flat_arr(x: FwdLaplArray) -> Array:
        return jfu.ravel_pytree(x.x)[0]

    flat_fn = array_wise_flat_wrap(fn, *args.x)

    def jac_lhs(lhs, rhs):
        return jax.jacobian(flat_fn, argnums=0)(lhs, rhs)

    # Mixed second derivative d^2 f / (d lhs d rhs).
    hessian = jax.jacobian(jac_lhs, argnums=1)(flat_arr(args.arrays[0]), flat_arr(args.arrays[1]))
    flat_out = 2 * trace_of_product(
        hessian, jac_jacT(args.arrays[0].jacobian, args.arrays[1].jacobian, materialize_idx)
    )
    unravel = jfu.ravel_pytree(fn(*args.x))[1]
    return unravel(flat_out)
def mul_jac_hessian_jac(fn: ForwardFn, args: FwdLaplArgs, shared_idx: Array | None):
    """
    Compute tr(J^T H J) specialized for products.

    For a product the Hessian is the constant block matrix [[0, I], [I, 0]],
    so the trace reduces to 2 * tr(J_0 J_1^T) without ever building H.
    """
    # For a dot product we know that the hessian looks like this:
    # [0, I]
    # [I, 0]
    # where I is the identity matrix of the same shape as the input.
    assert len(args) == 2, "Dot product only supports two args."
    flat_out = (
        2 * trace_jac_jacT(args.arrays[0].jacobian, args.arrays[1].jacobian, shared_idx)[None]
    )
    unravel = jfu.ravel_pytree(fn(*args.x))[1]
    return unravel(flat_out)
def remove_fill(arrs: np.ndarray, find_unique: bool = False):
    """
    Strip the -1 fill value out of an index array.

    Because rows may end up with different lengths afterwards, every leading
    dimension is converted into a (nested) Python list of 1-D arrays.

    Args:
    - arrs: array to remove the fill value from
    - find_unique: if True, deduplicate (and sort) values before filtering

    Returns:
    - nested lists of 1-D arrays without the fill value
    """
    if arrs.size == 0:
        return arrs
    # Peel off leading dimensions recursively until 1-D rows remain.
    if arrs[0].ndim >= 1:
        rows = []
        for row in arrs:
            rows.append(remove_fill(row, find_unique=find_unique))
        return rows
    values = np.unique(arrs) if find_unique else arrs
    keep = values >= 0
    return values[keep]  # type: ignore
def merge_and_populate(arrs: Sequence[np.ndarray], operation: Callable[[np.ndarray, np.ndarray], np.ndarray]):
    """
    Combine index arrays element-wise with ``operation`` and pad them to a common size.

    The arrays are assumed to be of the same (pytree) structure. All arrays are
    folded pairwise with ``operation`` (e.g. ``np.intersect1d`` or
    ``np.union1d``), then right-padded with -1 so every leaf reaches the size
    of the largest one.

    Args:
    - arrs: list of arrays (or matching pytrees of arrays)
    - operation: pairwise set reducer, e.g. ``np.intersect1d`` / ``np.union1d``

    Returns:
    - np.ndarray of int where only merged indices are kept and all arrays are
      filled to the same size with -1 as fill value.
    """
    # Fold all arrays pairwise with the given set operation (leaf-wise).
    result = jtu.tree_map(
        lambda *x: functools.reduce(operation, tuple(x[1:]), x[0]),
        *arrs,
        is_leaf=lambda x: isinstance(x, np.ndarray)
    )
    # Pad every leaf with -1 to the largest leaf size so the result is rectangular.
    sizes = jtu.tree_map(lambda x: x.size, result, is_leaf=lambda x: isinstance(x, np.ndarray))
    max_size = np.max(jtu.tree_leaves(sizes))
    result = jtu.tree_map(
        lambda x: np.concatenate([x, np.full(max_size - x.size, -1, dtype=x.dtype)]),
        result,
        is_leaf=lambda x: isinstance(x, np.ndarray),
    )
    return np.asarray(result, dtype=int)
def find_materialization_idx(lapl_args: FwdLaplArgs, in_axes, flags: FunctionFlags, threshold: int):
    """
    Determine which x0 indices have to be materialized for sparse Hessian handling.

    Returns an integer index array (filled with -1, moved to ``JAC_DIM``), or
    ``None`` when all Jacobians are dense or the index set is too large to be
    worth materializing (>= ``max_size`` or > ``threshold``).

    NOTE(review): the original indentation of this block was lost; the scope of
    the ``with core.new_main(...)`` context below was reconstructed — verify
    against upstream before relying on it.
    """
    if not lapl_args.any_jacobian_weak:
        return None
    # TODO: Rewrite this!! This is quity messy and inefficient.
    # it assumes that we're only interested in the last dimension.
    with core.new_main(core.EvalTrace, dynamic=True):
        vmap_seq, (inp,) = vmap_sequences_and_squeeze(
            ([j.mask for j in lapl_args.jacobian],),
            ([j for j in add_vmap_jacobian_dim(lapl_args, FwdLaplArgs(in_axes)).jacobian],),
        )
        # Largest number of real (non-fill) dependencies over all Jacobians.
        max_size = np.max([np.sum(j.unique_idx >= 0, dtype=int) for j in lapl_args.jacobian])
        # This can be quite memory intensive, so we try to do it on the GPU and
        # if that fails we just use the CPU. On the CPU this takes quite some time.
        # TODO: work on a more memory efficient implementation!
        unique_fn = functools.partial(jnp.unique, size=max_size + 1, fill_value=-1)

        def idx_fn(x):
            return jtu.tree_map(unique_fn, x)

        # Re-apply vmap once per batching level so uniques are taken per slice.
        for s in vmap_seq[::-1]:
            idx_fn = jax.vmap(idx_fn, in_axes=s)
        try:
            # This path is more memory intensive by using the GPU to find uniques but
            # potentially fails if the arrays are too large.
            # +1 because we need to accomodate the -1.
            arrs = np.asarray(idx_fn(inp), dtype=int)
        except jaxlib.xla_extension.XlaRuntimeError:
            logging.info(
                "Failed to find unique elements on GPU, falling back to CPU. This will be slow."
            )
            with jax.default_device(jax.devices("cpu")[0]):
                arrs = np.asarray(idx_fn(inp), dtype=int)
    filtered_arrs = remove_fill(arrs, False)
    if FunctionFlags.LINEAR_IN_ONE in flags:
        # For off diagonal Hessians we only need to look at the intersection between
        # all arrays rather than their union.
        idx = merge_and_populate(filtered_arrs, np.intersect1d)  # type: ignore
    else:
        idx = merge_and_populate(filtered_arrs, np.union1d)  # type: ignore
    idx = np.moveaxis(idx, -1, JAC_DIM)
    # Materializing only pays off below the dense size and the sparsity threshold.
    if idx.shape[JAC_DIM] >= max_size or idx.shape[JAC_DIM] > threshold:
        idx = None
    return idx
def remove_zero_entries(lapl_args: FwdLaplArgs, materialize_idx: np.ndarray | None):
    """
    Drop output positions whose materialization indices are all -1 (i.e. that
    depend on no input) before the expensive Hessian computation.

    Returns:
        (args, materialize_idx, mask) — the possibly reduced args, the reduced
        materialization index, and the boolean keep-mask. The mask is ``None``
        when nothing was removed.
    """
    if materialize_idx is None:
        return lapl_args, None, None
    mask = (materialize_idx != -1).any(0)
    if mask.sum() > 0.5 * mask.size:
        # this is a heuristic to avoid having unnecessary indexing overhead for
        # insufficiently sparse masks.
        return lapl_args, materialize_idx, None
    indices = np.where(mask)
    new_mat_idx = materialize_idx[(slice(None), *indices)]
    new_arrs = []
    for arg in lapl_args.arrays:
        # Axes of size 1 are broadcasted; index them with 0 instead of the mask
        # coordinates so the broadcast semantics are preserved.
        brdcast_dims = np.where(np.array(arg.x.shape) == 1)[0]
        idx = tuple(
            0 if i in brdcast_dims else x
            for i, x in enumerate(indices)
        )
        new_arrs.append(FwdLaplArray(
            x=arg.x[idx],
            jacobian=FwdJacobian(
                data=arg.jacobian.data[(slice(None), *idx)],
                x0_idx=arg.jacobian.x0_idx[(slice(None), *idx)],  # type: ignore
            ),
            laplacian=arg.laplacian[idx],
        ))
    new_args = FwdLaplArgs(tuple(new_arrs))
    return new_args, new_mat_idx, mask
def vmapped_jac_hessian_jac(
fwd: ForwardFn,
flags: FunctionFlags,
custom_jac_hessian_jac: CustomTraceJacHessianJac | None,
extra_args: ExtraArgs,
in_axes: Axes,
extra_in_axes: Axes,
merge: MergeFn,
sparsity_threshold: int,
lapl_args: FwdLaplArgs, | ) -> PyTree[Array]: | 0 | 2023-11-07 16:32:46+00:00 | 8k |
shuttworth/NICE-SLAM-Easyread | run.py | [
{
"identifier": "config",
"path": "src/config.py",
"snippet": "def load_config(path, default_path=None):\ndef update_recursive(dict1, dict2):\ndef get_model(cfg, nice=True):"
},
{
"identifier": "NICE_SLAM",
"path": "src/NICE_SLAM.py",
"snippet": "class NICE_SLAM():\n \"\"\"\n NICE_... | import argparse
import random
import numpy as np
import torch
from src import config
from src.NICE_SLAM import NICE_SLAM | 4,419 |
def setup_seed(seed):
    """Make runs reproducible by seeding every random number generator.

    Args:
        seed: integer seed applied to python, numpy and torch RNGs.
    """
    # Python and numpy generators.
    random.seed(seed)
    np.random.seed(seed)
    # Torch CPU generator and all visible CUDA devices (no-op without GPUs).
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Force deterministic cuDNN kernel selection so repeated runs match.
    torch.backends.cudnn.deterministic = True
def main():
# setup_seed(20)
parser = argparse.ArgumentParser(
description='Arguments for running the NICE-SLAM/iMAP*.'
)
parser.add_argument('config', type=str, help='Path to config file.')
parser.add_argument('--input_folder', type=str,
help='input folder, this have higher priority, can overwrite the one in config file')
parser.add_argument('--output', type=str,
help='output folder, this have higher priority, can overwrite the one in config file')
nice_parser = parser.add_mutually_exclusive_group(required=False)
nice_parser.add_argument('--nice', dest='nice', action='store_true')
nice_parser.add_argument('--imap', dest='nice', action='store_false')
parser.set_defaults(nice=True)
# parse_args()访问在命令行中传递的所有参数
args = parser.parse_args()
|
def setup_seed(seed):
    """Seed python, numpy and torch RNGs so runs are reproducible."""
    torch.manual_seed(seed)
    # Seeds all visible CUDA devices; harmless no-op on CPU-only machines.
    torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)
    random.seed(seed)
    # Deterministic cuDNN kernel selection for exact repeatability.
    torch.backends.cudnn.deterministic = True
def main():
# setup_seed(20)
parser = argparse.ArgumentParser(
description='Arguments for running the NICE-SLAM/iMAP*.'
)
parser.add_argument('config', type=str, help='Path to config file.')
parser.add_argument('--input_folder', type=str,
help='input folder, this have higher priority, can overwrite the one in config file')
parser.add_argument('--output', type=str,
help='output folder, this have higher priority, can overwrite the one in config file')
nice_parser = parser.add_mutually_exclusive_group(required=False)
nice_parser.add_argument('--nice', dest='nice', action='store_true')
nice_parser.add_argument('--imap', dest='nice', action='store_false')
parser.set_defaults(nice=True)
# parse_args()访问在命令行中传递的所有参数
args = parser.parse_args()
| cfg = config.load_config( | 0 | 2023-11-07 05:09:36+00:00 | 8k |
sb-ai-lab/HypEx | hypex/algorithms/faiss_matcher.py | [
{
"identifier": "check_repeats",
"path": "hypex/utils/metrics.py",
"snippet": "def check_repeats(index: np.array, silent: bool = False) -> float:\n \"\"\"Checks the fraction of duplicated indexes in the given array.\n\n Args:\n index:\n The array of indexes to check for duplicat... | import datetime as dt
import functools
import logging
import time
import faiss
import numpy as np
import pandas as pd
from typing import Any
from typing import Dict
from typing import Tuple
from typing import Union
from scipy.stats import norm
from tqdm.auto import tqdm
from ..utils.metrics import check_repeats
from ..utils.metrics import matching_quality | 3,896 | df_pred_treated = self._create_outcome_matched_df(self.dict_outcome_treated, True)
df_pred_untreated = self._create_outcome_matched_df(self.dict_outcome_untreated, False)
df_matched = pd.concat([df_pred_treated, df_pred_untreated])
treated_x = self._create_features_matched_df(self.treated_index, True)
untreated_x = self._create_features_matched_df(self.untreated_index, False)
untreated_x = pd.concat([treated_x, untreated_x])
columns = list(untreated_x.columns) + list(df_matched.columns)
df_matched = pd.concat([untreated_x, df_matched], axis=1, ignore_index=True)
df_matched.columns = columns
return df_matched
def calc_atc(self, df: pd.DataFrame, outcome: str) -> tuple:
    """Calculates Average Treatment Effect for the control group (ATC).

    Effect on control group if it was affected.

    Args:
        df:
            Input dataframe
        outcome:
            The outcome to be considered for treatment effect

    Returns:
        Contains ATC, scaled counts, and variances as numpy arrays
    """
    logger.debug("Calculating ATC")
    df = df[df[self.treatment] == 0]
    N_c = len(df)
    # Bias-corrected individual effects of the control units.
    ITT_c = df[outcome + POSTFIX_BIAS]
    scaled_counts_c = scaled_counts(N_c, self.treated_index, self.silent)
    vars_c = np.repeat(ITT_c.var(), N_c)  # conservative
    atc = ITT_c.mean()
    return atc, scaled_counts_c, vars_c
def calc_att(self, df: pd.DataFrame, outcome: str) -> tuple:
    """Calculates Average Treatment Effect for the treated (ATT).

    Args:
        df:
            Input dataframe
        outcome:
            The outcome to be considered for treatment effect

    Returns:
        Contains ATT, scaled counts, and variances as numpy arrays
    """
    logger.debug("Calculating ATT")
    df = df[df[self.treatment] == 1]
    N_t = len(df)
    # Bias-corrected individual effects of the treated units.
    ITT_t = df[outcome + POSTFIX_BIAS]
    scaled_counts_t = scaled_counts(N_t, self.untreated_index, self.silent)
    vars_t = np.repeat(ITT_t.var(), N_t)  # conservative
    att = ITT_t.mean()
    return att, scaled_counts_t, vars_t
def _calculate_ate_all_target(self, df: pd.DataFrame):
    """Creates dictionaries of all effects: ATE, ATC, ATT.

    Each dict maps outcome name -> [effect, std_err, p_value, ci_lower, ci_upper],
    where the confidence interval uses ``self.sigma`` as the critical value.

    Args:
        df:
            Input dataframe
    """
    logger.debug("Creating dicts of all effects: ATE, ATC, ATT")
    att_dict = {}
    atc_dict = {}
    ate_dict = {}
    N = len(df)
    N_t = df[self.treatment].sum()
    N_c = N - N_t
    for outcome in self.outcomes:
        att, scaled_counts_t, vars_t = self.calc_att(df, outcome)
        atc, scaled_counts_c, vars_c = self.calc_atc(df, outcome)
        # ATE is the sample-size weighted combination of ATC and ATT.
        ate = (N_c / N) * atc + (N_t / N) * att
        att_se = calc_att_se(vars_c, vars_t, scaled_counts_c)
        atc_se = calc_atc_se(vars_c, vars_t, scaled_counts_t)
        ate_se = calc_ate_se(vars_c, vars_t, scaled_counts_c, scaled_counts_t)
        ate_dict[outcome] = [
            ate,
            ate_se,
            pval_calc(ate / ate_se),
            ate - self.sigma * ate_se,
            ate + self.sigma * ate_se,
        ]
        atc_dict[outcome] = [
            atc,
            atc_se,
            pval_calc(atc / atc_se),
            atc - self.sigma * atc_se,
            atc + self.sigma * atc_se,
        ]
        att_dict[outcome] = [
            att,
            att_se,
            pval_calc(att / att_se),
            att - self.sigma * att_se,
            att + self.sigma * att_se,
        ]
    self.ATE, self.ATC, self.ATT = ate_dict, atc_dict, att_dict
    self.val_dict = ate_dict
| """Class that searches indexes."""
def timer(func):
    """Decorator that measures and prints how long each call to ``func`` takes.

    Timing uses time.perf_counter(); the wrapped function's metadata is
    preserved via functools.wraps and its return value is passed through
    unchanged.

    Usage Example:
        @timer
        def example_function():
            ...

    Args:
        func: The function whose execution time is to be measured

    Returns:
        Wrapped version of the original function with added time measurement
    """

    @functools.wraps(func)
    def _wrapper(*args, **kwargs):
        tic = time.perf_counter()
        result = func(*args, **kwargs)
        runtime = time.perf_counter() - tic
        print(f"{func.__name__} took {runtime:.4f} secs")
        return result

    return _wrapper
# NOTE(review): raises faiss' threshold for switching distance computation to
# the BLAS path to 100000 — presumably to keep smaller queries on the exact
# path; confirm against faiss documentation.
faiss.cvar.distance_compute_blas_threshold = 100000

# Column-name suffixes for matched counterparts and bias-corrected values.
POSTFIX = "_matched"
POSTFIX_BIAS = "_matched_bias"

# Module-level logger emitting INFO+ records to stderr with timestamps.
logger = logging.getLogger("Faiss hypex")
console_out = logging.StreamHandler()
logging.basicConfig(
    handlers=(console_out,),
    format="[%(asctime)s | %(name)s | %(levelname)s]: %(message)s",
    datefmt="%d.%m.%Y %H:%M:%S",
    level=logging.INFO,
)
class FaissMatcher:
"""A class used to match instances using Faiss library."""
def __init__(
    self,
    df: pd.DataFrame,
    outcomes: str,
    treatment: str,
    info_col: list,
    features: [list, pd.DataFrame] = None,
    group_col: str = None,
    weights: dict = None,
    sigma: float = 1.96,
    validation: bool = None,
    n_neighbors: int = 10,
    silent: bool = True,
    pbar: bool = True,
):
    """Construct all the necessary attributes.

    Args:
        df:
            The input dataframe
        outcomes:
            The target column name (a single name or a list of names)
        treatment:
            The column name with treatment
        info_col:
            A list with informational column names (excluded from matching)
        features:
            A list with names of features used for matching, or a dataframe
            with a "Feature" column. Defaults to None (use all non-info columns)
        group_col:
            The column for stratification. Defaults to None
        weights:
            Dict with weight of features for matching. If you would like that
            matching will be more for 1 feature and less for another one
        sigma:
            The significance level for confidence interval calculation.
            Defaults to 1.96
        validation:
            The flag for validation of estimated ATE with default method
            `random_feature`
        n_neighbors:
            The number of neighbors to find for each object. Defaults to 10
        silent:
            Write logs in debug mode
        pbar:
            Display progress bar while get index
    """
    self.n_neighbors = n_neighbors
    # With stratification the frame must be ordered by (treatment, group)
    # so per-group index arithmetic stays consistent later on.
    if group_col is None:
        self.df = df
    else:
        self.df = df.sort_values([treatment, group_col])
    self.columns_del = [outcomes]
    if info_col:
        self.info_col = info_col
    else:
        self.info_col = []
    if self.info_col is not None:
        self.columns_del = self.columns_del + [x for x in self.info_col if x in self.df.columns]
    # Normalize outcomes to a list so downstream loops are uniform.
    self.outcomes = outcomes if type(outcomes) == list else [outcomes]
    self.treatment = treatment
    if features is None:
        self.columns_match = list(
            set([x for x in list(self.df.columns) if x not in self.info_col] + [self.treatment] + self.outcomes)
        )
    else:
        # `features` may be a dataframe with a "Feature" column or a plain list.
        try:
            self.columns_match = features["Feature"].tolist() + [self.treatment] + self.outcomes
        except TypeError:
            self.columns_match = features + [self.treatment] + self.outcomes
    # Numeric feature columns used for matching-quality diagnostics.
    self.features_quality = (
        self.df.drop(columns=[self.treatment] + self.outcomes + self.info_col)
        .select_dtypes(include=["int16", "int32", "int64", "float16", "float32", "float64"])
        .columns
    )
    self.dict_outcome_untreated = {}
    self.dict_outcome_treated = {}
    self.group_col = group_col
    self.weights = weights
    self.treated_index = None
    self.untreated_index = None
    self.orig_treated_index = None
    self.orig_untreated_index = None
    self.results = {}
    self.ATE = None
    self.sigma = sigma
    self.quality_dict = {}
    self.rep_dict = None
    self.validation = validation
    self.silent = silent
    self.pbar = pbar
    self.tqdm = None
    # NOTE: intentionally overrides the dict above; results end up a DataFrame.
    self.results = pd.DataFrame()
def __getstate__(self) -> dict:
"""Prepare the object for serialization.
This method is called when the object is about to be serialized.
It removes the `tqdm` attribute from the object's dictionary
because `tqdm` objects cannot be serialized.
Returns:
A copy of the object's dictionary with the `tqdm` attribute removed.
"""
state = self.__dict__.copy()
if "tqdm" in state:
del state["tqdm"]
return state
def __setstate__(self, state: dict):
"""Restore the object after deserialization.
This method is called when the object is deserialized.
It adds the `tqdm` attribute back to the object's dictionary
if the `pbar` attribute is True.
Args:
state:
The deserialized state of the object
"""
if "pbar" in state and state["pbar"]:
state["tqdm"] = None
self.__dict__.update(state)
def _get_split(self, df: pd.DataFrame) -> (pd.DataFrame, pd.DataFrame):
    """Creates split data by treatment column.

    Separates rows into treated (df[self.treatment] == 1) and untreated
    (df[self.treatment] == 0) groups.

    Args:
        df:
            The input dataframe

    Returns:
        Tuple of dataframes - one for treated and one for untreated rows,
        each with the `self.outcomes` and `self.treatment` columns dropped.
    """
    logger.debug("Creating split data by treatment column")
    treated = df[df[self.treatment] == 1].drop([self.treatment] + self.outcomes, axis=1)
    untreated = df[df[self.treatment] == 0].drop([self.treatment] + self.outcomes, axis=1)
    return treated, untreated
def _predict_outcome(self, std_treated: pd.DataFrame, std_untreated: pd.DataFrame):
    """Applies LinearRegression to input arrays.

    Calculate biases of treated and untreated values,
    creates dict of y - regular, matched and without bias.

    Args:
        std_treated:
            The dataframe of treated data
        std_untreated:
            The dataframe of untreated data
    """
    logger.debug("Predicting target by Linear Regression")
    start_time = dt.datetime.now()
    logger.debug("start --")
    self.dict_outcome_untreated = {}
    self.dict_outcome_treated = {}
    df = self.df.drop(columns=self.info_col)
    for outcome in self.outcomes:
        y_untreated = df[df[self.treatment] == 0][outcome].to_numpy()
        y_treated = df[df[self.treatment] == 1][outcome].to_numpy()
        x_treated = std_treated.to_numpy()
        x_untreated = std_untreated.to_numpy()
        # Matched counterfactuals: mean outcome/features of each unit's neighbors.
        y_match_treated = np.array([y_untreated[idx].mean() for idx in self.treated_index])
        y_match_untreated = np.array([y_treated[idx].mean() for idx in self.untreated_index])
        x_match_treated = np.array([x_untreated[idx].mean(0) for idx in self.treated_index])
        x_match_untreated = np.array([x_treated[idx].mean(0) for idx in self.untreated_index])
        # Regression-based bias correction (presumably Abadie–Imbens style —
        # confirm against the `bias_coefs`/`bias` helpers).
        bias_coefs_c = bias_coefs(self.untreated_index, y_treated, x_treated)
        bias_coefs_t = bias_coefs(self.treated_index, y_untreated, x_untreated)
        bias_c = bias(x_untreated, x_match_untreated, bias_coefs_c)
        bias_t = bias(x_treated, x_match_treated, bias_coefs_t)
        y_match_treated_bias = y_treated - y_match_treated + bias_t
        y_match_untreated_bias = y_match_untreated - y_untreated - bias_c
        self.dict_outcome_untreated[outcome] = y_untreated
        self.dict_outcome_untreated[outcome + POSTFIX] = y_match_untreated
        self.dict_outcome_untreated[outcome + POSTFIX_BIAS] = y_match_untreated_bias
        self.dict_outcome_treated[outcome] = y_treated
        self.dict_outcome_treated[outcome + POSTFIX] = y_match_treated
        self.dict_outcome_treated[outcome + POSTFIX_BIAS] = y_match_treated_bias
    end_time = dt.datetime.now()
    total = dt.datetime.strptime(str(end_time - start_time), "%H:%M:%S.%f").strftime("%H:%M:%S")
    logger.debug(f"end -- [work time{total}]")
def _create_outcome_matched_df(self, dict_outcome: dict, is_treated: bool) -> pd.DataFrame:
    """Build a dataframe of outcome values plus treatment indicator columns.

    Args:
        dict_outcome:
            A dictionary containing outcomes
        is_treated:
            A boolean value indicating whether the outcome is treated or not

    Returns:
        A dataframe with matched outcome and treatment columns
    """
    flag = int(is_treated)
    df_pred = pd.DataFrame(dict_outcome)
    # Original group flag, plus the opposite flag for the matched counterparts.
    df_pred[self.treatment] = flag
    df_pred[self.treatment + POSTFIX] = 1 - flag
    return df_pred
def _create_features_matched_df(self, index: np.ndarray, is_treated: bool) -> pd.DataFrame:
    """Creates matched dataframe with features.

    For each unit of the given group, averages the features of its matched
    neighbors from the opposite group and concatenates them (suffixed with
    POSTFIX) next to the unit's own features.

    Args:
        index:
            An array of neighbor-index arrays (one entry per unit)
        is_treated:
            A boolean value indicating whether the outcome is treated or not

    Returns:
        A dataframe of matched features
    """
    df = self.df.drop(columns=self.outcomes + self.info_col)

    if self.group_col is None:
        untreated_index = df[df[self.treatment] == int(not is_treated)].index.to_numpy()
        converted_index = [untreated_index[i] for i in index]
        filtered = df.loc[df[self.treatment] == int(not is_treated)].values
        untreated_df = pd.DataFrame(
            data=np.array([filtered[idx].mean(axis=0) for idx in index]), columns=df.columns
        )  # TODO: add the date to the data and fix the related bugs (this used to break here)
        if self.info_col is not None and len(self.info_col) != 1:
            untreated_df["index"] = pd.Series(converted_index)
            treated_df = df[df[self.treatment] == int(is_treated)].reset_index()
        else:
            # Single info column: use its values as the matched-unit ids.
            ids = self.df[df[self.treatment] == int(not is_treated)][self.info_col].values.ravel()
            converted_index = [ids[i] for i in index]
            untreated_df["index"] = pd.Series(converted_index)
            treated_df = df[df[self.treatment] == int(is_treated)].reset_index()
            treated_df["index"] = self.df[self.df[self.treatment] == int(is_treated)][self.info_col].values.ravel()
    else:
        # Stratified case: keep (treatment, group) ordering consistent with fit.
        df = df.sort_values([self.treatment, self.group_col])
        untreated_index = df[df[self.treatment] == int(not is_treated)].index.to_numpy()
        converted_index = [untreated_index[i] for i in index]
        filtered = df.loc[df[self.treatment] == int(not is_treated)]
        cols_untreated = [col for col in filtered.columns if col != self.group_col]
        filtered = filtered.drop(columns=self.group_col).to_numpy()
        untreated_df = pd.DataFrame(
            data=np.array([filtered[idx].mean(axis=0) for idx in index]), columns=cols_untreated
        )
        treated_df = df[df[self.treatment] == int(is_treated)].reset_index()
        grp = treated_df[self.group_col]
        untreated_df[self.group_col] = grp
        if self.info_col is not None and len(self.info_col) != 1:
            untreated_df["index"] = pd.Series(converted_index)
        else:
            ids = (
                self.df[df[self.treatment] == int(not is_treated)]
                .sort_values([self.treatment, self.group_col])[self.info_col]
                .values.ravel()
            )
            converted_index = [ids[i] for i in index]
            untreated_df["index"] = pd.Series(converted_index)
            treated_df["index"] = self.df[self.df[self.treatment] == int(is_treated)][self.info_col].values.ravel()
    # Suffix matched columns and join them alongside the unit's own features.
    untreated_df.columns = [col + POSTFIX for col in untreated_df.columns]
    x = pd.concat([treated_df, untreated_df], axis=1).drop(
        columns=[self.treatment, self.treatment + POSTFIX], axis=1
    )
    return x
def _create_matched_df(self) -> pd.DataFrame:
    """Assemble the full matched dataframe (features + outcomes).

    Stacks treated and untreated outcome frames vertically, does the same for
    the feature frames, then joins features and outcomes side by side.

    Returns:
        Matched dataframe
    """
    outcomes = pd.concat(
        [
            self._create_outcome_matched_df(self.dict_outcome_treated, True),
            self._create_outcome_matched_df(self.dict_outcome_untreated, False),
        ]
    )
    features = pd.concat(
        [
            self._create_features_matched_df(self.treated_index, True),
            self._create_features_matched_df(self.untreated_index, False),
        ]
    )
    # Join side by side with positional columns, then restore the names.
    columns = list(features.columns) + list(outcomes.columns)
    combined = pd.concat([features, outcomes], axis=1, ignore_index=True)
    combined.columns = columns
    return combined
def calc_atc(self, df: pd.DataFrame, outcome: str) -> tuple:
    """Calculates Average Treatment Effect for the control group (ATC).

    Effect on control group if it was affected.

    Args:
        df:
            Input dataframe
        outcome:
            The outcome to be considered for treatment effect

    Returns:
        Contains ATC, scaled counts, and variances as numpy arrays
    """
    logger.debug("Calculating ATC")
    df = df[df[self.treatment] == 0]
    N_c = len(df)
    # Bias-corrected individual effects of the control units.
    ITT_c = df[outcome + POSTFIX_BIAS]
    scaled_counts_c = scaled_counts(N_c, self.treated_index, self.silent)
    vars_c = np.repeat(ITT_c.var(), N_c)  # conservative
    atc = ITT_c.mean()
    return atc, scaled_counts_c, vars_c
def calc_att(self, df: pd.DataFrame, outcome: str) -> tuple:
    """Calculates Average Treatment Effect for the treated (ATT).

    Args:
        df:
            Input dataframe
        outcome:
            The outcome to be considered for treatment effect

    Returns:
        Contains ATT, scaled counts, and variances as numpy arrays
    """
    logger.debug("Calculating ATT")
    df = df[df[self.treatment] == 1]
    N_t = len(df)
    # Bias-corrected individual effects of the treated units.
    ITT_t = df[outcome + POSTFIX_BIAS]
    scaled_counts_t = scaled_counts(N_t, self.untreated_index, self.silent)
    vars_t = np.repeat(ITT_t.var(), N_t)  # conservative
    att = ITT_t.mean()
    return att, scaled_counts_t, vars_t
def _calculate_ate_all_target(self, df: pd.DataFrame):
    """Creates dictionaries of all effects: ATE, ATC, ATT.

    Each dict maps outcome name -> [effect, std_err, p_value, ci_lower, ci_upper],
    where the confidence interval uses ``self.sigma`` as the critical value.

    Args:
        df:
            Input dataframe
    """
    logger.debug("Creating dicts of all effects: ATE, ATC, ATT")
    att_dict = {}
    atc_dict = {}
    ate_dict = {}
    N = len(df)
    N_t = df[self.treatment].sum()
    N_c = N - N_t
    for outcome in self.outcomes:
        att, scaled_counts_t, vars_t = self.calc_att(df, outcome)
        atc, scaled_counts_c, vars_c = self.calc_atc(df, outcome)
        # ATE is the sample-size weighted combination of ATC and ATT.
        ate = (N_c / N) * atc + (N_t / N) * att
        att_se = calc_att_se(vars_c, vars_t, scaled_counts_c)
        atc_se = calc_atc_se(vars_c, vars_t, scaled_counts_t)
        ate_se = calc_ate_se(vars_c, vars_t, scaled_counts_c, scaled_counts_t)
        ate_dict[outcome] = [
            ate,
            ate_se,
            pval_calc(ate / ate_se),
            ate - self.sigma * ate_se,
            ate + self.sigma * ate_se,
        ]
        atc_dict[outcome] = [
            atc,
            atc_se,
            pval_calc(atc / atc_se),
            atc - self.sigma * atc_se,
            atc + self.sigma * atc_se,
        ]
        att_dict[outcome] = [
            att,
            att_se,
            pval_calc(att / att_se),
            att - self.sigma * att_se,
            att + self.sigma * att_se,
        ]
    self.ATE, self.ATC, self.ATT = ate_dict, atc_dict, att_dict
    self.val_dict = ate_dict
| def matching_quality(self, df_matched) -> Dict[str, Union[Dict[str, float], float]]: | 1 | 2023-11-01 08:58:57+00:00 | 8k |
TianrongChen/DMSB | runner.py | [
{
"identifier": "MMD_loss",
"path": "metrics.py",
"snippet": "class MMD_loss(torch.nn.Module):\n '''\n fork from: https://github.com/ZongxianLee/MMD_Loss.Pytorch\n '''\n def __init__(self, kernel_mul = 2.0, kernel_num = 5):\n super(MMD_loss, self).__init__()\n self.kernel_num =... | import os, time, gc
import numpy as np
import torch
import torch.nn.functional as F
import policy
import sde
import data
import util
from torch.optim import SGD, RMSprop, Adagrad, AdamW, lr_scheduler, Adam
from torch.utils.tensorboard import SummaryWriter
from torch_ema import ExponentialMovingAverage
from metrics import MMD_loss,compute_metrics,metric_build
from loss import compute_sb_DSB_train
from ipdb import set_trace as debug | 4,071 | def sb_alternate_train_stage(self, opt, stage, epoch, direction, reused_sampler=None, rollout=False, resample=True):
policy_opt, policy_impt = {
'forward': [self.z_f, self.z_b], # train forwad, sample from backward
'backward': [self.z_b, self.z_f], # train backward, sample from forward
}.get(direction)
for ep in range(epoch):
# prepare training data
train_ms, train_zs, train_ts, train_labels = self.sample_train_data(
opt, policy_opt, policy_impt, reused_sampler, rollout=rollout, resample=resample
)
# train one epoch
policy_impt = freeze_policy(policy_impt)
policy_opt = activate_policy(policy_opt)
self.DSB_alternate_train_ep(
opt, ep, stage, direction, train_ms, train_zs, train_ts, train_labels, policy_opt, epoch
)
def DSB_alternate_train_ep(
    self, opt, ep, stage, direction, train_xs, train_zs, train_ts, train_labels, policy, num_epoch
):
    """Run one epoch of DSB training for ``policy`` on pre-sampled trajectories.

    Args:
        opt: experiment options (batch sizes, device, AMP flag, clip value, ...)
        ep, stage, direction: bookkeeping values forwarded to logging
        train_xs, train_zs, train_ts, train_labels: cached trajectory tensors
            with leading batch dimension ``opt.samp_bs``
        policy: the policy network being optimized (direction must match)
        num_epoch: total epochs in this stage (used for logging only)
    """
    assert train_xs.shape[0] == opt.samp_bs
    assert train_zs.shape[0] == opt.samp_bs
    assert direction == policy.direction
    optimizer, ema, sched = self.get_optimizer_ema_sched(policy)
    use_amp = opt.use_amp
    scaler = torch.cuda.amp.GradScaler(enabled=use_amp)
    for it in range(opt.num_itr):
        # -------- sample x_idx and t_idx \in [0, interval] --------
        samp_m_idx = torch.randint(opt.samp_bs, (opt.train_bs_x,), device='cpu')
        samp_t_idx = util.time_sample(opt.interval, policy.direction, opt.train_bs_t)
        if opt.use_arange_t:
            samp_t_idx = util.time_arange(train_ts.shape[0], policy.direction)
        # -------- build sample --------
        # The importance policy runs in the opposite time direction, hence the
        # +/-1 offset when gathering its z values and labels.
        sign = 1 if policy.direction == 'forward' else -1
        ts = train_ts[samp_t_idx].detach().to(opt.device)
        ms = train_xs[samp_m_idx][:, samp_t_idx, ...].to(opt.device)
        zs_impt = train_zs[samp_m_idx][:, samp_t_idx + sign, ...].to(opt.device)
        train_label = train_labels[samp_m_idx][:, samp_t_idx + sign, ...].to(opt.device)
        optimizer.zero_grad(set_to_none=True)
        # -------- handle for batch_x and batch_t ---------
        # (batch, T, xdim) --> (batch*T, xdim)
        ms = util.flatten_dim01(ms)
        zs_impt = util.flatten_dim01(zs_impt)
        train_label = util.flatten_dim01(train_label)
        ts = ts.repeat(opt.train_bs_x)
        assert ms.shape[0] == ts.shape[0]
        assert zs_impt.shape[0] == ts.shape[0]
        # -------- compute loss and backprop --------
        with torch.cuda.amp.autocast(enabled=use_amp):
            loss, zs = compute_sb_DSB_train(
                opt, train_label, zs_impt, self.dyn, ts, ms, policy, return_z=True, itr=it
            )
        assert not torch.isnan(loss)
        scaler.scale(loss).backward()
        if opt.grad_clip is not None:
            # NOTE(review): with AMP enabled, gradients should be unscaled via
            # scaler.unscale_(optimizer) before clipping — confirm intended.
            torch.nn.utils.clip_grad_norm_(policy.parameters(), opt.grad_clip)
        # scaler.step() performs the optimizer step itself (skipping it on
        # inf/nan gradients). The extra optimizer.step() that previously
        # followed scaler.update() double-applied the gradients each
        # iteration, effectively doubling the learning rate; it was removed.
        scaler.step(optimizer)
        scaler.update()
        ema.update()
        if sched is not None:
            sched.step()
        # -------- logging --------
        zs = util.unflatten_dim01(zs, [len(samp_m_idx), len(samp_t_idx)])
        zs_impt = zs_impt.reshape(zs.shape)
        self.log_sb_alternate_train(
            opt, it, ep, stage, loss, optimizer, direction, num_epoch
        )
@torch.no_grad()
def evaluate(self, opt, stage, rollout=None, resample=False, ode_samp=False):
corrector = (lambda x,t: self.z_f(x,t) + self.z_b(x,t)) if opt.use_corrector else None
ODE_drift = (lambda x,t: 0.5*(self.z_b(x,t) - self.z_f(x,t))) if ode_samp else None
snapshot, ckpt = util.evaluate_stage(opt, stage)
snapshot=True
if ckpt:
self.v_dists = self.dyn.prev_v_boundary
keys = ['z_f','optimizer_f','ema_f','z_b','optimizer_b','ema_b','v_dists']
util.save_checkpoint(opt, self, keys, stage)
if snapshot:
print(util.blue('======Ploting visualization image======'))
for z in [self.z_b, self.z_f]:
z = freeze_policy(z)
ms, _, _, _,_ = self.dyn.sample_traj(
self.ts,
z,
save_traj=True,
corrector=corrector,
rollout=rollout,
resample=resample,
test=True,
ode_drift= ODE_drift
)
fn = "{}/xs-stage{}-{}".format(z.direction, stage,z.direction)
if opt.problem_name =='semicircle':
util.save_toy_traj(
opt, fn, ms.detach().cpu().numpy(), n_snapshot=5, direction=z.direction
)
elif opt.problem_name == 'petal':
util.save_petal_traj(
opt, fn, ms.detach().cpu().numpy(), n_snapshot=5, direction=z.direction
)
elif opt.problem_name =='gmm':
util.save_toy_seg_traj(
opt, fn, ms.detach().cpu().numpy(), n_snapshot=5, direction=z.direction
)
elif opt.problem_name =='RNAsc' and z.direction=='forward':
|
def build_optimizer_ema_sched(opt, policy):
    """Build the (optimizer, EMA tracker, LR scheduler) triple for one policy.

    The learning rate is picked from ``opt.lr_f``/``opt.lr_b`` according to the
    policy's direction ('forward' vs. 'backward').  A StepLR scheduler is only
    created when ``opt.lr_gamma < 1.0``; otherwise ``sched`` is ``None``.

    Args:
        opt: experiment options; reads ``optimizer``, ``lr_f``, ``lr_b``,
            ``l2_norm``, ``lr_gamma`` and ``lr_step``.
        policy: policy network exposing ``direction`` and ``parameters()``.

    Returns:
        (optimizer, ema, sched) where ``sched`` may be ``None``.

    Raises:
        ValueError: if ``opt.optimizer`` is not one of the supported names.
    """
    direction = policy.direction
    optim_cls = {
        'Adam': Adam,
        'AdamW': AdamW,
        'Adagrad': Adagrad,
        'RMSprop': RMSprop,
        'SGD': SGD,
    }.get(opt.optimizer)
    if optim_cls is None:
        # Fail loudly instead of crashing later with "'NoneType' is not callable".
        raise ValueError("Unsupported optimizer: {!r}".format(opt.optimizer))
    optim_kwargs = {
        "lr": opt.lr_f if direction == 'forward' else opt.lr_b,
        "weight_decay": opt.l2_norm,
    }
    if opt.optimizer == 'SGD':
        optim_kwargs['momentum'] = 0.9  # fixed SGD momentum used throughout
    optimizer = optim_cls(policy.parameters(), **optim_kwargs)
    ema = ExponentialMovingAverage(policy.parameters(), decay=0.999)
    if opt.lr_gamma < 1.0:
        sched = lr_scheduler.StepLR(optimizer, step_size=opt.lr_step, gamma=opt.lr_gamma)
    else:
        sched = None
    return optimizer, ema, sched
def freeze_policy(policy):
    """Disable gradients on every parameter and switch *policy* to eval mode.

    Returns the same module so callers can chain ``z = freeze_policy(z)``.
    """
    for param in policy.parameters():
        param.requires_grad_(False)
    policy.eval()
    return policy
def activate_policy(policy):
    """Re-enable gradients on every parameter and switch *policy* to train mode.

    Inverse of ``freeze_policy``; returns the same module for chaining.
    """
    for param in policy.parameters():
        param.requires_grad_(True)
    policy.train()
    return policy
class Runner():
    def __init__(self,opt):
        """Set up data, dynamics, forward/backward policies and their optimizers.

        Reads many fields from ``opt`` (time grid, problem name, batch sizes,
        checkpoint path, logging flags, ...).
        """
        super(Runner,self).__init__()
        self.start_time = time.time()
        # Discretized time grid in [t0, T], shared by both policies.
        self.ts = torch.linspace(opt.t0, opt.T, opt.interval)
        self.x_dists = data.build(opt)
        # for visualize training data
        if opt.problem_name == 'petal' or opt.problem_name =='RNAsc':
            self.x_data = [dist.ground_truth for dist in self.x_dists]
        #Initialize velocity, all gaussian: one (samp_bs, *data_dim) tensor per distribution
        self.v_dists = {dist:opt.v_scale*torch.randn(opt.samp_bs, *opt.data_dim) for dist in range(len(self.x_dists))}
        # Build metrics
        self.metrics = metric_build(opt)
        # build dynamics, forward (z_f) and backward (z_b) policies and corresponding optimizer
        self.dyn = sde.build(opt, self.x_dists, self.v_dists)
        self.z_f = policy.build(opt, self.dyn, 'forward') # p -> q
        self.z_b = policy.build(opt, self.dyn, 'backward') # q -> p
        self.optimizer_f, self.ema_f, self.sched_f = build_optimizer_ema_sched(opt, self.z_f)
        self.optimizer_b, self.ema_b, self.sched_b = build_optimizer_ema_sched(opt, self.z_b)
        if opt.load:
            # Restore checkpointed state, then re-sync the dynamics' velocity
            # boundary with the restored velocities.
            util.restore_checkpoint(opt, self, opt.load)
            self.dyn.prev_v_boundary = self.v_dists
        # tensorboard related things
        # NOTE(review): it_f/it_b only exist when log_tb is on, so
        # update_count() assumes tensorboard logging is enabled -- confirm.
        if opt.log_tb:
            self.it_f = 0
            self.it_b = 0
            self.writer =SummaryWriter(
                log_dir =os.path.join('runs', opt.dir)
            )
def update_count(self, direction):
if direction == 'forward':
self.it_f += 1
return self.it_f
elif direction == 'backward':
self.it_b += 1
return self.it_b
else:
raise RuntimeError()
def get_optimizer_ema_sched(self, z):
if z == self.z_f:
return self.optimizer_f, self.ema_f, self.sched_f
elif z == self.z_b:
return self.optimizer_b, self.ema_b, self.sched_b
else:
raise RuntimeError()
@torch.no_grad()
def sample_train_data(self, opt, policy_opt, policy_impt, reused_sampler, rollout=None, resample=None):
# reuse or sample training ms and zs
try:
reused_traj = next(reused_sampler)
train_ms, train_zs = reused_traj[:,0,...], reused_traj[:,1,...]
print('generate train data from [{}]!'.format(util.green('reused samper')))
except:
_, ema, _ = self.get_optimizer_ema_sched(policy_opt)
_, ema_impt, _ = self.get_optimizer_ema_sched(policy_impt)
with ema.average_parameters(), ema_impt.average_parameters():
policy_impt = freeze_policy(policy_impt)
policy_opt = freeze_policy(policy_opt)
corrector = (lambda x,t: policy_impt(x,t) + policy_opt(x,t)) if opt.use_corrector else None
ms, zs, _, labels, ts = self.dyn.sample_traj(self.ts, policy_impt, corrector=corrector, rollout=rollout, resample=resample)
train_ms = ms.detach().cpu(); del ms
train_zs = zs.detach().cpu(); del zs
train_labels = labels.detach().cpu(); del labels
train_ts = ts.detach().cpu(); del ts
print('generate train data from [{}]!'.format(util.red('sampling')))
assert train_ms.shape[0] == opt.samp_bs
assert train_ms.shape[1] == len(train_ts)
gc.collect()
return train_ms, train_zs, train_ts, train_labels
def sb_alternate_train(self, opt):
reused_sampler = self.evaluate(opt, 0, rollout = [0,opt.num_dist-1], resample=False,ode_samp=False)
bridge_ep = boundry_ep = opt.num_epoch
if opt.problem_name =='petal': bridge_ep = 1 #Special handle for petal. the distance between distributions are too close.
for stage in range(opt.num_stage):
self.sb_alternate_train_stage(
opt, stage, boundry_ep, 'backward', rollout = [0,opt.num_dist-1], resample=True # train backward Kboundary
)
self.sb_alternate_train_stage(
opt, stage, boundry_ep, 'forward', rollout = [0,opt.num_dist-1], resample=True # train forward Kboundary
)
self.sb_alternate_train_stage(
opt, stage, bridge_ep, 'backward', rollout = [0,opt.num_dist-1], resample=False #Train K bridge backward
)
self.sb_alternate_train_stage(
opt, stage, boundry_ep, 'forward', rollout = [0,opt.num_dist-1], resample=True #Train forward Kboundary
)
self.sb_alternate_train_stage(
opt, stage, boundry_ep, 'backward', rollout = [0,opt.num_dist-1], resample=True #Train backward Kboundary
)
self.sb_alternate_train_stage(
opt, stage, bridge_ep, 'forward', rollout = [0,opt.num_dist-1], resample=False #Train K bridge forward
)
reused_sampler = self.evaluate(opt, stage+1, rollout = [0,opt.num_dist-1],resample=False)
if opt.log_tb: self.writer.close()
def sb_alternate_train_stage(self, opt, stage, epoch, direction, reused_sampler=None, rollout=False, resample=True):
policy_opt, policy_impt = {
'forward': [self.z_f, self.z_b], # train forwad, sample from backward
'backward': [self.z_b, self.z_f], # train backward, sample from forward
}.get(direction)
for ep in range(epoch):
# prepare training data
train_ms, train_zs, train_ts, train_labels = self.sample_train_data(
opt, policy_opt, policy_impt, reused_sampler, rollout=rollout, resample=resample
)
# train one epoch
policy_impt = freeze_policy(policy_impt)
policy_opt = activate_policy(policy_opt)
self.DSB_alternate_train_ep(
opt, ep, stage, direction, train_ms, train_zs, train_ts, train_labels, policy_opt, epoch
)
def DSB_alternate_train_ep(
self, opt, ep, stage, direction, train_xs, train_zs, train_ts, train_labels, policy, num_epoch
):
assert train_xs.shape[0] == opt.samp_bs
assert train_zs.shape[0] == opt.samp_bs
assert direction == policy.direction
optimizer, ema, sched = self.get_optimizer_ema_sched(policy)
use_amp=opt.use_amp
scaler = torch.cuda.amp.GradScaler(enabled=use_amp)
for it in range(opt.num_itr):
# -------- sample x_idx and t_idx \in [0, interval] --------
samp_m_idx = torch.randint(opt.samp_bs, (opt.train_bs_x,),device='cpu')
samp_t_idx = util.time_sample(opt.interval, policy.direction, opt.train_bs_t)
if opt.use_arange_t: samp_t_idx = util.time_arange(train_ts.shape[0], policy.direction)
# -------- build sample --------
sign=1 if policy.direction=='forward' else -1
ts = train_ts[samp_t_idx].detach().to(opt.device)
ms = train_xs[samp_m_idx][:, samp_t_idx, ...].to(opt.device)
zs_impt = train_zs[samp_m_idx][:, samp_t_idx+sign, ...].to(opt.device)
train_label = train_labels[samp_m_idx][:, samp_t_idx+sign, ...].to(opt.device)
optimizer.zero_grad(set_to_none=True)
# -------- handle for batch_x and batch_t ---------
# (batch, T, xdim) --> (batch*T, xdim)
ms = util.flatten_dim01(ms)
zs_impt = util.flatten_dim01(zs_impt)
train_label = util.flatten_dim01(train_label)
ts = ts.repeat(opt.train_bs_x)
assert ms.shape[0] == ts.shape[0]
assert zs_impt.shape[0] == ts.shape[0]
# -------- compute loss and backprop --------
with torch.cuda.amp.autocast(enabled=use_amp):
loss, zs = compute_sb_DSB_train(
opt, train_label, zs_impt,self.dyn, ts, ms, policy, return_z=True,itr=it
)
assert not torch.isnan(loss)
scaler.scale(loss).backward()
if opt.grad_clip is not None:
torch.nn.utils.clip_grad_norm(policy.parameters(), opt.grad_clip)
scaler.step(optimizer)
scaler.update()
optimizer.step()
ema.update()
if sched is not None: sched.step()
# -------- logging --------
zs = util.unflatten_dim01(zs, [len(samp_m_idx), len(samp_t_idx)])
zs_impt = zs_impt.reshape(zs.shape)
self.log_sb_alternate_train(
opt, it, ep, stage, loss, optimizer, direction, num_epoch
)
@torch.no_grad()
def evaluate(self, opt, stage, rollout=None, resample=False, ode_samp=False):
corrector = (lambda x,t: self.z_f(x,t) + self.z_b(x,t)) if opt.use_corrector else None
ODE_drift = (lambda x,t: 0.5*(self.z_b(x,t) - self.z_f(x,t))) if ode_samp else None
snapshot, ckpt = util.evaluate_stage(opt, stage)
snapshot=True
if ckpt:
self.v_dists = self.dyn.prev_v_boundary
keys = ['z_f','optimizer_f','ema_f','z_b','optimizer_b','ema_b','v_dists']
util.save_checkpoint(opt, self, keys, stage)
if snapshot:
print(util.blue('======Ploting visualization image======'))
for z in [self.z_b, self.z_f]:
z = freeze_policy(z)
ms, _, _, _,_ = self.dyn.sample_traj(
self.ts,
z,
save_traj=True,
corrector=corrector,
rollout=rollout,
resample=resample,
test=True,
ode_drift= ODE_drift
)
fn = "{}/xs-stage{}-{}".format(z.direction, stage,z.direction)
if opt.problem_name =='semicircle':
util.save_toy_traj(
opt, fn, ms.detach().cpu().numpy(), n_snapshot=5, direction=z.direction
)
elif opt.problem_name == 'petal':
util.save_petal_traj(
opt, fn, ms.detach().cpu().numpy(), n_snapshot=5, direction=z.direction
)
elif opt.problem_name =='gmm':
util.save_toy_seg_traj(
opt, fn, ms.detach().cpu().numpy(), n_snapshot=5, direction=z.direction
)
elif opt.problem_name =='RNAsc' and z.direction=='forward': | processed_data = compute_metrics(opt, ms.detach().cpu().numpy(), self.x_data, self.metrics, self, stage) | 1 | 2023-11-05 21:12:37+00:00 | 8k |
mileswyn/SAMIHS | train.py | [
{
"identifier": "get_config",
"path": "utils/config.py",
"snippet": "def get_config(task=\"BCIHM\"):\n if task == \"BCIHM\":\n return Config_BCIHM()\n elif task == \"Instance\":\n return Config_Intance()\n else:\n assert(\"We do not have the related dataset, please choose a... | from ast import arg
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from utils.config import get_config
from utils.evaluation import get_eval
from models.model_dict import get_model
from utils.data_ihs import BCIHM, Transform2D_BCIHM, Instance, Transform2D_Instance
from utils.loss_functions.sam_loss import get_criterion
from utils.generate_prompts import get_click_prompt
from tqdm import tqdm
import os
import argparse
import torch
import torch.optim as optim
import numpy as np
import torch
import time
import random | 7,062 | if args.keep_log:
logtimestr = time.strftime('%m%d%H%M') # initialize the tensorboard for record the training process
boardpath = opt.tensorboard_path + args.modelname + opt.save_path_code + logtimestr
if not os.path.isdir(boardpath):
os.makedirs(boardpath)
TensorWriter = SummaryWriter(boardpath)
# ========== add the seed to make sure the results are reproducible ==========
seed_value = 1234 # the number of seed
np.random.seed(seed_value) # set random seed for numpy
random.seed(seed_value) # set random seed for python
os.environ['PYTHONHASHSEED'] = str(seed_value) # avoid hash random
torch.manual_seed(seed_value) # set random seed for CPU
torch.cuda.manual_seed(seed_value) # set random seed for one GPU
torch.cuda.manual_seed_all(seed_value) # set random seed for all GPU
torch.backends.cudnn.deterministic = True # set random seed for convolution
# ========== model and data preparation ==========
# register the sam model
model = get_model(args.modelname, args=args, opt=opt)
# opt.batch_size = args.batch_size * args.n_gpu
if args.task == 'BCIHM':
tf_train = Transform2D_BCIHM(mode=opt.mode, img_size=args.encoder_input_size, low_img_size=args.low_image_size, ori_size=opt.img_size, crop=opt.crop, p_flip=0.0, p_rota=0.5, p_scale=0.5, p_gaussn=0.0,
p_contr=0.5, p_gama=0.5, p_distor=0.0, color_jitter_params=None, long_mask=True)
tf_val = Transform2D_BCIHM(img_size=args.encoder_input_size, low_img_size=args.low_image_size, ori_size=opt.img_size, crop=opt.crop, p_flip=0, color_jitter_params=None, long_mask=True)
train_dataset = BCIHM(opt.data_path, opt.train_split, tf_train, img_size=args.encoder_input_size)
val_dataset = BCIHM(opt.data_path, opt.val_split, tf_val, img_size=args.encoder_input_size)
elif args.task == 'Instance':
tf_train = Transform2D_Instance(mode=opt.mode, img_size=args.encoder_input_size, low_img_size=args.low_image_size, ori_size=opt.img_size, crop=opt.crop, p_flip=0.0, p_rota=0.5, p_scale=0.5, p_gaussn=0.0,
p_contr=0.5, p_gama=0.5, p_distor=0.0, color_jitter_params=None, long_mask=True)
tf_val = Transform2D_Instance(img_size=args.encoder_input_size, low_img_size=args.low_image_size, ori_size=opt.img_size, crop=opt.crop, p_flip=0, color_jitter_params=None, long_mask=True)
train_dataset = Instance(opt.data_path, opt.train_split, tf_train, img_size=args.encoder_input_size)
val_dataset = Instance(opt.data_path, opt.val_split, tf_val, img_size=args.encoder_input_size)
else:
assert("We do not have the related dataset, please choose another task.")
trainloader = DataLoader(train_dataset, batch_size=opt.batch_size, shuffle=True, num_workers=8, pin_memory=True)
valloader = DataLoader(val_dataset, batch_size=2, shuffle=False, num_workers=4, pin_memory=True)
model.to(device)
if opt.pre_trained:
checkpoint = torch.load(opt.load_path)
new_state_dict = {}
for k,v in checkpoint.items():
if k[:7] == 'module.':
new_state_dict[k[7:]] = v
else:
new_state_dict[k] = v
model.load_state_dict(new_state_dict)
if args.n_gpu > 1:
model = nn.DataParallel(model)
if args.warmup:
b_lr = args.base_lr / args.warmup_period
optimizer = torch.optim.AdamW(filter(lambda p: p.requires_grad, model.parameters()), lr=b_lr, betas=(0.9, 0.999), weight_decay=0.1)
else:
b_lr = args.base_lr
optimizer = optim.Adam(model.parameters(), lr=args.base_lr, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.5) #learning rate decay
criterion = get_criterion(modelname=args.modelname, opt=opt)
pytorch_total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print("Total_params: {}".format(pytorch_total_params))
# [n for n, value in model.named_parameters() if value.requires_grad == True]
# ========== begin to train the model ==========
iter_num = 0
max_iterations = opt.epochs * len(trainloader)
best_dice, loss_log, dice_log = 0.0, np.zeros(opt.epochs+1), np.zeros(opt.epochs+1)
for epoch in range(opt.epochs):
# ---------- Train ----------
model.train()
optimizer.zero_grad()
train_losses = 0
with tqdm(total=len(trainloader), desc=f'Epoch {epoch}', unit='img') as pbar:
for batch_idx, (datapack) in enumerate(trainloader):
imgs = datapack['image'].to(dtype = torch.float32, device=opt.device)
masks = datapack['low_mask'].to(dtype = torch.float32, device=opt.device)
bbox = torch.as_tensor(datapack['bbox'], dtype=torch.float32, device=opt.device)
pt = get_click_prompt(datapack, opt)
# ---------- forward ----------
pred = model(imgs, pt, bbox)
train_loss = criterion(pred, masks)
# ---------- backward ----------
train_loss.backward()
optimizer.step()
optimizer.zero_grad()
pbar.set_postfix(**{'loss (batch)': train_loss.item()})
train_losses += train_loss.item()
# ---------- Adjust learning rate ----------
if args.warmup and iter_num < args.warmup_period:
lr_ = args.base_lr * ((iter_num + 1) / args.warmup_period)
for param_group in optimizer.param_groups:
param_group['lr'] = lr_
else:
if args.warmup:
shift_iter = iter_num - args.warmup_period
assert shift_iter >= 0, f'Shift iter is {shift_iter}, smaller than zero'
lr_ = args.base_lr * (1.0 - shift_iter / max_iterations) ** 0.9
for param_group in optimizer.param_groups:
param_group['lr'] = lr_
iter_num = iter_num + 1
pbar.update()
scheduler.step()
# ---------- Write log ----------
print('epoch [{}/{}], train loss:{:.4f}'.format(epoch, opt.epochs, train_losses / (batch_idx + 1)))
print('lr: ', optimizer.param_groups[0]['lr'])
if args.keep_log:
TensorWriter.add_scalar('train_loss', train_losses / (batch_idx + 1), epoch)
TensorWriter.add_scalar('learning rate', optimizer.state_dict()['param_groups'][0]['lr'], epoch)
loss_log[epoch] = train_losses / (batch_idx + 1)
# ---------- Validation ----------
if epoch % opt.eval_freq == 0:
model.eval()
|
def main():
# ========== parameters setting ==========
parser = argparse.ArgumentParser(description='Networks')
parser.add_argument('-task', required=True, default='BCIHM', help='task or dataset name')
parser.add_argument('-sam_ckpt', required=True, type=str, default='/data/wyn/Medical-SAM-Adapter/ckpt/sam_vit_b_01ec64.pth', help='Pretrained checkpoint of SAM')
parser.add_argument('-fold', required=True, type=int, default=0, help='task or dataset name')
parser.add_argument('--modelname', default='SAMIHS', type=str, help='type of model, e.g., SAM, SAMFull, MedSAM, MSA, SAMed, SAMUS...')
parser.add_argument('--encoder_input_size', type=int, default=1024, help='the image size of the encoder input, 1024 in SAM, MSA, SAMIHS, 512 in SAMUS')
parser.add_argument('--low_image_size', type=int, default=256, help='the output image embedding size')
parser.add_argument('--vit_name', type=str, default='vit_b', help='select the vit model for the image encoder of sam')
# TODO
parser.add_argument('--n_gpu', type=int, default=1, help='total gpu')
parser.add_argument('--base_lr', type=float, default=0.0005, help='segmentation network learning rate, 0.005 for SAMed, 0.0001 for MSA')
parser.add_argument('--warmup', type=bool, default=False, help='If activated, warp up the learning from a lower lr to the base_lr')
parser.add_argument('--warmup_period', type=int, default=250, help='Warp up iterations, only valid whrn warmup is activated')
parser.add_argument('--keep_log', type=bool, default=False, help='keep the loss&lr&dice during training or not')
args = parser.parse_args()
opt = get_config(args.task)
opt.mode = 'train'
device = torch.device(opt.device)
if args.keep_log:
logtimestr = time.strftime('%m%d%H%M') # initialize the tensorboard for record the training process
boardpath = opt.tensorboard_path + args.modelname + opt.save_path_code + logtimestr
if not os.path.isdir(boardpath):
os.makedirs(boardpath)
TensorWriter = SummaryWriter(boardpath)
# ========== add the seed to make sure the results are reproducible ==========
seed_value = 1234 # the number of seed
np.random.seed(seed_value) # set random seed for numpy
random.seed(seed_value) # set random seed for python
os.environ['PYTHONHASHSEED'] = str(seed_value) # avoid hash random
torch.manual_seed(seed_value) # set random seed for CPU
torch.cuda.manual_seed(seed_value) # set random seed for one GPU
torch.cuda.manual_seed_all(seed_value) # set random seed for all GPU
torch.backends.cudnn.deterministic = True # set random seed for convolution
# ========== model and data preparation ==========
# register the sam model
model = get_model(args.modelname, args=args, opt=opt)
# opt.batch_size = args.batch_size * args.n_gpu
if args.task == 'BCIHM':
tf_train = Transform2D_BCIHM(mode=opt.mode, img_size=args.encoder_input_size, low_img_size=args.low_image_size, ori_size=opt.img_size, crop=opt.crop, p_flip=0.0, p_rota=0.5, p_scale=0.5, p_gaussn=0.0,
p_contr=0.5, p_gama=0.5, p_distor=0.0, color_jitter_params=None, long_mask=True)
tf_val = Transform2D_BCIHM(img_size=args.encoder_input_size, low_img_size=args.low_image_size, ori_size=opt.img_size, crop=opt.crop, p_flip=0, color_jitter_params=None, long_mask=True)
train_dataset = BCIHM(opt.data_path, opt.train_split, tf_train, img_size=args.encoder_input_size)
val_dataset = BCIHM(opt.data_path, opt.val_split, tf_val, img_size=args.encoder_input_size)
elif args.task == 'Instance':
tf_train = Transform2D_Instance(mode=opt.mode, img_size=args.encoder_input_size, low_img_size=args.low_image_size, ori_size=opt.img_size, crop=opt.crop, p_flip=0.0, p_rota=0.5, p_scale=0.5, p_gaussn=0.0,
p_contr=0.5, p_gama=0.5, p_distor=0.0, color_jitter_params=None, long_mask=True)
tf_val = Transform2D_Instance(img_size=args.encoder_input_size, low_img_size=args.low_image_size, ori_size=opt.img_size, crop=opt.crop, p_flip=0, color_jitter_params=None, long_mask=True)
train_dataset = Instance(opt.data_path, opt.train_split, tf_train, img_size=args.encoder_input_size)
val_dataset = Instance(opt.data_path, opt.val_split, tf_val, img_size=args.encoder_input_size)
else:
assert("We do not have the related dataset, please choose another task.")
trainloader = DataLoader(train_dataset, batch_size=opt.batch_size, shuffle=True, num_workers=8, pin_memory=True)
valloader = DataLoader(val_dataset, batch_size=2, shuffle=False, num_workers=4, pin_memory=True)
model.to(device)
if opt.pre_trained:
checkpoint = torch.load(opt.load_path)
new_state_dict = {}
for k,v in checkpoint.items():
if k[:7] == 'module.':
new_state_dict[k[7:]] = v
else:
new_state_dict[k] = v
model.load_state_dict(new_state_dict)
if args.n_gpu > 1:
model = nn.DataParallel(model)
if args.warmup:
b_lr = args.base_lr / args.warmup_period
optimizer = torch.optim.AdamW(filter(lambda p: p.requires_grad, model.parameters()), lr=b_lr, betas=(0.9, 0.999), weight_decay=0.1)
else:
b_lr = args.base_lr
optimizer = optim.Adam(model.parameters(), lr=args.base_lr, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.5) #learning rate decay
criterion = get_criterion(modelname=args.modelname, opt=opt)
pytorch_total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print("Total_params: {}".format(pytorch_total_params))
# [n for n, value in model.named_parameters() if value.requires_grad == True]
# ========== begin to train the model ==========
iter_num = 0
max_iterations = opt.epochs * len(trainloader)
best_dice, loss_log, dice_log = 0.0, np.zeros(opt.epochs+1), np.zeros(opt.epochs+1)
for epoch in range(opt.epochs):
# ---------- Train ----------
model.train()
optimizer.zero_grad()
train_losses = 0
with tqdm(total=len(trainloader), desc=f'Epoch {epoch}', unit='img') as pbar:
for batch_idx, (datapack) in enumerate(trainloader):
imgs = datapack['image'].to(dtype = torch.float32, device=opt.device)
masks = datapack['low_mask'].to(dtype = torch.float32, device=opt.device)
bbox = torch.as_tensor(datapack['bbox'], dtype=torch.float32, device=opt.device)
pt = get_click_prompt(datapack, opt)
# ---------- forward ----------
pred = model(imgs, pt, bbox)
train_loss = criterion(pred, masks)
# ---------- backward ----------
train_loss.backward()
optimizer.step()
optimizer.zero_grad()
pbar.set_postfix(**{'loss (batch)': train_loss.item()})
train_losses += train_loss.item()
# ---------- Adjust learning rate ----------
if args.warmup and iter_num < args.warmup_period:
lr_ = args.base_lr * ((iter_num + 1) / args.warmup_period)
for param_group in optimizer.param_groups:
param_group['lr'] = lr_
else:
if args.warmup:
shift_iter = iter_num - args.warmup_period
assert shift_iter >= 0, f'Shift iter is {shift_iter}, smaller than zero'
lr_ = args.base_lr * (1.0 - shift_iter / max_iterations) ** 0.9
for param_group in optimizer.param_groups:
param_group['lr'] = lr_
iter_num = iter_num + 1
pbar.update()
scheduler.step()
# ---------- Write log ----------
print('epoch [{}/{}], train loss:{:.4f}'.format(epoch, opt.epochs, train_losses / (batch_idx + 1)))
print('lr: ', optimizer.param_groups[0]['lr'])
if args.keep_log:
TensorWriter.add_scalar('train_loss', train_losses / (batch_idx + 1), epoch)
TensorWriter.add_scalar('learning rate', optimizer.state_dict()['param_groups'][0]['lr'], epoch)
loss_log[epoch] = train_losses / (batch_idx + 1)
# ---------- Validation ----------
if epoch % opt.eval_freq == 0:
model.eval() | dices, mean_dice, _, val_losses = get_eval(valloader, model, criterion=criterion, opt=opt, args=args) | 1 | 2023-11-09 07:26:33+00:00 | 8k |
silicx/ObjectConceptLearning | models/OCRN_intervention.py | [
{
"identifier": "OcrnBaseModel",
"path": "models/base_models.py",
"snippet": "class OcrnBaseModel(nn.Module):\n\n def __init__(self, dataset, args):\n super(OcrnBaseModel, self).__init__()\n\n self.args = args\n self.num_obj = len(dataset.objs)\n self.num_attr = len(datase... | from typing import final
from models.base_models import OcrnBaseModel, MLP, ParallelMLP, Aggregator, build_counterfactual, CounterfactualHingeLoss
import torch
import torch.nn as nn
import math | 4,500 |
self.parallel_attr_feat = ParallelMLP(
args.attr_out_rep_dim, args.parallel_attr_rep_dim, num_para=self.num_attr,
hidden_layers=args.fc_para_feat, layernorm=args.layernorm, out_relu=args.out_relu)
self.attr_auxIA_classifier = ParallelMLP(args.parallel_attr_rep_dim, 1, num_para=self.num_attr, hidden_layers=args.fc_cls,
layernorm=args.layernorm, share_last_fc=True)
self.attr_IA_classifier = MLP(args.attr_rep_dim, self.num_attr, hidden_layers=args.fc_cls, batchnorm=args.batchnorm)
self.aff_IA_classifier = MLP(args.aff_rep_dim, self.num_aff, hidden_layers=args.fc_cls, batchnorm=args.batchnorm)
assert args.sep_CA_cls
self.attr_CA_classifier = MLP(args.attr_rep_dim, self.num_attr, hidden_layers=args.fc_cls, batchnorm=args.batchnorm)
self.aff_CA_classifier = MLP(args.aff_rep_dim, self.num_aff, hidden_layers=args.fc_cls, batchnorm=args.batchnorm)
self.mseloss = torch.nn.MSELoss()
self.hinge = CounterfactualHingeLoss(args.counterfactual_margin)
def forward(self, batch, require_loss=True):
if self.backbone:
feature = self.backbone(batch["image"], batch["main_bbox"])
batch["gt_attr"] = torch.cat(batch["gt_attr"], 0)
batch["gt_aff"] = torch.cat(batch["gt_aff"], 0)
else:
feature = batch["image"]
batchsize = feature.size(0)
gt_all_CAttr_vec = self.category_attr
gt_all_CAff_vec = self.category_aff
# Attibute module
feat_CAttr = self.fc_feat2attr(self.mean_obj_features) # (n_obj, dim_attr)
feat_IAttr = self.attr_instantialize(
feat_CAttr, feature,
in_postproc = lambda x:x.unsqueeze(0).expand(batchsize, -1, -1),
cond_postproc = lambda x:x.unsqueeze(1).expand(-1, self.num_obj, -1)
) # (n_obj, dim), (bz, dim) -> (bz, n_obj, dim)
# feat_IAttr = self.attr_inst_bn(feat_IAttr)
feat_mean_IAttr = torch.einsum("ijk,j->ik", feat_IAttr, self.obj_frequence)
# (bz, dim_attr)
logit_CAttr = self.attr_CA_classifier(feat_CAttr)
logit_IAttr = self.attr_IA_classifier(feat_mean_IAttr)
feat_parallel_IAttr = self.parallel_attr_feat(feat_mean_IAttr.unsqueeze(1).expand(-1,self.num_attr, -1))
logit_aux_IAttr = self.attr_auxIA_classifier(feat_parallel_IAttr).squeeze(-1)
# Affordance module
feat_aggr_IAttr = self.aggregator(feat_parallel_IAttr)
feat_CAff = self.fc_feat2aff(
torch.cat([self.mean_obj_features, feat_CAttr], 1)
) # (n_obj, dim_aff)
feat_IAff = self.aff_instantialize(
feat_CAff, torch.cat([feature, feat_aggr_IAttr], 1),
in_postproc = lambda x:x.unsqueeze(0).expand(batchsize, -1, -1),
cond_postproc = lambda x:x.unsqueeze(1).expand(-1, self.num_obj, -1)
)
# (n_obj, dim), (bz, dim) -> (bz, n_obj, dim)
# feat_IAff = self.aff_inst_bn(feat_IAff)
feat_mean_IAff = torch.einsum("ijk,j->ik", feat_IAff, self.obj_frequence)
# (bz, dim_aff)
logit_CAff = self.aff_CA_classifier(feat_CAff)
logit_IAff = self.aff_IA_classifier(feat_mean_IAff)
prob_IAttr = torch.sigmoid(logit_IAttr)
prob_IAff = torch.sigmoid(logit_IAff)
if require_loss:
losses = {}
if self.args.lambda_attr > 0:
if self.args.lambda_cls_CA>0:
losses["loss_attr/CA_cls"] = self.attr_bce(logit_CAttr, gt_all_CAttr_vec)
if self.args.lambda_cls_IA>0:
losses["loss_attr/IA_cls"] = self.attr_bce(logit_IAttr, batch["gt_attr"])
if self.args.lambda_cls_inst_IA>0:
logit_inst_IAttr = self.attr_IA_classifier(feat_IAttr)
losses["loss_attr/inst_IA_cls"] = self.attr_bce(
logit_inst_IAttr, batch["gt_attr"].unsqueeze(1).expand(-1, self.num_obj, -1))
if any([x.startswith("loss_attr") for x in losses]):
losses["loss_attr/total"] = (
self.args.lambda_cls_CA * losses.get("loss_attr/CA_cls", 0.) +
self.args.lambda_cls_IA * losses.get("loss_attr/IA_cls", 0.) +
self.args.lambda_cls_inst_IA * losses.get("loss_attr/inst_IA_cls", 0.) )
if self.args.lambda_aff > 0:
if self.args.lambda_cls_CA>0:
losses["loss_aff/CA_cls"] = self.aff_bce(logit_CAff, gt_all_CAff_vec)
if self.args.lambda_cls_IA>0:
losses["loss_aff/IA_cls"] = self.aff_bce(logit_IAff, batch["gt_aff"])
if self.args.lambda_cls_inst_IA>0:
logit_inst_IAff = self.aff_IA_classifier(feat_IAff)
losses["loss_aff/inst_IA_cls"] = self.aff_bce(
logit_inst_IAff, batch["gt_aff"].unsqueeze(1).expand(-1, self.num_obj, -1))
if any([x.startswith("loss_aff") for x in losses]):
losses["loss_aff/total"] = (
self.args.lambda_cls_CA * losses.get("loss_aff/CA_cls", 0.) +
self.args.lambda_cls_IA * losses.get("loss_aff/IA_cls", 0.) +
self.args.lambda_cls_inst_IA * losses.get("loss_aff/inst_IA_cls", 0.) )
if self.args.lambda_cf > 0 and batch['gt_causal'].shape[0] > 0:
|
@final
class FullSelfAttention(nn.Module):
    """Conditional feature transform: ``output = f(feat, cond)``.

    Both inputs are projected to query/key/value spaces; two scalar sigmoid
    gates computed from scaled dot products blend the value vectors, and the
    concatenated result is compressed back to ``feat_dim``.
    """
    def __init__(self, feat_dim, cond_dim, hidden_dim, args):
        """ output = f(input, condition)
        in_dim/cond_dim/out_dim = dimension of input/condition/output
        fc_in_hid/fc_cond_hid = hidden layers of fc after input/condition
        fc_out_hid = hidden layers of fc before output
        """
        super(FullSelfAttention, self).__init__()
        fc_in_hid = args.fc_pre
        fc_cond_hid = args.fc_att
        fc_out_hid = args.fc_compress
        # Q/K/V projections for the input feature (no bias).
        self.fc_feat_Q = MLP(feat_dim, hidden_dim, fc_in_hid, args.batchnorm, bias=False)
        self.fc_feat_V = MLP(feat_dim, hidden_dim, fc_in_hid, args.batchnorm, bias=False)
        self.fc_feat_K = MLP(feat_dim, hidden_dim, fc_in_hid, args.batchnorm, bias=False)
        # Q/K/V projections for the condition feature (no bias).
        self.fc_cond_Q = MLP(cond_dim, hidden_dim, fc_cond_hid, args.batchnorm, bias=False)
        self.fc_cond_V = MLP(cond_dim, hidden_dim, fc_cond_hid, args.batchnorm, bias=False)
        self.fc_cond_K = MLP(cond_dim, hidden_dim, fc_cond_hid, args.batchnorm, bias=False)
        # Reciprocal temperature 1/sqrt(hidden_dim): standard dot-product scaling.
        self.rtemp = 1.0/math.sqrt(hidden_dim)
        # Compress the two blended value vectors (2*hidden_dim) back to feat_dim.
        self.fc_out = MLP(2*hidden_dim, feat_dim, fc_out_hid, args.batchnorm, out_relu=args.out_relu)
    def forward(self, feat, cond, in_postproc=lambda x:x, cond_postproc=lambda x:x):
        """Blend *feat* and *cond* value vectors with learned sigmoid gates.

        ``in_postproc``/``cond_postproc`` reshape the projected tensors (e.g.
        broadcast over batch/object axes, as done by the caller) before mixing.
        """
        feat_Q = in_postproc( self.fc_feat_Q(feat) ) # (bz*obj, hid_dim)
        feat_V = in_postproc( self.fc_feat_V(feat) )
        feat_K = in_postproc( self.fc_feat_K(feat) )
        cond_Q = cond_postproc( self.fc_cond_Q(cond) )
        cond_V = cond_postproc( self.fc_cond_V(cond) )
        cond_K = cond_postproc( self.fc_cond_K(cond) )
        # Scaled key difference; both gates score against the same direction.
        K_diff = (feat_K - cond_K) * self.rtemp
        KQ_ff_fc = (feat_Q * K_diff).sum(-1) # (bz*obj, )
        KQ_cf_cc = (cond_Q * K_diff).sum(-1)
        # Per-row scalar gates in (0, 1), broadcast over the hidden dim.
        feat_att_f = torch.sigmoid(KQ_ff_fc).unsqueeze(-1)
        cond_att_f = torch.sigmoid(KQ_cf_cc).unsqueeze(-1)
        # Interpolate from cond_V toward feat_V along their difference,
        # once per gate; gate==1 recovers feat_V, gate==0 recovers cond_V.
        V_diff = (feat_V - cond_V)
        hid_feat = V_diff*feat_att_f + cond_V
        hid_cond = V_diff*cond_att_f + cond_V
        hidden = torch.cat([hid_feat, hid_cond], -1)
        out = self.fc_out(hidden)
        return out
# @final
class Model(OcrnBaseModel):
def __init__(self, dataset, args):
super(Model, self).__init__(dataset, args)
# model param
self.fc_feat2attr = MLP(self.feat_dim, args.attr_rep_dim, args.fc_feat2attr, args.batchnorm, out_relu=args.out_relu, out_bn=args.batchnorm)
self.fc_feat2aff = MLP(self.feat_dim + args.attr_rep_dim, args.aff_rep_dim, args.fc_feat2aff, args.batchnorm, out_relu=args.out_relu, out_bn=args.batchnorm)
self.attr_instantialize = FullSelfAttention(args.attr_rep_dim, self.feat_dim, args.attr_hidden_rep_dim, args=args)
self.aff_instantialize = FullSelfAttention(args.aff_rep_dim, self.feat_dim + args.aggr_rep_dim, args.aff_hidden_rep_dim, args=args)
self.aggregator = Aggregator(self.args.aggregation, args, self.num_attr)
self.parallel_attr_feat = ParallelMLP(
args.attr_out_rep_dim, args.parallel_attr_rep_dim, num_para=self.num_attr,
hidden_layers=args.fc_para_feat, layernorm=args.layernorm, out_relu=args.out_relu)
self.attr_auxIA_classifier = ParallelMLP(args.parallel_attr_rep_dim, 1, num_para=self.num_attr, hidden_layers=args.fc_cls,
layernorm=args.layernorm, share_last_fc=True)
self.attr_IA_classifier = MLP(args.attr_rep_dim, self.num_attr, hidden_layers=args.fc_cls, batchnorm=args.batchnorm)
self.aff_IA_classifier = MLP(args.aff_rep_dim, self.num_aff, hidden_layers=args.fc_cls, batchnorm=args.batchnorm)
assert args.sep_CA_cls
self.attr_CA_classifier = MLP(args.attr_rep_dim, self.num_attr, hidden_layers=args.fc_cls, batchnorm=args.batchnorm)
self.aff_CA_classifier = MLP(args.aff_rep_dim, self.num_aff, hidden_layers=args.fc_cls, batchnorm=args.batchnorm)
self.mseloss = torch.nn.MSELoss()
self.hinge = CounterfactualHingeLoss(args.counterfactual_margin)
def forward(self, batch, require_loss=True):
if self.backbone:
feature = self.backbone(batch["image"], batch["main_bbox"])
batch["gt_attr"] = torch.cat(batch["gt_attr"], 0)
batch["gt_aff"] = torch.cat(batch["gt_aff"], 0)
else:
feature = batch["image"]
batchsize = feature.size(0)
gt_all_CAttr_vec = self.category_attr
gt_all_CAff_vec = self.category_aff
# Attibute module
feat_CAttr = self.fc_feat2attr(self.mean_obj_features) # (n_obj, dim_attr)
feat_IAttr = self.attr_instantialize(
feat_CAttr, feature,
in_postproc = lambda x:x.unsqueeze(0).expand(batchsize, -1, -1),
cond_postproc = lambda x:x.unsqueeze(1).expand(-1, self.num_obj, -1)
) # (n_obj, dim), (bz, dim) -> (bz, n_obj, dim)
# feat_IAttr = self.attr_inst_bn(feat_IAttr)
feat_mean_IAttr = torch.einsum("ijk,j->ik", feat_IAttr, self.obj_frequence)
# (bz, dim_attr)
logit_CAttr = self.attr_CA_classifier(feat_CAttr)
logit_IAttr = self.attr_IA_classifier(feat_mean_IAttr)
feat_parallel_IAttr = self.parallel_attr_feat(feat_mean_IAttr.unsqueeze(1).expand(-1,self.num_attr, -1))
logit_aux_IAttr = self.attr_auxIA_classifier(feat_parallel_IAttr).squeeze(-1)
# Affordance module
feat_aggr_IAttr = self.aggregator(feat_parallel_IAttr)
feat_CAff = self.fc_feat2aff(
torch.cat([self.mean_obj_features, feat_CAttr], 1)
) # (n_obj, dim_aff)
feat_IAff = self.aff_instantialize(
feat_CAff, torch.cat([feature, feat_aggr_IAttr], 1),
in_postproc = lambda x:x.unsqueeze(0).expand(batchsize, -1, -1),
cond_postproc = lambda x:x.unsqueeze(1).expand(-1, self.num_obj, -1)
)
# (n_obj, dim), (bz, dim) -> (bz, n_obj, dim)
# feat_IAff = self.aff_inst_bn(feat_IAff)
feat_mean_IAff = torch.einsum("ijk,j->ik", feat_IAff, self.obj_frequence)
# (bz, dim_aff)
logit_CAff = self.aff_CA_classifier(feat_CAff)
logit_IAff = self.aff_IA_classifier(feat_mean_IAff)
prob_IAttr = torch.sigmoid(logit_IAttr)
prob_IAff = torch.sigmoid(logit_IAff)
if require_loss:
losses = {}
if self.args.lambda_attr > 0:
if self.args.lambda_cls_CA>0:
losses["loss_attr/CA_cls"] = self.attr_bce(logit_CAttr, gt_all_CAttr_vec)
if self.args.lambda_cls_IA>0:
losses["loss_attr/IA_cls"] = self.attr_bce(logit_IAttr, batch["gt_attr"])
if self.args.lambda_cls_inst_IA>0:
logit_inst_IAttr = self.attr_IA_classifier(feat_IAttr)
losses["loss_attr/inst_IA_cls"] = self.attr_bce(
logit_inst_IAttr, batch["gt_attr"].unsqueeze(1).expand(-1, self.num_obj, -1))
if any([x.startswith("loss_attr") for x in losses]):
losses["loss_attr/total"] = (
self.args.lambda_cls_CA * losses.get("loss_attr/CA_cls", 0.) +
self.args.lambda_cls_IA * losses.get("loss_attr/IA_cls", 0.) +
self.args.lambda_cls_inst_IA * losses.get("loss_attr/inst_IA_cls", 0.) )
if self.args.lambda_aff > 0:
if self.args.lambda_cls_CA>0:
losses["loss_aff/CA_cls"] = self.aff_bce(logit_CAff, gt_all_CAff_vec)
if self.args.lambda_cls_IA>0:
losses["loss_aff/IA_cls"] = self.aff_bce(logit_IAff, batch["gt_aff"])
if self.args.lambda_cls_inst_IA>0:
logit_inst_IAff = self.aff_IA_classifier(feat_IAff)
losses["loss_aff/inst_IA_cls"] = self.aff_bce(
logit_inst_IAff, batch["gt_aff"].unsqueeze(1).expand(-1, self.num_obj, -1))
if any([x.startswith("loss_aff") for x in losses]):
losses["loss_aff/total"] = (
self.args.lambda_cls_CA * losses.get("loss_aff/CA_cls", 0.) +
self.args.lambda_cls_IA * losses.get("loss_aff/IA_cls", 0.) +
self.args.lambda_cls_inst_IA * losses.get("loss_aff/inst_IA_cls", 0.) )
if self.args.lambda_cf > 0 and batch['gt_causal'].shape[0] > 0: | cf_inst_id, cf_attr_mask, cf_aff_mask = build_counterfactual( | 4 | 2023-11-07 13:03:27+00:00 | 8k |
ApolloAuto/apollo-model-centerpoint | paddle3d/models/detection/centerpoint/centerpoint.py | [
{
"identifier": "manager",
"path": "paddle3d/apis/manager.py",
"snippet": "class ComponentManager:\n def __init__(self, *, name: str, description: str = ''):\n def __len__(self):\n def __repr__(self):\n def __getitem__(self, item: str):\n def components_dict(self) -> dict:\n def name(s... | import collections
import os
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from copy import deepcopy
from typing import Dict, List
from paddle.static import InputSpec
from paddle3d.apis import manager
from paddle3d.geometries import BBoxes3D
from paddle3d.models.base import BaseLidarModel
from paddle3d.sample import Sample, SampleMeta
from paddle3d.utils.checkpoint import load_pretrained_model
from paddle3d.utils.logger import logger
from paddle3d.utils import dtype2float32 | 5,148 | self.voxel_encoder = voxel_encoder
self.middle_encoder = middle_encoder
self.backbone = backbone
self.neck = neck
self.bbox_head = bbox_head
self.test_cfg = DictObject(test_cfg)
self.sync_bn = True
if pretrained is not None:
load_pretrained_model(self, self.pretrained)
self.freeze = freeze
def _freeze(self):
if len(self.freeze) > 0:
freeze_layers = []
for layer_name in self.freeze:
if layer_name == 'shared_conv':
freeze_layers.append(
getattr(self, 'bbox_head').shared_conv)
elif isinstance(layer_name, str):
freeze_layers.append(getattr(self, layer_name))
elif isinstance(layer_name, list):
for current_layer in layer_name:
freeze_layers.append(
getattr(self, 'bbox_head').tasks[current_layer])
else:
raise NotImplementedError(
'The freeze_layer type {} is not supported'.format(
layer_name))
for freeze_layer in freeze_layers:
self.freeze_signle_layer(freeze_layer)
def freeze_signle_layer(self, layer):
layer.eval()
for param in layer.parameters():
param.trainable = False
for m in layer.sublayers():
if isinstance(m, nn.layer.norm._BatchNormBase):
m.eval()
def deploy_preprocess(self, points):
def true_fn(points):
points = points[:, 0:5]
return points
def false_fn(points):
points = points.reshape([1, -1, 4])
points = F.pad(
points, [0, 1], value=0, mode='constant', data_format="NCL")
points = points.reshape([-1, 5])
return points
points = paddle.static.nn.cond(
points.shape[-1] >=
5, lambda: true_fn(points), lambda: false_fn(points))
return points[:, 0:self.voxel_encoder.in_channels]
def voxelize(self, points):
voxels, coordinates, num_points_in_voxel = self.voxelizer(points)
return voxels, coordinates, num_points_in_voxel
def extract_feat(self, data):
voxels, coordinates, num_points_in_voxel = self.voxelizer(
data['points'])
data["features"] = voxels
data["num_points_in_voxel"] = num_points_in_voxel
data["coors"] = coordinates
input_features = self.voxel_encoder(
data["features"], data["num_points_in_voxel"], data["coors"])
x = self.middle_encoder(input_features, data["coors"],
data["batch_size"])
x = self.backbone(x)
x = self.neck(x)
return x
def train_forward(self, samples):
if len(self.freeze) > 0:
self._freeze()
batch_size = len(samples["data"])
points = samples["data"]
data = dict(points=points, batch_size=batch_size)
if hasattr(self, 'amp_cfg_'):
with paddle.amp.auto_cast(**self.amp_cfg_):
x = self.extract_feat(data)
preds, x = self.bbox_head(x)
preds = dtype2float32(preds)
else:
x = self.extract_feat(data)
preds, x = self.bbox_head(x)
return self.bbox_head.loss(samples, preds, self.test_cfg)
def test_forward(self, samples):
batch_size = len(samples["data"])
points = samples["data"]
data = dict(points=points, batch_size=batch_size)
x = self.extract_feat(data)
preds, x = self.bbox_head(x)
preds = self.bbox_head.predict(samples, preds, self.test_cfg)
preds = self._parse_results_to_sample(preds, samples)
return {'preds': preds}
def export_forward(self, samples):
batch_size = 1
points = samples["data"]
points = self.deploy_preprocess(points)
data = dict(points=points, batch_size=batch_size)
x = self.extract_feat(data)
preds, x = self.bbox_head(x)
return self.bbox_head.predict_by_custom_op(samples, preds,
self.test_cfg)
def _parse_results_to_sample(self, results: dict, sample: dict):
num_samples = len(results)
new_results = []
for i in range(num_samples):
| # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class DictObject(Dict):
def __init__(self, config: Dict):
for key, value in config.items():
if isinstance(value, dict):
setattr(self, key, DictObject(value))
else:
setattr(self, key, value)
@manager.MODELS.add_component
class CenterPoint(BaseLidarModel):
def __init__(self,
voxelizer,
voxel_encoder,
middle_encoder,
backbone,
neck,
bbox_head,
test_cfg=None,
pretrained=None,
box_with_velocity: bool = False,
freeze=[]):
super().__init__(
with_voxelizer=True, box_with_velocity=box_with_velocity)
self.voxelizer = voxelizer
self.voxel_encoder = voxel_encoder
self.middle_encoder = middle_encoder
self.backbone = backbone
self.neck = neck
self.bbox_head = bbox_head
self.test_cfg = DictObject(test_cfg)
self.sync_bn = True
if pretrained is not None:
load_pretrained_model(self, self.pretrained)
self.freeze = freeze
def _freeze(self):
if len(self.freeze) > 0:
freeze_layers = []
for layer_name in self.freeze:
if layer_name == 'shared_conv':
freeze_layers.append(
getattr(self, 'bbox_head').shared_conv)
elif isinstance(layer_name, str):
freeze_layers.append(getattr(self, layer_name))
elif isinstance(layer_name, list):
for current_layer in layer_name:
freeze_layers.append(
getattr(self, 'bbox_head').tasks[current_layer])
else:
raise NotImplementedError(
'The freeze_layer type {} is not supported'.format(
layer_name))
for freeze_layer in freeze_layers:
self.freeze_signle_layer(freeze_layer)
def freeze_signle_layer(self, layer):
layer.eval()
for param in layer.parameters():
param.trainable = False
for m in layer.sublayers():
if isinstance(m, nn.layer.norm._BatchNormBase):
m.eval()
def deploy_preprocess(self, points):
def true_fn(points):
points = points[:, 0:5]
return points
def false_fn(points):
points = points.reshape([1, -1, 4])
points = F.pad(
points, [0, 1], value=0, mode='constant', data_format="NCL")
points = points.reshape([-1, 5])
return points
points = paddle.static.nn.cond(
points.shape[-1] >=
5, lambda: true_fn(points), lambda: false_fn(points))
return points[:, 0:self.voxel_encoder.in_channels]
def voxelize(self, points):
voxels, coordinates, num_points_in_voxel = self.voxelizer(points)
return voxels, coordinates, num_points_in_voxel
def extract_feat(self, data):
voxels, coordinates, num_points_in_voxel = self.voxelizer(
data['points'])
data["features"] = voxels
data["num_points_in_voxel"] = num_points_in_voxel
data["coors"] = coordinates
input_features = self.voxel_encoder(
data["features"], data["num_points_in_voxel"], data["coors"])
x = self.middle_encoder(input_features, data["coors"],
data["batch_size"])
x = self.backbone(x)
x = self.neck(x)
return x
def train_forward(self, samples):
if len(self.freeze) > 0:
self._freeze()
batch_size = len(samples["data"])
points = samples["data"]
data = dict(points=points, batch_size=batch_size)
if hasattr(self, 'amp_cfg_'):
with paddle.amp.auto_cast(**self.amp_cfg_):
x = self.extract_feat(data)
preds, x = self.bbox_head(x)
preds = dtype2float32(preds)
else:
x = self.extract_feat(data)
preds, x = self.bbox_head(x)
return self.bbox_head.loss(samples, preds, self.test_cfg)
def test_forward(self, samples):
batch_size = len(samples["data"])
points = samples["data"]
data = dict(points=points, batch_size=batch_size)
x = self.extract_feat(data)
preds, x = self.bbox_head(x)
preds = self.bbox_head.predict(samples, preds, self.test_cfg)
preds = self._parse_results_to_sample(preds, samples)
return {'preds': preds}
def export_forward(self, samples):
batch_size = 1
points = samples["data"]
points = self.deploy_preprocess(points)
data = dict(points=points, batch_size=batch_size)
x = self.extract_feat(data)
preds, x = self.bbox_head(x)
return self.bbox_head.predict_by_custom_op(samples, preds,
self.test_cfg)
def _parse_results_to_sample(self, results: dict, sample: dict):
num_samples = len(results)
new_results = []
for i in range(num_samples): | data = Sample(sample["path"][i], sample["modality"][i]) | 3 | 2023-11-08 07:08:03+00:00 | 8k |
JustlfC03/SCUNet-plusplus | test.py | [
{
"identifier": "Synapse_dataset",
"path": "datasets/dataset_synapse.py",
"snippet": "class Synapse_dataset(Dataset):\n def __init__(self, base_dir, list_dir, split, transform=None):\n self.transform = transform\n self.split = split\n self.sample_list = open(os.path.join(list_dir... | import argparse
import logging
import os
import random
import sys
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
from torch.utils.data import DataLoader
from tqdm import tqdm
from datasets.dataset_synapse import Synapse_dataset
from utils import test_single_volume
from networks.vision_transformer import SwinUnet as ViT_seg
from trainer import trainer_synapse
from config import get_config | 3,941 |
"""
--dataset Synapse
--cfg ./configs/swin_tiny_patch4_window7_224_lite.yaml
--is_saveni
--volume_path ./datasets/Synapse
--output_dir ./output
--max_epoch 150
--base_lr 0.05
--img_size 224
--batch_size 1
"""
parser = argparse.ArgumentParser()
parser.add_argument('--volume_path', type=str,
default='./datasets/Synapse/test_vol_h5',
help='root dir for validation volume data')
parser.add_argument('--dataset', type=str,
default='Synapse', help='experiment_name')
# parser.add_argument('--num_classes', type=int,
# default=9, help='output channel of network')
parser.add_argument('--num_classes', type=int,
default=2, help='output channel of network')
parser.add_argument('--list_dir', type=str,
default='./lists/lists_Synapse', help='list dir')
parser.add_argument('--output_dir', default='./output', type=str, help='output dir')
parser.add_argument('--max_iterations', type=int, default=30000, help='maximum epoch number to train')
parser.add_argument('--max_epochs', type=int, default=150, help='maximum epoch number to train')
parser.add_argument('--batch_size', type=int, default=1,
help='batch_size per gpu')
parser.add_argument('--img_size', type=int, default=224, help='input patch size of network input')
parser.add_argument('--is_savenii', action="store_true", help='whether to save results during inference')
parser.add_argument('--test_save_dir', type=str, default='../predictions', help='saving prediction as nii!')
parser.add_argument('--deterministic', type=int, default=1, help='whether use deterministic training')
parser.add_argument('--base_lr', type=float, default=0.01, help='segmentation network learning rate')
parser.add_argument('--seed', type=int, default=1234, help='random seed')
parser.add_argument('--cfg', type=str, required=True, metavar="FILE", help='path to config file', )
parser.add_argument(
"--opts",
help="Modify config options by adding 'KEY VALUE' pairs. ",
default=None,
nargs='+',
)
parser.add_argument('--zip', action='store_true', help='use zipped dataset instead of folder dataset')
parser.add_argument('--cache-mode', type=str, default='part', choices=['no', 'full', 'part'],
help='no: no cache, '
'full: cache all data, '
'part: sharding the dataset into nonoverlapping pieces and only cache one piece')
parser.add_argument('--resume', help='resume from checkpoint')
parser.add_argument('--accumulation-steps', type=int, help="gradient accumulation steps")
parser.add_argument('--use-checkpoint', action='store_true',
help="whether to use gradient checkpointing to save memory")
parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'],
help='mixed precision opt level, if O0, no amp is used')
parser.add_argument('--tag', help='tag of experiment')
parser.add_argument('--eval', action='store_true', help='Perform evaluation only')
parser.add_argument('--throughput', action='store_true', help='Test throughput only')
args = parser.parse_args()
if args.dataset == "Synapse":
args.volume_path = os.path.join(args.volume_path, "test_vol_h5")
# print(args.volume_path)
config = get_config(args)
def inference(args, model, test_save_path=None):
db_test = args.Dataset(base_dir=args.volume_path, split="test_vol", list_dir=args.list_dir)
testloader = DataLoader(db_test, batch_size=1, shuffle=False, num_workers=1)
logging.info("{} test iterations per epoch".format(len(testloader)))
model.eval()
metric_list = 0.0
f = open(r'G:\FINAL\SCUNet++\lists\lists_Synapse\testxg.txt', 'w')
for i_batch, sampled_batch in tqdm(enumerate(testloader)):
h, w = sampled_batch["image"].size()[2:]
image, label, case_name = sampled_batch["image"], sampled_batch["label"], sampled_batch['case_name'][0]
|
"""
--dataset Synapse
--cfg ./configs/swin_tiny_patch4_window7_224_lite.yaml
--is_saveni
--volume_path ./datasets/Synapse
--output_dir ./output
--max_epoch 150
--base_lr 0.05
--img_size 224
--batch_size 1
"""
parser = argparse.ArgumentParser()
parser.add_argument('--volume_path', type=str,
default='./datasets/Synapse/test_vol_h5',
help='root dir for validation volume data')
parser.add_argument('--dataset', type=str,
default='Synapse', help='experiment_name')
# parser.add_argument('--num_classes', type=int,
# default=9, help='output channel of network')
parser.add_argument('--num_classes', type=int,
default=2, help='output channel of network')
parser.add_argument('--list_dir', type=str,
default='./lists/lists_Synapse', help='list dir')
parser.add_argument('--output_dir', default='./output', type=str, help='output dir')
parser.add_argument('--max_iterations', type=int, default=30000, help='maximum epoch number to train')
parser.add_argument('--max_epochs', type=int, default=150, help='maximum epoch number to train')
parser.add_argument('--batch_size', type=int, default=1,
help='batch_size per gpu')
parser.add_argument('--img_size', type=int, default=224, help='input patch size of network input')
parser.add_argument('--is_savenii', action="store_true", help='whether to save results during inference')
parser.add_argument('--test_save_dir', type=str, default='../predictions', help='saving prediction as nii!')
parser.add_argument('--deterministic', type=int, default=1, help='whether use deterministic training')
parser.add_argument('--base_lr', type=float, default=0.01, help='segmentation network learning rate')
parser.add_argument('--seed', type=int, default=1234, help='random seed')
parser.add_argument('--cfg', type=str, required=True, metavar="FILE", help='path to config file', )
parser.add_argument(
"--opts",
help="Modify config options by adding 'KEY VALUE' pairs. ",
default=None,
nargs='+',
)
parser.add_argument('--zip', action='store_true', help='use zipped dataset instead of folder dataset')
parser.add_argument('--cache-mode', type=str, default='part', choices=['no', 'full', 'part'],
help='no: no cache, '
'full: cache all data, '
'part: sharding the dataset into nonoverlapping pieces and only cache one piece')
parser.add_argument('--resume', help='resume from checkpoint')
parser.add_argument('--accumulation-steps', type=int, help="gradient accumulation steps")
parser.add_argument('--use-checkpoint', action='store_true',
help="whether to use gradient checkpointing to save memory")
parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'],
help='mixed precision opt level, if O0, no amp is used')
parser.add_argument('--tag', help='tag of experiment')
parser.add_argument('--eval', action='store_true', help='Perform evaluation only')
parser.add_argument('--throughput', action='store_true', help='Test throughput only')
args = parser.parse_args()
if args.dataset == "Synapse":
args.volume_path = os.path.join(args.volume_path, "test_vol_h5")
# print(args.volume_path)
config = get_config(args)
def inference(args, model, test_save_path=None):
db_test = args.Dataset(base_dir=args.volume_path, split="test_vol", list_dir=args.list_dir)
testloader = DataLoader(db_test, batch_size=1, shuffle=False, num_workers=1)
logging.info("{} test iterations per epoch".format(len(testloader)))
model.eval()
metric_list = 0.0
f = open(r'G:\FINAL\SCUNet++\lists\lists_Synapse\testxg.txt', 'w')
for i_batch, sampled_batch in tqdm(enumerate(testloader)):
h, w = sampled_batch["image"].size()[2:]
image, label, case_name = sampled_batch["image"], sampled_batch["label"], sampled_batch['case_name'][0] | metric_i = test_single_volume(image, label, model, classes=args.num_classes, | 1 | 2023-11-04 11:42:02+00:00 | 8k |
corcel-api/cortex.t | miner/miner.py | [
{
"identifier": "Embeddings",
"path": "template/protocol.py",
"snippet": "class Embeddings( bt.Synapse):\n \"\"\" A class to represent the embeddings request and response. \"\"\"\n\n texts: List[str] = pydantic.Field(\n ...,\n title=\"Text\",\n description=\"The list of input ... | import base # noqa
import argparse
import asyncio
import copy
import json
import os
import io
import base64
import boto3
import pathlib
import threading
import time
import requests
import traceback
import requests
import anthropic
import bittensor as bt
import wandb
import stability_sdk.interfaces.gooseai.generation.generation_pb2 as generation
import template
import sys
from abc import ABC, abstractmethod
from collections import deque
from functools import partial
from typing import Tuple
from stability_sdk import client
from config import check_config, get_config
from openai import AsyncOpenAI, OpenAI
from PIL import Image
from anthropic_bedrock import AsyncAnthropicBedrock, HUMAN_PROMPT, AI_PROMPT, AnthropicBedrock
from template.protocol import Embeddings, ImageResponse, IsAlive, StreamPrompting
from template.utils import get_version
from starlette.types import Send | 4,033 | bt.logging.info(
f"Running miner for subnet: {self.config.netuid} "
f"on network: {self.subtensor.chain_endpoint} with config:"
)
# metagraph provides the network's current state, holding state about other participants in a subnet.
self.metagraph = self.subtensor.metagraph(self.config.netuid)
bt.logging.info(f"Metagraph: {self.metagraph}")
if self.wallet.hotkey.ss58_address not in self.metagraph.hotkeys:
bt.logging.error(
f"\nYour validator: {self.wallet} if not registered to chain connection: {self.subtensor} "
f"\nRun btcli register and try again. "
)
sys.exit()
else:
# Each miner gets a unique identity (UID) in the network for differentiation.
self.my_subnet_uid = self.metagraph.hotkeys.index(
self.wallet.hotkey.ss58_address
)
bt.logging.info(f"Running miner on uid: {self.my_subnet_uid}")
# The axon handles request processing, allowing validators to send this process requests.
self.axon = axon or bt.axon(wallet=self.wallet, port=self.config.axon.port)
# Attach determiners which functions are called when servicing a request.
bt.logging.info("Attaching forward function to axon.")
print(f"Attaching forward function to axon. {self._prompt}")
self.axon.attach(
forward_fn=self._prompt,
blacklist_fn=self.blacklist_prompt,
).attach(
forward_fn=self._is_alive,
blacklist_fn=self.blacklist_is_alive,
).attach(
forward_fn=self._images,
blacklist_fn=self.blacklist_images,
).attach(
forward_fn=self._embeddings,
blacklist_fn=self.blacklist_embeddings,
)
bt.logging.info(f"Axon created: {self.axon}")
# Instantiate runners
self.should_exit: bool = False
self.is_running: bool = False
self.thread: threading.Thread = None
self.lock = asyncio.Lock()
self.request_timestamps: dict = {}
thread = threading.Thread(target=get_valid_hotkeys, args=(self.config,))
# thread.start()
@abstractmethod
def config(self) -> bt.config:
...
def _prompt(self, synapse: StreamPrompting) -> StreamPrompting:
return self.prompt(synapse)
def base_blacklist(self, synapse, blacklist_amt = 20000) -> Tuple[bool, str]:
try:
hotkey = synapse.dendrite.hotkey
synapse_type = type(synapse).__name__
if hotkey in template.WHITELISTED_KEYS:
return False, f"accepting {synapse_type} request from {hotkey}"
if hotkey not in valid_hotkeys:
return True, f"Blacklisted a {synapse_type} request from a non-valid hotkey: {hotkey}"
uid = None
for uid, _axon in enumerate(self.metagraph.axons): # noqa: B007
if _axon.hotkey == hotkey:
break
if uid is None and template.ALLOW_NON_REGISTERED is False:
return True, f"Blacklisted a non registered hotkey's {synapse_type} request from {hotkey}"
# check the stake
tao = self.metagraph.neurons[uid].stake.tao
# metagraph.neurons[uid].S
if tao < blacklist_amt:
return True, f"Blacklisted a low stake {synapse_type} request: {tao} < {blacklist_amt} from {hotkey}"
time_window = template.MIN_REQUEST_PERIOD * 60
current_time = time.time()
if hotkey not in self.request_timestamps:
self.request_timestamps[hotkey] = deque()
# Remove timestamps outside the current time window
while self.request_timestamps[hotkey] and current_time - self.request_timestamps[hotkey][0] > time_window:
self.request_timestamps[hotkey].popleft()
# Check if the number of requests exceeds the limit
if len(self.request_timestamps[hotkey]) >= template.MAX_REQUESTS:
return (
True,
f"Request frequency for {hotkey} exceeded: "
f"{len(self.request_timestamps[hotkey])} requests in {template.MIN_REQUEST_PERIOD} minutes. "
f"Limit is {template.MAX_REQUESTS} requests."
)
self.request_timestamps[hotkey].append(current_time)
return False, f"accepting {synapse_type} request from {hotkey}"
except Exception:
bt.logging.error(f"errror in blacklist {traceback.format_exc()}")
def blacklist_prompt( self, synapse: StreamPrompting ) -> Tuple[bool, str]:
blacklist = self.base_blacklist(synapse, template.PROMPT_BLACKLIST_STAKE)
bt.logging.info(blacklist[1])
return blacklist
def blacklist_is_alive( self, synapse: IsAlive ) -> Tuple[bool, str]:
blacklist = self.base_blacklist(synapse, template.ISALIVE_BLACKLIST_STAKE)
bt.logging.debug(blacklist[1])
return blacklist
|
OpenAI.api_key = os.environ.get("OPENAI_API_KEY")
if not OpenAI.api_key:
raise ValueError("Please set the OPENAI_API_KEY environment variable.")
stability_api = client.StabilityInference(
key=os.environ['STABILITY_KEY'],
verbose=True,
engine="stable-diffusion-xl-1024-v1-0"
)
api_key = os.environ.get("ANTHROPIC_API_KEY")
bedrock_client = AsyncAnthropicBedrock(
# default is 10 minutes
# more granular timeout options: timeout=httpx.Timeout(60.0, read=5.0, write=10.0, connect=2.0),
timeout=60.0,
)
anthropic_client = anthropic.Anthropic()
anthropic_client.api_key = api_key
netrc_path = pathlib.Path.home() / ".netrc"
wandb_api_key = os.getenv("WANDB_API_KEY")
bt.logging.info("WANDB_API_KEY is set")
bt.logging.info("~/.netrc exists:", netrc_path.exists())
if not wandb_api_key and not netrc_path.exists():
raise ValueError("Please log in to wandb using `wandb login` or set the WANDB_API_KEY environment variable.")
client = AsyncOpenAI(timeout=60.0)
valid_hotkeys = []
class StreamMiner(ABC):
def __init__(self, config=None, axon=None, wallet=None, subtensor=None):
bt.logging.info("starting stream miner")
base_config = copy.deepcopy(config or get_config())
self.config = self.config()
self.config.merge(base_config)
check_config(StreamMiner, self.config)
bt.logging.info(self.config) # TODO: duplicate print?
self.prompt_cache: dict[str, Tuple[str, int]] = {}
self.request_timestamps = {}
# Activating Bittensor's logging with the set configurations.
bt.logging(config=self.config, logging_dir=self.config.full_path)
bt.logging.info("Setting up bittensor objects.")
# Wallet holds cryptographic information, ensuring secure transactions and communication.
self.wallet = wallet or bt.wallet(config=self.config)
bt.logging.info(f"Wallet {self.wallet}")
# subtensor manages the blockchain connection, facilitating interaction with the Bittensor blockchain.
self.subtensor = subtensor or bt.subtensor(config=self.config)
bt.logging.info(f"Subtensor: {self.subtensor}")
bt.logging.info(
f"Running miner for subnet: {self.config.netuid} "
f"on network: {self.subtensor.chain_endpoint} with config:"
)
# metagraph provides the network's current state, holding state about other participants in a subnet.
self.metagraph = self.subtensor.metagraph(self.config.netuid)
bt.logging.info(f"Metagraph: {self.metagraph}")
if self.wallet.hotkey.ss58_address not in self.metagraph.hotkeys:
bt.logging.error(
f"\nYour validator: {self.wallet} if not registered to chain connection: {self.subtensor} "
f"\nRun btcli register and try again. "
)
sys.exit()
else:
# Each miner gets a unique identity (UID) in the network for differentiation.
self.my_subnet_uid = self.metagraph.hotkeys.index(
self.wallet.hotkey.ss58_address
)
bt.logging.info(f"Running miner on uid: {self.my_subnet_uid}")
# The axon handles request processing, allowing validators to send this process requests.
self.axon = axon or bt.axon(wallet=self.wallet, port=self.config.axon.port)
# Attach determiners which functions are called when servicing a request.
bt.logging.info("Attaching forward function to axon.")
print(f"Attaching forward function to axon. {self._prompt}")
self.axon.attach(
forward_fn=self._prompt,
blacklist_fn=self.blacklist_prompt,
).attach(
forward_fn=self._is_alive,
blacklist_fn=self.blacklist_is_alive,
).attach(
forward_fn=self._images,
blacklist_fn=self.blacklist_images,
).attach(
forward_fn=self._embeddings,
blacklist_fn=self.blacklist_embeddings,
)
bt.logging.info(f"Axon created: {self.axon}")
# Instantiate runners
self.should_exit: bool = False
self.is_running: bool = False
self.thread: threading.Thread = None
self.lock = asyncio.Lock()
self.request_timestamps: dict = {}
thread = threading.Thread(target=get_valid_hotkeys, args=(self.config,))
# thread.start()
@abstractmethod
def config(self) -> bt.config:
...
def _prompt(self, synapse: StreamPrompting) -> StreamPrompting:
return self.prompt(synapse)
def base_blacklist(self, synapse, blacklist_amt = 20000) -> Tuple[bool, str]:
try:
hotkey = synapse.dendrite.hotkey
synapse_type = type(synapse).__name__
if hotkey in template.WHITELISTED_KEYS:
return False, f"accepting {synapse_type} request from {hotkey}"
if hotkey not in valid_hotkeys:
return True, f"Blacklisted a {synapse_type} request from a non-valid hotkey: {hotkey}"
uid = None
for uid, _axon in enumerate(self.metagraph.axons): # noqa: B007
if _axon.hotkey == hotkey:
break
if uid is None and template.ALLOW_NON_REGISTERED is False:
return True, f"Blacklisted a non registered hotkey's {synapse_type} request from {hotkey}"
# check the stake
tao = self.metagraph.neurons[uid].stake.tao
# metagraph.neurons[uid].S
if tao < blacklist_amt:
return True, f"Blacklisted a low stake {synapse_type} request: {tao} < {blacklist_amt} from {hotkey}"
time_window = template.MIN_REQUEST_PERIOD * 60
current_time = time.time()
if hotkey not in self.request_timestamps:
self.request_timestamps[hotkey] = deque()
# Remove timestamps outside the current time window
while self.request_timestamps[hotkey] and current_time - self.request_timestamps[hotkey][0] > time_window:
self.request_timestamps[hotkey].popleft()
# Check if the number of requests exceeds the limit
if len(self.request_timestamps[hotkey]) >= template.MAX_REQUESTS:
return (
True,
f"Request frequency for {hotkey} exceeded: "
f"{len(self.request_timestamps[hotkey])} requests in {template.MIN_REQUEST_PERIOD} minutes. "
f"Limit is {template.MAX_REQUESTS} requests."
)
self.request_timestamps[hotkey].append(current_time)
return False, f"accepting {synapse_type} request from {hotkey}"
except Exception:
bt.logging.error(f"errror in blacklist {traceback.format_exc()}")
def blacklist_prompt( self, synapse: StreamPrompting ) -> Tuple[bool, str]:
blacklist = self.base_blacklist(synapse, template.PROMPT_BLACKLIST_STAKE)
bt.logging.info(blacklist[1])
return blacklist
def blacklist_is_alive( self, synapse: IsAlive ) -> Tuple[bool, str]:
blacklist = self.base_blacklist(synapse, template.ISALIVE_BLACKLIST_STAKE)
bt.logging.debug(blacklist[1])
return blacklist
| def blacklist_images( self, synapse: ImageResponse ) -> Tuple[bool, str]: | 1 | 2023-11-06 10:35:34+00:00 | 8k |
ljy0ustc/LLaRA | main.py | [
{
"identifier": "MInterface",
"path": "model/model_interface.py",
"snippet": "class MInterface(pl.LightningModule):\n def __init__(self, \n **kargs):\n super().__init__()\n self.save_hyperparameters()\n self.load_llm(self.hparams.llm_path)\n self.load_rec_m... | import os
import pytorch_lightning as pl
import pytorch_lightning.callbacks as plc
from argparse import ArgumentParser
from pytorch_lightning import Trainer
from pytorch_lightning.loggers import TensorBoardLogger, CSVLogger
from model.model_interface import MInterface
from data.data_interface import DInterface
from recommender.A_SASRec_final_bce_llm import SASRec, Caser, GRU
from SASRecModules_ori import *
from transformers import LlamaForCausalLM, LlamaTokenizer | 6,825 |
def load_callbacks(args):
callbacks = []
callbacks.append(plc.EarlyStopping(
monitor='metric',
mode='max',
patience=10,
min_delta=0.001
))
callbacks.append(plc.ModelCheckpoint(
monitor='metric',
dirpath=args.ckpt_dir,
filename='{epoch:02d}-{metric:.3f}',
save_top_k=-1,
mode='max',
save_last=True,
#train_time_interval=args.val_check_interval
every_n_epochs=1
))
if args.lr_scheduler:
callbacks.append(plc.LearningRateMonitor(
logging_interval='step'))
return callbacks
def main(args):
pl.seed_everything(args.seed)
model = MInterface(**vars(args))
if args.ckpt_path:
ckpt = torch.load(args.ckpt_path, map_location='cpu')
model.load_state_dict(ckpt['state_dict'], strict=False)
print("load checkpoints from {}".format(args.ckpt_path))
|
def load_callbacks(args):
callbacks = []
callbacks.append(plc.EarlyStopping(
monitor='metric',
mode='max',
patience=10,
min_delta=0.001
))
callbacks.append(plc.ModelCheckpoint(
monitor='metric',
dirpath=args.ckpt_dir,
filename='{epoch:02d}-{metric:.3f}',
save_top_k=-1,
mode='max',
save_last=True,
#train_time_interval=args.val_check_interval
every_n_epochs=1
))
if args.lr_scheduler:
callbacks.append(plc.LearningRateMonitor(
logging_interval='step'))
return callbacks
def main(args):
pl.seed_everything(args.seed)
model = MInterface(**vars(args))
if args.ckpt_path:
ckpt = torch.load(args.ckpt_path, map_location='cpu')
model.load_state_dict(ckpt['state_dict'], strict=False)
print("load checkpoints from {}".format(args.ckpt_path))
| data_module = DInterface(llm_tokenizer=model.llama_tokenizer,**vars(args)) | 1 | 2023-11-09 12:19:17+00:00 | 8k |
silicx/GoldFromOres | DatasetCondensation/utils.py | [
{
"identifier": "MLP",
"path": "DatasetCondensation/networks.py",
"snippet": "class MLP(nn.Module):\r\n def __init__(self, channel, num_classes):\r\n super(MLP, self).__init__()\r\n self.fc_1 = nn.Linear(28*28*1 if channel==1 else 32*32*3, 128)\r\n self.fc_2 = nn.Linear(128, 128)... | import time
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset
from torchvision import datasets, transforms
from scipy.ndimage.interpolation import rotate as scipyrotate
from .networks import MLP, ConvNet, LeNet, AlexNet, AlexNetBN, VGG11, VGG11BN, ResNet18, ResNet18BN_AP, ResNet18BN
| 3,642 |
def get_dataset(dataset, data_path):
if dataset == 'MNIST':
channel = 1
im_size = (28, 28)
num_classes = 10
mean = [0.1307]
std = [0.3081]
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)])
dst_train = datasets.MNIST(data_path, train=True, download=True, transform=transform) # no augmentation
dst_test = datasets.MNIST(data_path, train=False, download=True, transform=transform)
class_names = [str(c) for c in range(num_classes)]
elif dataset == 'FashionMNIST':
channel = 1
im_size = (28, 28)
num_classes = 10
mean = [0.2861]
std = [0.3530]
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)])
dst_train = datasets.FashionMNIST(data_path, train=True, download=True, transform=transform) # no augmentation
dst_test = datasets.FashionMNIST(data_path, train=False, download=True, transform=transform)
class_names = dst_train.classes
elif dataset == 'SVHN':
channel = 3
im_size = (32, 32)
num_classes = 10
mean = [0.4377, 0.4438, 0.4728]
std = [0.1980, 0.2010, 0.1970]
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)])
dst_train = datasets.SVHN(data_path, split='train', download=True, transform=transform) # no augmentation
dst_test = datasets.SVHN(data_path, split='test', download=True, transform=transform)
class_names = [str(c) for c in range(num_classes)]
elif dataset == 'CIFAR10':
channel = 3
im_size = (32, 32)
num_classes = 10
mean = [0.4914, 0.4822, 0.4465]
std = [0.2023, 0.1994, 0.2010]
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)])
dst_train = datasets.CIFAR10(data_path, train=True, download=True, transform=transform) # no augmentation
dst_test = datasets.CIFAR10(data_path, train=False, download=True, transform=transform)
class_names = dst_train.classes
elif dataset == 'CIFAR100':
channel = 3
im_size = (32, 32)
num_classes = 100
mean = [0.5071, 0.4866, 0.4409]
std = [0.2673, 0.2564, 0.2762]
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)])
dst_train = datasets.CIFAR100(data_path, train=True, download=True, transform=transform) # no augmentation
dst_test = datasets.CIFAR100(data_path, train=False, download=True, transform=transform)
class_names = dst_train.classes
elif dataset == 'TinyImageNet':
channel = 3
im_size = (64, 64)
num_classes = 200
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
data = torch.load(os.path.join(data_path, 'tinyimagenet.pt'), map_location='cpu')
class_names = data['classes']
images_train = data['images_train']
labels_train = data['labels_train']
images_train = images_train.detach().float() / 255.0
labels_train = labels_train.detach()
for c in range(channel):
images_train[:,c] = (images_train[:,c] - mean[c])/std[c]
dst_train = TensorDataset(images_train, labels_train) # no augmentation
images_val = data['images_val']
labels_val = data['labels_val']
images_val = images_val.detach().float() / 255.0
labels_val = labels_val.detach()
for c in range(channel):
images_val[:, c] = (images_val[:, c] - mean[c]) / std[c]
dst_test = TensorDataset(images_val, labels_val) # no augmentation
else:
exit('unknown dataset: %s'%dataset)
testloader = torch.utils.data.DataLoader(dst_test, batch_size=256, shuffle=False, num_workers=0)
return channel, im_size, num_classes, class_names, mean, std, dst_train, dst_test, testloader
class TensorDataset(Dataset):
def __init__(self, images, labels): # images: n x c x h x w tensor
self.images = images.detach().float()
self.labels = labels.detach()
def __getitem__(self, index):
return self.images[index], self.labels[index]
def __len__(self):
return self.images.shape[0]
def get_default_convnet_setting():
net_width, net_depth, net_act, net_norm, net_pooling = 128, 3, 'relu', 'instancenorm', 'avgpooling'
return net_width, net_depth, net_act, net_norm, net_pooling
def get_network(model, channel, num_classes, im_size=(32, 32)):
torch.random.manual_seed(int(time.time() * 1000) % 100000)
net_width, net_depth, net_act, net_norm, net_pooling = get_default_convnet_setting()
if model == 'MLP':
|
def get_dataset(dataset, data_path):
if dataset == 'MNIST':
channel = 1
im_size = (28, 28)
num_classes = 10
mean = [0.1307]
std = [0.3081]
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)])
dst_train = datasets.MNIST(data_path, train=True, download=True, transform=transform) # no augmentation
dst_test = datasets.MNIST(data_path, train=False, download=True, transform=transform)
class_names = [str(c) for c in range(num_classes)]
elif dataset == 'FashionMNIST':
channel = 1
im_size = (28, 28)
num_classes = 10
mean = [0.2861]
std = [0.3530]
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)])
dst_train = datasets.FashionMNIST(data_path, train=True, download=True, transform=transform) # no augmentation
dst_test = datasets.FashionMNIST(data_path, train=False, download=True, transform=transform)
class_names = dst_train.classes
elif dataset == 'SVHN':
channel = 3
im_size = (32, 32)
num_classes = 10
mean = [0.4377, 0.4438, 0.4728]
std = [0.1980, 0.2010, 0.1970]
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)])
dst_train = datasets.SVHN(data_path, split='train', download=True, transform=transform) # no augmentation
dst_test = datasets.SVHN(data_path, split='test', download=True, transform=transform)
class_names = [str(c) for c in range(num_classes)]
elif dataset == 'CIFAR10':
channel = 3
im_size = (32, 32)
num_classes = 10
mean = [0.4914, 0.4822, 0.4465]
std = [0.2023, 0.1994, 0.2010]
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)])
dst_train = datasets.CIFAR10(data_path, train=True, download=True, transform=transform) # no augmentation
dst_test = datasets.CIFAR10(data_path, train=False, download=True, transform=transform)
class_names = dst_train.classes
elif dataset == 'CIFAR100':
channel = 3
im_size = (32, 32)
num_classes = 100
mean = [0.5071, 0.4866, 0.4409]
std = [0.2673, 0.2564, 0.2762]
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)])
dst_train = datasets.CIFAR100(data_path, train=True, download=True, transform=transform) # no augmentation
dst_test = datasets.CIFAR100(data_path, train=False, download=True, transform=transform)
class_names = dst_train.classes
elif dataset == 'TinyImageNet':
channel = 3
im_size = (64, 64)
num_classes = 200
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
data = torch.load(os.path.join(data_path, 'tinyimagenet.pt'), map_location='cpu')
class_names = data['classes']
images_train = data['images_train']
labels_train = data['labels_train']
images_train = images_train.detach().float() / 255.0
labels_train = labels_train.detach()
for c in range(channel):
images_train[:,c] = (images_train[:,c] - mean[c])/std[c]
dst_train = TensorDataset(images_train, labels_train) # no augmentation
images_val = data['images_val']
labels_val = data['labels_val']
images_val = images_val.detach().float() / 255.0
labels_val = labels_val.detach()
for c in range(channel):
images_val[:, c] = (images_val[:, c] - mean[c]) / std[c]
dst_test = TensorDataset(images_val, labels_val) # no augmentation
else:
exit('unknown dataset: %s'%dataset)
testloader = torch.utils.data.DataLoader(dst_test, batch_size=256, shuffle=False, num_workers=0)
return channel, im_size, num_classes, class_names, mean, std, dst_train, dst_test, testloader
class TensorDataset(Dataset):
def __init__(self, images, labels): # images: n x c x h x w tensor
self.images = images.detach().float()
self.labels = labels.detach()
def __getitem__(self, index):
return self.images[index], self.labels[index]
def __len__(self):
return self.images.shape[0]
def get_default_convnet_setting():
net_width, net_depth, net_act, net_norm, net_pooling = 128, 3, 'relu', 'instancenorm', 'avgpooling'
return net_width, net_depth, net_act, net_norm, net_pooling
def get_network(model, channel, num_classes, im_size=(32, 32)):
torch.random.manual_seed(int(time.time() * 1000) % 100000)
net_width, net_depth, net_act, net_norm, net_pooling = get_default_convnet_setting()
if model == 'MLP':
| net = MLP(channel=channel, num_classes=num_classes)
| 0 | 2023-11-03 09:34:15+00:00 | 8k |
WHU-USI3DV/PatchAugNet | utils/model_util/feat_processor.py | [
{
"identifier": "TransformerEncoderLayer",
"path": "utils/model_util/transformer.py",
"snippet": "class TransformerEncoderLayer(nn.Module):\n\n def __init__(self,\n d_model, \n nhead, \n dim_feedforward, \n mha_dropout, \n ffn... | import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from utils.model_util.transformer import TransformerEncoderLayer
from place_recognition.patch_aug_net.models.loupe import NetVLADBase
from utils.model_util.pool import get_pool
from utils.train_util import nn_dist, nn_angle | 4,097 | super().__init__()
assert d_model % num_heads == 0
self.dim = d_model // num_heads
self.num_heads = num_heads
self.merge = nn.Conv1d(d_model, d_model, kernel_size=1)
self.proj = nn.ModuleList([copy.deepcopy(self.merge) for _ in range(3)])
def forward(self, query, key, value):
batch_dim = query.size(0)
query, key, value = [l(x).view(batch_dim, self.dim, self.num_heads, -1)
for l, x in zip(self.proj, (query, key, value))]
x, prob = attention(query, key, value)
# self.prob.append(prob)
return self.merge(x.contiguous().view(batch_dim, self.dim*self.num_heads, -1))
class AttentionalPropagation(nn.Module):
def __init__(self, feature_dim: int, num_heads: int):
super().__init__()
self.attn = MultiHeadedAttention(num_heads, feature_dim)
self.mlp = MLP([feature_dim*2, feature_dim*2, feature_dim])
nn.init.constant_(self.mlp[-1].bias, 0.0)
def forward(self, x, source):
message = self.attn(x, source, source)
return self.mlp(torch.cat([x, message], dim=1))
class AttentionalGNN(nn.Module):
def __init__(self, feature_dim: int, layer_names: list):
super().__init__()
self.layers = nn.ModuleList([
AttentionalPropagation(feature_dim, 4)
for _ in range(len(layer_names))])
self.names = layer_names
self.only_self_attn = True
for name in layer_names:
if name == 'cross':
self.only_self_attn = False
break
def forward(self, desc0, desc1=None):
""" desc0: b x m x d, desc1: b x n x d """
# only self-attn
if self.only_self_attn or desc1 is None:
desc0 = desc0.permute(0, 2, 1) # b x d x m
for layer, name in zip(self.layers, self.names):
delta0 = layer(desc0, desc0)
desc0 = desc0 + delta0
desc0 = desc0.permute(0, 2, 1) # b x m x d
return desc0
# with cross-attn
desc0 = desc0.permute(0, 2, 1) # b x d x m
desc1 = desc1.permute(0, 2, 1) # b x d x n
for layer, name in zip(self.layers, self.names):
layer.attn.prob = []
if name == 'cross':
src0, src1 = desc1, desc0
else: # if name == 'self':
src0, src1 = desc0, desc1
delta0, delta1 = layer(desc0, src0), layer(desc1, src1)
desc0, desc1 = (desc0 + delta0), (desc1 + delta1)
desc0 = desc0.permute(0, 2, 1) # b x m x d
desc1 = desc1.permute(0, 2, 1) # b x n x d
return desc0, desc1
class AbsCoordEncoder(nn.Module):
""" Input: B x N x 2 or B x N x 3
Returns: B x N x d
"""
def __init__(self, coord_dim, embed_dim):
super(AbsCoordEncoder, self).__init__()
self.fc = nn.Sequential(
nn.Linear(coord_dim, embed_dim),
nn.LayerNorm(embed_dim),
nn.ReLU()
)
def forward(self, x):
return self.fc(x)
class DistanceEncoder(nn.Module):
""" Input: B x N x 2 or B x N x 3
Returns: B x N x d
"""
def __init__(self, N, embed_dim, max_dist=None):
super(DistanceEncoder, self).__init__()
self.max_dist = max_dist
self.fc = nn.Sequential(
nn.Linear(N, embed_dim),
nn.LayerNorm(embed_dim),
nn.ReLU()
)
def forward(self, x):
dist = nn_dist(x - torch.mean(x, dim=1, keepdim=True)).float() # B x N x N
if self.max_dist is not None:
max_dist_fill = torch.ones_like(dist) * self.max_dist
dist = torch.where(dist > self.max_dist, max_dist_fill, dist)
x = self.fc(dist / torch.max(dist)) # B x N x d
return x
class AngleEncoder(nn.Module):
""" Input: B x N x 2 or B x N x 3
Returns: B x N x d
"""
def __init__(self, N, embed_dim, angle_k=None):
super(AngleEncoder, self).__init__()
self.angle_k = angle_k
self.fc = nn.Sequential(
nn.Linear(N, embed_dim),
nn.LayerNorm(embed_dim),
nn.ReLU()
)
self.max_pool = nn.AdaptiveMaxPool1d(1)
def forward(self, x):
|
def MLP(channels: list, do_bn=True):
""" Multi-layer perceptron """
n = len(channels)
layers = []
for i in range(1, n):
layers.append(
nn.Conv1d(channels[i - 1], channels[i], kernel_size=1, bias=True))
if i < (n-1):
if do_bn:
# layers.append(nn.BatchNorm1d(channels[i]))
layers.append(nn.InstanceNorm1d(channels[i]))
layers.append(nn.ReLU())
return nn.Sequential(*layers)
def attention(query, key, value):
dim = query.shape[1]
scores = torch.einsum('bdhn,bdhm->bhnm', query, key) / dim**.5
prob = torch.nn.functional.softmax(scores, dim=-1)
return torch.einsum('bhnm,bdhm->bdhn', prob, value), prob
class MultiHeadedAttention(nn.Module):
""" Multi-head attention to increase model expressivitiy """
def __init__(self, num_heads: int, d_model: int):
super().__init__()
assert d_model % num_heads == 0
self.dim = d_model // num_heads
self.num_heads = num_heads
self.merge = nn.Conv1d(d_model, d_model, kernel_size=1)
self.proj = nn.ModuleList([copy.deepcopy(self.merge) for _ in range(3)])
def forward(self, query, key, value):
batch_dim = query.size(0)
query, key, value = [l(x).view(batch_dim, self.dim, self.num_heads, -1)
for l, x in zip(self.proj, (query, key, value))]
x, prob = attention(query, key, value)
# self.prob.append(prob)
return self.merge(x.contiguous().view(batch_dim, self.dim*self.num_heads, -1))
class AttentionalPropagation(nn.Module):
def __init__(self, feature_dim: int, num_heads: int):
super().__init__()
self.attn = MultiHeadedAttention(num_heads, feature_dim)
self.mlp = MLP([feature_dim*2, feature_dim*2, feature_dim])
nn.init.constant_(self.mlp[-1].bias, 0.0)
def forward(self, x, source):
message = self.attn(x, source, source)
return self.mlp(torch.cat([x, message], dim=1))
class AttentionalGNN(nn.Module):
def __init__(self, feature_dim: int, layer_names: list):
super().__init__()
self.layers = nn.ModuleList([
AttentionalPropagation(feature_dim, 4)
for _ in range(len(layer_names))])
self.names = layer_names
self.only_self_attn = True
for name in layer_names:
if name == 'cross':
self.only_self_attn = False
break
def forward(self, desc0, desc1=None):
""" desc0: b x m x d, desc1: b x n x d """
# only self-attn
if self.only_self_attn or desc1 is None:
desc0 = desc0.permute(0, 2, 1) # b x d x m
for layer, name in zip(self.layers, self.names):
delta0 = layer(desc0, desc0)
desc0 = desc0 + delta0
desc0 = desc0.permute(0, 2, 1) # b x m x d
return desc0
# with cross-attn
desc0 = desc0.permute(0, 2, 1) # b x d x m
desc1 = desc1.permute(0, 2, 1) # b x d x n
for layer, name in zip(self.layers, self.names):
layer.attn.prob = []
if name == 'cross':
src0, src1 = desc1, desc0
else: # if name == 'self':
src0, src1 = desc0, desc1
delta0, delta1 = layer(desc0, src0), layer(desc1, src1)
desc0, desc1 = (desc0 + delta0), (desc1 + delta1)
desc0 = desc0.permute(0, 2, 1) # b x m x d
desc1 = desc1.permute(0, 2, 1) # b x n x d
return desc0, desc1
class AbsCoordEncoder(nn.Module):
""" Input: B x N x 2 or B x N x 3
Returns: B x N x d
"""
def __init__(self, coord_dim, embed_dim):
super(AbsCoordEncoder, self).__init__()
self.fc = nn.Sequential(
nn.Linear(coord_dim, embed_dim),
nn.LayerNorm(embed_dim),
nn.ReLU()
)
def forward(self, x):
return self.fc(x)
class DistanceEncoder(nn.Module):
""" Input: B x N x 2 or B x N x 3
Returns: B x N x d
"""
def __init__(self, N, embed_dim, max_dist=None):
super(DistanceEncoder, self).__init__()
self.max_dist = max_dist
self.fc = nn.Sequential(
nn.Linear(N, embed_dim),
nn.LayerNorm(embed_dim),
nn.ReLU()
)
def forward(self, x):
dist = nn_dist(x - torch.mean(x, dim=1, keepdim=True)).float() # B x N x N
if self.max_dist is not None:
max_dist_fill = torch.ones_like(dist) * self.max_dist
dist = torch.where(dist > self.max_dist, max_dist_fill, dist)
x = self.fc(dist / torch.max(dist)) # B x N x d
return x
class AngleEncoder(nn.Module):
""" Input: B x N x 2 or B x N x 3
Returns: B x N x d
"""
def __init__(self, N, embed_dim, angle_k=None):
super(AngleEncoder, self).__init__()
self.angle_k = angle_k
self.fc = nn.Sequential(
nn.Linear(N, embed_dim),
nn.LayerNorm(embed_dim),
nn.ReLU()
)
self.max_pool = nn.AdaptiveMaxPool1d(1)
def forward(self, x): | x = F.normalize(nn_angle(x, self.angle_k), dim=-1) # b x k x m x m | 4 | 2023-11-02 13:52:20+00:00 | 8k |
gchada/ROAM | sim/rail_walker_interface/environment/env.py | [
{
"identifier": "BaseWalker",
"path": "sim/rail_walker_interface/robot/robot.py",
"snippet": "class BaseWalker(Generic[_ObsT]):\n def __init__(\n self, \n name: Optional[str] = \"robot\", \n Kp: float = 5,\n Kd: float = 1,\n force_real_control_timestep : bool = Fals... | from ..robot import BaseWalker
from ..joystick_policy.joystick_policy import JoystickPolicy | 5,617 |
class WalkerEnvironment:
@property
def robot(self) -> BaseWalker:
pass
class JoystickEnvironment:
@property
|
class WalkerEnvironment:
@property
def robot(self) -> BaseWalker:
pass
class JoystickEnvironment:
@property | def joystick_policy(self) -> JoystickPolicy: | 1 | 2023-11-02 23:21:38+00:00 | 8k |
UMass-Foundation-Model/genome | main.py | [
{
"identifier": "parse_opt",
"path": "param.py",
"snippet": "def parse_opt():\n\n parser = argparse.ArgumentParser()\n # Data input settings\n\n # Dataset and Image\n parser.add_argument('--dataset', type=str, default=\"gqa\", help='') # Pending\n\n parser.add_argument('--ann_path', type=... | import numpy as np
from tqdm import tqdm
from param import parse_opt
from engine.dataset import get_samples
from engine.util import get_module_list, save_output, pre_process, post_process
from engine.gpt import get_response, parse_response
from engine.prompt import get_prompt, format_prompt
from engine.interpreter import create_interpreter, create_module_instance, test_on_cases | 4,321 |
def stage1(args):
module_list = get_module_list(args)
prompt = get_prompt(args, module_list=module_list)
|
def stage1(args):
module_list = get_module_list(args)
prompt = get_prompt(args, module_list=module_list) | samples = get_samples(args) | 1 | 2023-11-01 16:39:33+00:00 | 8k |
ml4bio/RhoFold | rhofold/model/e2eformer.py | [
{
"identifier": "Linear",
"path": "rhofold/model/primitives.py",
"snippet": "class Linear(nn.Linear):\n \"\"\"\n A Linear layer with built-in nonstandard initializations. Called just\n like torch.nn.Linear.\n\n Implements the initializers in 1.11.4, plus some additional ones found\n in th... | import torch
import torch.nn as nn
from typing import Tuple, Sequence, Optional
from functools import partial
from rhofold.model.primitives import Linear, LayerNorm
from rhofold.model.msa import (
MSARowAttentionWithPairBias,
MSAColumnAttention,
)
from rhofold.model.outer_product_mean import OuterProductMean
from rhofold.model.pair import PairTransition
from rhofold.model.triangular_attention import (
TriangleAttention,
)
from rhofold.model.triangular_update import (
TriangleMultiplicationOutgoing,
TriangleMultiplicationIncoming,
)
from rhofold.utils.chunk_utils import chunk_layer, ChunkSizeTuner
from rhofold.utils.tensor_utils import add | 6,534 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class MSATransition(nn.Module):
"""
Feed-forward network applied to MSA activations after attention.
Implements Algorithm 9
"""
def __init__(self, c_m, n):
"""
Args:
c_m:
MSA channel dimension
n:
Factor multiplied to c_m to obtain the hidden channel
dimension
"""
super(MSATransition, self).__init__()
self.c_m = c_m
self.n = n
self.layer_norm = LayerNorm(self.c_m)
self.linear_1 = Linear(self.c_m, self.n * self.c_m)
self.relu = nn.ReLU()
self.linear_2 = Linear(self.n * self.c_m, self.c_m)
def _transition(self, m, mask):
m = self.layer_norm(m)
m = self.linear_1(m)
m = self.relu(m)
m = self.linear_2(m) * mask
return m
@torch.jit.ignore
def _chunk(self,
m: torch.Tensor,
mask: torch.Tensor,
chunk_size: int,
) -> torch.Tensor:
return chunk_layer(
self._transition,
{"m": m, "mask": mask},
chunk_size=chunk_size,
no_batch_dims=len(m.shape[:-2]),
)
def forward(
self,
m: torch.Tensor,
mask: Optional[torch.Tensor] = None,
chunk_size: Optional[int] = None,
) -> torch.Tensor:
"""
Args:
m:
[*, N_seq, N_res, C_m] MSA activation
mask:
[*, N_seq, N_res, C_m] MSA mask
Returns:
m:
[*, N_seq, N_res, C_m] MSA activation update
"""
if mask is None:
mask = m.new_ones(m.shape[:-1])
mask = mask.unsqueeze(-1)
if chunk_size is not None:
m = self._chunk(m, mask, chunk_size)
else:
m = self._transition(m, mask)
return m
class E2EformerBlockCore(nn.Module):
def __init__(
self,
c_m: int,
c_z: int,
c_hidden_opm: int,
c_hidden_mul: int,
c_hidden_pair_att: int,
no_heads_msa: int,
no_heads_pair: int,
transition_n: int,
inf: float,
eps: float,
_is_extra_msa_stack: bool = False,
):
super(E2EformerBlockCore, self).__init__()
self.msa_transition = MSATransition(
c_m=c_m,
n=transition_n,
)
| # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class MSATransition(nn.Module):
"""
Feed-forward network applied to MSA activations after attention.
Implements Algorithm 9
"""
def __init__(self, c_m, n):
"""
Args:
c_m:
MSA channel dimension
n:
Factor multiplied to c_m to obtain the hidden channel
dimension
"""
super(MSATransition, self).__init__()
self.c_m = c_m
self.n = n
self.layer_norm = LayerNorm(self.c_m)
self.linear_1 = Linear(self.c_m, self.n * self.c_m)
self.relu = nn.ReLU()
self.linear_2 = Linear(self.n * self.c_m, self.c_m)
def _transition(self, m, mask):
m = self.layer_norm(m)
m = self.linear_1(m)
m = self.relu(m)
m = self.linear_2(m) * mask
return m
@torch.jit.ignore
def _chunk(self,
m: torch.Tensor,
mask: torch.Tensor,
chunk_size: int,
) -> torch.Tensor:
return chunk_layer(
self._transition,
{"m": m, "mask": mask},
chunk_size=chunk_size,
no_batch_dims=len(m.shape[:-2]),
)
def forward(
self,
m: torch.Tensor,
mask: Optional[torch.Tensor] = None,
chunk_size: Optional[int] = None,
) -> torch.Tensor:
"""
Args:
m:
[*, N_seq, N_res, C_m] MSA activation
mask:
[*, N_seq, N_res, C_m] MSA mask
Returns:
m:
[*, N_seq, N_res, C_m] MSA activation update
"""
if mask is None:
mask = m.new_ones(m.shape[:-1])
mask = mask.unsqueeze(-1)
if chunk_size is not None:
m = self._chunk(m, mask, chunk_size)
else:
m = self._transition(m, mask)
return m
class E2EformerBlockCore(nn.Module):
def __init__(
self,
c_m: int,
c_z: int,
c_hidden_opm: int,
c_hidden_mul: int,
c_hidden_pair_att: int,
no_heads_msa: int,
no_heads_pair: int,
transition_n: int,
inf: float,
eps: float,
_is_extra_msa_stack: bool = False,
):
super(E2EformerBlockCore, self).__init__()
self.msa_transition = MSATransition(
c_m=c_m,
n=transition_n,
)
| self.outer_product_mean = OuterProductMean( | 4 | 2023-11-01 10:29:08+00:00 | 8k |
dcermak/rpm-spec-language-server | tests/conftest.py | [
{
"identifier": "RpmSpecLanguageServer",
"path": "rpm_spec_language_server/server.py",
"snippet": "class RpmSpecLanguageServer(LanguageServer):\n _CONDITION_KEYWORDS = [\n # from https://github.com/rpm-software-management/rpm/blob/7d3d9041af2d75c4709cf7a721daf5d1787cce14/build/rpmbuild_interna... | import asyncio
import os
import threading
import pytest
from typing import Generator
from lsprotocol.types import (
EXIT,
INITIALIZE,
SHUTDOWN,
ClientCapabilities,
InitializeParams,
)
from pygls.server import LanguageServer
from typeguard import install_import_hook
from rpm_spec_language_server.server import (
RpmSpecLanguageServer,
create_rpm_lang_server,
) | 3,783 |
install_import_hook("rpm_spec_language_server")
class ClientServer:
# shamelessly stolen from
# https://github.com/openlawlibrary/pygls/blob/8f601029dcf3c7c91be7bf2d86a841a1598ce1f0/tests/ls_setup.py#L109
def __init__(self):
# Client to Server pipe
csr, csw = os.pipe()
# Server to client pipe
scr, scw = os.pipe()
# Setup Server
self.server = create_rpm_lang_server()
self.server_thread = threading.Thread(
name="Server Thread",
target=self.server.start_io,
args=(os.fdopen(csr, "rb"), os.fdopen(scw, "wb")),
)
self.server_thread.daemon = True
# Setup client
self.client = LanguageServer("client", "v1", asyncio.new_event_loop())
self.client_thread = threading.Thread(
name="Client Thread",
target=self.client.start_io,
args=(os.fdopen(scr, "rb"), os.fdopen(csw, "wb")),
)
self.client_thread.daemon = True
@classmethod
def decorate(cls):
return pytest.mark.parametrize("client_server", [cls], indirect=True)
def start(self) -> None:
self.server_thread.start()
self.server.thread_id = self.server_thread.ident
self.client_thread.start()
self.initialize()
def stop(self) -> None:
shutdown_response = self.client.lsp.send_request(SHUTDOWN).result()
assert shutdown_response is None
self.client.lsp.notify(EXIT)
self.server_thread.join()
self.client._stop_event.set()
try:
self.client.loop._signal_handlers.clear() # HACK ?
except AttributeError:
pass
self.client_thread.join()
# @retry_stalled_init_fix_hack()
def initialize(self) -> None:
timeout = None if "DISABLE_TIMEOUT" in os.environ else 1
response = self.client.lsp.send_request(
INITIALIZE,
InitializeParams(
process_id=12345, root_uri="file://", capabilities=ClientCapabilities()
),
).result(timeout=timeout)
assert response.capabilities is not None
def __iter__(self) -> Generator[LanguageServer, None, None]:
yield self.client
yield self.server
|
install_import_hook("rpm_spec_language_server")
class ClientServer:
# shamelessly stolen from
# https://github.com/openlawlibrary/pygls/blob/8f601029dcf3c7c91be7bf2d86a841a1598ce1f0/tests/ls_setup.py#L109
def __init__(self):
# Client to Server pipe
csr, csw = os.pipe()
# Server to client pipe
scr, scw = os.pipe()
# Setup Server
self.server = create_rpm_lang_server()
self.server_thread = threading.Thread(
name="Server Thread",
target=self.server.start_io,
args=(os.fdopen(csr, "rb"), os.fdopen(scw, "wb")),
)
self.server_thread.daemon = True
# Setup client
self.client = LanguageServer("client", "v1", asyncio.new_event_loop())
self.client_thread = threading.Thread(
name="Client Thread",
target=self.client.start_io,
args=(os.fdopen(scr, "rb"), os.fdopen(csw, "wb")),
)
self.client_thread.daemon = True
@classmethod
def decorate(cls):
return pytest.mark.parametrize("client_server", [cls], indirect=True)
def start(self) -> None:
self.server_thread.start()
self.server.thread_id = self.server_thread.ident
self.client_thread.start()
self.initialize()
def stop(self) -> None:
shutdown_response = self.client.lsp.send_request(SHUTDOWN).result()
assert shutdown_response is None
self.client.lsp.notify(EXIT)
self.server_thread.join()
self.client._stop_event.set()
try:
self.client.loop._signal_handlers.clear() # HACK ?
except AttributeError:
pass
self.client_thread.join()
# @retry_stalled_init_fix_hack()
def initialize(self) -> None:
timeout = None if "DISABLE_TIMEOUT" in os.environ else 1
response = self.client.lsp.send_request(
INITIALIZE,
InitializeParams(
process_id=12345, root_uri="file://", capabilities=ClientCapabilities()
),
).result(timeout=timeout)
assert response.capabilities is not None
def __iter__(self) -> Generator[LanguageServer, None, None]:
yield self.client
yield self.server
| CLIENT_SERVER_T = Generator[tuple[LanguageServer, RpmSpecLanguageServer], None, None] | 0 | 2023-11-02 10:52:17+00:00 | 8k |
ziqi-zhang/TAOISM | python/layers/batch_norm_2d.py | [
{
"identifier": "SecretActivationLayer",
"path": "python/layers/activation.py",
"snippet": "class SecretActivationLayer(SecretNonlinearLayer):\n def __init__(\n self, sid, LayerName, EnclaveMode, link_prev=True, link_next=True,\n manually_register_prev=False, manually_register_next=Fals... | import numpy as np
import torch
from pdb import set_trace as st
from python.layers.activation import SecretActivationLayer
from python.sgx_net import LearnableParamTuple
from python.tensor_loader import TensorLoader
from python.utils.timer_utils import NamedTimerInstance, VerboseLevel
from python.utils.torch_utils import compare_expected_actual
from python.utils.basic_utils import ExecutionModeOptions
from python.global_config import SecretConfig | 6,380 | self.set_gpu("weight", list(self.ForwardFunc.parameters())[0].data)
self.set_gpu("bias", list(self.ForwardFunc.parameters())[1].data)
self.set_gpu("RunMean", self.ForwardFunc.running_mean.data)
self.set_gpu("RunVar", self.ForwardFunc.running_var.data)
self.PlainFunc.eval()
self.ForwardFunc.cuda().eval()
# def inject_params(self, params):
# if self.sid == -2:
# raise ValueError("S2 has no learnable parameters for injection")
# self.get_cpu("weight").copy_(params.weight.data)
# self.get_cpu("bias").copy_(params.bias.data)
# self.get_cpu("RunMean").copy_(params.running_mean.data)
# # inject sqrt(running_var) instead of running_var for precision
# self.get_cpu("RunVar").copy_(params.running_var.data)
# if self.is_enclave_mode:
# self.transfer_cpu_to_enclave("weight")
# self.transfer_cpu_to_enclave("bias")
# self.transfer_cpu_to_enclave("RunMean")
# self.transfer_cpu_to_enclave("RunVar")
def inject_params(self, params):
if self.sid == -2:
raise ValueError("S2 has no learnable parameters for injection")
if self.EnclaveMode in [ExecutionModeOptions.CPU, ExecutionModeOptions.Enclave]:
self.get_cpu("weight").copy_(params.weight.data)
self.get_cpu("bias").copy_(params.bias.data)
self.get_cpu("RunMean").copy_(params.running_mean.data)
self.get_cpu("RunVar").copy_(params.running_var.data)
if self.EnclaveMode is ExecutionModeOptions.Enclave:
self.transfer_cpu_to_enclave("weight")
self.transfer_cpu_to_enclave("bias")
self.transfer_cpu_to_enclave("RunMean")
self.transfer_cpu_to_enclave("RunVar")
elif self.EnclaveMode is ExecutionModeOptions.GPU:
self.get_gpu("weight").copy_(params.weight.data)
self.get_gpu("bias").copy_(params.bias.data)
self.get_gpu("RunMean").copy_(params.running_mean.data)
self.get_gpu("RunVar").copy_(params.running_var.data)
def reset_plain_bn(self):
# module = torch.BatchNorm2d()
self.get_cpu("weight").copy_(torch.ones(self.InputShape[1]))
self.get_cpu("bias").copy_(torch.zeros(self.InputShape[1]))
self.get_cpu("RunMean").copy_(torch.zeros(self.InputShape[1]))
self.get_cpu("RunVar").copy_(torch.ones(self.InputShape[1]))
if self.EnclaveMode is ExecutionModeOptions.Enclave:
self.transfer_cpu_to_enclave("weight")
self.transfer_cpu_to_enclave("bias")
self.transfer_cpu_to_enclave("RunMean")
self.transfer_cpu_to_enclave("RunVar")
def inject_to_plain(self, plain_layer: torch.nn.Module) -> None:
raise NotImplementedError
if self.sid == -2:
raise ValueError("S2 has no learnable parameters for injection")
self.make_sure_cpu_is_latest("weight")
self.make_sure_cpu_is_latest("bias")
plain_layer.weight.data.copy_(self.get_cpu("weight"))
plain_layer.bias.data.copy_(self.get_cpu("bias"))
plain_layer.running_mean.data.copy_(self.get_cpu("RunMean"))
plain_layer.running_var.data.copy_(self.get_cpu("RunVar"))
def generate_tensor_name_list(self, force=False):
if not force and self.tensor_name_list:
return
if self.sid == 2:
self.tensor_name_list = {}
return
if self.EnclaveMode is ExecutionModeOptions.Enclave:
NeededTensorNames = [
("input", self.InputShape, None),
# ("DerInput", self.InputShape, None),
("output", self.OutputShape, None),
# ("DerOutput", self.OutputShape, None),
("weight", self.WeightShape, None),
# ("DerWeight", self.WeightShape, None),
("bias", self.WeightShape, None),
# ("DerBias", self.WeightShape, None),
("RunMean", self.WeightShape, None),
("CurMean", self.WeightShape, None),
("RunVar", self.WeightShape, None),
("CurVar", self.WeightShape, None),
("mu", self.InputShape, None),
]
else:
NeededTensorNames = [
("output", self.OutputShape, None),
# ("DerInput", self.InputShape, None),
("input", self.InputShape, None),
("weight", self.WeightShape, None),
# ("DerWeight", self.WeightShape, None),
("bias", self.WeightShape, None),
# ("DerBias", self.WeightShape, None),
# ("DerOutput", self.OutputShape, None)
]
self.tensor_name_list = NeededTensorNames
# def forward(self):
# if self.sid == 2:
# return
# with NamedTimerInstance(f"S{self.sid}: {self.LayerName} Forward", verbose_level=VerboseLevel.LAYER):
# if self.is_enclave_mode:
# self.forward_tensor_transfer()
# self.batchnorm_forward(self.LayerName, int(False))
# else:
# self.forward_tensor_transfer()
# self.requires_grad_on_cpu("input")
# self.ForwardFunc.bias.data.copy_(self.get_cpu("bias"))
# self.ForwardFunc.weight.data.copy_(self.get_cpu("weight"))
# self.ForwardFunc.running_mean.data.copy_(self.get_cpu("RunMean"))
# # running_var of PlainFunc is ^2 of that in the enclave
# enclave_running_var = self.get_cpu("RunVar")
# self.ForwardFunc.running_var.data.copy_(enclave_running_var)
# self.set_cpu("output", self.ForwardFunc(self.get_cpu("input")))
def forward(self):
|
class SecretBatchNorm2dLayer(SecretActivationLayer):
# https://pytorch.org/docs/stable/nn.html#batchnorm2d
BatchSize = None
NumChannel = None
ImgH = None
ImgW = None
WeightShape = None
def __init__(
self, sid, LayerName, EnclaveMode, link_prev=True, link_next=True,
manually_register_prev=False, manually_register_next=False, merge_own_tensors=False
):
super().__init__(
sid, LayerName, EnclaveMode, link_prev, link_next, manually_register_prev, manually_register_next, merge_own_tensors
)
self.ForwardFuncName = "BatchNorm2d"
self.BackwardFuncName = "DerBatchNorm2d"
self.PlainFunc = torch.nn.BatchNorm2d
self.IsAffine = True
self.momentum = 0.1
self.IsCumulative = (self.momentum is None)
self.epsilon = 1e-5
if EnclaveMode is ExecutionModeOptions.CPU or EnclaveMode is ExecutionModeOptions.GPU:
self.ForwardFunc = torch.nn.BatchNorm2d
# if self.is_enclave_mode:
# self.StoreInEnclave = True
# else:
# self.ForwardFunc = torch.nn.BatchNorm2d
# self.StoreInEnclave = False
def init_shape(self):
self.InputShape = self.PrevLayer.get_output_shape()
self.OutputShape = self.InputShape
self.BatchSize, self.NumChannel, self.ImgH, self.ImgW = self.InputShape
self.WeightShape = [self.NumChannel]
self.LearnableParamsList = [
LearnableParamTuple(dw_name="DerWeight", w_name="weight", shape=self.WeightShape),
LearnableParamTuple(dw_name="DerBias", w_name="bias", shape=self.WeightShape),
]
# def init(self, start_enclave=True):
# if self.sid == 2:
# return
# TensorLoader.init(self, start_enclave)
# if self.is_enclave_mode:
# self.PlainFunc = self.PlainFunc(self.InputShape[1])
# self.PlainFunc.eval()
# self.get_cpu("weight").data.copy_(self.PlainFunc.weight.data)
# self.get_cpu("bias").data.copy_(self.PlainFunc.bias.data)
# self.get_cpu("RunMean").data.copy_(self.PlainFunc.running_mean.data)
# # inject sqrt(running_var) instead of running_var for precision
# self.get_cpu("RunVar").data.copy_(self.PlainFunc.running_var.data)
# self.transfer_cpu_to_enclave("weight")
# self.transfer_cpu_to_enclave("bias")
# self.transfer_cpu_to_enclave("RunMean")
# self.transfer_cpu_to_enclave("RunVar")
# self.batchnorm_init(
# self.LayerName,
# "input", "output", "weight", "bias",
# "DerInput", "DerOutput", "DerWeight", "DerBias",
# "RunMean", "RunVar", "CurMean", "CurVar",
# "mu",
# self.BatchSize, self.NumChannel, self.ImgH, self.ImgW,
# int(self.IsAffine), int(self.IsCumulative), self.momentum, self.epsilon)
# else:
# self.ForwardFunc = self.ForwardFunc(self.InputShape[1])
# self.PlainFunc = self.PlainFunc(self.InputShape[1])
# self.PlainFunc.eval()
# self.ForwardFunc.weight.data.copy_(self.PlainFunc.weight.data)
# self.ForwardFunc.bias.data.copy_(self.PlainFunc.bias.data)
# self.ForwardFunc.running_mean.data.copy_(self.PlainFunc.running_mean.data)
# self.ForwardFunc.running_var.data.copy_(self.PlainFunc.running_var.data)
# self.set_cpu("weight", list(self.ForwardFunc.parameters())[0].data)
# self.set_cpu("bias", list(self.ForwardFunc.parameters())[1].data)
# self.set_cpu("RunMean", self.ForwardFunc.running_mean.data)
# self.set_cpu("RunVar", self.ForwardFunc.running_var.data)
# self.ForwardFunc.eval()
def init(self, start_enclave=True):
# if self.LayerName == "Layer3.10.proxies.0.bn2":
# st()
TensorLoader.init(self, start_enclave)
if self.EnclaveMode is ExecutionModeOptions.Enclave:
self.PlainFunc = self.PlainFunc(self.InputShape[1])
self.PlainFunc.eval()
self.get_cpu("weight").data.copy_(self.PlainFunc.weight.data)
self.get_cpu("bias").data.copy_(self.PlainFunc.bias.data)
self.get_cpu("RunMean").data.copy_(self.PlainFunc.running_mean.data)
# inject sqrt(running_var) instead of running_var for precision
self.get_cpu("RunVar").data.copy_(self.PlainFunc.running_var.data)
self.transfer_cpu_to_enclave("weight")
self.transfer_cpu_to_enclave("bias")
self.transfer_cpu_to_enclave("RunMean")
self.transfer_cpu_to_enclave("RunVar")
self.batchnorm_init(
self.LayerName,
"input", "output", "weight", "bias",
# "DerInput", "DerOutput", "DerWeight", "DerBias",
"RunMean", "RunVar", "CurMean", "CurVar",
"mu",
self.BatchSize, self.NumChannel, self.ImgH, self.ImgW,
int(self.IsAffine), int(self.IsCumulative), self.momentum, self.epsilon)
elif self.EnclaveMode is ExecutionModeOptions.CPU:
self.ForwardFunc = self.ForwardFunc(self.InputShape[1])
self.PlainFunc = self.PlainFunc(self.InputShape[1])
self.PlainFunc.eval()
self.ForwardFunc.weight.data.copy_(self.PlainFunc.weight.data)
self.ForwardFunc.bias.data.copy_(self.PlainFunc.bias.data)
self.ForwardFunc.running_mean.data.copy_(self.PlainFunc.running_mean.data)
self.ForwardFunc.running_var.data.copy_(self.PlainFunc.running_var.data)
self.set_cpu("weight", list(self.ForwardFunc.parameters())[0].data)
self.set_cpu("bias", list(self.ForwardFunc.parameters())[1].data)
self.set_cpu("RunMean", self.ForwardFunc.running_mean.data)
self.set_cpu("RunVar", self.ForwardFunc.running_var.data)
self.ForwardFunc.eval()
elif self.EnclaveMode is ExecutionModeOptions.GPU:
self.ForwardFunc = self.ForwardFunc(self.InputShape[1])
self.PlainFunc = self.PlainFunc(self.InputShape[1])
self.ForwardFunc.weight.data.copy_(self.PlainFunc.weight.data)
self.ForwardFunc.bias.data.copy_(self.PlainFunc.bias.data)
self.ForwardFunc.running_mean.data.copy_(self.PlainFunc.running_mean.data)
self.ForwardFunc.running_var.data.copy_(self.PlainFunc.running_var.data)
self.set_gpu("weight", list(self.ForwardFunc.parameters())[0].data)
self.set_gpu("bias", list(self.ForwardFunc.parameters())[1].data)
self.set_gpu("RunMean", self.ForwardFunc.running_mean.data)
self.set_gpu("RunVar", self.ForwardFunc.running_var.data)
self.PlainFunc.eval()
self.ForwardFunc.cuda().eval()
# def inject_params(self, params):
# if self.sid == -2:
# raise ValueError("S2 has no learnable parameters for injection")
# self.get_cpu("weight").copy_(params.weight.data)
# self.get_cpu("bias").copy_(params.bias.data)
# self.get_cpu("RunMean").copy_(params.running_mean.data)
# # inject sqrt(running_var) instead of running_var for precision
# self.get_cpu("RunVar").copy_(params.running_var.data)
# if self.is_enclave_mode:
# self.transfer_cpu_to_enclave("weight")
# self.transfer_cpu_to_enclave("bias")
# self.transfer_cpu_to_enclave("RunMean")
# self.transfer_cpu_to_enclave("RunVar")
def inject_params(self, params):
if self.sid == -2:
raise ValueError("S2 has no learnable parameters for injection")
if self.EnclaveMode in [ExecutionModeOptions.CPU, ExecutionModeOptions.Enclave]:
self.get_cpu("weight").copy_(params.weight.data)
self.get_cpu("bias").copy_(params.bias.data)
self.get_cpu("RunMean").copy_(params.running_mean.data)
self.get_cpu("RunVar").copy_(params.running_var.data)
if self.EnclaveMode is ExecutionModeOptions.Enclave:
self.transfer_cpu_to_enclave("weight")
self.transfer_cpu_to_enclave("bias")
self.transfer_cpu_to_enclave("RunMean")
self.transfer_cpu_to_enclave("RunVar")
elif self.EnclaveMode is ExecutionModeOptions.GPU:
self.get_gpu("weight").copy_(params.weight.data)
self.get_gpu("bias").copy_(params.bias.data)
self.get_gpu("RunMean").copy_(params.running_mean.data)
self.get_gpu("RunVar").copy_(params.running_var.data)
def reset_plain_bn(self):
# module = torch.BatchNorm2d()
self.get_cpu("weight").copy_(torch.ones(self.InputShape[1]))
self.get_cpu("bias").copy_(torch.zeros(self.InputShape[1]))
self.get_cpu("RunMean").copy_(torch.zeros(self.InputShape[1]))
self.get_cpu("RunVar").copy_(torch.ones(self.InputShape[1]))
if self.EnclaveMode is ExecutionModeOptions.Enclave:
self.transfer_cpu_to_enclave("weight")
self.transfer_cpu_to_enclave("bias")
self.transfer_cpu_to_enclave("RunMean")
self.transfer_cpu_to_enclave("RunVar")
def inject_to_plain(self, plain_layer: torch.nn.Module) -> None:
raise NotImplementedError
if self.sid == -2:
raise ValueError("S2 has no learnable parameters for injection")
self.make_sure_cpu_is_latest("weight")
self.make_sure_cpu_is_latest("bias")
plain_layer.weight.data.copy_(self.get_cpu("weight"))
plain_layer.bias.data.copy_(self.get_cpu("bias"))
plain_layer.running_mean.data.copy_(self.get_cpu("RunMean"))
plain_layer.running_var.data.copy_(self.get_cpu("RunVar"))
def generate_tensor_name_list(self, force=False):
if not force and self.tensor_name_list:
return
if self.sid == 2:
self.tensor_name_list = {}
return
if self.EnclaveMode is ExecutionModeOptions.Enclave:
NeededTensorNames = [
("input", self.InputShape, None),
# ("DerInput", self.InputShape, None),
("output", self.OutputShape, None),
# ("DerOutput", self.OutputShape, None),
("weight", self.WeightShape, None),
# ("DerWeight", self.WeightShape, None),
("bias", self.WeightShape, None),
# ("DerBias", self.WeightShape, None),
("RunMean", self.WeightShape, None),
("CurMean", self.WeightShape, None),
("RunVar", self.WeightShape, None),
("CurVar", self.WeightShape, None),
("mu", self.InputShape, None),
]
else:
NeededTensorNames = [
("output", self.OutputShape, None),
# ("DerInput", self.InputShape, None),
("input", self.InputShape, None),
("weight", self.WeightShape, None),
# ("DerWeight", self.WeightShape, None),
("bias", self.WeightShape, None),
# ("DerBias", self.WeightShape, None),
# ("DerOutput", self.OutputShape, None)
]
self.tensor_name_list = NeededTensorNames
# def forward(self):
# if self.sid == 2:
# return
# with NamedTimerInstance(f"S{self.sid}: {self.LayerName} Forward", verbose_level=VerboseLevel.LAYER):
# if self.is_enclave_mode:
# self.forward_tensor_transfer()
# self.batchnorm_forward(self.LayerName, int(False))
# else:
# self.forward_tensor_transfer()
# self.requires_grad_on_cpu("input")
# self.ForwardFunc.bias.data.copy_(self.get_cpu("bias"))
# self.ForwardFunc.weight.data.copy_(self.get_cpu("weight"))
# self.ForwardFunc.running_mean.data.copy_(self.get_cpu("RunMean"))
# # running_var of PlainFunc is ^2 of that in the enclave
# enclave_running_var = self.get_cpu("RunVar")
# self.ForwardFunc.running_var.data.copy_(enclave_running_var)
# self.set_cpu("output", self.ForwardFunc(self.get_cpu("input")))
def forward(self): | with NamedTimerInstance(f"S{self.sid}: {self.LayerName} Forward", verbose_level=VerboseLevel.LAYER): | 4 | 2023-11-01 10:37:37+00:00 | 8k |
NVlabs/M2T2 | demo_rlbench.py | [
{
"identifier": "collate",
"path": "m2t2/dataset.py",
"snippet": "def collate(batch):\n batch = [data for data in batch if not data.get('invalid', False)]\n batch = {key: [data[key] for data in batch] for key in batch[0]}\n if 'task' in batch:\n task = batch.pop('task')\n batch['t... | import hydra
import pickle
import torch
from m2t2.dataset import collate
from m2t2.dataset_utils import normalize_rgb, sample_points
from m2t2.meshcat_utils import (
create_visualizer, visualize_grasp, visualize_pointcloud
)
from m2t2.m2t2 import M2T2
from m2t2.rlbench_utils import (
load_image, within_bound, gripper_pose_from_rlbench
)
from m2t2.train_utils import to_cpu, to_gpu | 4,262 | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
# Author: Wentao Yuan
'''
Demo script showing prediction for language-conditioned tasks.
'''
def load_data(episode_dir, cfg):
with open(f"{episode_dir}/meta_data.pkl", 'rb') as f:
meta_data = pickle.load(f)
data = {}
for camera in cfg.rlbench.cameras:
rgb, xyz, mask = load_image(
episode_dir, camera, meta_data, cfg.rlbench.frame_id
)
data[f"{camera}_rgb"] = rgb
data[f"{camera}_point_cloud"] = xyz
data[f"{camera}_mask"] = mask
pcd_raw, rgb_raw, seg_raw = within_bound(
data, cfg.rlbench.cameras, cfg.rlbench.scene_bounds
)
rgb = normalize_rgb(rgb_raw[:, None]).squeeze(2).T
pcd = torch.from_numpy(pcd_raw).float()
pt_idx = sample_points(pcd_raw, cfg.data.num_points)
pcd, rgb = pcd[pt_idx], rgb[pt_idx]
with open(cfg.rlbench.lang_emb_path, 'rb') as f:
lang_emb = pickle.load(f)
model_inputs = {
'inputs': torch.cat([pcd - pcd.mean(dim=0), rgb], dim=1),
'points': pcd,
'lang_tokens': torch.from_numpy(
lang_emb[meta_data['goal_description']]
).float()
}
obj_label = meta_data['object_label'][cfg.rlbench.frame_id]
if obj_label == 0:
model_inputs.update({
'object_inputs': torch.rand(1024, 6),
'ee_pose': torch.eye(4),
'bottom_center': torch.zeros(3),
'object_center': torch.zeros(3),
'task': 'pick'
})
else:
obj_xyz = torch.from_numpy(pcd_raw[seg_raw == obj_label]).float()
obj_rgb = torch.from_numpy(rgb_raw[seg_raw == obj_label]).float()
obj_xyz_grid = torch.unique(
(obj_xyz[:, :2] / cfg.data.grid_resolution).round(), dim=0
) * cfg.data.grid_resolution
bottom_center = obj_xyz.min(dim=0)[0]
bottom_center[:2] = obj_xyz_grid.mean(dim=0)
ee_pose = torch.from_numpy(gripper_pose_from_rlbench(
meta_data['gripper_matrix'][cfg.rlbench.frame_id]
)).float()
inv_ee_pose = ee_pose.inverse()
obj_xyz = obj_xyz @ inv_ee_pose[:3, :3].T + inv_ee_pose[:3, 3]
model_inputs.update({
'object_inputs': torch.cat([
obj_xyz - obj_xyz.mean(dim=0), obj_rgb
], dim=1),
'ee_pose': ee_pose,
'bottom_center': bottom_center,
'object_center': obj_xyz.mean(dim=0),
'task': 'place'
})
raw_data = meta_data
raw_data.update({
'pcd': pcd_raw, 'rgb': rgb_raw,
'seg': seg_raw, 'object_label': obj_label
})
return model_inputs, raw_data
@hydra.main(config_path='.', config_name='rlbench', version_base='1.3')
def main(cfg):
episode_dir = f"{cfg.rlbench.base_dir}/{cfg.rlbench.task_name}/episode{cfg.rlbench.episode}"
data, raw = load_data(episode_dir, cfg)
data_batch = collate([data])
to_gpu(data_batch)
| # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
# Author: Wentao Yuan
'''
Demo script showing prediction for language-conditioned tasks.
'''
def load_data(episode_dir, cfg):
with open(f"{episode_dir}/meta_data.pkl", 'rb') as f:
meta_data = pickle.load(f)
data = {}
for camera in cfg.rlbench.cameras:
rgb, xyz, mask = load_image(
episode_dir, camera, meta_data, cfg.rlbench.frame_id
)
data[f"{camera}_rgb"] = rgb
data[f"{camera}_point_cloud"] = xyz
data[f"{camera}_mask"] = mask
pcd_raw, rgb_raw, seg_raw = within_bound(
data, cfg.rlbench.cameras, cfg.rlbench.scene_bounds
)
rgb = normalize_rgb(rgb_raw[:, None]).squeeze(2).T
pcd = torch.from_numpy(pcd_raw).float()
pt_idx = sample_points(pcd_raw, cfg.data.num_points)
pcd, rgb = pcd[pt_idx], rgb[pt_idx]
with open(cfg.rlbench.lang_emb_path, 'rb') as f:
lang_emb = pickle.load(f)
model_inputs = {
'inputs': torch.cat([pcd - pcd.mean(dim=0), rgb], dim=1),
'points': pcd,
'lang_tokens': torch.from_numpy(
lang_emb[meta_data['goal_description']]
).float()
}
obj_label = meta_data['object_label'][cfg.rlbench.frame_id]
if obj_label == 0:
model_inputs.update({
'object_inputs': torch.rand(1024, 6),
'ee_pose': torch.eye(4),
'bottom_center': torch.zeros(3),
'object_center': torch.zeros(3),
'task': 'pick'
})
else:
obj_xyz = torch.from_numpy(pcd_raw[seg_raw == obj_label]).float()
obj_rgb = torch.from_numpy(rgb_raw[seg_raw == obj_label]).float()
obj_xyz_grid = torch.unique(
(obj_xyz[:, :2] / cfg.data.grid_resolution).round(), dim=0
) * cfg.data.grid_resolution
bottom_center = obj_xyz.min(dim=0)[0]
bottom_center[:2] = obj_xyz_grid.mean(dim=0)
ee_pose = torch.from_numpy(gripper_pose_from_rlbench(
meta_data['gripper_matrix'][cfg.rlbench.frame_id]
)).float()
inv_ee_pose = ee_pose.inverse()
obj_xyz = obj_xyz @ inv_ee_pose[:3, :3].T + inv_ee_pose[:3, 3]
model_inputs.update({
'object_inputs': torch.cat([
obj_xyz - obj_xyz.mean(dim=0), obj_rgb
], dim=1),
'ee_pose': ee_pose,
'bottom_center': bottom_center,
'object_center': obj_xyz.mean(dim=0),
'task': 'place'
})
raw_data = meta_data
raw_data.update({
'pcd': pcd_raw, 'rgb': rgb_raw,
'seg': seg_raw, 'object_label': obj_label
})
return model_inputs, raw_data
@hydra.main(config_path='.', config_name='rlbench', version_base='1.3')
def main(cfg):
episode_dir = f"{cfg.rlbench.base_dir}/{cfg.rlbench.task_name}/episode{cfg.rlbench.episode}"
data, raw = load_data(episode_dir, cfg)
data_batch = collate([data])
to_gpu(data_batch)
| model = M2T2.from_config(cfg.m2t2) | 5 | 2023-11-03 22:32:05+00:00 | 8k |
Codra-Ingenierie-Informatique/DataLab | cdl/core/gui/panel/macro.py | [
{
"identifier": "Conf",
"path": "cdl/config.py",
"snippet": "CONF_VERSION = \"1.0.0\"\nAPP_NAME = \"DataLab\"\nMOD_NAME = \"cdl\"\nAPP_DESC = _(\"\"\"DataLab is a generic signal and image processing platform\"\"\")\nAPP_PATH = osp.dirname(__file__)\nDEBUG = os.environ.get(\"DEBUG\", \"\").lower() in (\"... | import re
from typing import TYPE_CHECKING
from guidata.config import CONF
from guidata.configtools import get_font, get_icon
from guidata.qthelpers import add_actions, create_action, is_dark_mode
from guidata.widgets.console.shell import PythonShellWidget
from guidata.widgets.dockable import DockableWidgetMixin
from qtpy import QtCore as QC
from qtpy import QtWidgets as QW
from qtpy.compat import getopenfilename, getsavefilename
from cdl.config import Conf, _
from cdl.core.gui.macroeditor import Macro
from cdl.core.gui.panel.base import AbstractPanel
from cdl.env import execenv
from cdl.utils.qthelpers import (
create_menu_button,
qt_try_loadsave_file,
save_restore_stds,
)
from cdl.core.io.native import NativeH5Reader, NativeH5Writer | 6,457 | # -*- coding: utf-8 -*-
#
# Licensed under the terms of the BSD 3-Clause
# (see cdl/LICENSE for details)
"""DataLab Macro Panel"""
# pylint: disable=invalid-name # Allows short reference names like x, y, ...
from __future__ import annotations
if TYPE_CHECKING: # pragma: no cover
class MacroTabs(QW.QTabWidget):
"""Macro tabwidget
Args:
parent (QWidget): Parent widget
"""
SIG_CONTEXT_MENU = QC.Signal(QC.QPoint)
def __init__(self, parent=None) -> None:
super().__init__(parent)
self.setTabsClosable(True)
self.setMovable(True)
def contextMenuEvent(self, event): # pylint: disable=C0103
"""Override Qt method"""
self.SIG_CONTEXT_MENU.emit(event.globalPos())
class MacroPanel(AbstractPanel, DockableWidgetMixin):
"""Macro manager widget
Args:
parent (QWidget): Parent widget
"""
LOCATION = QC.Qt.LeftDockWidgetArea
PANEL_STR = _("Macro panel")
H5_PREFIX = "DataLab_Mac"
SIG_OBJECT_MODIFIED = QC.Signal()
FILE_FILTERS = f"{_('Python files')} (*.py)"
def __init__(self, parent: QW.QWidget | None = None) -> None:
super().__init__(parent)
self.setWindowTitle(_("Macro manager"))
self.setWindowIcon(get_icon("libre-gui-cogs.svg"))
self.setOrientation(QC.Qt.Vertical)
self.context_menu = QW.QMenu()
self.tabwidget_tb = QW.QToolBar(self)
self.tabwidget_tb.setOrientation(QC.Qt.Vertical)
self.console = PythonShellWidget(self, read_only=True)
self.console.set_light_background(not is_dark_mode())
self.console.setMaximumBlockCount(5000)
font = get_font(CONF, "console")
font.setPointSize(10)
self.console.set_font(font)
self.console.write(_("-***- Macro Console -***-"), prompt=True)
self.tabwidget = MacroTabs(self)
self.tabwidget.tabBarDoubleClicked.connect(self.rename_macro)
self.tabwidget.tabCloseRequested.connect(self.remove_macro)
self.tabwidget.currentChanged.connect(self.__update_actions)
tabwidget_with_tb = QW.QWidget(self)
tabwidget_with_tb.setLayout(QW.QHBoxLayout())
tabwidget_with_tb.layout().addWidget(self.tabwidget_tb)
tabwidget_with_tb.layout().addWidget(self.tabwidget)
# Put console in a groupbox to have a title
console_groupbox = QW.QGroupBox(_("Console"), self)
console_groupbox.setLayout(QW.QHBoxLayout())
console_groupbox.layout().addWidget(self.console)
# Put console groupbox in a frame to have a nice margin
console_frame = QW.QFrame(self)
console_frame.setLayout(QW.QHBoxLayout())
console_frame.layout().addWidget(console_groupbox)
for widget in (tabwidget_with_tb, console_frame):
self.addWidget(widget)
# Ensure that the tabwidget and the console have the same height
self.setStretchFactor(0, 1)
self.setStretchFactor(1, 0)
self.run_action = None
self.stop_action = None
self.obj_actions: list[QW.QAction] = [] # Object-dependent actions
| # -*- coding: utf-8 -*-
#
# Licensed under the terms of the BSD 3-Clause
# (see cdl/LICENSE for details)
"""DataLab Macro Panel"""
# pylint: disable=invalid-name # Allows short reference names like x, y, ...
from __future__ import annotations
if TYPE_CHECKING: # pragma: no cover
class MacroTabs(QW.QTabWidget):
"""Macro tabwidget
Args:
parent (QWidget): Parent widget
"""
SIG_CONTEXT_MENU = QC.Signal(QC.QPoint)
def __init__(self, parent=None) -> None:
super().__init__(parent)
self.setTabsClosable(True)
self.setMovable(True)
def contextMenuEvent(self, event): # pylint: disable=C0103
"""Override Qt method"""
self.SIG_CONTEXT_MENU.emit(event.globalPos())
class MacroPanel(AbstractPanel, DockableWidgetMixin):
"""Macro manager widget
Args:
parent (QWidget): Parent widget
"""
LOCATION = QC.Qt.LeftDockWidgetArea
PANEL_STR = _("Macro panel")
H5_PREFIX = "DataLab_Mac"
SIG_OBJECT_MODIFIED = QC.Signal()
FILE_FILTERS = f"{_('Python files')} (*.py)"
def __init__(self, parent: QW.QWidget | None = None) -> None:
super().__init__(parent)
self.setWindowTitle(_("Macro manager"))
self.setWindowIcon(get_icon("libre-gui-cogs.svg"))
self.setOrientation(QC.Qt.Vertical)
self.context_menu = QW.QMenu()
self.tabwidget_tb = QW.QToolBar(self)
self.tabwidget_tb.setOrientation(QC.Qt.Vertical)
self.console = PythonShellWidget(self, read_only=True)
self.console.set_light_background(not is_dark_mode())
self.console.setMaximumBlockCount(5000)
font = get_font(CONF, "console")
font.setPointSize(10)
self.console.set_font(font)
self.console.write(_("-***- Macro Console -***-"), prompt=True)
self.tabwidget = MacroTabs(self)
self.tabwidget.tabBarDoubleClicked.connect(self.rename_macro)
self.tabwidget.tabCloseRequested.connect(self.remove_macro)
self.tabwidget.currentChanged.connect(self.__update_actions)
tabwidget_with_tb = QW.QWidget(self)
tabwidget_with_tb.setLayout(QW.QHBoxLayout())
tabwidget_with_tb.layout().addWidget(self.tabwidget_tb)
tabwidget_with_tb.layout().addWidget(self.tabwidget)
# Put console in a groupbox to have a title
console_groupbox = QW.QGroupBox(_("Console"), self)
console_groupbox.setLayout(QW.QHBoxLayout())
console_groupbox.layout().addWidget(self.console)
# Put console groupbox in a frame to have a nice margin
console_frame = QW.QFrame(self)
console_frame.setLayout(QW.QHBoxLayout())
console_frame.layout().addWidget(console_groupbox)
for widget in (tabwidget_with_tb, console_frame):
self.addWidget(widget)
# Ensure that the tabwidget and the console have the same height
self.setStretchFactor(0, 1)
self.setStretchFactor(1, 0)
self.run_action = None
self.stop_action = None
self.obj_actions: list[QW.QAction] = [] # Object-dependent actions | self.__macros: list[Macro] = [] | 1 | 2023-11-09 16:56:03+00:00 | 8k |
sxwyh/pytradecn | src/pytradecn/template/basetemplate.py | [
{
"identifier": "BaseClientMeta",
"path": "src/pytradecn/client/baseclient.py",
"snippet": "class BaseClientMeta(type):\n \"\"\"客户端元类\"\"\"\n\n clients = []\n\n def __init__(cls, name, bases, attrs):\n\n super(BaseClientMeta, cls).__init__(name, bases, attrs)\n\n if name != 'BaseC... | from abc import ABCMeta, abstractmethod
from functools import wraps
from pywinauto.timings import Timings
from pywinauto.application import AppStartError
from ..client.baseclient import BaseClientMeta
from ..prompt import PromptManager
from ..engine.baseengine import BaseEngine
from ..model.basemodel import BaseModel
from ..logger import logger
from ..error import ClientConfigError, TimeoutError | 4,664 | # 险,开源软件提供者或插件提供者均不承担任何责任。同时,无论是直接的、间接的、偶然的、潜在的因使用该软件所造成的账号安全
# 损失、数据安全损失、账户资产损失或其他任何责任事故,开源软件提供者或插件提供者均不承担任何责任。请不要将该软件应用于商
# 业活动,否则由于把该软件应用于商业活动所造成的一切损失或法律责任,开源软件提供者或插件提供者均不承担任何责任。
#
# 修改日志:
# 2022-08-20 第一次编写
#
"""
模板就象是汽车的总装车间,模板基类用来完成交易模板的基础行为,模板只用来定义功能而不实现功能,功能的实现应有交易模型(model)完成。
"""
class BaseTemplateMeta(ABCMeta):
"""交易模板元类"""
templates = {}
def __init__(cls, name, bases, attrs):
super(BaseTemplateMeta, cls).__init__(name, bases, attrs)
if name != 'BaseTemplate':
BaseTemplateMeta.templates[attrs['name']] = cls
def __call__(cls, client=None, user=None, psw=None, second=None, **account):
client = BaseClientMeta.clients[-1] if client is None else client
client.user = user if user is not None else client.user
client.psw = psw if psw is not None else client.psw
client.second = second if second is not None else client.second
client.account.update(account)
return super(BaseTemplateMeta, cls).__call__(client)
class BaseTemplate(metaclass=BaseTemplateMeta):
"""
交易模板的基类,有4个功能在其子类中必须有定义,分别是buy(买入)、sell(卖出)、cancel(撤单)、query(查询),任何在子类中定义
的功能都必须添加@BaseTemplate.connect修饰器才能正常工作。在子类中self._client用于访问客户端,self._prompt用于访问弹窗管理
器,模板基类是唯一对外接口,外部访问时使用Trader()访问,下面是在您的项目中的访问方法:
"""
name = '' # 交易模板的名称
def __new__(cls, client):
return object.__new__(BaseTemplateMeta.templates[client.tradetemplate])
def __init__(self, client):
self._client = client
self._prompt = PromptManager(client)
getattr(Timings, client.TRADE_SPEED_MODE)()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
# if exc_type is not None:
# logger.error(''.join(traceback.format_exception(exc_type, exc_val, exc_tb)))
self.close()
def close(self):
self._prompt.stop_monitor()
self._client.close()
def __login(self):
if self._client.window() is self._client.loginwindow:
# 用户未登录
BaseEngine(self._client).login()
self._client.mainwindow.wait('ready', timeout=15) # 等待交易主窗口准备好
self._prompt.start_monitor(delay=5) # 关闭自动弹出的提示框
BaseModel.model_object.pop(self._client.key, None) # 建立新对象
self._model = BaseModel(self._client)
else:
# 用户已登录
self._model = BaseModel(self._client)
self._model.initialization() # 初始化交易窗口
self._model.reset()
def __hook(self):
self._client.hook()
def __active(self):
self._client.active()
def __setapp(self):
try:
self._client.connect()
except (AppStartError, TimeoutError):
raise ClientConfigError(f'无法启动客户端,可能路径拼写错误:{self._client.path}')
def __unlock(self):
"""软件的自动化依赖电脑在登录的情况下"""
# if win32gui.GetForegroundWindow() == 0:
# raise ScreenLockedError('屏幕被锁定') # 操作系统限制,无法用软件解锁电脑
# return self
pass
def __connect(self):
# 1.电脑屏幕是否被锁定
self.__unlock()
# 2.启动应用程序
self.__setapp()
# 3.激活应用程序
self.__active()
# 4.调用钩子
self.__hook()
# 5.登录应用程序
self.__login()
@staticmethod
def connect(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
try:
self.__connect()
return True, func(self, *args, **kwargs)
except Exception as err:
| #
# 券商客户端自动化测试库
# Copyright (C) 2023 谁的谁(41715399@qq.com) All rights reserved.
#
# 模块功能:设计总体架构的模板规划
# 建立日期:2023.08.20
# 联系方式:谁的谁(41715399@qq.com)
#
# 开源软件声明:
# 本软件遵守“MIT License”开源协议开源,仅供学习和参考。您可以自由使用或修改源代码或二进制文件,但必须保留上述版权声明。
# 该软件旨在深度学习和挖掘python pywinauto库的功能和潜力,由于环境的不确定性和该软件的不可靠性,请不要将该软件应用于
# 实盘交易。如您确需量化交易实盘功能,请使用券商提供的量化交易平台,否则由于您使用该软件实盘交易所造成的账户损失或政策风
# 险,开源软件提供者或插件提供者均不承担任何责任。同时,无论是直接的、间接的、偶然的、潜在的因使用该软件所造成的账号安全
# 损失、数据安全损失、账户资产损失或其他任何责任事故,开源软件提供者或插件提供者均不承担任何责任。请不要将该软件应用于商
# 业活动,否则由于把该软件应用于商业活动所造成的一切损失或法律责任,开源软件提供者或插件提供者均不承担任何责任。
#
# 修改日志:
# 2022-08-20 第一次编写
#
"""
模板就象是汽车的总装车间,模板基类用来完成交易模板的基础行为,模板只用来定义功能而不实现功能,功能的实现应有交易模型(model)完成。
"""
class BaseTemplateMeta(ABCMeta):
"""交易模板元类"""
templates = {}
def __init__(cls, name, bases, attrs):
super(BaseTemplateMeta, cls).__init__(name, bases, attrs)
if name != 'BaseTemplate':
BaseTemplateMeta.templates[attrs['name']] = cls
def __call__(cls, client=None, user=None, psw=None, second=None, **account):
client = BaseClientMeta.clients[-1] if client is None else client
client.user = user if user is not None else client.user
client.psw = psw if psw is not None else client.psw
client.second = second if second is not None else client.second
client.account.update(account)
return super(BaseTemplateMeta, cls).__call__(client)
class BaseTemplate(metaclass=BaseTemplateMeta):
"""
交易模板的基类,有4个功能在其子类中必须有定义,分别是buy(买入)、sell(卖出)、cancel(撤单)、query(查询),任何在子类中定义
的功能都必须添加@BaseTemplate.connect修饰器才能正常工作。在子类中self._client用于访问客户端,self._prompt用于访问弹窗管理
器,模板基类是唯一对外接口,外部访问时使用Trader()访问,下面是在您的项目中的访问方法:
"""
name = '' # 交易模板的名称
def __new__(cls, client):
return object.__new__(BaseTemplateMeta.templates[client.tradetemplate])
def __init__(self, client):
self._client = client
self._prompt = PromptManager(client)
getattr(Timings, client.TRADE_SPEED_MODE)()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
# if exc_type is not None:
# logger.error(''.join(traceback.format_exception(exc_type, exc_val, exc_tb)))
self.close()
def close(self):
self._prompt.stop_monitor()
self._client.close()
def __login(self):
if self._client.window() is self._client.loginwindow:
# 用户未登录
BaseEngine(self._client).login()
self._client.mainwindow.wait('ready', timeout=15) # 等待交易主窗口准备好
self._prompt.start_monitor(delay=5) # 关闭自动弹出的提示框
BaseModel.model_object.pop(self._client.key, None) # 建立新对象
self._model = BaseModel(self._client)
else:
# 用户已登录
self._model = BaseModel(self._client)
self._model.initialization() # 初始化交易窗口
self._model.reset()
def __hook(self):
self._client.hook()
def __active(self):
self._client.active()
def __setapp(self):
try:
self._client.connect()
except (AppStartError, TimeoutError):
raise ClientConfigError(f'无法启动客户端,可能路径拼写错误:{self._client.path}')
def __unlock(self):
"""软件的自动化依赖电脑在登录的情况下"""
# if win32gui.GetForegroundWindow() == 0:
# raise ScreenLockedError('屏幕被锁定') # 操作系统限制,无法用软件解锁电脑
# return self
pass
def __connect(self):
# 1.电脑屏幕是否被锁定
self.__unlock()
# 2.启动应用程序
self.__setapp()
# 3.激活应用程序
self.__active()
# 4.调用钩子
self.__hook()
# 5.登录应用程序
self.__login()
@staticmethod
def connect(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
try:
self.__connect()
return True, func(self, *args, **kwargs)
except Exception as err: | logger.exception(str(err)) | 4 | 2023-11-03 02:22:34+00:00 | 8k |
humemarx/CPG-LCF | models/backbone2d/hrnet.py | [
{
"identifier": "ConvModule",
"path": "models/networks/backbone.py",
"snippet": "class ConvModule(nn.Module):\r\n \"\"\"A conv block that bundles conv/norm/activation layers.\r\n\r\n This block simplifies the usage of convolution layers, which are commonly\r\n used with a norm layer (e.g., Batc... | import warnings
import torch.nn as nn
import torch
from models.networks.backbone import ConvModule
from models.backbone2d.resnet import BasicBlock, Bottleneck, get_norm_name
from utils.config_parser import get_module
from models.utils import resize, Upsample
from torch.nn.modules.batchnorm import _BatchNorm
from collections import OrderedDict | 6,408 | ConvModule(
in_channels[j],
in_channels[j],
kernel_size=3,
stride=2,
padding=1,
bias=False,
conv_type=self.conv_type),
self.norm_type(in_channels[j]),
nn.ReLU(inplace=False)))
fuse_layer.append(nn.Sequential(*conv_downsamples))
fuse_layers.append(nn.ModuleList(fuse_layer))
return nn.ModuleList(fuse_layers)
def forward(self, x):
"""Forward function."""
if self.num_branches == 1:
return [self.branches[0](x[0])]
for i in range(self.num_branches):
x[i] = self.branches[i](x[i])
x_fuse = []
for i in range(len(self.fuse_layers)):
y = 0
for j in range(self.num_branches):
if i == j:
y += x[j]
elif j > i:
y = y + resize(
self.fuse_layers[i][j](x[j]),
size=x[i].shape[2:],
mode='bilinear',
align_corners=False)
else:
y += self.fuse_layers[i][j](x[j])
x_fuse.append(self.relu(y))
return x_fuse
class HRNet(nn.Module):
"""HRNet backbone.
This backbone is the implementation of `High-Resolution Representations
for Labeling Pixels and Regions <https://arxiv.org/abs/1904.04514>`_.
Args:
extra (dict): Detailed configuration for each stage of HRNet.
There must be 4 stages, the configuration for each stage must have
5 keys:
- num_modules (int): The number of HRModule in this stage.
- num_branches (int): The number of branches in the HRModule.
- block (str): The type of convolution block.
- num_blocks (tuple): The number of blocks in each branch.
The length must be equal to num_branches.
- num_channels (tuple): The number of channels in each branch.
The length must be equal to num_branches.
in_channels (int): Number of input image channels. Normally 3.
conv_cfg (dict): Dictionary to construct and config conv layer.
Default: None.
norm_cfg (dict): Dictionary to construct and config norm layer.
Use `BN` by default.
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only. Default: False.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
-1 means not freezing any parameters. Default: -1.
zero_init_residual (bool): Whether to use zero init for last norm layer
in resblocks to let them behave as identity. Default: False.
multiscale_output (bool): Whether to output multi-level features
produced by multiple branches. If False, only the first level
feature will be output. Default: True.
pretrained (str, optional): Model pretrained path. Default: None.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None.
Example:
>>> from mmseg.models import HRNet
>>> import torch
>>> extra = dict(
>>> stage1=dict(
>>> num_modules=1,
>>> num_branches=1,
>>> block='BOTTLENECK',
>>> num_blocks=(4, ),
>>> num_channels=(64, )),
>>> stage2=dict(
>>> num_modules=1,
>>> num_branches=2,
>>> block='BASIC',
>>> num_blocks=(4, 4),
>>> num_channels=(32, 64)),
>>> stage3=dict(
>>> num_modules=4,
>>> num_branches=3,
>>> block='BASIC',
>>> num_blocks=(4, 4, 4),
>>> num_channels=(32, 64, 128)),
>>> stage4=dict(
>>> num_modules=3,
>>> num_branches=4,
>>> block='BASIC',
>>> num_blocks=(4, 4, 4, 4),
>>> num_channels=(32, 64, 128, 256)))
>>> self = HRNet(extra, in_channels=1)
>>> self.eval()
>>> inputs = torch.rand(1, 1, 32, 32)
>>> level_outputs = self.forward(inputs)
>>> for level_out in level_outputs:
... print(tuple(level_out.shape))
(1, 32, 8, 8)
(1, 64, 4, 4)
(1, 128, 2, 2)
(1, 256, 1, 1)
"""
| # Copyright (c) OpenMMLab. All rights reserved.
class HRModule(nn.Module):
"""High-Resolution Module for HRNet.
In this module, every branch has 4 BasicBlocks/Bottlenecks. Fusion/Exchange
is in this module.
"""
def __init__(self,
num_branches,
blocks,
num_blocks,
in_channels,
num_channels,
multiscale_output=True,
with_cp=False,
conv_type=nn.Conv2d,
norm_type=nn.BatchNorm2d):
super().__init__()
self._check_branches(num_branches, num_blocks, in_channels,
num_channels)
self.in_channels = in_channels
self.num_branches = num_branches
self.multiscale_output = multiscale_output
self.norm_type = norm_type
self.conv_type = conv_type
self.with_cp = with_cp
self.branches = self._make_branches(num_branches, blocks, num_blocks,
num_channels)
self.fuse_layers = self._make_fuse_layers()
self.relu = nn.ReLU(inplace=False)
def _check_branches(self, num_branches, num_blocks, in_channels,
num_channels):
"""Check branches configuration."""
if num_branches != len(num_blocks):
error_msg = f'NUM_BRANCHES({num_branches}) <> NUM_BLOCKS(' \
f'{len(num_blocks)})'
raise ValueError(error_msg)
if num_branches != len(num_channels):
error_msg = f'NUM_BRANCHES({num_branches}) <> NUM_CHANNELS(' \
f'{len(num_channels)})'
raise ValueError(error_msg)
if num_branches != len(in_channels):
error_msg = f'NUM_BRANCHES({num_branches}) <> NUM_INCHANNELS(' \
f'{len(in_channels)})'
raise ValueError(error_msg)
def _make_one_branch(self,
branch_index,
block,
num_blocks,
num_channels,
stride=1):
"""Build one branch."""
downsample = None
if stride != 1 or \
self.in_channels[branch_index] != \
num_channels[branch_index] * block.expansion:
downsample = nn.Sequential(
ConvModule(
self.in_channels[branch_index],
num_channels[branch_index] * block.expansion,
kernel_size=1,
stride=stride,
bias=False,
conv_type=self.conv_type),
self.norm_type(num_channels[branch_index] * block.expansion))
layers = []
layers.append(
block(
self.in_channels[branch_index],
num_channels[branch_index],
stride,
downsample=downsample,
with_cp=self.with_cp,
norm_type=self.norm_type,
conv_type=self.conv_type))
self.in_channels[branch_index] = \
num_channels[branch_index] * block.expansion
for i in range(1, num_blocks[branch_index]):
layers.append(
block(
self.in_channels[branch_index],
num_channels[branch_index],
with_cp=self.with_cp,
norm_type=self.norm_type,
conv_type=self.conv_type))
return nn.Sequential(*layers)
def _make_branches(self, num_branches, block, num_blocks, num_channels):
"""Build multiple branch."""
branches = []
for i in range(num_branches):
branches.append(
self._make_one_branch(i, block, num_blocks, num_channels))
return nn.ModuleList(branches)
def _make_fuse_layers(self):
"""Build fuse layer."""
if self.num_branches == 1:
return None
num_branches = self.num_branches
in_channels = self.in_channels
fuse_layers = []
num_out_branches = num_branches if self.multiscale_output else 1
for i in range(num_out_branches):
fuse_layer = []
for j in range(num_branches):
if j > i:
fuse_layer.append(
nn.Sequential(
ConvModule(
in_channels[j],
in_channels[i],
kernel_size=1,
stride=1,
padding=0,
bias=False,
conv_type=self.conv_type),
self.norm_type(in_channels[i]),
# we set align_corners=False for HRNet
Upsample(
scale_factor=2**(j - i),
mode='bilinear',
align_corners=False)))
elif j == i:
fuse_layer.append(None)
else:
conv_downsamples = []
for k in range(i - j):
if k == i - j - 1:
conv_downsamples.append(
nn.Sequential(
ConvModule(
in_channels[j],
in_channels[i],
kernel_size=3,
stride=2,
padding=1,
bias=False,
conv_type=self.conv_type),
self.norm_type(in_channels[i])))
else:
conv_downsamples.append(
nn.Sequential(
ConvModule(
in_channels[j],
in_channels[j],
kernel_size=3,
stride=2,
padding=1,
bias=False,
conv_type=self.conv_type),
self.norm_type(in_channels[j]),
nn.ReLU(inplace=False)))
fuse_layer.append(nn.Sequential(*conv_downsamples))
fuse_layers.append(nn.ModuleList(fuse_layer))
return nn.ModuleList(fuse_layers)
def forward(self, x):
"""Forward function."""
if self.num_branches == 1:
return [self.branches[0](x[0])]
for i in range(self.num_branches):
x[i] = self.branches[i](x[i])
x_fuse = []
for i in range(len(self.fuse_layers)):
y = 0
for j in range(self.num_branches):
if i == j:
y += x[j]
elif j > i:
y = y + resize(
self.fuse_layers[i][j](x[j]),
size=x[i].shape[2:],
mode='bilinear',
align_corners=False)
else:
y += self.fuse_layers[i][j](x[j])
x_fuse.append(self.relu(y))
return x_fuse
class HRNet(nn.Module):
"""HRNet backbone.
This backbone is the implementation of `High-Resolution Representations
for Labeling Pixels and Regions <https://arxiv.org/abs/1904.04514>`_.
Args:
extra (dict): Detailed configuration for each stage of HRNet.
There must be 4 stages, the configuration for each stage must have
5 keys:
- num_modules (int): The number of HRModule in this stage.
- num_branches (int): The number of branches in the HRModule.
- block (str): The type of convolution block.
- num_blocks (tuple): The number of blocks in each branch.
The length must be equal to num_branches.
- num_channels (tuple): The number of channels in each branch.
The length must be equal to num_branches.
in_channels (int): Number of input image channels. Normally 3.
conv_cfg (dict): Dictionary to construct and config conv layer.
Default: None.
norm_cfg (dict): Dictionary to construct and config norm layer.
Use `BN` by default.
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only. Default: False.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
-1 means not freezing any parameters. Default: -1.
zero_init_residual (bool): Whether to use zero init for last norm layer
in resblocks to let them behave as identity. Default: False.
multiscale_output (bool): Whether to output multi-level features
produced by multiple branches. If False, only the first level
feature will be output. Default: True.
pretrained (str, optional): Model pretrained path. Default: None.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None.
Example:
>>> from mmseg.models import HRNet
>>> import torch
>>> extra = dict(
>>> stage1=dict(
>>> num_modules=1,
>>> num_branches=1,
>>> block='BOTTLENECK',
>>> num_blocks=(4, ),
>>> num_channels=(64, )),
>>> stage2=dict(
>>> num_modules=1,
>>> num_branches=2,
>>> block='BASIC',
>>> num_blocks=(4, 4),
>>> num_channels=(32, 64)),
>>> stage3=dict(
>>> num_modules=4,
>>> num_branches=3,
>>> block='BASIC',
>>> num_blocks=(4, 4, 4),
>>> num_channels=(32, 64, 128)),
>>> stage4=dict(
>>> num_modules=3,
>>> num_branches=4,
>>> block='BASIC',
>>> num_blocks=(4, 4, 4, 4),
>>> num_channels=(32, 64, 128, 256)))
>>> self = HRNet(extra, in_channels=1)
>>> self.eval()
>>> inputs = torch.rand(1, 1, 32, 32)
>>> level_outputs = self.forward(inputs)
>>> for level_out in level_outputs:
... print(tuple(level_out.shape))
(1, 32, 8, 8)
(1, 64, 4, 4)
(1, 128, 2, 2)
(1, 256, 1, 1)
"""
| blocks_dict = {'BASIC': BasicBlock, 'BOTTLENECK': Bottleneck} | 1 | 2023-11-02 09:50:13+00:00 | 8k |
lalalamdbf/PLSE_IDRR | src/prompt-tuning/prompt/prompt_base.py | [
{
"identifier": "InputFeatures",
"path": "src/prompt-tuning/prompt/data_utils.py",
"snippet": "class InputFeatures(dict):\n \"\"\"\n The class for input to the PLM and Prompts. To make users explicitly know the available keys,\n we define a dict with a set of predefined possible keys. The defau... | from abc import abstractmethod
from transformers.file_utils import ModelOutput
from transformers.utils.dummy_pt_objects import PreTrainedModel
from .data_utils import InputFeatures, InputExample
from typing import *
from transformers.tokenization_utils import PreTrainedTokenizer
import json
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
import traceback | 3,763 |
class Template(nn.Module):
r'''
Base class for all the templates.
Most of methods are abstract, with some exceptions to hold the common methods for all template, such as ``loss_ids``, ``save``, ``load``.
Args:
tokenizer (:obj:`PreTrainedTokenizer`): A tokenizer to appoint the vocabulary and the tokenization strategy.
placeholder_mapping (:obj:`dict`): A place holder to represent the original input text.
'''
registered_inputflag_names = ["loss_ids", "shortenable_ids"]
def __init__(self,
tokenizer: PreTrainedTokenizer,
placeholder_mapping: dict = {'<text_a>':'text_a','<text_b>':'text_b'},
):
super().__init__()
self.tokenizer = tokenizer
self.placeholder_mapping = placeholder_mapping
self._in_on_text_set = False
self.mixed_token_start = "{"
self.mixed_token_end = "}"
def get_default_loss_ids(self) -> List[int]:
'''Get the loss indices for the template using mask.
e.g. when self.text is ``'{"placeholder": "text_a"}. {"meta": "word"} is {"mask"}.'``,
output is ``[0, 0, 0, 0, 1, 0]``.
Returns:
:obj:`List[int]`: A list of integers in the range [0, 1]:
- 1 for a masked tokens.
- 0 for a sequence tokens.
'''
return [1 if 'mask' in d else 0 for d in self.text]
def get_default_shortenable_ids(self) -> List[int]:
"""Every template needs shortenable_ids, denoting which part of the template can be truncate to fit
the language model's ``max_seq_length``. Default: the input text is shortenable, while the template text and other
special tokens are not shortenable.
e.g. when self.text is ``'{"placeholder": "text_a"} {"placeholder": "text_b", "shortenable": False} {"meta": "word"} is {"mask"}.'``,
output is ``[1, 0, 0, 0, 0, 0, 0]``.
Returns:
:obj:`List[int]`: A list of integers in the range ``[0, 1]``:
- 1 for the input tokens.
- 0 for the template sequence tokens.
"""
idx = []
for d in self.text:
if 'shortenable' in d:
idx.append(1 if d['shortenable'] else 0)
else:
idx.append(1 if 'placeholder' in d else 0)
return idx
def get_default_soft_token_ids(self) -> List[int]:
r'''
This function identifies which tokens are soft tokens.
Sometimes tokens in the template are not from the vocabulary,
but a sequence of soft tokens.
In this case, you need to implement this function
Raises:
NotImplementedError: if needed, add ``soft_token_ids`` into ``registered_inputflag_names`` attribute of Template class and implement this method.
'''
raise NotImplementedError
def incorporate_text_example(self,
|
class Template(nn.Module):
r'''
Base class for all the templates.
Most of methods are abstract, with some exceptions to hold the common methods for all template, such as ``loss_ids``, ``save``, ``load``.
Args:
tokenizer (:obj:`PreTrainedTokenizer`): A tokenizer to appoint the vocabulary and the tokenization strategy.
placeholder_mapping (:obj:`dict`): A place holder to represent the original input text.
'''
registered_inputflag_names = ["loss_ids", "shortenable_ids"]
def __init__(self,
tokenizer: PreTrainedTokenizer,
placeholder_mapping: dict = {'<text_a>':'text_a','<text_b>':'text_b'},
):
super().__init__()
self.tokenizer = tokenizer
self.placeholder_mapping = placeholder_mapping
self._in_on_text_set = False
self.mixed_token_start = "{"
self.mixed_token_end = "}"
def get_default_loss_ids(self) -> List[int]:
'''Get the loss indices for the template using mask.
e.g. when self.text is ``'{"placeholder": "text_a"}. {"meta": "word"} is {"mask"}.'``,
output is ``[0, 0, 0, 0, 1, 0]``.
Returns:
:obj:`List[int]`: A list of integers in the range [0, 1]:
- 1 for a masked tokens.
- 0 for a sequence tokens.
'''
return [1 if 'mask' in d else 0 for d in self.text]
def get_default_shortenable_ids(self) -> List[int]:
"""Every template needs shortenable_ids, denoting which part of the template can be truncate to fit
the language model's ``max_seq_length``. Default: the input text is shortenable, while the template text and other
special tokens are not shortenable.
e.g. when self.text is ``'{"placeholder": "text_a"} {"placeholder": "text_b", "shortenable": False} {"meta": "word"} is {"mask"}.'``,
output is ``[1, 0, 0, 0, 0, 0, 0]``.
Returns:
:obj:`List[int]`: A list of integers in the range ``[0, 1]``:
- 1 for the input tokens.
- 0 for the template sequence tokens.
"""
idx = []
for d in self.text:
if 'shortenable' in d:
idx.append(1 if d['shortenable'] else 0)
else:
idx.append(1 if 'placeholder' in d else 0)
return idx
def get_default_soft_token_ids(self) -> List[int]:
r'''
This function identifies which tokens are soft tokens.
Sometimes tokens in the template are not from the vocabulary,
but a sequence of soft tokens.
In this case, you need to implement this function
Raises:
NotImplementedError: if needed, add ``soft_token_ids`` into ``registered_inputflag_names`` attribute of Template class and implement this method.
'''
raise NotImplementedError
def incorporate_text_example(self, | example: InputExample, | 1 | 2023-11-01 08:52:36+00:00 | 8k |
JakubPluta/gymhero | gymhero/api/routes/training_unit.py | [
{
"identifier": "get_current_active_user",
"path": "gymhero/api/dependencies.py",
"snippet": "def get_current_active_user(\n current_user: User = Depends(get_current_user),\n) -> User:\n \"\"\"Returns the current active user.\n\n Parameters:\n current_user (User, optional): The current u... | from typing import List, Optional
from fastapi import APIRouter, Depends, HTTPException, status
from sqlalchemy.orm import Session
from gymhero.api.dependencies import (
get_current_active_user,
get_current_superuser,
get_pagination_params,
)
from gymhero.crud import exercise_crud, training_unit_crud
from gymhero.database.db import get_db
from gymhero.log import get_logger
from gymhero.models import TrainingUnit
from gymhero.models.exercise import Exercise
from gymhero.models.user import User
from gymhero.schemas.exercise import ExerciseInDB
from gymhero.schemas.training_unit import (
TrainingUnitCreate,
TrainingUnitInDB,
TrainingUnitUpdate,
) | 3,916 | "/{training_unit_id}",
response_model=TrainingUnitInDB,
status_code=status.HTTP_200_OK,
)
def update_training_unit(
    training_unit_id: int,
    training_unit_update: TrainingUnitUpdate,
    db: Session = Depends(get_db),
    user: User = Depends(get_current_active_user),
):
    """Apply ``training_unit_update`` to the training unit with ``training_unit_id``.

    Parameters:
        training_unit_id (int): Primary key of the unit to update.
        training_unit_update (TrainingUnitUpdate): Fields to apply.
        db (Session): Database session.
        user (User): Authenticated caller; must own the unit or be a superuser.

    Raises:
        HTTPException: 404 if the unit does not exist, 403 if the caller lacks
            permission, 500 if the persistence layer fails.

    Returns:
        TrainingUnitInDB: The updated training unit.
    """
    # Fetched without an owner filter so superusers can update any unit;
    # ownership is enforced explicitly below.
    unit: TrainingUnit = training_unit_crud.get_one(
        db, TrainingUnit.id == training_unit_id
    )
    if unit is None:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"Training unit with id {training_unit_id} not found",
        )
    is_owner = unit.owner_id == user.id
    if not (is_owner or user.is_superuser):
        raise HTTPException(
            status_code=status.HTTP_403_FORBIDDEN,
            detail="You do not have permission to perform this action",
        )
    try:
        return training_unit_crud.update(db, unit, training_unit_update)
    except Exception as e:  # pragma: no cover
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Could not update training unit. Error: " + str(e),
        ) from e
@router.delete("/{training_unit_id}", status_code=status.HTTP_200_OK)
def delete_training_unit(
    training_unit_id: int,
    db: Session = Depends(get_db),
    user: User = Depends(get_current_active_user),
):
    """Delete the training unit identified by ``training_unit_id``.

    Parameters:
        training_unit_id (int): Primary key of the unit to delete.
        db (Session): Database session.
        user (User): Authenticated caller; must own the unit or be a superuser.

    Raises:
        HTTPException: 404 if the unit does not exist, 403 if the caller lacks
            permission, 500 if the delete operation fails.

    Returns:
        dict: A confirmation message.
    """
    unit: TrainingUnit = training_unit_crud.get_one(
        db, TrainingUnit.id == training_unit_id
    )
    if unit is None:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"Training unit with id {training_unit_id} not found",
        )
    if not (unit.owner_id == user.id or user.is_superuser):
        raise HTTPException(
            status_code=status.HTTP_403_FORBIDDEN,
            detail="You do not have permission to perform this action",
        )
    try:
        training_unit_crud.delete(db, unit)
    except Exception as e:  # pragma: no cover
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Could not delete training unit. Error: " + str(e),
        ) from e  # pragma: no cover
    # NOTE(review): "Training unit type" looks like a copy-paste leftover from
    # another resource's routes — confirm before changing the message.
    return {"detail": f"Training unit type with id {training_unit_id} deleted."}
@router.put(
"/{training_unit_id}/exercises/{exercise_id}/add",
response_model=Optional[TrainingUnitInDB],
status_code=status.HTTP_200_OK,
)
def add_exercise_to_training_unit(
training_unit_id: int,
exercise_id: int,
db: Session = Depends(get_db),
user: User = Depends(get_current_active_user),
):
"""
Adds an exercise to a training unit.
Parameters:
training_unit_id (int): The ID of the training unit.
exercise_id (int): The ID of the exercise.
db (Session, optional): The database session. Defaults to Depends(get_db).
user (User, optional): The current authenticated user.
Defaults to Depends(get_current_active_user).
Returns:
The updated training unit with the added exercise.
"""
training_unit = training_unit_crud.get_one(db, TrainingUnit.id == training_unit_id)
if training_unit is None:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=f"Training unit with id {training_unit_id} not found",
)
|
# Module-level logger named after this module's import path.
log = get_logger(__name__)
# Router that the endpoint decorators below register against; mounted by the
# application with the training-unit URL prefix.
router = APIRouter()
@router.get(
    "/all",
    response_model=List[Optional[TrainingUnitInDB]],
    status_code=status.HTTP_200_OK,
)
def get_all_training_units(
    db: Session = Depends(get_db),
    pagination_params: dict = Depends(get_pagination_params),
    user: User = Depends(get_current_superuser),
):
    """Return every training unit, paginated (superuser only).

    Parameters:
        db (Session): Database session.
        pagination_params (dict): ``(skip, limit)`` pair from the dependency.
        user (User): Must be a superuser (enforced by the dependency).

    Returns:
        List[Optional[TrainingUnitInDB]]: The requested page of training units.
    """
    offset, page_size = pagination_params
    return training_unit_crud.get_many(db, skip=offset, limit=page_size)
@router.get(
    "/all/my",
    response_model=List[Optional[TrainingUnitInDB]],
    status_code=status.HTTP_200_OK,
)
def get_all_training_units_for_owner(
    db: Session = Depends(get_db),
    pagination_params: dict = Depends(get_pagination_params),
    user: User = Depends(get_current_active_user),
):
    """Return the current user's training units, paginated.

    Parameters:
        db (Session): Database session.
        pagination_params (dict): ``(skip, limit)`` pair from the dependency.
        user (User): The authenticated user whose units are listed.

    Returns:
        List[Optional[TrainingUnitInDB]]: The requested page of the user's units.
    """
    offset, page_size = pagination_params
    return training_unit_crud.get_many_for_owner(
        db, owner_id=user.id, skip=offset, limit=page_size
    )
@router.get(
    "/{training_unit_id}",
    response_model=Optional[TrainingUnitInDB],
    status_code=status.HTTP_200_OK,
)
def get_training_unit_by_id(
    training_unit_id: int,
    db: Session = Depends(get_db),
    user: User = Depends(get_current_active_user),
):
    """Fetch a single training unit by primary key.

    Superusers may read any unit; regular users only see units they own.

    Parameters:
        training_unit_id (int): Primary key of the unit.
        db (Session): Database session.
        user (User): The authenticated caller.

    Raises:
        HTTPException: 404 if no matching unit is visible to the caller.

    Returns:
        Optional[TrainingUnitInDB]: The matching training unit.
    """
    if not user.is_superuser:
        # Regular users: scope the lookup to units they own.
        unit = training_unit_crud.get_one(
            db, TrainingUnit.id == training_unit_id, owner_id=user.id
        )
    else:
        unit = training_unit_crud.get_one(db, TrainingUnit.id == training_unit_id)
    if unit is None:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"Training unit with id {training_unit_id} not found",
        )
    return unit
@router.get(
    "/name/{training_unit_name}",
    response_model=Optional[TrainingUnitInDB],
    status_code=status.HTTP_200_OK,
)
def get_training_unit_by_name(
    training_unit_name: str,
    db: Session = Depends(get_db),
    user: User = Depends(get_current_active_user),
):
    """Fetch one of the current user's training units by name.

    Parameters:
        training_unit_name (str): Name of the unit to look up.
        db (Session): Database session.
        user (User): The authenticated caller; lookup is scoped to their units.

    Raises:
        HTTPException: 404 if the user owns no unit with that name.

    Returns:
        Optional[TrainingUnitInDB]: The matching training unit.
    """
    unit = training_unit_crud.get_one(
        db, TrainingUnit.name == training_unit_name, owner_id=user.id
    )
    if unit is None:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"Training unit with name {training_unit_name} not found for user {user.id}",
        )
    return unit
# For superuser
@router.get(
    "/name/{training_unit_name}/superuser",
    response_model=List[Optional[TrainingUnitInDB]],
    status_code=status.HTTP_200_OK,
    include_in_schema=False,
)
def get_training_units_by_name(
    training_unit_name: str,
    db: Session = Depends(get_db),
    user: User = Depends(get_current_superuser),
):
    """Retrieve all training units matching a name, across owners (superuser only).

    Parameters:
        training_unit_name (str): Name of the training units to look up.
        db (Session): Database session.
        user (User): Must be a superuser (enforced by the dependency).

    Raises:
        HTTPException: 404 if no unit with that name exists.

    Returns:
        List[Optional[TrainingUnitInDB]]: The matching training units.
    """
    training_units = training_unit_crud.get_many(
        db, TrainingUnit.name == training_unit_name
    )
    # Fix: get_many returns a list (other endpoints return it directly under a
    # List[...] response model), so the previous `is None` check was dead code
    # and an empty result produced 200 with []. Treat an empty list as "not
    # found" so the intended 404 branch can actually fire.
    if not training_units:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"Training unit with name {training_unit_name} not found for user {user.id}",
        )
    return training_units
@router.post("/", response_model=TrainingUnitInDB, status_code=status.HTTP_201_CREATED)
def create_training_unit(
    training_unit_in: TrainingUnitCreate,
    db: Session = Depends(get_db),
    user: User = Depends(get_current_active_user),
):
    """Create a new training unit owned by the current user.

    Parameters:
        training_unit_in (TrainingUnitCreate): Payload for the new unit.
        db (Session): Database session.
        user (User): The authenticated caller, recorded as the unit's owner.

    Raises:
        HTTPException: 409 if the user already owns a unit with the same name.

    Returns:
        TrainingUnitInDB: The created training unit.
    """
    # Names are unique per owner: reject a duplicate up front.
    duplicate = training_unit_crud.get_one(
        db, TrainingUnit.name == training_unit_in.name, owner_id=user.id
    )
    if duplicate is not None:
        raise HTTPException(
            status_code=status.HTTP_409_CONFLICT,
            detail=f"Training unit with name {training_unit_in.name} already exists for user {user.id}",
        )
    return training_unit_crud.create_with_owner(db, training_unit_in, owner_id=user.id)
@router.put(
    "/{training_unit_id}",
    response_model=TrainingUnitInDB,
    status_code=status.HTTP_200_OK,
)
def update_training_unit(
    training_unit_id: int,
    training_unit_update: TrainingUnitUpdate,
    db: Session = Depends(get_db),
    user: User = Depends(get_current_active_user),
):
    """
    Updates a training unit.

    Parameters:
        training_unit_id (int): The ID of the training unit to update.
        training_unit_update (TrainingUnitUpdate): The updated training unit data.
        db (Session): The database session.
        user (User): The authenticated caller; must own the unit or be a superuser.

    Raises:
        HTTPException: 404 if the unit does not exist, 403 if the caller is
            neither the owner nor a superuser, 500 if the update operation fails.

    Returns:
        TrainingUnitInDB: The updated training unit.
    """
    # Fetched without an owner filter so superusers can update any unit;
    # ownership is enforced explicitly below.
    training_unit: TrainingUnit = training_unit_crud.get_one(
        db, TrainingUnit.id == training_unit_id
    )
    if training_unit is None:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"Training unit with id {training_unit_id} not found",
        )
    if training_unit.owner_id != user.id and not user.is_superuser:
        raise HTTPException(
            status_code=status.HTTP_403_FORBIDDEN,
            detail="You do not have permission to perform this action",
        )
    try:
        training_unit = training_unit_crud.update(
            db, training_unit, training_unit_update
        )
    except Exception as e:  # pragma: no cover
        # Surface persistence-layer failures as a 500 while chaining the
        # original exception for debugging.
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Could not update training unit. Error: " + str(e),
        ) from e
    return training_unit
@router.delete("/{training_unit_id}", status_code=status.HTTP_200_OK)
def delete_training_unit(
training_unit_id: int,
db: Session = Depends(get_db),
user: User = Depends(get_current_active_user),
):
"""
Deletes a training unit.
Parameters:
training_unit_id (int): The ID of the training unit to delete.
db (Session): The database session.
Returns:
Dict[str, str]: A message indicating that the training unit has been deleted.
"""
training_unit: TrainingUnit = training_unit_crud.get_one(
db, TrainingUnit.id == training_unit_id
)
if training_unit is None:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=f"Training unit with id {training_unit_id} not found",
)
if training_unit.owner_id != user.id and not user.is_superuser:
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
detail="You do not have permission to perform this action",
)
try:
training_unit_crud.delete(db, training_unit)
except Exception as e: # pragma: no cover
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail="Could not delete training unit. Error: " + str(e),
) from e # pragma: no cover
return {"detail": f"Training unit type with id {training_unit_id} deleted."}
@router.put(
"/{training_unit_id}/exercises/{exercise_id}/add",
response_model=Optional[TrainingUnitInDB],
status_code=status.HTTP_200_OK,
)
def add_exercise_to_training_unit(
training_unit_id: int,
exercise_id: int,
db: Session = Depends(get_db),
user: User = Depends(get_current_active_user),
):
"""
Adds an exercise to a training unit.
Parameters:
training_unit_id (int): The ID of the training unit.
exercise_id (int): The ID of the exercise.
db (Session, optional): The database session. Defaults to Depends(get_db).
user (User, optional): The current authenticated user.
Defaults to Depends(get_current_active_user).
Returns:
The updated training unit with the added exercise.
"""
training_unit = training_unit_crud.get_one(db, TrainingUnit.id == training_unit_id)
if training_unit is None:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=f"Training unit with id {training_unit_id} not found",
)
| exercise = exercise_crud.get_one(db, Exercise.id == exercise_id) | 3 | 2023-11-05 14:37:46+00:00 | 8k |
choderalab/chiron | chiron/integrators.py | [
{
"identifier": "SamplerState",
"path": "chiron/states.py",
"snippet": "class SamplerState:\n \"\"\"\n Represents the state of the system that is updated during integration.\n\n Parameters\n ----------\n x0 : unit.Quantity\n The current positions of the particles in the simulation.... | import jax.numpy as jnp
from jax import random
from tqdm import tqdm
from openmm import unit
from .states import SamplerState, ThermodynamicState
from typing import Dict
from loguru import logger as log
from .reporters import SimulationReporter
from typing import Optional
from .utils import get_list_of_mass | 3,795 | # This file contains the integrator class for the Langevin dynamics simulation
class LangevinIntegrator:
"""
Langevin dynamics integrator for molecular dynamics simulation using the BAOAB splitting scheme [1].
References:
[1] Benedict Leimkuhler, Charles Matthews;
Robust and efficient configurational molecular sampling via Langevin dynamics.
J. Chem. Phys. 7 May 2013; 138 (17): 174102. https://doi.org/10.1063/1.4802990
"""
def __init__(
self,
stepsize=1.0 * unit.femtoseconds,
collision_rate=1.0 / unit.picoseconds,
save_frequency: int = 100,
reporter: Optional[SimulationReporter] = None,
) -> None:
"""
Initialize the LangevinIntegrator object.
Parameters
----------
stepsize : unit.Quantity, optional
Time step of integration with units of time. Default is 1.0 * unit.femtoseconds.
collision_rate : unit.Quantity, optional
Collision rate for the Langevin dynamics, with units 1/time. Default is 1.0 / unit.picoseconds.
save_frequency : int, optional
Frequency of saving the simulation data. Default is 100.
reporter : SimulationReporter, optional
Reporter object for saving the simulation data. Default is None.
"""
self.kB = unit.BOLTZMANN_CONSTANT_kB * unit.AVOGADRO_CONSTANT_NA
log.info(f"stepsize = {stepsize}")
log.info(f"collision_rate = {collision_rate}")
log.info(f"save_frequency = {save_frequency}")
self.stepsize = stepsize
self.collision_rate = collision_rate
if reporter is not None:
log.info(f"Using reporter {reporter} saving to {reporter.filename}")
self.reporter = reporter
self.save_frequency = save_frequency
self.velocities = None
def set_velocities(self, vel: unit.Quantity) -> None:
"""
Set the initial velocities for the Langevin Integrator.
Parameters
----------
vel : unit.Quantity
Velocities to be set for the integrator.
"""
self.velocities = vel
def run(
self,
sampler_state: SamplerState,
| # This file contains the integrator class for the Langevin dynamics simulation
class LangevinIntegrator:
"""
Langevin dynamics integrator for molecular dynamics simulation using the BAOAB splitting scheme [1].
References:
[1] Benedict Leimkuhler, Charles Matthews;
Robust and efficient configurational molecular sampling via Langevin dynamics.
J. Chem. Phys. 7 May 2013; 138 (17): 174102. https://doi.org/10.1063/1.4802990
"""
def __init__(
self,
stepsize=1.0 * unit.femtoseconds,
collision_rate=1.0 / unit.picoseconds,
save_frequency: int = 100,
reporter: Optional[SimulationReporter] = None,
) -> None:
"""
Initialize the LangevinIntegrator object.
Parameters
----------
stepsize : unit.Quantity, optional
Time step of integration with units of time. Default is 1.0 * unit.femtoseconds.
collision_rate : unit.Quantity, optional
Collision rate for the Langevin dynamics, with units 1/time. Default is 1.0 / unit.picoseconds.
save_frequency : int, optional
Frequency of saving the simulation data. Default is 100.
reporter : SimulationReporter, optional
Reporter object for saving the simulation data. Default is None.
"""
self.kB = unit.BOLTZMANN_CONSTANT_kB * unit.AVOGADRO_CONSTANT_NA
log.info(f"stepsize = {stepsize}")
log.info(f"collision_rate = {collision_rate}")
log.info(f"save_frequency = {save_frequency}")
self.stepsize = stepsize
self.collision_rate = collision_rate
if reporter is not None:
log.info(f"Using reporter {reporter} saving to {reporter.filename}")
self.reporter = reporter
self.save_frequency = save_frequency
self.velocities = None
def set_velocities(self, vel: unit.Quantity) -> None:
"""
Set the initial velocities for the Langevin Integrator.
Parameters
----------
vel : unit.Quantity
Velocities to be set for the integrator.
"""
self.velocities = vel
def run(
self,
sampler_state: SamplerState, | thermodynamic_state: ThermodynamicState, | 1 | 2023-11-07 18:17:43+00:00 | 8k |
HealthSciTech/E2E-PPG | ppg_peak_detection.py | [
{
"identifier": "ppg_peaks",
"path": "kazemi_peak_detection.py",
"snippet": "def ppg_peaks(signal, sampling_freq, seconds, overlap, minlen):\n \"\"\"\n Main function to detect peaks in PPG signals using the trained model.\n \n Args:\n signal (numpy.ndarray): PPG signal\n sampli... | import neurokit2 as nk
import heartpy as hp
import numpy as np
import warnings
from heartpy.datautils import rolling_mean
from scipy import signal
from kazemi_peak_detection import ppg_peaks
from ppg_sqa import sqa
from ppg_reconstruction import reconstruction
from ppg_clean_extraction import clean_seg_extraction
from utils import normalize_data, get_data | 5,336 | # -*- coding: utf-8 -*-
warnings.filterwarnings("ignore")
def peak_detection(
clean_segments: list,
sampling_rate: int,
method: str ='kazemi') -> list:
'''
Detect peaks in clean PPG segments using specified peak detection method.
Args:
clean_segments (list): List of clean PPG segments with the specified window length and their starting index.
sampling_rate: Sampling rate of the PPG signal.
method (str): Peak detection method. Valid inputs: 'nk', 'kazemi', and 'heartpy'. The default is 'kazemi'. (optional)
Return:
total_peaks (list): List of lists, each containing the detected peaks for a corresponding clean segment.
Refernces:
Kazemi method: Kazemi, K., Laitala, J., Azimi, I., Liljeberg, P., & Rahmani, A. M. (2022).
Robust ppg peak detection using dilated convolutional neural networks. Sensors, 22(16), 6054.
Neurokit method: Makowski, D., Pham, T., Lau, Z. J., Brammer, J. C., Lespinasse, F., Pham, H., ... & Chen, S. A. (2021).
NeuroKit2: A Python toolbox for neurophysiological signal processing. Behavior research methods, 1-8.
HeartPY method: Van Gent, P., Farah, H., Nes, N., & van Arem, B. (2018, June).
Heart rate analysis for human factors: Development and validation of an open source toolkit for noisy naturalistic heart rate data.
In Proceedings of the 6th HUMANIST Conference (pp. 173-178).
'''
# Initialize a list to store total peaks
total_peaks = []
# Check the deisred peak detection method
if method == 'nk':
# Neurokit method
upsampling_rate = 2
sampling_rate_new = sampling_rate * upsampling_rate
for i in range(len(clean_segments)):
# Normalize PPG signal
| # -*- coding: utf-8 -*-
warnings.filterwarnings("ignore")
def peak_detection(
clean_segments: list,
sampling_rate: int,
method: str ='kazemi') -> list:
'''
Detect peaks in clean PPG segments using specified peak detection method.
Args:
clean_segments (list): List of clean PPG segments with the specified window length and their starting index.
sampling_rate: Sampling rate of the PPG signal.
method (str): Peak detection method. Valid inputs: 'nk', 'kazemi', and 'heartpy'. The default is 'kazemi'. (optional)
Return:
total_peaks (list): List of lists, each containing the detected peaks for a corresponding clean segment.
Refernces:
Kazemi method: Kazemi, K., Laitala, J., Azimi, I., Liljeberg, P., & Rahmani, A. M. (2022).
Robust ppg peak detection using dilated convolutional neural networks. Sensors, 22(16), 6054.
Neurokit method: Makowski, D., Pham, T., Lau, Z. J., Brammer, J. C., Lespinasse, F., Pham, H., ... & Chen, S. A. (2021).
NeuroKit2: A Python toolbox for neurophysiological signal processing. Behavior research methods, 1-8.
HeartPY method: Van Gent, P., Farah, H., Nes, N., & van Arem, B. (2018, June).
Heart rate analysis for human factors: Development and validation of an open source toolkit for noisy naturalistic heart rate data.
In Proceedings of the 6th HUMANIST Conference (pp. 173-178).
'''
# Initialize a list to store total peaks
total_peaks = []
# Check the deisred peak detection method
if method == 'nk':
# Neurokit method
upsampling_rate = 2
sampling_rate_new = sampling_rate * upsampling_rate
for i in range(len(clean_segments)):
# Normalize PPG signal | ppg_normed = normalize_data(clean_segments[i][1]) | 4 | 2023-11-07 22:52:14+00:00 | 8k |
Antelcat/ida_copilot | ida_copilot.py | [
{
"identifier": "panel",
"path": "ida_copilot/panel.py",
"snippet": "class Singleton(type):\nclass CopilotPanel(idaapi.PluginForm, metaclass=Singleton):\nclass CopilotPanelCallbackManager(BaseCallbackHandler):\nclass ShowCopilotPanel(idaapi.action_handler_t):\n def __call__(cls, *args, **kwargs):\n ... | import ida_hexrays
import ida_kernwin
import idaapi
from ida_copilot import panel
from ida_copilot.copilot import Copilot | 3,961 |
class CopilotPluginActionHandler(idaapi.action_handler_t):
def __init__(self):
super(CopilotPluginActionHandler, self).__init__()
def activate(self, ctx):
ida_kernwin.show_wait_box('HIDECANCEL\nRunning Copilot...')
try:
Copilot().run()
finally:
ida_kernwin.hide_wait_box()
ida_hexrays.get_widget_vdui(ctx.widget).refresh_view(True)
ida_kernwin.refresh_idaview_anyway()
def on_task_complete(self, future):
# 关闭进度条或状态信息
ida_kernwin.hide_wait_box()
# 更新UI...
ida_kernwin.refresh_idaview_anyway()
def update(self, ctx):
return idaapi.AST_ENABLE_ALWAYS
class CopilotPlugin(idaapi.plugin_t):
flags = idaapi.PLUGIN_UNL
comment = "Copilot"
help = "Copilot"
wanted_name = "Copilot"
wanted_hotkey = ""
def init(self):
if not ida_hexrays.init_hexrays_plugin():
print("Hex-Rays decompiler is not available!")
return
run_action = idaapi.action_desc_t(
'copilot:run',
'Run Copilot',
CopilotPluginActionHandler(),
'Ctrl+Shift+P',
'使用Copilot分析当前函数',
-1)
idaapi.register_action(run_action)
idaapi.attach_action_to_menu(
'Edit/Copilot',
'copilot:run',
idaapi.SETMENU_APP)
action_desc = idaapi.action_desc_t(
'copilot:show_panel',
'Show Copilot',
|
class CopilotPluginActionHandler(idaapi.action_handler_t):
def __init__(self):
super(CopilotPluginActionHandler, self).__init__()
def activate(self, ctx):
ida_kernwin.show_wait_box('HIDECANCEL\nRunning Copilot...')
try:
Copilot().run()
finally:
ida_kernwin.hide_wait_box()
ida_hexrays.get_widget_vdui(ctx.widget).refresh_view(True)
ida_kernwin.refresh_idaview_anyway()
def on_task_complete(self, future):
# 关闭进度条或状态信息
ida_kernwin.hide_wait_box()
# 更新UI...
ida_kernwin.refresh_idaview_anyway()
def update(self, ctx):
return idaapi.AST_ENABLE_ALWAYS
class CopilotPlugin(idaapi.plugin_t):
flags = idaapi.PLUGIN_UNL
comment = "Copilot"
help = "Copilot"
wanted_name = "Copilot"
wanted_hotkey = ""
def init(self):
if not ida_hexrays.init_hexrays_plugin():
print("Hex-Rays decompiler is not available!")
return
run_action = idaapi.action_desc_t(
'copilot:run',
'Run Copilot',
CopilotPluginActionHandler(),
'Ctrl+Shift+P',
'使用Copilot分析当前函数',
-1)
idaapi.register_action(run_action)
idaapi.attach_action_to_menu(
'Edit/Copilot',
'copilot:run',
idaapi.SETMENU_APP)
action_desc = idaapi.action_desc_t(
'copilot:show_panel',
'Show Copilot', | panel.ShowCopilotPanel(panel.CopilotPanel()), | 0 | 2023-11-02 14:23:11+00:00 | 8k |
WSH032/fastapi-proxy-lib | tests/test_ws.py | [
{
"identifier": "get_app",
"path": "tests/app/echo_ws_app.py",
"snippet": "def get_app() -> AppDataclass4Test: # noqa: C901, PLR0915\n \"\"\"Get the echo ws app.\n\n Returns:\n TestAppDataclass.\n \"\"\"\n app = FastAPI()\n request_dict = RequestDict(request=None)\n test_app_da... | import asyncio
import httpx
import httpx_ws
import pytest
import uvicorn
from contextlib import AsyncExitStack
from multiprocessing import Process, Queue
from typing import Any, Dict, Literal, Optional
from fastapi_proxy_lib.fastapi.app import reverse_ws_app as get_reverse_ws_app
from httpx_ws import aconnect_ws
from starlette import websockets as starlette_websockets_module
from typing_extensions import override
from .app.echo_ws_app import get_app as get_ws_test_app
from .app.tool import UvicornServer
from .conftest import UvicornServerFixture
from .tool import (
AbstractTestProxy,
Tool4TestFixture,
) | 3,615 | # noqa: D100
DEFAULT_HOST = "127.0.0.1"
DEFAULT_PORT = 0
DEFAULT_CONTX_EXIT_TIMEOUT = 5
# WS_BACKENDS_NEED_BE_TESTED = ("websockets", "wsproto")
# # FIXME: wsproto 有问题,暂时不测试
# # ConnectionResetError: [WinError 10054] 远程主机强迫关闭了一个现有的连接。
# # https://github.com/encode/uvicorn/discussions/2105
WS_BACKENDS_NEED_BE_TESTED = ("websockets",)
# https://www.python-httpx.org/advanced/#http-proxying
NO_PROXIES: Dict[Any, Any] = {"all://": None}
def _subprocess_run_echo_ws_uvicorn_server(queue: "Queue[str]", **kwargs: Any):
"""Run echo ws app in subprocess.
Args:
queue: The queue for subprocess to put the url of echo ws app.
After the server is started, the url will be put into the queue.
**kwargs: The kwargs for `uvicorn.Config`
"""
default_kwargs = {
"app": get_ws_test_app().app,
"port": DEFAULT_PORT,
"host": DEFAULT_HOST,
}
default_kwargs.update(kwargs)
target_ws_server = UvicornServer(
uvicorn.Config(**default_kwargs), # pyright: ignore[reportGeneralTypeIssues]
)
async def run():
await target_ws_server.aenter()
url = str(target_ws_server.contx_socket_url)
queue.put(url)
queue.close()
while True: # run forever
await asyncio.sleep(0.1)
asyncio.run(run())
def _subprocess_run_httpx_ws(
queue: "Queue[str]",
kwargs_async_client: Optional[Dict[str, Any]] = None,
kwargs_aconnect_ws: Optional[Dict[str, Any]] = None,
):
"""Run aconnect_ws in subprocess.
Args:
queue: The queue for subprocess to put something for flag of ws connection established.
kwargs_async_client: The kwargs for `httpx.AsyncClient`
kwargs_aconnect_ws: The kwargs for `httpx_ws.aconnect_ws`
"""
kwargs_async_client = kwargs_async_client or {}
kwargs_aconnect_ws = kwargs_aconnect_ws or {}
kwargs_async_client.pop("proxies", None)
kwargs_aconnect_ws.pop("client", None)
async def run():
_exit_stack = AsyncExitStack()
_temp_client = httpx.AsyncClient(proxies=NO_PROXIES, **kwargs_async_client)
_ = await _exit_stack.enter_async_context(
aconnect_ws(
client=_temp_client,
**kwargs_aconnect_ws,
)
)
queue.put("done")
queue.close()
while True: # run forever
await asyncio.sleep(0.1)
asyncio.run(run())
class TestReverseWsProxy(AbstractTestProxy):
"""For testing reverse websocket proxy."""
@override
@pytest.fixture(params=WS_BACKENDS_NEED_BE_TESTED)
async def tool_4_test_fixture( # pyright: ignore[reportIncompatibleMethodOverride]
self,
| # noqa: D100
DEFAULT_HOST = "127.0.0.1"
DEFAULT_PORT = 0
DEFAULT_CONTX_EXIT_TIMEOUT = 5
# WS_BACKENDS_NEED_BE_TESTED = ("websockets", "wsproto")
# # FIXME: wsproto 有问题,暂时不测试
# # ConnectionResetError: [WinError 10054] 远程主机强迫关闭了一个现有的连接。
# # https://github.com/encode/uvicorn/discussions/2105
WS_BACKENDS_NEED_BE_TESTED = ("websockets",)
# https://www.python-httpx.org/advanced/#http-proxying
NO_PROXIES: Dict[Any, Any] = {"all://": None}
def _subprocess_run_echo_ws_uvicorn_server(queue: "Queue[str]", **kwargs: Any):
"""Run echo ws app in subprocess.
Args:
queue: The queue for subprocess to put the url of echo ws app.
After the server is started, the url will be put into the queue.
**kwargs: The kwargs for `uvicorn.Config`
"""
default_kwargs = {
"app": get_ws_test_app().app,
"port": DEFAULT_PORT,
"host": DEFAULT_HOST,
}
default_kwargs.update(kwargs)
target_ws_server = UvicornServer(
uvicorn.Config(**default_kwargs), # pyright: ignore[reportGeneralTypeIssues]
)
async def run():
await target_ws_server.aenter()
url = str(target_ws_server.contx_socket_url)
queue.put(url)
queue.close()
while True: # run forever
await asyncio.sleep(0.1)
asyncio.run(run())
def _subprocess_run_httpx_ws(
queue: "Queue[str]",
kwargs_async_client: Optional[Dict[str, Any]] = None,
kwargs_aconnect_ws: Optional[Dict[str, Any]] = None,
):
"""Run aconnect_ws in subprocess.
Args:
queue: The queue for subprocess to put something for flag of ws connection established.
kwargs_async_client: The kwargs for `httpx.AsyncClient`
kwargs_aconnect_ws: The kwargs for `httpx_ws.aconnect_ws`
"""
kwargs_async_client = kwargs_async_client or {}
kwargs_aconnect_ws = kwargs_aconnect_ws or {}
kwargs_async_client.pop("proxies", None)
kwargs_aconnect_ws.pop("client", None)
async def run():
_exit_stack = AsyncExitStack()
_temp_client = httpx.AsyncClient(proxies=NO_PROXIES, **kwargs_async_client)
_ = await _exit_stack.enter_async_context(
aconnect_ws(
client=_temp_client,
**kwargs_aconnect_ws,
)
)
queue.put("done")
queue.close()
while True: # run forever
await asyncio.sleep(0.1)
asyncio.run(run())
class TestReverseWsProxy(AbstractTestProxy):
"""For testing reverse websocket proxy."""
@override
@pytest.fixture(params=WS_BACKENDS_NEED_BE_TESTED)
async def tool_4_test_fixture( # pyright: ignore[reportIncompatibleMethodOverride]
self, | uvicorn_server_fixture: UvicornServerFixture, | 2 | 2023-11-08 04:38:36+00:00 | 8k |
aws-samples/amazon-location-geospatial-agent | geospatial_agent/agent/geospatial/solver/solver.py | [
{
"identifier": "NODE_TYPE_ATTRIBUTE",
"path": "geospatial_agent/agent/geospatial/solver/constants.py",
"snippet": "NODE_TYPE_ATTRIBUTE = \"node_type\""
},
{
"identifier": "NODE_TYPE_OPERATION",
"path": "geospatial_agent/agent/geospatial/solver/constants.py",
"snippet": "NODE_TYPE_OPERAT... | import json
import networkx
from langchain import PromptTemplate, LLMChain
from langchain.llms.base import LLM
from pydispatch import dispatcher
from geospatial_agent.agent.geospatial.solver.constants import NODE_TYPE_ATTRIBUTE, NODE_TYPE_OPERATION
from geospatial_agent.agent.geospatial.solver.op_graph import OperationsParser, OperationNode
from geospatial_agent.agent.geospatial.solver.prompts import operation_code_gen_intro, \
operation_task_prefix, operation_reply_example, operation_code_gen_prompt_template, \
operation_pydeck_example, operation_requirement_gen_task_prefix, predefined_operation_requirements, \
shim_instructions
from geospatial_agent.agent.shared import SIGNAL_OPERATION_CODE_GENERATED, SENDER_GEOSPATIAL_AGENT, AgentSignal, \
EventType, SIGNAL_TAIL_CODE_GENERATED
from geospatial_agent.shared.prompts import HUMAN_ROLE, ASSISTANT_ROLE, HUMAN_STOP_SEQUENCE
from geospatial_agent.shared.shim import get_shim_imports
from geospatial_agent.shared.utils import extract_code, extract_content_xml
from typing import List | 3,869 | dispatcher.send(signal=SIGNAL_OPERATION_CODE_GENERATED,
sender=SENDER_GEOSPATIAL_AGENT,
event_data=AgentSignal(
event_source=SENDER_GEOSPATIAL_AGENT,
event_message=f"{idx + 1} / {len(op_nodes)}: Generated code for operation {op_node.node_name}",
event_data=operation_code_gen_output.operation_code,
event_type=EventType.PythonCode
))
# INFO: Updating Operation Nodes with generated code
op_node.operation_prompt = operation_code_gen_output.operation_prompt
op_node.code_gen_response = operation_code_gen_output.operation_code_gen_response
op_node.operation_code = operation_code_gen_output.operation_code
return op_nodes
def assemble(self):
output_node_names = self.operation_parser.output_node_names
operation_nodes = self.operation_parser.operation_nodes
# The head end of the code
head = ""
# The tail end of the code
tail = ""
reverse_graph = self.graph.reverse(copy=True)
for idx, output_node in enumerate(output_node_names):
bfs_edges = networkx.bfs_edges(reverse_graph, source=output_node)
for bfs_edge in bfs_edges:
from_node_name, _ = bfs_edge
current_nx_node = self.graph.nodes[from_node_name]
if current_nx_node.get(NODE_TYPE_ATTRIBUTE, None) == NODE_TYPE_OPERATION:
op_node: OperationNode = next(
(op_node for op_node in operation_nodes if op_node.node_name == from_node_name), None)
head = "\n" + op_node.operation_code + "\n" + head
tail = f'{", ".join(op_node.return_names)}={op_node.function_definition}\n' + tail
# Adding the session id and task name to the code
tail = f'\nsession_id = "{self.session_id}"\n' + \
f'task_name = "{self.task_name}"\n' + \
f'storage_mode = "{self.storage_mode}"\n' + \
tail
dispatcher.send(signal=SIGNAL_TAIL_CODE_GENERATED,
sender=SENDER_GEOSPATIAL_AGENT,
event_data=AgentSignal(
event_source=SENDER_GEOSPATIAL_AGENT,
event_message=f"Generated final code block.",
event_data=tail,
event_type=EventType.PythonCode
))
assembled_code = head + "\n" + tail
assembled_code = f'{get_shim_imports()}\n{assembled_code}'
return assembled_code
def get_operation_requirement(self, op_node: OperationNode) -> list[str]:
node_name = op_node.node_name
task_def = self.task_def.strip("\n").strip()
op_properties = [
f'The function description is: {op_node.description}',
f'The type of work done in this function is: {op_node.operation_type}',
f'This function is one step to solve the question/task: {task_def}'
]
op_properties_str = '\n'.join(
[f"{idx + 1}. {line}" for idx, line in enumerate(op_properties)])
operation_requirement_str = '\n'.join(
[f"{idx + 1}. {line}" for idx, line in enumerate(predefined_operation_requirements)])
op_req_gen_prompt_template: PromptTemplate = PromptTemplate.from_template(operation_requirement_gen_task_prefix)
chain = LLMChain(llm=self.llm, prompt=op_req_gen_prompt_template)
req_gen_response = chain.run(
human_role=HUMAN_ROLE,
operation_req_gen_intro=operation_code_gen_intro,
operation_name=node_name,
pre_requirements=operation_requirement_str,
operation_properties=op_properties_str,
assistant_role=ASSISTANT_ROLE,
stop=[HUMAN_STOP_SEQUENCE]
).strip()
operation_requirement_json = extract_content_xml("json", req_gen_response)
operation_requirement_list: List[str] = json.loads(operation_requirement_json)
operation_requirement_list = shim_instructions + operation_requirement_list
return operation_requirement_list
def gen_operation_code(self, op_node: OperationNode) -> OperationCodeGenOutput:
operation_requirement_list = self.get_operation_requirement(op_node)
node_name = op_node.node_name
# Get ancestors operations functions. For operations that has ancestors, this will also come with LLM
# generated code for the operations.
ancestor_op_nodes = self.operation_parser.get_ancestors(node_name)
ancestor_op_nodes_code = '\n'.join([op_node.operation_code for op_node in ancestor_op_nodes])
descendant_op_node = self.operation_parser.get_descendants(node_name)
descendant_op_node_defs = self.operation_parser.stringify_nodes(descendant_op_node)
pre_requirements = [
f'The function description is: {op_node.description}',
f'The function definition is: {op_node.function_definition}',
f'The function return line is: {op_node.return_line}'
]
operation_requirements_str = '\n'.join(
[f"{idx + 1}. {line}" for idx, line in enumerate(pre_requirements + operation_requirement_list)])
op_code_gen_prompt_template: PromptTemplate = PromptTemplate.from_template(operation_code_gen_prompt_template)
op_code_gen_prompt = op_code_gen_prompt_template.format(
human_role=HUMAN_ROLE,
operation_code_gen_intro=operation_code_gen_intro,
|
class OperationCodeGenOutput:
def __init__(self,
operation_prompt: str,
operation_code_gen_response: str,
operation_code: str):
self.operation_prompt = operation_prompt
self.operation_code_gen_response = operation_code_gen_response
self.operation_code = operation_code
class InvalidStateError(Exception):
def __init__(self, message: str):
self.message = message
super().__init__(self.message)
class Solver:
def __init__(self,
llm: LLM,
graph: networkx.DiGraph,
graph_code: str,
session_id: str,
storage_mode: str,
task_definition: str,
task_name: str,
data_locations_instructions: str):
self.llm = llm
self.graph = graph
self.graph_code = graph_code
self.session_id = session_id
self.storage_mode = storage_mode
self.task_def = task_definition
self.task_name = task_name
self.data_locations_instructions = data_locations_instructions
self.operation_parser = OperationsParser(graph)
def solve(self):
op_nodes = self.operation_parser.operation_nodes
for idx, op_node in enumerate(op_nodes):
operation_code_gen_output = self.gen_operation_code(op_node)
dispatcher.send(signal=SIGNAL_OPERATION_CODE_GENERATED,
sender=SENDER_GEOSPATIAL_AGENT,
event_data=AgentSignal(
event_source=SENDER_GEOSPATIAL_AGENT,
event_message=f"{idx + 1} / {len(op_nodes)}: Generated code for operation {op_node.node_name}",
event_data=operation_code_gen_output.operation_code,
event_type=EventType.PythonCode
))
# INFO: Updating Operation Nodes with generated code
op_node.operation_prompt = operation_code_gen_output.operation_prompt
op_node.code_gen_response = operation_code_gen_output.operation_code_gen_response
op_node.operation_code = operation_code_gen_output.operation_code
return op_nodes
def assemble(self):
output_node_names = self.operation_parser.output_node_names
operation_nodes = self.operation_parser.operation_nodes
# The head end of the code
head = ""
# The tail end of the code
tail = ""
reverse_graph = self.graph.reverse(copy=True)
for idx, output_node in enumerate(output_node_names):
bfs_edges = networkx.bfs_edges(reverse_graph, source=output_node)
for bfs_edge in bfs_edges:
from_node_name, _ = bfs_edge
current_nx_node = self.graph.nodes[from_node_name]
if current_nx_node.get(NODE_TYPE_ATTRIBUTE, None) == NODE_TYPE_OPERATION:
op_node: OperationNode = next(
(op_node for op_node in operation_nodes if op_node.node_name == from_node_name), None)
head = "\n" + op_node.operation_code + "\n" + head
tail = f'{", ".join(op_node.return_names)}={op_node.function_definition}\n' + tail
# Adding the session id and task name to the code
tail = f'\nsession_id = "{self.session_id}"\n' + \
f'task_name = "{self.task_name}"\n' + \
f'storage_mode = "{self.storage_mode}"\n' + \
tail
dispatcher.send(signal=SIGNAL_TAIL_CODE_GENERATED,
sender=SENDER_GEOSPATIAL_AGENT,
event_data=AgentSignal(
event_source=SENDER_GEOSPATIAL_AGENT,
event_message=f"Generated final code block.",
event_data=tail,
event_type=EventType.PythonCode
))
assembled_code = head + "\n" + tail
assembled_code = f'{get_shim_imports()}\n{assembled_code}'
return assembled_code
def get_operation_requirement(self, op_node: OperationNode) -> list[str]:
node_name = op_node.node_name
task_def = self.task_def.strip("\n").strip()
op_properties = [
f'The function description is: {op_node.description}',
f'The type of work done in this function is: {op_node.operation_type}',
f'This function is one step to solve the question/task: {task_def}'
]
op_properties_str = '\n'.join(
[f"{idx + 1}. {line}" for idx, line in enumerate(op_properties)])
operation_requirement_str = '\n'.join(
[f"{idx + 1}. {line}" for idx, line in enumerate(predefined_operation_requirements)])
op_req_gen_prompt_template: PromptTemplate = PromptTemplate.from_template(operation_requirement_gen_task_prefix)
chain = LLMChain(llm=self.llm, prompt=op_req_gen_prompt_template)
req_gen_response = chain.run(
human_role=HUMAN_ROLE,
operation_req_gen_intro=operation_code_gen_intro,
operation_name=node_name,
pre_requirements=operation_requirement_str,
operation_properties=op_properties_str,
assistant_role=ASSISTANT_ROLE,
stop=[HUMAN_STOP_SEQUENCE]
).strip()
operation_requirement_json = extract_content_xml("json", req_gen_response)
operation_requirement_list: List[str] = json.loads(operation_requirement_json)
operation_requirement_list = shim_instructions + operation_requirement_list
return operation_requirement_list
def gen_operation_code(self, op_node: OperationNode) -> OperationCodeGenOutput:
operation_requirement_list = self.get_operation_requirement(op_node)
node_name = op_node.node_name
# Get ancestors operations functions. For operations that has ancestors, this will also come with LLM
# generated code for the operations.
ancestor_op_nodes = self.operation_parser.get_ancestors(node_name)
ancestor_op_nodes_code = '\n'.join([op_node.operation_code for op_node in ancestor_op_nodes])
descendant_op_node = self.operation_parser.get_descendants(node_name)
descendant_op_node_defs = self.operation_parser.stringify_nodes(descendant_op_node)
pre_requirements = [
f'The function description is: {op_node.description}',
f'The function definition is: {op_node.function_definition}',
f'The function return line is: {op_node.return_line}'
]
operation_requirements_str = '\n'.join(
[f"{idx + 1}. {line}" for idx, line in enumerate(pre_requirements + operation_requirement_list)])
op_code_gen_prompt_template: PromptTemplate = PromptTemplate.from_template(operation_code_gen_prompt_template)
op_code_gen_prompt = op_code_gen_prompt_template.format(
human_role=HUMAN_ROLE,
operation_code_gen_intro=operation_code_gen_intro, | operation_task_prefix=operation_task_prefix, | 4 | 2023-11-09 18:29:25+00:00 | 8k |
sammysun0711/ov_llm_bench | inference_engine.py | [
{
"identifier": "OVQwenModel",
"path": "modeling.py",
"snippet": "class OVQwenModel(OVModelForCausalLM):\n def __init__(\n self,\n model: Model,\n config: PretrainedConfig = None,\n device: str = 'CPU',\n dynamic_shapes: bool = True,\n ov_config: Optional[Dic... | import time
import gc
import numpy as np
from transformers import AutoTokenizer, TextIteratorStreamer, AutoConfig, GenerationConfig
from optimum.intel.openvino import OVModelForCausalLM
from threading import Thread, Event
from time import perf_counter
from typing import List
from modeling import OVQwenModel, OVChatGLM2Model
from utils import print_perf_counters_sort | 4,455 |
"""
from utils import MemConsumption
mem_consumption = MemConsumption()
max_rss_mem_consumption = ''
max_shared_mem_consumption = ''
"""
class InferenceEngine:
def __init__(self, args=None, ov_config=None):
self.args = args
self.config = AutoConfig.from_pretrained(
self.args.model_id, trust_remote_code=True)
s = time.time()
if self.config.model_type == "llama":
print("Loading Llama2 model")
self.tokenizer = AutoTokenizer.from_pretrained(
self.args.model_id, trust_remote_code=True)
self.ov_model = OVModelForCausalLM.from_pretrained(self.args.model_id,
compile=False,
device=self.args.device,
ov_config=ov_config,
trust_remote_code=True)
elif self.config.model_type == "chatglm":
print("Loading ChatGLM2 model")
self.tokenizer = AutoTokenizer.from_pretrained(
self.args.model_id,
padding_side='left',
trust_remote_code=True)
|
"""
from utils import MemConsumption
mem_consumption = MemConsumption()
max_rss_mem_consumption = ''
max_shared_mem_consumption = ''
"""
class InferenceEngine:
def __init__(self, args=None, ov_config=None):
self.args = args
self.config = AutoConfig.from_pretrained(
self.args.model_id, trust_remote_code=True)
s = time.time()
if self.config.model_type == "llama":
print("Loading Llama2 model")
self.tokenizer = AutoTokenizer.from_pretrained(
self.args.model_id, trust_remote_code=True)
self.ov_model = OVModelForCausalLM.from_pretrained(self.args.model_id,
compile=False,
device=self.args.device,
ov_config=ov_config,
trust_remote_code=True)
elif self.config.model_type == "chatglm":
print("Loading ChatGLM2 model")
self.tokenizer = AutoTokenizer.from_pretrained(
self.args.model_id,
padding_side='left',
trust_remote_code=True) | self.ov_model = OVChatGLM2Model.from_pretrained(self.args.model_id, | 1 | 2023-11-08 02:09:04+00:00 | 8k |
Rishit-dagli/Astroformer | pytorch-image-models/timm/models/efficientvit_mit.py | [
{
"identifier": "build_model_with_cfg",
"path": "pytorch-image-models/timm/models/_builder.py",
"snippet": "def build_model_with_cfg(\n model_cls: Callable,\n variant: str,\n pretrained: bool,\n pretrained_cfg: Optional[Dict] = None,\n pretrained_cfg_overlay: Optional[... | from typing import Optional
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import SelectAdaptivePool2d, create_conv2d
from ._builder import build_model_with_cfg
from ._features_fx import register_notrace_module
from ._manipulate import checkpoint_seq
from ._registry import register_model, generate_default_cfgs
import torch
import torch.nn as nn
import torch.nn.functional as F | 4,793 | super(ClassifierHead, self).__init__()
self.in_conv = ConvNormAct(in_channels, widths[0], 1, norm_layer=norm_layer, act_layer=act_layer)
self.global_pool = SelectAdaptivePool2d(pool_type=global_pool, flatten=True, input_fmt='NCHW')
self.classifier = nn.Sequential(
nn.Linear(widths[0], widths[1], bias=False),
nn.LayerNorm(widths[1]),
act_layer(inplace=True),
nn.Dropout(dropout, inplace=False),
nn.Linear(widths[1], n_classes, bias=True),
)
def forward(self, x, pre_logits: bool = False):
x = self.in_conv(x)
x = self.global_pool(x)
if pre_logits:
return x
x = self.classifier(x)
return x
class EfficientVit(nn.Module):
def __init__(
self,
in_chans=3,
widths=(),
depths=(),
head_dim=32,
expand_ratio=4,
norm_layer=nn.BatchNorm2d,
act_layer=nn.Hardswish,
global_pool='avg',
head_widths=(),
drop_rate=0.0,
num_classes=1000,
):
super(EfficientVit, self).__init__()
self.grad_checkpointing = False
self.global_pool = global_pool
self.num_classes = num_classes
# input stem
self.stem = Stem(in_chans, widths[0], depths[0], norm_layer, act_layer)
stride = self.stem.stride
# stages
self.feature_info = []
self.stages = nn.Sequential()
in_channels = widths[0]
for i, (w, d) in enumerate(zip(widths[1:], depths[1:])):
self.stages.append(EfficientVitStage(
in_channels,
w,
depth=d,
norm_layer=norm_layer,
act_layer=act_layer,
expand_ratio=expand_ratio,
head_dim=head_dim,
vit_stage=i >= 2,
))
stride *= 2
in_channels = w
self.feature_info += [dict(num_chs=in_channels, reduction=stride, module=f'stages.{i}')]
self.num_features = in_channels
self.head_widths = head_widths
self.head_dropout = drop_rate
if num_classes > 0:
self.head = ClassifierHead(
self.num_features,
self.head_widths,
n_classes=num_classes,
dropout=self.head_dropout,
global_pool=self.global_pool,
)
else:
if self.global_pool == 'avg':
self.head = SelectAdaptivePool2d(pool_type=global_pool, flatten=True)
else:
self.head = nn.Identity()
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^stem',
blocks=r'^stages\.(\d+)' if coarse else [
(r'^stages\.(\d+).downsample', (0,)),
(r'^stages\.(\d+)\.\w+\.(\d+)', None),
]
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.head.classifier[-1]
def reset_classifier(self, num_classes, global_pool=None):
self.num_classes = num_classes
if global_pool is not None:
self.global_pool = global_pool
if num_classes > 0:
self.head = ClassifierHead(
self.num_features,
self.head_widths,
n_classes=num_classes,
dropout=self.head_dropout,
global_pool=self.global_pool,
)
else:
if self.global_pool == 'avg':
self.head = SelectAdaptivePool2d(pool_type=self.global_pool, flatten=True)
else:
self.head = nn.Identity()
def forward_features(self, x):
x = self.stem(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
| """ EfficientViT (by MIT Song Han's Lab)
Paper: `Efficientvit: Enhanced linear attention for high-resolution low-computation visual recognition`
- https://arxiv.org/abs/2205.14756
Adapted from official impl at https://github.com/mit-han-lab/efficientvit
"""
__all__ = ['EfficientVit']
def val2list(x: list or tuple or any, repeat_time=1):
if isinstance(x, (list, tuple)):
return list(x)
return [x for _ in range(repeat_time)]
def val2tuple(x: list or tuple or any, min_len: int = 1, idx_repeat: int = -1):
# repeat elements if necessary
x = val2list(x)
if len(x) > 0:
x[idx_repeat:idx_repeat] = [x[idx_repeat] for _ in range(min_len - len(x))]
return tuple(x)
def get_same_padding(kernel_size: int or tuple[int, ...]) -> int or tuple[int, ...]:
if isinstance(kernel_size, tuple):
return tuple([get_same_padding(ks) for ks in kernel_size])
else:
assert kernel_size % 2 > 0, "kernel size should be odd number"
return kernel_size // 2
class ConvNormAct(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size=3,
stride=1,
dilation=1,
groups=1,
bias=False,
dropout=0.,
norm_layer=nn.BatchNorm2d,
act_layer=nn.ReLU,
):
super(ConvNormAct, self).__init__()
self.dropout = nn.Dropout(dropout, inplace=False)
self.conv = create_conv2d(
in_channels,
out_channels,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
groups=groups,
bias=bias,
)
self.norm = norm_layer(num_features=out_channels) if norm_layer else nn.Identity()
self.act = act_layer(inplace=True) if act_layer else nn.Identity()
def forward(self, x):
x = self.dropout(x)
x = self.conv(x)
x = self.norm(x)
x = self.act(x)
return x
class DSConv(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size=3,
stride=1,
use_bias=False,
norm_layer=(nn.BatchNorm2d, nn.BatchNorm2d),
act_layer=(nn.ReLU6, None),
):
super(DSConv, self).__init__()
use_bias = val2tuple(use_bias, 2)
norm_layer = val2tuple(norm_layer, 2)
act_layer = val2tuple(act_layer, 2)
self.depth_conv = ConvNormAct(
in_channels,
in_channels,
kernel_size,
stride,
groups=in_channels,
norm_layer=norm_layer[0],
act_layer=act_layer[0],
bias=use_bias[0],
)
self.point_conv = ConvNormAct(
in_channels,
out_channels,
1,
norm_layer=norm_layer[1],
act_layer=act_layer[1],
bias=use_bias[1],
)
def forward(self, x):
x = self.depth_conv(x)
x = self.point_conv(x)
return x
class MBConv(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size=3,
stride=1,
mid_channels=None,
expand_ratio=6,
use_bias=False,
norm_layer=(nn.BatchNorm2d, nn.BatchNorm2d, nn.BatchNorm2d),
act_layer=(nn.ReLU6, nn.ReLU6, None),
):
super(MBConv, self).__init__()
use_bias = val2tuple(use_bias, 3)
norm_layer = val2tuple(norm_layer, 3)
act_layer = val2tuple(act_layer, 3)
mid_channels = mid_channels or round(in_channels * expand_ratio)
self.inverted_conv = ConvNormAct(
in_channels,
mid_channels,
1,
stride=1,
norm_layer=norm_layer[0],
act_layer=act_layer[0],
bias=use_bias[0],
)
self.depth_conv = ConvNormAct(
mid_channels,
mid_channels,
kernel_size,
stride=stride,
groups=mid_channels,
norm_layer=norm_layer[1],
act_layer=act_layer[1],
bias=use_bias[1],
)
self.point_conv = ConvNormAct(
mid_channels,
out_channels,
1,
norm_layer=norm_layer[2],
act_layer=act_layer[2],
bias=use_bias[2],
)
def forward(self, x):
x = self.inverted_conv(x)
x = self.depth_conv(x)
x = self.point_conv(x)
return x
class LiteMSA(nn.Module):
"""Lightweight multi-scale attention"""
def __init__(
self,
in_channels: int,
out_channels: int,
heads: int or None = None,
heads_ratio: float = 1.0,
dim=8,
use_bias=False,
norm_layer=(None, nn.BatchNorm2d),
act_layer=(None, None),
kernel_func=nn.ReLU,
scales=(5,),
eps=1e-5,
):
super(LiteMSA, self).__init__()
self.eps = eps
heads = heads or int(in_channels // dim * heads_ratio)
total_dim = heads * dim
use_bias = val2tuple(use_bias, 2)
norm_layer = val2tuple(norm_layer, 2)
act_layer = val2tuple(act_layer, 2)
self.dim = dim
self.qkv = ConvNormAct(
in_channels,
3 * total_dim,
1,
bias=use_bias[0],
norm_layer=norm_layer[0],
act_layer=act_layer[0],
)
self.aggreg = nn.ModuleList([
nn.Sequential(
nn.Conv2d(
3 * total_dim,
3 * total_dim,
scale,
padding=get_same_padding(scale),
groups=3 * total_dim,
bias=use_bias[0],
),
nn.Conv2d(3 * total_dim, 3 * total_dim, 1, groups=3 * heads, bias=use_bias[0]),
)
for scale in scales
])
self.kernel_func = kernel_func(inplace=False)
self.proj = ConvNormAct(
total_dim * (1 + len(scales)),
out_channels,
1,
bias=use_bias[1],
norm_layer=norm_layer[1],
act_layer=act_layer[1],
)
def _attn(self, q, k, v):
dtype = v.dtype
q, k, v = q.float(), k.float(), v.float()
kv = k.transpose(-1, -2) @ v
out = q @ kv
out = out[..., :-1] / (out[..., -1:] + self.eps)
return out.to(dtype)
def forward(self, x):
B, _, H, W = x.shape
# generate multi-scale q, k, v
qkv = self.qkv(x)
multi_scale_qkv = [qkv]
for op in self.aggreg:
multi_scale_qkv.append(op(qkv))
multi_scale_qkv = torch.cat(multi_scale_qkv, dim=1)
multi_scale_qkv = multi_scale_qkv.reshape(B, -1, 3 * self.dim, H * W).transpose(-1, -2)
q, k, v = multi_scale_qkv.chunk(3, dim=-1)
# lightweight global attention
q = self.kernel_func(q)
k = self.kernel_func(k)
v = F.pad(v, (0, 1), mode="constant", value=1.)
if not torch.jit.is_scripting():
with torch.autocast(device_type=v.device.type, enabled=False):
out = self._attn(q, k, v)
else:
out = self._attn(q, k, v)
# final projection
out = out.transpose(-1, -2).reshape(B, -1, H, W)
out = self.proj(out)
return out
register_notrace_module(LiteMSA)
class EfficientVitBlock(nn.Module):
def __init__(
self,
in_channels,
heads_ratio=1.0,
head_dim=32,
expand_ratio=4,
norm_layer=nn.BatchNorm2d,
act_layer=nn.Hardswish,
):
super(EfficientVitBlock, self).__init__()
self.context_module = ResidualBlock(
LiteMSA(
in_channels=in_channels,
out_channels=in_channels,
heads_ratio=heads_ratio,
dim=head_dim,
norm_layer=(None, norm_layer),
),
nn.Identity(),
)
self.local_module = ResidualBlock(
MBConv(
in_channels=in_channels,
out_channels=in_channels,
expand_ratio=expand_ratio,
use_bias=(True, True, False),
norm_layer=(None, None, norm_layer),
act_layer=(act_layer, act_layer, None),
),
nn.Identity(),
)
def forward(self, x):
x = self.context_module(x)
x = self.local_module(x)
return x
class ResidualBlock(nn.Module):
def __init__(
self,
main: Optional[nn.Module],
shortcut: Optional[nn.Module] = None,
pre_norm: Optional[nn.Module] = None,
):
super(ResidualBlock, self).__init__()
self.pre_norm = pre_norm if pre_norm is not None else nn.Identity()
self.main = main
self.shortcut = shortcut
def forward(self, x):
res = self.main(self.pre_norm(x))
if self.shortcut is not None:
res = res + self.shortcut(x)
return res
def build_local_block(
in_channels: int,
out_channels: int,
stride: int,
expand_ratio: float,
norm_layer: str,
act_layer: str,
fewer_norm: bool = False,
):
if expand_ratio == 1:
block = DSConv(
in_channels=in_channels,
out_channels=out_channels,
stride=stride,
use_bias=(True, False) if fewer_norm else False,
norm_layer=(None, norm_layer) if fewer_norm else norm_layer,
act_layer=(act_layer, None),
)
else:
block = MBConv(
in_channels=in_channels,
out_channels=out_channels,
stride=stride,
expand_ratio=expand_ratio,
use_bias=(True, True, False) if fewer_norm else False,
norm_layer=(None, None, norm_layer) if fewer_norm else norm_layer,
act_layer=(act_layer, act_layer, None),
)
return block
class Stem(nn.Sequential):
def __init__(self, in_chs, out_chs, depth, norm_layer, act_layer):
super().__init__()
self.stride = 2
self.add_module(
'in_conv',
ConvNormAct(
in_chs, out_chs,
kernel_size=3, stride=2, norm_layer=norm_layer, act_layer=act_layer,
)
)
stem_block = 0
for _ in range(depth):
self.add_module(f'res{stem_block}', ResidualBlock(
build_local_block(
in_channels=out_chs,
out_channels=out_chs,
stride=1,
expand_ratio=1,
norm_layer=norm_layer,
act_layer=act_layer,
),
nn.Identity(),
))
stem_block += 1
class EfficientVitStage(nn.Module):
def __init__(
self,
in_chs,
out_chs,
depth,
norm_layer,
act_layer,
expand_ratio,
head_dim,
vit_stage=False,
):
super(EfficientVitStage, self).__init__()
blocks = [ResidualBlock(
build_local_block(
in_channels=in_chs,
out_channels=out_chs,
stride=2,
expand_ratio=expand_ratio,
norm_layer=norm_layer,
act_layer=act_layer,
fewer_norm=vit_stage,
),
None,
)]
in_chs = out_chs
if vit_stage:
# for stage 3, 4
for _ in range(depth):
blocks.append(
EfficientVitBlock(
in_channels=in_chs,
head_dim=head_dim,
expand_ratio=expand_ratio,
norm_layer=norm_layer,
act_layer=act_layer,
)
)
else:
# for stage 1, 2
for i in range(1, depth):
blocks.append(ResidualBlock(
build_local_block(
in_channels=in_chs,
out_channels=out_chs,
stride=1,
expand_ratio=expand_ratio,
norm_layer=norm_layer,
act_layer=act_layer
),
nn.Identity(),
))
self.blocks = nn.Sequential(*blocks)
def forward(self, x):
return self.blocks(x)
class ClassifierHead(nn.Module):
def __init__(
self,
in_channels,
widths,
n_classes=1000,
dropout=0.,
norm_layer=nn.BatchNorm2d,
act_layer=nn.Hardswish,
global_pool='avg',
):
super(ClassifierHead, self).__init__()
self.in_conv = ConvNormAct(in_channels, widths[0], 1, norm_layer=norm_layer, act_layer=act_layer)
self.global_pool = SelectAdaptivePool2d(pool_type=global_pool, flatten=True, input_fmt='NCHW')
self.classifier = nn.Sequential(
nn.Linear(widths[0], widths[1], bias=False),
nn.LayerNorm(widths[1]),
act_layer(inplace=True),
nn.Dropout(dropout, inplace=False),
nn.Linear(widths[1], n_classes, bias=True),
)
def forward(self, x, pre_logits: bool = False):
x = self.in_conv(x)
x = self.global_pool(x)
if pre_logits:
return x
x = self.classifier(x)
return x
class EfficientVit(nn.Module):
def __init__(
self,
in_chans=3,
widths=(),
depths=(),
head_dim=32,
expand_ratio=4,
norm_layer=nn.BatchNorm2d,
act_layer=nn.Hardswish,
global_pool='avg',
head_widths=(),
drop_rate=0.0,
num_classes=1000,
):
super(EfficientVit, self).__init__()
self.grad_checkpointing = False
self.global_pool = global_pool
self.num_classes = num_classes
# input stem
self.stem = Stem(in_chans, widths[0], depths[0], norm_layer, act_layer)
stride = self.stem.stride
# stages
self.feature_info = []
self.stages = nn.Sequential()
in_channels = widths[0]
for i, (w, d) in enumerate(zip(widths[1:], depths[1:])):
self.stages.append(EfficientVitStage(
in_channels,
w,
depth=d,
norm_layer=norm_layer,
act_layer=act_layer,
expand_ratio=expand_ratio,
head_dim=head_dim,
vit_stage=i >= 2,
))
stride *= 2
in_channels = w
self.feature_info += [dict(num_chs=in_channels, reduction=stride, module=f'stages.{i}')]
self.num_features = in_channels
self.head_widths = head_widths
self.head_dropout = drop_rate
if num_classes > 0:
self.head = ClassifierHead(
self.num_features,
self.head_widths,
n_classes=num_classes,
dropout=self.head_dropout,
global_pool=self.global_pool,
)
else:
if self.global_pool == 'avg':
self.head = SelectAdaptivePool2d(pool_type=global_pool, flatten=True)
else:
self.head = nn.Identity()
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^stem',
blocks=r'^stages\.(\d+)' if coarse else [
(r'^stages\.(\d+).downsample', (0,)),
(r'^stages\.(\d+)\.\w+\.(\d+)', None),
]
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.head.classifier[-1]
def reset_classifier(self, num_classes, global_pool=None):
self.num_classes = num_classes
if global_pool is not None:
self.global_pool = global_pool
if num_classes > 0:
self.head = ClassifierHead(
self.num_features,
self.head_widths,
n_classes=num_classes,
dropout=self.head_dropout,
global_pool=self.global_pool,
)
else:
if self.global_pool == 'avg':
self.head = SelectAdaptivePool2d(pool_type=self.global_pool, flatten=True)
else:
self.head = nn.Identity()
def forward_features(self, x):
x = self.stem(x)
if self.grad_checkpointing and not torch.jit.is_scripting(): | x = checkpoint_seq(self.stages, x) | 2 | 2023-11-05 01:25:14+00:00 | 8k |
AdFiFi/D-FaST | utils/trainer.py | [
{
"identifier": "init_model_config",
"path": "config.py",
"snippet": "def init_model_config(args, data_config: DataConfig):\r\n if args.model == \"BNT\":\r\n model_config = BNTConfig(node_size=data_config.node_size,\r\n sizes=(data_config.node_size, data_config.... | import json
import os
import wandb
import logging
import torch
import numpy as np
from timeit import default_timer as timer
from abc import abstractmethod
from torch.nn import functional as F
from tqdm import tqdm
from sklearn.metrics import roc_auc_score, accuracy_score
from sklearn.metrics import precision_recall_fscore_support, classification_report
from config import init_model_config
from .optimizer import init_optimizer
from .schedule import init_schedule
from .accuracy import accuracy
from data import *
| 4,034 | if args.device != 'cpu' and torch.cuda.is_available() else args.device
self.model = model.to(args.device)
# self.model = torch.compile(model, dynamic=True)
self.optimizer = None
self.scheduler = None
self.best_result = None
self.test_result = None
@abstractmethod
def prepare_inputs_kwargs(self, inputs):
return {}
def load_datasets(self):
# datasets = eval(
# f"load_{self.args.dataset}_data")(self.data_config)
datasets = eval(
f"{self.args.dataset}Dataset")(self.data_config, k=self.task_id, subject_id=self.subject_id)
if self.args.do_parallel:
data_loaders = init_distributed_dataloader(self.data_config, datasets)
else:
data_loaders = init_StratifiedKFold_dataloader(self.data_config, datasets)
return data_loaders
def init_components(self):
total = self.args.num_epochs * len(self.data_loaders['train'])
self.optimizer = init_optimizer(self.model, self.args)
self.scheduler = init_schedule(self.optimizer, self.args, total)
def train_epoch(self):
train_dataloader = self.data_loaders['train']
self.model.train()
losses = 0
loss_list = []
for step, inputs in enumerate(train_dataloader):
# with torch.autograd.set_detect_anomaly(True):
input_kwargs = self.prepare_inputs_kwargs(inputs)
outputs = self.model(**input_kwargs)
loss = outputs.loss
if self.data_config.dataset == "ZuCo":
loss.backward()
if step % self.data_config.batch_size == self.data_config.batch_size - 1:
self.optimizer.step()
self.scheduler.step() # Update learning rate schedule
self.optimizer.zero_grad()
else:
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
self.scheduler.step() # Update learning rate schedule
losses += loss.item()
loss_list.append(loss.item())
wandb.log({'Training loss': loss.item(),
'Learning rate': self.optimizer.param_groups[0]['lr']})
return losses / len(loss_list)
def train(self):
total = self.args.num_epochs*len(self.data_loaders['train'])
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(self.data_loaders['train']))
logger.info(" Num Epochs = %d", self.args.num_epochs)
logger.info(" Total train batch size = %d", self.args.batch_size)
logger.info(" warmup steps = %d", self.args.warmup_steps)
logger.info(" Total optimization steps = %d", total)
logger.info(" Save steps = %d", self.args.save_steps)
self.init_components()
if self.args.visualize:
self.visualize()
for epoch in tqdm(range(1, self.args.num_epochs + 1), desc="epoch", ncols=0):
start_time = timer()
train_loss = self.train_epoch()
end_time = timer()
self.data_config.alpha = self.data_config.beta = \
0.5 * (self.args.num_epochs - epoch) / self.args.num_epochs + 0.5
self.test_result = self.evaluate()
msg = f" Train loss: {train_loss:.5f}, Test loss: {self.test_result['Loss']:.5f}," \
f"Epoch time = {(end_time - start_time):.3f}s"
print(msg)
logger.info(msg)
if self.best_result is None or self.best_result['Accuracy'] <= self.test_result['Accuracy']:
self.best_result = self.test_result
self.save_model()
wandb.log({f"Best {k}": v for k, v in self.best_result.items()})
def evaluate(self):
if self.data_config.num_class == 2:
result = self.binary_evaluate()
else:
result = self.multiple_evaluate()
return result
def binary_evaluate(self):
logger.info(f"***** Running evaluation on test{self.task_id} dataset *****")
self.model.eval()
evaluate_dataloader = self.data_loaders['test']
losses = 0
loss_list = []
labels = []
result = {}
preds = []
acc = []
with torch.no_grad():
for inputs in evaluate_dataloader:
input_kwargs = self.prepare_inputs_kwargs(inputs)
outputs = self.model(**input_kwargs)
loss = outputs.loss
losses += loss.item()
loss_list.append(loss.item())
# print(f"Evaluate loss: {loss.item():.5f}")
|
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class Trainer(object):
def __init__(self, args, local_rank=0, task_id=0, subject_id=0):
self.task_id = task_id
self.args = args
self.local_rank = local_rank
self.subject_id = subject_id
self.data_config = DataConfig(args)
self.data_loaders = self.load_datasets()
model, self.model_config = init_model_config(args, self.data_config)
if args.do_parallel:
# self.model = torch.nn.DataParallel(self.model)
self.device = f'cuda:{self.local_rank}' \
if args.device != 'cpu' and torch.cuda.is_available() else args.device
self.model = model.to(args.device)
self.model = torch.nn.parallel.DistributedDataParallel(self.model, device_ids=[self.local_rank],
find_unused_parameters=True)
else:
self.device = f'cuda' \
if args.device != 'cpu' and torch.cuda.is_available() else args.device
self.model = model.to(args.device)
# self.model = torch.compile(model, dynamic=True)
self.optimizer = None
self.scheduler = None
self.best_result = None
self.test_result = None
@abstractmethod
def prepare_inputs_kwargs(self, inputs):
return {}
def load_datasets(self):
# datasets = eval(
# f"load_{self.args.dataset}_data")(self.data_config)
datasets = eval(
f"{self.args.dataset}Dataset")(self.data_config, k=self.task_id, subject_id=self.subject_id)
if self.args.do_parallel:
data_loaders = init_distributed_dataloader(self.data_config, datasets)
else:
data_loaders = init_StratifiedKFold_dataloader(self.data_config, datasets)
return data_loaders
def init_components(self):
total = self.args.num_epochs * len(self.data_loaders['train'])
self.optimizer = init_optimizer(self.model, self.args)
self.scheduler = init_schedule(self.optimizer, self.args, total)
def train_epoch(self):
train_dataloader = self.data_loaders['train']
self.model.train()
losses = 0
loss_list = []
for step, inputs in enumerate(train_dataloader):
# with torch.autograd.set_detect_anomaly(True):
input_kwargs = self.prepare_inputs_kwargs(inputs)
outputs = self.model(**input_kwargs)
loss = outputs.loss
if self.data_config.dataset == "ZuCo":
loss.backward()
if step % self.data_config.batch_size == self.data_config.batch_size - 1:
self.optimizer.step()
self.scheduler.step() # Update learning rate schedule
self.optimizer.zero_grad()
else:
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
self.scheduler.step() # Update learning rate schedule
losses += loss.item()
loss_list.append(loss.item())
wandb.log({'Training loss': loss.item(),
'Learning rate': self.optimizer.param_groups[0]['lr']})
return losses / len(loss_list)
def train(self):
total = self.args.num_epochs*len(self.data_loaders['train'])
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(self.data_loaders['train']))
logger.info(" Num Epochs = %d", self.args.num_epochs)
logger.info(" Total train batch size = %d", self.args.batch_size)
logger.info(" warmup steps = %d", self.args.warmup_steps)
logger.info(" Total optimization steps = %d", total)
logger.info(" Save steps = %d", self.args.save_steps)
self.init_components()
if self.args.visualize:
self.visualize()
for epoch in tqdm(range(1, self.args.num_epochs + 1), desc="epoch", ncols=0):
start_time = timer()
train_loss = self.train_epoch()
end_time = timer()
self.data_config.alpha = self.data_config.beta = \
0.5 * (self.args.num_epochs - epoch) / self.args.num_epochs + 0.5
self.test_result = self.evaluate()
msg = f" Train loss: {train_loss:.5f}, Test loss: {self.test_result['Loss']:.5f}," \
f"Epoch time = {(end_time - start_time):.3f}s"
print(msg)
logger.info(msg)
if self.best_result is None or self.best_result['Accuracy'] <= self.test_result['Accuracy']:
self.best_result = self.test_result
self.save_model()
wandb.log({f"Best {k}": v for k, v in self.best_result.items()})
def evaluate(self):
if self.data_config.num_class == 2:
result = self.binary_evaluate()
else:
result = self.multiple_evaluate()
return result
def binary_evaluate(self):
logger.info(f"***** Running evaluation on test{self.task_id} dataset *****")
self.model.eval()
evaluate_dataloader = self.data_loaders['test']
losses = 0
loss_list = []
labels = []
result = {}
preds = []
acc = []
with torch.no_grad():
for inputs in evaluate_dataloader:
input_kwargs = self.prepare_inputs_kwargs(inputs)
outputs = self.model(**input_kwargs)
loss = outputs.loss
losses += loss.item()
loss_list.append(loss.item())
# print(f"Evaluate loss: {loss.item():.5f}")
| top1 = accuracy(outputs.logits, input_kwargs['labels'][:, 1])[0]
| 3 | 2023-11-07 13:57:36+00:00 | 8k |
YihePang/DisoFLAG | model_running.py | [
{
"identifier": "Args_config",
"path": "args.py",
"snippet": "class Args_config: \n\tdef __init__(self):\n\t\tself.use_gpu = True\n\t\tself.max_seq_length = 128\n\n\t\tself.feature_dim = 1024\n\t\tself.encoder_hidden = 512\n\n\t\tself.decoder_hidden = 1024\n\n\t\tself.decoder_dropout = 0.3\n\n\t\tself.... | import numpy as np
import random
import os
import torch as t
import sys
from args import Args_config
from prepare_model_data import data_2_samples, Batches_data
from load_data import load_file_2_data
from torch import nn
from model import Seq2FUN
from evaluator import write_2_file | 5,843 | # -*- coding: utf-8 -*-
# @Author: Yihe Pang
# @Date: 2023-06-13 10:08:51
# @Last Modified by: Yihe Pang
# @Last Modified time: 2023-06-14 22:43:30
def FLAG_model_running(input_data_file, output_file_name, output_type):
args = Args_config()
test_data = data_2_samples(args = args,
data_file_name = input_data_file,
is_slice = True)
for root, dirs, files in os.walk(args.model_path):
for one_file in files:
model_file = args.model_path+'/'+one_file
# print("model_file:",model_file)
model = t.load(model_file, map_location='cpu')
# print("Model : ------",model)
model.eval()
if len(test_data) < args.batch_size:
input_data = []
for i in range(args.batch_size):
if i < len(test_data):
input_data.append(test_data[i])
else:
input_data.append(test_data[0])
else:
input_data = test_data
test_batches = Batches_data(test_data, args.batch_size, is_train=False)
IDR_probs = []
PB_probs = []
DB_probs = []
RB_probs = []
IB_probs = []
LB_probs = []
Link_probs = []
for t_batch in test_batches: #一个batch
t_input_featues = t.tensor(np.array(t_batch.seq_T5_feature))
# seq_mask
one_seq_mask = t.tensor(np.array(t_batch.seq_mask), dtype=t.float32)
one_IDR_probs, one_PB_probs, one_DB_probs, one_RB_probs, one_IB_probs, one_LB_probs, one_Link_probs = model(t_input_featues)
# logits
one_IDR_logits = one_IDR_probs * one_seq_mask
one_PB_logits = one_PB_probs * one_seq_mask
one_DB_logits = one_DB_probs * one_seq_mask
one_RB_logits = one_RB_probs * one_seq_mask
one_IB_logits = one_IB_probs * one_seq_mask
one_LB_logits = one_LB_probs * one_seq_mask
one_Link_logits = one_Link_probs * one_seq_mask
IDR_probs.append(one_IDR_probs.detach().numpy())
PB_probs.append(one_PB_logits.detach().numpy())
DB_probs.append(one_DB_logits.detach().numpy())
RB_probs.append(one_RB_logits.detach().numpy())
IB_probs.append(one_IB_logits.detach().numpy())
LB_probs.append(one_LB_logits.detach().numpy())
Link_probs.append(one_Link_logits.detach().numpy())
test_file = load_file_2_data(input_data_file)
| # -*- coding: utf-8 -*-
# @Author: Yihe Pang
# @Date: 2023-06-13 10:08:51
# @Last Modified by: Yihe Pang
# @Last Modified time: 2023-06-14 22:43:30
def FLAG_model_running(input_data_file, output_file_name, output_type):
args = Args_config()
test_data = data_2_samples(args = args,
data_file_name = input_data_file,
is_slice = True)
for root, dirs, files in os.walk(args.model_path):
for one_file in files:
model_file = args.model_path+'/'+one_file
# print("model_file:",model_file)
model = t.load(model_file, map_location='cpu')
# print("Model : ------",model)
model.eval()
if len(test_data) < args.batch_size:
input_data = []
for i in range(args.batch_size):
if i < len(test_data):
input_data.append(test_data[i])
else:
input_data.append(test_data[0])
else:
input_data = test_data
test_batches = Batches_data(test_data, args.batch_size, is_train=False)
IDR_probs = []
PB_probs = []
DB_probs = []
RB_probs = []
IB_probs = []
LB_probs = []
Link_probs = []
for t_batch in test_batches: #一个batch
t_input_featues = t.tensor(np.array(t_batch.seq_T5_feature))
# seq_mask
one_seq_mask = t.tensor(np.array(t_batch.seq_mask), dtype=t.float32)
one_IDR_probs, one_PB_probs, one_DB_probs, one_RB_probs, one_IB_probs, one_LB_probs, one_Link_probs = model(t_input_featues)
# logits
one_IDR_logits = one_IDR_probs * one_seq_mask
one_PB_logits = one_PB_probs * one_seq_mask
one_DB_logits = one_DB_probs * one_seq_mask
one_RB_logits = one_RB_probs * one_seq_mask
one_IB_logits = one_IB_probs * one_seq_mask
one_LB_logits = one_LB_probs * one_seq_mask
one_Link_logits = one_Link_probs * one_seq_mask
IDR_probs.append(one_IDR_probs.detach().numpy())
PB_probs.append(one_PB_logits.detach().numpy())
DB_probs.append(one_DB_logits.detach().numpy())
RB_probs.append(one_RB_logits.detach().numpy())
IB_probs.append(one_IB_logits.detach().numpy())
LB_probs.append(one_LB_logits.detach().numpy())
Link_probs.append(one_Link_logits.detach().numpy())
test_file = load_file_2_data(input_data_file) | write_2_file(test_file, test_data, test_batches, IDR_probs, PB_probs, DB_probs, RB_probs, IB_probs, LB_probs, Link_probs, output_file_name, output_type) | 5 | 2023-11-09 15:08:24+00:00 | 8k |
BouncyKoishi/ChuCaoQi-Bot | plugins/spellcard_battle.py | [
{
"identifier": "Battle",
"path": "plugins/scBattle/scBattleObj.py",
"snippet": "class Battle:\n def __init__(self, creatorId, groupId) -> None:\n self.creatorId = creatorId\n self.joinerId = None\n self.creator: Battler or None = None\n self.joiner: Battler or None = None... | from plugins.scBattle.scBattleObj import Battle
from plugins.scBattle.scBattlerObj import Battler
from nonebot import on_command, CommandSession
import plugins.scBattle.scBattleUtils as utils
import dbConnection.kusa_item as itemDB
import re
import string
import codecs
import nonebot | 6,327 |
async def battleMain(battle: Battle):
await sendTitle(battle.creatorId)
await sendTitle(battle.joinerId)
print('BeforeGameStart:' + str(battleList))
battle.gameStart()
print('OnGameStart:' + str(battleList))
gameBreak = False
while not gameBreak:
cardBreak, gameBreak = battle.cardBreakJudge()
if cardBreak:
await bot.send_group_msg(group_id=battle.groupId, message=battle.lastTurnInfoImg)
continue
battle.turnStart()
cardBreak, gameBreak = battle.cardBreakJudge()
if cardBreak:
await bot.send_group_msg(group_id=battle.groupId, message=battle.lastTurnInfoImg)
continue
battle.turnGetBasePoint()
battle.turnHurtValueCalc()
battle.turnHpChange()
cardBreak, gameBreak = battle.cardBreakJudge()
if cardBreak:
await bot.send_group_msg(group_id=battle.groupId, message=battle.lastTurnInfoImg)
continue
battle.turnEnd()
print('OnMainCycleEnd:' + str(battleList))
endGame, loserName = battle.endGameCheck()
await battleEnd(battle, loserName)
async def battleEnd(battle: Battle, loserName):
global battleList
message = ''
if len(loserName) == 1:
message = f"{loserName[0]} 已被击破!"
elif len(loserName) == 2:
message = f"{loserName[0]} 和 {loserName[1]} 同时被对方击破!"
await bot.send_group_msg(group_id=battle.groupId, message=message)
print('BeforeEndGame:' + str(battleList))
battleList.pop(battle.creatorId)
async def sendTitle(userId):
    """Grant the one-off '早期符卡对战者' title item if the player lacks it."""
    owned = await itemDB.getItemAmount(userId, '早期符卡对战者')
    if owned == 0:
        await itemDB.changeItemAmount(userId, '早期符卡对战者', 1)
@on_command(name='符卡查询', only_to_me=False)
async def showCardInfo(session: CommandSession):
    """Reply with the description of a spell card looked up by numeric id.

    Fix: a non-numeric argument previously raised ValueError inside the
    handler; it is now reported as a lookup failure instead.
    """
    cardIdText = session.current_arg_text.strip()
    if not cardIdText.isdigit():
        await session.send('没有查询到id对应的符卡信息!')
        return
    cardId = int(cardIdText)
    card = utils.getCardObjById(cardId)
    if not card:
        await session.send('没有查询到id对应的符卡信息!')
        return
    await session.send(card.getCardDescribe(cardId))
@on_command(name='符卡配置', only_to_me=False)
async def setCard(session: CommandSession):
    """Configure a player's spell cards, explicitly or randomly.

    Expects five space-separated card ids, or a message containing 'random'.
    Once both players have configured, the battle main loop is started.
    """
    userId = session.ctx['user_id']
    argText = session.current_arg_text.strip()
    isRandom = True if 'random' in argText.lower() else False
    # Five space-separated card ids, e.g. "1 2 3 4 5".
    regex = r'\d+ \d+ \d+ \d+ \d+'
    argMatch = re.search(regex, argText)
    if not argMatch and not isRandom:
        # Malformed input: reply with the help text file.
        with codecs.open(u'text/符卡配置帮助.txt', 'r', 'utf-8') as f:
            await session.send(f.read().strip())
        return
    battle = inBattle(userId)
    if not battle:
        await session.send('您不在一场符卡对战中!')
        return
    if battle.gameRound is not None and battle.gameRound != 0:
        await session.send('对战已开始,不可中途更换符卡!')
        return
    if isRandom:
        setCardSuccess, cost = setCardInRandom(battle, userId)
    else:
        setCardSuccess, cost = setCardByCardString(battle, userId, argText)
    if not setCardSuccess:
        # cost is None when an id was unknown; otherwise the cost cap was exceeded.
        if cost is None:
            await session.send('符卡配置失败:你选择的某个编号不存在对应符卡。')
        else:
            await session.send(f'符卡配置失败:你选择的符卡Cost总和为{cost}, 超出Cost上限:7')
        return
    await session.send(f'符卡配置成功!选择的符卡总Cost为{cost}')
    print('BeforeSetCard:' + str(battleList))
    if userId not in battle.spellCardSettled:
        battle.spellCardSettled.append(userId)
    if len(battle.spellCardSettled) == 1:
        info = '一位玩家完成了符卡配置!等待另一位玩家。'
        await bot.send_group_msg(group_id=battle.groupId, message=info)
        print('OnSetCard1:' + str(battleList))
    elif len(battle.spellCardSettled) == 2:
        info = '所有玩家已完成符卡配置,对战启动中……'
        await bot.send_group_msg(group_id=battle.groupId, message=info)
        print('OnSetCard2:' + str(battleList))
        await battleMain(battle)
def setCardByCardString(battle: Battle, userId: int, argText: str):
    """Parse a space-separated card-id string and apply it to the player's battler.

    Fix: ``argText`` was annotated with the ``string`` module instead of the
    builtin ``str`` type; also replaced map/lambda with a comprehension.
    """
    battler = battle.creator if userId == battle.creatorId else battle.joiner
    mainCardList = [int(x) for x in argText.split(" ")]
    return setCardByIdList(battler, mainCardList)
def setCardInRandom(battle: Battle, userId: int):
    """Configure the requesting player's battler with a random card set."""
    if userId == battle.creatorId:
        battler = battle.creator
    else:
        battler = battle.joiner
    return setCardByIdList(battler, utils.getRandomCardIdList())
|
# Global nonebot bot instance used to push messages to QQ groups.
bot = nonebot.get_bot()
# Active battles, keyed by the creator's QQ id.
battleList = {}
def inBattle(qq) -> Battle or None:
    """Return the battle the given QQ user participates in, or None."""
    return next(
        (b for b in battleList.values() if b.creatorId == qq or b.joinerId == qq),
        None,
    )
def waitingBattleQQList() -> list:
    """Return creator QQ ids of battles still waiting for a joiner."""
    return [b.creatorId for b in battleList.values() if not b.joinerId]
@on_command(name='符卡对战', only_to_me=False)
async def _(session: CommandSession):
    """Open a new spell-card battle with the sender as its creator."""
    global battleList
    creator_id = session.ctx['user_id']
    group_id = session.ctx['group_id']
    if inBattle(creator_id):
        await session.send('您已经在一场符卡对战中!')
        return
    print('BeforeOpen:' + str(battleList))
    battle = Battle(creator_id, group_id)
    await battle.setCreator()
    battleList[creator_id] = battle
    await session.send('已创建对战,其他人可使用 !加入符卡对战 [对方qq号] 指令加入本场对战。')
    print('OnOpen:' + str(battleList))
@on_command(name='取消符卡对战', only_to_me=False)
async def _(session: CommandSession):
    """Cancel a not-yet-started battle the sender participates in.

    Fix: battleList is keyed by the creator's QQ id, so popping the sender's
    own id raised KeyError whenever the *joiner* issued the cancel; pop by
    battle.creatorId instead.
    """
    global battleList
    userId = session.ctx['user_id']
    battle = inBattle(userId)
    if not battle:
        await session.send('您不在一场符卡对战中!')
        return
    if battle.gameRound:
        await session.send('对战已经开始,无法取消。')
        return
    battleList.pop(battle.creatorId)
    await session.send('已取消对战。')
@on_command(name='加入符卡对战', only_to_me=False)
async def join(session: CommandSession):
    """Join an open spell-card battle.

    With no argument, auto-joins when exactly one battle is waiting.
    Fix: a non-numeric QQ id argument previously crashed the handler with
    ValueError inside int(); it is now rejected gracefully.
    """
    global battleList
    userId = session.ctx['user_id']
    if inBattle(userId):
        await session.send('您已经在一场符卡对战中!')
        return
    argId = session.current_arg_text.strip()
    if not argId:
        waitingList = waitingBattleQQList()
        if len(waitingList) == 0:
            await session.send('当前没有正在等待加入的对战。')
            return
        if len(waitingList) == 1:
            argId = waitingList[0]
        else:
            await session.send('当前有多场对战正在等待加入,请指定开启对战方的qq号。')
            return
    elif not argId.isdigit():
        # Non-numeric argument cannot match any creator id.
        await session.send('该符卡对战未开启。')
        return
    battle = inBattle(int(argId))
    if not battle:
        await session.send('该符卡对战未开启。')
        return
    if battle.joinerId:
        await session.send('该符卡对战人员已满。')
        return
    print('BeforeJoin:' + str(battleList))
    await battle.joinBattle(userId)
    await session.send(f'加入对战成功!等待双方配置符卡……\n使用“!符卡配置”指令以进行配置,建议私聊配置\n当前所有符卡列表:https://docs.qq.com/sheet/DSHNYTW9mWEhTVWJx')
    battleList[int(argId)] = battle
    print('OnJoin:' + str(battleList))
async def battleMain(battle: Battle):
    """Run the main turn loop of a battle until one side's cards are exhausted."""
    # Award the participation title to both players before the fight starts.
    await sendTitle(battle.creatorId)
    await sendTitle(battle.joinerId)
    print('BeforeGameStart:' + str(battleList))
    battle.gameStart()
    print('OnGameStart:' + str(battleList))
    gameBreak = False
    while not gameBreak:
        # A card can break between turns, right after turn start, or after HP
        # changes; every break posts the last turn's summary image to the group.
        cardBreak, gameBreak = battle.cardBreakJudge()
        if cardBreak:
            await bot.send_group_msg(group_id=battle.groupId, message=battle.lastTurnInfoImg)
            continue
        battle.turnStart()
        cardBreak, gameBreak = battle.cardBreakJudge()
        if cardBreak:
            await bot.send_group_msg(group_id=battle.groupId, message=battle.lastTurnInfoImg)
            continue
        # Resolve the turn: gather base points, compute damage, apply HP changes.
        battle.turnGetBasePoint()
        battle.turnHurtValueCalc()
        battle.turnHpChange()
        cardBreak, gameBreak = battle.cardBreakJudge()
        if cardBreak:
            await bot.send_group_msg(group_id=battle.groupId, message=battle.lastTurnInfoImg)
            continue
        battle.turnEnd()
    print('OnMainCycleEnd:' + str(battleList))
    endGame, loserName = battle.endGameCheck()
    await battleEnd(battle, loserName)
async def battleEnd(battle: Battle, loserName):
    """Announce who was defeated and remove the battle from the registry."""
    global battleList
    message = ''
    if len(loserName) == 1:
        message = f"{loserName[0]} 已被击破!"
    elif len(loserName) == 2:
        message = f"{loserName[0]} 和 {loserName[1]} 同时被对方击破!"
    await bot.send_group_msg(group_id=battle.groupId, message=message)
    print('BeforeEndGame:' + str(battleList))
    # battleList is keyed by the creator's QQ id.
    battleList.pop(battle.creatorId)
async def sendTitle(userId):
    # Grant the one-off participation title item if the player lacks it.
    titleExist = await itemDB.getItemAmount(userId, '早期符卡对战者')
    if titleExist == 0:
        await itemDB.changeItemAmount(userId, '早期符卡对战者', 1)
@on_command(name='符卡查询', only_to_me=False)
async def showCardInfo(session: CommandSession):
    """Reply with the description of a spell card looked up by numeric id.

    Fix: a non-numeric argument previously raised ValueError inside the
    handler; it is now reported as a lookup failure instead.
    """
    cardIdText = session.current_arg_text.strip()
    if not cardIdText.isdigit():
        await session.send('没有查询到id对应的符卡信息!')
        return
    cardId = int(cardIdText)
    card = utils.getCardObjById(cardId)
    if not card:
        await session.send('没有查询到id对应的符卡信息!')
        return
    await session.send(card.getCardDescribe(cardId))
@on_command(name='符卡配置', only_to_me=False)
async def setCard(session: CommandSession):
    """Configure a player's spell cards, explicitly or randomly.

    Expects five space-separated card ids, or a message containing 'random'.
    Once both players have configured, the battle main loop is started.
    """
    userId = session.ctx['user_id']
    argText = session.current_arg_text.strip()
    isRandom = True if 'random' in argText.lower() else False
    # Five space-separated card ids, e.g. "1 2 3 4 5".
    regex = r'\d+ \d+ \d+ \d+ \d+'
    argMatch = re.search(regex, argText)
    if not argMatch and not isRandom:
        # Malformed input: reply with the help text file.
        with codecs.open(u'text/符卡配置帮助.txt', 'r', 'utf-8') as f:
            await session.send(f.read().strip())
        return
    battle = inBattle(userId)
    if not battle:
        await session.send('您不在一场符卡对战中!')
        return
    if battle.gameRound is not None and battle.gameRound != 0:
        await session.send('对战已开始,不可中途更换符卡!')
        return
    if isRandom:
        setCardSuccess, cost = setCardInRandom(battle, userId)
    else:
        setCardSuccess, cost = setCardByCardString(battle, userId, argText)
    if not setCardSuccess:
        # cost is None when an id was unknown; otherwise the cost cap was exceeded.
        if cost is None:
            await session.send('符卡配置失败:你选择的某个编号不存在对应符卡。')
        else:
            await session.send(f'符卡配置失败:你选择的符卡Cost总和为{cost}, 超出Cost上限:7')
        return
    await session.send(f'符卡配置成功!选择的符卡总Cost为{cost}')
    print('BeforeSetCard:' + str(battleList))
    if userId not in battle.spellCardSettled:
        battle.spellCardSettled.append(userId)
    if len(battle.spellCardSettled) == 1:
        info = '一位玩家完成了符卡配置!等待另一位玩家。'
        await bot.send_group_msg(group_id=battle.groupId, message=info)
        print('OnSetCard1:' + str(battleList))
    elif len(battle.spellCardSettled) == 2:
        info = '所有玩家已完成符卡配置,对战启动中……'
        await bot.send_group_msg(group_id=battle.groupId, message=info)
        print('OnSetCard2:' + str(battleList))
        await battleMain(battle)
def setCardByCardString(battle: Battle, userId: int, argText: str):
    """Parse a space-separated card-id string and apply it to the player's battler.

    Fix: ``argText`` was annotated with the ``string`` module instead of the
    builtin ``str`` type; also replaced map/lambda with a comprehension.
    """
    battler = battle.creator if userId == battle.creatorId else battle.joiner
    mainCardList = [int(x) for x in argText.split(" ")]
    return setCardByIdList(battler, mainCardList)
def setCardInRandom(battle: Battle, userId: int):
    # Pick the battler belonging to the requesting player and give it a
    # randomly generated card id list.
    battler = battle.creator if userId == battle.creatorId else battle.joiner
    cardIdList = utils.getRandomCardIdList()
    return setCardByIdList(battler, cardIdList)
| def setCardByIdList(battler: Battler, cardIdList: list): | 1 | 2023-11-02 04:06:31+00:00 | 8k |
ilur98/DGQ | dgq/entry.py | [
{
"identifier": "PTQ",
"path": "dgq/quant/quant_sequence.py",
"snippet": "@torch.no_grad()\ndef PTQ(model, enc, \n qconfig, \n nsamples=128, seqlen=2048):\n dev = \"cuda:0\"\n layers = get_blocks(model)\n layer_kwargs = {}\n cache={'i': 0}\n layers[0] = layers[0].cuda()\n ... | import argparse
import numpy as np
import torch
import torch.nn as nn
import time
import lm_eval
from transformers import AutoModelForCausalLM, AutoTokenizer, AutoConfig
from texttable import Texttable
from dgq.quant.quant_sequence import PTQ
from dgq.utils.datautils import get_loaders, prepare_mmlu
from dgq.utils.evalutils import model_eval, total_model_eval, mmlu_eval
from dgq.utils.loadutils import load_quant, inference_model
from dgq.utils.modelutils import convert_model
from safetensors.torch import save_file as safe_save | 5,657 |
# Command-line interface for the DGQ quantization entry point.
parser = argparse.ArgumentParser()
# Model and calibration-data arguments.
parser.add_argument('model', type=str, help='llama model to load')
parser.add_argument('dataset', type=str, choices=['wikitext2', 'ptb', 'c4'], help='Where to extract calibration data from.')
parser.add_argument('--nsamples', type=int, default=18, help='Number of calibration data samples.')
parser.add_argument('--seed', type=int, default=0, help='Seed for sampling the calibration data.')
# Quantization bit-widths and behaviour.
parser.add_argument('--wbits', type=int, default=4, choices=[2, 3, 4, 8, 16], help='#bits to use for weight quantization; use 16 for evaluating base model.')
parser.add_argument('--abits', type=int, default=8, choices=[8, 16], help='#bits to use for activation quantization; use 16 for evaluating base model.')
parser.add_argument('--percdamp', type=float, default=.01, help='Percent of the average Hessian diagonal to use for dampening.')
# Checkpoint save/load options.
parser.add_argument('--save', type=str, default='', help='Save quantized checkpoint under this name.')
parser.add_argument('--save_safetensors', type=str, default='', help='Save quantized `.safetensors` checkpoint under this name.')
parser.add_argument('--load', type=str, default='', help='Load quantized model.')
parser.add_argument('--benchmark', type=int, default=0, help='Number of tokens to use for benchmarking.')
parser.add_argument('--check', action='store_true', help='Whether to compute perplexity during benchmarking for verification.')
parser.add_argument('--groupsize', type=int, default=-1, help='Groupsize to use for quantization; default uses full row.')
parser.add_argument('--sym', action='store_true', help='Whether to perform symmetric quantization.')
parser.add_argument('--act-order', action='store_true', help='Whether to apply the activation order GPTQ heuristic')
parser.add_argument('--true-sequential', action='store_true', help='Whether to run in true sequential model.')
# Quantization method selection (see generate_qconfig).
parser.add_argument('--act_fun', type=str, default='static', help='activation quantization.')
parser.add_argument('--wt_fun', type=str, default='naive', help='weight quantization.')
parser.add_argument('--smoothquant', action='store_true', help='whether to ')
parser.add_argument('--kvquant', action='store_true', help='whether to ')
parser.add_argument('--meanact', action='store_true', help='whether to ')
parser.add_argument('--observe', action='store_true', help='whether to ')
parser.add_argument('--nearest', action='store_true', help='whether to ')
parser.add_argument('--w4w8', action='store_true', help='wheter to open dual grained quantization')
# Evaluation options.
parser.add_argument('--eval', action='store_true', help='evaluate quantized model.')
parser.add_argument('--mmlu_eval', type=str, default="no", help="mmlu evaluation.")
parser.add_argument('--csqa_eval', type=str, default="no", help="csqa evaluation.")
parser.add_argument('--inference_mod', action='store_true', help='whether to ')
args = parser.parse_args()
def generate_qconfig(args):
    """Build the quantization config dict consumed by convert_model()/PTQ().

    Returns a dict with keys:
      - "act_quant": None or {"bits", "method"} for activation quantization
      - "wt_quant":  None or {"bits", "method", "groupsize", "w4w8"} for weights
      - "smoothquant" / "meanact" / "kvquant": feature flags

    Improvement: the ``hasattr(args, X) and args.X`` pattern is replaced with
    the equivalent, idiomatic ``getattr(args, X, False)``.
    """
    qconfig = {}
    if args.act_fun == "no":
        qconfig["act_quant"] = None
    else:
        qconfig["act_quant"] = {"bits": args.abits, "method": args.act_fun}
    if args.wt_fun == "no":
        qconfig["wt_quant"] = None
    else:
        qconfig["wt_quant"] = {
            "bits": args.wbits,
            "method": args.wt_fun,
            "groupsize": args.groupsize,
            "w4w8": getattr(args, "w4w8", False),
        }
    qconfig["smoothquant"] = getattr(args, "smoothquant", False)
    qconfig["meanact"] = getattr(args, "meanact", False)
    qconfig["kvquant"] = getattr(args, "kvquant", False)
    return qconfig
def prepare_model(model, seqlen=2048):
    """Load a causal LM checkpoint in bfloat16 and tag it with a sequence length.

    The torch weight initializers are disabled while loading because the
    checkpoint overwrites every weight anyway, so running them is wasted work.

    Fix: the initializers were previously monkey-patched globally and never
    restored, silently breaking any later torch code that relies on them;
    they are now restored in a finally block.
    """
    def skip(*args, **kwargs):
        pass
    saved = (torch.nn.init.kaiming_uniform_, torch.nn.init.uniform_, torch.nn.init.normal_)
    torch.nn.init.kaiming_uniform_ = skip
    torch.nn.init.uniform_ = skip
    torch.nn.init.normal_ = skip
    try:
        model = AutoModelForCausalLM.from_pretrained(model, torch_dtype=torch.bfloat16)
    finally:
        torch.nn.init.kaiming_uniform_, torch.nn.init.uniform_, torch.nn.init.normal_ = saved
    model.seqlen = seqlen
    return model
def main():
    """Entry point: quantize (or load) the model per CLI args, then save/evaluate."""
    model = prepare_model(args.model)
    qconfig = generate_qconfig(args)
    convert_model(model, qconfig)
    print(args)
    # Calibration data for PTQ.
    enc, _ = get_loaders(args.dataset, args.nsamples, model=args.model)
    if args.load:
        load_quant(model, args.load)
        # Fix: argparse always defines the attribute for a store_true flag, so
        # the previous hasattr(args, "inference_mod") test was always true and
        # inference_model() ran unconditionally; honor the flag's value instead.
        if args.inference_mod:
            model = inference_model(model)
    else:
        tick = time.time()
        PTQ(model, enc, qconfig, args.nsamples)
        print(time.time() - tick)
    if args.save_safetensors:
        model = model.cpu()
        state_dict = model.state_dict()
        # safetensors requires contiguous tensors that own their storage.
        state_dict = {k: v.clone().contiguous() for k, v in state_dict.items()}
        safe_save(state_dict, args.save_safetensors)
    if args.save:
        model = model.cpu()
        torch.save(model.state_dict(), args.save)
    if args.eval:
        datasets = ['wikitext2', 'ptb', 'c4']
        for dataset in datasets:
            _, testloader = get_loaders(dataset, seed=args.seed, model=args.model, seqlen=model.seqlen)
            print(dataset)
            # model_eval(model, testloader, torch.device('cuda:0'), local_args=args)
            total_model_eval(model, testloader, torch.device('cuda:0'), local_args=args)
|
# Command-line interface for the DGQ quantization entry point.
parser = argparse.ArgumentParser()
parser.add_argument('model', type=str, help='llama model to load')
parser.add_argument('dataset', type=str, choices=['wikitext2', 'ptb', 'c4'], help='Where to extract calibration data from.')
parser.add_argument('--nsamples', type=int, default=18, help='Number of calibration data samples.')
parser.add_argument('--seed', type=int, default=0, help='Seed for sampling the calibration data.')
parser.add_argument('--wbits', type=int, default=4, choices=[2, 3, 4, 8, 16], help='#bits to use for weight quantization; use 16 for evaluating base model.')
parser.add_argument('--abits', type=int, default=8, choices=[8, 16], help='#bits to use for activation quantization; use 16 for evaluating base model.')
parser.add_argument('--percdamp', type=float, default=.01, help='Percent of the average Hessian diagonal to use for dampening.')
parser.add_argument('--save', type=str, default='', help='Save quantized checkpoint under this name.')
parser.add_argument('--save_safetensors', type=str, default='', help='Save quantized `.safetensors` checkpoint under this name.')
parser.add_argument('--load', type=str, default='', help='Load quantized model.')
parser.add_argument('--benchmark', type=int, default=0, help='Number of tokens to use for benchmarking.')
parser.add_argument('--check', action='store_true', help='Whether to compute perplexity during benchmarking for verification.')
parser.add_argument('--groupsize', type=int, default=-1, help='Groupsize to use for quantization; default uses full row.')
parser.add_argument('--sym', action='store_true', help='Whether to perform symmetric quantization.')
parser.add_argument('--act-order', action='store_true', help='Whether to apply the activation order GPTQ heuristic')
parser.add_argument('--true-sequential', action='store_true', help='Whether to run in true sequential model.')
parser.add_argument('--act_fun', type=str, default='static', help='activation quantization.')
parser.add_argument('--wt_fun', type=str, default='naive', help='weight quantization.')
# NOTE(review): the help strings below were left unfinished upstream
# ('whether to '); completing them needs author confirmation of intent.
parser.add_argument('--smoothquant', action='store_true', help='whether to ')
parser.add_argument('--kvquant', action='store_true', help='whether to ')
parser.add_argument('--meanact', action='store_true', help='whether to ')
parser.add_argument('--observe', action='store_true', help='whether to ')
parser.add_argument('--nearest', action='store_true', help='whether to ')
# Fix: corrected user-facing typo 'wheter' -> 'whether'.
parser.add_argument('--w4w8', action='store_true', help='whether to open dual grained quantization')
parser.add_argument('--eval', action='store_true', help='evaluate quantized model.')
parser.add_argument('--mmlu_eval', type=str, default="no", help="mmlu evaluation.")
parser.add_argument('--csqa_eval', type=str, default="no", help="csqa evaluation.")
parser.add_argument('--inference_mod', action='store_true', help='whether to ')
args = parser.parse_args()
def generate_qconfig(args):
    """Translate parsed CLI arguments into the quantization config mapping."""
    act_cfg = None
    if args.act_fun != "no":
        act_cfg = {}
        act_cfg["bits"] = args.abits
        act_cfg["method"] = args.act_fun
    wt_cfg = None
    if args.wt_fun != "no":
        wt_cfg = {
            "bits": args.wbits,
            "method": args.wt_fun,
            "groupsize": args.groupsize,
            "w4w8": hasattr(args, "w4w8") and args.w4w8,
        }
    return {
        "act_quant": act_cfg,
        "wt_quant": wt_cfg,
        "smoothquant": hasattr(args, "smoothquant") and args.smoothquant,
        "meanact": hasattr(args, "meanact") and args.meanact,
        "kvquant": hasattr(args, "kvquant") and args.kvquant,
    }
def prepare_model(model, seqlen=2048):
    """Load a causal LM checkpoint in bfloat16 and tag it with a sequence length.

    NOTE(review): the torch initializers are monkey-patched to no-ops so that
    from_pretrained skips random weight init (the checkpoint supplies all
    weights); they are never restored afterwards — confirm that is intended.
    """
    def skip(*args, **kwargs):
        pass
    torch.nn.init.kaiming_uniform_ = skip
    torch.nn.init.uniform_ = skip
    torch.nn.init.normal_ = skip
    model = AutoModelForCausalLM.from_pretrained(model, torch_dtype=torch.bfloat16)
    model.seqlen = seqlen
    return model
def main():
    """Quantize (or load) the model per CLI args, then optionally save/evaluate."""
    model = prepare_model(args.model)
    qconfig = generate_qconfig(args)
    convert_model(model, qconfig)
    print(args)
    # Calibration data for PTQ.
    enc, _ = get_loaders(args.dataset, args.nsamples, model=args.model)
    if args.load:
        load_quant(model, args.load)
        # NOTE(review): argparse always defines inference_mod (store_true), so
        # this hasattr test is always true and inference_model() always runs —
        # this probably should test args.inference_mod; confirm with author.
        if hasattr(args, "inference_mod"):
            model = inference_model(model)
    else:
        tick = time.time()
        PTQ(model, enc, qconfig, args.nsamples)
        print(time.time() - tick)
    if args.save_safetensors:
        model = model.cpu()
        state_dict = model.state_dict()
        # safetensors requires contiguous tensors that own their storage.
        state_dict = {k: v.clone().contiguous() for k, v in state_dict.items()}
        safe_save(state_dict, args.save_safetensors)
    if args.save:
        model = model.cpu()
        torch.save(model.state_dict(), args.save)
    if args.eval:
        datasets = ['wikitext2', 'ptb', 'c4']
        for dataset in datasets:
            _, testloader = get_loaders(dataset, seed=args.seed, model=args.model, seqlen=model.seqlen)
            print(dataset)
            # model_eval(model, testloader, torch.device('cuda:0'), local_args=args)
            total_model_eval(model, testloader, torch.device('cuda:0'), local_args=args)
noco-ai/elemental-golem | modules/turboderp/exllama/golem-generator.py | [
{
"identifier": "LlmHandler",
"path": "application/llm_handler.py",
"snippet": "class LlmHandler(BaseHandler):\n \n def __init__(self):\n super().__init__()\n\n def load(self, model, model_options, local_path):\n pass\n \n def load_config_settings(self, num_input_tokens, req... | from application.llm_handler import LlmHandler
from model import ExLlama, ExLlamaCache, ExLlamaConfig
from tokenizer import ExLlamaTokenizer
from generator import ExLlamaGenerator
from lora import ExLlamaLora
from application.system_info import get_gpu_memory_usage
from huggingface_hub import snapshot_download
import sys
import os
import glob
import time
import logging
import math | 4,029 | if unicode_hold is True:
unicode_hold = False
chunk = res_line[-1:]
# send chunk to front end
if stream_output:
if debug:
print('\033[96m' + chunk, end="")
channel.basic_publish(
exchange=incoming_headers['return_exchange'],
routing_key=incoming_headers['return_routing_key'],
body=chunk, properties=outgoing_properties)
else:
response += chunk
prompt += chunk
held_text = ""
else:
held_text += new_text
# check stop conditions
stop_condition = self.check_stop_conditions(token, res_line, tokenizer.eos_token_id,
check_stop_token, stop_conditions)
if stop_condition: break
end_time = time.time()
elapsed = end_time - begin_time
token_rate = 0 if elapsed == 0 else (new_tokens / elapsed)
generator.end_beam_search()
if debug and stream_output:
print('\033[0m' + "")
if new_tokens == max_new_tokens:
finish_reason = "length"
model_name = incoming_headers["model_name"] if "model_name" in incoming_headers else "not_provided"
resp = self.finish_response(stop_key, response, request, stream_output, finish_reason,
token_rate, new_tokens, input_token_count, model_name, elapsed, debug)
return resp
def load_lora(self, request, model, config):
# load lora from config and override w/ request if present
lora_name = config["default_lora"] if "default_lora" in config else None
if "lora" in request:
lora_name = request["lora"]
if lora_name != None:
if lora_name not in self.loras:
logger.info(f"loading lora {lora_name}")
lora_dir = os.path.join(f"data/loras/", lora_name)
if not os.path.exists(lora_dir):
logger.info("downloading lora {lora_name} from huggingface")
snapshot_download(repo_id=lora_name, local_dir=lora_dir, cache_dir='data/cache', local_dir_use_symlinks=False)
lora_path = os.path.join(f"data/loras/", lora_name, "adapter_model.bin")
lora_config_path = os.path.join(f"data/loras/{lora_name}", "adapter_config.json")
lora = ExLlamaLora(model["model_loaded"], lora_config_path, lora_path)
self.loras[lora_name] = lora
else:
logger.info(f"using lora {lora_name}")
model["generator"].lora = self.loras[lora_name]
else:
model["generator"].lora = None
def execute(self, model, request):
# load lora
config = self.model_config
self.load_lora(request, model, config)
# build prompt
prompt = self.build_prompt(request, config, model)
# copy amqp headers
incoming_headers = model["amqp_headers"]
outgoing_properties = self.copy_queue_headers(incoming_headers)
stream_resp = self.stream(
model["generator"],
model["tokenizer"],
model["model_loaded"],
prompt,
model["amqp_channel"],
incoming_headers,
outgoing_properties,
config["stop_on"],
model,
request)
return stream_resp
def load(self, model, model_options, local_path):
self.model_config = model["configuration"]
# get paths
logger.info(f"starting module {local_path}")
tokenizer_path = os.path.join(local_path, "tokenizer.model")
model_config_path = os.path.join(local_path, "config.json")
st_pattern = os.path.join(local_path, "*.safetensors")
model_path = glob.glob(st_pattern)[0]
# Create config, model, tokenizer and generator
config = ExLlamaConfig(model_config_path)
config.model_path = model_path
config.compress_pos_emb = model["configuration"].get("compress_pos_emb", 1.0)
config.max_seq_len = model["configuration"].get("max_seq_len", 2048)
config.matmul_recons_thd = 8
config.fused_mlp_thd = 2
config.sdp_thd = 8
# set model device
if model_options["device"].startswith("split"):
device_map = model_options["device"].split(':')[1]
config.set_auto_map(device_map)
elif model_options["device"].startswith("cuda"):
device_number = int(model_options["device"].split(':')[1])
device_array = [0]*12
| sys.path.append(os.path.dirname(os.path.realpath(__file__)))
# Module-level logger for this handler.
logger = logging.getLogger(__name__)
class GolemExLlamaGenerator(LlmHandler):
    def __init__(self):
        super().__init__()
        # Cache of loaded LoRA adapters, keyed by adapter name.
        self.loras = {}
def update_config(self, config_data):
current_config = self.model_config
merged_config = {**current_config, **config_data}
self.model_config = merged_config
    def validate(self, request):
        # Delegate to the shared request validator using the 'llm' schema.
        is_valid, errors = self.validate_request(request, 'llm')
        return is_valid, errors
def get_token_count(self, input_text):
ids = self.generator.tokenizer.encode(input_text)
input_token_count = len(ids[0])
return input_token_count
    def stream(self, generator, tokenizer, model, prompt, channel, incoming_headers,
            outgoing_properties, stops, model_data, request):
        """Generate tokens via beam search, streaming chunks over AMQP.

        Text is held back (not streamed) while it could still turn into a stop
        string or while the last code point is an incomplete unicode surrogate.
        Returns the finished response built by finish_response().
        """
        # setup stop conditions
        check_stop_token, stop_conditions = self.build_stop_conditions(stops)
        res_line = ""
        held_text = ""
        response = ""
        unicode_hold = False
        finish_reason = "stop"
        stop_condition = False
        new_tokens = 0
        stop_generation_counter = 0
        ids = generator.tokenizer.encode(prompt)
        input_token_count = len(ids[0])
        max_new_tokens, top_p, top_k, seed, temperature, stream_output, debug, stop_key, \
            min_p, mirostat, mirostat_eta, mirostat_tau = self.load_config_settings(input_token_count, request)
        if debug:
            # Echo the raw request and prompt in blue for troubleshooting.
            print('\033[94m')
            print(request)
            print(prompt)
            print('\033[0m')
        socket_id = incoming_headers["socket_id"] if "socket_id" in incoming_headers else None
        generator.settings.temperature = temperature
        generator.settings.top_p = top_p
        begin_time = time.time()
        generator.gen_begin(ids)
        generator.begin_beam_search()
        for i in range(max_new_tokens):
            new_tokens += 1
            # check if stop generation was requested
            stop_generation, stop_generation_counter = self.check_stop_generation(stop_generation_counter,
                model_data["stop_generation_event"], model_data["stop_generation_filter"], socket_id)
            if stop_generation:
                finish_reason = "abort"
                break
            token = generator.beam_search()
            # Re-decode the whole generated tail and diff against the previous
            # decode to recover just the newly produced text.
            prev_res_line = res_line
            res_line = tokenizer.decode(generator.sequence_actual[0, -new_tokens:])
            new_text = res_line[len(prev_res_line):]
            # new text
            chunk = held_text + new_text
            # check if we should hold off on streaming this text
            hold_text = False
            for stop_string in stop_conditions:
                if stop_string.startswith(chunk.lower()): hold_text = True
            if len(res_line):
                # 65533 is U+FFFD (replacement char); 55356/55357 are high
                # surrogates — the character is not complete yet, keep holding.
                check_ord = ord(res_line[-1])
                if check_ord == 65533 or check_ord == 55356 or check_ord == 55357:
                    hold_text = True
                    unicode_hold = True
            if not hold_text:
                if unicode_hold is True:
                    unicode_hold = False
                    chunk = res_line[-1:]
                # send chunk to front end
                if stream_output:
                    if debug:
                        print('\033[96m' + chunk, end="")
                    channel.basic_publish(
                        exchange=incoming_headers['return_exchange'],
                        routing_key=incoming_headers['return_routing_key'],
                        body=chunk, properties=outgoing_properties)
                else:
                    response += chunk
                prompt += chunk
                held_text = ""
            else:
                held_text += new_text
            # check stop conditions
            stop_condition = self.check_stop_conditions(token, res_line, tokenizer.eos_token_id,
                check_stop_token, stop_conditions)
            if stop_condition: break
        end_time = time.time()
        elapsed = end_time - begin_time
        token_rate = 0 if elapsed == 0 else (new_tokens / elapsed)
        generator.end_beam_search()
        if debug and stream_output:
            print('\033[0m' + "")
        if new_tokens == max_new_tokens:
            finish_reason = "length"
        model_name = incoming_headers["model_name"] if "model_name" in incoming_headers else "not_provided"
        resp = self.finish_response(stop_key, response, request, stream_output, finish_reason,
            token_rate, new_tokens, input_token_count, model_name, elapsed, debug)
        return resp
def load_lora(self, request, model, config):
# load lora from config and override w/ request if present
lora_name = config["default_lora"] if "default_lora" in config else None
if "lora" in request:
lora_name = request["lora"]
if lora_name != None:
if lora_name not in self.loras:
logger.info(f"loading lora {lora_name}")
lora_dir = os.path.join(f"data/loras/", lora_name)
if not os.path.exists(lora_dir):
logger.info("downloading lora {lora_name} from huggingface")
snapshot_download(repo_id=lora_name, local_dir=lora_dir, cache_dir='data/cache', local_dir_use_symlinks=False)
lora_path = os.path.join(f"data/loras/", lora_name, "adapter_model.bin")
lora_config_path = os.path.join(f"data/loras/{lora_name}", "adapter_config.json")
lora = ExLlamaLora(model["model_loaded"], lora_config_path, lora_path)
self.loras[lora_name] = lora
else:
logger.info(f"using lora {lora_name}")
model["generator"].lora = self.loras[lora_name]
else:
model["generator"].lora = None
    def execute(self, model, request):
        """Handle one inference request end-to-end and return the response."""
        # load lora
        config = self.model_config
        self.load_lora(request, model, config)
        # build prompt
        prompt = self.build_prompt(request, config, model)
        # copy amqp headers
        incoming_headers = model["amqp_headers"]
        outgoing_properties = self.copy_queue_headers(incoming_headers)
        stream_resp = self.stream(
            model["generator"],
            model["tokenizer"],
            model["model_loaded"],
            prompt,
            model["amqp_channel"],
            incoming_headers,
            outgoing_properties,
            config["stop_on"],
            model,
            request)
        return stream_resp
def load(self, model, model_options, local_path):
self.model_config = model["configuration"]
# get paths
logger.info(f"starting module {local_path}")
tokenizer_path = os.path.join(local_path, "tokenizer.model")
model_config_path = os.path.join(local_path, "config.json")
st_pattern = os.path.join(local_path, "*.safetensors")
model_path = glob.glob(st_pattern)[0]
# Create config, model, tokenizer and generator
config = ExLlamaConfig(model_config_path)
config.model_path = model_path
config.compress_pos_emb = model["configuration"].get("compress_pos_emb", 1.0)
config.max_seq_len = model["configuration"].get("max_seq_len", 2048)
config.matmul_recons_thd = 8
config.fused_mlp_thd = 2
config.sdp_thd = 8
# set model device
if model_options["device"].startswith("split"):
device_map = model_options["device"].split(':')[1]
config.set_auto_map(device_map)
elif model_options["device"].startswith("cuda"):
device_number = int(model_options["device"].split(':')[1])
device_array = [0]*12 | used_memory, free_memory, total_memory = get_gpu_memory_usage(device_number) | 1 | 2023-11-06 19:03:07+00:00 | 8k |
m4rkw/monzo-utils | monzo_utils/model/flex_summary.py | [
{
"identifier": "Payment",
"path": "monzo_utils/model/payment.py",
"snippet": "class Payment:\n\n transaction_type = 'money_out'\n always_fixed = False\n\n def __init__(self, config, payment_list_config, payment_config, last_salary_date, next_salary_date, following_salary_date):\n self.c... | import datetime
import math
from monzo_utils.model.payment import Payment
from monzo_utils.model.account import Account
from monzo_utils.model.transaction import Transaction | 4,266 |
class FlexSummary(Payment):
    def __init__(self, config, total, total_next_month, remaining, last_salary_date):
        # Summary rows are synthesized, not read from payment config, so the
        # Payment base initializer is intentionally not called.
        self.config = config
        self.flex_total = total
        self.flex_total_next_month = total_next_month
        self.flex_remaining = remaining
        self.last_salary_date = last_salary_date
        # Per-instance memo for expensive properties (due_date, last_payment).
        self.cache = {}
@property
def status(self):
if self.last_payment and self.last_payment.date >= self.last_salary_date:
return 'PAID'
return 'DUE'
    @property
    def name(self):
        # Fixed display name for the aggregated flex row.
        return 'Flex Payment'
    @property
    def display_amount(self):
        # Amount shown in the listing: total flex due this month.
        return self.flex_total
@property
def last_date(self):
if self.last_payment:
return self.last_payment.date
last_date = datetime.datetime.now()
while last_date.day != self.config['flex_payment_date']:
last_date -= datetime.timedelta(days=1)
return last_date
@property
def due_date(self):
if 'due_date' in self.cache:
return self.cache['due_date']
if self.last_payment:
due_date = self.last_payment.date
while due_date.day != self.config['flex_payment_date']:
due_date += datetime.timedelta(days=1)
due_date += datetime.timedelta(days=1)
while due_date.day != self.config['flex_payment_date']:
due_date += datetime.timedelta(days=1)
return due_date
date = datetime.datetime.now() + datetime.timedelta(days=1)
while date.day != self.config['flex_payment_date']:
date += datetime.timedelta(days=1)
return datetime.date(date.year, date.month, date.day)
    @property
    def remaining(self):
        # Remaining flex balance supplied at construction time.
        return self.flex_remaining
    def display(self):
        """Print the normal payment row, then an extra 'next month' summary row."""
        super().display()
        data = self.data()
        # Fixed-width columns matching the Payment listing layout.
        print("%s: %s %s %s %s %s %s %s" % (
            'SKIPPED'.rjust(7),
            data['payment_type'].ljust(15),
            'Flex Payment next month'.ljust(25),
            data['suffix'].ljust(5),
            ('£%.2f' % (self.flex_total_next_month)).ljust(8),
            ('£%.2f' % (data['remaining'] - self.flex_total_next_month)).ljust(8) if data['remaining'] else ''.ljust(8),
            data['last_date'].strftime('%Y-%m-%d').ljust(12) if data['last_date'] else ''.ljust(12),
            data['due_date'].strftime('%Y-%m-%d').ljust(10) if data['due_date'] else ''
        ))
@property
def last_payment(self):
if 'last_payment' in self.cache:
return self.cache['last_payment']
account = Account().find_by_name(self.config['flex_account'])
where = [{'clause': 'date > %s', 'params': [self.last_salary_date]}]
|
class FlexSummary(Payment):
def __init__(self, config, total, total_next_month, remaining, last_salary_date):
self.config = config
self.flex_total = total
self.flex_total_next_month = total_next_month
self.flex_remaining = remaining
self.last_salary_date = last_salary_date
self.cache = {}
@property
def status(self):
if self.last_payment and self.last_payment.date >= self.last_salary_date:
return 'PAID'
return 'DUE'
@property
def name(self):
return 'Flex Payment'
@property
def display_amount(self):
return self.flex_total
@property
def last_date(self):
if self.last_payment:
return self.last_payment.date
last_date = datetime.datetime.now()
while last_date.day != self.config['flex_payment_date']:
last_date -= datetime.timedelta(days=1)
return last_date
@property
def due_date(self):
if 'due_date' in self.cache:
return self.cache['due_date']
if self.last_payment:
due_date = self.last_payment.date
while due_date.day != self.config['flex_payment_date']:
due_date += datetime.timedelta(days=1)
due_date += datetime.timedelta(days=1)
while due_date.day != self.config['flex_payment_date']:
due_date += datetime.timedelta(days=1)
return due_date
date = datetime.datetime.now() + datetime.timedelta(days=1)
while date.day != self.config['flex_payment_date']:
date += datetime.timedelta(days=1)
return datetime.date(date.year, date.month, date.day)
@property
def remaining(self):
return self.flex_remaining
def display(self):
super().display()
data = self.data()
print("%s: %s %s %s %s %s %s %s" % (
'SKIPPED'.rjust(7),
data['payment_type'].ljust(15),
'Flex Payment next month'.ljust(25),
data['suffix'].ljust(5),
('£%.2f' % (self.flex_total_next_month)).ljust(8),
('£%.2f' % (data['remaining'] - self.flex_total_next_month)).ljust(8) if data['remaining'] else ''.ljust(8),
data['last_date'].strftime('%Y-%m-%d').ljust(12) if data['last_date'] else ''.ljust(12),
data['due_date'].strftime('%Y-%m-%d').ljust(10) if data['due_date'] else ''
))
@property
def last_payment(self):
if 'last_payment' in self.cache:
return self.cache['last_payment']
account = Account().find_by_name(self.config['flex_account'])
where = [{'clause': 'date > %s', 'params': [self.last_salary_date]}]
| transaction = Transaction().find_by_account_id_and_declined_and_money_in_and_description( | 2 | 2023-11-05 12:48:18+00:00 | 8k |
rossiyareich/inknhue | train.py | [
{
"identifier": "ConditionalDataset",
"path": "src/conditional/conditional_dataset.py",
"snippet": "class ConditionalDataset(Dataset):\n def __init__(self, dataset_path, transform=None):\n self.dataset_path = dataset_path\n self.transform = transform\n self.cond_dataset = []\n\n ... | import argparse
import copy
import gc
import logging
import os
import numpy as np
import torch
import torch.backends.cuda
import torch.backends.cudnn
import torch.optim as optim
import torchvision.transforms.functional as VF
import wandb
from accelerate import Accelerator
from omegaconf import OmegaConf
from rich.traceback import install
from torch import nn
from torch.utils.data import DataLoader, Subset
from torchvision import transforms
from tqdm.auto import tqdm
from src.conditional.conditional_dataset import ConditionalDataset
from src.conditional.conditional_decoder import ConditionalDecoder
from src.conditional.conditional_encoder import ConditionalEncoder
from src.encoder import Encoder
from src.gaussian_distribution import GaussianDistribution
from src.perceptual_loss import LPIPSWithDiscriminator
from src.utils import resize | 5,534 | "architecture": conf.wandb.config.architecture,
"base_lr": conf.params.base_lr,
"epoch": conf.params.epoch,
},
)
# Load models
logging.info("Setting up models")
# Convolution to map from embedding space to quantized embedding space moments
quant_conv = nn.Conv2d(
2 * pretrained_yaml.params.ddconfig.z_channels,
2 * pretrained_yaml.params.embed_dim,
1,
)
# Convolution to map from quantized embedding space back to embedding space
post_quant_conv = nn.Conv2d(
pretrained_yaml.params.embed_dim,
pretrained_yaml.params.ddconfig.z_channels,
1,
)
encoder = Encoder(
channels=pretrained_yaml.params.ddconfig.ch,
channel_multipliers=pretrained_yaml.params.ddconfig.ch_mult,
n_resnet_blocks=pretrained_yaml.params.ddconfig.num_res_blocks,
in_channels=pretrained_yaml.params.ddconfig.in_channels,
z_channels=pretrained_yaml.params.ddconfig.z_channels,
)
cond_encoder = ConditionalEncoder(
channels=pretrained_yaml.params.ddconfig.ch,
channel_multipliers=pretrained_yaml.params.ddconfig.ch_mult,
n_resnet_blocks=pretrained_yaml.params.ddconfig.num_res_blocks,
in_channels=pretrained_yaml.params.ddconfig.in_channels,
)
cond_decoder = ConditionalDecoder(
channels=pretrained_yaml.params.ddconfig.ch,
channel_multipliers=pretrained_yaml.params.ddconfig.ch_mult,
n_resnet_blocks=pretrained_yaml.params.ddconfig.num_res_blocks,
out_channels=pretrained_yaml.params.ddconfig.out_ch,
z_channels=pretrained_yaml.params.ddconfig.z_channels,
)
discriminator = LPIPSWithDiscriminator(
disc_start=pretrained_yaml.params.lossconfig.disc_start,
disc_weight=pretrained_yaml.params.lossconfig.disc_weight,
kl_weight=pretrained_yaml.params.lossconfig.kl_weight,
)
# Setup flags
logging.info("Setting up flags")
quant_conv.requires_grad_(False)
post_quant_conv.requires_grad_(False)
encoder.requires_grad_(False)
cond_encoder.train()
cond_decoder.train()
discriminator.train()
# Load state_dicts
logging.info("Loading state_dicts")
quant_conv_state_dict = {}
post_quant_conv_state_dict = {}
encoder_state_dict = {}
cond_encoder_state_dict = {}
cond_decoder_state_dict = {}
loss_state_dict = {}
for k, v in pretrained_ckpt["state_dict"].items():
if k.startswith("quant_conv"):
quant_conv_state_dict[k.replace("quant_conv.", "", 1)] = v
elif k.startswith("post_quant_conv"):
post_quant_conv_state_dict[k.replace("post_quant_conv.", "", 1)] = v
elif k.startswith("encoder"):
encoder_state_dict[k.replace("encoder.", "", 1)] = v
if not (
k.startswith("encoder.mid")
or k.startswith("encoder.norm_out")
or k.startswith("encoder.conv_out")
):
cond_encoder_state_dict[k.replace("encoder.", "", 1)] = copy.deepcopy(v)
elif k.startswith("decoder"):
cond_decoder_state_dict[k.replace("decoder.", "", 1)] = v
elif k.startswith("loss"):
loss_state_dict[k.replace("loss.", "", 1)] = v
else:
raise KeyError(f"Unexpected state_dict key: {k}")
quant_conv.load_state_dict(quant_conv_state_dict, strict=True)
post_quant_conv.load_state_dict(post_quant_conv_state_dict, strict=True)
encoder.load_state_dict(encoder_state_dict, strict=True)
cond_encoder.load_state_dict(cond_encoder_state_dict, strict=False)
cond_decoder.load_state_dict(cond_decoder_state_dict, strict=True)
discriminator.load_state_dict(loss_state_dict, strict=True)
# Load dataset & dataloader
logging.info("Setting up Dataset and DataLoader")
def transform(g, s, c):
g, s, c = (
resize(g, conf.params.size),
resize(s, conf.params.size),
resize(c, conf.params.size),
)
i, j, h, w = transforms.RandomCrop.get_params(
img=g,
output_size=(
conf.params.crop_size,
conf.params.crop_size,
),
)
g, s, c = VF.crop(g, i, j, h, w), VF.crop(s, i, j, h, w), VF.crop(c, i, j, h, w)
pil_to_tensor = transforms.PILToTensor()
g, s, c = pil_to_tensor(g), pil_to_tensor(s), pil_to_tensor(c)
g, s, c = (
((g / 255.0) * 2.0 - 1.0).clamp(-1, 1),
((s / 255.0) * 2.0 - 1.0).clamp(-1, 1),
((c / 255.0) * 2.0 - 1.0).clamp(-1, 1),
)
return g, s, c
|
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--conf_path",
type=str,
required=True,
help="Path to the configuration file",
)
args = parser.parse_args()
return args
def main(args):
# Load configuration
logging.info("Loading configuration")
conf = OmegaConf.load(args.conf_path)
# Create checkpoint directory
logging.info("Creating checkpoint directory")
os.makedirs(conf.paths.checkpoint_path, exist_ok=True)
# Allow TF32
logging.info("Allowing TF32")
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
# Load Accelerate
logging.info("Setting up Accelerate")
accelerator = Accelerator()
# Load pretrained parameters
logging.info("Loading pretrained checkpoints")
pretrained_ckpt = torch.load(conf.paths.pretrained_ckpt)
pretrained_yaml = OmegaConf.load(conf.paths.pretrained_yaml)
# Load wandb
logging.info("Setting up wandb")
wandb.init(
project=conf.wandb.project,
config={
"optimizer": conf.wandb.config.optimizer,
"architecture": conf.wandb.config.architecture,
"base_lr": conf.params.base_lr,
"epoch": conf.params.epoch,
},
)
# Load models
logging.info("Setting up models")
# Convolution to map from embedding space to quantized embedding space moments
quant_conv = nn.Conv2d(
2 * pretrained_yaml.params.ddconfig.z_channels,
2 * pretrained_yaml.params.embed_dim,
1,
)
# Convolution to map from quantized embedding space back to embedding space
post_quant_conv = nn.Conv2d(
pretrained_yaml.params.embed_dim,
pretrained_yaml.params.ddconfig.z_channels,
1,
)
encoder = Encoder(
channels=pretrained_yaml.params.ddconfig.ch,
channel_multipliers=pretrained_yaml.params.ddconfig.ch_mult,
n_resnet_blocks=pretrained_yaml.params.ddconfig.num_res_blocks,
in_channels=pretrained_yaml.params.ddconfig.in_channels,
z_channels=pretrained_yaml.params.ddconfig.z_channels,
)
cond_encoder = ConditionalEncoder(
channels=pretrained_yaml.params.ddconfig.ch,
channel_multipliers=pretrained_yaml.params.ddconfig.ch_mult,
n_resnet_blocks=pretrained_yaml.params.ddconfig.num_res_blocks,
in_channels=pretrained_yaml.params.ddconfig.in_channels,
)
cond_decoder = ConditionalDecoder(
channels=pretrained_yaml.params.ddconfig.ch,
channel_multipliers=pretrained_yaml.params.ddconfig.ch_mult,
n_resnet_blocks=pretrained_yaml.params.ddconfig.num_res_blocks,
out_channels=pretrained_yaml.params.ddconfig.out_ch,
z_channels=pretrained_yaml.params.ddconfig.z_channels,
)
discriminator = LPIPSWithDiscriminator(
disc_start=pretrained_yaml.params.lossconfig.disc_start,
disc_weight=pretrained_yaml.params.lossconfig.disc_weight,
kl_weight=pretrained_yaml.params.lossconfig.kl_weight,
)
# Setup flags
logging.info("Setting up flags")
quant_conv.requires_grad_(False)
post_quant_conv.requires_grad_(False)
encoder.requires_grad_(False)
cond_encoder.train()
cond_decoder.train()
discriminator.train()
# Load state_dicts
logging.info("Loading state_dicts")
quant_conv_state_dict = {}
post_quant_conv_state_dict = {}
encoder_state_dict = {}
cond_encoder_state_dict = {}
cond_decoder_state_dict = {}
loss_state_dict = {}
for k, v in pretrained_ckpt["state_dict"].items():
if k.startswith("quant_conv"):
quant_conv_state_dict[k.replace("quant_conv.", "", 1)] = v
elif k.startswith("post_quant_conv"):
post_quant_conv_state_dict[k.replace("post_quant_conv.", "", 1)] = v
elif k.startswith("encoder"):
encoder_state_dict[k.replace("encoder.", "", 1)] = v
if not (
k.startswith("encoder.mid")
or k.startswith("encoder.norm_out")
or k.startswith("encoder.conv_out")
):
cond_encoder_state_dict[k.replace("encoder.", "", 1)] = copy.deepcopy(v)
elif k.startswith("decoder"):
cond_decoder_state_dict[k.replace("decoder.", "", 1)] = v
elif k.startswith("loss"):
loss_state_dict[k.replace("loss.", "", 1)] = v
else:
raise KeyError(f"Unexpected state_dict key: {k}")
quant_conv.load_state_dict(quant_conv_state_dict, strict=True)
post_quant_conv.load_state_dict(post_quant_conv_state_dict, strict=True)
encoder.load_state_dict(encoder_state_dict, strict=True)
cond_encoder.load_state_dict(cond_encoder_state_dict, strict=False)
cond_decoder.load_state_dict(cond_decoder_state_dict, strict=True)
discriminator.load_state_dict(loss_state_dict, strict=True)
# Load dataset & dataloader
logging.info("Setting up Dataset and DataLoader")
def transform(g, s, c):
g, s, c = (
resize(g, conf.params.size),
resize(s, conf.params.size),
resize(c, conf.params.size),
)
i, j, h, w = transforms.RandomCrop.get_params(
img=g,
output_size=(
conf.params.crop_size,
conf.params.crop_size,
),
)
g, s, c = VF.crop(g, i, j, h, w), VF.crop(s, i, j, h, w), VF.crop(c, i, j, h, w)
pil_to_tensor = transforms.PILToTensor()
g, s, c = pil_to_tensor(g), pil_to_tensor(s), pil_to_tensor(c)
g, s, c = (
((g / 255.0) * 2.0 - 1.0).clamp(-1, 1),
((s / 255.0) * 2.0 - 1.0).clamp(-1, 1),
((c / 255.0) * 2.0 - 1.0).clamp(-1, 1),
)
return g, s, c
| cond_dataset = cond_dataset_full = ConditionalDataset( | 0 | 2023-11-03 09:35:30+00:00 | 8k |
TencentBlueKing/bkflow-feel | bkflow_feel/transformer.py | [
{
"identifier": "RangeGroupOperator",
"path": "bkflow_feel/data_models.py",
"snippet": "class RangeGroupOperator(enum.Enum):\n GT = \"greater than\"\n GTE = \"greater than or equal\"\n LT = \"less than\"\n LTE = \"less than or equal\""
},
{
"identifier": "AfterFunc",
"path": "bkf... | from lark import Token, Transformer, v_args
from .data_models import RangeGroupOperator
from .parsers import (
AfterFunc,
And,
BeforeFunc,
Between,
Boolean,
Context,
ContextItem,
Date,
DateAndTime,
DayOfWeekFunc,
Expr,
FuncInvocation,
FunctionCall,
In,
IncludesFunc,
List,
ListEvery,
ListFilter,
ListItem,
ListOperator,
ListSome,
MonthOfYearFunc,
Not,
NotEqual,
NowFunc,
Null,
Number,
Or,
Pair,
RangeGroup,
SameTypeBinaryOperator,
String,
StringOperator,
Time,
TodayFunc,
ToString,
TZInfo,
Variable, IsDefinedFunc, GetOrElseFunc,
) | 4,372 | # -*- coding: utf-8 -*-
@v_args(inline=True)
class FEELTransformer(Transformer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def number(self, token):
try:
value = int(token.value)
except ValueError:
value = float(token.value)
| # -*- coding: utf-8 -*-
@v_args(inline=True)
class FEELTransformer(Transformer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def number(self, token):
try:
value = int(token.value)
except ValueError:
value = float(token.value) | return Number(value) | 27 | 2023-11-09 13:47:26+00:00 | 8k |
sivasurend/lyzr | lyzr/chatqa/chatbot.py | [
{
"identifier": "pdf_chat_",
"path": "lyzr/utils/chat_utils.py",
"snippet": "def pdf_chat_(\n input_dir: Optional[str] = None,\n input_files: Optional[List] = None,\n exclude_hidden: bool = True,\n filename_as_id: bool = True,\n recursive: bool = True,\n required_exts: Optional[List[st... | from typing import Union, Optional, List
from llama_index import ServiceContext, VectorStoreIndex
from llama_index.chat_engine.types import BaseChatEngine
from llama_index.embeddings.utils import EmbedType
from lyzr.utils.chat_utils import (
pdf_chat_,
txt_chat_,
docx_chat_,
webpage_chat_,
website_chat_,
youtube_chat_,
) | 3,694 | query_wrapper_prompt: str = None,
embed_model: Union[str, EmbedType] = "default",
llm_params: dict = None,
vector_store_params: dict = None,
service_context_params: dict = None,
chat_engine_params: dict = None,
) -> BaseChatEngine:
return pdf_chat_(
input_dir=input_dir,
input_files=input_files,
exclude_hidden=exclude_hidden,
filename_as_id=filename_as_id,
recursive=recursive,
required_exts=required_exts,
system_prompt=system_prompt,
query_wrapper_prompt=query_wrapper_prompt,
embed_model=embed_model,
llm_params=llm_params,
vector_store_params=vector_store_params,
service_context_params=service_context_params,
chat_engine_params=chat_engine_params,
)
@staticmethod
def docx_chat(
input_dir: Optional[str] = None,
input_files: Optional[List] = None,
exclude_hidden: bool = True,
filename_as_id: bool = True,
recursive: bool = True,
required_exts: Optional[List[str]] = None,
system_prompt: str = None,
query_wrapper_prompt: str = None,
embed_model: Union[str, EmbedType] = "default",
llm_params: dict = None,
vector_store_params: dict = None,
service_context_params: dict = None,
chat_engine_params: dict = None,
) -> BaseChatEngine:
return docx_chat_(
input_dir=input_dir,
input_files=input_files,
exclude_hidden=exclude_hidden,
filename_as_id=filename_as_id,
recursive=recursive,
required_exts=required_exts,
system_prompt=system_prompt,
query_wrapper_prompt=query_wrapper_prompt,
embed_model=embed_model,
llm_params=llm_params,
vector_store_params=vector_store_params,
service_context_params=service_context_params,
chat_engine_params=chat_engine_params,
)
@staticmethod
def txt_chat(
input_dir: Optional[str] = None,
input_files: Optional[List] = None,
exclude_hidden: bool = True,
filename_as_id: bool = True,
recursive: bool = True,
required_exts: Optional[List[str]] = None,
system_prompt: str = None,
query_wrapper_prompt: str = None,
embed_model: Union[str, EmbedType] = "default",
llm_params: dict = None,
vector_store_params: dict = None,
service_context_params: dict = None,
chat_engine_params: dict = None,
) -> BaseChatEngine:
return txt_chat_(
input_dir=input_dir,
input_files=input_files,
exclude_hidden=exclude_hidden,
filename_as_id=filename_as_id,
recursive=recursive,
required_exts=required_exts,
system_prompt=system_prompt,
query_wrapper_prompt=query_wrapper_prompt,
embed_model=embed_model,
llm_params=llm_params,
vector_store_params=vector_store_params,
service_context_params=service_context_params,
chat_engine_params=chat_engine_params,
)
@staticmethod
def webpage_chat(
url: Optional[str] = None,
system_prompt: str = None,
query_wrapper_prompt: str = None,
embed_model: Union[str, EmbedType] = "default",
llm_params: dict = None,
vector_store_params: dict = None,
service_context_params: dict = None,
chat_engine_params: dict = None,
) -> BaseChatEngine:
return webpage_chat_(
url=url,
system_prompt=system_prompt,
query_wrapper_prompt=query_wrapper_prompt,
embed_model=embed_model,
llm_params=llm_params,
vector_store_params=vector_store_params,
service_context_params=service_context_params,
chat_engine_params=chat_engine_params,
)
@staticmethod
def website_chat(
url: Optional[str] = None,
system_prompt: str = None,
query_wrapper_prompt: str = None,
embed_model: Union[str, EmbedType] = "default",
llm_params: dict = None,
vector_store_params: dict = None,
service_context_params: dict = None,
chat_engine_params: dict = None,
) -> BaseChatEngine:
|
class ChatBot:
def __init__(self) -> None:
return None
@staticmethod
def from_instances(
vector_store_index: VectorStoreIndex, service_context: ServiceContext, **kwargs
) -> BaseChatEngine:
return vector_store_index.as_chat_engine(
service_context=service_context, **kwargs
)
@staticmethod
def pdf_chat(
input_dir: Optional[str] = None,
input_files: Optional[List] = None,
exclude_hidden: bool = True,
filename_as_id: bool = True,
recursive: bool = True,
required_exts: Optional[List[str]] = None,
system_prompt: str = None,
query_wrapper_prompt: str = None,
embed_model: Union[str, EmbedType] = "default",
llm_params: dict = None,
vector_store_params: dict = None,
service_context_params: dict = None,
chat_engine_params: dict = None,
) -> BaseChatEngine:
return pdf_chat_(
input_dir=input_dir,
input_files=input_files,
exclude_hidden=exclude_hidden,
filename_as_id=filename_as_id,
recursive=recursive,
required_exts=required_exts,
system_prompt=system_prompt,
query_wrapper_prompt=query_wrapper_prompt,
embed_model=embed_model,
llm_params=llm_params,
vector_store_params=vector_store_params,
service_context_params=service_context_params,
chat_engine_params=chat_engine_params,
)
@staticmethod
def docx_chat(
input_dir: Optional[str] = None,
input_files: Optional[List] = None,
exclude_hidden: bool = True,
filename_as_id: bool = True,
recursive: bool = True,
required_exts: Optional[List[str]] = None,
system_prompt: str = None,
query_wrapper_prompt: str = None,
embed_model: Union[str, EmbedType] = "default",
llm_params: dict = None,
vector_store_params: dict = None,
service_context_params: dict = None,
chat_engine_params: dict = None,
) -> BaseChatEngine:
return docx_chat_(
input_dir=input_dir,
input_files=input_files,
exclude_hidden=exclude_hidden,
filename_as_id=filename_as_id,
recursive=recursive,
required_exts=required_exts,
system_prompt=system_prompt,
query_wrapper_prompt=query_wrapper_prompt,
embed_model=embed_model,
llm_params=llm_params,
vector_store_params=vector_store_params,
service_context_params=service_context_params,
chat_engine_params=chat_engine_params,
)
@staticmethod
def txt_chat(
input_dir: Optional[str] = None,
input_files: Optional[List] = None,
exclude_hidden: bool = True,
filename_as_id: bool = True,
recursive: bool = True,
required_exts: Optional[List[str]] = None,
system_prompt: str = None,
query_wrapper_prompt: str = None,
embed_model: Union[str, EmbedType] = "default",
llm_params: dict = None,
vector_store_params: dict = None,
service_context_params: dict = None,
chat_engine_params: dict = None,
) -> BaseChatEngine:
return txt_chat_(
input_dir=input_dir,
input_files=input_files,
exclude_hidden=exclude_hidden,
filename_as_id=filename_as_id,
recursive=recursive,
required_exts=required_exts,
system_prompt=system_prompt,
query_wrapper_prompt=query_wrapper_prompt,
embed_model=embed_model,
llm_params=llm_params,
vector_store_params=vector_store_params,
service_context_params=service_context_params,
chat_engine_params=chat_engine_params,
)
@staticmethod
def webpage_chat(
url: Optional[str] = None,
system_prompt: str = None,
query_wrapper_prompt: str = None,
embed_model: Union[str, EmbedType] = "default",
llm_params: dict = None,
vector_store_params: dict = None,
service_context_params: dict = None,
chat_engine_params: dict = None,
) -> BaseChatEngine:
return webpage_chat_(
url=url,
system_prompt=system_prompt,
query_wrapper_prompt=query_wrapper_prompt,
embed_model=embed_model,
llm_params=llm_params,
vector_store_params=vector_store_params,
service_context_params=service_context_params,
chat_engine_params=chat_engine_params,
)
@staticmethod
def website_chat(
url: Optional[str] = None,
system_prompt: str = None,
query_wrapper_prompt: str = None,
embed_model: Union[str, EmbedType] = "default",
llm_params: dict = None,
vector_store_params: dict = None,
service_context_params: dict = None,
chat_engine_params: dict = None,
) -> BaseChatEngine: | return website_chat_( | 4 | 2023-11-07 14:52:08+00:00 | 8k |
siyuanseever/llama2Rnn.c | train.py | [
{
"identifier": "Transformer",
"path": "model.py",
"snippet": "class Transformer(nn.Module):\n last_loss: Optional[torch.Tensor]\n\n def __init__(self, params: ModelArgs):\n super().__init__()\n self.params = params\n self.vocab_size = params.vocab_size\n self.n_layers ... | import math
import os
import time
import torch
import wandb
from contextlib import nullcontext
from datetime import datetime
from functools import partial
from tqdm import tqdm
from model import Transformer, ModelArgs
from torch.distributed import destroy_process_group, init_process_group
from torch.nn.parallel import DistributedDataParallel as DDP
from tinystories import Task
from ultrachat import Task
from wikipedia_en import Task
from wiki_zh import Task
from wiki import Task
from zhihu import Task
from jiiov import Task
from datatask import Task
from dataset import Task
from data_generator import Task | 5,049 | dtype = "float32" # float32|bfloat16|float16
compile = True # use PyTorch 2.0 to compile the model to be faster
# test_model
test_model = False
# fixing some hyperparams to sensible defaults
lr_decay_iters = max_iters # should be ~= max_iters per Chinchilla
min_lr = 0.0 # minimum learning rate, should be ~= learning_rate/10 per Chinchilla
# -----------------------------------------------------------------------------
config_keys = [
k
for k, v in globals().items()
if not k.startswith("_") and isinstance(v, (int, float, bool, str))
]
exec(open("configurator.py").read()) # overrides from command line or config file
config = {k: globals()[k] for k in config_keys} # will be useful for logging
# -----------------------------------------------------------------------------
# model init
model_args = dict(
dim=dim,
n_layers=n_layers,
n_heads=n_heads,
n_kv_heads=n_kv_heads,
vocab_size=vocab_size,
multiple_of=multiple_of,
max_seq_len=max_seq_len,
extend_seq_len=max_seq_len,
extend_method=extend_method,
dropout=dropout,
attention_type=attention_type,
memseqlen=memseqlen,
do_wm=do_wm,
do_memory_ffn=do_memory_ffn,
memory_norm=memory_norm,
train_orimem=train_orimem,
reuse_kv=reuse_kv,
update_memory=update_memory,
use_saved_mem=bool(use_saved_mem),
key_norm=key_norm,
) # start with model_args from command line
# validating checks
assert vocab_source in ["llama2", "custom"]
assert vocab_source == "custom" or vocab_size == 32000, "The vocab from Meta has 32K tokens"
# various inits, derived attributes, I/O setup
ddp = int(os.environ.get("RANK", -1)) != -1 # is this a ddp run?
if ddp:
init_process_group(backend="nccl")
ddp_rank = int(os.environ["RANK"])
ddp_local_rank = int(os.environ["LOCAL_RANK"])
ddp_world_size = int(os.environ["WORLD_SIZE"])
device = f"cuda:{ddp_local_rank}"
torch.cuda.set_device(device)
master_process = ddp_rank == 0 # this process will do logging, checkpointing etc.
seed_offset = ddp_rank # each process gets a different seed
# world_size number of processes will be training simultaneously, so we can scale
# down the desired gradient accumulation iterations per process proportionally
assert gradient_accumulation_steps % ddp_world_size == 0
gradient_accumulation_steps //= ddp_world_size
else:
# if not ddp, we are running on a single gpu, and one process
master_process = True
seed_offset = 0
ddp_world_size = 1
tokens_per_iter = gradient_accumulation_steps * ddp_world_size * batch_size * max_seq_len
if master_process:
print(f"tokens per iteration will be: {tokens_per_iter:,}")
print(f"breaks down as: {gradient_accumulation_steps} grad accum steps * {ddp_world_size} processes * {batch_size} batch size * {max_seq_len} max seq len")
if master_process:
os.makedirs(out_dir, exist_ok=True)
torch.manual_seed(1337 + seed_offset)
torch.backends.cuda.matmul.allow_tf32 = True # allow tf32 on matmul
torch.backends.cudnn.allow_tf32 = True # allow tf32 on cudnn
device_type = "cuda" if "cuda" in device else "cpu" # for later use in torch.autocast
# note: float16 data type will automatically use a GradScaler
ptdtype = {"float32": torch.float32, "bfloat16": torch.bfloat16, "float16": torch.float16}[dtype]
ctx = (
nullcontext()
if device_type == "cpu"
else torch.amp.autocast(device_type=device_type, dtype=ptdtype)
)
# task-specific setup
num_workers = os.cpu_count() // ddp_world_size - 1
num_workers = 0
print(f'task num workers = {num_workers}')
task_args = dict(
batch_size=batch_size,
max_seq_len=max_seq_len,
vocab_size=vocab_size,
vocab_source=vocab_source,
device=device,
num_workers = num_workers,
)
if task_name == 'tinystories':
elif task_name == 'ultrachat':
elif task_name == 'wikipedia_en':
elif task_name == 'wiki_zh':
elif task_name == 'wiki':
elif task_name == 'zhihu':
elif task_name == 'jiiov':
elif task_name.startswith('all'):
task_args["tasks"] = tasks
elif task_name.startswith('ds_'):
tasks = task_name[len('ds_'):].split('_')
task_args["tasks"] = tasks
elif task_name.startswith('dg_'):
tasks = task_name[len('dg_'):].split('_')
task_args["tasks"] = tasks
iter_batches = partial(Task.iter_batches, **task_args)
# init these up here, can override if init_from='resume' (i.e. from a checkpoint)
iter_num = 0
best_val_loss = 1e9
if init_from == "scratch":
# init a new model from scratch
print("Initializing a new model from scratch")
gptconf = ModelArgs(**model_args)
| """
This training script can be run both on a single gpu in debug mode,
and also in a larger training run with distributed data parallel (ddp).
To run on a single GPU small debug run, example:
$ python -m train.py --compile=False --eval_iters=10 --batch_size=8
To run with DDP on 4 gpus on 1 node, example:
$ torchrun --standalone --nproc_per_node=4 train.py
To run with DDP on 4 gpus across 2 nodes, example:
- Run on the first (master) node with example IP 123.456.123.456:
$ torchrun --nproc_per_node=8 --nnodes=2 --node_rank=0 --master_addr=123.456.123.456 --master_port=1234 train.py
- Run on the worker node:
$ torchrun --nproc_per_node=8 --nnodes=2 --node_rank=1 --master_addr=123.456.123.456 --master_port=1234 train.py
(If your cluster does not have Infiniband interconnect prepend NCCL_IB_DISABLE=1)
"""
# -----------------------------------------------------------------------------
# I/O
out_dir = "out"
eval_interval = 2000
log_interval = 1
eval_iters = 100
eval_only = False # if True, script exits right after the first eval
eval_last = False
repeat_tokens = False
always_save_checkpoint = False # if True, always save a checkpoint after each eval
init_from = "scratch" # 'scratch' or 'resume'
# wandb logging
wandb_log = False # disabled by default
wandb_project = "llamac"
wandb_run_name = "run" + datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
# data
tasks = []
task_name = "tinystories"
batch_size = 128 # if gradient_accumulation_steps > 1, this is the micro-batch size
max_seq_len = 256
extend_method = "extrapolation"
vocab_source = "llama2" # llama2|custom; use Lllama 2 vocab from Meta, or custom trained
vocab_size = 32000 # the Llama 2 tokenizer has 32K tokens
# model
dim = 288
n_layers = 6
n_heads = 6
n_kv_heads = 6
multiple_of = 32
dropout = 0.0
# extrapolation
key_norm = False
# memory
attention_type = "attention"
memseqlen = 128
do_wm = False
do_memory_ffn = False
memory_norm = False
train_orimem = False
reuse_kv = False
save_memory = ""
update_memory = False
use_saved_mem = ""
# adamw optimizer
gradient_accumulation_steps = 4 # used to simulate larger batch sizes
learning_rate = 5e-4 # max learning rate
max_iters = 100000 # total number of training iterations
weight_decay = 1e-1
beta1 = 0.9
beta2 = 0.95
grad_clip = 1.0 # clip gradients at this value, or disable if == 0.0
# learning rate decay settings
decay_lr = True # whether to decay the learning rate
warmup_iters = 1000 # how many steps to warm up for
# system
# device = "cuda" # examples: 'cpu', 'cuda', 'cuda:0', 'cuda:1' etc., or try 'mps' on macbooks
device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = "float32" # float32|bfloat16|float16
compile = True # use PyTorch 2.0 to compile the model to be faster
# test_model
test_model = False
# fixing some hyperparams to sensible defaults
lr_decay_iters = max_iters # should be ~= max_iters per Chinchilla
min_lr = 0.0 # minimum learning rate, should be ~= learning_rate/10 per Chinchilla
# -----------------------------------------------------------------------------
config_keys = [
k
for k, v in globals().items()
if not k.startswith("_") and isinstance(v, (int, float, bool, str))
]
exec(open("configurator.py").read()) # overrides from command line or config file
config = {k: globals()[k] for k in config_keys} # will be useful for logging
# -----------------------------------------------------------------------------
# model init
model_args = dict(
dim=dim,
n_layers=n_layers,
n_heads=n_heads,
n_kv_heads=n_kv_heads,
vocab_size=vocab_size,
multiple_of=multiple_of,
max_seq_len=max_seq_len,
extend_seq_len=max_seq_len,
extend_method=extend_method,
dropout=dropout,
attention_type=attention_type,
memseqlen=memseqlen,
do_wm=do_wm,
do_memory_ffn=do_memory_ffn,
memory_norm=memory_norm,
train_orimem=train_orimem,
reuse_kv=reuse_kv,
update_memory=update_memory,
use_saved_mem=bool(use_saved_mem),
key_norm=key_norm,
) # start with model_args from command line
# validating checks
assert vocab_source in ["llama2", "custom"]
assert vocab_source == "custom" or vocab_size == 32000, "The vocab from Meta has 32K tokens"
# various inits, derived attributes, I/O setup
ddp = int(os.environ.get("RANK", -1)) != -1 # is this a ddp run?
if ddp:
init_process_group(backend="nccl")
ddp_rank = int(os.environ["RANK"])
ddp_local_rank = int(os.environ["LOCAL_RANK"])
ddp_world_size = int(os.environ["WORLD_SIZE"])
device = f"cuda:{ddp_local_rank}"
torch.cuda.set_device(device)
master_process = ddp_rank == 0 # this process will do logging, checkpointing etc.
seed_offset = ddp_rank # each process gets a different seed
# world_size number of processes will be training simultaneously, so we can scale
# down the desired gradient accumulation iterations per process proportionally
assert gradient_accumulation_steps % ddp_world_size == 0
gradient_accumulation_steps //= ddp_world_size
else:
# if not ddp, we are running on a single gpu, and one process
master_process = True
seed_offset = 0
ddp_world_size = 1
tokens_per_iter = gradient_accumulation_steps * ddp_world_size * batch_size * max_seq_len
if master_process:
print(f"tokens per iteration will be: {tokens_per_iter:,}")
print(f"breaks down as: {gradient_accumulation_steps} grad accum steps * {ddp_world_size} processes * {batch_size} batch size * {max_seq_len} max seq len")
if master_process:
os.makedirs(out_dir, exist_ok=True)
torch.manual_seed(1337 + seed_offset)
torch.backends.cuda.matmul.allow_tf32 = True # allow tf32 on matmul
torch.backends.cudnn.allow_tf32 = True # allow tf32 on cudnn
device_type = "cuda" if "cuda" in device else "cpu" # for later use in torch.autocast
# note: float16 data type will automatically use a GradScaler
ptdtype = {"float32": torch.float32, "bfloat16": torch.bfloat16, "float16": torch.float16}[dtype]
ctx = (
nullcontext()
if device_type == "cpu"
else torch.amp.autocast(device_type=device_type, dtype=ptdtype)
)
# task-specific setup
num_workers = os.cpu_count() // ddp_world_size - 1
num_workers = 0
print(f'task num workers = {num_workers}')
task_args = dict(
batch_size=batch_size,
max_seq_len=max_seq_len,
vocab_size=vocab_size,
vocab_source=vocab_source,
device=device,
num_workers = num_workers,
)
if task_name == 'tinystories':
elif task_name == 'ultrachat':
elif task_name == 'wikipedia_en':
elif task_name == 'wiki_zh':
elif task_name == 'wiki':
elif task_name == 'zhihu':
elif task_name == 'jiiov':
elif task_name.startswith('all'):
task_args["tasks"] = tasks
elif task_name.startswith('ds_'):
tasks = task_name[len('ds_'):].split('_')
task_args["tasks"] = tasks
elif task_name.startswith('dg_'):
tasks = task_name[len('dg_'):].split('_')
task_args["tasks"] = tasks
iter_batches = partial(Task.iter_batches, **task_args)
# init these up here, can override if init_from='resume' (i.e. from a checkpoint)
iter_num = 0
best_val_loss = 1e9
if init_from == "scratch":
# init a new model from scratch
print("Initializing a new model from scratch")
gptconf = ModelArgs(**model_args) | model = Transformer(gptconf) | 0 | 2023-11-07 09:36:35+00:00 | 8k |
WolfgangFahl/dcm | dcm/dcm_assessment.py | [
{
"identifier": "Achievement",
"path": "dcm/dcm_core.py",
"snippet": "class Achievement:\n \"\"\"\n Class representing an individual's achievement level for a specific competence facet.\n\n Attributes:\n path (str): The path in the CompetenceTree, used to derive tree_id, aspect_id, and f... | from ngwidgets.progress import NiceguiProgressbar
from ngwidgets.webserver import NiceGuiWebserver
from ngwidgets.widgets import Link
from nicegui import ui
from dcm.dcm_core import (
Achievement,
CompetenceFacet,
CompetenceArea,
CompetenceTree,
DynamicCompetenceMap,
Learner,
) | 5,801 | self.learner = learner
self.achievement_index = 0
# do we need setup the achievements?
if self.learner.achievements is None:
self.learner.achievements = []
self.setup_achievements()
self.total = len(self.learner.achievements)
def clear(self):
"""
clear the ui
"""
self.container.clear()
@property
def current_achievement(self) -> Achievement:
if self.achievement_index < 0 or self.achievement_index > len(
self.learner.achievements
):
raise ValueError(f"invalid achievement index {self.achievement_index}")
achievement = self.learner.achievements[self.achievement_index]
return achievement
def setup_achievements(self):
"""
Setup achievements based on the competence tree.
This method iterates over the competence aspects and their facets,
constructs a path for each facet, and creates an Achievement instance
based on the path. These achievements are then added to the learner's
achievements list.
"""
for aspect in self.competence_tree.aspects:
for area in aspect.areas:
area_path: str = f"{self.competence_tree.id}/{aspect.id}"
self.add_achievement(area_path)
for facet in area.facets:
# Construct the path for the facet
facet_path=f"{area_path}/{facet.id}"
self.add_achievement(facet_path)
def add_achievement(self,path):
# Create a new Achievement instance with the constructed path
new_achievement = Achievement(
path=path,
)
self.learner.add_achievement(new_achievement)
def get_index_str(self) -> str:
index_str = f"{self.achievement_index+1:2}/{self.total:2}"
return index_str
def setup_ui(self):
"""
display my competence Tree elements
"""
with ui.grid(columns=1).classes("w-full") as self.container:
self.progress_bar = NiceguiProgressbar(
total=self.total, desc="self assessment", unit="facets"
)
self.progress_bar.reset()
with ui.row():
ui.button("", icon="arrow_back", on_click=lambda _args: self.step(-1))
ui.button("", icon="arrow_forward", on_click=lambda _args: self.step(1))
with ui.row():
with ui.card() as self.achievement_view:
self.index_view = ui.label(self.get_index_str())
self.link_view = ui.html()
self.markdown_view = ui.markdown()
self.button_row = ButtonRow(
self, self.competence_tree, self.current_achievement
)
def show_progress(self):
"""
Update the progress bar based on the
number of achievements with a non-None level value.
"""
count = sum(
1
for achievement in self.learner.achievements
if achievement.level is not None
)
self.progress_bar.total = self.total
self.progress_bar.update_value(count)
async def step(self, step: int = 0):
self.update_achievement_view(step)
def update_achievement_view(self, step: int = 0):
"""
display the active achievement as the step indicates
"""
self.show_progress()
self.webserver.render_dcm(self.dcm, self.learner, clear_assessment=False)
if self.achievement_index + step < 0:
ui.notify("first achievement reached!")
step = 0
if self.achievement_index + step < len(self.learner.achievements):
self.achievement_index += step
self.index_view.text = self.get_index_str()
achievement = self.current_achievement
self.button_row.achievement = achievement
self.button_row.set_button_states(achievement)
competence_element = self.competence_tree.lookup_by_path(achievement.path)
if not competence_element:
ui.notify("invalid path: {achievement.path}")
self.markdown_view.content = f"⚠️ {achievement.path}"
else:
if hasattr(competence_element, "path"):
if competence_element.url:
link = Link.create(
competence_element.url, competence_element.path
)
else:
link = competence_element.path
else:
link = "⚠️ - competence element path missing"
self.link_view.content = link
description = competence_element.description or ""
| """
Created on 2024-01-10
@author: wf
"""
class ButtonRow:
"""
A button row for selecting competence levels
to document achievements from a CompetenceTree.
"""
def __init__(
self,
assessment: "Assessment",
competence_tree: CompetenceTree,
achievement: Achievement = None,
):
"""
Construct a button row for the competence levels of the given CompetenceTree.
Args:
assessment (Assessment): The Assessment instance.
competence_tree (CompetenceTree): The Competence Tree to display buttons for.
achievement (Achievement): The current achievement of the learner.
"""
self.assessment = assessment
self.competence_tree = competence_tree
self.achievement = achievement
self.setup_buttons()
self.set_button_states(achievement)
def setup_buttons(self):
"""
Create buttons for each competence level defined in the CompetenceTree.
"""
self.buttons = {}
with ui.row() as self.row:
for level in self.competence_tree.levels:
button = ui.button(
level.name,
icon=level.icon,
color=level.color_code,
on_click=lambda _args, l=level.level: self.handle_selection(l),
).tooltip(level.description)
self.buttons[level.level] = button
def set_button_states(self, achievement: Achievement):
"""
Set the state of buttons based on the given achievement.
Args:
achievement (Achievement): The current achievement of the learner.
"""
# If no achievement or level is set, enable all buttons
if achievement is None or achievement.level is None:
for button in self.buttons.values():
button.enable()
button.visible = True
else:
# Enable only the button corresponding to the current level and disable others
for level, button in self.buttons.items():
if level == achievement.level:
button.enable()
button.visible = True
else:
button.disable()
button.visible = False
def handle_selection(self, selected_level: int):
"""
handle the selected level
Args:
selected_level(int): the selected level
"""
# Check if the same level is selected again,
# then reset the selection
if self.achievement.level == selected_level:
self.achievement.level = None
else:
self.achievement.level = selected_level
self.set_button_states(self.achievement)
# refresh the ui
self.row.update()
# show achievement_view
step = 1 if self.achievement.level else 0
self.assessment.update_achievement_view(step)
class Assessment:
"""
Assessment for CompetenceTree
"""
def __init__(
self,
webserver: NiceGuiWebserver,
dcm: DynamicCompetenceMap,
learner: Learner,
debug: bool = False,
):
"""
initialize the assessment
Args:
webserver(NiceguiWebServer): the webserver context
dcm(DynamicCompetenceMap): the competence map
learner(Learner): the learner to get the self assessment for
debug(bool): if True show debugging information
"""
self.webserver = webserver
self.debug = debug
self.reset(dcm=dcm, learner=learner)
self.setup_ui()
def reset(
self,
dcm: DynamicCompetenceMap,
learner: Learner,
):
"""
(re)set the assessment
Args:
webserver(NiceguiWebServer): the webserver context
dcm(DynamicCompetenceMap): the competence map
learner(Learner): the learner to get the self assessment for
"""
self.dcm = dcm
self.competence_tree = dcm.competence_tree
self.learner = learner
self.achievement_index = 0
# do we need setup the achievements?
if self.learner.achievements is None:
self.learner.achievements = []
self.setup_achievements()
self.total = len(self.learner.achievements)
def clear(self):
"""
clear the ui
"""
self.container.clear()
@property
def current_achievement(self) -> Achievement:
if self.achievement_index < 0 or self.achievement_index > len(
self.learner.achievements
):
raise ValueError(f"invalid achievement index {self.achievement_index}")
achievement = self.learner.achievements[self.achievement_index]
return achievement
def setup_achievements(self):
"""
Setup achievements based on the competence tree.
This method iterates over the competence aspects and their facets,
constructs a path for each facet, and creates an Achievement instance
based on the path. These achievements are then added to the learner's
achievements list.
"""
for aspect in self.competence_tree.aspects:
for area in aspect.areas:
area_path: str = f"{self.competence_tree.id}/{aspect.id}"
self.add_achievement(area_path)
for facet in area.facets:
# Construct the path for the facet
facet_path=f"{area_path}/{facet.id}"
self.add_achievement(facet_path)
def add_achievement(self,path):
# Create a new Achievement instance with the constructed path
new_achievement = Achievement(
path=path,
)
self.learner.add_achievement(new_achievement)
def get_index_str(self) -> str:
index_str = f"{self.achievement_index+1:2}/{self.total:2}"
return index_str
def setup_ui(self):
"""
display my competence Tree elements
"""
with ui.grid(columns=1).classes("w-full") as self.container:
self.progress_bar = NiceguiProgressbar(
total=self.total, desc="self assessment", unit="facets"
)
self.progress_bar.reset()
with ui.row():
ui.button("", icon="arrow_back", on_click=lambda _args: self.step(-1))
ui.button("", icon="arrow_forward", on_click=lambda _args: self.step(1))
with ui.row():
with ui.card() as self.achievement_view:
self.index_view = ui.label(self.get_index_str())
self.link_view = ui.html()
self.markdown_view = ui.markdown()
self.button_row = ButtonRow(
self, self.competence_tree, self.current_achievement
)
def show_progress(self):
"""
Update the progress bar based on the
number of achievements with a non-None level value.
"""
count = sum(
1
for achievement in self.learner.achievements
if achievement.level is not None
)
self.progress_bar.total = self.total
self.progress_bar.update_value(count)
async def step(self, step: int = 0):
self.update_achievement_view(step)
def update_achievement_view(self, step: int = 0):
"""
display the active achievement as the step indicates
"""
self.show_progress()
self.webserver.render_dcm(self.dcm, self.learner, clear_assessment=False)
if self.achievement_index + step < 0:
ui.notify("first achievement reached!")
step = 0
if self.achievement_index + step < len(self.learner.achievements):
self.achievement_index += step
self.index_view.text = self.get_index_str()
achievement = self.current_achievement
self.button_row.achievement = achievement
self.button_row.set_button_states(achievement)
competence_element = self.competence_tree.lookup_by_path(achievement.path)
if not competence_element:
ui.notify("invalid path: {achievement.path}")
self.markdown_view.content = f"⚠️ {achievement.path}"
else:
if hasattr(competence_element, "path"):
if competence_element.url:
link = Link.create(
competence_element.url, competence_element.path
)
else:
link = competence_element.path
else:
link = "⚠️ - competence element path missing"
self.link_view.content = link
description = competence_element.description or "" | if isinstance(competence_element, CompetenceArea): | 2 | 2023-11-06 09:24:24+00:00 | 8k |
weiwei-cool/FanQieNovelDownloadOnWeb | Api/views.py | [
{
"identifier": "Fanqie",
"path": "tools/Fanqie.py",
"snippet": "class FanqieNovel:\n def __init__(self, url, mode):\n def __str__(self):\n def parse_url(self, url: str) -> str:"
},
{
"identifier": "DownloadNovel",
"path": "tools/DownloadNovel.py",
"snippet": "class DownloadNove... | import os
import tools
import json
from django.http import JsonResponse
from tools import Fanqie, DownloadNovel
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from .models import History | 4,160 |
# 下载的小说集合
download_object = []
@csrf_exempt # 为了允许跨域请求,可选
@require_POST # 确保只接受POST请求,可选
@tools.logger.catch # 获取详细的报错信息
def download(request): # 下载接口
global download_object
if request.method == 'POST':
try:
# 获取url数据
tools.logger.info('正在获取url数据……') # 打印日志
data = json.loads(request.body.decode('utf-8'))
urls = data.get('urls', [])
# 初步去重
urls = list(set(urls))
tools.logger.info(f'已获取urls为:{urls}')
# 获取下载方式
format_ = data.get('format', 'txt')
tools.logger.info(f'下载方式为{format_}')
# 获取书本信息
books = []
|
# 下载的小说集合
download_object = []
@csrf_exempt # 为了允许跨域请求,可选
@require_POST # 确保只接受POST请求,可选
@tools.logger.catch # 获取详细的报错信息
def download(request): # 下载接口
global download_object
if request.method == 'POST':
try:
# 获取url数据
tools.logger.info('正在获取url数据……') # 打印日志
data = json.loads(request.body.decode('utf-8'))
urls = data.get('urls', [])
# 初步去重
urls = list(set(urls))
tools.logger.info(f'已获取urls为:{urls}')
# 获取下载方式
format_ = data.get('format', 'txt')
tools.logger.info(f'下载方式为{format_}')
# 获取书本信息
books = [] | [books.append(Fanqie.FanqieNovel(url, format_)) for url in urls] | 0 | 2023-11-05 09:35:20+00:00 | 8k |
StoneMoe/ASub | app/ui/views/project_view.py | [
{
"identifier": "Project",
"path": "app/core/models/project.py",
"snippet": "class Project:\r\n path: str # 工程目录(相对位置)\r\n name: str # 工程名称\r\n\r\n def __init__(self, name: str, existed_err=False):\r\n self.name = name\r\n self.path = os.path.join(Core.PROJ_DIR, name)\r\n ... | import os
from typing import Optional
from PyQt5.QtCore import pyqtSignal, QPoint, Qt
from PyQt5.QtWidgets import QFrame, QVBoxLayout, QHBoxLayout, QAction
from qfluentwidgets import PushButton, FluentIcon, RoundMenu, ToolButton, MessageBox, StateToolTip
from app.core.models.project import Project, TranscribeOpt
from app.core.utils.generic import info
from app.ui.components.label import AutoLabel
from app.ui.config import cfg
from app.ui.const import CONTAINER_MARGINS
from app.ui.utils import run_in_thread, clear_layout, open_folder
from app.ui.windows.subtitle_window import SubtitleWindow
| 4,301 |
def set_project(self, project: Project):
self.project = project
self.label_title.setText(self.project.name)
self.label_title.setToolTip(self.project.name)
self._reload_subtitle_list()
def _init_layout(self):
self.layout_title.addWidget(self.label_title)
self.layout_title.addWidget(self.btn_manage)
self.layout.addLayout(self.layout_title)
self.layout.addLayout(self.layout_subtitles)
self.layout.addStretch(1)
self.layout.addWidget(self.btn_transcribe)
self.layout.setContentsMargins(*CONTAINER_MARGINS)
def _init_signal(self):
self.sig_subtitle_list_loaded.connect(self._on_subtitle_list_loaded)
self.sig_transcribe_running.connect(self._on_transcribe_running_changed)
def _on_transcribe_running_changed(self, running: bool):
if self.state_tooltip is None:
self.state_tooltip = StateToolTip('正在听写中', '请耐心等待', self)
self.state_tooltip.closeButton.hide()
if running:
self.btn_transcribe.setDisabled(True)
self.state_tooltip.move(10, 10)
self.state_tooltip.show()
else:
self.btn_transcribe.setDisabled(False)
self.state_tooltip.setState(True)
self.state_tooltip.setTitle('听写完成!')
self.state_tooltip.setContent('')
self.state_tooltip = None
def _on_subtitle_list_loaded(self, filenames: list):
clear_layout(self.layout_subtitles)
for filename in filenames:
layout = QHBoxLayout(self)
label = AutoLabel(filename, self, Qt.ElideLeft)
label.setToolTip(filename)
btn_translate = ToolButton(FluentIcon.EDIT, self)
btn_translate.setToolTip('编辑')
btn_translate.clicked.connect(self._on_subtitle_edit_clicked(filename))
btn_delete = ToolButton(FluentIcon.DELETE, self)
btn_delete.setToolTip('删除')
btn_delete.clicked.connect(self._on_subtitle_delete_clicked(filename))
layout.addWidget(label)
layout.addWidget(btn_translate)
layout.addWidget(btn_delete)
self.layout_subtitles.addLayout(layout)
def _reload_subtitle_list(self):
self.sig_subtitle_list_loaded.emit(
[
filename
for filename in os.listdir(self.project.path)
if filename.endswith('.srt') or filename.endswith('.ass')
]
)
def _on_subtitle_edit_clicked(self, filename):
def f():
target_file = os.path.join(self.project.path, filename)
edit_win = SubtitleWindow(target_file)
edit_win.exec_()
return f
def _on_subtitle_delete_clicked(self, filename):
def f():
target_file = os.path.join(self.project.path, filename)
if MessageBox('删除确认', f'真的要删除 {target_file} 吗?', self.window()).exec():
os.remove(target_file)
self._reload_subtitle_list()
return f
def _on_btn_manage_clicked(self, pos):
menu = RoundMenu(parent=self)
act_open_folder = QAction(FluentIcon.FOLDER.icon(), '打开文件夹')
act_archive = QAction(FluentIcon.SAVE.icon(), '归档')
act_clear_srt = QAction(FluentIcon.DELETE.icon(), '删除所有 SRT 文件')
act_clear_ass = QAction(FluentIcon.DELETE.icon(), '删除所有 ASS 文件')
act_delete_proj = QAction(FluentIcon.DELETE.icon(), '删除该项目')
act_open_folder.triggered.connect(lambda: open_folder(self.project.path))
act_archive.triggered.connect(lambda: MessageBox('要归档吗?', '这个功能还没做', self.window()).exec())
act_clear_srt.triggered.connect(lambda: print('这个功能还没做'))
act_clear_ass.triggered.connect(lambda: print('这个功能还没做'))
act_delete_proj.triggered.connect(self._on_act_del_proj)
menu.addActions([
act_open_folder,
act_archive,
])
menu.addSeparator()
menu.addActions([
act_clear_srt,
act_clear_ass,
])
menu.addSeparator()
menu.addAction(act_delete_proj)
# show menu
menu.exec(pos, ani=True)
def _on_act_del_proj(self):
if MessageBox('删除确认', '真的要删除吗?', self.window()).exec():
self.project.delete()
self.window().reload_projects()
|
class ProjectView(QFrame):
sig_subtitle_list_loaded = pyqtSignal(list)
sig_transcribe_running = pyqtSignal(bool)
def __init__(self, parent=None):
super().__init__(parent=parent)
self.setObjectName('proj-view')
self.project: Optional[Project] = None
self.state_tooltip = None
self.layout = QVBoxLayout(self)
self.layout_title = QHBoxLayout(self)
self.layout_subtitles = QVBoxLayout(self)
self.label_title = AutoLabel('<Loading>', self, Qt.ElideMiddle)
self.label_title.setObjectName('ViewTitle')
self.btn_manage = ToolButton(FluentIcon.MORE, self)
self.btn_manage.clicked.connect(
lambda: self._on_btn_manage_clicked(
self.btn_manage.mapToGlobal(QPoint()) + QPoint(self.btn_manage.width() + 5, 10)
)
)
self.btn_transcribe = PushButton('开始听写', self, FluentIcon.SEND_FILL)
self.btn_transcribe.clicked.connect(self._run_transcribe)
self._init_signal()
self._init_layout()
def set_project(self, project: Project):
self.project = project
self.label_title.setText(self.project.name)
self.label_title.setToolTip(self.project.name)
self._reload_subtitle_list()
def _init_layout(self):
self.layout_title.addWidget(self.label_title)
self.layout_title.addWidget(self.btn_manage)
self.layout.addLayout(self.layout_title)
self.layout.addLayout(self.layout_subtitles)
self.layout.addStretch(1)
self.layout.addWidget(self.btn_transcribe)
self.layout.setContentsMargins(*CONTAINER_MARGINS)
def _init_signal(self):
self.sig_subtitle_list_loaded.connect(self._on_subtitle_list_loaded)
self.sig_transcribe_running.connect(self._on_transcribe_running_changed)
def _on_transcribe_running_changed(self, running: bool):
if self.state_tooltip is None:
self.state_tooltip = StateToolTip('正在听写中', '请耐心等待', self)
self.state_tooltip.closeButton.hide()
if running:
self.btn_transcribe.setDisabled(True)
self.state_tooltip.move(10, 10)
self.state_tooltip.show()
else:
self.btn_transcribe.setDisabled(False)
self.state_tooltip.setState(True)
self.state_tooltip.setTitle('听写完成!')
self.state_tooltip.setContent('')
self.state_tooltip = None
def _on_subtitle_list_loaded(self, filenames: list):
clear_layout(self.layout_subtitles)
for filename in filenames:
layout = QHBoxLayout(self)
label = AutoLabel(filename, self, Qt.ElideLeft)
label.setToolTip(filename)
btn_translate = ToolButton(FluentIcon.EDIT, self)
btn_translate.setToolTip('编辑')
btn_translate.clicked.connect(self._on_subtitle_edit_clicked(filename))
btn_delete = ToolButton(FluentIcon.DELETE, self)
btn_delete.setToolTip('删除')
btn_delete.clicked.connect(self._on_subtitle_delete_clicked(filename))
layout.addWidget(label)
layout.addWidget(btn_translate)
layout.addWidget(btn_delete)
self.layout_subtitles.addLayout(layout)
def _reload_subtitle_list(self):
self.sig_subtitle_list_loaded.emit(
[
filename
for filename in os.listdir(self.project.path)
if filename.endswith('.srt') or filename.endswith('.ass')
]
)
def _on_subtitle_edit_clicked(self, filename):
def f():
target_file = os.path.join(self.project.path, filename)
edit_win = SubtitleWindow(target_file)
edit_win.exec_()
return f
def _on_subtitle_delete_clicked(self, filename):
def f():
target_file = os.path.join(self.project.path, filename)
if MessageBox('删除确认', f'真的要删除 {target_file} 吗?', self.window()).exec():
os.remove(target_file)
self._reload_subtitle_list()
return f
def _on_btn_manage_clicked(self, pos):
menu = RoundMenu(parent=self)
act_open_folder = QAction(FluentIcon.FOLDER.icon(), '打开文件夹')
act_archive = QAction(FluentIcon.SAVE.icon(), '归档')
act_clear_srt = QAction(FluentIcon.DELETE.icon(), '删除所有 SRT 文件')
act_clear_ass = QAction(FluentIcon.DELETE.icon(), '删除所有 ASS 文件')
act_delete_proj = QAction(FluentIcon.DELETE.icon(), '删除该项目')
act_open_folder.triggered.connect(lambda: open_folder(self.project.path))
act_archive.triggered.connect(lambda: MessageBox('要归档吗?', '这个功能还没做', self.window()).exec())
act_clear_srt.triggered.connect(lambda: print('这个功能还没做'))
act_clear_ass.triggered.connect(lambda: print('这个功能还没做'))
act_delete_proj.triggered.connect(self._on_act_del_proj)
menu.addActions([
act_open_folder,
act_archive,
])
menu.addSeparator()
menu.addActions([
act_clear_srt,
act_clear_ass,
])
menu.addSeparator()
menu.addAction(act_delete_proj)
# show menu
menu.exec(pos, ani=True)
def _on_act_del_proj(self):
if MessageBox('删除确认', '真的要删除吗?', self.window()).exec():
self.project.delete()
self.window().reload_projects()
| @run_in_thread
| 6 | 2023-11-07 16:45:43+00:00 | 8k |
openshift/lightspeed-service | ols/app/endpoints/ols.py | [
{
"identifier": "constants",
"path": "ols/app/constants.py",
"snippet": "VALID = \"VALID\"\nINVALID = \"INVALID\"\nYAML = \"YAML\"\nNOYAML = \"NOYAML\"\nSOME_FAILURE = \"some failure\""
},
{
"identifier": "LLMRequest",
"path": "ols/app/models/models.py",
"snippet": "class LLMRequest(Base... | from fastapi import APIRouter, HTTPException, status
from ols.app import constants
from ols.app.models.models import LLMRequest
from ols.app.utils import Utils
from ols.src.docs.docs_summarizer import DocsSummarizer
from ols.src.llms.llm_loader import LLMLoader
from ols.src.query_helpers.question_validator import QuestionValidator
from ols.src.query_helpers.yaml_generator import YamlGenerator
from ols.utils import config | 5,517 | """Handlers for all OLS-related REST API endpoints."""
router = APIRouter(prefix="/ols", tags=["ols"])
@router.post("")
def ols_request(llm_request: LLMRequest) -> LLMRequest:
"""Handle requests for the OLS endpoint.
Args:
llm_request: The request containing a query and conversation ID.
Returns:
Response containing the processed information.
"""
logger = config.default_logger
# Initialize variables
previous_input = None
conversation = llm_request.conversation_id
# Generate a new conversation ID if not provided
if conversation is None:
conversation = Utils.get_suid()
logger.info(f"{conversation} New conversation")
else:
previous_input = config.conversation_cache.get(conversation)
logger.info(f"{conversation} Previous conversation input: {previous_input}")
llm_response = LLMRequest(query=llm_request.query, conversation_id=conversation)
# Log incoming request
logger.info(f"{conversation} Incoming request: {llm_request.query}")
# Validate the query
question_validator = QuestionValidator()
validation_result = question_validator.validate_question(
conversation, llm_request.query
)
if validation_result[0] == constants.INVALID:
logger.info(f"{conversation} Question is not about k8s/ocp, rejecting")
raise HTTPException(
status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
detail={
"response": "Sorry, I can only answer questions about "
"OpenShift and Kubernetes. This does not look "
"like something I know how to handle."
},
)
if validation_result[0] == constants.VALID:
logger.info(f"{conversation} Question is about k8s/ocp")
question_type = validation_result[1]
# check if question type is from known categories
if question_type not in {constants.NOYAML, constants.YAML}:
# not known question type has been detected
logger.error(f"Unknown question type {question_type}")
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail={"response": "Internal server error. Please try again."},
)
if question_type == constants.NOYAML:
logger.info(
f"{conversation} Question is not about yaml, sending for generic info"
)
# Summarize documentation
| """Handlers for all OLS-related REST API endpoints."""
router = APIRouter(prefix="/ols", tags=["ols"])
@router.post("")
def ols_request(llm_request: LLMRequest) -> LLMRequest:
"""Handle requests for the OLS endpoint.
Args:
llm_request: The request containing a query and conversation ID.
Returns:
Response containing the processed information.
"""
logger = config.default_logger
# Initialize variables
previous_input = None
conversation = llm_request.conversation_id
# Generate a new conversation ID if not provided
if conversation is None:
conversation = Utils.get_suid()
logger.info(f"{conversation} New conversation")
else:
previous_input = config.conversation_cache.get(conversation)
logger.info(f"{conversation} Previous conversation input: {previous_input}")
llm_response = LLMRequest(query=llm_request.query, conversation_id=conversation)
# Log incoming request
logger.info(f"{conversation} Incoming request: {llm_request.query}")
# Validate the query
question_validator = QuestionValidator()
validation_result = question_validator.validate_question(
conversation, llm_request.query
)
if validation_result[0] == constants.INVALID:
logger.info(f"{conversation} Question is not about k8s/ocp, rejecting")
raise HTTPException(
status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
detail={
"response": "Sorry, I can only answer questions about "
"OpenShift and Kubernetes. This does not look "
"like something I know how to handle."
},
)
if validation_result[0] == constants.VALID:
logger.info(f"{conversation} Question is about k8s/ocp")
question_type = validation_result[1]
# check if question type is from known categories
if question_type not in {constants.NOYAML, constants.YAML}:
# not known question type has been detected
logger.error(f"Unknown question type {question_type}")
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail={"response": "Internal server error. Please try again."},
)
if question_type == constants.NOYAML:
logger.info(
f"{conversation} Question is not about yaml, sending for generic info"
)
# Summarize documentation | docs_summarizer = DocsSummarizer() | 3 | 2023-11-08 06:29:41+00:00 | 8k |
NicolasZucchet/Online-learning-LR-dependencies | online_lru/train.py | [
{
"identifier": "create_train_state",
"path": "online_lru/train_helpers.py",
"snippet": "def create_train_state(\n model,\n rng,\n retrieval,\n in_dim=1,\n bsz=128,\n seq_len=784,\n weight_decay=0.01,\n padded=False,\n opt_config=\"standard\",\n rec_lr=1e-3,\n lr=1e-3,\n... | from functools import partial
from jax import random
from .train_helpers import (
create_train_state,
reduce_lr_on_plateau,
linear_warmup,
cosine_annealing,
constant_lr,
train_epoch,
validate,
)
from .dataloading import Datasets
from .seq_model import BatchClassificationModel
from .rec import init_layer
import wandb | 4,947 | Main function to train over a certain number of epochs
"""
best_test_loss = 100000000
if args.USE_WANDB:
# Make wandb config dictionary
wandb.init(
project=args.wandb_project,
job_type="model_training",
config=vars(args),
entity=args.wandb_entity,
)
else:
wandb.init(mode="offline")
# Set rec learning rate lr as function of lr
lr = args.lr_base
rec_lr = args.lr_factor * lr
# Set randomness...
print("[*] Setting Randomness...")
key = random.PRNGKey(args.jax_seed)
init_rng, train_rng = random.split(key, num=2)
# Close over additional dataset-specific kwargs
create_dataset_fn = Datasets[args.dataset]
if args.dataset == "copy-pad-classification":
create_dataset_fn = partial(
create_dataset_fn,
pattern_length=args.copy_pattern_length,
train_samples=args.copy_train_samples,
)
elif args.dataset == "enwik9":
create_dataset_fn = partial(
create_dataset_fn,
seq_len=args.enwik9_seq_len,
train_samples=args.enwik9_train_samples,
)
# Dataset dependent logic
if args.dataset in [
"imdb-classification",
"listops-classification",
"aan-classification",
]:
padded = True
if args.dataset in ["aan-classification"]:
# Use retreival model for document matching
retrieval = True
print("Using retrieval model for document matching")
else:
retrieval = False
else:
padded = False
retrieval = False
# Create dataset...
init_rng, key = random.split(init_rng, num=2)
(
trainloader,
valloader,
testloader,
aux_dataloaders,
n_classes,
seq_len,
in_dim,
train_size,
) = create_dataset_fn(args.dir_name, seed=args.jax_seed, bsz=args.bsz)
print(f"[*] Starting training on `{args.dataset}` =>> Initializing...")
# arguments specific to LRU or to RNN class
additional_arguments = {}
if args.layer_cls == "LRU":
additional_arguments["r_min"] = args.r_min
additional_arguments["r_max"] = args.r_max
if args.layer_cls == "RNN":
additional_arguments["activation"] = args.rnn_activation_fn
additional_arguments["scaling_hidden"] = args.rnn_scaling_hidden
elif args.layer_cls == "GRU":
assert args.d_hidden == args.d_model
rec_train = init_layer(
layer_cls=args.layer_cls,
d_hidden=args.d_hidden,
d_model=args.d_model,
seq_length=seq_len,
training_mode=args.training_mode,
**additional_arguments,
)
rec_val = init_layer(
layer_cls=args.layer_cls,
d_hidden=args.d_hidden,
d_model=args.d_model,
seq_length=seq_len,
training_mode="bptt", # bptt mode so rec does not keep in memory states and traces
**additional_arguments,
)
model_cls = partial(
BatchClassificationModel,
rec_type=args.layer_cls,
d_input=in_dim,
d_output=n_classes,
d_model=args.d_model,
n_layers=args.n_layers,
seq_length=seq_len,
padded=padded,
activation=args.activation_fn,
readout=args.readout,
dropout=args.p_dropout,
mode=args.mode,
prenorm=args.prenorm,
multidim=1 + (args.dataset == "copy-pad-classification"),
) # signature: (bool) training -> BatchClassificationModel
model = model_cls(rec=rec_train, training_mode=args.training_mode, training=True)
bptt_model = model_cls(rec=rec_val, training_mode="bptt", training=True)
val_model = model_cls(rec=rec_val, training_mode="bptt", training=False)
# initialize training state (optax) and internal states of the model
|
def train(args):
"""
Main function to train over a certain number of epochs
"""
best_test_loss = 100000000
if args.USE_WANDB:
# Make wandb config dictionary
wandb.init(
project=args.wandb_project,
job_type="model_training",
config=vars(args),
entity=args.wandb_entity,
)
else:
wandb.init(mode="offline")
# Set rec learning rate lr as function of lr
lr = args.lr_base
rec_lr = args.lr_factor * lr
# Set randomness...
print("[*] Setting Randomness...")
key = random.PRNGKey(args.jax_seed)
init_rng, train_rng = random.split(key, num=2)
# Close over additional dataset-specific kwargs
create_dataset_fn = Datasets[args.dataset]
if args.dataset == "copy-pad-classification":
create_dataset_fn = partial(
create_dataset_fn,
pattern_length=args.copy_pattern_length,
train_samples=args.copy_train_samples,
)
elif args.dataset == "enwik9":
create_dataset_fn = partial(
create_dataset_fn,
seq_len=args.enwik9_seq_len,
train_samples=args.enwik9_train_samples,
)
# Dataset dependent logic
if args.dataset in [
"imdb-classification",
"listops-classification",
"aan-classification",
]:
padded = True
if args.dataset in ["aan-classification"]:
# Use retreival model for document matching
retrieval = True
print("Using retrieval model for document matching")
else:
retrieval = False
else:
padded = False
retrieval = False
# Create dataset...
init_rng, key = random.split(init_rng, num=2)
(
trainloader,
valloader,
testloader,
aux_dataloaders,
n_classes,
seq_len,
in_dim,
train_size,
) = create_dataset_fn(args.dir_name, seed=args.jax_seed, bsz=args.bsz)
print(f"[*] Starting training on `{args.dataset}` =>> Initializing...")
# arguments specific to LRU or to RNN class
additional_arguments = {}
if args.layer_cls == "LRU":
additional_arguments["r_min"] = args.r_min
additional_arguments["r_max"] = args.r_max
if args.layer_cls == "RNN":
additional_arguments["activation"] = args.rnn_activation_fn
additional_arguments["scaling_hidden"] = args.rnn_scaling_hidden
elif args.layer_cls == "GRU":
assert args.d_hidden == args.d_model
rec_train = init_layer(
layer_cls=args.layer_cls,
d_hidden=args.d_hidden,
d_model=args.d_model,
seq_length=seq_len,
training_mode=args.training_mode,
**additional_arguments,
)
rec_val = init_layer(
layer_cls=args.layer_cls,
d_hidden=args.d_hidden,
d_model=args.d_model,
seq_length=seq_len,
training_mode="bptt", # bptt mode so rec does not keep in memory states and traces
**additional_arguments,
)
model_cls = partial(
BatchClassificationModel,
rec_type=args.layer_cls,
d_input=in_dim,
d_output=n_classes,
d_model=args.d_model,
n_layers=args.n_layers,
seq_length=seq_len,
padded=padded,
activation=args.activation_fn,
readout=args.readout,
dropout=args.p_dropout,
mode=args.mode,
prenorm=args.prenorm,
multidim=1 + (args.dataset == "copy-pad-classification"),
) # signature: (bool) training -> BatchClassificationModel
model = model_cls(rec=rec_train, training_mode=args.training_mode, training=True)
bptt_model = model_cls(rec=rec_val, training_mode="bptt", training=True)
val_model = model_cls(rec=rec_val, training_mode="bptt", training=False)
# initialize training state (optax) and internal states of the model | state, init_states = create_train_state( | 0 | 2023-11-01 13:18:32+00:00 | 8k |
zhaohengz/CAILA | test.py | [
{
"identifier": "DATA_FOLDER",
"path": "flags.py",
"snippet": "DATA_FOLDER = \"./all_data\""
},
{
"identifier": "dataset",
"path": "data/dataset.py",
"snippet": "class ImageLoader:\nclass CompositionDataset(Dataset):\n def __init__(self, root):\n def __call__(self, img):\ndef datas... | import torch
import torch.backends.cudnn as cudnn
import numpy as np
import tqdm
import os
from torch.utils.tensorboard import SummaryWriter
from flags import DATA_FOLDER
from tqdm import tqdm
from os.path import join as ospj
from data import dataset as dset
from models.common import Evaluator
from utils.utils import load_args
from utils.config_model import configure_model
from flags import parser | 5,098 | # Torch imports
cudnn.benchmark = True
# Python imports
# Local imports
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def main():
# Get arguments and start logging
args = parser.parse_args()
logpath = args.logpath
load_args(args.config, args)
# Get dataset
trainset = dset.CompositionDataset(
root=os.path.join(DATA_FOLDER, args.data_dir),
phase='train',
split=args.splitname,
train_only=args.train_only,
subset=args.subset,
open_world=args.open_world,
dataset=args.dataset
)
testset = dset.CompositionDataset(
root=os.path.join(DATA_FOLDER,args.data_dir),
phase='test',
split=args.splitname,
subset=args.subset,
open_world=args.open_world,
norm_family=args.norm_family,
dataset=args.dataset
)
testloader = torch.utils.data.DataLoader(
testset,
batch_size=args.test_batch_size,
shuffle=False,
num_workers=args.workers)
# Get model and optimizer
model, _ = configure_model(args, trainset)
args.load = ospj(logpath,'ckpt_best_auc.t7')
checkpoint = torch.load(args.load)
model.load_state_dict(checkpoint['net'], strict=True)
model = model.cuda()
model.eval()
| # Torch imports
cudnn.benchmark = True
# Python imports
# Local imports
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def main():
# Get arguments and start logging
args = parser.parse_args()
logpath = args.logpath
load_args(args.config, args)
# Get dataset
trainset = dset.CompositionDataset(
root=os.path.join(DATA_FOLDER, args.data_dir),
phase='train',
split=args.splitname,
train_only=args.train_only,
subset=args.subset,
open_world=args.open_world,
dataset=args.dataset
)
testset = dset.CompositionDataset(
root=os.path.join(DATA_FOLDER,args.data_dir),
phase='test',
split=args.splitname,
subset=args.subset,
open_world=args.open_world,
norm_family=args.norm_family,
dataset=args.dataset
)
testloader = torch.utils.data.DataLoader(
testset,
batch_size=args.test_batch_size,
shuffle=False,
num_workers=args.workers)
# Get model and optimizer
model, _ = configure_model(args, trainset)
args.load = ospj(logpath,'ckpt_best_auc.t7')
checkpoint = torch.load(args.load)
model.load_state_dict(checkpoint['net'], strict=True)
model = model.cuda()
model.eval()
| evaluator = Evaluator(testset, model) | 2 | 2023-11-01 00:54:59+00:00 | 8k |
fortelex/hiveline | hiveline/results/modal_shares.py | [
{
"identifier": "fptf",
"path": "hiveline/models/fptf.py",
"snippet": "def _remove_empty_keys(d):\ndef read_datetime(time_str):\ndef format_datetime(dt):\n def __init__(self, name=None, address=None, longitude=None, latitude=None, altitude=None):\n def to_dict(self):\n def to_json(self):\n d... | import datetime
import random
import uuid
import osmnx
import hiveline.vc.vc_extract as vc_extract
from matplotlib import pyplot as plt
from hiveline.models import fptf
from hiveline.od.place import Place
from hiveline.results.journeys import Journeys, Option, Options, get_option_stats, JourneyStats
from hiveline.routing.util import ensure_directory | 6,852 |
rail_modes = [fptf.Mode.TRAIN, fptf.Mode.BUS, fptf.Mode.GONDOLA, fptf.Mode.WATERCRAFT]
class Params:
"""
Simulation parameters for congestion and modal share analysis
"""
num_citizens = 2000000
vehicle_factor = 0.00007
vcs_car_usage_start = 0.5
mix_factor = 0.1
max_iterations = 100
car_ownership_override = 0 # probability that a vc will own a car even though they don't have one. all of these would use it as well.
car_usage_override = 0 # probability that a car owner would choose a car even though there is no parking
|
rail_modes = [fptf.Mode.TRAIN, fptf.Mode.BUS, fptf.Mode.GONDOLA, fptf.Mode.WATERCRAFT]
class Params:
"""
Simulation parameters for congestion and modal share analysis
"""
num_citizens = 2000000
vehicle_factor = 0.00007
vcs_car_usage_start = 0.5
mix_factor = 0.1
max_iterations = 100
car_ownership_override = 0 # probability that a vc will own a car even though they don't have one. all of these would use it as well.
car_usage_override = 0 # probability that a car owner would choose a car even though there is no parking
| def decide(options: Options, params: Params = None) -> Option | None: | 2 | 2023-11-07 15:34:04+00:00 | 8k |
uhppoted/uhppoted-app-home-assistant | custom_components/uhppoted/sensor.py | [
{
"identifier": "DOMAIN",
"path": "custom_components/uhppoted/const.py",
"snippet": "DOMAIN = 'uhppoted'"
},
{
"identifier": "CONF_BIND_ADDR",
"path": "custom_components/uhppoted/const.py",
"snippet": "CONF_BIND_ADDR = 'bind_address'"
},
{
"identifier": "CONF_BROADCAST_ADDR",
... | import datetime
import logging
from homeassistant.core import HomeAssistant
from homeassistant.config_entries import ConfigEntry
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.typing import ConfigType
from homeassistant.helpers.typing import DiscoveryInfoType
from homeassistant.components.sensor import SensorEntity
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from uhppoted import uhppote
from .const import DOMAIN
from .const import CONF_BIND_ADDR
from .const import CONF_BROADCAST_ADDR
from .const import CONF_LISTEN_ADDR
from .const import CONF_DEBUG
from .const import ATTR_ADDRESS
from .const import ATTR_NETMASK
from .const import ATTR_GATEWAY
from .const import ATTR_FIRMWARE
from .config import configure_controllers
from .config import configure_doors
from .config import configure_cards
from .config import configure_driver
from .controller import ControllerInfo
from .door import ControllerDoor
from .door import ControllerDoorOpen
from .door import ControllerDoorLock
from .door import ControllerDoorButton
from .door import ControllerDoorMode
from .card import CardInfo
from .card import CardHolder | 6,159 | from __future__ import annotations
_LOGGER = logging.getLogger(__name__)
# Configuration constants
# Attribute constants
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback):
config = entry.data
options = entry.options
u = configure_driver(options)
entities = []
def f(controller, serial_no, address):
entities.extend([
ControllerInfo(u['api'], controller, serial_no),
])
def g(controller, serial_no, door, door_no):
entities.extend([
ControllerDoor(u['api'], controller, serial_no, door, door_no),
ControllerDoorOpen(u['api'], controller, serial_no, door, door_no),
ControllerDoorLock(u['api'], controller, serial_no, door, door_no),
ControllerDoorButton(u['api'], controller, serial_no, door, door_no),
])
def h(card, name, unique_id):
entities.extend([
CardInfo(u, card, name, unique_id),
CardHolder(u, card, name, unique_id),
])
configure_controllers(options, f)
configure_doors(options, g)
| from __future__ import annotations
_LOGGER = logging.getLogger(__name__)
# Configuration constants
# Attribute constants
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback):
config = entry.data
options = entry.options
u = configure_driver(options)
entities = []
def f(controller, serial_no, address):
entities.extend([
ControllerInfo(u['api'], controller, serial_no),
])
def g(controller, serial_no, door, door_no):
entities.extend([
ControllerDoor(u['api'], controller, serial_no, door, door_no),
ControllerDoorOpen(u['api'], controller, serial_no, door, door_no),
ControllerDoorLock(u['api'], controller, serial_no, door, door_no),
ControllerDoorButton(u['api'], controller, serial_no, door, door_no),
])
def h(card, name, unique_id):
entities.extend([
CardInfo(u, card, name, unique_id),
CardHolder(u, card, name, unique_id),
])
configure_controllers(options, f)
configure_doors(options, g) | configure_cards(options, h) | 11 | 2023-11-06 18:46:49+00:00 | 8k |
shadowpa0327/FLORA | data/build.py | [
{
"identifier": "create_transform",
"path": "data/augmentation/transforms_factory.py",
"snippet": "def create_transform(\n input_size,\n is_training=False,\n use_prefetcher=False,\n no_aug=False,\n scale=None,\n ratio=None,\n hflip=0.5,\n vflip=0.,... | import os
import torch
import numpy as np
import torch.distributed as dist
from torchvision import datasets, transforms
from timm.data.constants import \
IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
from timm.data import Mixup
from timm.data import create_transform
from .augmentation import create_transform as create_transform_record
from .augmentation.mixup import Mixup as Mixup_record
from .augmentation.dataset_wrapper import DatasetWrapper
from .sampler import MyDistributedSampler
from timm.data import TimmDatasetTar
from timm.data import ImageDataset as TimmDatasetTar
from torchvision.transforms import InterpolationMode
from timm.data.transforms import _pil_interp | 5,663 | # --------------------------------------------------------
# TinyViT Data Builder
# Copyright (c) 2022 Microsoft
# Based on the code: Swin Transformer
# (https://github.com/microsoft/swin-transformer)
# Adapted for TinyVIT
# --------------------------------------------------------
try:
except ImportError:
# for higher version of timm
try:
def _pil_interp(method):
if method == 'bicubic':
return InterpolationMode.BICUBIC
elif method == 'lanczos':
return InterpolationMode.LANCZOS
elif method == 'hamming':
return InterpolationMode.HAMMING
else:
# default bilinear, do we want to allow nearest?
return InterpolationMode.BILINEAR
except:
def build_loader(config):
config.defrost()
dataset_train, config.MODEL.NUM_CLASSES = build_dataset(
is_train=True, config=config)
config.freeze()
print(
f"local rank {config.LOCAL_RANK} / global rank {dist.get_rank()} successfully build train dataset")
dataset_val, _ = build_dataset(is_train=False, config=config)
print(
f"local rank {config.LOCAL_RANK} / global rank {dist.get_rank()} successfully build val dataset")
mixup_active = config.AUG.MIXUP > 0 or config.AUG.CUTMIX > 0. or config.AUG.CUTMIX_MINMAX is not None
sampler_train = MyDistributedSampler(
dataset_train, shuffle=True,
drop_last=False, padding=True, pair=mixup_active and config.DISTILL.ENABLED,
)
sampler_val = MyDistributedSampler(
dataset_val, shuffle=False,
drop_last=False, padding=False, pair=False,
)
# TinyViT Dataset Wrapper
if config.DISTILL.ENABLED:
| # --------------------------------------------------------
# TinyViT Data Builder
# Copyright (c) 2022 Microsoft
# Based on the code: Swin Transformer
# (https://github.com/microsoft/swin-transformer)
# Adapted for TinyVIT
# --------------------------------------------------------
try:
except ImportError:
# for higher version of timm
try:
def _pil_interp(method):
if method == 'bicubic':
return InterpolationMode.BICUBIC
elif method == 'lanczos':
return InterpolationMode.LANCZOS
elif method == 'hamming':
return InterpolationMode.HAMMING
else:
# default bilinear, do we want to allow nearest?
return InterpolationMode.BILINEAR
except:
def build_loader(config):
config.defrost()
dataset_train, config.MODEL.NUM_CLASSES = build_dataset(
is_train=True, config=config)
config.freeze()
print(
f"local rank {config.LOCAL_RANK} / global rank {dist.get_rank()} successfully build train dataset")
dataset_val, _ = build_dataset(is_train=False, config=config)
print(
f"local rank {config.LOCAL_RANK} / global rank {dist.get_rank()} successfully build val dataset")
mixup_active = config.AUG.MIXUP > 0 or config.AUG.CUTMIX > 0. or config.AUG.CUTMIX_MINMAX is not None
sampler_train = MyDistributedSampler(
dataset_train, shuffle=True,
drop_last=False, padding=True, pair=mixup_active and config.DISTILL.ENABLED,
)
sampler_val = MyDistributedSampler(
dataset_val, shuffle=False,
drop_last=False, padding=False, pair=False,
)
# TinyViT Dataset Wrapper
if config.DISTILL.ENABLED: | dataset_train = DatasetWrapper(dataset_train, | 2 | 2023-11-03 09:54:45+00:00 | 8k |
fw-ai/fireworks_poe_bot | fireworks_poe_bot/fw_poe_qr_bot.py | [
{
"identifier": "PoeBot",
"path": "fireworks_poe_bot/fastapi_poe/base.py",
"snippet": "class PoeBot:\n # Override these for your bot\n\n async def get_response(\n self, query: QueryRequest\n ) -> AsyncIterable[Union[PartialResponse, ServerSentEvent]]:\n \"\"\"Override this to retu... | import base64
import copy
import io
import fireworks.client
import time
import uuid
import requests
import qrcode
import traceback
from typing import AsyncIterable, Dict, List, Optional, Union
from .fastapi_poe import PoeBot
from sse_starlette.sse import ServerSentEvent
from .fastapi_poe.types import (
PartialResponse,
QueryRequest,
ReportErrorRequest,
ReportFeedbackRequest,
SettingsRequest,
SettingsResponse,
ErrorResponse,
)
from fireworks.client.api import ChatMessage
from fireworks.client.error import InvalidRequestError
from fireworks.client.image import ImageInference, Answer
from fireworks_poe_bot.plugin import log_error, log_info, log_warn, register_bot_plugin
from fireworks_poe_bot.config import ModelConfig
from itertools import groupby
from PIL import Image
from google.cloud import storage | 4,446 | "qr_data": qr_data,
"qr_strength": qr_strength,
"prompt_strength": prompt_strength,
"response": response_text,
"elapsed_sec": elapsed_sec,
"elapsed_sec_inference": end_t_inference - start_t,
"elapsed_sec_upload": end_t - start_t_encode,
}
)
yield PartialResponse(text=response_text)
yield ServerSentEvent(event="done")
return
except Exception as e:
end_t = time.time()
log_error(
{
"severity": "ERROR",
"msg": "Invalid request",
"error": "\n".join(traceback.format_exception(e)),
"elapsed_sec": end_t - start_t,
**query.dict(),
}
)
if "prompt is too long" in str(e):
error_type = "user_message_too_long"
else:
error_type = None
yield ErrorResponse(allow_retry=False, error_type=error_type, text=str(e))
return
finally:
fireworks.client.api_key = orig_api_key
# Function to upload a PIL Image to an S3 bucket with a presigned URL
def _upload_image_to_s3_with_ttl(
self, bucket_name, object_name, image: Image, expiration=600
):
"""
Upload a PIL Image to an S3 bucket with TTL by generating a presigned URL.
:param bucket_name: String name of the bucket to which the image is uploaded.
:param object_name: S3 object name. If not specified then file_name is used.
:param image: PIL Image object to be uploaded.
:param expiration: Time in seconds for the presigned URL to remain valid.
"""
# In-memory binary streams
in_mem_file = io.BytesIO()
# Save the PIL image to in-memory file as JPEG
image.save(in_mem_file, format="JPEG")
in_mem_file.seek(0) # Reset file pointer to the beginning
# Upload the image to S3
# self.s3_client.upload_fileobj(in_mem_file, bucket_name, object_name)
self.s3_client.put_object(
Bucket=self.s3_bucket_name,
Key=object_name,
Body=in_mem_file,
ContentType="image/jpeg",
)
# Generate a presigned URL for the S3 object
url = self.s3_client.generate_presigned_url(
"get_object",
Params={"Bucket": bucket_name, "Key": object_name},
ExpiresIn=expiration,
)
return url
def _upload_image_to_gcs(self, image: Image, bucket_name: str):
"""Uploads a given PIL.Image to a GCS bucket."""
# Generate a (statistically) unique filename with a uuid4
random_uuid = str(uuid.uuid4()).replace("-", "")
filename = f"{random_uuid}.jpg"
# Initialize the GCS client
client = storage.Client()
bucket = client.get_bucket(bucket_name)
# Convert the PIL.Image to bytes
img_byte_arr = io.BytesIO()
image.save(img_byte_arr, format="JPEG")
img_byte_arr = img_byte_arr.getvalue()
# Create a new blob (i.e., object) in the bucket and upload the image bytes
blob = bucket.blob(filename)
blob.upload_from_string(img_byte_arr, content_type=f"image/jpeg")
blob.make_public()
# The public URL can be accessed with the `public_url` attribute
public_url = blob.public_url
return public_url
def _download_image(self, image_url):
# Send an HTTP GET request to the image URL
response = requests.get(image_url)
# Check if the request was successful
if response.status_code == 200:
# Read the image content into an in-memory bytes buffer
image_bytes = io.BytesIO(response.content)
# Use Pillow to open the image from the bytes buffer
img = Image.open(image_bytes)
return img
else:
# If the request failed, raise an HTTPError with the response
response.raise_for_status()
async def get_settings(self, setting: SettingsRequest) -> SettingsResponse:
"""Override this to return non-standard settings."""
return SettingsResponse()
async def on_feedback(self, feedback_request: ReportFeedbackRequest) -> None:
"""Override this to record feedback from the user."""
pass
|
def parse_input(input_string, default_qr_strength, default_prompt_strength):
# Parse initial prompt
prompt_end_index = input_string.find('--')
if prompt_end_index == -1:
prompt_end_index = len(input_string)
prompt = input_string[:prompt_end_index].strip() if prompt_end_index != -1 else input_string.strip()
input_string = input_string[prompt_end_index:].strip()
qr_prompt = None
qr_strength = default_qr_strength
prompt_strength = default_prompt_strength
model = "sdxl"
while len(input_string) > 0:
next_flag_idx = input_string.find('--', 2)
if next_flag_idx == -1:
next_flag_idx = len(input_string)
# Parse the flag and its arguments
if input_string.startswith('--qr-strength'):
qr_strength = float(input_string[len("--qr-strength"):next_flag_idx].strip())
input_string = input_string[next_flag_idx:].strip()
elif input_string.startswith('--qr'):
qr_prompt = input_string[len("--qr"):next_flag_idx].strip()
input_string = input_string[next_flag_idx:].strip()
elif input_string.startswith('--prompt-strength'):
prompt_strength = int(input_string[len("--prompt-strength"):next_flag_idx].strip())
input_string = input_string[next_flag_idx:].strip()
elif input_string.startswith('--model'):
model = input_string[len("--model"):next_flag_idx].strip()
input_string = input_string[next_flag_idx:].strip()
else:
raise ValueError(f'Unknown flag: {input_string[:next_flag_idx]}')
if qr_prompt is None:
raise ValueError('Please specify a QR prompt with a --qr flag.')
return prompt, qr_prompt, qr_strength, prompt_strength, model
def gen_qr_code(input_text: str) -> Image:
# Generate QR Code
qr = qrcode.QRCode(
version=1,
error_correction=qrcode.constants.ERROR_CORRECT_H,
box_size=16,
border=4,
)
qr.add_data(input_text)
qr.make(fit=True)
# Create QR Code image
img = qr.make_image(fill_color="black", back_color="white")
# Padding the image to be 768x768
width, height = img.size
new_width = new_height = 768
# Create a new image with white background
new_img = Image.new("RGB", (new_width, new_height), "white")
# Paste the original image onto the new image, centered
new_img.paste(img, ((new_width - width) // 2, (new_height - height) // 2))
return new_img
class QRCodeConfig(ModelConfig):
gcs_bucket_name: str
conditioning_scale: Optional[float] = None
default_cfg_scale: Optional[float] = None
@register_bot_plugin("qr_models", QRCodeConfig)
class FireworksPoeQRBot(PoeBot):
def __init__(
self,
model: str,
api_key: str,
environment: str,
deployment: str,
server_version: str,
gcs_bucket_name: str,
conditioning_scale: float,
default_cfg_scale: float,
):
super().__init__()
self.model = model
self.api_key = api_key
self.environment = environment
self.deployment = deployment
self.server_version = server_version
self.default_cfg_scale = default_cfg_scale if default_cfg_scale is not None else 8
model_atoms = model.split("/")
if len(model_atoms) != 4:
raise ValueError(
f"Expected model name to be in the form accounts/{{modelname}}/models/{{model}}, but got {model}"
)
if model_atoms[0] != "accounts" or model_atoms[2] != "models":
raise ValueError(
f"Expected model name to be in the form accounts/{{modelname}}/models/{{model}}, but got {model}"
)
self.account = model_atoms[1]
self.model = model_atoms[3]
self.client = ImageInference(account=self.account, model=self.model)
self.gcs_bucket_name = gcs_bucket_name
self.conditioning_scale = conditioning_scale
def _log_warn(self, payload: Dict):
payload = copy.copy(payload)
payload.update(
{
"severity": "WARNING",
"environment": self.environment,
"deployment": self.deployment,
"model": self.model,
"server_version": self.server_version,
}
)
log_warn(payload)
def _log_info(self, payload: Dict):
payload = copy.copy(payload)
payload.update(
{
"severity": "INFO",
"environment": self.environment,
"deployment": self.deployment,
"model": self.model,
"server_version": self.server_version,
}
)
log_info(payload)
async def get_response(
self, query: QueryRequest
) -> AsyncIterable[Union[PartialResponse, ServerSentEvent]]:
orig_api_key = self.client.api_key
fireworks.client.api_key = self.api_key
try:
start_t = time.time()
if len(query.query) == 0:
yield ErrorResponse(allow_retry=False, text="Empty query")
raise
messages: List[ChatMessage] = []
for protocol_message in query.query:
# OpenAI/Fireworks use the "assistant" role for the LLM, but Poe uses the
# "bot" role. Replace that one. Otherwise, ignore the role
if protocol_message.role not in {"system", "user", "bot"}:
self._log_warn({"msg": "Unknown role", **protocol_message})
continue
if protocol_message.content_type not in {"text/plain", "text/markdown"}:
self._log_warn({"msg": "Unknown content type", **protocol_message})
continue
# TODO: support protocol_message.feedback and protocol_message.attachments
# if needed
if protocol_message.role == "bot":
role = "assistant"
else:
role = protocol_message.role
messages.append({"role": role, "content": protocol_message.content})
self._log_info(
{
"msg": "Request received",
**query.dict(),
}
)
# The poe servers send us arbitrary lists of messages. We need to do a few things
# to normalize for our chat completion API:
# 1. Ensure that all assistant messages are preceded by a user message
# 2. Merge adjacent messages from the same role
# 3. Ensure that the last message is a user message
# Ensure that all assistant messages are preceded by a user message
for i in range(len(messages) - 1, -1, -1):
if messages[i]["role"] == "assistant" and (
i == 0 or messages[i - 1]["role"] != "user"
):
self._log_warn(
{
"msg": f"Assistant message {messages[i]} not preceded by user message"
}
)
messages.insert(i, {"role": "user", "content": ""})
# Merge adjacent messages from the same role
merged_messages = []
for role, group in groupby(messages, key=lambda x: x["role"]):
content = " ".join(message["content"] for message in group)
merged_messages.append({"role": role, "content": content})
messages = merged_messages
# Ensure last message is a user message
if messages[-1]["role"] != "user":
self._log_warn({"msg": f"Last message {messages[-1]} not a user message"})
messages.append({"role": "user", "content": ""})
# generated_len = 0
assert messages[-1]["role"] == "user"
prompt = messages[-1]["content"]
try:
prompt, qr_data, qr_strength, prompt_strength, model = parse_input(prompt, self.conditioning_scale, self.default_cfg_scale)
except Exception as e:
yield self.text_event(text=f"Error parsing input: {e}")
return
if model == "sdxl":
self.client.model = "stable-diffusion-xl-1024-v1-0"
elif model == "sdv1.5":
self.client.model = "stable-diffusion-v1-5"
else:
yield self.text_event(text=f"Unknown model: {model}. Model must be one of 'sdxl' or 'sdv1.5'.")
return
qr_image = gen_qr_code(qr_data)
answer: Answer = await self.client.control_net_async(
control_image=qr_image,
control_net_name="qr",
conditioning_scale=qr_strength,
prompt=prompt,
cfg_scale=prompt_strength,
sampler=None,
steps=25,
seed=0,
safety_check=False,
output_image_format="JPG",
# Add additional parameters here as necessary
)
end_t_inference = time.time()
start_t_encode = time.time()
if answer.finish_reason == "CONTENT_FILTERED":
yield self.text_event(text="Potentially sensitive content detected")
return
public_image_url = self._upload_image_to_gcs(
answer.image, self.gcs_bucket_name
)
response_text = f""
end_t = time.time()
elapsed_sec = end_t - start_t
self._log_info(
{
"severity": "INFO",
"msg": "Request completed",
**query.dict(),
"prompt": prompt,
"qr_data": qr_data,
"qr_strength": qr_strength,
"prompt_strength": prompt_strength,
"response": response_text,
"elapsed_sec": elapsed_sec,
"elapsed_sec_inference": end_t_inference - start_t,
"elapsed_sec_upload": end_t - start_t_encode,
}
)
yield PartialResponse(text=response_text)
yield ServerSentEvent(event="done")
return
except Exception as e:
end_t = time.time()
log_error(
{
"severity": "ERROR",
"msg": "Invalid request",
"error": "\n".join(traceback.format_exception(e)),
"elapsed_sec": end_t - start_t,
**query.dict(),
}
)
if "prompt is too long" in str(e):
error_type = "user_message_too_long"
else:
error_type = None
yield ErrorResponse(allow_retry=False, error_type=error_type, text=str(e))
return
finally:
fireworks.client.api_key = orig_api_key
# Function to upload a PIL Image to an S3 bucket with a presigned URL
def _upload_image_to_s3_with_ttl(
self, bucket_name, object_name, image: Image, expiration=600
):
"""
Upload a PIL Image to an S3 bucket with TTL by generating a presigned URL.
:param bucket_name: String name of the bucket to which the image is uploaded.
:param object_name: S3 object name. If not specified then file_name is used.
:param image: PIL Image object to be uploaded.
:param expiration: Time in seconds for the presigned URL to remain valid.
"""
# In-memory binary streams
in_mem_file = io.BytesIO()
# Save the PIL image to in-memory file as JPEG
image.save(in_mem_file, format="JPEG")
in_mem_file.seek(0) # Reset file pointer to the beginning
# Upload the image to S3
# self.s3_client.upload_fileobj(in_mem_file, bucket_name, object_name)
self.s3_client.put_object(
Bucket=self.s3_bucket_name,
Key=object_name,
Body=in_mem_file,
ContentType="image/jpeg",
)
# Generate a presigned URL for the S3 object
url = self.s3_client.generate_presigned_url(
"get_object",
Params={"Bucket": bucket_name, "Key": object_name},
ExpiresIn=expiration,
)
return url
def _upload_image_to_gcs(self, image: Image, bucket_name: str):
"""Uploads a given PIL.Image to a GCS bucket."""
# Generate a (statistically) unique filename with a uuid4
random_uuid = str(uuid.uuid4()).replace("-", "")
filename = f"{random_uuid}.jpg"
# Initialize the GCS client
client = storage.Client()
bucket = client.get_bucket(bucket_name)
# Convert the PIL.Image to bytes
img_byte_arr = io.BytesIO()
image.save(img_byte_arr, format="JPEG")
img_byte_arr = img_byte_arr.getvalue()
# Create a new blob (i.e., object) in the bucket and upload the image bytes
blob = bucket.blob(filename)
blob.upload_from_string(img_byte_arr, content_type=f"image/jpeg")
blob.make_public()
# The public URL can be accessed with the `public_url` attribute
public_url = blob.public_url
return public_url
def _download_image(self, image_url):
# Send an HTTP GET request to the image URL
response = requests.get(image_url)
# Check if the request was successful
if response.status_code == 200:
# Read the image content into an in-memory bytes buffer
image_bytes = io.BytesIO(response.content)
# Use Pillow to open the image from the bytes buffer
img = Image.open(image_bytes)
return img
else:
# If the request failed, raise an HTTPError with the response
response.raise_for_status()
async def get_settings(self, setting: SettingsRequest) -> SettingsResponse:
"""Override this to return non-standard settings."""
return SettingsResponse()
async def on_feedback(self, feedback_request: ReportFeedbackRequest) -> None:
"""Override this to record feedback from the user."""
pass
| async def on_error(self, error_request: ReportErrorRequest) -> None: | 3 | 2023-11-03 23:24:23+00:00 | 8k |
Fsoft-AIC/LSDM | atiss/scene_synthesis/datasets/threed_front.py | [
{
"identifier": "BaseDataset",
"path": "atiss/scene_synthesis/datasets/common.py",
"snippet": "class BaseDataset(Dataset):\n \"\"\"Implements the interface for all datasets that consist of scenes.\"\"\"\n def __init__(self, scenes):\n assert len(scenes) > 0\n self.scenes = scenes\n\n... | from collections import Counter, OrderedDict
from functools import lru_cache
from PIL import Image
from .common import BaseDataset
from .threed_front_scene import Room
from .utils import parse_threed_front_scenes
import numpy as np
import json
import os | 4,465 | #
# Copyright (C) 2021 NVIDIA Corporation. All rights reserved.
# Licensed under the NVIDIA Source Code License.
# See LICENSE at https://github.com/nv-tlabs/ATISS.
# Authors: Despoina Paschalidou, Amlan Kar, Maria Shugrina, Karsten Kreis,
# Andreas Geiger, Sanja Fidler
#
class ThreedFront(BaseDataset):
"""Container for the scenes in the 3D-FRONT dataset.
Arguments
---------
scenes: list of Room objects for all scenes in 3D-FRONT dataset
"""
def __init__(self, scenes, bounds=None):
super().__init__(scenes)
| #
# Copyright (C) 2021 NVIDIA Corporation. All rights reserved.
# Licensed under the NVIDIA Source Code License.
# See LICENSE at https://github.com/nv-tlabs/ATISS.
# Authors: Despoina Paschalidou, Amlan Kar, Maria Shugrina, Karsten Kreis,
# Andreas Geiger, Sanja Fidler
#
class ThreedFront(BaseDataset):
"""Container for the scenes in the 3D-FRONT dataset.
Arguments
---------
scenes: list of Room objects for all scenes in 3D-FRONT dataset
"""
def __init__(self, scenes, bounds=None):
super().__init__(scenes) | assert isinstance(self.scenes[0], Room) | 1 | 2023-11-06 07:55:51+00:00 | 8k |
molML/traversing_chem_space | active_learning/screening.py | [
{
"identifier": "Ensemble",
"path": "active_learning/nn.py",
"snippet": "class Ensemble(torch.nn.Module):\n \"\"\" Ensemble of GCNs\"\"\"\n def __init__(self, ensemble_size: int = 10, seed: int = 0, architecture: str = 'mlp', **kwargs) -> None:\n self.ensemble_size = ensemble_size\n ... | import pandas as pd
import numpy as np
import torch
from active_learning.nn import Ensemble
from active_learning.data_prep import MasterDataset
from active_learning.data_handler import Handler
from active_learning.utils import Evaluate, to_torch_dataloader
from active_learning.acquisition import Acquisition, logits_to_pred
from tqdm.auto import tqdm
from torch.utils.data import WeightedRandomSampler
from math import ceil | 4,060 | """
This script contains the main active learning loop that runs all experiments.
Author: Derek van Tilborg, Eindhoven University of Technology, May 2023
"""
INFERENCE_BATCH_SIZE = 512
TRAINING_BATCH_SIZE = 64
NUM_WORKERS = 4
def active_learning(n_start: int = 64, acquisition_method: str = 'exploration', max_screen_size: int = None,
batch_size: int = 16, architecture: str = 'gcn', seed: int = 0, bias: str = 'random',
optimize_hyperparameters: bool = False, ensemble_size: int = 10, retrain: bool = True,
anchored: bool = True, dataset: str = 'ALDH1') -> pd.DataFrame:
"""
:param n_start: number of molecules to start out with
:param acquisition_method: acquisition method, as defined in active_learning.acquisition
:param max_screen_size: we stop when this number of molecules has been screened
:param batch_size: number of molecules to add every cycle
:param architecture: 'gcn' or 'mlp'
:param seed: int 1-20
:param bias: 'random', 'small', 'large'
:param optimize_hyperparameters: Bool
:param ensemble_size: number of models in the ensemble, default is 10
:return: dataframe with results
"""
# Load the datasets
representation = 'ecfp' if architecture == 'mlp' else 'graph'
| """
This script contains the main active learning loop that runs all experiments.
Author: Derek van Tilborg, Eindhoven University of Technology, May 2023
"""
INFERENCE_BATCH_SIZE = 512
TRAINING_BATCH_SIZE = 64
NUM_WORKERS = 4
def active_learning(n_start: int = 64, acquisition_method: str = 'exploration', max_screen_size: int = None,
batch_size: int = 16, architecture: str = 'gcn', seed: int = 0, bias: str = 'random',
optimize_hyperparameters: bool = False, ensemble_size: int = 10, retrain: bool = True,
anchored: bool = True, dataset: str = 'ALDH1') -> pd.DataFrame:
"""
:param n_start: number of molecules to start out with
:param acquisition_method: acquisition method, as defined in active_learning.acquisition
:param max_screen_size: we stop when this number of molecules has been screened
:param batch_size: number of molecules to add every cycle
:param architecture: 'gcn' or 'mlp'
:param seed: int 1-20
:param bias: 'random', 'small', 'large'
:param optimize_hyperparameters: Bool
:param ensemble_size: number of models in the ensemble, default is 10
:return: dataframe with results
"""
# Load the datasets
representation = 'ecfp' if architecture == 'mlp' else 'graph' | ds_screen = MasterDataset('screen', representation=representation, dataset=dataset) | 1 | 2023-11-10 08:53:40+00:00 | 8k |
yunik1004/SAiD | script/train.py | [
{
"identifier": "SAID",
"path": "said/model/diffusion.py",
"snippet": "class SAID(ABC, nn.Module):\n \"\"\"Abstract class of SAiD models\"\"\"\n\n denoiser: nn.Module\n\n def __init__(\n self,\n audio_config: Optional[Wav2Vec2Config] = None,\n audio_processor: Optional[Wav2... | import argparse
import os
import pathlib
import torch
from dataclasses import dataclass
from typing import Optional
from accelerate import Accelerator
from diffusers.optimization import get_scheduler
from diffusers.training_utils import EMAModel
from torch import nn
from torch.utils.data import DataLoader, RandomSampler
from tqdm import tqdm
from said.model.diffusion import SAID, SAID_UNet1D
from said.model.wav2vec2 import ModifiedWav2Vec2Model
from said.util.blendshape import load_blendshape_coeffs
from dataset.dataset_voca import DataBatch, BlendVOCATrainDataset, BlendVOCAValDataset | 7,098 | help="Directory of the outputs",
)
parser.add_argument(
"--prediction_type",
type=str,
default="epsilon",
help="Prediction type of the scheduler function, 'epsilon', 'sample', or 'v_prediction'",
)
parser.add_argument(
"--window_size_min",
type=int,
default=120,
help="Minimum window size of the blendshape coefficients sequence at training",
)
parser.add_argument(
"--batch_size", type=int, default=8, help="Batch size at training"
)
parser.add_argument(
"--epochs", type=int, default=100000, help="The number of epochs"
)
parser.add_argument(
"--num_warmup_epochs",
type=int,
default=5000,
help="The number of warmup epochs",
)
parser.add_argument(
"--num_workers", type=int, default=0, help="The number of workers"
)
parser.add_argument(
"--learning_rate", type=float, default=1e-5, help="Learning rate"
)
parser.add_argument(
"--uncond_prob",
type=float,
default=0.1,
help="Unconditional probability of waveform (for classifier-free guidance)",
)
parser.add_argument(
"--unet_feature_dim",
type=int,
default=-1,
help="Dimension of the latent feature of the UNet",
)
parser.add_argument(
"--weight_vel",
type=float,
default=1.0,
help="Weight for the velocity loss",
)
parser.add_argument(
"--weight_vertex",
type=float,
default=0.02,
help="Weight for the vertex loss",
)
parser.add_argument(
"--ema",
type=bool,
default=True,
help="Use Exponential Moving Average of models weights",
)
parser.add_argument(
"--ema_decay",
type=float,
default=0.9999,
help="Ema decay rate",
)
parser.add_argument(
"--val_period", type=int, default=200, help="Period of validating model"
)
parser.add_argument(
"--val_repeat", type=int, default=50, help="Number of repetition of val dataset"
)
parser.add_argument(
"--save_period", type=int, default=200, help="Period of saving model"
)
args = parser.parse_args()
audio_dir = args.audio_dir
coeffs_dir = args.coeffs_dir
coeffs_std_path = args.coeffs_std_path
blendshape_deltas_path = args.blendshape_residuals_path
if blendshape_deltas_path == "":
blendshape_deltas_path = None
landmarks_path = args.landmarks_path
if landmarks_path == "":
landmarks_path = None
coeffs_std = (
None if coeffs_std_path == "" else load_blendshape_coeffs(coeffs_std_path)
)
output_dir = args.output_dir
prediction_type = args.prediction_type
window_size_min = args.window_size_min
batch_size = args.batch_size
epochs = args.epochs
num_warmup_epochs = args.num_warmup_epochs
num_workers = args.num_workers
learning_rate = args.learning_rate
uncond_prob = args.uncond_prob
unet_feature_dim = args.unet_feature_dim
weight_vel = args.weight_vel
weight_vertex = args.weight_vertex
ema = args.ema
ema_decay = args.ema_decay
val_period = args.val_period
val_repeat = args.val_repeat
save_period = args.save_period
# Initialize accelerator
accelerator = Accelerator(log_with="tensorboard", project_dir=output_dir)
if accelerator.is_main_process:
accelerator.init_trackers("SAiD")
said_model = SAID_UNet1D(
feature_dim=unet_feature_dim,
prediction_type=prediction_type,
)
| """Train the SAID_UNet1D model
"""
@dataclass
class LossStepOutput:
"""
Dataclass for the losses at each step
"""
predict: torch.FloatTensor # MAE loss for the predicted output
velocity: torch.FloatTensor # MAE loss for the velocity
vertex: Optional[torch.FloatTensor] # MAE loss for the reconstructed vertex
@dataclass
class LossEpochOutput:
"""
Dataclass for the averaged losses at each epoch
"""
total: float = 0 # Averaged total loss
predict: float = 0 # Averaged prediction loss
velocity: float = 0 # Averaged velocity loss
vertex: float = 0 # Averaged vertex loss
lr: Optional[float] = None # Last learning rate
def random_noise_loss(
said_model: SAID,
data: DataBatch,
std: Optional[torch.FloatTensor],
device: torch.device,
prediction_type: str = "epsilon",
) -> LossStepOutput:
"""Compute the loss with randomized noises
Parameters
----------
said_model : SAID
SAiD model object
data : DataBatch
Output of the BlendVOCADataset.collate_fn
std : Optional[torch.FloatTensor]
(1, x_dim), Standard deviation of coefficients
device : torch.device
GPU device
prediction_type: str
Prediction type of the scheduler function, "epsilon", "sample", or "v_prediction", by default "epsilon"
Returns
-------
LossStepOutput
Computed losses
"""
waveform = data.waveform
blendshape_coeffs = data.blendshape_coeffs.to(device)
cond = data.cond.to(device)
coeff_latents = said_model.encode_samples(
blendshape_coeffs * said_model.latent_scale
)
curr_batch_size = len(waveform)
window_size = blendshape_coeffs.shape[1]
waveform_processed = said_model.process_audio(waveform).to(device)
random_timesteps = said_model.get_random_timesteps(curr_batch_size).to(device)
cond_embedding = said_model.get_audio_embedding(waveform_processed, window_size)
uncond_embedding = said_model.null_cond_emb.repeat(
curr_batch_size, cond_embedding.shape[1], 1
)
cond_mask = cond.view(-1, 1, 1)
audio_embedding = cond_embedding * cond_mask + uncond_embedding * torch.logical_not(
cond_mask
)
noise_dict = said_model.add_noise(coeff_latents, random_timesteps)
noisy_latents = noise_dict.noisy_sample
noise = noise_dict.noise
velocity = noise_dict.velocity
pred = said_model(noisy_latents, random_timesteps, audio_embedding)
# Set answer corresponding to prediction_type
answer = None
if prediction_type == "epsilon":
answer = noise
elif prediction_type == "sample":
answer = coeff_latents
elif prediction_type == "v_prediction":
answer = velocity
criterion_pred = nn.L1Loss()
criterion_velocity = nn.L1Loss()
criterion_vertex = nn.L1Loss()
answer_reweight = answer
pred_reweight = pred
if std is not None:
answer_reweight /= std.view(1, 1, -1)
pred_reweight /= std.view(1, 1, -1)
loss_pred = criterion_pred(pred_reweight, answer_reweight)
answer_diff = answer_reweight[:, 1:, :] - answer_reweight[:, :-1, :]
pred_diff = pred_reweight[:, 1:, :] - pred_reweight[:, :-1, :]
loss_vel = criterion_velocity(pred_diff, answer_diff)
loss_vertex = None
if data.blendshape_delta is not None:
blendshape_delta = data.blendshape_delta.to(device)
b, k, v, i = blendshape_delta.shape
_, t, _ = answer.shape
blendshape_delta_norm = torch.norm(blendshape_delta, p=1, dim=[1, 2, 3]) / (
k * v * i
)
blendshape_delta_normalized = torch.div(
blendshape_delta,
blendshape_delta_norm.view(-1, 1, 1, 1),
)
be_answer = torch.bmm(answer, blendshape_delta_normalized.view(b, k, v * i))
be_pred = torch.bmm(pred, blendshape_delta_normalized.view(b, k, v * i))
# be_answer = torch.einsum("bkvi,btk->btvi", blendshape_delta_normalized, answer)
# be_pred = torch.einsum("bkvi,btk->btvi", blendshape_delta_normalized, pred)
loss_vertex = criterion_vertex(be_pred, be_answer)
return LossStepOutput(
predict=loss_pred,
velocity=loss_vel,
vertex=loss_vertex,
)
def train_epoch(
said_model: SAID,
train_dataloader: DataLoader,
optimizer: torch.optim.Optimizer,
lr_scheduler: torch.optim.lr_scheduler,
accelerator: Accelerator,
std: Optional[torch.FloatTensor],
weight_vel: float,
weight_vertex: float,
prediction_type: str = "epsilon",
ema_model: Optional[EMAModel] = None,
) -> LossEpochOutput:
"""Train the SAiD model one epoch.
Parameters
----------
said_model : SAID
SAiD model object
train_dataloader : DataLoader
Dataloader of the BlendVOCATrainDataset
optimizer : torch.optim.Optimizer
Optimizer object
lr_scheduler: torch.optim.lr_scheduler
Learning rate scheduler object
accelerator : Accelerator
Accelerator object
std : Optional[torch.FloatTensor]
(1, x_dim), Standard deviation of coefficients
weight_vel: float
Weight for the velocity loss
weight_vertex: float
Weight for the vertex loss
prediction_type: str
Prediction type of the scheduler function, "epsilon", "sample", or "v_prediction", by default "epsilon"
ema_model: Optional[EMAModel]
EMA model of said_model, by default None
Returns
-------
LossEpochOutput
Average losses
"""
device = accelerator.device
if std is not None:
std = std.to(device)
said_model.train()
train_total_losses = {
"loss": 0,
"loss_predict": 0,
"loss_velocity": 0,
"loss_vertex": 0,
}
train_total_num = 0
for data in train_dataloader:
curr_batch_size = len(data.waveform)
with accelerator.accumulate(said_model):
losses = random_noise_loss(said_model, data, std, device, prediction_type)
loss = losses.predict + weight_vel * losses.velocity
if losses.vertex is not None:
loss += weight_vertex * losses.vertex
accelerator.backward(loss)
if accelerator.sync_gradients:
accelerator.clip_grad_norm_(said_model.parameters(), 1.0)
optimizer.step()
if ema_model:
ema_model.step(said_model.parameters())
lr_scheduler.step()
optimizer.zero_grad()
train_total_losses["loss"] += loss.item() * curr_batch_size
train_total_losses["loss_predict"] += losses.predict.item() * curr_batch_size
train_total_losses["loss_velocity"] += losses.velocity.item() * curr_batch_size
if losses.vertex is not None:
train_total_losses["loss_vertex"] += losses.vertex.item() * curr_batch_size
train_total_num += curr_batch_size
train_avg_losses = LossEpochOutput(
total=train_total_losses["loss"] / train_total_num,
predict=train_total_losses["loss_predict"] / train_total_num,
velocity=train_total_losses["loss_velocity"] / train_total_num,
vertex=train_total_losses["loss_vertex"] / train_total_num,
lr=lr_scheduler.get_last_lr()[0],
)
return train_avg_losses
def validate_epoch(
said_model: SAID,
val_dataloader: DataLoader,
accelerator: Accelerator,
std: torch.FloatTensor,
weight_vel: float,
weight_vertex: float,
prediction_type: str = "epsilon",
num_repeat: int = 1,
) -> LossEpochOutput:
"""Validate the SAiD model one epoch.
Parameters
----------
said_model : SAID
SAiD model object
val_dataloader : DataLoader
Dataloader of the BlendVOCAValDataset
accelerator : Accelerator
Accelerator object
std : torch.FloatTensor
(1, x_dim), Standard deviation of coefficients
weight_vel: float
Weight for the velocity loss
weight_vertex: float
Weight for the vertex loss
prediction_type: str
Prediction type of the scheduler function, "epsilon", "sample", or "v_prediction", by default "epsilon"
num_repeat : int, optional
Number of the repetition, by default 1
Returns
-------
LossEpochOutput
Average losses
"""
device = accelerator.device
if std is not None:
std = std.to(device)
said_model.eval()
val_total_losses = {
"loss": 0,
"loss_predict": 0,
"loss_velocity": 0,
"loss_vertex": 0,
}
val_total_num = 0
with torch.no_grad():
for _ in range(num_repeat):
for data in val_dataloader:
curr_batch_size = len(data.waveform)
losses = random_noise_loss(
said_model, data, std, device, prediction_type
)
loss = losses.predict + weight_vel * losses.velocity
if losses.vertex is not None:
loss += weight_vertex * losses.vertex
val_total_losses["loss"] += loss.item() * curr_batch_size
val_total_losses["loss_predict"] += (
losses.predict.item() * curr_batch_size
)
val_total_losses["loss_velocity"] += (
losses.velocity.item() * curr_batch_size
)
if losses.vertex is not None:
val_total_losses["loss_vertex"] += (
losses.vertex.item() * curr_batch_size
)
val_total_num += curr_batch_size
val_avg_losses = LossEpochOutput(
total=val_total_losses["loss"] / val_total_num,
predict=val_total_losses["loss_predict"] / val_total_num,
velocity=val_total_losses["loss_velocity"] / val_total_num,
vertex=val_total_losses["loss_vertex"] / val_total_num,
)
return val_avg_losses
def main() -> None:
"""Main function"""
default_data_dir = pathlib.Path(__file__).resolve().parent.parent / "data"
# Arguments
parser = argparse.ArgumentParser(
description="Train the SAiD model using BlendVOCA dataset"
)
parser.add_argument(
"--audio_dir",
type=str,
default="../BlendVOCA/audio",
help="Directory of the audio data",
)
parser.add_argument(
"--coeffs_dir",
type=str,
default="../BlendVOCA/blendshape_coeffs",
help="Directory of the blendshape coefficients data",
)
parser.add_argument(
"--coeffs_std_path",
type=str,
default="", # default_data_dir / "coeffs_std.csv",
help="Path of the coeffs std data",
)
parser.add_argument(
"--blendshape_residuals_path",
type=str,
default="", # default_data_dir / "blendshape_residuals.pickle",
help="Path of the blendshape residuals",
)
parser.add_argument(
"--landmarks_path",
type=str,
default="", # default_data_dir / "FLAME_head_landmarks.txt",
help="Path of the landmarks data",
)
parser.add_argument(
"--output_dir",
type=str,
default="../output",
help="Directory of the outputs",
)
parser.add_argument(
"--prediction_type",
type=str,
default="epsilon",
help="Prediction type of the scheduler function, 'epsilon', 'sample', or 'v_prediction'",
)
parser.add_argument(
"--window_size_min",
type=int,
default=120,
help="Minimum window size of the blendshape coefficients sequence at training",
)
parser.add_argument(
"--batch_size", type=int, default=8, help="Batch size at training"
)
parser.add_argument(
"--epochs", type=int, default=100000, help="The number of epochs"
)
parser.add_argument(
"--num_warmup_epochs",
type=int,
default=5000,
help="The number of warmup epochs",
)
parser.add_argument(
"--num_workers", type=int, default=0, help="The number of workers"
)
parser.add_argument(
"--learning_rate", type=float, default=1e-5, help="Learning rate"
)
parser.add_argument(
"--uncond_prob",
type=float,
default=0.1,
help="Unconditional probability of waveform (for classifier-free guidance)",
)
parser.add_argument(
"--unet_feature_dim",
type=int,
default=-1,
help="Dimension of the latent feature of the UNet",
)
parser.add_argument(
"--weight_vel",
type=float,
default=1.0,
help="Weight for the velocity loss",
)
parser.add_argument(
"--weight_vertex",
type=float,
default=0.02,
help="Weight for the vertex loss",
)
parser.add_argument(
"--ema",
type=bool,
default=True,
help="Use Exponential Moving Average of models weights",
)
parser.add_argument(
"--ema_decay",
type=float,
default=0.9999,
help="Ema decay rate",
)
parser.add_argument(
"--val_period", type=int, default=200, help="Period of validating model"
)
parser.add_argument(
"--val_repeat", type=int, default=50, help="Number of repetition of val dataset"
)
parser.add_argument(
"--save_period", type=int, default=200, help="Period of saving model"
)
args = parser.parse_args()
audio_dir = args.audio_dir
coeffs_dir = args.coeffs_dir
coeffs_std_path = args.coeffs_std_path
blendshape_deltas_path = args.blendshape_residuals_path
if blendshape_deltas_path == "":
blendshape_deltas_path = None
landmarks_path = args.landmarks_path
if landmarks_path == "":
landmarks_path = None
coeffs_std = (
None if coeffs_std_path == "" else load_blendshape_coeffs(coeffs_std_path)
)
output_dir = args.output_dir
prediction_type = args.prediction_type
window_size_min = args.window_size_min
batch_size = args.batch_size
epochs = args.epochs
num_warmup_epochs = args.num_warmup_epochs
num_workers = args.num_workers
learning_rate = args.learning_rate
uncond_prob = args.uncond_prob
unet_feature_dim = args.unet_feature_dim
weight_vel = args.weight_vel
weight_vertex = args.weight_vertex
ema = args.ema
ema_decay = args.ema_decay
val_period = args.val_period
val_repeat = args.val_repeat
save_period = args.save_period
# Initialize accelerator
accelerator = Accelerator(log_with="tensorboard", project_dir=output_dir)
if accelerator.is_main_process:
accelerator.init_trackers("SAiD")
said_model = SAID_UNet1D(
feature_dim=unet_feature_dim,
prediction_type=prediction_type,
) | said_model.audio_encoder = ModifiedWav2Vec2Model.from_pretrained( | 2 | 2023-11-03 06:38:51+00:00 | 8k |
Harvard-Ophthalmology-AI-Lab/FairSeg | SAMed/segment_anything/modeling/sam.py | [
{
"identifier": "ImageEncoderViT",
"path": "SAMed/segment_anything/modeling/image_encoder.py",
"snippet": "class ImageEncoderViT(nn.Module):\n def __init__(\n self,\n img_size: int = 1024,\n patch_size: int = 16,\n in_chans: int = 3,\n embed_dim: int = 768,\n ... | import torch
from torch import nn
from torch.nn import functional as F
from icecream import ic
from typing import Any, Dict, List, Tuple
from .image_encoder import ImageEncoderViT
from .mask_decoder import MaskDecoder
from .prompt_encoder import PromptEncoder | 4,202 | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
class Sam(nn.Module):
mask_threshold: float = 0.0
image_format: str = "RGB"
def __init__(
self,
image_encoder: ImageEncoderViT,
prompt_encoder: PromptEncoder,
| # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
class Sam(nn.Module):
mask_threshold: float = 0.0
image_format: str = "RGB"
def __init__(
self,
image_encoder: ImageEncoderViT,
prompt_encoder: PromptEncoder, | mask_decoder: MaskDecoder, | 1 | 2023-11-03 17:05:40+00:00 | 8k |
microsoft/PLEX | scripts/exps_on_MW.py | [
{
"identifier": "pretrain_EX",
"path": "PLEX/pretraining_EX.py",
"snippet": "def pretrain_EX(cmdline_args):\n os.environ[\"NCCL_DEBUG\"] = \"INFO\"\n print(\"=== Pretraining the Execuctor ===\")\n parser = argparse.ArgumentParser()\n\n # Add all relevant command-line arguments\n add_commo... | from PLEX.pretraining_EX import pretrain_EX
from PLEX.pretraining_PL import pretrain_PL
from PLEX.finetuning import finetune
import argparse
import random | 7,155 |
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--training_stage", type=str, default='ft', help = "The training stage. Can be 'ex' (pretaining the EXecutor), 'pl' (pretraining the PLanner), or 'ft' (finetuning a pretrained PLEX)")
parser.add_argument("-d", "--data_dir", type=str, default='store/data', help = "Directory path where the training data is.")
parser.add_argument("-l", "--log_dir", type=str, default='store/logs', help = "Directory path where to output logs and model checkpoints.")
parser.add_argument("-m", "--model_file", type=str, default=None, help = "Model file path.")
parser.add_argument("-t", "--target_task", type=str, default=None, help = "Directory path where the target task's data is. NOTE: applicable only if the training stage is 'ft' (finetuning).")
parser.add_argument("-w", "--num_workers", type=int, default=0, help = "Number of worker for running the evaluation episodes. NOTE: applicable only if the training stage is 'ft' (finetuning).")
args = parser.parse_args()
common_flags = ['--relative_position_encodings', '--bc_learning_mode']
common_args = {
'seed': str(random.randint(0, 1000000)),
'data_dir': args.data_dir,
'log_dir': args.log_dir,
'robot': 'Sawyer',
'camera_names': 'corner',
'modalities_to_mask': 'proprio,action',
'record_camera': 'corner',
'image_size': '84',
'reward_type': 'sparse',
'image_encoder_arch': 'resnet18',
'impute_style': 'trainable',
'embed_dim': '256',
'future_step': '1',
'activation_function': 'relu',
'device': 'cuda',
'dropout': '0.2',
'weight_decay': '1e-05',
'warmup_steps': '200',
'batch_size': '256',
'action_output_type': 'deterministic',
'model': 'PLEX',
'obs_pred.n_layer': '3',
'obs_pred.n_head': '4',
'obs_pred.K': '30',
'inv_d_pred.n_layer': '3',
'inv_d_pred.n_head': '4',
'inv_d_pred.K': '30'
}
common_pretraining_flags = ['--no_video']
common_pretraining_args = {
'pretrain_learning_rate': '0.0005',
'pretrain_steps_per_iter': '250',
'num_steps_per_ft_eval_iter': '0',
'best_metric': 'evaluation/neg_val_error',
'validation_frac': '1.0',
'validation_samples': '30',
# Validation tasks can be any MW tasks -- we don't use validation error to stop training.
# We use the target tasks as validation tasks.
'validation_tasks': 'metaworld/hand-insert-v2/--TARGET_ROBOT--/noise0/,metaworld/door-lock-v2/--TARGET_ROBOT--/noise0/,metaworld/door-unlock-v2/--TARGET_ROBOT--/noise0/,metaworld/box-close-v2/--TARGET_ROBOT--/noise0/,metaworld/bin-picking-v2/--TARGET_ROBOT--/noise0/',
}
cmdline_args = common_flags
for k in common_args:
cmdline_args.append('--' + k)
cmdline_args.append(common_args[k])
if args.training_stage == 'ex':
cmdline_args.extend(common_pretraining_flags)
for k in common_pretraining_args:
cmdline_args.append('--' + k)
cmdline_args.append(common_pretraining_args[k])
cmdline_args.extend([
'--max_iters', '10',
# To pretrain the executor, use 75 play trajectories per task.
'--max_pretrain_trajectories', '75',
# During executor pretraining, we adapt both the executor's and the encoder's weights but keep the planner frozen.
'--image_encoder_tune_style', 'all',
'--obs_pred.transformer_tune_style', 'none',
'--inv_d_pred.transformer_tune_style', 'all',
# Use the dynamics data from Meta-World ML50's 5 downstream environments.
'--noncontextual_pretrain_tasks', 'metaworld/hand-insert-v2/--TARGET_ROBOT--/noise0.5/,metaworld/door-lock-v2/--TARGET_ROBOT--/noise0.5/,metaworld/door-unlock-v2/--TARGET_ROBOT--/noise0.5/,metaworld/box-close-v2/--TARGET_ROBOT--/noise0.5/,metaworld/bin-picking-v2/--TARGET_ROBOT--/noise0.5/',
])
pretrain_EX(cmdline_args)
elif args.training_stage == 'pl':
cmdline_args.extend(common_pretraining_flags)
for k in common_pretraining_args:
cmdline_args.append('--' + k)
cmdline_args.append(common_pretraining_args[k])
cmdline_args.extend([
'--max_iters', '10',
# To pretrain the planner, use all (100) available video demonstrations per task.
'--max_pretrain_trajectories', 100,
'--context_style', 'first-success',
'--context_from_diff_traj',
# During planner pretraining, we want to keep the encoder and the executor's weights frozen, adapting only the weights of the planner itself.
'--image_encoder_tune_style', 'none',
'--obs_pred.transformer_tune_style', 'all',
'--inv_d_pred.transformer_tune_style', 'none',
# For pretraining, use video demonstrations from Meta-World ML50's 45 pretraining tasks.
'--video_tasks', 'metaworld/pick-out-of-hole-v2/Sawyer/noise0/,metaworld/door-open-v2/Sawyer/noise0/,metaworld/pick-place-wall-v2/Sawyer/noise0/,metaworld/assembly-v2/Sawyer/noise0/,metaworld/faucet-close-v2/Sawyer/noise0/,metaworld/coffee-pull-v2/Sawyer/noise0/,metaworld/plate-slide-back-side-v2/Sawyer/noise0/,metaworld/dial-turn-v2/Sawyer/noise0/,metaworld/stick-push-v2/Sawyer/noise0/,metaworld/sweep-into-v2/Sawyer/noise0/,metaworld/handle-pull-side-v2/Sawyer/noise0/,metaworld/drawer-open-v2/Sawyer/noise0/,metaworld/window-open-v2/Sawyer/noise0/,metaworld/button-press-v2/Sawyer/noise0/,metaworld/assembly-v2/Sawyer/noise0/,metaworld/faucet-close-v2/Sawyer/noise0/,metaworld/coffee-pull-v2/Sawyer/noise0/,metaworld/plate-slide-back-side-v2/Sawyer/noise0/,metaworld/dial-turn-v2/Sawyer/noise0/,metaworld/stick-push-v2/Sawyer/noise0/,metaworld/sweep-into-v2/Sawyer/noise0/,metaworld/handle-pull-side-v2/Sawyer/noise0/,metaworld/shelf-place-v2/Sawyer/noise0/,metaworld/basketball-v2/Sawyer/noise0/,metaworld/button-press-topdown-v2/Sawyer/noise0/,metaworld/button-press-topdown-wall-v2/Sawyer/noise0/,metaworld/button-press-wall-v2/Sawyer/noise0/,metaworld/coffee-button-v2/Sawyer/noise0/,metaworld/coffee-push-v2/Sawyer/noise0/,metaworld/disassemble-v2/Sawyer/noise0/,metaworld/door-close-v2/Sawyer/noise0/,metaworld/drawer-close-v2/Sawyer/noise0/,metaworld/faucet-open-v2/Sawyer/noise0/,metaworld/hammer-v2/Sawyer/noise0/,metaworld/handle-press-side-v2/Sawyer/noise0/,metaworld/handle-press-v2/Sawyer/noise0/,metaworld/handle-pull-v2/Sawyer/noise0/,metaworld/lever-pull-v2/Sawyer/noise0/,metaworld/peg-insert-side-v2/Sawyer/noise0/,metaworld/reach-v2/Sawyer/noise0/,metaworld/push-back-v2/Sawyer/noise0/,metaworld/push-v2/Sawyer/noise0/,metaworld/pick-place-v2/Sawyer/noise0/,metaworld/plate-slide-v2/Sawyer/noise0/,metaworld/plate-slide-side-v2/Sawyer/noise0/,metaworld/plate-slide-back-v2/Sawyer/noise0/,metaworld/peg-unplug-side-v2/Sawyer/noise0/,metaworld/soccer-v2/Sawyer/noise0/,metaworl
d/stick-pull-v2/Sawyer/noise0/,metaworld/push-wall-v2/Sawyer/noise0/,metaworld/reach-wall-v2/Sawyer/noise0/,metaworld/sweep-v2/Sawyer/noise0/,metaworld/window-close-v2/Sawyer/noise0/',
'--load_path', args.model_file
])
|
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--training_stage", type=str, default='ft', help = "The training stage. Can be 'ex' (pretaining the EXecutor), 'pl' (pretraining the PLanner), or 'ft' (finetuning a pretrained PLEX)")
parser.add_argument("-d", "--data_dir", type=str, default='store/data', help = "Directory path where the training data is.")
parser.add_argument("-l", "--log_dir", type=str, default='store/logs', help = "Directory path where to output logs and model checkpoints.")
parser.add_argument("-m", "--model_file", type=str, default=None, help = "Model file path.")
parser.add_argument("-t", "--target_task", type=str, default=None, help = "Directory path where the target task's data is. NOTE: applicable only if the training stage is 'ft' (finetuning).")
parser.add_argument("-w", "--num_workers", type=int, default=0, help = "Number of worker for running the evaluation episodes. NOTE: applicable only if the training stage is 'ft' (finetuning).")
args = parser.parse_args()
common_flags = ['--relative_position_encodings', '--bc_learning_mode']
common_args = {
'seed': str(random.randint(0, 1000000)),
'data_dir': args.data_dir,
'log_dir': args.log_dir,
'robot': 'Sawyer',
'camera_names': 'corner',
'modalities_to_mask': 'proprio,action',
'record_camera': 'corner',
'image_size': '84',
'reward_type': 'sparse',
'image_encoder_arch': 'resnet18',
'impute_style': 'trainable',
'embed_dim': '256',
'future_step': '1',
'activation_function': 'relu',
'device': 'cuda',
'dropout': '0.2',
'weight_decay': '1e-05',
'warmup_steps': '200',
'batch_size': '256',
'action_output_type': 'deterministic',
'model': 'PLEX',
'obs_pred.n_layer': '3',
'obs_pred.n_head': '4',
'obs_pred.K': '30',
'inv_d_pred.n_layer': '3',
'inv_d_pred.n_head': '4',
'inv_d_pred.K': '30'
}
common_pretraining_flags = ['--no_video']
common_pretraining_args = {
'pretrain_learning_rate': '0.0005',
'pretrain_steps_per_iter': '250',
'num_steps_per_ft_eval_iter': '0',
'best_metric': 'evaluation/neg_val_error',
'validation_frac': '1.0',
'validation_samples': '30',
# Validation tasks can be any MW tasks -- we don't use validation error to stop training.
# We use the target tasks as validation tasks.
'validation_tasks': 'metaworld/hand-insert-v2/--TARGET_ROBOT--/noise0/,metaworld/door-lock-v2/--TARGET_ROBOT--/noise0/,metaworld/door-unlock-v2/--TARGET_ROBOT--/noise0/,metaworld/box-close-v2/--TARGET_ROBOT--/noise0/,metaworld/bin-picking-v2/--TARGET_ROBOT--/noise0/',
}
cmdline_args = common_flags
for k in common_args:
cmdline_args.append('--' + k)
cmdline_args.append(common_args[k])
if args.training_stage == 'ex':
cmdline_args.extend(common_pretraining_flags)
for k in common_pretraining_args:
cmdline_args.append('--' + k)
cmdline_args.append(common_pretraining_args[k])
cmdline_args.extend([
'--max_iters', '10',
# To pretrain the executor, use 75 play trajectories per task.
'--max_pretrain_trajectories', '75',
# During executor pretraining, we adapt both the executor's and the encoder's weights but keep the planner frozen.
'--image_encoder_tune_style', 'all',
'--obs_pred.transformer_tune_style', 'none',
'--inv_d_pred.transformer_tune_style', 'all',
# Use the dynamics data from Meta-World ML50's 5 downstream environments.
'--noncontextual_pretrain_tasks', 'metaworld/hand-insert-v2/--TARGET_ROBOT--/noise0.5/,metaworld/door-lock-v2/--TARGET_ROBOT--/noise0.5/,metaworld/door-unlock-v2/--TARGET_ROBOT--/noise0.5/,metaworld/box-close-v2/--TARGET_ROBOT--/noise0.5/,metaworld/bin-picking-v2/--TARGET_ROBOT--/noise0.5/',
])
pretrain_EX(cmdline_args)
elif args.training_stage == 'pl':
cmdline_args.extend(common_pretraining_flags)
for k in common_pretraining_args:
cmdline_args.append('--' + k)
cmdline_args.append(common_pretraining_args[k])
cmdline_args.extend([
'--max_iters', '10',
# To pretrain the planner, use all (100) available video demonstrations per task.
'--max_pretrain_trajectories', 100,
'--context_style', 'first-success',
'--context_from_diff_traj',
# During planner pretraining, we want to keep the encoder and the executor's weights frozen, adapting only the weights of the planner itself.
'--image_encoder_tune_style', 'none',
'--obs_pred.transformer_tune_style', 'all',
'--inv_d_pred.transformer_tune_style', 'none',
# For pretraining, use video demonstrations from Meta-World ML50's 45 pretraining tasks.
'--video_tasks', 'metaworld/pick-out-of-hole-v2/Sawyer/noise0/,metaworld/door-open-v2/Sawyer/noise0/,metaworld/pick-place-wall-v2/Sawyer/noise0/,metaworld/assembly-v2/Sawyer/noise0/,metaworld/faucet-close-v2/Sawyer/noise0/,metaworld/coffee-pull-v2/Sawyer/noise0/,metaworld/plate-slide-back-side-v2/Sawyer/noise0/,metaworld/dial-turn-v2/Sawyer/noise0/,metaworld/stick-push-v2/Sawyer/noise0/,metaworld/sweep-into-v2/Sawyer/noise0/,metaworld/handle-pull-side-v2/Sawyer/noise0/,metaworld/drawer-open-v2/Sawyer/noise0/,metaworld/window-open-v2/Sawyer/noise0/,metaworld/button-press-v2/Sawyer/noise0/,metaworld/assembly-v2/Sawyer/noise0/,metaworld/faucet-close-v2/Sawyer/noise0/,metaworld/coffee-pull-v2/Sawyer/noise0/,metaworld/plate-slide-back-side-v2/Sawyer/noise0/,metaworld/dial-turn-v2/Sawyer/noise0/,metaworld/stick-push-v2/Sawyer/noise0/,metaworld/sweep-into-v2/Sawyer/noise0/,metaworld/handle-pull-side-v2/Sawyer/noise0/,metaworld/shelf-place-v2/Sawyer/noise0/,metaworld/basketball-v2/Sawyer/noise0/,metaworld/button-press-topdown-v2/Sawyer/noise0/,metaworld/button-press-topdown-wall-v2/Sawyer/noise0/,metaworld/button-press-wall-v2/Sawyer/noise0/,metaworld/coffee-button-v2/Sawyer/noise0/,metaworld/coffee-push-v2/Sawyer/noise0/,metaworld/disassemble-v2/Sawyer/noise0/,metaworld/door-close-v2/Sawyer/noise0/,metaworld/drawer-close-v2/Sawyer/noise0/,metaworld/faucet-open-v2/Sawyer/noise0/,metaworld/hammer-v2/Sawyer/noise0/,metaworld/handle-press-side-v2/Sawyer/noise0/,metaworld/handle-press-v2/Sawyer/noise0/,metaworld/handle-pull-v2/Sawyer/noise0/,metaworld/lever-pull-v2/Sawyer/noise0/,metaworld/peg-insert-side-v2/Sawyer/noise0/,metaworld/reach-v2/Sawyer/noise0/,metaworld/push-back-v2/Sawyer/noise0/,metaworld/push-v2/Sawyer/noise0/,metaworld/pick-place-v2/Sawyer/noise0/,metaworld/plate-slide-v2/Sawyer/noise0/,metaworld/plate-slide-side-v2/Sawyer/noise0/,metaworld/plate-slide-back-v2/Sawyer/noise0/,metaworld/peg-unplug-side-v2/Sawyer/noise0/,metaworld/soccer-v2/Sawyer/noise0/,metaworl
d/stick-pull-v2/Sawyer/noise0/,metaworld/push-wall-v2/Sawyer/noise0/,metaworld/reach-wall-v2/Sawyer/noise0/,metaworld/sweep-v2/Sawyer/noise0/,metaworld/window-close-v2/Sawyer/noise0/',
'--load_path', args.model_file
]) | pretrain_PL(cmdline_args) | 1 | 2023-11-06 09:38:09+00:00 | 8k |
mitre/arlin | tests/conftest.py | [
{
"identifier": "XRLDataset",
"path": "arlin/dataset/xrl_dataset.py",
"snippet": "class XRLDataset:\n \"\"\"Class to store experiences from running a policy in an environment.\"\"\"\n\n def __init__(\n self,\n environment: gym.Env,\n collector: BaseDataCollector = RandomDataCo... | import gymnasium as gym
import pytest
from stable_baselines3 import PPO
from arlin.dataset import XRLDataset
from arlin.dataset.collectors import RandomDataCollector, SB3PPODataCollector
from arlin.dataset.collectors.datapoints import BaseDatapoint, SB3PPODatapoint
from arlin.generation import generate_clusters, generate_embeddings | 5,707 |
@pytest.fixture
def env():
# Create environment
env = gym.make("LunarLander-v2", render_mode="rgb_array")
return env
@pytest.fixture
def random_dataset(env):
# Create the datapoint collector for SB3 PPO Datapoints with the model's policy
collector = RandomDataCollector(datapoint_cls=BaseDatapoint, environment=env)
# Instantiate the XRL Dataset
dataset = XRLDataset(env, collector=collector)
dataset.fill(num_datapoints=50, randomness=0.25)
return dataset
@pytest.fixture
def random_embeddings(random_dataset):
|
@pytest.fixture
def env():
# Create environment
env = gym.make("LunarLander-v2", render_mode="rgb_array")
return env
@pytest.fixture
def random_dataset(env):
# Create the datapoint collector for SB3 PPO Datapoints with the model's policy
collector = RandomDataCollector(datapoint_cls=BaseDatapoint, environment=env)
# Instantiate the XRL Dataset
dataset = XRLDataset(env, collector=collector)
dataset.fill(num_datapoints=50, randomness=0.25)
return dataset
@pytest.fixture
def random_embeddings(random_dataset): | embeddings = generate_embeddings( | 6 | 2023-11-08 13:57:45+00:00 | 8k |
Giftify-Bot/Giftify-Bot | cogs/raffles/raffle.py | [
{
"identifier": "Giftify",
"path": "bot.py",
"snippet": "class Giftify(GiftifyHelper, commands.AutoShardedBot):\r\n user: discord.ClientUser\r\n\r\n colour: int = 0xCB3045\r\n __version_info__ = \"1.1.4\"\r\n\r\n def __init__(\r\n self,\r\n *,\r\n log_handler: LogHandler... | from typing import List, Optional, Tuple, Union
from discord import app_commands
from discord.app_commands import Range, Transform
from discord.ext import commands
from bot import Giftify
from models.raffles import Raffle
from utils.constants import GIVEAWAY_EMOJI, MONEY_EMOJI
from utils.paginator import BaseButtonPaginator
from utils.transformers import MentionablesTransformer, RaffleTransformer
from utils.tree import Interaction
import discord | 6,094 |
class RafflesPaginator(BaseButtonPaginator[Raffle]):
async def format_page(self, raffles: List[Raffle], /) -> discord.Embed:
assert self.bot is not None
extras = self.extras or {}
description = "The raffles in this guild are:\n\n"
embed = discord.Embed(
title=f"{MONEY_EMOJI} {extras['guild'].name}'s Raffles",
description=description,
color=self.bot.colour,
)
for i, raffle in enumerate(raffles):
embed.add_field(
name=f"`{i + 1}.` {raffle.name}",
value=(
f"Deputy Roles: {', '.join(role.mention for role in raffle.deputy_roles)}\n"
f"Deputy Members: {', '.join(member.mention for member in raffle.deputy_members)}\n"
f"Winner: {raffle.winner.mention if raffle.winner else None}\n"
f"Total Tickets: {sum(raffle.tickets.values())}\n"
),
inline=False,
)
embed.set_thumbnail(url=self.bot.user.display_avatar)
embed.set_footer(text=f"Page {self.current_page}/{self.total_pages}")
return embed
class TicketsLeaderboardPaginator(BaseButtonPaginator[Tuple[discord.Member, int]]):
async def format_page(
self, tickets: List[Tuple[discord.Member, int]], /
) -> discord.Embed:
assert self.bot is not None
extras = self.extras or {}
description = f"The tickets of {extras['name']} raffle are:\n\n"
for i, member_tickets in enumerate(tickets):
description += (
f"`{i + 1}.` {member_tickets[0].mention} - **{member_tickets[1]:,}**\n"
)
embed = discord.Embed(
title=f"{MONEY_EMOJI} {extras['name'].title()} Raffle",
description=description,
color=self.bot.colour,
)
embed.set_thumbnail(url=self.bot.user.display_avatar)
embed.set_footer(text=f"Page {self.current_page}/{self.total_pages}")
return embed
class RaffleBase(commands.GroupCog):
"""Cog containing admin commands for raffle management."""
bot: Giftify
@app_commands.command(name="create")
@app_commands.describe(
name="The unique name of the raffle.",
deputies="The list of members or roles who can manage the raffle.",
)
@app_commands.checks.has_permissions(manage_guild=True)
@app_commands.checks.cooldown(1, 5, key=lambda i: (i.guild, i.user.id))
async def raffle_create(
self,
|
class RafflesPaginator(BaseButtonPaginator[Raffle]):
async def format_page(self, raffles: List[Raffle], /) -> discord.Embed:
assert self.bot is not None
extras = self.extras or {}
description = "The raffles in this guild are:\n\n"
embed = discord.Embed(
title=f"{MONEY_EMOJI} {extras['guild'].name}'s Raffles",
description=description,
color=self.bot.colour,
)
for i, raffle in enumerate(raffles):
embed.add_field(
name=f"`{i + 1}.` {raffle.name}",
value=(
f"Deputy Roles: {', '.join(role.mention for role in raffle.deputy_roles)}\n"
f"Deputy Members: {', '.join(member.mention for member in raffle.deputy_members)}\n"
f"Winner: {raffle.winner.mention if raffle.winner else None}\n"
f"Total Tickets: {sum(raffle.tickets.values())}\n"
),
inline=False,
)
embed.set_thumbnail(url=self.bot.user.display_avatar)
embed.set_footer(text=f"Page {self.current_page}/{self.total_pages}")
return embed
class TicketsLeaderboardPaginator(BaseButtonPaginator[Tuple[discord.Member, int]]):
async def format_page(
self, tickets: List[Tuple[discord.Member, int]], /
) -> discord.Embed:
assert self.bot is not None
extras = self.extras or {}
description = f"The tickets of {extras['name']} raffle are:\n\n"
for i, member_tickets in enumerate(tickets):
description += (
f"`{i + 1}.` {member_tickets[0].mention} - **{member_tickets[1]:,}**\n"
)
embed = discord.Embed(
title=f"{MONEY_EMOJI} {extras['name'].title()} Raffle",
description=description,
color=self.bot.colour,
)
embed.set_thumbnail(url=self.bot.user.display_avatar)
embed.set_footer(text=f"Page {self.current_page}/{self.total_pages}")
return embed
class RaffleBase(commands.GroupCog):
"""Cog containing admin commands for raffle management."""
bot: Giftify
@app_commands.command(name="create")
@app_commands.describe(
name="The unique name of the raffle.",
deputies="The list of members or roles who can manage the raffle.",
)
@app_commands.checks.has_permissions(manage_guild=True)
@app_commands.checks.cooldown(1, 5, key=lambda i: (i.guild, i.user.id))
async def raffle_create(
self, | interaction: Interaction, | 7 | 2023-11-09 15:00:15+00:00 | 8k |
Zjy0401/CoCoFormer | conditional_generate.py | [
{
"identifier": "parse_generate_args",
"path": "utilities/argument_funcs.py",
"snippet": "def parse_generate_args():\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"-midi_root\", type=str, default=\"./dataset/dataset/JSF\", help=\"Midi file to prime the generator with\")\n pars... | import torch
import torch.nn as nn
import os
import random
import math
import mido
import music21
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from utilities.argument_funcs import parse_generate_args, print_generate_args
from model.CoCoFormer import CoCoformer
from dataset.jsf import create_jsf_datasets, compute_jsf_accuracy, process_midi
from torch.utils.data import DataLoader
from torch.optim import Adam
from utilities.constants import *
from utilities.device import get_device, use_cuda
from utilities.argument_funcs import parse_generate_args | 6,803 | if note == 0:
note = i
track.append(mido.Message('note_on', note=note, velocity=96, time=0))
else:
track.append(mido.Message('note_off', note=note, velocity=96, time=time))
note = i
time = 120
track.append(mido.Message('note_on', note=note, velocity=96, time=0))
else:
time += 120
track.append(mido.Message('note_off', note=note, velocity=96, time=time))
def decode(index, file_path, single=False):
event = [word2event[i] for i in index]
print("decoding...")
s, a, t, b = [], [], [], []
if not single:
for key, value in enumerate(index):
if key % 9 == 2:
assert value < 129
s.append(value)
continue
if key % 9 == 4:
assert value < 129
a.append(value)
continue
if key % 9 == 6:
assert value < 129
t.append(value)
continue
if key % 9 == 8:
assert value < 129
b.append(value)
continue
mid = mido.MidiFile()
track_s = mido.MidiTrack()
mid.tracks.append(track_s)
create_track(track_s, s)
track_a = mido.MidiTrack()
mid.tracks.append(track_a)
create_track(track_a, a)
track_t = mido.MidiTrack()
mid.tracks.append(track_t)
create_track(track_t, t)
track_b = mido.MidiTrack()
mid.tracks.append(track_b)
create_track(track_b, b)
else:
mid = mido.MidiFile()
track = mido.MidiTrack()
mid.tracks.append(track)
create_track(track, seq)
mid.save(file_path)
plot_pianoroll(s, a, t, b)
print("midi save in:", file_path)
def plot_pianoroll(s, a, t, b):
'''
plot painoroll
input : seqs of words
output : a image of painoroll
'''
# build matrix
pianoroll = np.ones((180, 500))
def plot_track(seq):
for k, v in enumerate(seq):
pianoroll[v, k] = 0
def plot_main_track(seq):
for k, v in enumerate(seq):
pianoroll[v, k] = 2
plot_main_track(s)
plot_track(a)
plot_track(t)
plot_track(b)
pianoroll = np.flip(pianoroll[30:100], axis=0)
cmp = matplotlib.colors.ListedColormap(['g', 'w', 'b'])
plt.figure(1)
plt.imshow(pianoroll, cmap=cmp)
plt.show()
def conditional_generate(seq, chord, bs, ba, bt, bb):
assert len(seq) == len(chord) == len(bs) == len(ba) == len(bt) == len(bb)
beats = []
for i in range(len(bs)):
beats.extend((bs[i], ba[i], bt[i], bb[i]))
# deal with input: slice it < 128
input_note, input_chord, input_beats, input_bs, input_ba, input_bt, input_bb = [], [], [], [], [], [], []
loop = int(math.ceil(len(seq)/64))
for i in range(loop):
if i+64 <= len(seq):
input_note.append(seq[i*64: (i+1)*64])
input_chord.append(chord[i*64: (i+1)*64])
input_bs.append(bs[i*64: (i+1)*64])
input_ba.append(ba[i*64: (i+1)*64])
input_bt.append(bt[i*64: (i+1)*64])
input_bb.append(bb[i*64: (i+1)*64])
else:
input_note.append(seq[i:len(seq)])
input_chord.append(chord[i:len(seq)])
input_bs.append(bs[i:len(seq)])
input_ba.append(ba[i:len(seq)])
input_bt.append(bt[i:len(seq)])
input_bb.append(bb[i:len(seq)])
for p in range(len(input_bs)):
b = []
for q in range(len(input_bs[0])):
b.extend((input_bs[p][q], input_ba[p][q], input_bt[p][q], input_bb[p][q]))
input_beats.append(b)
args = parse_generate_args()
|
##### read word2event event2word
args = parse_generate_args()
f = open(args.word2event, 'rb')
word2event = pickle.load(f)
# reverse the vector event2word
event2word = {}
for key, val in word2event.items():
event2word[val] = key
def create_track(track, seq):
'''
create a midi track of seq
'''
note = 0
time = 120
for i in seq:
if note != int(i):
if note == 0:
note = i
track.append(mido.Message('note_on', note=note, velocity=96, time=0))
else:
track.append(mido.Message('note_off', note=note, velocity=96, time=time))
note = i
time = 120
track.append(mido.Message('note_on', note=note, velocity=96, time=0))
else:
time += 120
track.append(mido.Message('note_off', note=note, velocity=96, time=time))
def decode(index, file_path, single=False):
event = [word2event[i] for i in index]
print("decoding...")
s, a, t, b = [], [], [], []
if not single:
for key, value in enumerate(index):
if key % 9 == 2:
assert value < 129
s.append(value)
continue
if key % 9 == 4:
assert value < 129
a.append(value)
continue
if key % 9 == 6:
assert value < 129
t.append(value)
continue
if key % 9 == 8:
assert value < 129
b.append(value)
continue
mid = mido.MidiFile()
track_s = mido.MidiTrack()
mid.tracks.append(track_s)
create_track(track_s, s)
track_a = mido.MidiTrack()
mid.tracks.append(track_a)
create_track(track_a, a)
track_t = mido.MidiTrack()
mid.tracks.append(track_t)
create_track(track_t, t)
track_b = mido.MidiTrack()
mid.tracks.append(track_b)
create_track(track_b, b)
else:
mid = mido.MidiFile()
track = mido.MidiTrack()
mid.tracks.append(track)
create_track(track, seq)
mid.save(file_path)
plot_pianoroll(s, a, t, b)
print("midi save in:", file_path)
def plot_pianoroll(s, a, t, b):
'''
plot painoroll
input : seqs of words
output : a image of painoroll
'''
# build matrix
pianoroll = np.ones((180, 500))
def plot_track(seq):
for k, v in enumerate(seq):
pianoroll[v, k] = 0
def plot_main_track(seq):
for k, v in enumerate(seq):
pianoroll[v, k] = 2
plot_main_track(s)
plot_track(a)
plot_track(t)
plot_track(b)
pianoroll = np.flip(pianoroll[30:100], axis=0)
cmp = matplotlib.colors.ListedColormap(['g', 'w', 'b'])
plt.figure(1)
plt.imshow(pianoroll, cmap=cmp)
plt.show()
def conditional_generate(seq, chord, bs, ba, bt, bb):
assert len(seq) == len(chord) == len(bs) == len(ba) == len(bt) == len(bb)
beats = []
for i in range(len(bs)):
beats.extend((bs[i], ba[i], bt[i], bb[i]))
# deal with input: slice it < 128
input_note, input_chord, input_beats, input_bs, input_ba, input_bt, input_bb = [], [], [], [], [], [], []
loop = int(math.ceil(len(seq)/64))
for i in range(loop):
if i+64 <= len(seq):
input_note.append(seq[i*64: (i+1)*64])
input_chord.append(chord[i*64: (i+1)*64])
input_bs.append(bs[i*64: (i+1)*64])
input_ba.append(ba[i*64: (i+1)*64])
input_bt.append(bt[i*64: (i+1)*64])
input_bb.append(bb[i*64: (i+1)*64])
else:
input_note.append(seq[i:len(seq)])
input_chord.append(chord[i:len(seq)])
input_bs.append(bs[i:len(seq)])
input_ba.append(ba[i:len(seq)])
input_bt.append(bt[i:len(seq)])
input_bb.append(bb[i:len(seq)])
for p in range(len(input_bs)):
b = []
for q in range(len(input_bs[0])):
b.extend((input_bs[p][q], input_ba[p][q], input_bt[p][q], input_bb[p][q]))
input_beats.append(b)
args = parse_generate_args() | print_generate_args(args) | 1 | 2023-11-01 08:33:08+00:00 | 8k |
emadeldeen24/ECGTransForm | trainer.py | [
{
"identifier": "ecgTransForm",
"path": "models.py",
"snippet": "class ecgTransForm(nn.Module):\r\n def __init__(self, configs, hparams):\r\n super(ecgTransForm, self).__init__()\r\n\r\n filter_sizes = [5, 9, 11]\r\n self.conv1 = nn.Conv1d(configs.input_channels, configs.mid_chan... | import torch
import torch.nn.functional as F
import os
import collections
import numpy as np
import warnings
import sklearn.exceptions
from models import ecgTransForm
from dataloader import data_generator
from configs.data_configs import get_dataset_class
from configs.hparams import get_hparams_class
from utils import AverageMeter, to_device, _save_metrics, copy_Files, _plot_umap
from utils import fix_randomness, starting_logs, save_checkpoint, _calc_metrics | 4,063 | warnings.simplefilter(action='ignore', category=FutureWarning)
class trainer(object):
def __init__(self, args):
# dataset parameters
self.dataset = args.dataset
self.seed_id = args.seed_id
self.device = torch.device(args.device)
# Exp Description
self.run_description = args.run_description
self.experiment_description = args.experiment_description
# paths
self.home_path = os.getcwd()
self.save_dir = os.path.join(os.getcwd(), "experiments_logs")
self.exp_log_dir = os.path.join(self.save_dir, self.experiment_description, self.run_description)
os.makedirs(self.exp_log_dir, exist_ok=True)
self.data_path = args.data_path
# Specify runs
self.num_runs = args.num_runs
# get dataset and base model configs
self.dataset_configs, self.hparams_class = self.get_configs()
# Specify hparams
self.hparams = self.hparams_class.train_params
def get_configs(self):
dataset_class = get_dataset_class(self.dataset)
hparams_class = get_hparams_class("supervised")
return dataset_class(), hparams_class()
def load_data(self, data_type):
self.train_dl, self.val_dl, self.test_dl, self.cw_dict = \
data_generator(self.data_path, data_type, self.hparams)
def calc_results_per_run(self):
acc, f1 = _calc_metrics(self.pred_labels, self.true_labels, self.dataset_configs.class_names)
return acc, f1
def train(self):
copy_Files(self.exp_log_dir) # save a copy of training files
self.metrics = {'accuracy': [], 'f1_score': []}
# fixing random seed
fix_randomness(int(self.seed_id))
# Logging
self.logger, self.scenario_log_dir = starting_logs(self.dataset, self.exp_log_dir, self.seed_id)
self.logger.debug(self.hparams)
# Load data
self.load_data(self.dataset)
model = ecgTransForm(configs=self.dataset_configs, hparams=self.hparams)
model.to(self.device)
# Average meters
loss_avg_meters = collections.defaultdict(lambda: AverageMeter())
self.optimizer = torch.optim.Adam(
model.parameters(),
lr=self.hparams["learning_rate"],
weight_decay=self.hparams["weight_decay"],
betas=(0.9, 0.99)
)
self.cross_entropy = torch.nn.CrossEntropyLoss(weight=torch.tensor(np.array(self.cw_dict.values())).float().to(self.device))
best_acc = 0
best_f1 = 0
# training..
for epoch in range(1, self.hparams["num_epochs"] + 1):
model.train()
for step, batches in enumerate(self.train_dl):
batches = to_device(batches, self.device)
data = batches['samples'].float()
labels = batches['labels'].long()
# ====== Source =====================
self.optimizer.zero_grad()
# Src original features
logits = model(data)
# Cross-Entropy loss
x_ent_loss = self.cross_entropy(logits, labels)
x_ent_loss.backward()
self.optimizer.step()
losses = {'Total_loss': x_ent_loss.item()}
for key, val in losses.items():
loss_avg_meters[key].update(val, self.hparams["batch_size"])
self.evaluate(model, self.val_dl)
tr_acc, tr_f1 = self.calc_results_per_run()
# logging
self.logger.debug(f'[Epoch : {epoch}/{self.hparams["num_epochs"]}]')
for key, val in loss_avg_meters.items():
self.logger.debug(f'{key}\t: {val.avg:2.4f}')
self.logger.debug(f'TRAIN: Acc:{tr_acc:2.4f} \t F1:{tr_f1:2.4f}')
# VALIDATION part
self.evaluate(model, self.val_dl)
ts_acc, ts_f1 = self.calc_results_per_run()
if ts_f1 > best_f1: # save best model based on best f1.
best_f1 = ts_f1
best_acc = ts_acc
save_checkpoint(self.exp_log_dir, model, self.dataset, self.dataset_configs, self.hparams, "best")
|
warnings.filterwarnings("ignore", category=sklearn.exceptions.UndefinedMetricWarning)
warnings.simplefilter(action='ignore', category=FutureWarning)
class trainer(object):
def __init__(self, args):
# dataset parameters
self.dataset = args.dataset
self.seed_id = args.seed_id
self.device = torch.device(args.device)
# Exp Description
self.run_description = args.run_description
self.experiment_description = args.experiment_description
# paths
self.home_path = os.getcwd()
self.save_dir = os.path.join(os.getcwd(), "experiments_logs")
self.exp_log_dir = os.path.join(self.save_dir, self.experiment_description, self.run_description)
os.makedirs(self.exp_log_dir, exist_ok=True)
self.data_path = args.data_path
# Specify runs
self.num_runs = args.num_runs
# get dataset and base model configs
self.dataset_configs, self.hparams_class = self.get_configs()
# Specify hparams
self.hparams = self.hparams_class.train_params
def get_configs(self):
dataset_class = get_dataset_class(self.dataset)
hparams_class = get_hparams_class("supervised")
return dataset_class(), hparams_class()
def load_data(self, data_type):
self.train_dl, self.val_dl, self.test_dl, self.cw_dict = \
data_generator(self.data_path, data_type, self.hparams)
def calc_results_per_run(self):
acc, f1 = _calc_metrics(self.pred_labels, self.true_labels, self.dataset_configs.class_names)
return acc, f1
def train(self):
copy_Files(self.exp_log_dir) # save a copy of training files
self.metrics = {'accuracy': [], 'f1_score': []}
# fixing random seed
fix_randomness(int(self.seed_id))
# Logging
self.logger, self.scenario_log_dir = starting_logs(self.dataset, self.exp_log_dir, self.seed_id)
self.logger.debug(self.hparams)
# Load data
self.load_data(self.dataset)
model = ecgTransForm(configs=self.dataset_configs, hparams=self.hparams)
model.to(self.device)
# Average meters
loss_avg_meters = collections.defaultdict(lambda: AverageMeter())
self.optimizer = torch.optim.Adam(
model.parameters(),
lr=self.hparams["learning_rate"],
weight_decay=self.hparams["weight_decay"],
betas=(0.9, 0.99)
)
self.cross_entropy = torch.nn.CrossEntropyLoss(weight=torch.tensor(np.array(self.cw_dict.values())).float().to(self.device))
best_acc = 0
best_f1 = 0
# training..
for epoch in range(1, self.hparams["num_epochs"] + 1):
model.train()
for step, batches in enumerate(self.train_dl):
batches = to_device(batches, self.device)
data = batches['samples'].float()
labels = batches['labels'].long()
# ====== Source =====================
self.optimizer.zero_grad()
# Src original features
logits = model(data)
# Cross-Entropy loss
x_ent_loss = self.cross_entropy(logits, labels)
x_ent_loss.backward()
self.optimizer.step()
losses = {'Total_loss': x_ent_loss.item()}
for key, val in losses.items():
loss_avg_meters[key].update(val, self.hparams["batch_size"])
self.evaluate(model, self.val_dl)
tr_acc, tr_f1 = self.calc_results_per_run()
# logging
self.logger.debug(f'[Epoch : {epoch}/{self.hparams["num_epochs"]}]')
for key, val in loss_avg_meters.items():
self.logger.debug(f'{key}\t: {val.avg:2.4f}')
self.logger.debug(f'TRAIN: Acc:{tr_acc:2.4f} \t F1:{tr_f1:2.4f}')
# VALIDATION part
self.evaluate(model, self.val_dl)
ts_acc, ts_f1 = self.calc_results_per_run()
if ts_f1 > best_f1: # save best model based on best f1.
best_f1 = ts_f1
best_acc = ts_acc
save_checkpoint(self.exp_log_dir, model, self.dataset, self.dataset_configs, self.hparams, "best") | _save_metrics(self.pred_labels, self.true_labels, self.exp_log_dir, | 6 | 2023-11-06 14:11:19+00:00 | 8k |
WMD-group/CrystalSpace | app.py | [
{
"identifier": "get_plotly_embedding",
"path": "visualize_app/visualize_embedding.py",
"snippet": "def get_plotly_embedding(\n df: pd.DataFrame = None,\n opacity: float = 0.2,\n **kwargs,\n) -> go.Figure:\n \"\"\"\n Plot the embedding of a dataframe with plotly.\n\n Args:\n df:... | import os
import ase
import pandas as pd
import dash_bootstrap_components as dbc
from pathlib import Path
from fire import Fire
from pymatgen.core import Structure
from dash import Dash, html, Input, Output, dcc, dash_table, no_update
from visualize_app.visualize_embedding import get_plotly_embedding
from visualize_app.visualize_structure import get_plotly_structure
from visualize_app.utils import fn_chemical_check, blank_fig | 3,834 | html.Div(
id="table",
),
]
),
style={"border": "none"},
),
# set the footer
# add line
html.Hr(),
dbc.Row(
[
dbc.Col(
html.H6(
html.A(
"Created by Hyunsoo Park in the Materials Design Group (Imperial College London)",
href="https://github.com/wmd-group",
style={"color": "black"},
),
style={
"textAlign": "right",
},
),
width=12,
),
# add reference 1
dbc.Col(
html.H6(
html.A(
"1. Composition generation using SMACT",
href="https://github.com/WMD-group/SMACT",
style={"color": "grey"},
),
style={
"textAlign": "right",
},
),
width=12,
),
# add reference 2
dbc.Col(
html.H6(
html.A(
"2. Element embedding vectors from ElementEmbeddings",
href="https://github.com/WMD-group/ElementEmbeddings",
style={"color": "grey"},
),
style={
"textAlign": "right",
},
),
width=12,
),
# add reference 3
dbc.Col(
html.H6(
html.A(
"3. Structure data from Materials Project",
href="https://materialsproject.org",
style={"color": "grey"},
),
style={
"textAlign": "right",
},
),
width=12,
),
# add reference 4
dbc.Col(
html.H6(
html.A(
"4. Dimensionality reduction using scikit-learn",
href="https://scikit-learn.org/stable/",
style={"color": "grey"},
),
style={
"textAlign": "right",
},
),
width=12,
),
],
justify="start",
),
]
)
# set the callback for the scatter plot
@app.callback(
[
Output("method-name", "children"),
Output("3d-scatter-plot", "figure"),
],
Input("reduction-method-select", "value"),
Input("embedding-method-select", "value"),
Input("chemical-system-select-1", "value"),
Input("chemical-system-select-2", "value"),
)
def update_3d_scatter_plot(
reduction_method,
embedding_method,
chemical_system_1,
chemical_system_2,
):
# set the path to the embedding
path_embedding = Path(PARENT_DIR, "visualize_app/assets/reduced_embeddings_3d")
path_embedding = (
path_embedding / f"{reduction_method}_{embedding_method}_mean.pkl"
)
if not path_embedding.exists():
raise FileNotFoundError(f"Embedding file {path_embedding} does not exist.")
# read the embedding
df_embedding = pd.read_pickle(path_embedding)
df_embedding.columns = ["x", "y", "z"]
df_embedding["formula"] = df_embedding.index
# merge the total data with the embedding
df_plot = df_embedding.join(LABEL_DATA)
# check if the chemical system contains the specified species
|
PARENT_DIR = Path(os.path.dirname(__file__))
# load label data
LABEL_DATA = pd.read_pickle(PARENT_DIR / "visualize_app/assets/df_binary_label.pkl")
LABEL_DATA["label"] = LABEL_DATA["label"].astype(str)
# load materials project data
MP_DATA = pd.read_pickle(PARENT_DIR / "visualize_app/assets/df_binary_mp.pkl")
def main(
debug: bool = False,
host: str = "0.0.0.0",
port: int = 8050,
):
"""Visualize the embedding of binary compounds.
:param debug: Debug mode, defaults to False
:param host: host address, defaults to "0.0.0.0"
:param port: port number, defaults to 8050
"""
# initialize the app - incorporate a Dash Bootstrap theme
external_stylesheets = [dbc.themes.MINTY]
app = Dash(__name__, external_stylesheets=external_stylesheets)
# app layout
app.layout = dbc.Container(
[
# set the app title
dbc.Row(
[
html.H1(
"Crystal Space for Binary Compounds 🔮",
style={
"textAlign": "center",
"color": "black",
},
),
html.Hr(),
]
),
# set selector for methods
dbc.Row(
[
# set selector for dimension reduction method
dbc.Col(
dbc.Select(
id="reduction-method-select",
options=[
{"label": "t-SNE", "value": "tsne"},
{"label": "UMAP", "value": "umap"},
{"label": "PCA", "value": "pca"},
],
value="umap",
),
width=3,
),
# set selector for embedding method
dbc.Col(
dbc.Select(
id="embedding-method-select",
options=[
{"label": "magpie", "value": "magpie"},
{"label": "mat2vec", "value": "mat2vec"},
{"label": "megnet16", "value": "megnet16"},
{"label": "oliynyk", "value": "oliynyk"},
{"label": "skipatom", "value": "skipatom"},
{"label": "random_200", "value": "random_200"},
],
value="magpie",
),
width=3,
),
],
justify="start",
),
html.Br(),
# set selector for chemical systems
dbc.Row(
[
# set selector for chemical system 1
dbc.Col(
dbc.Select(
id="chemical-system-select-1",
options=[
{
"label": ase.data.chemical_symbols[i],
"value": ase.data.chemical_symbols[i],
}
if i != 0
else {"label": "species 1", "value": "default"}
for i in range(104)
],
value="default",
),
width=2,
),
# set selector for chemical system 2
dbc.Col(
dbc.Select(
id="chemical-system-select-2",
options=[
{
"label": ase.data.chemical_symbols[i],
"value": ase.data.chemical_symbols[i],
}
if i != 0
else {"label": "species 2", "value": "default"}
for i in range(104)
],
value="default",
),
width=2,
),
],
justify="start",
),
dcc.Store(id="embedding-data-store", data=None),
html.Br(),
# set scatter and crystal structure
dbc.Row(
[
# set the scatter plot
dbc.Col(
dbc.Card(
[
dbc.CardHeader(
html.H4(
"Crystal Space",
style={
"textAlign": "center",
"color": "black",
},
)
),
dbc.CardBody(
[
dcc.Markdown(
id="method-name",
children="",
style={
"textAlign": "center",
"color": "black",
"fontSize": 20,
},
),
dcc.Graph(
id="3d-scatter-plot",
figure=blank_fig(),
),
]
),
]
),
width=6,
),
# set the crystal structure
dbc.Col(
dbc.Card(
[
dbc.CardHeader(
html.H4(
"Crystal Structure",
style={
"textAlign": "center",
"color": "black",
},
)
),
dbc.CardBody(
[
# name of the crystal structure
dcc.Markdown(
id="crystal-structure-name",
children="Click a point on the scatter plot",
style={
"textAlign": "center",
"color": "black",
"fontSize": 20,
},
),
# graph
dcc.Graph(
id="crystal-structure",
figure=blank_fig(),
),
]
),
]
),
width=6,
),
],
justify="start",
),
html.Br(),
# set a table with properties
dbc.Card(
dbc.CardBody(
[
html.Div(
id="table",
),
]
),
style={"border": "none"},
),
# set the footer
# add line
html.Hr(),
dbc.Row(
[
dbc.Col(
html.H6(
html.A(
"Created by Hyunsoo Park in the Materials Design Group (Imperial College London)",
href="https://github.com/wmd-group",
style={"color": "black"},
),
style={
"textAlign": "right",
},
),
width=12,
),
# add reference 1
dbc.Col(
html.H6(
html.A(
"1. Composition generation using SMACT",
href="https://github.com/WMD-group/SMACT",
style={"color": "grey"},
),
style={
"textAlign": "right",
},
),
width=12,
),
# add reference 2
dbc.Col(
html.H6(
html.A(
"2. Element embedding vectors from ElementEmbeddings",
href="https://github.com/WMD-group/ElementEmbeddings",
style={"color": "grey"},
),
style={
"textAlign": "right",
},
),
width=12,
),
# add reference 3
dbc.Col(
html.H6(
html.A(
"3. Structure data from Materials Project",
href="https://materialsproject.org",
style={"color": "grey"},
),
style={
"textAlign": "right",
},
),
width=12,
),
# add reference 4
dbc.Col(
html.H6(
html.A(
"4. Dimensionality reduction using scikit-learn",
href="https://scikit-learn.org/stable/",
style={"color": "grey"},
),
style={
"textAlign": "right",
},
),
width=12,
),
],
justify="start",
),
]
)
# set the callback for the scatter plot
@app.callback(
[
Output("method-name", "children"),
Output("3d-scatter-plot", "figure"),
],
Input("reduction-method-select", "value"),
Input("embedding-method-select", "value"),
Input("chemical-system-select-1", "value"),
Input("chemical-system-select-2", "value"),
)
def update_3d_scatter_plot(
reduction_method,
embedding_method,
chemical_system_1,
chemical_system_2,
):
# set the path to the embedding
path_embedding = Path(PARENT_DIR, "visualize_app/assets/reduced_embeddings_3d")
path_embedding = (
path_embedding / f"{reduction_method}_{embedding_method}_mean.pkl"
)
if not path_embedding.exists():
raise FileNotFoundError(f"Embedding file {path_embedding} does not exist.")
# read the embedding
df_embedding = pd.read_pickle(path_embedding)
df_embedding.columns = ["x", "y", "z"]
df_embedding["formula"] = df_embedding.index
# merge the total data with the embedding
df_plot = df_embedding.join(LABEL_DATA)
# check if the chemical system contains the specified species | mask = fn_chemical_check(df_plot, chemical_system_1, chemical_system_2) | 2 | 2023-11-07 17:10:38+00:00 | 8k |
Infotrend-Inc/OpenAI_WebUI | OpenAI_WebUI.py | [
{
"identifier": "OAI_GPT",
"path": "OpenAI_GPT.py",
"snippet": "class OAI_GPT:\n def __init__(self, apikey, save_location, models_list):\n self.last_gpt_query = 'last_gpt_query'\n\n self.apikey = apikey\n self.save_location = save_location\n\n self.models_supported = model... | import streamlit as st
import extra_streamlit_components as stx
import re
import os.path
import common_functions as cf
from OpenAI_GPT import OAI_GPT
from OpenAI_DallE import OAI_DallE
from dotenv import load_dotenv
from datetime import datetime | 6,696 | #!/usr/bin/env python3
# Based on
# https://platform.openai.com/docs/quickstart/build-your-application
# https://github.com/openai/openai-python
#####
iti_version="0.9.1"
st.set_page_config(page_title=f"OpenAI API WebUI ({iti_version})", page_icon="🫥", layout="wide", initial_sidebar_state="expanded", menu_items={'Get Help': 'https://github.com/Infotrend-Inc/OpenAI_WebUI', 'About': f"# OpenAI WebUI ({iti_version})\n Brought to you by [Infotrend Inc.](https://www.infotrend.com/)"})
#####
def main():
err = cf.check_file_r(".env", "Environment file")
if cf.isBlank(err):
load_dotenv()
# If the file is not present, hopefully the variable was set in the Docker environemnt
apikey = ''
if 'OPENAI_API_KEY' in os.environ:
apikey = os.environ.get('OPENAI_API_KEY')
if cf.isBlank(apikey):
st.error(f"Could not find the OPENAI_API_KEY environment variable")
cf.error_exit(f"Could not find the OPENAI_API_KEY environment variable")
save_location = ""
if 'OAIWUI_SAVEDIR' in os.environ:
save_location = os.environ.get('OAIWUI_SAVEDIR')
if cf.isBlank(save_location):
st.error(f"Could not find the OAIWUI_SAVEDIR environment variable")
cf.error_exit("Could not find the OAIWUI_SAVEDIR environment variable")
err = cf.check_existing_dir_w(save_location, "OAIWUI_SAVEDIR directory")
if cf.isNotBlank(err):
st.error(f"While ching OAIWUI_SAVEDIR: {err}")
cf.error_exit(f"{err}")
gpt_models = ""
if 'OAIWUI_GPT_MODELS' in os.environ:
gpt_models = os.environ.get('OAIWUI_GPT_MODELS')
else:
st.error(f"Could not find the OAIWUI_GPT_MODELS environment variable")
cf.error_exit("Could not find the OAIWUI_GPT_MODELS environment variable")
if cf.isBlank(gpt_models):
st.error(f"OAIWUI_GPT_MODELS environment variable is empty")
cf.error_exit("OAIWUI_GPT_MODELS environment variable is empty")
dalle_models = ""
if 'OAIWUI_DALLE_MODELS' in os.environ:
dalle_models = os.environ.get('OAIWUI_DALLE_MODELS')
else:
st.error(f"Could not find the OAIWUI_DALLE_MODELS environment variable")
cf.error_exit("Could not find the OAIWUI_DALLE_MODELS environment variable")
if cf.isBlank(dalle_models):
st.error(f"OAIWUI_DALLE_MODELS environment variable is empty")
cf.error_exit("OAIWUI_DALLE_MODELS environment variable is empty")
username = ""
if 'OAIWUI_USERNAME' in os.environ:
username = os.environ.get('OAIWUI_USERNAME')
if cf.isBlank(username):
st.warning(f"OAIWUI_USERNAME provided but empty, will ask for username")
else:
st.session_state['username'] = username
# Store the initial value of widgets in session state
if "visibility" not in st.session_state:
st.session_state.visibility = "visible"
st.session_state.disabled = False
if 'webui_runid' not in st.session_state:
st.session_state['webui_runid'] = datetime.now().strftime("%Y%m%d-%H%M%S")
st.empty()
# Grab a session-specific value for username
username = ""
if 'username' in st.session_state:
username = st.session_state['username']
if cf.isBlank(username):
st.image("./assets/Infotrend_Logo.png", width=600)
username = st.text_input("Enter a username (unauthorized characters will be replaced by _)")
if st.button("Save username"):
# replace non alphanumeric by _
username = re.sub('[^0-9a-zA-Z]+', '_', username)
if cf.isBlank(username):
st.error(f"Username cannot be empty")
else:
st.session_state['username'] = username
st.rerun()
else:
cf.make_wdir_error(os.path.join(save_location))
cf.make_wdir_error(os.path.join(save_location, iti_version))
long_save_location = os.path.join(save_location, iti_version, username)
cf.make_wdir_error(os.path.join(long_save_location))
cf.make_wdir_error(os.path.join(long_save_location, "dalle"))
cf.make_wdir_error(os.path.join(long_save_location, "gpt"))
set_ui(long_save_location, apikey, gpt_models, dalle_models)
#####
def set_ui(long_save_location, apikey, gpt_models, dalle_models):
oai_gpt = OAI_GPT(apikey, long_save_location, gpt_models)
oai_dalle = None
if 'OAIWUI_GPT_ONLY' in os.environ:
tmp = os.environ.get('OAIWUI_GPT_ONLY')
if tmp == "True":
oai_dalle = None
elif tmp == "False":
| #!/usr/bin/env python3
# Based on
# https://platform.openai.com/docs/quickstart/build-your-application
# https://github.com/openai/openai-python
#####
iti_version="0.9.1"
st.set_page_config(page_title=f"OpenAI API WebUI ({iti_version})", page_icon="🫥", layout="wide", initial_sidebar_state="expanded", menu_items={'Get Help': 'https://github.com/Infotrend-Inc/OpenAI_WebUI', 'About': f"# OpenAI WebUI ({iti_version})\n Brought to you by [Infotrend Inc.](https://www.infotrend.com/)"})
#####
def main():
err = cf.check_file_r(".env", "Environment file")
if cf.isBlank(err):
load_dotenv()
# If the file is not present, hopefully the variable was set in the Docker environemnt
apikey = ''
if 'OPENAI_API_KEY' in os.environ:
apikey = os.environ.get('OPENAI_API_KEY')
if cf.isBlank(apikey):
st.error(f"Could not find the OPENAI_API_KEY environment variable")
cf.error_exit(f"Could not find the OPENAI_API_KEY environment variable")
save_location = ""
if 'OAIWUI_SAVEDIR' in os.environ:
save_location = os.environ.get('OAIWUI_SAVEDIR')
if cf.isBlank(save_location):
st.error(f"Could not find the OAIWUI_SAVEDIR environment variable")
cf.error_exit("Could not find the OAIWUI_SAVEDIR environment variable")
err = cf.check_existing_dir_w(save_location, "OAIWUI_SAVEDIR directory")
if cf.isNotBlank(err):
st.error(f"While ching OAIWUI_SAVEDIR: {err}")
cf.error_exit(f"{err}")
gpt_models = ""
if 'OAIWUI_GPT_MODELS' in os.environ:
gpt_models = os.environ.get('OAIWUI_GPT_MODELS')
else:
st.error(f"Could not find the OAIWUI_GPT_MODELS environment variable")
cf.error_exit("Could not find the OAIWUI_GPT_MODELS environment variable")
if cf.isBlank(gpt_models):
st.error(f"OAIWUI_GPT_MODELS environment variable is empty")
cf.error_exit("OAIWUI_GPT_MODELS environment variable is empty")
dalle_models = ""
if 'OAIWUI_DALLE_MODELS' in os.environ:
dalle_models = os.environ.get('OAIWUI_DALLE_MODELS')
else:
st.error(f"Could not find the OAIWUI_DALLE_MODELS environment variable")
cf.error_exit("Could not find the OAIWUI_DALLE_MODELS environment variable")
if cf.isBlank(dalle_models):
st.error(f"OAIWUI_DALLE_MODELS environment variable is empty")
cf.error_exit("OAIWUI_DALLE_MODELS environment variable is empty")
username = ""
if 'OAIWUI_USERNAME' in os.environ:
username = os.environ.get('OAIWUI_USERNAME')
if cf.isBlank(username):
st.warning(f"OAIWUI_USERNAME provided but empty, will ask for username")
else:
st.session_state['username'] = username
# Store the initial value of widgets in session state
if "visibility" not in st.session_state:
st.session_state.visibility = "visible"
st.session_state.disabled = False
if 'webui_runid' not in st.session_state:
st.session_state['webui_runid'] = datetime.now().strftime("%Y%m%d-%H%M%S")
st.empty()
# Grab a session-specific value for username
username = ""
if 'username' in st.session_state:
username = st.session_state['username']
if cf.isBlank(username):
st.image("./assets/Infotrend_Logo.png", width=600)
username = st.text_input("Enter a username (unauthorized characters will be replaced by _)")
if st.button("Save username"):
# replace non alphanumeric by _
username = re.sub('[^0-9a-zA-Z]+', '_', username)
if cf.isBlank(username):
st.error(f"Username cannot be empty")
else:
st.session_state['username'] = username
st.rerun()
else:
cf.make_wdir_error(os.path.join(save_location))
cf.make_wdir_error(os.path.join(save_location, iti_version))
long_save_location = os.path.join(save_location, iti_version, username)
cf.make_wdir_error(os.path.join(long_save_location))
cf.make_wdir_error(os.path.join(long_save_location, "dalle"))
cf.make_wdir_error(os.path.join(long_save_location, "gpt"))
set_ui(long_save_location, apikey, gpt_models, dalle_models)
#####
def set_ui(long_save_location, apikey, gpt_models, dalle_models):
oai_gpt = OAI_GPT(apikey, long_save_location, gpt_models)
oai_dalle = None
if 'OAIWUI_GPT_ONLY' in os.environ:
tmp = os.environ.get('OAIWUI_GPT_ONLY')
if tmp == "True":
oai_dalle = None
elif tmp == "False": | oai_dalle = OAI_DallE(apikey, long_save_location, dalle_models) | 1 | 2023-11-09 16:01:20+00:00 | 8k |
serl-robot/serl | serl/agents/ddpg/pixel_ddpg_learner.py | [
{
"identifier": "batched_random_crop",
"path": "serl/utils/augmentations.py",
"snippet": "def batched_random_crop(key, obs, pixel_key, padding=4):\n imgs = obs[pixel_key]\n keys = jax.random.split(key, imgs.shape[0])\n imgs = jax.vmap(random_crop, (0, 0, None))(keys, imgs, padding)\n return ... | from functools import partial
from itertools import zip_longest
from typing import Callable, Dict, Optional, Sequence, Tuple, OrderedDict
from collections import OrderedDict
from jax import numpy as jnp
from flax import struct
from flax.core import FrozenDict, freeze
from flax.training.train_state import TrainState
from serl.utils.augmentations import batched_random_crop
from serl.agents.ddpg.ddpg_learner import DDPGLearner
from serl.data.dataset import DatasetDict
from serl.distributions import TanhNormal
from serl.networks import MLP, Ensemble, PixelMultiplexer, StateActionValue
from serl.networks.encoders import TwoMobileNetEncoder, TwoD4PGEncoder
from serl.utils.commons import _unpack, _share_encoder
from jax_resnet import pretrained_resnet, slice_variables
from jeffnet.linen import create_model, EfficientNet
import gym
import jax
import optax
import flax.linen as nn | 5,543 | """Implementations of algorithms for continuous control."""
# from flax.training import checkpoints
class PixelDDPGLearner(DDPGLearner):
data_augmentation_fn: Callable = struct.field(pytree_node=False)
@classmethod
def create(
cls,
seed: int,
observation_space: gym.Space,
action_space: gym.Space,
actor_lr: float = 3e-4,
critic_lr: float = 3e-4,
cnn_features: Sequence[int] = (32, 32, 32, 32),
cnn_filters: Sequence[int] = (3, 3, 3, 3),
cnn_strides: Sequence[int] = (2, 1, 1, 1),
cnn_padding: str = "VALID",
latent_dim: int = 50,
encoder: str = "d4pg",
hidden_dims: Sequence[int] = (256, 256),
discount: float = 0.99,
tau: float = 0.005,
critic_dropout_rate: Optional[float] = None,
critic_layer_norm: bool = False,
pixel_keys: Tuple[str, ...] = ("pixels",),
depth_keys: Tuple[str, ...] = (),
):
"""
An implementation of pixel-based DDPG
"""
action_dim = action_space.shape[-1]
observations = observation_space.sample()
actions = action_space.sample()
rng = jax.random.PRNGKey(seed)
rng, actor_key, critic_key = jax.random.split(rng, 3)
if encoder == "d4pg":
encoder_cls = partial(
TwoD4PGEncoder,
features=cnn_features,
filters=cnn_filters,
strides=cnn_strides,
padding=cnn_padding,
)
elif encoder == "resnet":
# TODO: option 1 refactor this to use ResNet from huggingface, option 2 use jax_resnet
raise NotImplementedError
ResNet, resnet_variables = pretrained_resnet(18)
ResNet = ResNet()
ResNet = nn.Sequential(ResNet.layers[0:3])
resnet_variables = slice_variables(resnet_variables, end=3)
encoder_cls = partial(TwoResNetEncoder, resnet=ResNet, params=resnet_variables)
elif encoder == "mobilenet":
# TODO: unfortunately, huggingface does not support many visual encoders in JAX, so we have to reply on https://github.com/Leo428/efficientnet-jax, forked from @rwightman
MobileNet, mobilenet_variables = create_model('tf_mobilenetv3_large_100', pretrained=True)
encoder_cls = partial(TwoMobileNetEncoder, mobilenet=MobileNet, params=mobilenet_variables)
actor_base_cls = partial(MLP, hidden_dims=hidden_dims, activate_final=True)
actor_cls = partial(TanhNormal, base_cls=actor_base_cls, action_dim=action_dim)
actor_def = PixelMultiplexer(
encoder_cls=encoder_cls,
network_cls=actor_cls,
latent_dim=latent_dim,
stop_gradient=True, # do not update the encoder params
pixel_keys=pixel_keys,
depth_keys=depth_keys,
)
actor_params = actor_def.init(actor_key, observations)["params"]
actor = TrainState.create(
apply_fn=actor_def.apply,
params=actor_params,
tx=optax.adam(learning_rate=actor_lr),
)
critic_base_cls = partial(
MLP,
hidden_dims=hidden_dims,
activate_final=True,
dropout_rate=critic_dropout_rate,
use_layer_norm=critic_layer_norm,
)
| """Implementations of algorithms for continuous control."""
# from flax.training import checkpoints
class PixelDDPGLearner(DDPGLearner):
data_augmentation_fn: Callable = struct.field(pytree_node=False)
@classmethod
def create(
cls,
seed: int,
observation_space: gym.Space,
action_space: gym.Space,
actor_lr: float = 3e-4,
critic_lr: float = 3e-4,
cnn_features: Sequence[int] = (32, 32, 32, 32),
cnn_filters: Sequence[int] = (3, 3, 3, 3),
cnn_strides: Sequence[int] = (2, 1, 1, 1),
cnn_padding: str = "VALID",
latent_dim: int = 50,
encoder: str = "d4pg",
hidden_dims: Sequence[int] = (256, 256),
discount: float = 0.99,
tau: float = 0.005,
critic_dropout_rate: Optional[float] = None,
critic_layer_norm: bool = False,
pixel_keys: Tuple[str, ...] = ("pixels",),
depth_keys: Tuple[str, ...] = (),
):
"""
An implementation of pixel-based DDPG
"""
action_dim = action_space.shape[-1]
observations = observation_space.sample()
actions = action_space.sample()
rng = jax.random.PRNGKey(seed)
rng, actor_key, critic_key = jax.random.split(rng, 3)
if encoder == "d4pg":
encoder_cls = partial(
TwoD4PGEncoder,
features=cnn_features,
filters=cnn_filters,
strides=cnn_strides,
padding=cnn_padding,
)
elif encoder == "resnet":
# TODO: option 1 refactor this to use ResNet from huggingface, option 2 use jax_resnet
raise NotImplementedError
ResNet, resnet_variables = pretrained_resnet(18)
ResNet = ResNet()
ResNet = nn.Sequential(ResNet.layers[0:3])
resnet_variables = slice_variables(resnet_variables, end=3)
encoder_cls = partial(TwoResNetEncoder, resnet=ResNet, params=resnet_variables)
elif encoder == "mobilenet":
# TODO: unfortunately, huggingface does not support many visual encoders in JAX, so we have to reply on https://github.com/Leo428/efficientnet-jax, forked from @rwightman
MobileNet, mobilenet_variables = create_model('tf_mobilenetv3_large_100', pretrained=True)
encoder_cls = partial(TwoMobileNetEncoder, mobilenet=MobileNet, params=mobilenet_variables)
actor_base_cls = partial(MLP, hidden_dims=hidden_dims, activate_final=True)
actor_cls = partial(TanhNormal, base_cls=actor_base_cls, action_dim=action_dim)
actor_def = PixelMultiplexer(
encoder_cls=encoder_cls,
network_cls=actor_cls,
latent_dim=latent_dim,
stop_gradient=True, # do not update the encoder params
pixel_keys=pixel_keys,
depth_keys=depth_keys,
)
actor_params = actor_def.init(actor_key, observations)["params"]
actor = TrainState.create(
apply_fn=actor_def.apply,
params=actor_params,
tx=optax.adam(learning_rate=actor_lr),
)
critic_base_cls = partial(
MLP,
hidden_dims=hidden_dims,
activate_final=True,
dropout_rate=critic_dropout_rate,
use_layer_norm=critic_layer_norm,
) | critic_cls = partial(StateActionValue, base_cls=critic_base_cls) | 7 | 2023-11-02 23:32:24+00:00 | 8k |
daily-demos/ai-meeting-assistant | server/call/session.py | [
{
"identifier": "BotConfig",
"path": "server/config.py",
"snippet": "class BotConfig:\n _openai_api_key: str = None\n _openai_model_name: str = None\n _log_dir_path: str = None\n _daily_room_url: str = None\n _daily_meeting_token: str = None\n\n def __init__(self,\n ope... | import asyncio
import atexit
import dataclasses
import json
import logging
import os.path
import sys
import threading
import time
from asyncio import Future
from datetime import datetime
from logging import Logger
from typing import Mapping, Any
from urllib.parse import urlparse
from daily import Daily, EventHandler, CallClient
from server.config import BotConfig, get_headless_config
from server.llm.openai_assistant import OpenAIAssistant
from server.llm.assistant import Assistant, NoContextError | 3,626 | """Class representing a single meeting happening within a Daily room.
This is responsible for all Daily operations."""
from __future__ import annotations
@dataclasses.dataclass
class Room:
"""Class representing a Daily video call room"""
url: str = None
token: str = None
name: str = None
@dataclasses.dataclass
class Summary:
"""Class representing a Daily meeting summary"""
content: str
retrieved_at: time.time()
class Session(EventHandler):
"""Class representing a single meeting happening within a Daily room."""
| """Class representing a single meeting happening within a Daily room.
This is responsible for all Daily operations."""
from __future__ import annotations
@dataclasses.dataclass
class Room:
"""Class representing a Daily video call room"""
url: str = None
token: str = None
name: str = None
@dataclasses.dataclass
class Summary:
"""Class representing a Daily meeting summary"""
content: str
retrieved_at: time.time()
class Session(EventHandler):
"""Class representing a single meeting happening within a Daily room."""
| _config: BotConfig | 0 | 2023-11-02 11:17:16+00:00 | 8k |
tiendatnguyen-vision/Orbit-symmetrize | RotatedMNIST/LPS_orbit/emlp-pytorch/emlp_pytorch/reps/linear_operators.py | [
{
"identifier": "LinearOperator",
"path": "RotatedMNIST/LPS_orbit/emlp-pytorch/emlp_pytorch/reps/linear_operator_base.py",
"snippet": "class LinearOperator(nn.Module):\n \"\"\" Common interface for performing matrix vector products\n Many iterative methods (e.g. cg, gmres) do not need to know the\... | from functools import reduce
from .linear_operator_base import LinearOperator, Lazy
from .utils import dtype_cast, device_cast, get_device
import torch | 4,554 | """ Abstract linear algebra library. """
def product(c):
""" Product of a list of numbers. """
return reduce(lambda a, b: a*b, c)
def lazify(x):
""" Convert a tensor LinearOperator. """
if isinstance(x, LinearOperator):
return x
if torch.is_tensor(x):
| """ Abstract linear algebra library. """
def product(c):
""" Product of a list of numbers. """
return reduce(lambda a, b: a*b, c)
def lazify(x):
""" Convert a tensor LinearOperator. """
if isinstance(x, LinearOperator):
return x
if torch.is_tensor(x): | return Lazy(x) | 1 | 2023-11-01 07:19:02+00:00 | 8k |
xenxxxx/BitPay-Crypto-Signal-Trading-Bot | tests/test_freqtradebot.py | [
{
"identifier": "EXMS",
"path": "tests/conftest.py",
"snippet": "EXMS = 'freqtrade.exchange.exchange.Exchange'"
},
{
"identifier": "create_mock_trades",
"path": "tests/conftest.py",
"snippet": "def create_mock_trades(fee, is_short: Optional[bool] = False, use_db: bool = True):\n \"\"\... | import logging
import time
import pytest
from copy import deepcopy
from datetime import timedelta
from typing import List
from unittest.mock import ANY, MagicMock, PropertyMock, patch
from pandas import DataFrame
from sqlalchemy import select
from freqtrade.constants import CANCEL_REASON, UNLIMITED_STAKE_AMOUNT
from freqtrade.enums import (CandleType, ExitCheckTuple, ExitType, RPCMessageType, RunMode,
SignalDirection, State)
from freqtrade.exceptions import (DependencyException, ExchangeError, InsufficientFundsError,
InvalidOrderException, OperationalException, PricingError,
TemporaryError)
from freqtrade.freqtradebot import FreqtradeBot
from freqtrade.persistence import Order, PairLocks, Trade
from freqtrade.persistence.models import PairLock
from freqtrade.plugins.protections.iprotection import ProtectionReturn
from freqtrade.util.datetime_helpers import dt_now, dt_utc
from freqtrade.worker import Worker
from tests.conftest import (EXMS, create_mock_trades, create_mock_trades_usdt,
get_patched_freqtradebot, get_patched_worker, log_has, log_has_re,
patch_edge, patch_exchange, patch_get_signal, patch_wallet,
patch_whitelist)
from tests.conftest_trades import (MOCK_TRADE_COUNT, entry_side, exit_side, mock_order_2,
mock_order_2_sell, mock_order_3, mock_order_3_sell, mock_order_4,
mock_order_5_stoploss, mock_order_6_sell)
from tests.conftest_trades_usdt import mock_trade_usdt_4 | 6,431 | (False, 1, 1),
(True, 1, 1),
])
def test_cancel_all_open_orders(mocker, default_conf_usdt, fee, limit_order, limit_order_open,
is_short, buy_calls, sell_calls):
default_conf_usdt['cancel_open_orders_on_exit'] = True
mocker.patch(
f'{EXMS}.fetch_order',
side_effect=[
ExchangeError(),
limit_order[exit_side(is_short)],
limit_order_open[entry_side(is_short)],
limit_order_open[exit_side(is_short)],
]
)
buy_mock = mocker.patch('freqtrade.freqtradebot.FreqtradeBot.handle_cancel_enter')
sell_mock = mocker.patch('freqtrade.freqtradebot.FreqtradeBot.handle_cancel_exit')
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
create_mock_trades(fee, is_short=is_short)
trades = Trade.session.scalars(select(Trade)).all()
assert len(trades) == MOCK_TRADE_COUNT
freqtrade.cancel_all_open_orders()
assert buy_mock.call_count == buy_calls
assert sell_mock.call_count == sell_calls
@pytest.mark.usefixtures("init_persistence")
@pytest.mark.parametrize("is_short", [False, True])
def test_check_for_open_trades(mocker, default_conf_usdt, fee, is_short):
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
freqtrade.check_for_open_trades()
assert freqtrade.rpc.send_msg.call_count == 0
create_mock_trades(fee, is_short)
trade = Trade.session.scalars(select(Trade)).first()
trade.is_short = is_short
trade.is_open = True
freqtrade.check_for_open_trades()
assert freqtrade.rpc.send_msg.call_count == 1
assert 'Handle these trades manually' in freqtrade.rpc.send_msg.call_args[0][0]['status']
@pytest.mark.parametrize("is_short", [False, True])
@pytest.mark.usefixtures("init_persistence")
def test_startup_update_open_orders(mocker, default_conf_usdt, fee, caplog, is_short):
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
create_mock_trades(fee, is_short=is_short)
freqtrade.startup_update_open_orders()
assert not log_has_re(r"Error updating Order .*", caplog)
caplog.clear()
freqtrade.config['dry_run'] = False
freqtrade.startup_update_open_orders()
assert len(Order.get_open_orders()) == 4
matching_buy_order = mock_order_4(is_short=is_short)
matching_buy_order.update({
'status': 'closed',
})
mocker.patch(f'{EXMS}.fetch_order', return_value=matching_buy_order)
freqtrade.startup_update_open_orders()
# Only stoploss and sell orders are kept open
assert len(Order.get_open_orders()) == 3
caplog.clear()
mocker.patch(f'{EXMS}.fetch_order', side_effect=ExchangeError)
freqtrade.startup_update_open_orders()
assert log_has_re(r"Error updating Order .*", caplog)
mocker.patch(f'{EXMS}.fetch_order', side_effect=InvalidOrderException)
hto_mock = mocker.patch('freqtrade.freqtradebot.FreqtradeBot.handle_cancel_order')
# Orders which are no longer found after X days should be assumed as canceled.
freqtrade.startup_update_open_orders()
assert log_has_re(r"Order is older than \d days.*", caplog)
assert hto_mock.call_count == 3
assert hto_mock.call_args_list[0][0][0]['status'] == 'canceled'
assert hto_mock.call_args_list[1][0][0]['status'] == 'canceled'
@pytest.mark.usefixtures("init_persistence")
def test_startup_backpopulate_precision(mocker, default_conf_usdt, fee, caplog):
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
create_mock_trades_usdt(fee)
trades = Trade.get_trades().all()
trades[-1].exchange = 'some_other_exchange'
for trade in trades:
assert trade.price_precision is None
assert trade.amount_precision is None
assert trade.precision_mode is None
freqtrade.startup_backpopulate_precision()
trades = Trade.get_trades().all()
for trade in trades:
if trade.exchange == 'some_other_exchange':
assert trade.price_precision is None
assert trade.amount_precision is None
assert trade.precision_mode is None
else:
assert trade.price_precision is not None
assert trade.amount_precision is not None
assert trade.precision_mode is not None
@pytest.mark.usefixtures("init_persistence")
@pytest.mark.parametrize("is_short", [False, True])
def test_update_trades_without_assigned_fees(mocker, default_conf_usdt, fee, is_short):
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
def patch_with_fee(order):
order.update({'fee': {'cost': 0.1, 'rate': 0.01,
'currency': order['symbol'].split('/')[0]}})
return order
mocker.patch(f'{EXMS}.fetch_order_or_stoploss_order',
side_effect=[
| # pragma pylint: disable=missing-docstring, C0103
# pragma pylint: disable=protected-access, too-many-lines, invalid-name, too-many-arguments
def patch_RPCManager(mocker) -> MagicMock:
"""
This function mock RPC manager to avoid repeating this code in almost every tests
:param mocker: mocker to patch RPCManager class
:return: RPCManager.send_msg MagicMock to track if this method is called
"""
mocker.patch('freqtrade.rpc.telegram.Telegram', MagicMock())
rpc_mock = mocker.patch('freqtrade.freqtradebot.RPCManager.send_msg', MagicMock())
return rpc_mock
# Unit tests
def test_freqtradebot_state(mocker, default_conf_usdt, markets) -> None:
mocker.patch(f'{EXMS}.markets', PropertyMock(return_value=markets))
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
assert freqtrade.state is State.RUNNING
default_conf_usdt.pop('initial_state')
freqtrade = FreqtradeBot(default_conf_usdt)
assert freqtrade.state is State.STOPPED
def test_process_stopped(mocker, default_conf_usdt) -> None:
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
coo_mock = mocker.patch('freqtrade.freqtradebot.FreqtradeBot.cancel_all_open_orders')
freqtrade.process_stopped()
assert coo_mock.call_count == 0
default_conf_usdt['cancel_open_orders_on_exit'] = True
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
freqtrade.process_stopped()
assert coo_mock.call_count == 1
def test_process_calls_sendmsg(mocker, default_conf_usdt) -> None:
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
freqtrade.process()
assert freqtrade.rpc.process_msg_queue.call_count == 1
def test_bot_cleanup(mocker, default_conf_usdt, caplog) -> None:
mock_cleanup = mocker.patch('freqtrade.freqtradebot.Trade.commit')
coo_mock = mocker.patch('freqtrade.freqtradebot.FreqtradeBot.cancel_all_open_orders')
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
freqtrade.cleanup()
assert log_has('Cleaning up modules ...', caplog)
assert mock_cleanup.call_count == 1
assert coo_mock.call_count == 0
freqtrade.config['cancel_open_orders_on_exit'] = True
freqtrade.cleanup()
assert coo_mock.call_count == 1
def test_bot_cleanup_db_errors(mocker, default_conf_usdt, caplog) -> None:
mocker.patch('freqtrade.freqtradebot.Trade.commit',
side_effect=OperationalException())
mocker.patch('freqtrade.freqtradebot.FreqtradeBot.check_for_open_trades',
side_effect=OperationalException())
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
freqtrade.emc = MagicMock()
freqtrade.emc.shutdown = MagicMock()
freqtrade.cleanup()
assert freqtrade.emc.shutdown.call_count == 1
@pytest.mark.parametrize('runmode', [
RunMode.DRY_RUN,
RunMode.LIVE
])
def test_order_dict(default_conf_usdt, mocker, runmode, caplog) -> None:
patch_RPCManager(mocker)
patch_exchange(mocker)
conf = default_conf_usdt.copy()
conf['runmode'] = runmode
conf['order_types'] = {
'entry': 'market',
'exit': 'limit',
'stoploss': 'limit',
'stoploss_on_exchange': True,
}
conf['entry_pricing']['price_side'] = 'ask'
freqtrade = FreqtradeBot(conf)
if runmode == RunMode.LIVE:
assert not log_has_re(r".*stoploss_on_exchange .* dry-run", caplog)
assert freqtrade.strategy.order_types['stoploss_on_exchange']
caplog.clear()
# is left untouched
conf = default_conf_usdt.copy()
conf['runmode'] = runmode
conf['order_types'] = {
'entry': 'market',
'exit': 'limit',
'stoploss': 'limit',
'stoploss_on_exchange': False,
}
freqtrade = FreqtradeBot(conf)
assert not freqtrade.strategy.order_types['stoploss_on_exchange']
assert not log_has_re(r".*stoploss_on_exchange .* dry-run", caplog)
def test_get_trade_stake_amount(default_conf_usdt, mocker) -> None:
patch_RPCManager(mocker)
patch_exchange(mocker)
freqtrade = FreqtradeBot(default_conf_usdt)
result = freqtrade.wallets.get_trade_stake_amount('ETH/USDT')
assert result == default_conf_usdt['stake_amount']
@pytest.mark.parametrize('runmode', [
RunMode.DRY_RUN,
RunMode.LIVE
])
def test_load_strategy_no_keys(default_conf_usdt, mocker, runmode, caplog) -> None:
patch_RPCManager(mocker)
patch_exchange(mocker)
conf = deepcopy(default_conf_usdt)
conf['runmode'] = runmode
erm = mocker.patch('freqtrade.freqtradebot.ExchangeResolver.load_exchange')
freqtrade = FreqtradeBot(conf)
strategy_config = freqtrade.strategy.config
assert id(strategy_config['exchange']) == id(conf['exchange'])
# Keys have been removed and are not passed to the exchange
assert strategy_config['exchange']['key'] == ''
assert strategy_config['exchange']['secret'] == ''
assert erm.call_count == 1
ex_conf = erm.call_args_list[0][1]['exchange_config']
assert id(ex_conf) != id(conf['exchange'])
# Keys are still present
assert ex_conf['key'] != ''
assert ex_conf['key'] == default_conf_usdt['exchange']['key']
assert ex_conf['secret'] != ''
assert ex_conf['secret'] == default_conf_usdt['exchange']['secret']
@pytest.mark.parametrize("amend_last,wallet,max_open,lsamr,expected", [
(False, 120, 2, 0.5, [60, None]),
(True, 120, 2, 0.5, [60, 58.8]),
(False, 180, 3, 0.5, [60, 60, None]),
(True, 180, 3, 0.5, [60, 60, 58.2]),
(False, 122, 3, 0.5, [60, 60, None]),
(True, 122, 3, 0.5, [60, 60, 0.0]),
(True, 167, 3, 0.5, [60, 60, 45.33]),
(True, 122, 3, 1, [60, 60, 0.0]),
])
def test_check_available_stake_amount(
default_conf_usdt, ticker_usdt, mocker, fee, limit_buy_order_usdt_open,
amend_last, wallet, max_open, lsamr, expected
) -> None:
patch_RPCManager(mocker)
patch_exchange(mocker)
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt,
create_order=MagicMock(return_value=limit_buy_order_usdt_open),
get_fee=fee
)
default_conf_usdt['dry_run_wallet'] = wallet
default_conf_usdt['amend_last_stake_amount'] = amend_last
default_conf_usdt['last_stake_amount_min_ratio'] = lsamr
freqtrade = FreqtradeBot(default_conf_usdt)
for i in range(0, max_open):
if expected[i] is not None:
limit_buy_order_usdt_open['id'] = str(i)
result = freqtrade.wallets.get_trade_stake_amount('ETH/USDT')
assert pytest.approx(result) == expected[i]
freqtrade.execute_entry('ETH/USDT', result)
else:
with pytest.raises(DependencyException):
freqtrade.wallets.get_trade_stake_amount('ETH/USDT')
def test_edge_called_in_process(mocker, edge_conf) -> None:
patch_RPCManager(mocker)
patch_edge(mocker)
patch_exchange(mocker)
freqtrade = FreqtradeBot(edge_conf)
patch_get_signal(freqtrade)
freqtrade.process()
assert freqtrade.active_pair_whitelist == ['NEO/BTC', 'LTC/BTC']
def test_edge_overrides_stake_amount(mocker, edge_conf) -> None:
patch_RPCManager(mocker)
patch_exchange(mocker)
patch_edge(mocker)
edge_conf['dry_run_wallet'] = 999.9
freqtrade = FreqtradeBot(edge_conf)
assert freqtrade.wallets.get_trade_stake_amount(
'NEO/BTC', freqtrade.edge) == (999.9 * 0.5 * 0.01) / 0.20
assert freqtrade.wallets.get_trade_stake_amount(
'LTC/BTC', freqtrade.edge) == (999.9 * 0.5 * 0.01) / 0.21
@pytest.mark.parametrize('buy_price_mult,ignore_strat_sl', [
(0.79, False), # Override stoploss
(0.85, True), # Override strategy stoploss
])
def test_edge_overrides_stoploss(limit_order, fee, caplog, mocker,
buy_price_mult, ignore_strat_sl, edge_conf) -> None:
patch_RPCManager(mocker)
patch_exchange(mocker)
patch_edge(mocker)
edge_conf['max_open_trades'] = float('inf')
# Strategy stoploss is -0.1 but Edge imposes a stoploss at -0.2
# Thus, if price falls 21%, stoploss should be triggered
#
# mocking the ticker: price is falling ...
enter_price = limit_order['buy']['price']
ticker_val = {
'bid': enter_price,
'ask': enter_price,
'last': enter_price,
}
mocker.patch.multiple(
EXMS,
fetch_ticker=MagicMock(return_value=ticker_val),
get_fee=fee,
)
#############################################
# Create a trade with "limit_buy_order_usdt" price
freqtrade = FreqtradeBot(edge_conf)
freqtrade.active_pair_whitelist = ['NEO/BTC']
patch_get_signal(freqtrade)
freqtrade.strategy.min_roi_reached = MagicMock(return_value=False)
freqtrade.enter_positions()
trade = Trade.session.scalars(select(Trade)).first()
caplog.clear()
#############################################
ticker_val.update({
'bid': enter_price * buy_price_mult,
'ask': enter_price * buy_price_mult,
'last': enter_price * buy_price_mult,
})
# stoploss shoud be hit
assert freqtrade.handle_trade(trade) is not ignore_strat_sl
if not ignore_strat_sl:
assert log_has_re('Exit for NEO/BTC detected. Reason: stop_loss.*', caplog)
assert trade.exit_reason == ExitType.STOP_LOSS.value
# Test compatibility ...
assert trade.sell_reason == ExitType.STOP_LOSS.value
def test_total_open_trades_stakes(mocker, default_conf_usdt, ticker_usdt, fee) -> None:
    """Two open trades of 60 USDT each should total 120 USDT of open stakes."""
    patch_RPCManager(mocker)
    patch_exchange(mocker)
    default_conf_usdt['max_open_trades'] = 2
    mocker.patch.multiple(
        EXMS,
        fetch_ticker=ticker_usdt,
        get_fee=fee,
        _dry_is_price_crossed=MagicMock(return_value=False),
    )
    freqtrade = FreqtradeBot(default_conf_usdt)
    patch_get_signal(freqtrade)

    def _verify_open_trade(candidate):
        # Every opened trade uses the configured stake and remains open.
        assert candidate is not None
        assert candidate.stake_amount == 60.0
        assert candidate.is_open
        assert candidate.open_date is not None

    freqtrade.enter_positions()
    _verify_open_trade(Trade.session.scalars(select(Trade)).first())
    freqtrade.enter_positions()
    _verify_open_trade(
        Trade.session.scalars(select(Trade).order_by(Trade.id.desc())).first())
    assert Trade.total_open_trades_stakes() == 120.0
@pytest.mark.parametrize("is_short,open_rate", [
(False, 2.0),
(True, 2.2)
])
def test_create_trade(default_conf_usdt, ticker_usdt, limit_order,
fee, mocker, is_short, open_rate) -> None:
patch_RPCManager(mocker)
patch_exchange(mocker)
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt,
get_fee=fee,
_dry_is_price_crossed=MagicMock(return_value=False),
)
# Save state of current whitelist
whitelist = deepcopy(default_conf_usdt['exchange']['pair_whitelist'])
freqtrade = FreqtradeBot(default_conf_usdt)
patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
freqtrade.create_trade('ETH/USDT')
trade = Trade.session.scalars(select(Trade)).first()
trade.is_short = is_short
assert trade is not None
assert pytest.approx(trade.stake_amount) == 60.0
assert trade.is_open
assert trade.open_date is not None
assert trade.exchange == 'binance'
# Simulate fulfilled LIMIT_BUY order for trade
oobj = Order.parse_from_ccxt_object(
limit_order[entry_side(is_short)], 'ADA/USDT', entry_side(is_short))
trade.update_trade(oobj)
assert trade.open_rate == open_rate
assert trade.amount == 30.0
assert whitelist == default_conf_usdt['exchange']['pair_whitelist']
def test_create_trade_no_stake_amount(default_conf_usdt, ticker_usdt, fee, mocker) -> None:
    """With insufficient free balance, create_trade must raise a DependencyException."""
    patch_RPCManager(mocker)
    patch_exchange(mocker)
    # Only half of the configured stake is available in the wallet.
    patch_wallet(mocker, free=default_conf_usdt['stake_amount'] * 0.5)
    mocker.patch.multiple(
        EXMS,
        get_fee=fee,
        fetch_ticker=ticker_usdt,
    )
    bot = FreqtradeBot(default_conf_usdt)
    patch_get_signal(bot)
    with pytest.raises(DependencyException, match=r'.*stake amount.*'):
        bot.create_trade('ETH/USDT')
@pytest.mark.parametrize("is_short", [False, True])
@pytest.mark.parametrize('stake_amount,create,amount_enough,max_open_trades', [
(5.0, True, True, 99),
(0.042, True, False, 99), # Amount will be adjusted to min - which is 0.051
(0, False, True, 99),
(UNLIMITED_STAKE_AMOUNT, False, True, 0),
])
def test_create_trade_minimal_amount(
default_conf_usdt, ticker_usdt, limit_order_open, fee, mocker,
stake_amount, create, amount_enough, max_open_trades, caplog, is_short
) -> None:
patch_RPCManager(mocker)
patch_exchange(mocker)
enter_mock = MagicMock(return_value=limit_order_open[entry_side(is_short)])
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt,
create_order=enter_mock,
get_fee=fee,
)
default_conf_usdt['max_open_trades'] = max_open_trades
freqtrade = FreqtradeBot(default_conf_usdt)
freqtrade.config['stake_amount'] = stake_amount
patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
if create:
assert freqtrade.create_trade('ETH/USDT')
if amount_enough:
rate, amount = enter_mock.call_args[1]['rate'], enter_mock.call_args[1]['amount']
assert rate * amount <= default_conf_usdt['stake_amount']
else:
assert log_has_re(
r"Stake amount for pair .* is too small.*",
caplog
)
else:
assert not freqtrade.create_trade('ETH/USDT')
if not max_open_trades:
assert freqtrade.wallets.get_trade_stake_amount('ETH/USDT', freqtrade.edge) == 0
@pytest.mark.parametrize('whitelist,positions', [
    (["ETH/USDT"], 1),  # No pairs left
    ([], 0),  # No pairs in whitelist
])
def test_enter_positions_no_pairs_left(default_conf_usdt, ticker_usdt, limit_buy_order_usdt_open,
                                       fee, whitelist, positions, mocker, caplog) -> None:
    """enter_positions should log distinct messages for 'whitelist exhausted'
    vs. 'whitelist empty' and open no further trades in either case."""
    patch_RPCManager(mocker)
    patch_exchange(mocker)
    mocker.patch.multiple(
        EXMS,
        fetch_ticker=ticker_usdt,
        create_order=MagicMock(return_value=limit_buy_order_usdt_open),
        get_fee=fee,
    )
    default_conf_usdt['exchange']['pair_whitelist'] = whitelist
    freqtrade = FreqtradeBot(default_conf_usdt)
    patch_get_signal(freqtrade)
    n = freqtrade.enter_positions()
    assert n == positions
    if positions:
        # First call consumed the only pair; second call finds nothing left.
        assert not log_has_re(r"No currency pair in active pair whitelist.*", caplog)
        n = freqtrade.enter_positions()
        assert n == 0
        assert log_has_re(r"No currency pair in active pair whitelist.*", caplog)
    else:
        assert n == 0
        assert log_has("Active pair whitelist is empty.", caplog)
@pytest.mark.usefixtures("init_persistence")
def test_enter_positions_global_pairlock(default_conf_usdt, ticker_usdt, limit_buy_order_usdt, fee,
mocker, caplog) -> None:
patch_RPCManager(mocker)
patch_exchange(mocker)
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt,
create_order=MagicMock(return_value={'id': limit_buy_order_usdt['id']}),
get_fee=fee,
)
freqtrade = FreqtradeBot(default_conf_usdt)
patch_get_signal(freqtrade)
n = freqtrade.enter_positions()
message = r"Global pairlock active until.* Not creating new trades."
n = freqtrade.enter_positions()
# 0 trades, but it's not because of pairlock.
assert n == 0
assert not log_has_re(message, caplog)
caplog.clear()
PairLocks.lock_pair('*', dt_now() + timedelta(minutes=20), 'Just because', side='*')
n = freqtrade.enter_positions()
assert n == 0
assert log_has_re(message, caplog)
@pytest.mark.parametrize('is_short', [False, True])
def test_handle_protections(mocker, default_conf_usdt, fee, is_short):
    """handle_protections should emit one pair-level and one global protection RPC message."""
    default_conf_usdt['protections'] = [
        {"method": "CooldownPeriod", "stop_duration": 60},
        {
            "method": "StoplossGuard",
            "lookback_period_candles": 24,
            "trade_limit": 4,
            "stop_duration_candles": 4,
            "only_per_pair": False
        }
    ]
    freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
    # Force the second handler (StoplossGuard) to report a global stop.
    freqtrade.protections._protection_handlers[1].global_stop = MagicMock(
        return_value=ProtectionReturn(True, dt_now() + timedelta(hours=1), "asdf"))
    create_mock_trades(fee, is_short)
    freqtrade.handle_protections('ETC/BTC', '*')
    send_msg_mock = freqtrade.rpc.send_msg
    assert send_msg_mock.call_count == 2
    assert send_msg_mock.call_args_list[0][0][0]['type'] == RPCMessageType.PROTECTION_TRIGGER
    assert send_msg_mock.call_args_list[1][0][0]['type'] == RPCMessageType.PROTECTION_TRIGGER_GLOBAL
def test_create_trade_no_signal(default_conf_usdt, fee, mocker) -> None:
    """Without an entry signal, create_trade must not open a position."""
    default_conf_usdt['dry_run'] = True
    patch_RPCManager(mocker)
    patch_exchange(mocker)
    mocker.patch.multiple(EXMS, get_fee=fee)
    default_conf_usdt['stake_amount'] = 10
    bot = FreqtradeBot(default_conf_usdt)
    # Neither a long-entry nor a long-exit signal is present.
    patch_get_signal(bot, enter_long=False, exit_long=False)
    assert not bot.create_trade('ETH/USDT')
@pytest.mark.parametrize("max_open", range(0, 5))
@pytest.mark.parametrize("tradable_balance_ratio,modifier", [(1.0, 1), (0.99, 0.8), (0.5, 0.5)])
def test_create_trades_multiple_trades(
default_conf_usdt, ticker_usdt, fee, mocker, limit_buy_order_usdt_open,
max_open, tradable_balance_ratio, modifier
) -> None:
patch_RPCManager(mocker)
patch_exchange(mocker)
default_conf_usdt['max_open_trades'] = max_open
default_conf_usdt['tradable_balance_ratio'] = tradable_balance_ratio
default_conf_usdt['dry_run_wallet'] = 60.0 * max_open
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt,
create_order=MagicMock(return_value=limit_buy_order_usdt_open),
get_fee=fee,
)
freqtrade = FreqtradeBot(default_conf_usdt)
patch_get_signal(freqtrade)
n = freqtrade.enter_positions()
trades = Trade.get_open_trades()
# Expected trades should be max_open * a modified value
# depending on the configured tradable_balance
assert n == max(int(max_open * modifier), 0)
assert len(trades) == max(int(max_open * modifier), 0)
def test_create_trades_preopen(default_conf_usdt, ticker_usdt, fee, mocker,
                               limit_buy_order_usdt_open, caplog) -> None:
    """create_trade should still open new trades when other trades already exist,
    and a funding-fee ExchangeError must only be logged, not fatal."""
    patch_RPCManager(mocker)
    patch_exchange(mocker)
    default_conf_usdt['max_open_trades'] = 4
    mocker.patch.multiple(
        EXMS,
        fetch_ticker=ticker_usdt,
        create_order=MagicMock(return_value=limit_buy_order_usdt_open),
        get_fee=fee,
        get_funding_fees=MagicMock(side_effect=ExchangeError()),
    )
    freqtrade = FreqtradeBot(default_conf_usdt)
    patch_get_signal(freqtrade)
    # Create 2 existing trades
    freqtrade.execute_entry('ETH/USDT', default_conf_usdt['stake_amount'])
    freqtrade.execute_entry('NEO/BTC', default_conf_usdt['stake_amount'])
    assert log_has("Could not find funding fee.", caplog)
    assert len(Trade.get_open_trades()) == 2
    # Change order_id for new orders
    limit_buy_order_usdt_open['id'] = '123444'
    # Create 2 new trades using create_trades
    assert freqtrade.create_trade('ETH/USDT')
    assert freqtrade.create_trade('NEO/BTC')
    trades = Trade.get_open_trades()
    assert len(trades) == 4
@pytest.mark.parametrize('is_short', [False, True])
def test_process_trade_creation(default_conf_usdt, ticker_usdt, limit_order, limit_order_open,
                                is_short, fee, mocker, caplog
                                ) -> None:
    """A full process() iteration should create exactly one trade at the ticker rate."""
    # Shorts enter at the ask side, longs at the bid side.
    ticker_side = 'ask' if is_short else 'bid'
    patch_RPCManager(mocker)
    patch_exchange(mocker)
    mocker.patch.multiple(
        EXMS,
        fetch_ticker=ticker_usdt,
        create_order=MagicMock(return_value=limit_order_open[entry_side(is_short)]),
        fetch_order=MagicMock(return_value=limit_order[entry_side(is_short)]),
        get_fee=fee,
    )
    freqtrade = FreqtradeBot(default_conf_usdt)
    patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
    trades = Trade.get_open_trades()
    assert not trades
    freqtrade.process()
    trades = Trade.get_open_trades()
    assert len(trades) == 1
    trade = trades[0]
    assert trade is not None
    assert pytest.approx(trade.stake_amount) == default_conf_usdt['stake_amount']
    assert trade.is_open
    assert trade.open_date is not None
    assert trade.exchange == 'binance'
    assert trade.open_rate == ticker_usdt.return_value[ticker_side]
    assert pytest.approx(trade.amount) == 60 / ticker_usdt.return_value[ticker_side]
    assert log_has(
        f'{"Short" if is_short else "Long"} signal found: about create a new trade for ETH/USDT '
        'with stake_amount: 60.0 ...',
        caplog
    )
def test_process_exchange_failures(default_conf_usdt, ticker_usdt, mocker) -> None:
    """A TemporaryError from the exchange should be swallowed and followed by a
    retry back-off sleep instead of crashing the worker."""
    patch_RPCManager(mocker)
    patch_exchange(mocker)
    mocker.patch.multiple(
        EXMS,
        fetch_ticker=ticker_usdt,
        create_order=MagicMock(side_effect=TemporaryError)
    )
    sleep_mock = mocker.patch('time.sleep', side_effect=lambda _: None)
    worker = Worker(args=None, config=default_conf_usdt)
    patch_get_signal(worker.freqtrade)
    worker._process_running()
    # Previously `assert sleep_mock.has_calls()` - `has_calls` is not part of the
    # Mock API, so MagicMock auto-created a child mock (always truthy) and the
    # assertion could never fail. `.called` actually verifies the back-off.
    assert sleep_mock.called
def test_process_operational_exception(default_conf_usdt, ticker_usdt, mocker) -> None:
    """An OperationalException during processing must stop the bot and notify via RPC."""
    msg_mock = patch_RPCManager(mocker)
    patch_exchange(mocker)
    mocker.patch.multiple(
        EXMS,
        create_order=MagicMock(side_effect=OperationalException),
        fetch_ticker=ticker_usdt,
    )
    worker = Worker(args=None, config=default_conf_usdt)
    patch_get_signal(worker.freqtrade)
    assert worker.freqtrade.state == State.RUNNING
    worker._process_running()
    # The bot transitions to STOPPED and the last RPC message names the exception.
    assert worker.freqtrade.state == State.STOPPED
    assert 'OperationalException' in msg_mock.call_args_list[-1][0][0]['status']
def test_process_trade_handling(default_conf_usdt, ticker_usdt, limit_buy_order_usdt_open, fee,
                                mocker) -> None:
    """A second process() iteration must not duplicate the already-open trade."""
    patch_RPCManager(mocker)
    patch_exchange(mocker)
    mocker.patch.multiple(
        EXMS,
        fetch_ticker=ticker_usdt,
        create_order=MagicMock(return_value=limit_buy_order_usdt_open),
        fetch_order=MagicMock(return_value=limit_buy_order_usdt_open),
        get_fee=fee,
    )
    freqtrade = FreqtradeBot(default_conf_usdt)
    patch_get_signal(freqtrade)
    trades = Trade.get_open_trades()
    assert not trades
    freqtrade.process()
    trades = Trade.get_open_trades()
    assert len(trades) == 1
    # Nothing happened ...
    freqtrade.process()
    assert len(trades) == 1
def test_process_trade_no_whitelist_pair(default_conf_usdt, ticker_usdt, limit_buy_order_usdt,
                                         fee, mocker) -> None:
    """ Test process with trade not in pair list.

    A pair with an open trade but absent from the whitelist must be added to the
    active whitelist (so the open position can still be managed), without duplicates.
    """
    patch_RPCManager(mocker)
    patch_exchange(mocker)
    mocker.patch.multiple(
        EXMS,
        fetch_ticker=ticker_usdt,
        create_order=MagicMock(return_value={'id': limit_buy_order_usdt['id']}),
        fetch_order=MagicMock(return_value=limit_buy_order_usdt),
        get_fee=fee,
    )
    freqtrade = FreqtradeBot(default_conf_usdt)
    patch_get_signal(freqtrade)
    pair = 'BLK/BTC'
    # Ensure the pair is not in the whitelist!
    assert pair not in default_conf_usdt['exchange']['pair_whitelist']
    # create open trade not in whitelist
    Trade.session.add(Trade(
        pair=pair,
        stake_amount=0.001,
        fee_open=fee.return_value,
        fee_close=fee.return_value,
        is_open=True,
        amount=20,
        open_rate=0.01,
        exchange='binance',
    ))
    Trade.session.add(Trade(
        pair='ETH/USDT',
        stake_amount=0.001,
        fee_open=fee.return_value,
        fee_close=fee.return_value,
        is_open=True,
        amount=12,
        open_rate=0.001,
        exchange='binance',
    ))
    Trade.commit()
    assert pair not in freqtrade.active_pair_whitelist
    freqtrade.process()
    assert pair in freqtrade.active_pair_whitelist
    # Make sure each pair is only in the list once
    assert len(freqtrade.active_pair_whitelist) == len(set(freqtrade.active_pair_whitelist))
def test_process_informative_pairs_added(default_conf_usdt, ticker_usdt, mocker) -> None:
    """Strategy informative_pairs must be merged into the OHLCV refresh request,
    alongside the regular whitelist pair/timeframe combination."""
    patch_RPCManager(mocker)
    patch_exchange(mocker)
    refresh_mock = MagicMock()
    mocker.patch.multiple(
        EXMS,
        fetch_ticker=ticker_usdt,
        create_order=MagicMock(side_effect=TemporaryError),
        refresh_latest_ohlcv=refresh_mock,
    )
    inf_pairs = MagicMock(return_value=[
        ("BTC/ETH", '1m', CandleType.SPOT),
        ("ETH/USDT", "1h", CandleType.SPOT)
    ])
    mocker.patch.multiple(
        'freqtrade.strategy.interface.IStrategy',
        get_exit_signal=MagicMock(return_value=(False, False)),
        get_entry_signal=MagicMock(return_value=(None, None))
    )
    mocker.patch('time.sleep', return_value=None)
    freqtrade = FreqtradeBot(default_conf_usdt)
    freqtrade.strategy.informative_pairs = inf_pairs
    # patch_get_signal(freqtrade)
    freqtrade.process()
    assert inf_pairs.call_count == 1
    assert refresh_mock.call_count == 1
    # Both informative pairs and the whitelist pair at the default timeframe
    # must be part of the single refresh call.
    assert ("BTC/ETH", "1m", CandleType.SPOT) in refresh_mock.call_args[0][0]
    assert ("ETH/USDT", "1h", CandleType.SPOT) in refresh_mock.call_args[0][0]
    assert ("ETH/USDT", default_conf_usdt["timeframe"],
            CandleType.SPOT) in refresh_mock.call_args[0][0]
@pytest.mark.parametrize("is_short,trading_mode,exchange_name,margin_mode,liq_buffer,liq_price", [
(False, 'spot', 'binance', None, 0.0, None),
(True, 'spot', 'binance', None, 0.0, None),
(False, 'spot', 'gate', None, 0.0, None),
(True, 'spot', 'gate', None, 0.0, None),
(False, 'spot', 'okx', None, 0.0, None),
(True, 'spot', 'okx', None, 0.0, None),
(True, 'futures', 'binance', 'isolated', 0.0, 11.88151815181518),
(False, 'futures', 'binance', 'isolated', 0.0, 8.080471380471382),
(True, 'futures', 'gate', 'isolated', 0.0, 11.87413417771621),
(False, 'futures', 'gate', 'isolated', 0.0, 8.085708510208207),
(True, 'futures', 'binance', 'isolated', 0.05, 11.7874422442244),
(False, 'futures', 'binance', 'isolated', 0.05, 8.17644781144781),
(True, 'futures', 'gate', 'isolated', 0.05, 11.7804274688304),
(False, 'futures', 'gate', 'isolated', 0.05, 8.181423084697796),
(True, 'futures', 'okx', 'isolated', 0.0, 11.87413417771621),
(False, 'futures', 'okx', 'isolated', 0.0, 8.085708510208207),
(True, 'futures', 'bybit', 'isolated', 0.0, 11.9),
(False, 'futures', 'bybit', 'isolated', 0.0, 8.1),
])
def test_execute_entry(mocker, default_conf_usdt, fee, limit_order,
limit_order_open, is_short, trading_mode,
exchange_name, margin_mode, liq_buffer, liq_price) -> None:
"""
exchange_name = binance, is_short = true
leverage = 5
position = 0.2 * 5
((wb + cum_b) - (side_1 * position * ep1)) / ((position * mmr_b) - (side_1 * position))
((2 + 0.01) - ((-1) * 1 * 10)) / ((1 * 0.01) - ((-1) * 1)) = 11.89108910891089
exchange_name = binance, is_short = false
((wb + cum_b) - (side_1 * position * ep1)) / ((position * mmr_b) - (side_1 * position))
((2 + 0.01) - (1 * 1 * 10)) / ((1 * 0.01) - (1 * 1)) = 8.070707070707071
exchange_name = gate/okx, is_short = true
(open_rate + (wallet_balance / position)) / (1 + (mm_ratio + taker_fee_rate))
(10 + (2 / 1)) / (1 + (0.01 + 0.0006)) = 11.87413417771621
exchange_name = gate/okx, is_short = false
(open_rate - (wallet_balance / position)) / (1 - (mm_ratio + taker_fee_rate))
(10 - (2 / 1)) / (1 - (0.01 + 0.0006)) = 8.085708510208207
"""
# TODO: Split this test into multiple tests to improve readability
open_order = limit_order_open[entry_side(is_short)]
order = limit_order[entry_side(is_short)]
default_conf_usdt['trading_mode'] = trading_mode
default_conf_usdt['liquidation_buffer'] = liq_buffer
leverage = 1.0 if trading_mode == 'spot' else 5.0
default_conf_usdt['exchange']['name'] = exchange_name
if margin_mode:
default_conf_usdt['margin_mode'] = margin_mode
mocker.patch('freqtrade.exchange.gate.Gate.validate_ordertypes')
patch_RPCManager(mocker)
patch_exchange(mocker, id=exchange_name)
freqtrade = FreqtradeBot(default_conf_usdt)
freqtrade.strategy.confirm_trade_entry = MagicMock(return_value=False)
freqtrade.strategy.leverage = MagicMock(return_value=leverage)
stake_amount = 2
bid = 0.11
enter_rate_mock = MagicMock(return_value=bid)
enter_mm = MagicMock(return_value=open_order)
mocker.patch.multiple(
EXMS,
get_rate=enter_rate_mock,
fetch_ticker=MagicMock(return_value={
'bid': 1.9,
'ask': 2.2,
'last': 1.9
}),
create_order=enter_mm,
get_min_pair_stake_amount=MagicMock(return_value=1),
get_max_pair_stake_amount=MagicMock(return_value=500000),
get_fee=fee,
get_funding_fees=MagicMock(return_value=0),
name=exchange_name,
get_maintenance_ratio_and_amt=MagicMock(return_value=(0.01, 0.01)),
get_max_leverage=MagicMock(return_value=10),
)
mocker.patch.multiple(
'freqtrade.exchange.okx.Okx',
get_max_pair_stake_amount=MagicMock(return_value=500000),
)
pair = 'ETH/USDT'
assert not freqtrade.execute_entry(pair, stake_amount, is_short=is_short)
assert enter_rate_mock.call_count == 1
assert enter_mm.call_count == 0
assert freqtrade.strategy.confirm_trade_entry.call_count == 1
enter_rate_mock.reset_mock()
open_order['id'] = '22'
freqtrade.strategy.confirm_trade_entry = MagicMock(return_value=True)
assert freqtrade.execute_entry(pair, stake_amount)
assert enter_rate_mock.call_count == 1
assert enter_mm.call_count == 1
call_args = enter_mm.call_args_list[0][1]
assert call_args['pair'] == pair
assert call_args['rate'] == bid
assert pytest.approx(call_args['amount']) == round(stake_amount / bid * leverage, 8)
enter_rate_mock.reset_mock()
# Should create an open trade with an open order id
# As the order is not fulfilled yet
trade = Trade.session.scalars(select(Trade)).first()
trade.is_short = is_short
assert trade
assert trade.is_open is True
assert trade.has_open_orders
assert '22' in trade.open_orders_ids
# Test calling with price
open_order['id'] = '33'
fix_price = 0.06
assert freqtrade.execute_entry(pair, stake_amount, fix_price, is_short=is_short)
# Make sure get_rate wasn't called again
assert enter_rate_mock.call_count == 0
assert enter_mm.call_count == 2
call_args = enter_mm.call_args_list[1][1]
assert call_args['pair'] == pair
assert call_args['rate'] == fix_price
assert pytest.approx(call_args['amount']) == round(stake_amount / fix_price * leverage, 8)
# In case of closed order
order['status'] = 'closed'
order['average'] = 10
order['cost'] = 300
order['id'] = '444'
mocker.patch(f'{EXMS}.create_order', MagicMock(return_value=order))
assert freqtrade.execute_entry(pair, stake_amount, is_short=is_short)
trade = Trade.session.scalars(select(Trade)).all()[2]
trade.is_short = is_short
assert trade
assert not trade.has_open_orders
assert trade.open_rate == 10
assert trade.stake_amount == round(order['average'] * order['filled'] / leverage, 8)
assert pytest.approx(trade.liquidation_price) == liq_price
# In case of rejected or expired order and partially filled
order['status'] = 'expired'
order['amount'] = 30.0
order['filled'] = 20.0
order['remaining'] = 10.00
order['average'] = 0.5
order['cost'] = 10.0
order['id'] = '555'
mocker.patch(f'{EXMS}.create_order', MagicMock(return_value=order))
assert freqtrade.execute_entry(pair, stake_amount)
trade = Trade.session.scalars(select(Trade)).all()[3]
trade.is_short = is_short
assert trade
assert not trade.has_open_orders
assert trade.open_rate == 0.5
assert trade.stake_amount == round(order['average'] * order['filled'] / leverage, 8)
# Test with custom stake
order['status'] = 'open'
order['id'] = '556'
freqtrade.strategy.custom_stake_amount = lambda **kwargs: 150.0
assert freqtrade.execute_entry(pair, stake_amount, is_short=is_short)
trade = Trade.session.scalars(select(Trade)).all()[4]
trade.is_short = is_short
assert trade
assert pytest.approx(trade.stake_amount) == 150
# Exception case
order['id'] = '557'
freqtrade.strategy.custom_stake_amount = lambda **kwargs: 20 / 0
assert freqtrade.execute_entry(pair, stake_amount, is_short=is_short)
trade = Trade.session.scalars(select(Trade)).all()[5]
trade.is_short = is_short
assert trade
assert pytest.approx(trade.stake_amount) == 2.0
# In case of the order is rejected and not filled at all
order['status'] = 'rejected'
order['amount'] = 30.0 * leverage
order['filled'] = 0.0
order['remaining'] = 30.0
order['average'] = 0.5
order['cost'] = 0.0
order['id'] = '66'
mocker.patch(f'{EXMS}.create_order', MagicMock(return_value=order))
assert not freqtrade.execute_entry(pair, stake_amount)
assert freqtrade.strategy.leverage.call_count == 0 if trading_mode == 'spot' else 2
# Fail to get price...
mocker.patch(f'{EXMS}.get_rate', MagicMock(return_value=0.0))
with pytest.raises(PricingError, match="Could not determine entry price."):
freqtrade.execute_entry(pair, stake_amount, is_short=is_short)
# In case of custom entry price
mocker.patch(f'{EXMS}.get_rate', return_value=0.50)
order['status'] = 'open'
order['id'] = '5566'
freqtrade.strategy.custom_entry_price = lambda **kwargs: 0.508
assert freqtrade.execute_entry(pair, stake_amount, is_short=is_short)
trade = Trade.session.scalars(select(Trade)).all()[6]
trade.is_short = is_short
assert trade
assert trade.open_rate_requested == 0.508
# In case of custom entry price set to None
order['status'] = 'open'
order['id'] = '5567'
freqtrade.strategy.custom_entry_price = lambda **kwargs: None
mocker.patch.multiple(
EXMS,
get_rate=MagicMock(return_value=10),
)
assert freqtrade.execute_entry(pair, stake_amount, is_short=is_short)
trade = Trade.session.scalars(select(Trade)).all()[7]
trade.is_short = is_short
assert trade
assert trade.open_rate_requested == 10
# In case of custom entry price not float type
order['status'] = 'open'
order['id'] = '5568'
freqtrade.strategy.custom_entry_price = lambda **kwargs: "string price"
assert freqtrade.execute_entry(pair, stake_amount, is_short=is_short)
trade = Trade.session.scalars(select(Trade)).all()[8]
# Trade(id=9, pair=ETH/USDT, amount=0.20000000, is_short=False,
# leverage=1.0, open_rate=10.00000000, open_since=...)
# Trade(id=9, pair=ETH/USDT, amount=0.60000000, is_short=True,
# leverage=3.0, open_rate=10.00000000, open_since=...)
trade.is_short = is_short
assert trade
assert trade.open_rate_requested == 10
# In case of too high stake amount
order['status'] = 'open'
order['id'] = '55672'
mocker.patch.multiple(
EXMS,
get_max_pair_stake_amount=MagicMock(return_value=500),
)
freqtrade.exchange.get_max_pair_stake_amount = MagicMock(return_value=500)
assert freqtrade.execute_entry(pair, 2000, is_short=is_short)
trade = Trade.session.scalars(select(Trade)).all()[9]
trade.is_short = is_short
assert pytest.approx(trade.stake_amount) == 500
order['id'] = '55673'
freqtrade.strategy.leverage.reset_mock()
assert freqtrade.execute_entry(pair, 200, leverage_=3)
assert freqtrade.strategy.leverage.call_count == 0
trade = Trade.session.scalars(select(Trade)).all()[10]
assert trade.leverage == 1 if trading_mode == 'spot' else 3
@pytest.mark.parametrize("is_short", [False, True])
def test_execute_entry_confirm_error(mocker, default_conf_usdt, fee, limit_order, is_short) -> None:
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
mocker.patch.multiple(
EXMS,
fetch_ticker=MagicMock(return_value={
'bid': 1.9,
'ask': 2.2,
'last': 1.9
}),
create_order=MagicMock(return_value=limit_order[entry_side(is_short)]),
get_rate=MagicMock(return_value=0.11),
get_min_pair_stake_amount=MagicMock(return_value=1),
get_fee=fee,
)
stake_amount = 2
pair = 'ETH/USDT'
freqtrade.strategy.confirm_trade_entry = MagicMock(side_effect=ValueError)
assert freqtrade.execute_entry(pair, stake_amount)
limit_order[entry_side(is_short)]['id'] = '222'
freqtrade.strategy.confirm_trade_entry = MagicMock(side_effect=Exception)
assert freqtrade.execute_entry(pair, stake_amount)
limit_order[entry_side(is_short)]['id'] = '2223'
freqtrade.strategy.confirm_trade_entry = MagicMock(return_value=True)
assert freqtrade.execute_entry(pair, stake_amount)
freqtrade.strategy.confirm_trade_entry = MagicMock(return_value=False)
assert not freqtrade.execute_entry(pair, stake_amount)
@pytest.mark.parametrize("is_short", [False, True])
def test_execute_entry_min_leverage(mocker, default_conf_usdt, fee, limit_order, is_short) -> None:
default_conf_usdt['trading_mode'] = 'futures'
default_conf_usdt['margin_mode'] = 'isolated'
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
mocker.patch.multiple(
EXMS,
fetch_ticker=MagicMock(return_value={
'bid': 1.9,
'ask': 2.2,
'last': 1.9
}),
create_order=MagicMock(return_value=limit_order[entry_side(is_short)]),
get_rate=MagicMock(return_value=0.11),
# Minimum stake-amount is ~5$
get_maintenance_ratio_and_amt=MagicMock(return_value=(0.0, 0.0)),
_fetch_and_calculate_funding_fees=MagicMock(return_value=0),
get_fee=fee,
get_max_leverage=MagicMock(return_value=5.0),
)
stake_amount = 2
pair = 'SOL/BUSD:BUSD'
freqtrade.strategy.leverage = MagicMock(return_value=5.0)
assert freqtrade.execute_entry(pair, stake_amount, is_short=is_short)
trade = Trade.session.scalars(select(Trade)).first()
assert trade.leverage == 5.0
# assert trade.stake_amount == 2
@pytest.mark.parametrize("is_short", [False, True])
def test_add_stoploss_on_exchange(mocker, default_conf_usdt, limit_order, is_short, fee) -> None:
patch_RPCManager(mocker)
patch_exchange(mocker)
mocker.patch.multiple(
EXMS,
fetch_ticker=MagicMock(return_value={
'bid': 1.9,
'ask': 2.2,
'last': 1.9
}),
create_order=MagicMock(return_value=limit_order[entry_side(is_short)]),
get_fee=fee,
)
order = limit_order[entry_side(is_short)]
mocker.patch('freqtrade.freqtradebot.FreqtradeBot.handle_trade', MagicMock(return_value=True))
mocker.patch(f'{EXMS}.fetch_order', return_value=order)
mocker.patch(f'{EXMS}.get_trades_for_order', return_value=[])
stoploss = MagicMock(return_value={'id': 13434334})
mocker.patch(f'{EXMS}.create_stoploss', stoploss)
freqtrade = FreqtradeBot(default_conf_usdt)
freqtrade.strategy.order_types['stoploss_on_exchange'] = True
patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
freqtrade.enter_positions()
trade = Trade.session.scalars(select(Trade)).first()
trade.is_short = is_short
trade.stoploss_order_id = None
trade.is_open = True
trades = [trade]
freqtrade.exit_positions(trades)
assert trade.stoploss_order_id == '13434334'
assert stoploss.call_count == 1
assert trade.is_open is True
@pytest.mark.parametrize("is_short", [False, True])
def test_handle_stoploss_on_exchange(mocker, default_conf_usdt, fee, caplog, is_short,
limit_order) -> None:
stop_order_dict = {'id': "13434334"}
stoploss = MagicMock(return_value=stop_order_dict)
enter_order = limit_order[entry_side(is_short)]
exit_order = limit_order[exit_side(is_short)]
patch_RPCManager(mocker)
patch_exchange(mocker)
mocker.patch.multiple(
EXMS,
fetch_ticker=MagicMock(return_value={
'bid': 1.9,
'ask': 2.2,
'last': 1.9
}),
create_order=MagicMock(side_effect=[
enter_order,
exit_order,
]),
get_fee=fee,
create_stoploss=stoploss
)
freqtrade = FreqtradeBot(default_conf_usdt)
patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
# First case: when stoploss is not yet set but the order is open
# should get the stoploss order id immediately
# and should return false as no trade actually happened
freqtrade.enter_positions()
trade = Trade.session.scalars(select(Trade)).first()
trade.is_short = is_short
trade.is_open = True
trade.stoploss_order_id = None
assert freqtrade.handle_stoploss_on_exchange(trade) is False
assert stoploss.call_count == 1
assert trade.stoploss_order_id == "13434334"
# Second case: when stoploss is set but it is not yet hit
# should do nothing and return false
stop_order_dict.update({'id': "102"})
trade.is_open = True
trade.stoploss_order_id = "102"
trade.orders.append(
Order(
ft_order_side='stoploss',
ft_pair=trade.pair,
ft_is_open=True,
ft_amount=trade.amount,
ft_price=trade.stop_loss,
order_id='102',
status='open',
)
)
hanging_stoploss_order = MagicMock(return_value={'status': 'open'})
mocker.patch(f'{EXMS}.fetch_stoploss_order', hanging_stoploss_order)
assert freqtrade.handle_stoploss_on_exchange(trade) is False
assert trade.stoploss_order_id == "102"
# Third case: when stoploss was set but it was canceled for some reason
# should set a stoploss immediately and return False
caplog.clear()
trade.is_open = True
trade.stoploss_order_id = "102"
canceled_stoploss_order = MagicMock(return_value={'id': '103_1', 'status': 'canceled'})
mocker.patch(f'{EXMS}.fetch_stoploss_order', canceled_stoploss_order)
stoploss.reset_mock()
amount_before = trade.amount
stop_order_dict.update({'id': "103_1"})
assert freqtrade.handle_stoploss_on_exchange(trade) is False
assert stoploss.call_count == 1
assert trade.stoploss_order_id == "103_1"
assert trade.amount == amount_before
# Fourth case: when stoploss is set and it is hit
# should unset stoploss_order_id and return true
# as a trade actually happened
caplog.clear()
freqtrade.enter_positions()
stop_order_dict.update({'id': "104"})
trade = Trade.session.scalars(select(Trade)).first()
trade.is_short = is_short
trade.is_open = True
trade.stoploss_order_id = "104"
trade.orders.append(Order(
ft_order_side='stoploss',
order_id='104',
ft_pair=trade.pair,
ft_is_open=True,
ft_amount=trade.amount,
ft_price=0.0,
))
assert trade
stoploss_order_hit = MagicMock(return_value={
'id': "104",
'status': 'closed',
'type': 'stop_loss_limit',
'price': 3,
'average': 2,
'filled': enter_order['amount'],
'remaining': 0,
'amount': enter_order['amount'],
})
mocker.patch(f'{EXMS}.fetch_stoploss_order', stoploss_order_hit)
assert freqtrade.handle_stoploss_on_exchange(trade) is True
assert log_has_re(r'STOP_LOSS_LIMIT is hit for Trade\(id=1, .*\)\.', caplog)
assert trade.stoploss_order_id is None
assert trade.is_open is False
caplog.clear()
mocker.patch(f'{EXMS}.create_stoploss', side_effect=ExchangeError())
trade.is_open = True
freqtrade.handle_stoploss_on_exchange(trade)
assert log_has('Unable to place a stoploss order on exchange.', caplog)
assert trade.stoploss_order_id is None
# Fifth case: fetch_order returns InvalidOrder
# It should try to add stoploss order
stop_order_dict.update({'id': "105"})
trade.stoploss_order_id = "105"
stoploss.reset_mock()
mocker.patch(f'{EXMS}.fetch_stoploss_order', side_effect=InvalidOrderException())
mocker.patch(f'{EXMS}.create_stoploss', stoploss)
freqtrade.handle_stoploss_on_exchange(trade)
assert stoploss.call_count == 1
# Sixth case: Closed Trade
# Should not create new order
trade.stoploss_order_id = None
trade.is_open = False
stoploss.reset_mock()
mocker.patch(f'{EXMS}.fetch_order')
mocker.patch(f'{EXMS}.create_stoploss', stoploss)
assert freqtrade.handle_stoploss_on_exchange(trade) is False
assert stoploss.call_count == 0
# Seventh case: emergency exit triggered
# Trailing stop should not act anymore
stoploss_order_cancelled = MagicMock(side_effect=[{
'id': "107",
'status': 'canceled',
'type': 'stop_loss_limit',
'price': 3,
'average': 2,
'amount': enter_order['amount'],
'filled': 0,
'remaining': enter_order['amount'],
'info': {'stopPrice': 22},
}])
trade.stoploss_order_id = "107"
trade.is_open = True
trade.stoploss_last_update = dt_now() - timedelta(hours=1)
trade.stop_loss = 24
trade.exit_reason = None
trade.orders.append(
Order(
ft_order_side='stoploss',
ft_pair=trade.pair,
ft_is_open=True,
ft_amount=trade.amount,
ft_price=trade.stop_loss,
order_id='107',
status='open',
)
)
freqtrade.config['trailing_stop'] = True
stoploss = MagicMock(side_effect=InvalidOrderException())
Trade.commit()
mocker.patch(f'{EXMS}.cancel_stoploss_order_with_result',
side_effect=InvalidOrderException())
mocker.patch(f'{EXMS}.fetch_stoploss_order', stoploss_order_cancelled)
mocker.patch(f'{EXMS}.create_stoploss', stoploss)
assert freqtrade.handle_stoploss_on_exchange(trade) is False
assert trade.stoploss_order_id is None
assert trade.is_open is False
assert trade.exit_reason == str(ExitType.EMERGENCY_EXIT)
@pytest.mark.parametrize("is_short", [False, True])
def test_handle_stoploss_on_exchange_partial(
        mocker, default_conf_usdt, fee, is_short, limit_order) -> None:
    """A stoploss order cancelled on the exchange after a partial fill must
    shrink trade.amount to the unfilled remainder so the later exit succeeds,
    and a replacement stoploss order must be placed for that remainder."""
    stop_order_dict = {'id': "101", "status": "open"}
    stoploss = MagicMock(return_value=stop_order_dict)
    enter_order = limit_order[entry_side(is_short)]
    exit_order = limit_order[exit_side(is_short)]
    patch_RPCManager(mocker)
    patch_exchange(mocker)
    mocker.patch.multiple(
        EXMS,
        fetch_ticker=MagicMock(return_value={
            'bid': 1.9,
            'ask': 2.2,
            'last': 1.9
        }),
        create_order=MagicMock(side_effect=[
            enter_order,
            exit_order,
        ]),
        get_fee=fee,
        create_stoploss=stoploss
    )
    freqtrade = FreqtradeBot(default_conf_usdt)
    patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
    freqtrade.enter_positions()
    trade = Trade.session.scalars(select(Trade)).first()
    trade.is_short = is_short
    trade.is_open = True
    trade.stoploss_order_id = None
    # First call: no stoploss exists yet -> one must be created ("101").
    assert freqtrade.handle_stoploss_on_exchange(trade) is False
    assert stoploss.call_count == 1
    assert trade.stoploss_order_id == "101"
    assert trade.amount == 30
    # Next created stoploss order will get id "102" (shared dict is mutated).
    stop_order_dict.update({'id': "102"})
    # Stoploss on exchange is cancelled on exchange, but filled partially.
    # Must update trade amount to guarantee successful exit.
    stoploss_order_hit = MagicMock(return_value={
        'id': "101",
        'status': 'canceled',
        'type': 'stop_loss_limit',
        'price': 3,
        'average': 2,
        'filled': trade.amount / 2,
        'remaining': trade.amount / 2,
        'amount': enter_order['amount'],
    })
    mocker.patch(f'{EXMS}.fetch_stoploss_order', stoploss_order_hit)
    assert freqtrade.handle_stoploss_on_exchange(trade) is False
    # Stoploss filled partially ...
    assert trade.amount == 15
    assert trade.stoploss_order_id == "102"
@pytest.mark.parametrize("is_short", [False, True])
def test_handle_stoploss_on_exchange_partial_cancel_here(
        mocker, default_conf_usdt, fee, is_short, limit_order, caplog) -> None:
    """When the bot itself cancels the stoploss (trailing update) and the
    cancel result reports a partial fill, trade.amount must be reduced to the
    remaining half before the replacement stoploss is placed."""
    stop_order_dict = {'id': "101", "status": "open"}
    # Trailing stop enabled so handle_stoploss_on_exchange triggers a cancel/replace.
    default_conf_usdt['trailing_stop'] = True
    stoploss = MagicMock(return_value=stop_order_dict)
    enter_order = limit_order[entry_side(is_short)]
    exit_order = limit_order[exit_side(is_short)]
    patch_RPCManager(mocker)
    patch_exchange(mocker)
    mocker.patch.multiple(
        EXMS,
        fetch_ticker=MagicMock(return_value={
            'bid': 1.9,
            'ask': 2.2,
            'last': 1.9
        }),
        create_order=MagicMock(side_effect=[
            enter_order,
            exit_order,
        ]),
        get_fee=fee,
        create_stoploss=stoploss
    )
    freqtrade = FreqtradeBot(default_conf_usdt)
    patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
    freqtrade.enter_positions()
    trade = Trade.session.scalars(select(Trade)).first()
    trade.is_short = is_short
    trade.is_open = True
    trade.stoploss_order_id = None
    # First call creates the initial stoploss order "101".
    assert freqtrade.handle_stoploss_on_exchange(trade) is False
    assert stoploss.call_count == 1
    assert trade.stoploss_order_id == "101"
    assert trade.amount == 30
    # Replacement stoploss will be created with id "102".
    stop_order_dict.update({'id': "102"})
    # Stoploss on exchange is open.
    # Freqtrade cancels the stop - but cancel returns a partial filled order.
    stoploss_order_hit = MagicMock(return_value={
        'id': "101",
        'status': 'open',
        'type': 'stop_loss_limit',
        'price': 3,
        'average': 2,
        'filled': 0,
        'remaining': trade.amount,
        'amount': enter_order['amount'],
    })
    stoploss_order_cancel = MagicMock(return_value={
        'id': "101",
        'status': 'canceled',
        'type': 'stop_loss_limit',
        'price': 3,
        'average': 2,
        'filled': trade.amount / 2,
        'remaining': trade.amount / 2,
        'amount': enter_order['amount'],
    })
    mocker.patch(f'{EXMS}.fetch_stoploss_order', stoploss_order_hit)
    mocker.patch(f'{EXMS}.cancel_stoploss_order_with_result', stoploss_order_cancel)
    # Push last update into the past so the trailing logic is allowed to act.
    trade.stoploss_last_update = dt_now() - timedelta(minutes=10)
    assert freqtrade.handle_stoploss_on_exchange(trade) is False
    # Canceled Stoploss filled partially ...
    assert log_has_re('Cancelling current stoploss on exchange.*', caplog)
    assert trade.stoploss_order_id == "102"
    assert trade.amount == 15
@pytest.mark.parametrize("is_short", [False, True])
def test_handle_sle_cancel_cant_recreate(mocker, default_conf_usdt, fee, caplog, is_short,
                                         limit_order) -> None:
    """If the exchange reports the stoploss as cancelled but re-creating it
    raises ExchangeError, the bot must log a warning, clear the stale
    stoploss_order_id, and keep the trade open."""
    # Sixth case: stoploss order was cancelled but couldn't create new one
    enter_order = limit_order[entry_side(is_short)]
    exit_order = limit_order[exit_side(is_short)]
    patch_RPCManager(mocker)
    patch_exchange(mocker)
    mocker.patch.multiple(
        EXMS,
        fetch_ticker=MagicMock(return_value={
            'bid': 1.9,
            'ask': 2.2,
            'last': 1.9
        }),
        create_order=MagicMock(side_effect=[
            enter_order,
            exit_order,
        ]),
        get_fee=fee,
    )
    # fetch reports "canceled", re-creation fails with ExchangeError.
    mocker.patch.multiple(
        EXMS,
        fetch_stoploss_order=MagicMock(return_value={'status': 'canceled', 'id': 100}),
        create_stoploss=MagicMock(side_effect=ExchangeError()),
    )
    freqtrade = FreqtradeBot(default_conf_usdt)
    patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
    freqtrade.enter_positions()
    trade = Trade.session.scalars(select(Trade)).first()
    assert trade.is_short == is_short
    trade.is_open = True
    trade.stoploss_order_id = "100"
    trade.orders.append(
        Order(
            ft_order_side='stoploss',
            ft_pair=trade.pair,
            ft_is_open=True,
            ft_amount=trade.amount,
            ft_price=trade.stop_loss,
            order_id='100',
            status='open',
        )
    )
    assert trade
    assert freqtrade.handle_stoploss_on_exchange(trade) is False
    assert log_has_re(r'Stoploss order was cancelled, but unable to recreate one.*', caplog)
    # Stale id is cleared; trade remains open for the next iteration.
    assert trade.stoploss_order_id is None
    assert trade.is_open is True
@pytest.mark.parametrize("is_short", [False, True])
def test_create_stoploss_order_invalid_order(
    mocker, default_conf_usdt, caplog, fee, is_short, limit_order
):
    """InvalidOrderException while placing a stoploss must trigger an
    emergency market exit: the trade is force-sold, the exit reason is
    EMERGENCY_EXIT, and RPC messages for exit/exit_fill are sent."""
    open_order = limit_order[entry_side(is_short)]
    order = limit_order[exit_side(is_short)]
    rpc_mock = patch_RPCManager(mocker)
    patch_exchange(mocker)
    create_order_mock = MagicMock(side_effect=[
        open_order,
        order,
    ])
    mocker.patch.multiple(
        EXMS,
        fetch_ticker=MagicMock(return_value={
            'bid': 1.9,
            'ask': 2.2,
            'last': 1.9
        }),
        create_order=create_order_mock,
        get_fee=fee,
    )
    mocker.patch.multiple(
        EXMS,
        fetch_order=MagicMock(return_value={'status': 'canceled'}),
        create_stoploss=MagicMock(side_effect=InvalidOrderException()),
    )
    freqtrade = FreqtradeBot(default_conf_usdt)
    patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
    freqtrade.strategy.order_types['stoploss_on_exchange'] = True
    freqtrade.enter_positions()
    trade = Trade.session.scalars(select(Trade)).first()
    trade.is_short = is_short
    caplog.clear()
    rpc_mock.reset_mock()
    freqtrade.create_stoploss_order(trade, 200)
    assert trade.stoploss_order_id is None
    assert trade.exit_reason == ExitType.EMERGENCY_EXIT.value
    assert log_has("Unable to place a stoploss order on exchange. ", caplog)
    assert log_has("Exiting the trade forcefully", caplog)
    # Should call a market sell
    assert create_order_mock.call_count == 2
    assert create_order_mock.call_args[1]['ordertype'] == 'market'
    assert create_order_mock.call_args[1]['pair'] == trade.pair
    assert create_order_mock.call_args[1]['amount'] == trade.amount
    # Rpc is sending first buy, then sell
    assert rpc_mock.call_count == 2
    assert rpc_mock.call_args_list[0][0][0]['sell_reason'] == ExitType.EMERGENCY_EXIT.value
    assert rpc_mock.call_args_list[0][0][0]['order_type'] == 'market'
    assert rpc_mock.call_args_list[0][0][0]['type'] == 'exit'
    assert rpc_mock.call_args_list[1][0][0]['type'] == 'exit_fill'
@pytest.mark.parametrize("is_short", [False, True])
def test_create_stoploss_order_insufficient_funds(
    mocker, default_conf_usdt, caplog, fee, limit_order, is_short
):
    """InsufficientFundsError while placing a stoploss must route through
    handle_insufficient_funds and must not overwrite an existing
    stoploss_order_id."""
    exit_order = limit_order[exit_side(is_short)]['id']
    freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
    mock_insuf = mocker.patch('freqtrade.freqtradebot.FreqtradeBot.handle_insufficient_funds')
    mocker.patch.multiple(
        EXMS,
        fetch_ticker=MagicMock(return_value={
            'bid': 1.9,
            'ask': 2.2,
            'last': 1.9
        }),
        create_order=MagicMock(side_effect=[
            limit_order[entry_side(is_short)],
            exit_order,
        ]),
        get_fee=fee,
        fetch_order=MagicMock(return_value={'status': 'canceled'}),
    )
    mocker.patch.multiple(
        EXMS,
        create_stoploss=MagicMock(side_effect=InsufficientFundsError()),
    )
    patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
    freqtrade.strategy.order_types['stoploss_on_exchange'] = True
    freqtrade.enter_positions()
    trade = Trade.session.scalars(select(Trade)).first()
    trade.is_short = is_short
    caplog.clear()
    freqtrade.create_stoploss_order(trade, 200)
    # stoploss_orderid was empty before
    assert trade.stoploss_order_id is None
    assert mock_insuf.call_count == 1
    mock_insuf.reset_mock()
    trade.stoploss_order_id = 'stoploss_orderid'
    freqtrade.create_stoploss_order(trade, 200)
    # No change to stoploss-orderid
    assert trade.stoploss_order_id == 'stoploss_orderid'
    assert mock_insuf.call_count == 1
@pytest.mark.parametrize("is_short,bid,ask,stop_price,hang_price", [
    (False, [4.38, 4.16], [4.4, 4.17], ['2.0805', 4.4 * 0.95], 3),
    (True, [1.09, 1.21], [1.1, 1.22], ['2.321', 1.09 * 1.05], 1.5),
])
@pytest.mark.usefixtures("init_persistence")
def test_handle_stoploss_on_exchange_trailing(
    mocker, default_conf_usdt, fee, is_short, bid, ask, limit_order, stop_price, hang_price
) -> None:
    """Trailing stoploss on exchange: no cancel/replace happens while the
    stoploss_on_exchange_interval (60s) has not elapsed; once set to 0, the
    old order is cancelled and re-created at the trailed price; finally a
    price drop below the stop exits the trade in dry-run."""
    # When trailing stoploss is set
    enter_order = limit_order[entry_side(is_short)]
    exit_order = limit_order[exit_side(is_short)]
    stoploss = MagicMock(return_value={'id': 13434334, 'status': 'open'})
    patch_RPCManager(mocker)
    mocker.patch.multiple(
        EXMS,
        fetch_ticker=MagicMock(return_value={
            'bid': 2.19,
            'ask': 2.2,
            'last': 2.19,
        }),
        create_order=MagicMock(side_effect=[
            enter_order,
            exit_order,
        ]),
        get_fee=fee,
    )
    mocker.patch.multiple(
        EXMS,
        create_stoploss=stoploss,
        stoploss_adjust=MagicMock(return_value=True),
    )
    # enabling TSL
    default_conf_usdt['trailing_stop'] = True
    # disabling ROI
    default_conf_usdt['minimal_roi']['0'] = 999999999
    freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
    # enabling stoploss on exchange
    freqtrade.strategy.order_types['stoploss_on_exchange'] = True
    # setting stoploss
    freqtrade.strategy.stoploss = 0.05 if is_short else -0.05
    # setting stoploss_on_exchange_interval to 60 seconds
    freqtrade.strategy.order_types['stoploss_on_exchange_interval'] = 60
    patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
    freqtrade.enter_positions()
    trade = Trade.session.scalars(select(Trade)).first()
    trade.is_short = is_short
    trade.is_open = True
    trade.stoploss_order_id = '100'
    trade.stoploss_last_update = dt_now() - timedelta(minutes=20)
    trade.orders.append(
        Order(
            ft_order_side='stoploss',
            ft_pair=trade.pair,
            ft_is_open=True,
            ft_amount=trade.amount,
            ft_price=trade.stop_loss,
            order_id='100',
        )
    )
    stoploss_order_hanging = MagicMock(return_value={
        'id': '100',
        'status': 'open',
        'type': 'stop_loss_limit',
        'price': hang_price,
        'average': 2,
        'info': {
            'stopPrice': stop_price[0]
        }
    })
    mocker.patch(f'{EXMS}.fetch_stoploss_order', stoploss_order_hanging)
    # stoploss initially at 5%
    assert freqtrade.handle_trade(trade) is False
    assert freqtrade.handle_stoploss_on_exchange(trade) is False
    # price jumped 2x
    mocker.patch(
        f'{EXMS}.fetch_ticker',
        MagicMock(return_value={
            'bid': bid[0],
            'ask': ask[0],
            'last': bid[0],
        })
    )
    cancel_order_mock = MagicMock()
    stoploss_order_mock = MagicMock(return_value={'id': 'so1', 'status': 'open'})
    mocker.patch(f'{EXMS}.cancel_stoploss_order', cancel_order_mock)
    mocker.patch(f'{EXMS}.create_stoploss', stoploss_order_mock)
    # stoploss should not be updated as the interval is 60 seconds
    assert freqtrade.handle_trade(trade) is False
    assert freqtrade.handle_stoploss_on_exchange(trade) is False
    cancel_order_mock.assert_not_called()
    stoploss_order_mock.assert_not_called()
    assert freqtrade.handle_trade(trade) is False
    # In-memory stop was still trailed even though the exchange order wasn't touched.
    assert trade.stop_loss == stop_price[1]
    trade.stoploss_order_id = '100'
    # setting stoploss_on_exchange_interval to 0 seconds
    freqtrade.strategy.order_types['stoploss_on_exchange_interval'] = 0
    assert freqtrade.handle_stoploss_on_exchange(trade) is False
    # Now the exchange order is cancelled and re-created at the trailed price.
    cancel_order_mock.assert_called_once_with('100', 'ETH/USDT')
    stoploss_order_mock.assert_called_once_with(
        amount=30,
        pair='ETH/USDT',
        order_types=freqtrade.strategy.order_types,
        stop_price=stop_price[1],
        side=exit_side(is_short),
        leverage=1.0
    )
    # price fell below stoploss, so dry-run sells trade.
    mocker.patch(
        f'{EXMS}.fetch_ticker',
        MagicMock(return_value={
            'bid': bid[1],
            'ask': ask[1],
            'last': bid[1],
        })
    )
    assert freqtrade.handle_trade(trade) is True
    assert trade.stoploss_order_id is None
@pytest.mark.parametrize("is_short", [False, True])
def test_handle_stoploss_on_exchange_trailing_error(
    mocker, default_conf_usdt, fee, caplog, limit_order, is_short
) -> None:
    """Error paths of handle_trailing_stoploss_on_exchange: a failing cancel
    is logged but re-creation is still attempted; a failing create is logged
    after a successful cancel."""
    enter_order = limit_order[entry_side(is_short)]
    exit_order = limit_order[exit_side(is_short)]
    # When trailing stoploss is set
    stoploss = MagicMock(return_value={'id': '13434334', 'status': 'open'})
    patch_exchange(mocker)
    mocker.patch.multiple(
        EXMS,
        fetch_ticker=MagicMock(return_value={
            'bid': 1.9,
            'ask': 2.2,
            'last': 1.9
        }),
        create_order=MagicMock(side_effect=[
            {'id': enter_order['id']},
            {'id': exit_order['id']},
        ]),
        get_fee=fee,
    )
    mocker.patch.multiple(
        EXMS,
        create_stoploss=stoploss,
        stoploss_adjust=MagicMock(return_value=True),
    )
    # enabling TSL
    default_conf_usdt['trailing_stop'] = True
    freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
    # enabling stoploss on exchange
    freqtrade.strategy.order_types['stoploss_on_exchange'] = True
    # setting stoploss
    freqtrade.strategy.stoploss = 0.05 if is_short else -0.05
    # setting stoploss_on_exchange_interval to 60 seconds
    freqtrade.strategy.order_types['stoploss_on_exchange_interval'] = 60
    patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
    freqtrade.enter_positions()
    trade = Trade.session.scalars(select(Trade)).first()
    trade.is_short = is_short
    trade.is_open = True
    trade.stoploss_order_id = "abcd"
    trade.stop_loss = 0.2
    # Far in the past so the interval check cannot block the update.
    trade.stoploss_last_update = (dt_now() - timedelta(minutes=601)).replace(tzinfo=None)
    trade.is_short = is_short
    stoploss_order_hanging = {
        'id': "abcd",
        'status': 'open',
        'type': 'stop_loss_limit',
        'price': 3,
        'average': 2,
        'info': {
            'stopPrice': '0.1'
        }
    }
    # Case 1: cancel raises -> warning logged, create still attempted.
    mocker.patch(f'{EXMS}.cancel_stoploss_order',
                 side_effect=InvalidOrderException())
    mocker.patch(f'{EXMS}.fetch_stoploss_order',
                 return_value=stoploss_order_hanging)
    freqtrade.handle_trailing_stoploss_on_exchange(trade, stoploss_order_hanging)
    assert log_has_re(r"Could not cancel stoploss order abcd for pair ETH/USDT.*", caplog)
    # Still try to create order
    assert stoploss.call_count == 1
    # Fail creating stoploss order
    trade.stoploss_last_update = dt_now() - timedelta(minutes=601)
    caplog.clear()
    cancel_mock = mocker.patch(f'{EXMS}.cancel_stoploss_order')
    mocker.patch(f'{EXMS}.create_stoploss', side_effect=ExchangeError())
    freqtrade.handle_trailing_stoploss_on_exchange(trade, stoploss_order_hanging)
    assert cancel_mock.call_count == 1
    assert log_has_re(r"Could not create trailing stoploss order for pair ETH/USDT\..*", caplog)
def test_stoploss_on_exchange_price_rounding(
        mocker, default_conf_usdt, fee, open_trade_usdt) -> None:
    """The stop price must be passed through the exchange's price_to_precision
    before being compared via stoploss_adjust (here 222.55 is rounded to 222
    by the int() side-effect)."""
    patch_RPCManager(mocker)
    mocker.patch.multiple(
        EXMS,
        get_fee=fee,
    )
    # price_to_precision stand-in that truncates the stop price to an int.
    price_mock = MagicMock(side_effect=lambda p, s, **kwargs: int(s))
    stoploss_mock = MagicMock(return_value={'id': '13434334'})
    adjust_mock = MagicMock(return_value=False)
    mocker.patch.multiple(
        EXMS,
        create_stoploss=stoploss_mock,
        stoploss_adjust=adjust_mock,
        price_to_precision=price_mock,
    )
    freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
    open_trade_usdt.stoploss_order_id = '13434334'
    open_trade_usdt.stop_loss = 222.55
    freqtrade.handle_trailing_stoploss_on_exchange(open_trade_usdt, {})
    assert price_mock.call_count == 1
    assert adjust_mock.call_count == 1
    # stoploss_adjust received the rounded price, not the raw 222.55.
    assert adjust_mock.call_args_list[0][0][0] == 222
@pytest.mark.parametrize("is_short", [False, True])
@pytest.mark.usefixtures("init_persistence")
def test_handle_stoploss_on_exchange_custom_stop(
    mocker, default_conf_usdt, fee, is_short, limit_order
) -> None:
    """Like the trailing test, but the stop distance comes from the strategy's
    custom_stoploss callback (-4%): the on-exchange order is only replaced
    once the interval allows it, at the custom-stop price."""
    enter_order = limit_order[entry_side(is_short)]
    exit_order = limit_order[exit_side(is_short)]
    # When trailing stoploss is set
    stoploss = MagicMock(return_value={'id': 13434334, 'status': 'open'})
    patch_RPCManager(mocker)
    mocker.patch.multiple(
        EXMS,
        fetch_ticker=MagicMock(return_value={
            'bid': 1.9,
            'ask': 2.2,
            'last': 1.9
        }),
        create_order=MagicMock(side_effect=[
            enter_order,
            exit_order,
        ]),
        get_fee=fee,
    )
    mocker.patch.multiple(
        EXMS,
        create_stoploss=stoploss,
        stoploss_adjust=MagicMock(return_value=True),
    )
    # enabling TSL
    default_conf_usdt['use_custom_stoploss'] = True
    # disabling ROI
    default_conf_usdt['minimal_roi']['0'] = 999999999
    freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
    # enabling stoploss on exchange
    freqtrade.strategy.order_types['stoploss_on_exchange'] = True
    # setting stoploss
    freqtrade.strategy.custom_stoploss = lambda *args, **kwargs: -0.04
    # setting stoploss_on_exchange_interval to 60 seconds
    freqtrade.strategy.order_types['stoploss_on_exchange_interval'] = 60
    patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
    freqtrade.enter_positions()
    trade = Trade.session.scalars(select(Trade)).first()
    trade.is_short = is_short
    trade.is_open = True
    trade.stoploss_order_id = '100'
    trade.stoploss_last_update = dt_now() - timedelta(minutes=601)
    trade.orders.append(
        Order(
            ft_order_side='stoploss',
            ft_pair=trade.pair,
            ft_is_open=True,
            ft_amount=trade.amount,
            ft_price=trade.stop_loss,
            order_id='100',
        )
    )
    stoploss_order_hanging = MagicMock(return_value={
        'id': '100',
        'status': 'open',
        'type': 'stop_loss_limit',
        'price': 3,
        'average': 2,
        'info': {
            'stopPrice': '2.0805'
        }
    })
    mocker.patch(f'{EXMS}.fetch_stoploss_order', stoploss_order_hanging)
    assert freqtrade.handle_trade(trade) is False
    assert freqtrade.handle_stoploss_on_exchange(trade) is False
    # price jumped 2x
    mocker.patch(
        f'{EXMS}.fetch_ticker',
        MagicMock(return_value={
            'bid': 4.38 if not is_short else 1.9 / 2,
            'ask': 4.4 if not is_short else 2.2 / 2,
            'last': 4.38 if not is_short else 1.9 / 2,
        })
    )
    cancel_order_mock = MagicMock()
    stoploss_order_mock = MagicMock(return_value={'id': 'so1', 'status': 'open'})
    mocker.patch(f'{EXMS}.cancel_stoploss_order', cancel_order_mock)
    mocker.patch(f'{EXMS}.create_stoploss', stoploss_order_mock)
    trade.stoploss_order_id = '100'
    # stoploss should not be updated as the interval is 60 seconds
    assert freqtrade.handle_trade(trade) is False
    assert freqtrade.handle_stoploss_on_exchange(trade) is False
    cancel_order_mock.assert_not_called()
    stoploss_order_mock.assert_not_called()
    assert freqtrade.handle_trade(trade) is False
    # NOTE(review): due to conditional-expression precedence this parses as
    # `assert (trade.stop_loss == 4.4 * 0.96) if not is_short else 1.1`,
    # so for is_short=True the assertion is just the truthy literal 1.1 and
    # verifies nothing. Likely wants parentheses around the whole conditional
    # with the correct short-side value — TODO confirm expected short value.
    assert trade.stop_loss == 4.4 * 0.96 if not is_short else 1.1
    # NOTE(review): same precedence issue — vacuous for is_short=True.
    assert trade.stop_loss_pct == -0.04 if not is_short else 0.04
    # setting stoploss_on_exchange_interval to 0 seconds
    freqtrade.strategy.order_types['stoploss_on_exchange_interval'] = 0
    assert freqtrade.handle_stoploss_on_exchange(trade) is False
    cancel_order_mock.assert_called_once_with('100', 'ETH/USDT')
    # Long uses modified ask - offset, short modified bid + offset
    stoploss_order_mock.assert_called_once_with(
        amount=pytest.approx(trade.amount),
        pair='ETH/USDT',
        order_types=freqtrade.strategy.order_types,
        stop_price=4.4 * 0.96 if not is_short else 0.95 * 1.04,
        side=exit_side(is_short),
        leverage=1.0
    )
    # price fell below stoploss, so dry-run sells trade.
    mocker.patch(
        f'{EXMS}.fetch_ticker',
        MagicMock(return_value={
            'bid': 4.17,
            'ask': 4.19,
            'last': 4.17
        })
    )
    assert freqtrade.handle_trade(trade) is True
def test_tsl_on_exchange_compatible_with_edge(mocker, edge_conf, fee, limit_order) -> None:
    """Trailing stoploss on exchange combined with Edge position sizing:
    the Edge-dictated stop must hold on a 5% dip, then trail to 1% below
    price after a 2x jump, replacing the on-exchange order."""
    enter_order = limit_order['buy']
    exit_order = limit_order['sell']
    enter_order['average'] = 2.19
    # When trailing stoploss is set
    stoploss = MagicMock(return_value={'id': '13434334', 'status': 'open'})
    patch_RPCManager(mocker)
    patch_exchange(mocker)
    patch_edge(mocker)
    edge_conf['max_open_trades'] = float('inf')
    edge_conf['dry_run_wallet'] = 999.9
    edge_conf['exchange']['name'] = 'binance'
    mocker.patch.multiple(
        EXMS,
        fetch_ticker=MagicMock(return_value={
            'bid': 2.19,
            'ask': 2.2,
            'last': 2.19
        }),
        create_order=MagicMock(side_effect=[
            enter_order,
            exit_order,
        ]),
        get_fee=fee,
        create_stoploss=stoploss,
    )
    # enabling TSL
    edge_conf['trailing_stop'] = True
    edge_conf['trailing_stop_positive'] = 0.01
    edge_conf['trailing_stop_positive_offset'] = 0.011
    # disabling ROI
    edge_conf['minimal_roi']['0'] = 999999999
    freqtrade = FreqtradeBot(edge_conf)
    # enabling stoploss on exchange
    freqtrade.strategy.order_types['stoploss_on_exchange'] = True
    # setting stoploss
    freqtrade.strategy.stoploss = -0.02
    # setting stoploss_on_exchange_interval to 0 seconds
    freqtrade.strategy.order_types['stoploss_on_exchange_interval'] = 0
    patch_get_signal(freqtrade)
    freqtrade.active_pair_whitelist = freqtrade.edge.adjust(freqtrade.active_pair_whitelist)
    freqtrade.enter_positions()
    trade = Trade.session.scalars(select(Trade)).first()
    trade.is_open = True
    trade.stoploss_order_id = '100'
    trade.stoploss_last_update = dt_now()
    trade.orders.append(
        Order(
            ft_order_side='stoploss',
            ft_pair=trade.pair,
            ft_is_open=True,
            ft_amount=trade.amount,
            ft_price=trade.stop_loss,
            order_id='100',
        )
    )
    stoploss_order_hanging = MagicMock(return_value={
        'id': '100',
        'status': 'open',
        'type': 'stop_loss_limit',
        'price': 3,
        'average': 2,
        'stopPrice': '2.178'
    })
    mocker.patch(f'{EXMS}.fetch_stoploss_order', stoploss_order_hanging)
    # stoploss initially at 20% as edge dictated it.
    assert freqtrade.handle_trade(trade) is False
    assert freqtrade.handle_stoploss_on_exchange(trade) is False
    assert pytest.approx(trade.stop_loss) == 1.76
    cancel_order_mock = MagicMock()
    stoploss_order_mock = MagicMock()
    mocker.patch(f'{EXMS}.cancel_stoploss_order', cancel_order_mock)
    mocker.patch(f'{EXMS}.create_stoploss', stoploss_order_mock)
    # price goes down 5%
    mocker.patch(f'{EXMS}.fetch_ticker', MagicMock(return_value={
        'bid': 2.19 * 0.95,
        'ask': 2.2 * 0.95,
        'last': 2.19 * 0.95
    }))
    assert freqtrade.handle_trade(trade) is False
    assert freqtrade.handle_stoploss_on_exchange(trade) is False
    # stoploss should remain the same
    assert pytest.approx(trade.stop_loss) == 1.76
    # stoploss on exchange should not be canceled
    cancel_order_mock.assert_not_called()
    # price jumped 2x
    mocker.patch(f'{EXMS}.fetch_ticker', MagicMock(return_value={
        'bid': 4.38,
        'ask': 4.4,
        'last': 4.38
    }))
    assert freqtrade.handle_trade(trade) is False
    assert freqtrade.handle_stoploss_on_exchange(trade) is False
    # stoploss should be set to 1% as trailing is on
    assert trade.stop_loss == 4.4 * 0.99
    cancel_order_mock.assert_called_once_with('100', 'NEO/BTC')
    stoploss_order_mock.assert_called_once_with(
        amount=30,
        pair='NEO/BTC',
        order_types=freqtrade.strategy.order_types,
        stop_price=4.4 * 0.99,
        side='sell',
        leverage=1.0
    )
@pytest.mark.parametrize('return_value,side_effect,log_message', [
    (False, None, 'Found no enter signals for whitelisted currencies. Trying again...'),
    (None, DependencyException, 'Unable to create trade for ETH/USDT: ')
])
def test_enter_positions(mocker, default_conf_usdt, return_value, side_effect,
                         log_message, caplog) -> None:
    """enter_positions must return 0 trades and log the expected message both
    when create_trade finds no signal (returns False) and when it raises a
    DependencyException — while still being attempted for every pair."""
    caplog.set_level(logging.DEBUG)
    freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
    mock_ct = mocker.patch(
        'freqtrade.freqtradebot.FreqtradeBot.create_trade',
        MagicMock(
            return_value=return_value,
            side_effect=side_effect
        )
    )
    n = freqtrade.enter_positions()
    assert n == 0
    assert log_has(log_message, caplog)
    # create_trade should be called once for every pair in the whitelist.
    assert mock_ct.call_count == len(default_conf_usdt['exchange']['pair_whitelist'])
@pytest.mark.usefixtures("init_persistence")
@pytest.mark.parametrize("is_short", [False, True])
def test_exit_positions(mocker, default_conf_usdt, limit_order, is_short, caplog) -> None:
    """exit_positions on a trade with a closed entry order must not apply
    fee-adjustment logic (get_real_amount is never called) and returns 0."""
    freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
    mocker.patch('freqtrade.freqtradebot.FreqtradeBot.handle_trade', MagicMock(return_value=True))
    mocker.patch(f'{EXMS}.fetch_order', return_value=limit_order[entry_side(is_short)])
    mocker.patch(f'{EXMS}.get_trades_for_order', return_value=[])
    order_id = '123'
    trade = Trade(
        pair='ETH/USDT',
        fee_open=0.001,
        fee_close=0.001,
        open_rate=0.01,
        open_date=dt_now(),
        stake_amount=0.01,
        amount=11,
        exchange="binance",
        is_short=is_short,
        leverage=1,
    )
    trade.orders.append(Order(
        ft_order_side=entry_side(is_short),
        price=0.01,
        ft_pair=trade.pair,
        ft_amount=trade.amount,
        ft_price=trade.open_rate,
        order_id=order_id,
    ))
    Trade.session.add(trade)
    Trade.commit()
    trades = [trade]
    freqtrade.wallets.update()
    n = freqtrade.exit_positions(trades)
    assert n == 0
    # Test amount not modified by fee-logic
    assert not log_has_re(r'Applying fee to amount for Trade .*', caplog)
    gra = mocker.patch('freqtrade.freqtradebot.FreqtradeBot.get_real_amount', return_value=0.0)
    # test amount modified by fee-logic
    n = freqtrade.exit_positions(trades)
    assert n == 0
    # No open order remains, so fee logic is skipped entirely.
    assert gra.call_count == 0
@pytest.mark.usefixtures("init_persistence")
@pytest.mark.parametrize("is_short", [False, True])
def test_exit_positions_exception(mocker, default_conf_usdt, limit_order, caplog, is_short) -> None:
    """A DependencyException raised inside handle_trade must be caught by
    exit_positions, logged per-pair, and result in 0 exited trades."""
    freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
    order = limit_order[entry_side(is_short)]
    mocker.patch(f'{EXMS}.fetch_order', return_value=order)
    order_id = '123'
    trade = Trade(
        pair='ETH/USDT',
        fee_open=0.001,
        fee_close=0.001,
        open_rate=0.01,
        open_date=dt_now(),
        stake_amount=0.01,
        amount=11,
        exchange="binance",
        is_short=is_short,
        leverage=1,
    )
    trade.orders.append(Order(
        ft_order_side=entry_side(is_short),
        price=0.01,
        ft_pair=trade.pair,
        ft_amount=trade.amount,
        ft_price=trade.open_rate,
        order_id=order_id,
        ft_is_open=False,
    ))
    Trade.session.add(trade)
    Trade.commit()
    freqtrade.wallets.update()
    trades = [trade]
    # Test raise of DependencyException exception
    mocker.patch(
        'freqtrade.freqtradebot.FreqtradeBot.handle_trade',
        side_effect=DependencyException()
    )
    caplog.clear()
    n = freqtrade.exit_positions(trades)
    assert n == 0
    assert log_has('Unable to exit trade ETH/USDT: ', caplog)
@pytest.mark.parametrize("is_short", [False, True])
def test_update_trade_state(mocker, default_conf_usdt, limit_order, is_short, caplog) -> None:
    """update_trade_state: rejects an empty order id, syncs amount from the
    exchange order, applies fee adjustment when get_real_amount reports one,
    and treats a cancelled-empty order as handled (returns True)."""
    freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
    order = limit_order[entry_side(is_short)]
    mocker.patch('freqtrade.freqtradebot.FreqtradeBot.handle_trade', MagicMock(return_value=True))
    mocker.patch(f'{EXMS}.fetch_order', return_value=order)
    mocker.patch(f'{EXMS}.get_trades_for_order', return_value=[])
    mocker.patch('freqtrade.freqtradebot.FreqtradeBot.get_real_amount', return_value=0.0)
    order_id = order['id']
    trade = Trade(
        fee_open=0.001,
        fee_close=0.001,
        open_rate=0.01,
        open_date=dt_now(),
        amount=11,
        exchange="binance",
        is_short=is_short,
        leverage=1,
    )
    trade.orders.append(Order(
        ft_order_side=entry_side(is_short),
        price=0.01,
        order_id=order_id,
    ))
    # Passing no order id must be rejected without touching the trade.
    assert not freqtrade.update_trade_state(trade, None)
    assert log_has_re(r'Orderid for trade .* is empty.', caplog)
    caplog.clear()
    # Add datetime explicitly since sqlalchemy defaults apply only once written to database
    freqtrade.update_trade_state(trade, order_id)
    # Test amount not modified by fee-logic
    assert not log_has_re(r'Applying fee to .*', caplog)
    caplog.clear()
    assert not trade.has_open_orders
    assert trade.amount == order['amount']
    mocker.patch('freqtrade.freqtradebot.FreqtradeBot.get_real_amount', return_value=0.01)
    assert trade.amount == 30.0
    # test amount modified by fee-logic
    freqtrade.update_trade_state(trade, order_id)
    assert trade.amount == 29.99
    assert not trade.has_open_orders
    trade.is_open = True
    # Assert we call handle_trade() if trade is feasible for execution
    freqtrade.update_trade_state(trade, order_id)
    assert log_has_re('Found open order for.*', caplog)
    limit_buy_order_usdt_new = deepcopy(limit_order)
    limit_buy_order_usdt_new['filled'] = 0.0
    limit_buy_order_usdt_new['status'] = 'canceled'
    mocker.patch('freqtrade.freqtradebot.FreqtradeBot.get_real_amount', side_effect=ValueError)
    mocker.patch(f'{EXMS}.fetch_order', return_value=limit_buy_order_usdt_new)
    res = freqtrade.update_trade_state(trade, order_id)
    # Cancelled empty
    assert res is True
@pytest.mark.parametrize("is_short", [False, True])
@pytest.mark.parametrize('initial_amount,has_rounding_fee', [
    (30.0 + 1e-14, True),
    (8.0, False)
])
def test_update_trade_state_withorderdict(
    default_conf_usdt, trades_for_order, limit_order, fee, mocker, initial_amount,
    has_rounding_fee, is_short, caplog
):
    """When update_trade_state is given the order dict directly, fetch_order
    must not be hit; fee-in-base-currency deductions are applied (and logged)
    only when the trade amounts imply a rounding fee."""
    order = limit_order[entry_side(is_short)]
    trades_for_order[0]['amount'] = initial_amount
    order_id = "oid_123456"
    order['id'] = order_id
    mocker.patch(f'{EXMS}.get_trades_for_order', return_value=trades_for_order)
    # fetch_order should not be called!!
    mocker.patch(f'{EXMS}.fetch_order', MagicMock(side_effect=ValueError))
    patch_exchange(mocker)
    amount = sum(x['amount'] for x in trades_for_order)
    freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
    caplog.clear()
    trade = Trade(
        pair='LTC/USDT',
        amount=amount,
        exchange='binance',
        open_rate=2.0,
        open_date=dt_now(),
        fee_open=fee.return_value,
        fee_close=fee.return_value,
        is_open=True,
        leverage=1,
        is_short=is_short,
    )
    trade.orders.append(
        Order(
            ft_order_side=entry_side(is_short),
            ft_pair=trade.pair,
            ft_is_open=True,
            order_id=order_id,
        )
    )
    log_text = r'Applying fee on amount for .*'
    freqtrade.update_trade_state(trade, order_id, order)
    assert trade.amount != amount
    if has_rounding_fee:
        assert pytest.approx(trade.amount) == 29.992
        assert log_has_re(log_text, caplog)
    else:
        assert pytest.approx(trade.amount) == order['amount']
        assert not log_has_re(log_text, caplog)
@pytest.mark.parametrize("is_short", [False, True])
def test_update_trade_state_exception(mocker, default_conf_usdt, is_short, limit_order,
                                      caplog) -> None:
    """A DependencyException from get_real_amount inside update_trade_state
    must be swallowed and logged, not propagated."""
    order = limit_order[entry_side(is_short)]
    freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
    mocker.patch(f'{EXMS}.fetch_order', return_value=order)
    # TODO: should not be magicmock
    trade = MagicMock()
    trade.amount = 123
    open_order_id = '123'
    # Test raise of OperationalException exception
    mocker.patch(
        'freqtrade.freqtradebot.FreqtradeBot.get_real_amount',
        side_effect=DependencyException()
    )
    freqtrade.update_trade_state(trade, open_order_id)
    assert log_has('Could not update trade amount: ', caplog)
def test_update_trade_state_orderexception(mocker, default_conf_usdt, caplog) -> None:
    """If fetch_order raises InvalidOrderException, update_trade_state must
    log the failure and skip fee processing (get_real_amount never called)."""
    freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
    mocker.patch(f'{EXMS}.fetch_order', MagicMock(side_effect=InvalidOrderException))
    # TODO: should not be magicmock
    trade = MagicMock()
    open_order_id = '123'
    # Test raise of OperationalException exception
    grm_mock = mocker.patch("freqtrade.freqtradebot.FreqtradeBot.get_real_amount", MagicMock())
    freqtrade.update_trade_state(trade, open_order_id)
    assert grm_mock.call_count == 0
    assert log_has(f'Unable to fetch order {open_order_id}: ', caplog)
@pytest.mark.parametrize("is_short", [False, True])
def test_update_trade_state_sell(
    default_conf_usdt, trades_for_order, limit_order_open, limit_order, is_short, mocker
):
    """Updating the state of a filled exit order must close the trade, mark
    the order closed, and refresh wallets exactly once so funds can be
    re-used for new entries."""
    buy_order = limit_order[entry_side(is_short)]
    open_order = limit_order_open[exit_side(is_short)]
    l_order = limit_order[exit_side(is_short)]
    mocker.patch(f'{EXMS}.get_trades_for_order', return_value=trades_for_order)
    # fetch_order should not be called!!
    mocker.patch(f'{EXMS}.fetch_order', MagicMock(side_effect=ValueError))
    wallet_mock = MagicMock()
    mocker.patch('freqtrade.wallets.Wallets.update', wallet_mock)
    patch_exchange(mocker)
    freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
    amount = l_order["amount"]
    wallet_mock.reset_mock()
    trade = Trade(
        pair='LTC/ETH',
        amount=amount,
        exchange='binance',
        open_rate=0.245441,
        fee_open=0.0025,
        fee_close=0.0025,
        open_date=dt_now(),
        is_open=True,
        interest_rate=0.0005,
        leverage=1,
        is_short=is_short,
    )
    order = Order.parse_from_ccxt_object(buy_order, 'LTC/ETH', entry_side(is_short))
    trade.orders.append(order)
    order = Order.parse_from_ccxt_object(open_order, 'LTC/ETH', exit_side(is_short))
    trade.orders.append(order)
    assert order.status == 'open'
    freqtrade.update_trade_state(trade, trade.open_orders_ids[-1], l_order)
    assert trade.amount == l_order['amount']
    # Wallet needs to be updated after closing a limit-sell order to reenable buying
    assert wallet_mock.call_count == 1
    assert not trade.is_open
    # Order is updated by update_trade_state
    assert order.status == 'closed'
@pytest.mark.parametrize('is_short,close_profit', [
    (False, 0.09451372),
    (True, 0.08635224),
])
def test_handle_trade(
    default_conf_usdt, limit_order_open, limit_order, fee, mocker, is_short, close_profit
) -> None:
    """Happy-path exit via exit signal: handle_trade places the exit order,
    and once the order is simulated as filled, close rate/profit/date and
    the exit_reason tag are set on the trade."""
    open_order = limit_order_open[exit_side(is_short)]
    enter_order = limit_order[entry_side(is_short)]
    exit_order = limit_order[exit_side(is_short)]
    patch_RPCManager(mocker)
    patch_exchange(mocker)
    mocker.patch.multiple(
        EXMS,
        fetch_ticker=MagicMock(return_value={
            'bid': 2.19,
            'ask': 2.2,
            'last': 2.19
        }),
        create_order=MagicMock(side_effect=[
            enter_order,
            open_order,
        ]),
        get_fee=fee,
    )
    freqtrade = FreqtradeBot(default_conf_usdt)
    patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
    freqtrade.enter_positions()
    trade = Trade.session.scalars(select(Trade)).first()
    trade.is_short = is_short
    assert trade
    time.sleep(0.01)  # Race condition fix
    assert trade.is_open is True
    freqtrade.wallets.update()
    # Flip signals to exit with a tagged exit signal.
    patch_get_signal(freqtrade, enter_long=False, exit_short=is_short,
                     exit_long=not is_short, exit_tag='sell_signal1')
    assert freqtrade.handle_trade(trade) is True
    assert trade.open_orders_ids[-1] == exit_order['id']
    # Simulate fulfilled LIMIT_SELL order for trade
    trade.orders[-1].ft_is_open = False
    trade.orders[-1].status = 'closed'
    trade.orders[-1].filled = trade.orders[-1].remaining
    trade.orders[-1].remaining = 0.0
    trade.update_trade(trade.orders[-1])
    assert trade.close_rate == (2.0 if is_short else 2.2)
    assert pytest.approx(trade.close_profit) == close_profit
    assert pytest.approx(trade.calc_profit(trade.close_rate)) == 5.685
    assert trade.close_date is not None
    assert trade.exit_reason == 'sell_signal1'
@pytest.mark.parametrize("is_short", [False, True])
def test_handle_overlapping_signals(
    default_conf_usdt, ticker_usdt, limit_order_open, fee, mocker, is_short
) -> None:
    """Simultaneous enter+exit signals must cancel each other out (no action taken)."""
    open_order = limit_order_open[exit_side(is_short)]
    patch_RPCManager(mocker)
    patch_exchange(mocker)
    mocker.patch.multiple(
        EXMS,
        fetch_ticker=ticker_usdt,
        create_order=MagicMock(side_effect=[
            open_order,
            {'id': 1234553382},
        ]),
        get_fee=fee,
    )
    freqtrade = FreqtradeBot(default_conf_usdt)
    if is_short:
        patch_get_signal(freqtrade, enter_long=False, enter_short=True, exit_short=True)
    else:
        patch_get_signal(freqtrade, enter_long=True, exit_long=True)
    freqtrade.strategy.min_roi_reached = MagicMock(return_value=False)

    freqtrade.enter_positions()

    # Buy and Sell triggering, so doing nothing ...
    trades = Trade.session.scalars(select(Trade)).all()

    nb_trades = len(trades)
    assert nb_trades == 0

    # Buy is triggering, so buying ...
    patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
    freqtrade.enter_positions()
    trades = Trade.session.scalars(select(Trade)).all()
    for trade in trades:
        trade.is_short = is_short
    nb_trades = len(trades)
    assert nb_trades == 1
    assert trades[0].is_open is True

    # Buy and Sell are not triggering, so doing nothing ...
    patch_get_signal(freqtrade, enter_long=False)
    assert freqtrade.handle_trade(trades[0]) is False
    trades = Trade.session.scalars(select(Trade)).all()
    for trade in trades:
        trade.is_short = is_short
    nb_trades = len(trades)
    assert nb_trades == 1
    assert trades[0].is_open is True

    # Buy and Sell are triggering, so doing nothing ...
    if is_short:
        patch_get_signal(freqtrade, enter_long=False, enter_short=True, exit_short=True)
    else:
        patch_get_signal(freqtrade, enter_long=True, exit_long=True)
    assert freqtrade.handle_trade(trades[0]) is False
    trades = Trade.session.scalars(select(Trade)).all()
    for trade in trades:
        trade.is_short = is_short
    nb_trades = len(trades)
    assert nb_trades == 1
    assert trades[0].is_open is True

    # Sell is triggering, guess what : we are Selling!
    if is_short:
        patch_get_signal(freqtrade, enter_long=False, exit_short=True)
    else:
        patch_get_signal(freqtrade, enter_long=False, exit_long=True)
    trades = Trade.session.scalars(select(Trade)).all()
    for trade in trades:
        trade.is_short = is_short
    assert freqtrade.handle_trade(trades[0]) is True
@pytest.mark.parametrize("is_short", [False, True])
def test_handle_trade_roi(default_conf_usdt, ticker_usdt, limit_order_open, fee, mocker, caplog,
                          is_short) -> None:
    """When min_roi_reached reports True, handle_trade must exit with ExitType.ROI."""
    open_order = limit_order_open[entry_side(is_short)]

    caplog.set_level(logging.DEBUG)

    patch_RPCManager(mocker)
    mocker.patch.multiple(
        EXMS,
        fetch_ticker=ticker_usdt,
        create_order=MagicMock(side_effect=[
            open_order,
            {'id': 1234553382},
        ]),
        get_fee=fee,
    )
    freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
    patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
    freqtrade.strategy.min_roi_reached = MagicMock(return_value=True)

    freqtrade.enter_positions()

    trade = Trade.session.scalars(select(Trade)).first()
    trade.is_short = is_short
    trade.is_open = True

    # FIX: sniffing logs, suggest handle_trade should not execute_trade_exit
    #      instead that responsibility should be moved out of handle_trade(),
    #      we might just want to check if we are in a sell condition without
    #      executing
    # if ROI is reached we must sell
    caplog.clear()
    patch_get_signal(freqtrade)
    assert freqtrade.handle_trade(trade)
    assert log_has("ETH/USDT - Required profit reached. exit_type=ExitType.ROI",
                   caplog)
@pytest.mark.parametrize("is_short", [False, True])
def test_handle_trade_use_exit_signal(
    default_conf_usdt, ticker_usdt, limit_order_open, fee, mocker, caplog, is_short
) -> None:
    """With use_exit_signal enabled, an exit signal triggers an EXIT_SIGNAL exit."""
    enter_open_order = limit_order_open[exit_side(is_short)]
    exit_open_order = limit_order_open[entry_side(is_short)]
    # use_exit_signal is True by default
    caplog.set_level(logging.DEBUG)
    patch_RPCManager(mocker)
    mocker.patch.multiple(
        EXMS,
        fetch_ticker=ticker_usdt,
        create_order=MagicMock(side_effect=[
            enter_open_order,
            exit_open_order,
        ]),
        get_fee=fee,
    )
    freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
    patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
    freqtrade.strategy.min_roi_reached = MagicMock(return_value=False)
    freqtrade.enter_positions()

    trade = Trade.session.scalars(select(Trade)).first()
    trade.is_short = is_short
    trade.is_open = True

    # No exit signal yet - trade stays open.
    patch_get_signal(freqtrade, enter_long=False, exit_long=False)
    assert not freqtrade.handle_trade(trade)

    if is_short:
        patch_get_signal(freqtrade, enter_long=False, exit_short=True)
    else:
        patch_get_signal(freqtrade, enter_long=False, exit_long=True)
    assert freqtrade.handle_trade(trade)
    assert log_has("ETH/USDT - Sell signal received. exit_type=ExitType.EXIT_SIGNAL",
                   caplog)
@pytest.mark.parametrize("is_short", [False, True])
def test_close_trade(
    default_conf_usdt, ticker_usdt, limit_order_open, limit_order, fee, mocker, is_short
) -> None:
    """Calling handle_trade on an already-closed trade raises DependencyException."""
    open_order = limit_order_open[exit_side(is_short)]
    enter_order = limit_order[exit_side(is_short)]
    exit_order = limit_order[entry_side(is_short)]
    patch_RPCManager(mocker)
    patch_exchange(mocker)
    mocker.patch.multiple(
        EXMS,
        fetch_ticker=ticker_usdt,
        create_order=MagicMock(return_value=open_order),
        get_fee=fee,
    )
    freqtrade = FreqtradeBot(default_conf_usdt)
    patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)

    # Create trade and sell it
    freqtrade.enter_positions()
    trade = Trade.session.scalars(select(Trade)).first()
    trade.is_short = is_short
    assert trade

    # Apply filled entry and exit orders so the trade ends up closed.
    oobj = Order.parse_from_ccxt_object(enter_order, enter_order['symbol'], trade.entry_side)
    trade.update_trade(oobj)
    oobj = Order.parse_from_ccxt_object(exit_order, exit_order['symbol'], trade.exit_side)
    trade.update_trade(oobj)
    assert trade.is_open is False

    with pytest.raises(DependencyException, match=r'.*closed trade.*'):
        freqtrade.handle_trade(trade)
def test_bot_loop_start_called_once(mocker, default_conf_usdt, caplog):
    """bot_loop_start runs exactly once per process(), even when the callback raises."""
    ftbot = get_patched_freqtradebot(mocker, default_conf_usdt)
    mocker.patch('freqtrade.freqtradebot.FreqtradeBot.create_trade')
    patch_get_signal(ftbot)

    # A raising strategy callback must be caught and logged, not abort processing.
    loop_start_mock = MagicMock(side_effect=ValueError)
    analyze_mock = MagicMock()
    ftbot.strategy.bot_loop_start = loop_start_mock
    ftbot.strategy.analyze = analyze_mock

    ftbot.process()

    assert log_has_re(r'Strategy caused the following exception.*', caplog)
    assert loop_start_mock.call_count == 1
    assert analyze_mock.call_count == 1
@pytest.mark.parametrize("is_short", [False, True])
def test_manage_open_orders_entry_usercustom(
    default_conf_usdt, ticker_usdt, limit_buy_order_old, open_trade,
    limit_sell_order_old, fee, mocker, is_short
) -> None:
    """Entry-order timeout via the user's check_entry_timeout callback.

    The stale entry order is only cancelled when the callback returns True;
    False and raised exceptions leave the order untouched.
    """
    old_order = limit_sell_order_old if is_short else limit_buy_order_old
    old_order['id'] = open_trade.open_orders_ids[0]

    default_conf_usdt["unfilledtimeout"] = {"entry": 1400, "exit": 30}

    rpc_mock = patch_RPCManager(mocker)
    cancel_order_mock = MagicMock(return_value=old_order)
    cancel_enter_order = deepcopy(old_order)
    cancel_enter_order['status'] = 'canceled'
    cancel_order_wr_mock = MagicMock(return_value=cancel_enter_order)

    patch_exchange(mocker)
    mocker.patch.multiple(
        EXMS,
        fetch_ticker=ticker_usdt,
        fetch_order=MagicMock(return_value=old_order),
        cancel_order=cancel_order_mock,
        cancel_order_with_result=cancel_order_wr_mock,
        get_fee=fee
    )
    freqtrade = FreqtradeBot(default_conf_usdt)
    open_trade.is_short = is_short
    open_trade.orders[0].side = 'sell' if is_short else 'buy'
    open_trade.orders[0].ft_order_side = 'sell' if is_short else 'buy'
    Trade.session.add(open_trade)
    Trade.commit()

    # Ensure default is to return empty (so not mocked yet)
    freqtrade.manage_open_orders()
    assert cancel_order_mock.call_count == 0

    # Return false - trade remains open
    freqtrade.strategy.check_entry_timeout = MagicMock(return_value=False)
    freqtrade.manage_open_orders()
    assert cancel_order_mock.call_count == 0
    trades = Trade.session.scalars(
        select(Trade)
        .where(Order.ft_is_open.is_(True))
        .where(Order.ft_order_side != "stoploss")
        .where(Order.ft_trade_id == Trade.id)
    ).all()
    nb_trades = len(trades)
    assert nb_trades == 1
    assert freqtrade.strategy.check_entry_timeout.call_count == 1

    # Raising callback is tolerated - trade remains open.
    freqtrade.strategy.check_entry_timeout = MagicMock(side_effect=KeyError)
    freqtrade.manage_open_orders()
    assert cancel_order_mock.call_count == 0
    trades = Trade.session.scalars(
        select(Trade)
        .where(Order.ft_is_open.is_(True))
        .where(Order.ft_order_side != "stoploss")
        .where(Order.ft_trade_id == Trade.id)
    ).all()
    nb_trades = len(trades)
    assert nb_trades == 1
    assert freqtrade.strategy.check_entry_timeout.call_count == 1
    freqtrade.strategy.check_entry_timeout = MagicMock(return_value=True)

    # Trade should be closed since the function returns true
    freqtrade.manage_open_orders()
    assert cancel_order_wr_mock.call_count == 1
    assert rpc_mock.call_count == 2
    trades = Trade.session.scalars(
        select(Trade)
        .where(Order.ft_is_open.is_(True))
        .where(Order.ft_order_side != "stoploss")
        .where(Order.ft_trade_id == Trade.id)
    ).all()
    nb_trades = len(trades)
    assert nb_trades == 0
    assert freqtrade.strategy.check_entry_timeout.call_count == 1
@pytest.mark.parametrize("is_short", [False, True])
def test_manage_open_orders_entry(
    default_conf_usdt, ticker_usdt, limit_buy_order_old, open_trade,
    limit_sell_order_old, fee, mocker, is_short
) -> None:
    """Default timeout handling cancels stale entry orders without user callbacks."""
    old_order = limit_sell_order_old if is_short else limit_buy_order_old
    rpc_mock = patch_RPCManager(mocker)

    order = Order.parse_from_ccxt_object(old_order, 'mocked', 'buy')
    open_trade.orders[0] = order
    limit_buy_cancel = deepcopy(old_order)
    limit_buy_cancel['status'] = 'canceled'
    cancel_order_mock = MagicMock(return_value=limit_buy_cancel)
    patch_exchange(mocker)
    mocker.patch.multiple(
        EXMS,
        fetch_ticker=ticker_usdt,
        fetch_order=MagicMock(return_value=old_order),
        cancel_order_with_result=cancel_order_mock,
        get_fee=fee
    )
    freqtrade = FreqtradeBot(default_conf_usdt)

    open_trade.is_short = is_short
    Trade.session.add(open_trade)
    Trade.commit()

    # The user callbacks are mocked only to assert they are NOT consulted here.
    freqtrade.strategy.check_entry_timeout = MagicMock(return_value=False)
    freqtrade.strategy.adjust_entry_price = MagicMock(return_value=1234)
    # check it does cancel entry orders over the time limit
    freqtrade.manage_open_orders()
    assert cancel_order_mock.call_count == 1
    assert rpc_mock.call_count == 2
    trades = Trade.session.scalars(
        select(Trade)
        .where(Order.ft_is_open.is_(True))
        .where(Order.ft_order_side != "stoploss")
        .where(Order.ft_trade_id == Trade.id)
    ).all()
    nb_trades = len(trades)
    assert nb_trades == 0
    # Custom user entry-timeout is never called
    assert freqtrade.strategy.check_entry_timeout.call_count == 0
    # Entry adjustment is never called
    assert freqtrade.strategy.adjust_entry_price.call_count == 0
@pytest.mark.parametrize("is_short", [False, True])
def test_adjust_entry_cancel(
    default_conf_usdt, ticker_usdt, limit_buy_order_old, open_trade,
    limit_sell_order_old, fee, mocker, caplog, is_short
) -> None:
    """adjust_entry_price returning None cancels the order and deletes the trade."""
    freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
    old_order = limit_sell_order_old if is_short else limit_buy_order_old
    old_order['id'] = open_trade.open_orders[0].order_id
    limit_buy_cancel = deepcopy(old_order)
    limit_buy_cancel['status'] = 'canceled'
    cancel_order_mock = MagicMock(return_value=limit_buy_cancel)
    mocker.patch.multiple(
        EXMS,
        fetch_ticker=ticker_usdt,
        fetch_order=MagicMock(return_value=old_order),
        cancel_order_with_result=cancel_order_mock,
        get_fee=fee
    )
    open_trade.is_short = is_short
    Trade.session.add(open_trade)
    Trade.commit()

    # Timeout to not interfere
    freqtrade.strategy.ft_check_timed_out = MagicMock(return_value=False)

    # check that order is cancelled
    freqtrade.strategy.adjust_entry_price = MagicMock(return_value=None)
    freqtrade.manage_open_orders()
    trades = Trade.session.scalars(
        select(Trade)
        .where(Order.ft_trade_id == Trade.id)
    ).all()
    assert len(trades) == 0
    assert len(Order.session.scalars(select(Order)).all()) == 0
    assert log_has_re(
        f"{'Sell' if is_short else 'Buy'} order user requested order cancel*", caplog)
    assert log_has_re(
        f"{'Sell' if is_short else 'Buy'} order fully cancelled.*", caplog)

    # Entry adjustment is called
    assert freqtrade.strategy.adjust_entry_price.call_count == 1
@pytest.mark.parametrize("is_short", [False, True])
def test_adjust_entry_maintain_replace(
    default_conf_usdt, ticker_usdt, limit_buy_order_old, open_trade,
    limit_sell_order_old, fee, mocker, caplog, is_short
) -> None:
    """adjust_entry_price keeps the order at the same price, replaces it at a new one."""
    freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
    old_order = limit_sell_order_old if is_short else limit_buy_order_old
    old_order['id'] = open_trade.open_orders_ids[0]
    limit_buy_cancel = deepcopy(old_order)
    limit_buy_cancel['status'] = 'canceled'
    cancel_order_mock = MagicMock(return_value=limit_buy_cancel)
    mocker.patch.multiple(
        EXMS,
        fetch_ticker=ticker_usdt,
        fetch_order=MagicMock(return_value=old_order),
        cancel_order_with_result=cancel_order_mock,
        get_fee=fee,
        _dry_is_price_crossed=MagicMock(return_value=False),
    )
    open_trade.is_short = is_short
    Trade.session.add(open_trade)
    Trade.commit()
    # Timeout to not interfere
    freqtrade.strategy.ft_check_timed_out = MagicMock(return_value=False)

    # Check that order is maintained
    freqtrade.strategy.adjust_entry_price = MagicMock(return_value=old_order['price'])
    freqtrade.manage_open_orders()
    trades = Trade.session.scalars(
        select(Trade)
        .where(Order.ft_is_open.is_(True))
        .where(Order.ft_trade_id == Trade.id)
    ).all()
    assert len(trades) == 1
    assert len(Order.get_open_orders()) == 1
    # Entry adjustment is called
    assert freqtrade.strategy.adjust_entry_price.call_count == 1

    # Check that order is replaced
    # NOTE(review): {100, 10, 1} is a *set* literal; a tuple was presumably
    # intended here - confirm against get_valid_enter_price_and_stake's contract.
    freqtrade.get_valid_enter_price_and_stake = MagicMock(return_value={100, 10, 1})
    freqtrade.strategy.adjust_entry_price = MagicMock(return_value=1234)
    freqtrade.manage_open_orders()
    assert freqtrade.strategy.adjust_entry_price.call_count == 1
    trades = Trade.session.scalars(
        select(Trade)
        .where(Order.ft_is_open.is_(True))
        .where(Order.ft_trade_id == Trade.id)
    ).all()
    assert len(trades) == 1
    nb_all_orders = len(Order.session.scalars(select(Order)).all())
    assert nb_all_orders == 2
    # New order seems to be in closed status?
    # nb_open_orders = len(Order.get_open_orders())
    # assert nb_open_orders == 1
    assert log_has_re(
        f"{'Sell' if is_short else 'Buy'} order cancelled to be replaced*", caplog)
    # Entry adjustment is called
    assert freqtrade.strategy.adjust_entry_price.call_count == 1
@pytest.mark.parametrize("is_short", [False, True])
def test_check_handle_cancelled_buy(
    default_conf_usdt, ticker_usdt, limit_buy_order_old, open_trade,
    limit_sell_order_old, fee, mocker, caplog, is_short
) -> None:
    """ Handle Buy order cancelled on exchange"""
    old_order = limit_sell_order_old if is_short else limit_buy_order_old
    rpc_mock = patch_RPCManager(mocker)
    cancel_order_mock = MagicMock()
    patch_exchange(mocker)
    # Exchange reports the order as already cancelled with nothing filled.
    old_order.update({"status": "canceled", 'filled': 0.0})
    old_order['side'] = 'buy' if is_short else 'sell'
    old_order['id'] = open_trade.open_orders[0].order_id
    mocker.patch.multiple(
        EXMS,
        fetch_ticker=ticker_usdt,
        fetch_order=MagicMock(return_value=old_order),
        cancel_order=cancel_order_mock,
        get_fee=fee
    )
    freqtrade = FreqtradeBot(default_conf_usdt)
    open_trade.is_short = is_short
    Trade.session.add(open_trade)
    Trade.commit()

    # check it does cancel buy orders over the time limit
    freqtrade.manage_open_orders()
    # No local cancel needed - the exchange already did it.
    assert cancel_order_mock.call_count == 0
    assert rpc_mock.call_count == 2
    trades = Trade.session.scalars(
        select(Trade)
        .where(Order.ft_is_open.is_(True))
        .where(Order.ft_trade_id == Trade.id)
    ).all()
    assert len(trades) == 0
    exit_name = 'Buy' if is_short else 'Sell'
    assert log_has_re(f"{exit_name} order cancelled on exchange for Trade.*", caplog)
@pytest.mark.parametrize("is_short", [False, True])
def test_manage_open_orders_buy_exception(
    default_conf_usdt, ticker_usdt, open_trade, is_short, fee, mocker
) -> None:
    """An ExchangeError while fetching the order must leave the open order untouched."""
    rpc_mock = patch_RPCManager(mocker)
    cancel_order_mock = MagicMock()
    patch_exchange(mocker)
    mocker.patch(f'{EXMS}.validate_pairs', MagicMock())
    mocker.patch(f'{EXMS}.fetch_ticker', ticker_usdt)
    mocker.patch(f'{EXMS}.fetch_order', MagicMock(side_effect=ExchangeError))
    mocker.patch(f'{EXMS}.cancel_order', cancel_order_mock)
    mocker.patch(f'{EXMS}.get_fee', fee)
    freqtrade = FreqtradeBot(default_conf_usdt)

    open_trade.is_short = is_short
    Trade.session.add(open_trade)
    Trade.commit()

    freqtrade.manage_open_orders()

    # The exchange error aborts handling: nothing cancelled, order still open.
    assert cancel_order_mock.call_count == 0
    assert rpc_mock.call_count == 1
    assert len(open_trade.open_orders) == 1
@pytest.mark.parametrize("is_short", [False, True])
def test_manage_open_orders_exit_usercustom(
    default_conf_usdt, ticker_usdt, limit_sell_order_old, mocker,
    is_short, open_trade_usdt, caplog
) -> None:
    """Exit-order timeout handling via the user's check_exit_timeout callback.

    The stale exit order is only cancelled when the callback returns True
    (False and exceptions are tolerated); afterwards the emergency-exit path
    for repeatedly cancelled exit orders is exercised.
    """
    default_conf_usdt["unfilledtimeout"] = {"entry": 1440, "exit": 1440, "exit_timeout_count": 1}
    if is_short:
        limit_sell_order_old['side'] = 'buy'
    open_trade_usdt.is_short = is_short
    open_exit_order = Order.parse_from_ccxt_object(limit_sell_order_old, 'mocked',
                                                   'buy' if is_short else 'sell')
    open_trade_usdt.orders[-1] = open_exit_order

    rpc_mock = patch_RPCManager(mocker)
    cancel_order_mock = MagicMock()
    patch_exchange(mocker)
    mocker.patch(f'{EXMS}.get_min_pair_stake_amount', return_value=0.0)
    # Neutralize execute_trade_exit for the early manage_open_orders() calls.
    # (No binding needed here - it is re-patched below where its call count matters.)
    mocker.patch('freqtrade.freqtradebot.FreqtradeBot.execute_trade_exit')
    mocker.patch.multiple(
        EXMS,
        fetch_ticker=ticker_usdt,
        fetch_order=MagicMock(return_value=limit_sell_order_old),
        cancel_order=cancel_order_mock
    )
    freqtrade = FreqtradeBot(default_conf_usdt)

    open_trade_usdt.open_date = dt_now() - timedelta(hours=5)
    open_trade_usdt.close_date = dt_now() - timedelta(minutes=601)
    open_trade_usdt.close_profit_abs = 0.001

    Trade.session.add(open_trade_usdt)
    Trade.commit()
    # Ensure default is false
    freqtrade.manage_open_orders()
    assert cancel_order_mock.call_count == 0

    freqtrade.strategy.check_exit_timeout = MagicMock(return_value=False)
    freqtrade.strategy.check_entry_timeout = MagicMock(return_value=False)
    # Return false - No impact
    freqtrade.manage_open_orders()
    assert cancel_order_mock.call_count == 0
    assert rpc_mock.call_count == 1
    assert freqtrade.strategy.check_exit_timeout.call_count == 1
    assert freqtrade.strategy.check_entry_timeout.call_count == 0

    freqtrade.strategy.check_exit_timeout = MagicMock(side_effect=KeyError)
    freqtrade.strategy.check_entry_timeout = MagicMock(side_effect=KeyError)
    # Return Error - No impact
    freqtrade.manage_open_orders()
    assert cancel_order_mock.call_count == 0
    assert rpc_mock.call_count == 1
    assert freqtrade.strategy.check_exit_timeout.call_count == 1
    assert freqtrade.strategy.check_entry_timeout.call_count == 0

    # Return True - sells!
    freqtrade.strategy.check_exit_timeout = MagicMock(return_value=True)
    freqtrade.strategy.check_entry_timeout = MagicMock(return_value=True)
    freqtrade.manage_open_orders()
    assert cancel_order_mock.call_count == 1
    assert rpc_mock.call_count == 2
    assert freqtrade.strategy.check_exit_timeout.call_count == 1
    assert freqtrade.strategy.check_entry_timeout.call_count == 0

    # 2nd canceled trade - Fail execute exit
    caplog.clear()
    mocker.patch('freqtrade.persistence.Trade.get_exit_order_count', return_value=1)
    mocker.patch('freqtrade.freqtradebot.FreqtradeBot.execute_trade_exit',
                 side_effect=DependencyException)
    freqtrade.manage_open_orders()
    assert log_has_re('Unable to emergency exit .*', caplog)

    et_mock = mocker.patch('freqtrade.freqtradebot.FreqtradeBot.execute_trade_exit')
    caplog.clear()

    # 2nd canceled trade ...
    # If cancelling fails - no emergency exit!
    with patch('freqtrade.freqtradebot.FreqtradeBot.handle_cancel_exit', return_value=False):
        freqtrade.manage_open_orders()
        assert et_mock.call_count == 0

    freqtrade.manage_open_orders()
    assert log_has_re('Emergency exiting trade.*', caplog)
    assert et_mock.call_count == 1
@pytest.mark.parametrize("is_short", [False, True])
def test_manage_open_orders_exit(
    default_conf_usdt, ticker_usdt, limit_sell_order_old, mocker, is_short, open_trade_usdt
) -> None:
    """Default timeout handling cancels stale exit orders without user callbacks."""
    rpc_mock = patch_RPCManager(mocker)
    cancel_order_mock = MagicMock()
    limit_sell_order_old['id'] = '123456789_exit'
    limit_sell_order_old['side'] = 'buy' if is_short else 'sell'
    patch_exchange(mocker)
    mocker.patch.multiple(
        EXMS,
        fetch_ticker=ticker_usdt,
        fetch_order=MagicMock(return_value=limit_sell_order_old),
        cancel_order=cancel_order_mock,
        get_min_pair_stake_amount=MagicMock(return_value=0),
    )
    freqtrade = FreqtradeBot(default_conf_usdt)

    # Backdate the trade so the exit order is well past the timeout.
    open_trade_usdt.open_date = dt_now() - timedelta(hours=5)
    open_trade_usdt.close_date = dt_now() - timedelta(minutes=601)
    open_trade_usdt.close_profit_abs = 0.001
    open_trade_usdt.is_short = is_short

    Trade.session.add(open_trade_usdt)
    Trade.commit()

    freqtrade.strategy.check_exit_timeout = MagicMock(return_value=False)
    freqtrade.strategy.check_entry_timeout = MagicMock(return_value=False)
    # check it does cancel sell orders over the time limit
    freqtrade.manage_open_orders()
    assert cancel_order_mock.call_count == 1
    assert rpc_mock.call_count == 2
    assert open_trade_usdt.is_open is True
    # Custom user sell-timeout is never called
    assert freqtrade.strategy.check_exit_timeout.call_count == 0
    assert freqtrade.strategy.check_entry_timeout.call_count == 0
@pytest.mark.parametrize("is_short", [False, True])
def test_check_handle_cancelled_exit(
    default_conf_usdt, ticker_usdt, limit_sell_order_old, open_trade_usdt,
    is_short, mocker, caplog
) -> None:
    """ Handle sell order cancelled on exchange"""
    rpc_mock = patch_RPCManager(mocker)
    cancel_order_mock = MagicMock()
    # Exchange already cancelled the order with nothing filled.
    limit_sell_order_old.update({"status": "canceled", 'filled': 0.0})
    limit_sell_order_old['side'] = 'buy' if is_short else 'sell'
    limit_sell_order_old['id'] = open_trade_usdt.open_orders[0].order_id

    patch_exchange(mocker)
    mocker.patch.multiple(
        EXMS,
        fetch_ticker=ticker_usdt,
        fetch_order=MagicMock(return_value=limit_sell_order_old),
        cancel_order_with_result=cancel_order_mock
    )
    freqtrade = FreqtradeBot(default_conf_usdt)

    open_trade_usdt.open_date = dt_now() - timedelta(hours=5)
    open_trade_usdt.close_date = dt_now() - timedelta(minutes=601)
    open_trade_usdt.is_short = is_short
    Trade.session.add(open_trade_usdt)
    Trade.commit()

    # check it does cancel sell orders over the time limit
    freqtrade.manage_open_orders()
    # No local cancel needed; trade stays open for a new exit attempt.
    assert cancel_order_mock.call_count == 0
    assert rpc_mock.call_count == 2
    assert open_trade_usdt.is_open is True
    exit_name = 'Buy' if is_short else 'Sell'
    assert log_has_re(f"{exit_name} order cancelled on exchange for Trade.*", caplog)
@pytest.mark.parametrize("is_short", [False, True])
@pytest.mark.parametrize("leverage", [1, 3, 5, 10])
def test_manage_open_orders_partial(
    default_conf_usdt, ticker_usdt, limit_buy_order_old_partial, is_short, leverage,
    open_trade, mocker
) -> None:
    """Cancelling a partially filled entry keeps the trade with the filled amount."""
    rpc_mock = patch_RPCManager(mocker)
    open_trade.is_short = is_short
    open_trade.leverage = leverage
    open_trade.orders[0].ft_order_side = 'sell' if is_short else 'buy'
    limit_buy_order_old_partial['id'] = open_trade.orders[0].order_id
    limit_buy_order_old_partial['side'] = 'sell' if is_short else 'buy'
    limit_buy_canceled = deepcopy(limit_buy_order_old_partial)
    limit_buy_canceled['status'] = 'canceled'

    cancel_order_mock = MagicMock(return_value=limit_buy_canceled)
    patch_exchange(mocker)
    mocker.patch.multiple(
        EXMS,
        fetch_ticker=ticker_usdt,
        fetch_order=MagicMock(return_value=limit_buy_order_old_partial),
        cancel_order_with_result=cancel_order_mock
    )
    freqtrade = FreqtradeBot(default_conf_usdt)
    prior_stake = open_trade.stake_amount
    Trade.session.add(open_trade)
    Trade.commit()

    # check it does cancel buy orders over the time limit
    # note this is for a partially-complete buy order
    freqtrade.manage_open_orders()
    assert cancel_order_mock.call_count == 1
    assert rpc_mock.call_count == 3
    trades = Trade.session.scalars(
        select(Trade)
    ).all()
    assert len(trades) == 1
    # Trade shrinks to the filled amount; stake is rescaled accordingly.
    assert trades[0].amount == 23.0
    assert trades[0].stake_amount == open_trade.open_rate * trades[0].amount / leverage
    assert trades[0].stake_amount != prior_stake
    assert not trades[0].has_open_orders
@pytest.mark.parametrize("is_short", [False, True])
def test_manage_open_orders_partial_fee(
    default_conf_usdt, ticker_usdt, open_trade, caplog, fee, is_short,
    limit_buy_order_old_partial, trades_for_order,
    limit_buy_order_old_partial_canceled, mocker
) -> None:
    """Cancelling a partially filled entry also applies the fee to the trade amount."""
    open_trade.is_short = is_short
    open_trade.orders[0].ft_order_side = 'sell' if is_short else 'buy'
    rpc_mock = patch_RPCManager(mocker)
    limit_buy_order_old_partial['id'] = open_trade.orders[0].order_id
    limit_buy_order_old_partial_canceled['id'] = open_trade.open_orders_ids[0]
    limit_buy_order_old_partial['side'] = 'sell' if is_short else 'buy'
    limit_buy_order_old_partial_canceled['side'] = 'sell' if is_short else 'buy'

    cancel_order_mock = MagicMock(return_value=limit_buy_order_old_partial_canceled)
    # Zero free balance forces the fee to be deducted from the trade amount.
    mocker.patch('freqtrade.wallets.Wallets.get_free', MagicMock(return_value=0))
    patch_exchange(mocker)
    mocker.patch.multiple(
        EXMS,
        fetch_ticker=ticker_usdt,
        fetch_order=MagicMock(return_value=limit_buy_order_old_partial),
        cancel_order_with_result=cancel_order_mock,
        get_trades_for_order=MagicMock(return_value=trades_for_order),
    )
    freqtrade = FreqtradeBot(default_conf_usdt)

    assert open_trade.amount == limit_buy_order_old_partial['amount']

    open_trade.fee_open = fee()
    open_trade.fee_close = fee()
    Trade.session.add(open_trade)
    Trade.commit()
    # cancelling a half-filled order should update the amount to the bought amount
    # and apply fees if necessary.
    freqtrade.manage_open_orders()

    assert log_has_re(r"Applying fee on amount for Trade.*", caplog)

    assert cancel_order_mock.call_count == 1
    assert rpc_mock.call_count == 3
    trades = Trade.session.scalars(
        select(Trade)
        .where(Order.ft_trade_id == Trade.id)
    ).all()
    assert len(trades) == 1
    # Verify that trade has been updated
    assert trades[0].amount == (limit_buy_order_old_partial['amount'] -
                                limit_buy_order_old_partial['remaining']) - 0.023
    assert not trades[0].has_open_orders
    assert trades[0].fee_updated(open_trade.entry_side)
    assert pytest.approx(trades[0].fee_open) == 0.001
@pytest.mark.parametrize("is_short", [False, True])
def test_manage_open_orders_partial_except(
    default_conf_usdt, ticker_usdt, open_trade, caplog, fee, is_short,
    limit_buy_order_old_partial, trades_for_order,
    limit_buy_order_old_partial_canceled, mocker
) -> None:
    """A get_real_amount failure is logged; the trade keeps the unadjusted fill amount."""
    open_trade.is_short = is_short
    open_trade.orders[0].ft_order_side = 'sell' if is_short else 'buy'
    rpc_mock = patch_RPCManager(mocker)
    limit_buy_order_old_partial_canceled['id'] = open_trade.open_orders_ids[0]
    limit_buy_order_old_partial['id'] = open_trade.open_orders_ids[0]
    if is_short:
        limit_buy_order_old_partial['side'] = 'sell'
    cancel_order_mock = MagicMock(return_value=limit_buy_order_old_partial_canceled)
    patch_exchange(mocker)
    mocker.patch.multiple(
        EXMS,
        fetch_ticker=ticker_usdt,
        fetch_order=MagicMock(return_value=limit_buy_order_old_partial),
        cancel_order_with_result=cancel_order_mock,
        get_trades_for_order=MagicMock(return_value=trades_for_order),
    )
    # Force the fee-detection step itself to fail.
    mocker.patch('freqtrade.freqtradebot.FreqtradeBot.get_real_amount',
                 MagicMock(side_effect=DependencyException))
    freqtrade = FreqtradeBot(default_conf_usdt)

    assert open_trade.amount == limit_buy_order_old_partial['amount']

    open_trade.fee_open = fee()
    open_trade.fee_close = fee()
    Trade.session.add(open_trade)
    Trade.commit()
    # cancelling a half-filled order should update the amount to the bought amount
    # and apply fees if necessary.
    freqtrade.manage_open_orders()

    assert log_has_re(r"Could not update trade amount: .*", caplog)

    assert cancel_order_mock.call_count == 1
    assert rpc_mock.call_count == 3
    trades = Trade.session.scalars(
        select(Trade)
    ).all()
    assert len(trades) == 1
    # Verify that trade has been updated
    assert trades[0].amount == (limit_buy_order_old_partial['amount'] -
                                limit_buy_order_old_partial['remaining'])
    assert not trades[0].has_open_orders
    # Fee stays at its original value because the fee update failed.
    assert trades[0].fee_open == fee()
def test_manage_open_orders_exception(default_conf_usdt, ticker_usdt, open_trade_usdt, mocker,
                                      caplog) -> None:
    """An ExchangeError from fetch_order is logged with the full trade repr and traceback."""
    patch_RPCManager(mocker)
    patch_exchange(mocker)
    cancel_order_mock = MagicMock()

    mocker.patch.multiple(
        'freqtrade.freqtradebot.FreqtradeBot',
        handle_cancel_enter=MagicMock(),
        handle_cancel_exit=MagicMock(),
    )
    mocker.patch.multiple(
        EXMS,
        fetch_ticker=ticker_usdt,
        fetch_order=MagicMock(side_effect=ExchangeError('Oh snap')),
        cancel_order=cancel_order_mock
    )
    freqtrade = FreqtradeBot(default_conf_usdt)

    Trade.session.add(open_trade_usdt)
    Trade.commit()

    caplog.clear()
    freqtrade.manage_open_orders()
    assert log_has_re(r"Cannot query order for Trade\(id=1, pair=ADA/USDT, amount=30.00000000, "
                      r"is_short=False, leverage=1.0, "
                      r"open_rate=2.00000000, open_since="
                      f"{open_trade_usdt.open_date.strftime('%Y-%m-%d %H:%M:%S')}"
                      r"\) due to Traceback \(most recent call last\):\n*",
                      caplog)
@pytest.mark.parametrize("is_short", [False, True])
def test_handle_cancel_enter(mocker, caplog, default_conf_usdt, limit_order, is_short, fee) -> None:
    """handle_cancel_enter outcome depends on the already-filled amount of the order."""
    patch_RPCManager(mocker)
    patch_exchange(mocker)
    l_order = limit_order[entry_side(is_short)]
    cancel_buy_order = deepcopy(limit_order[entry_side(is_short)])
    cancel_buy_order['status'] = 'canceled'
    del cancel_buy_order['filled']

    cancel_order_mock = MagicMock(return_value=cancel_buy_order)
    mocker.patch(f'{EXMS}.cancel_order_with_result', cancel_order_mock)

    freqtrade = FreqtradeBot(default_conf_usdt)
    freqtrade._notify_enter_cancel = MagicMock()

    trade = mock_trade_usdt_4(fee, is_short)
    Trade.session.add(trade)
    Trade.commit()

    # Nothing filled: cancel succeeds.
    l_order['filled'] = 0.0
    l_order['status'] = 'open'
    reason = CANCEL_REASON['TIMEOUT']
    assert freqtrade.handle_cancel_enter(trade, l_order, trade.open_orders_ids[0], reason)
    assert cancel_order_mock.call_count == 1

    cancel_order_mock.reset_mock()
    caplog.clear()
    # Tiny fill below the exitable minimum: order is kept.
    l_order['filled'] = 0.01
    assert not freqtrade.handle_cancel_enter(trade, l_order, trade.open_orders_ids[0], reason)
    assert cancel_order_mock.call_count == 0
    assert log_has_re("Order .* for .* not cancelled, as the filled amount.* unexitable.*", caplog)

    caplog.clear()
    cancel_order_mock.reset_mock()
    # Substantial fill: cancel is attempted but reported as unsuccessful.
    l_order['filled'] = 2
    assert not freqtrade.handle_cancel_enter(trade, l_order, trade.open_orders_ids[0], reason)
    assert cancel_order_mock.call_count == 1

    # Order remained open for some reason (cancel failed)
    cancel_buy_order['status'] = 'open'
    cancel_order_mock = MagicMock(return_value=cancel_buy_order)
    mocker.patch(f'{EXMS}.cancel_order_with_result', cancel_order_mock)
    assert not freqtrade.handle_cancel_enter(trade, l_order, trade.open_orders_ids[0], reason)
    assert log_has_re(r"Order .* for .* not cancelled.", caplog)
    # min_pair_stake empty should not crash
    mocker.patch(f'{EXMS}.get_min_pair_stake_amount', return_value=None)
    assert not freqtrade.handle_cancel_enter(
        trade, limit_order[entry_side(is_short)], trade.open_orders_ids[0], reason
    )
@pytest.mark.parametrize("is_short", [False, True])
@pytest.mark.parametrize("limit_buy_order_canceled_empty", ['binance', 'kraken', 'bittrex'],
                         indirect=['limit_buy_order_canceled_empty'])
def test_handle_cancel_enter_exchanges(mocker, caplog, default_conf_usdt, is_short, fee,
                                       limit_buy_order_canceled_empty) -> None:
    """An order already cancelled on the exchange is removed without a cancel call."""
    patch_RPCManager(mocker)
    patch_exchange(mocker)
    cancel_order_mock = mocker.patch(f'{EXMS}.cancel_order_with_result',
                                     return_value=limit_buy_order_canceled_empty)
    notify_mock = mocker.patch('freqtrade.freqtradebot.FreqtradeBot._notify_enter_cancel')
    freqtrade = FreqtradeBot(default_conf_usdt)

    trade = mock_trade_usdt_4(fee, is_short)
    Trade.session.add(trade)
    Trade.commit()
    reason = CANCEL_REASON['TIMEOUT']

    assert freqtrade.handle_cancel_enter(
        trade, limit_buy_order_canceled_empty, trade.open_orders_ids[0], reason
    )
    # No cancel request is issued - the exchange already cancelled the order.
    assert cancel_order_mock.call_count == 0
    assert log_has_re(
        f'{trade.entry_side.capitalize()} order fully cancelled. '
        r'Removing .* from database\.',
        caplog
    )
    assert notify_mock.call_count == 1
@pytest.mark.parametrize("is_short", [False, True])
@pytest.mark.parametrize('cancelorder', [
{},
{'remaining': None},
'String Return value',
123
])
def test_handle_cancel_enter_corder_empty(mocker, default_conf_usdt, limit_order, is_short, fee,
cancelorder) -> None:
patch_RPCManager(mocker)
patch_exchange(mocker)
l_order = limit_order[entry_side(is_short)]
cancel_order_mock = MagicMock(return_value=cancelorder)
mocker.patch.multiple(
EXMS,
cancel_order=cancel_order_mock,
fetch_order=MagicMock(side_effect=InvalidOrderException)
)
freqtrade = FreqtradeBot(default_conf_usdt)
freqtrade._notify_enter_cancel = MagicMock()
trade = mock_trade_usdt_4(fee, is_short)
Trade.session.add(trade)
Trade.commit()
l_order['filled'] = 0.0
l_order['status'] = 'open'
reason = CANCEL_REASON['TIMEOUT']
assert freqtrade.handle_cancel_enter(trade, l_order, trade.open_orders_ids[0], reason)
assert cancel_order_mock.call_count == 1
cancel_order_mock.reset_mock()
l_order['filled'] = 1.0
order = deepcopy(l_order)
order['status'] = 'canceled'
mocker.patch(f'{EXMS}.fetch_order', return_value=order)
assert not freqtrade.handle_cancel_enter(trade, l_order, trade.open_orders_ids[0], reason)
assert cancel_order_mock.call_count == 1
@pytest.mark.parametrize('is_short', [True, False])
@pytest.mark.parametrize('leverage', [1, 5])
@pytest.mark.parametrize('amount', [2, 50])
def test_handle_cancel_exit_limit(mocker, default_conf_usdt, fee, is_short,
leverage, amount) -> None:
send_msg_mock = patch_RPCManager(mocker)
patch_exchange(mocker)
cancel_order_mock = MagicMock()
mocker.patch.multiple(
EXMS,
cancel_order=cancel_order_mock,
)
entry_price = 0.245441
mocker.patch(f'{EXMS}.get_rate', return_value=entry_price)
mocker.patch(f'{EXMS}.get_min_pair_stake_amount', return_value=0.2)
mocker.patch('freqtrade.freqtradebot.FreqtradeBot.handle_order_fee')
freqtrade = FreqtradeBot(default_conf_usdt)
trade = Trade(
pair='LTC/USDT',
amount=amount * leverage,
exchange='binance',
open_rate=entry_price,
open_date=dt_now() - timedelta(days=2),
fee_open=fee.return_value,
fee_close=fee.return_value,
close_rate=0.555,
close_date=dt_now(),
exit_reason="sell_reason_whatever",
stake_amount=entry_price * amount,
leverage=leverage,
is_short=is_short,
)
trade.orders = [
Order(
ft_order_side=entry_side(is_short),
ft_pair=trade.pair,
ft_is_open=False,
order_id='buy_123456',
status="closed",
symbol=trade.pair,
order_type="market",
side=entry_side(is_short),
price=trade.open_rate,
average=trade.open_rate,
filled=trade.amount,
remaining=0,
cost=trade.open_rate * trade.amount,
order_date=trade.open_date,
order_filled_date=trade.open_date,
),
Order(
ft_order_side=exit_side(is_short),
ft_pair=trade.pair,
ft_is_open=True,
order_id='sell_123456',
status="open",
symbol=trade.pair,
order_type="limit",
side=exit_side(is_short),
price=trade.open_rate,
average=trade.open_rate,
filled=0.0,
remaining=trade.amount,
cost=trade.open_rate * trade.amount,
order_date=trade.open_date,
order_filled_date=trade.open_date,
),
]
order = {'id': "sell_123456",
'remaining': 1,
'amount': 1,
'status': "open"}
reason = CANCEL_REASON['TIMEOUT']
send_msg_mock.reset_mock()
assert freqtrade.handle_cancel_exit(trade, order, order['id'], reason)
assert cancel_order_mock.call_count == 1
assert send_msg_mock.call_count == 1
assert trade.close_rate is None
assert trade.exit_reason is None
assert not trade.has_open_orders
send_msg_mock.reset_mock()
# Partial exit - below exit threshold
order['amount'] = amount * leverage
order['filled'] = amount * 0.99 * leverage
assert not freqtrade.handle_cancel_exit(trade, order, order['id'], reason)
# Assert cancel_order was not called (callcount remains unchanged)
assert cancel_order_mock.call_count == 1
assert send_msg_mock.call_count == 1
assert (send_msg_mock.call_args_list[0][0][0]['reason']
== CANCEL_REASON['PARTIALLY_FILLED_KEEP_OPEN'])
assert not freqtrade.handle_cancel_exit(trade, order, order['id'], reason)
assert (send_msg_mock.call_args_list[0][0][0]['reason']
== CANCEL_REASON['PARTIALLY_FILLED_KEEP_OPEN'])
# Message should not be iterated again
assert trade.exit_order_status == CANCEL_REASON['PARTIALLY_FILLED_KEEP_OPEN']
assert send_msg_mock.call_count == 1
send_msg_mock.reset_mock()
order['filled'] = amount * 0.5 * leverage
assert freqtrade.handle_cancel_exit(trade, order, order['id'], reason)
assert send_msg_mock.call_count == 1
assert (send_msg_mock.call_args_list[0][0][0]['reason']
== CANCEL_REASON['PARTIALLY_FILLED'])
def test_handle_cancel_exit_cancel_exception(mocker, default_conf_usdt) -> None:
patch_RPCManager(mocker)
patch_exchange(mocker)
mocker.patch(f'{EXMS}.get_min_pair_stake_amount', return_value=0.0)
mocker.patch(f'{EXMS}.cancel_order_with_result', side_effect=InvalidOrderException())
freqtrade = FreqtradeBot(default_conf_usdt)
# TODO: should not be magicmock
trade = MagicMock()
order_id = '125'
reason = CANCEL_REASON['TIMEOUT']
order = {'remaining': 1,
'id': '125',
'amount': 1,
'status': "open"}
assert not freqtrade.handle_cancel_exit(trade, order, order_id, reason)
# mocker.patch(f'{EXMS}.cancel_order_with_result', return_value=order)
# assert not freqtrade.handle_cancel_exit(trade, order, reason)
@pytest.mark.parametrize("is_short, open_rate, amt", [
(False, 2.0, 30.0),
(True, 2.02, 29.70297029),
])
def test_execute_trade_exit_up(default_conf_usdt, ticker_usdt, fee, ticker_usdt_sell_up, mocker,
ticker_usdt_sell_down, is_short, open_rate, amt) -> None:
rpc_mock = patch_RPCManager(mocker)
patch_exchange(mocker)
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt,
get_fee=fee,
_dry_is_price_crossed=MagicMock(return_value=False),
)
patch_whitelist(mocker, default_conf_usdt)
freqtrade = FreqtradeBot(default_conf_usdt)
patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
freqtrade.strategy.confirm_trade_exit = MagicMock(return_value=False)
# Create some test data
freqtrade.enter_positions()
rpc_mock.reset_mock()
trade = Trade.session.scalars(select(Trade)).first()
assert trade.is_short == is_short
assert trade
assert freqtrade.strategy.confirm_trade_exit.call_count == 0
# Increase the price and sell it
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt_sell_down if is_short else ticker_usdt_sell_up
)
# Prevented sell ...
freqtrade.execute_trade_exit(
trade=trade,
limit=(ticker_usdt_sell_down()['ask'] if is_short else ticker_usdt_sell_up()['bid']),
exit_check=ExitCheckTuple(exit_type=ExitType.ROI)
)
assert rpc_mock.call_count == 0
assert freqtrade.strategy.confirm_trade_exit.call_count == 1
assert id(freqtrade.strategy.confirm_trade_exit.call_args_list[0][1]['trade']) != id(trade)
assert freqtrade.strategy.confirm_trade_exit.call_args_list[0][1]['trade'].id == trade.id
# Repatch with true
freqtrade.strategy.confirm_trade_exit = MagicMock(return_value=True)
freqtrade.execute_trade_exit(
trade=trade,
limit=(ticker_usdt_sell_down()['ask'] if is_short else ticker_usdt_sell_up()['bid']),
exit_check=ExitCheckTuple(exit_type=ExitType.ROI)
)
assert freqtrade.strategy.confirm_trade_exit.call_count == 1
assert rpc_mock.call_count == 1
last_msg = rpc_mock.call_args_list[-1][0][0]
assert {
'trade_id': 1,
'type': RPCMessageType.EXIT,
'exchange': 'Binance',
'pair': 'ETH/USDT',
'gain': 'profit',
'limit': 2.0 if is_short else 2.2,
'order_rate': 2.0 if is_short else 2.2,
'amount': pytest.approx(amt),
'order_type': 'limit',
'buy_tag': None,
'direction': 'Short' if trade.is_short else 'Long',
'leverage': 1.0,
'enter_tag': None,
'open_rate': open_rate,
'current_rate': 2.01 if is_short else 2.3,
'profit_amount': 0.29554455 if is_short else 5.685,
'profit_ratio': 0.00493809 if is_short else 0.09451372,
'stake_currency': 'USDT',
'fiat_currency': 'USD',
'base_currency': 'ETH',
'sell_reason': ExitType.ROI.value,
'exit_reason': ExitType.ROI.value,
'open_date': ANY,
'close_date': ANY,
'close_rate': ANY,
'sub_trade': False,
'cumulative_profit': 0.0,
'stake_amount': pytest.approx(60),
} == last_msg
@pytest.mark.parametrize("is_short", [False, True])
def test_execute_trade_exit_down(default_conf_usdt, ticker_usdt, fee, ticker_usdt_sell_down,
ticker_usdt_sell_up, mocker, is_short) -> None:
rpc_mock = patch_RPCManager(mocker)
patch_exchange(mocker)
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt,
get_fee=fee,
_dry_is_price_crossed=MagicMock(return_value=False),
)
patch_whitelist(mocker, default_conf_usdt)
freqtrade = FreqtradeBot(default_conf_usdt)
patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
# Create some test data
freqtrade.enter_positions()
trade = Trade.session.scalars(select(Trade)).first()
trade.is_short = is_short
assert trade
# Decrease the price and sell it
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt_sell_up if is_short else ticker_usdt_sell_down
)
freqtrade.execute_trade_exit(
trade=trade, limit=(ticker_usdt_sell_up if is_short else ticker_usdt_sell_down)()['bid'],
exit_check=ExitCheckTuple(exit_type=ExitType.STOP_LOSS))
assert rpc_mock.call_count == 2
last_msg = rpc_mock.call_args_list[-1][0][0]
assert {
'type': RPCMessageType.EXIT,
'trade_id': 1,
'exchange': 'Binance',
'pair': 'ETH/USDT',
'direction': 'Short' if trade.is_short else 'Long',
'leverage': 1.0,
'gain': 'loss',
'limit': 2.2 if is_short else 2.01,
'order_rate': 2.2 if is_short else 2.01,
'amount': pytest.approx(29.70297029) if is_short else 30.0,
'order_type': 'limit',
'buy_tag': None,
'enter_tag': None,
'open_rate': 2.02 if is_short else 2.0,
'current_rate': 2.2 if is_short else 2.0,
'profit_amount': -5.65990099 if is_short else -0.00075,
'profit_ratio': -0.0945681 if is_short else -1.247e-05,
'stake_currency': 'USDT',
'base_currency': 'ETH',
'fiat_currency': 'USD',
'sell_reason': ExitType.STOP_LOSS.value,
'exit_reason': ExitType.STOP_LOSS.value,
'open_date': ANY,
'close_date': ANY,
'close_rate': ANY,
'sub_trade': False,
'cumulative_profit': 0.0,
'stake_amount': pytest.approx(60),
} == last_msg
@pytest.mark.parametrize(
"is_short,amount,open_rate,current_rate,limit,profit_amount,profit_ratio,profit_or_loss", [
(False, 30, 2.0, 2.3, 2.25, 7.18125, 0.11938903, 'profit'),
(True, 29.70297029, 2.02, 2.2, 2.25, -7.14876237, -0.11944465, 'loss'),
])
def test_execute_trade_exit_custom_exit_price(
default_conf_usdt, ticker_usdt, fee, ticker_usdt_sell_up, is_short, amount, open_rate,
current_rate, limit, profit_amount, profit_ratio, profit_or_loss, mocker) -> None:
rpc_mock = patch_RPCManager(mocker)
patch_exchange(mocker)
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt,
get_fee=fee,
_dry_is_price_crossed=MagicMock(return_value=False),
)
config = deepcopy(default_conf_usdt)
config['custom_price_max_distance_ratio'] = 0.1
patch_whitelist(mocker, config)
freqtrade = FreqtradeBot(config)
patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
freqtrade.strategy.confirm_trade_exit = MagicMock(return_value=False)
# Create some test data
freqtrade.enter_positions()
rpc_mock.reset_mock()
trade = Trade.session.scalars(select(Trade)).first()
trade.is_short = is_short
assert trade
assert freqtrade.strategy.confirm_trade_exit.call_count == 0
# Increase the price and sell it
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt_sell_up
)
freqtrade.strategy.confirm_trade_exit = MagicMock(return_value=True)
# Set a custom exit price
freqtrade.strategy.custom_exit_price = lambda **kwargs: 2.25
freqtrade.execute_trade_exit(
trade=trade,
limit=ticker_usdt_sell_up()['ask' if is_short else 'bid'],
exit_check=ExitCheckTuple(exit_type=ExitType.EXIT_SIGNAL, exit_reason='foo')
)
# Sell price must be different to default bid price
assert freqtrade.strategy.confirm_trade_exit.call_count == 1
assert rpc_mock.call_count == 1
last_msg = rpc_mock.call_args_list[-1][0][0]
assert {
'trade_id': 1,
'type': RPCMessageType.EXIT,
'exchange': 'Binance',
'pair': 'ETH/USDT',
'direction': 'Short' if trade.is_short else 'Long',
'leverage': 1.0,
'gain': profit_or_loss,
'limit': limit,
'order_rate': limit,
'amount': pytest.approx(amount),
'order_type': 'limit',
'buy_tag': None,
'enter_tag': None,
'open_rate': open_rate,
'current_rate': current_rate,
'profit_amount': pytest.approx(profit_amount),
'profit_ratio': profit_ratio,
'stake_currency': 'USDT',
'base_currency': 'ETH',
'fiat_currency': 'USD',
'sell_reason': 'foo',
'exit_reason': 'foo',
'open_date': ANY,
'close_date': ANY,
'close_rate': ANY,
'sub_trade': False,
'cumulative_profit': 0.0,
'stake_amount': pytest.approx(60),
} == last_msg
@pytest.mark.parametrize("is_short", [False, True])
def test_execute_trade_exit_down_stoploss_on_exchange_dry_run(
default_conf_usdt, ticker_usdt, fee, is_short, ticker_usdt_sell_down,
ticker_usdt_sell_up, mocker) -> None:
rpc_mock = patch_RPCManager(mocker)
patch_exchange(mocker)
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt,
get_fee=fee,
_dry_is_price_crossed=MagicMock(return_value=False),
)
patch_whitelist(mocker, default_conf_usdt)
freqtrade = FreqtradeBot(default_conf_usdt)
patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
# Create some test data
freqtrade.enter_positions()
trade = Trade.session.scalars(select(Trade)).first()
assert trade.is_short == is_short
assert trade
# Decrease the price and sell it
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt_sell_up if is_short else ticker_usdt_sell_down
)
default_conf_usdt['dry_run'] = True
freqtrade.strategy.order_types['stoploss_on_exchange'] = True
# Setting trade stoploss to 0.01
trade.stop_loss = 2.0 * 1.01 if is_short else 2.0 * 0.99
freqtrade.execute_trade_exit(
trade=trade, limit=trade.stop_loss,
exit_check=ExitCheckTuple(exit_type=ExitType.STOP_LOSS))
assert rpc_mock.call_count == 2
last_msg = rpc_mock.call_args_list[-1][0][0]
assert {
'type': RPCMessageType.EXIT,
'trade_id': 1,
'exchange': 'Binance',
'pair': 'ETH/USDT',
'direction': 'Short' if trade.is_short else 'Long',
'leverage': 1.0,
'gain': 'loss',
'limit': 2.02 if is_short else 1.98,
'order_rate': 2.02 if is_short else 1.98,
'amount': pytest.approx(29.70297029 if is_short else 30.0),
'order_type': 'limit',
'buy_tag': None,
'enter_tag': None,
'open_rate': 2.02 if is_short else 2.0,
'current_rate': 2.2 if is_short else 2.0,
'profit_amount': -0.3 if is_short else -0.8985,
'profit_ratio': -0.00501253 if is_short else -0.01493766,
'stake_currency': 'USDT',
'fiat_currency': 'USD',
'base_currency': 'ETH',
'sell_reason': ExitType.STOP_LOSS.value,
'exit_reason': ExitType.STOP_LOSS.value,
'open_date': ANY,
'close_date': ANY,
'close_rate': ANY,
'sub_trade': False,
'cumulative_profit': 0.0,
'stake_amount': pytest.approx(60),
} == last_msg
def test_execute_trade_exit_sloe_cancel_exception(
mocker, default_conf_usdt, ticker_usdt, fee, caplog) -> None:
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
mocker.patch(f'{EXMS}.cancel_stoploss_order', side_effect=InvalidOrderException())
mocker.patch('freqtrade.wallets.Wallets.get_free', MagicMock(return_value=300))
create_order_mock = MagicMock(side_effect=[
{'id': '12345554'},
{'id': '12345555'},
])
patch_exchange(mocker)
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt,
get_fee=fee,
create_order=create_order_mock,
)
freqtrade.strategy.order_types['stoploss_on_exchange'] = True
patch_get_signal(freqtrade)
freqtrade.enter_positions()
trade = Trade.session.scalars(select(Trade)).first()
PairLock.session = MagicMock()
freqtrade.config['dry_run'] = False
trade.stoploss_order_id = "abcd"
freqtrade.execute_trade_exit(trade=trade, limit=1234,
exit_check=ExitCheckTuple(exit_type=ExitType.STOP_LOSS))
assert create_order_mock.call_count == 2
assert log_has('Could not cancel stoploss order abcd for pair ETH/USDT', caplog)
@pytest.mark.parametrize("is_short", [False, True])
def test_execute_trade_exit_with_stoploss_on_exchange(
default_conf_usdt, ticker_usdt, fee, ticker_usdt_sell_up, is_short, mocker) -> None:
default_conf_usdt['exchange']['name'] = 'binance'
rpc_mock = patch_RPCManager(mocker)
patch_exchange(mocker)
stoploss = MagicMock(return_value={
'id': 123,
'status': 'open',
'info': {
'foo': 'bar'
}
})
mocker.patch('freqtrade.freqtradebot.FreqtradeBot.handle_order_fee')
cancel_order = MagicMock(return_value=True)
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt,
get_fee=fee,
amount_to_precision=lambda s, x, y: y,
price_to_precision=lambda s, x, y: y,
create_stoploss=stoploss,
cancel_stoploss_order=cancel_order,
_dry_is_price_crossed=MagicMock(side_effect=[True, False]),
)
freqtrade = FreqtradeBot(default_conf_usdt)
freqtrade.strategy.order_types['stoploss_on_exchange'] = True
patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
# Create some test data
freqtrade.enter_positions()
trade = Trade.session.scalars(select(Trade)).first()
trade.is_short = is_short
assert trade
trades = [trade]
freqtrade.manage_open_orders()
freqtrade.exit_positions(trades)
# Increase the price and sell it
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt_sell_up
)
freqtrade.execute_trade_exit(
trade=trade,
limit=ticker_usdt_sell_up()['ask' if is_short else 'bid'],
exit_check=ExitCheckTuple(exit_type=ExitType.STOP_LOSS)
)
trade = Trade.session.scalars(select(Trade)).first()
trade.is_short = is_short
assert trade
assert cancel_order.call_count == 1
assert rpc_mock.call_count == 4
@pytest.mark.parametrize("is_short", [False, True])
def test_may_execute_trade_exit_after_stoploss_on_exchange_hit(
default_conf_usdt, ticker_usdt, fee, mocker, is_short) -> None:
default_conf_usdt['exchange']['name'] = 'binance'
rpc_mock = patch_RPCManager(mocker)
patch_exchange(mocker)
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt,
get_fee=fee,
amount_to_precision=lambda s, x, y: y,
price_to_precision=lambda s, x, y: y,
_dry_is_price_crossed=MagicMock(side_effect=[False, True]),
)
stoploss = MagicMock(return_value={
'id': 123,
'info': {
'foo': 'bar'
}
})
mocker.patch(f'{EXMS}.create_stoploss', stoploss)
freqtrade = FreqtradeBot(default_conf_usdt)
freqtrade.strategy.order_types['stoploss_on_exchange'] = True
patch_get_signal(freqtrade, enter_long=not is_short, enter_short=is_short)
# Create some test data
freqtrade.enter_positions()
freqtrade.manage_open_orders()
trade = Trade.session.scalars(select(Trade)).first()
trades = [trade]
assert trade.stoploss_order_id is None
freqtrade.exit_positions(trades)
assert trade
assert trade.stoploss_order_id == '123'
assert not trade.has_open_orders
# Assuming stoploss on exchange is hit
# stoploss_order_id should become None
# and trade should be sold at the price of stoploss
stoploss_executed = MagicMock(return_value={
"id": "123",
"timestamp": 1542707426845,
"datetime": "2018-11-20T09:50:26.845Z",
"lastTradeTimestamp": None,
"symbol": "BTC/USDT",
"type": "stop_loss_limit",
"side": "buy" if is_short else "sell",
"price": 1.08801,
"amount": trade.amount,
"cost": 1.08801 * trade.amount,
"average": 1.08801,
"filled": trade.amount,
"remaining": 0.0,
"status": "closed",
"fee": None,
"trades": None
})
mocker.patch(f'{EXMS}.fetch_stoploss_order', stoploss_executed)
freqtrade.exit_positions(trades)
assert trade.stoploss_order_id is None
assert trade.is_open is False
assert trade.exit_reason == ExitType.STOPLOSS_ON_EXCHANGE.value
assert rpc_mock.call_count == 4
assert rpc_mock.call_args_list[1][0][0]['type'] == RPCMessageType.ENTRY
assert rpc_mock.call_args_list[1][0][0]['amount'] > 20
assert rpc_mock.call_args_list[2][0][0]['type'] == RPCMessageType.ENTRY_FILL
assert rpc_mock.call_args_list[3][0][0]['type'] == RPCMessageType.EXIT_FILL
@pytest.mark.parametrize(
"is_short,amount,current_rate,limit,profit_amount,profit_ratio,profit_or_loss", [
(False, 30, 2.3, 2.2, 5.685, 0.09451372, 'profit'),
(True, 29.70297029, 2.2, 2.3, -8.63762376, -0.1443212, 'loss'),
])
def test_execute_trade_exit_market_order(
default_conf_usdt, ticker_usdt, fee, is_short, current_rate, amount, caplog,
limit, profit_amount, profit_ratio, profit_or_loss, ticker_usdt_sell_up, mocker
) -> None:
"""
amount
long: 60 / 2.0 = 30
short: 60 / 2.02 = 29.70297029
open_value
long: (30 * 2.0) + (30 * 2.0 * 0.0025) = 60.15
short: (29.702970297029704 * 2.02) - (29.702970297029704 * 2.02 * 0.0025) = 59.85
close_value
long: (30 * 2.2) - (30 * 2.2 * 0.0025) = 65.835
short: (29.702970297029704 * 2.3) + (29.702970297029704 * 2.3 * 0.0025) = 68.48762376237624
profit
long: 65.835 - 60.15 = 5.684999999999995
short: 59.85 - 68.48762376237624 = -8.637623762376244
profit_ratio
long: (65.835/60.15) - 1 = 0.0945137157107232
short: 1 - (68.48762376237624/59.85) = -0.1443211990371971
"""
open_rate = ticker_usdt.return_value['ask' if is_short else 'bid']
rpc_mock = patch_RPCManager(mocker)
patch_exchange(mocker)
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt,
get_fee=fee,
_dry_is_price_crossed=MagicMock(return_value=True),
get_funding_fees=MagicMock(side_effect=ExchangeError()),
)
patch_whitelist(mocker, default_conf_usdt)
freqtrade = FreqtradeBot(default_conf_usdt)
patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
# Create some test data
freqtrade.enter_positions()
trade = Trade.session.scalars(select(Trade)).first()
trade.is_short = is_short
assert trade
# Increase the price and sell it
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt_sell_up,
_dry_is_price_crossed=MagicMock(return_value=False),
)
freqtrade.config['order_types']['exit'] = 'market'
freqtrade.execute_trade_exit(
trade=trade,
limit=ticker_usdt_sell_up()['ask' if is_short else 'bid'],
exit_check=ExitCheckTuple(exit_type=ExitType.ROI)
)
assert log_has("Could not update funding fee.", caplog)
assert not trade.is_open
assert pytest.approx(trade.close_profit) == profit_ratio
assert rpc_mock.call_count == 4
last_msg = rpc_mock.call_args_list[-2][0][0]
assert {
'type': RPCMessageType.EXIT,
'trade_id': 1,
'exchange': 'Binance',
'pair': 'ETH/USDT',
'direction': 'Short' if trade.is_short else 'Long',
'leverage': 1.0,
'gain': profit_or_loss,
'limit': limit,
'order_rate': limit,
'amount': pytest.approx(amount),
'order_type': 'market',
'buy_tag': None,
'enter_tag': None,
'open_rate': open_rate,
'current_rate': current_rate,
'profit_amount': pytest.approx(profit_amount),
'profit_ratio': profit_ratio,
'stake_currency': 'USDT',
'base_currency': 'ETH',
'fiat_currency': 'USD',
'sell_reason': ExitType.ROI.value,
'exit_reason': ExitType.ROI.value,
'open_date': ANY,
'close_date': ANY,
'close_rate': ANY,
'sub_trade': False,
'cumulative_profit': 0.0,
'stake_amount': pytest.approx(60),
} == last_msg
@pytest.mark.parametrize("is_short", [False, True])
def test_execute_trade_exit_insufficient_funds_error(default_conf_usdt, ticker_usdt, fee, is_short,
ticker_usdt_sell_up, mocker) -> None:
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
mock_insuf = mocker.patch('freqtrade.freqtradebot.FreqtradeBot.handle_insufficient_funds')
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt,
get_fee=fee,
create_order=MagicMock(side_effect=[
{'id': 1234553382},
InsufficientFundsError(),
]),
)
patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
# Create some test data
freqtrade.enter_positions()
trade = Trade.session.scalars(select(Trade)).first()
trade.is_short = is_short
assert trade
# Increase the price and sell it
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt_sell_up
)
sell_reason = ExitCheckTuple(exit_type=ExitType.ROI)
assert not freqtrade.execute_trade_exit(
trade=trade,
limit=ticker_usdt_sell_up()['ask' if is_short else 'bid'],
exit_check=sell_reason
)
assert mock_insuf.call_count == 1
@pytest.mark.parametrize('profit_only,bid,ask,handle_first,handle_second,exit_type,is_short', [
# Enable profit
(True, 2.18, 2.2, False, True, ExitType.EXIT_SIGNAL.value, False),
(True, 2.18, 2.2, False, True, ExitType.EXIT_SIGNAL.value, True),
# # Disable profit
(False, 3.19, 3.2, True, False, ExitType.EXIT_SIGNAL.value, False),
(False, 3.19, 3.2, True, False, ExitType.EXIT_SIGNAL.value, True),
# # Enable loss
# # * Shouldn't this be ExitType.STOP_LOSS.value
(True, 0.21, 0.22, False, False, None, False),
(True, 2.41, 2.42, False, False, None, True),
# Disable loss
(False, 0.10, 0.22, True, False, ExitType.EXIT_SIGNAL.value, False),
(False, 0.10, 0.22, True, False, ExitType.EXIT_SIGNAL.value, True),
])
def test_exit_profit_only(
default_conf_usdt, limit_order, limit_order_open, is_short,
fee, mocker, profit_only, bid, ask, handle_first, handle_second, exit_type) -> None:
patch_RPCManager(mocker)
patch_exchange(mocker)
eside = entry_side(is_short)
mocker.patch.multiple(
EXMS,
fetch_ticker=MagicMock(return_value={
'bid': bid,
'ask': ask,
'last': bid
}),
create_order=MagicMock(side_effect=[
limit_order[eside],
{'id': 1234553382},
]),
get_fee=fee,
)
default_conf_usdt.update({
'use_exit_signal': True,
'exit_profit_only': profit_only,
'exit_profit_offset': 0.1,
})
freqtrade = FreqtradeBot(default_conf_usdt)
patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
freqtrade.strategy.custom_exit = MagicMock(return_value=None)
if exit_type == ExitType.EXIT_SIGNAL.value:
freqtrade.strategy.min_roi_reached = MagicMock(return_value=False)
else:
freqtrade.strategy.ft_stoploss_reached = MagicMock(return_value=ExitCheckTuple(
exit_type=ExitType.NONE))
freqtrade.enter_positions()
trade = Trade.session.scalars(select(Trade)).first()
assert trade.is_short == is_short
oobj = Order.parse_from_ccxt_object(limit_order[eside], limit_order[eside]['symbol'], eside)
trade.update_order(limit_order[eside])
trade.update_trade(oobj)
freqtrade.wallets.update()
if profit_only:
assert freqtrade.handle_trade(trade) is False
# Custom-exit is called
freqtrade.strategy.custom_exit.call_count == 1
patch_get_signal(freqtrade, enter_long=False, exit_short=is_short, exit_long=not is_short)
assert freqtrade.handle_trade(trade) is handle_first
if handle_second:
freqtrade.strategy.exit_profit_offset = 0.0
assert freqtrade.handle_trade(trade) is True
def test_sell_not_enough_balance(default_conf_usdt, limit_order, limit_order_open,
fee, mocker, caplog) -> None:
patch_RPCManager(mocker)
patch_exchange(mocker)
mocker.patch.multiple(
EXMS,
fetch_ticker=MagicMock(return_value={
'bid': 0.00002172,
'ask': 0.00002173,
'last': 0.00002172
}),
create_order=MagicMock(side_effect=[
limit_order_open['buy'],
{'id': 1234553382},
]),
get_fee=fee,
)
freqtrade = FreqtradeBot(default_conf_usdt)
patch_get_signal(freqtrade)
freqtrade.strategy.min_roi_reached = MagicMock(return_value=False)
freqtrade.enter_positions()
trade = Trade.session.scalars(select(Trade)).first()
amnt = trade.amount
oobj = Order.parse_from_ccxt_object(limit_order['buy'], limit_order['buy']['symbol'], 'buy')
trade.update_trade(oobj)
patch_get_signal(freqtrade, enter_long=False, exit_long=True)
mocker.patch('freqtrade.wallets.Wallets.get_free', MagicMock(return_value=trade.amount * 0.985))
assert freqtrade.handle_trade(trade) is True
assert log_has_re(r'.*Falling back to wallet-amount.', caplog)
assert trade.amount != amnt
@pytest.mark.parametrize('amount_wallet,has_err', [
    (95.29, False),
    (91.29, True)
])
def test__safe_exit_amount(default_conf_usdt, fee, caplog, mocker, amount_wallet, has_err):
    """_safe_exit_amount: falls back to the (slightly smaller) wallet amount,
    updating the trade and the wallet; raises DependencyException when the
    wallet balance is too far below the trade amount."""
    patch_RPCManager(mocker)
    patch_exchange(mocker)
    amount = 95.33
    # (removed a redundant `amount_wallet = amount_wallet` self-assignment here)
    mocker.patch('freqtrade.wallets.Wallets.get_free', MagicMock(return_value=amount_wallet))
    wallet_update = mocker.patch('freqtrade.wallets.Wallets.update')
    trade = Trade(
        pair='LTC/ETH',
        amount=amount,
        exchange='binance',
        open_rate=0.245441,
        fee_open=fee.return_value,
        fee_close=fee.return_value,
    )
    freqtrade = FreqtradeBot(default_conf_usdt)
    patch_get_signal(freqtrade)
    if has_err:
        with pytest.raises(DependencyException, match=r"Not enough amount to exit trade."):
            assert freqtrade._safe_exit_amount(trade, trade.pair, trade.amount)
    else:
        wallet_update.reset_mock()
        assert trade.amount != amount_wallet
        # Falls back to the wallet amount and mutates the trade.
        assert freqtrade._safe_exit_amount(trade, trade.pair, trade.amount) == amount_wallet
        assert log_has_re(r'.*Falling back to wallet-amount.', caplog)
        assert trade.amount == amount_wallet
        assert wallet_update.call_count == 1
        caplog.clear()
        wallet_update.reset_mock()
        # Exact wallet amount: no fallback message, but wallet is refreshed.
        assert freqtrade._safe_exit_amount(trade, trade.pair, amount_wallet) == amount_wallet
        assert not log_has_re(r'.*Falling back to wallet-amount.', caplog)
        assert wallet_update.call_count == 1
@pytest.mark.parametrize("is_short", [False, True])
def test_locked_pairs(default_conf_usdt, ticker_usdt, fee,
ticker_usdt_sell_down, mocker, caplog, is_short) -> None:
patch_RPCManager(mocker)
patch_exchange(mocker)
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt,
get_fee=fee,
)
freqtrade = FreqtradeBot(default_conf_usdt)
patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
# Create some test data
freqtrade.enter_positions()
trade = Trade.session.scalars(select(Trade)).first()
trade.is_short = is_short
assert trade
# Decrease the price and sell it
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker_usdt_sell_down
)
freqtrade.execute_trade_exit(
trade=trade,
limit=ticker_usdt_sell_down()['ask' if is_short else 'bid'],
exit_check=ExitCheckTuple(exit_type=ExitType.STOP_LOSS)
)
trade.close(ticker_usdt_sell_down()['bid'])
assert freqtrade.strategy.is_pair_locked(trade.pair, side='*')
# Boths sides are locked
assert freqtrade.strategy.is_pair_locked(trade.pair, side='long')
assert freqtrade.strategy.is_pair_locked(trade.pair, side='short')
# reinit - should buy other pair.
caplog.clear()
freqtrade.enter_positions()
assert log_has_re(fr"Pair {trade.pair} \* is locked.*", caplog)
@pytest.mark.parametrize("is_short", [False, True])
def test_ignore_roi_if_entry_signal(default_conf_usdt, limit_order, limit_order_open, is_short,
fee, mocker) -> None:
patch_RPCManager(mocker)
patch_exchange(mocker)
eside = entry_side(is_short)
mocker.patch.multiple(
EXMS,
fetch_ticker=MagicMock(return_value={
'bid': 2.19,
'ask': 2.2,
'last': 2.19
}),
create_order=MagicMock(side_effect=[
limit_order_open[eside],
{'id': 1234553382},
]),
get_fee=fee,
)
default_conf_usdt['ignore_roi_if_entry_signal'] = True
freqtrade = FreqtradeBot(default_conf_usdt)
patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
freqtrade.strategy.min_roi_reached = MagicMock(return_value=True)
freqtrade.enter_positions()
trade = Trade.session.scalars(select(Trade)).first()
trade.is_short = is_short
oobj = Order.parse_from_ccxt_object(
limit_order[eside], limit_order[eside]['symbol'], eside)
trade.update_trade(oobj)
freqtrade.wallets.update()
if is_short:
patch_get_signal(freqtrade, enter_long=False, enter_short=True, exit_short=True)
else:
patch_get_signal(freqtrade, enter_long=True, exit_long=True)
assert freqtrade.handle_trade(trade) is False
# Test if entry-signal is absent (should sell due to roi = true)
if is_short:
patch_get_signal(freqtrade, enter_long=False, exit_short=False, exit_tag='something')
else:
patch_get_signal(freqtrade, enter_long=False, exit_long=False, exit_tag='something')
assert freqtrade.handle_trade(trade) is True
assert trade.exit_reason == ExitType.ROI.value
@pytest.mark.parametrize("is_short,val1,val2", [
    (False, 1.5, 1.1),
    (True, 0.5, 0.9)
])
def test_trailing_stop_loss(default_conf_usdt, limit_order_open,
                            is_short, val1, val2, fee, caplog, mocker) -> None:
    """Trailing stoploss follows a favorable price move and exits once price retraces past it."""
    patch_RPCManager(mocker)
    patch_exchange(mocker)
    mocker.patch.multiple(
        EXMS,
        fetch_ticker=MagicMock(return_value={
            'bid': 2.0,
            'ask': 2.0,
            'last': 2.0
        }),
        create_order=MagicMock(side_effect=[
            limit_order_open[entry_side(is_short)],
            {'id': 1234553382},
        ]),
        get_fee=fee,
    )
    default_conf_usdt['trailing_stop'] = True
    patch_whitelist(mocker, default_conf_usdt)
    freqtrade = FreqtradeBot(default_conf_usdt)
    patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
    freqtrade.strategy.min_roi_reached = MagicMock(return_value=False)
    freqtrade.enter_positions()
    trade = Trade.session.scalars(select(Trade)).first()
    assert trade.is_short == is_short
    assert freqtrade.handle_trade(trade) is False
    # Raise price into profits
    mocker.patch(f'{EXMS}.fetch_ticker',
                 MagicMock(return_value={
                     'bid': 2.0 * val1,
                     'ask': 2.0 * val1,
                     'last': 2.0 * val1
                 }))
    # Stoploss should be adjusted
    assert freqtrade.handle_trade(trade) is False
    caplog.clear()
    # Price fell
    mocker.patch(f'{EXMS}.fetch_ticker',
                 MagicMock(return_value={
                     'bid': 2.0 * val2,
                     'ask': 2.0 * val2,
                     'last': 2.0 * val2
                 }))
    caplog.set_level(logging.DEBUG)
    # Sell as trailing-stop is reached
    assert freqtrade.handle_trade(trade) is True
    stop_multi = 1.1 if is_short else 0.9
    assert log_has(f"ETH/USDT - HIT STOP: current price at {(2.0 * val2):6f}, "
                   f"stoploss is {(2.0 * val1 * stop_multi):6f}, "
                   f"initial stoploss was at {(2.0 * stop_multi):6f}, trade opened at 2.000000",
                   caplog)
    assert trade.exit_reason == ExitType.TRAILING_STOP_LOSS.value
@pytest.mark.parametrize('offset,trail_if_reached,second_sl,is_short', [
    (0, False, 2.0394, False),
    (0.011, False, 2.0394, False),
    (0.055, True, 1.8, False),
    (0, False, 2.1614, True),
    (0.011, False, 2.1614, True),
    (0.055, True, 2.42, True),
])
def test_trailing_stop_loss_positive(
    default_conf_usdt, limit_order, limit_order_open,
    offset, fee, caplog, mocker, trail_if_reached, second_sl, is_short
) -> None:
    """trailing_stop_positive (with optional offset / trailing_only_offset_is_reached)
    adjusts the stoploss only once profit / offset conditions are met."""
    enter_price = limit_order[entry_side(is_short)]['price']
    patch_RPCManager(mocker)
    patch_exchange(mocker)
    eside = entry_side(is_short)
    mocker.patch.multiple(
        EXMS,
        fetch_ticker=MagicMock(return_value={
            'bid': enter_price - (-0.01 if is_short else 0.01),
            'ask': enter_price - (-0.01 if is_short else 0.01),
            'last': enter_price - (-0.01 if is_short else 0.01),
        }),
        create_order=MagicMock(side_effect=[
            limit_order[eside],
            {'id': 1234553382},
        ]),
        get_fee=fee,
    )
    default_conf_usdt['trailing_stop'] = True
    default_conf_usdt['trailing_stop_positive'] = 0.01
    if offset:
        default_conf_usdt['trailing_stop_positive_offset'] = offset
        default_conf_usdt['trailing_only_offset_is_reached'] = trail_if_reached
    patch_whitelist(mocker, default_conf_usdt)

    freqtrade = FreqtradeBot(default_conf_usdt)
    patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
    freqtrade.strategy.min_roi_reached = MagicMock(return_value=False)
    freqtrade.enter_positions()

    trade = Trade.session.scalars(select(Trade)).first()
    assert trade.is_short == is_short
    oobj = Order.parse_from_ccxt_object(limit_order[eside], limit_order[eside]['symbol'], eside)
    trade.update_order(limit_order[eside])
    trade.update_trade(oobj)
    caplog.set_level(logging.DEBUG)
    # stop-loss not reached
    assert freqtrade.handle_trade(trade) is False

    # Raise ticker_usdt above buy price
    mocker.patch(
        f'{EXMS}.fetch_ticker',
        MagicMock(return_value={
            'bid': enter_price + (-0.06 if is_short else 0.06),
            'ask': enter_price + (-0.06 if is_short else 0.06),
            'last': enter_price + (-0.06 if is_short else 0.06),
        })
    )
    caplog.clear()
    # stop-loss not reached, adjusted stoploss
    assert freqtrade.handle_trade(trade) is False
    caplog_text = (f"ETH/USDT - Using positive stoploss: 0.01 offset: {offset} profit: "
                   f"{'2.49' if not is_short else '2.24'}%")
    if trail_if_reached:
        # Offset not yet reached - no adjustment should have been logged.
        assert not log_has(caplog_text, caplog)
        assert not log_has("ETH/USDT - Adjusting stoploss...", caplog)
    else:
        assert log_has(caplog_text, caplog)
        assert log_has("ETH/USDT - Adjusting stoploss...", caplog)
    assert pytest.approx(trade.stop_loss) == second_sl
    caplog.clear()

    mocker.patch(
        f'{EXMS}.fetch_ticker',
        MagicMock(return_value={
            'bid': enter_price + (-0.135 if is_short else 0.125),
            'ask': enter_price + (-0.135 if is_short else 0.125),
            'last': enter_price + (-0.135 if is_short else 0.125),
        })
    )
    assert freqtrade.handle_trade(trade) is False
    assert log_has(
        f"ETH/USDT - Using positive stoploss: 0.01 offset: {offset} profit: "
        f"{'5.72' if not is_short else '5.67'}%",
        caplog
    )
    assert log_has("ETH/USDT - Adjusting stoploss...", caplog)

    mocker.patch(
        f'{EXMS}.fetch_ticker',
        MagicMock(return_value={
            'bid': enter_price + (-0.02 if is_short else 0.02),
            'ask': enter_price + (-0.02 if is_short else 0.02),
            'last': enter_price + (-0.02 if is_short else 0.02),
        })
    )
    # Lower price again (but still positive)
    assert freqtrade.handle_trade(trade) is True
    assert log_has(
        f"ETH/USDT - HIT STOP: current price at {enter_price + (-0.02 if is_short else 0.02):.6f}, "
        f"stoploss is {trade.stop_loss:.6f}, "
        f"initial stoploss was at {'2.42' if is_short else '1.80'}0000, "
        f"trade opened at {2.2 if is_short else 2.0}00000",
        caplog)
    assert trade.exit_reason == ExitType.TRAILING_STOP_LOSS.value
@pytest.mark.parametrize("is_short", [False, True])
def test_disable_ignore_roi_if_entry_signal(default_conf_usdt, limit_order, limit_order_open,
                                            is_short, fee, mocker) -> None:
    """With ignore_roi_if_entry_signal=False, ROI exits fire even while an entry signal exists."""
    patch_RPCManager(mocker)
    patch_exchange(mocker)
    eside = entry_side(is_short)
    mocker.patch.multiple(
        EXMS,
        fetch_ticker=MagicMock(return_value={
            'bid': 2.0,
            'ask': 2.0,
            'last': 2.0
        }),
        create_order=MagicMock(side_effect=[
            limit_order_open[eside],
            {'id': 1234553382},
            {'id': 1234553383}
        ]),
        get_fee=fee,
        _dry_is_price_crossed=MagicMock(return_value=False),
    )
    default_conf_usdt['exit_pricing'] = {
        'ignore_roi_if_entry_signal': False
    }
    freqtrade = FreqtradeBot(default_conf_usdt)
    patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
    freqtrade.strategy.min_roi_reached = MagicMock(return_value=True)

    freqtrade.enter_positions()

    trade = Trade.session.scalars(select(Trade)).first()
    trade.is_short = is_short
    oobj = Order.parse_from_ccxt_object(
        limit_order[eside], limit_order[eside]['symbol'], eside)
    trade.update_trade(oobj)
    # Sell due to min_roi_reached
    patch_get_signal(freqtrade, enter_long=not is_short, enter_short=is_short, exit_short=is_short)
    assert freqtrade.handle_trade(trade) is True

    # Test if entry-signal is absent
    patch_get_signal(freqtrade)
    assert freqtrade.handle_trade(trade) is True
    assert trade.exit_reason == ExitType.ROI.value
def test_get_real_amount_quote(default_conf_usdt, trades_for_order, buy_order_fee, fee, caplog,
                               mocker):
    """A fee paid in the base currency is deducted from the filled amount and logged."""
    mocker.patch(f'{EXMS}.get_trades_for_order', return_value=trades_for_order)
    amount = sum(x['amount'] for x in trades_for_order)
    trade = Trade(
        pair='LTC/ETH',
        amount=amount,
        exchange='binance',
        open_rate=0.245441,
        fee_open=fee.return_value,
        fee_close=fee.return_value,
    )
    freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)

    caplog.clear()
    order_obj = Order.parse_from_ccxt_object(buy_order_fee, 'LTC/ETH', 'buy')
    # Amount is reduced by "fee"
    assert freqtrade.get_real_amount(trade, buy_order_fee, order_obj) == (amount * 0.001)
    assert log_has(
        'Applying fee on amount for Trade(id=None, pair=LTC/ETH, amount=8.00000000, is_short=False,'
        ' leverage=1.0, open_rate=0.24544100, open_since=closed), fee=0.008.',
        caplog
    )
def test_get_real_amount_quote_dust(default_conf_usdt, trades_for_order, buy_order_fee, fee,
                                    caplog, mocker):
    """A small base-currency fee is 'eaten into dust' when the wallet still covers the amount."""
    mocker.patch(f'{EXMS}.get_trades_for_order', return_value=trades_for_order)
    walletmock = mocker.patch('freqtrade.wallets.Wallets.update')
    # Free balance slightly above the trade amount - fee fits into the surplus.
    mocker.patch('freqtrade.wallets.Wallets.get_free', return_value=8.1122)
    amount = sum(x['amount'] for x in trades_for_order)
    trade = Trade(
        pair='LTC/ETH',
        amount=amount,
        exchange='binance',
        open_rate=0.245441,
        fee_open=fee.return_value,
        fee_close=fee.return_value,
    )
    freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
    walletmock.reset_mock()
    order_obj = Order.parse_from_ccxt_object(buy_order_fee, 'LTC/ETH', 'buy')
    # Amount is kept as is
    assert freqtrade.get_real_amount(trade, buy_order_fee, order_obj) is None
    assert walletmock.call_count == 1
    assert log_has_re(r'Fee amount for Trade.* was in base currency '
                      '- Eating Fee 0.008 into dust', caplog)
def test_get_real_amount_no_trade(default_conf_usdt, buy_order_fee, caplog, mocker, fee):
    """Without trade details from the exchange the amount stays untouched and a warning is logged."""
    mocker.patch(f'{EXMS}.get_trades_for_order', return_value=[])

    amount = buy_order_fee['amount']
    trade = Trade(
        pair='LTC/ETH',
        amount=amount,
        exchange='binance',
        open_rate=0.245441,
        fee_open=fee.return_value,
        fee_close=fee.return_value,
    )
    freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
    order_obj = Order.parse_from_ccxt_object(buy_order_fee, 'LTC/ETH', 'buy')
    # Amount is reduced by "fee"
    assert freqtrade.get_real_amount(trade, buy_order_fee, order_obj) is None
    assert log_has(
        'Applying fee on amount for Trade(id=None, pair=LTC/ETH, amount=8.00000000, '
        'is_short=False, leverage=1.0, open_rate=0.24544100, open_since=closed) failed: '
        'myTrade-Dict empty found',
        caplog
    )
@pytest.mark.parametrize(
    'fee_par,fee_reduction_amount,use_ticker_usdt_rate,expected_log', [
        # basic, amount does not change
        ({'cost': 0.008, 'currency': 'ETH'}, 0, False, None),
        # no currency in fee
        ({'cost': 0.004, 'currency': None}, 0, True, None),
        # BNB no rate
        ({'cost': 0.00094518, 'currency': 'BNB'}, 0, True, (
            'Fee for Trade Trade(id=None, pair=LTC/ETH, amount=8.00000000, is_short=False, '
            'leverage=1.0, open_rate=0.24544100, open_since=closed) [buy]: 0.00094518 BNB -'
            ' rate: None'
        )),
        # from order
        ({'cost': 0.004, 'currency': 'LTC'}, 0.004, False, (
            'Applying fee on amount for Trade(id=None, pair=LTC/ETH, amount=8.00000000, '
            'is_short=False, leverage=1.0, open_rate=0.24544100, open_since=closed), fee=0.004.'
        )),
        # invalid, no currency in from fee dict
        ({'cost': 0.008, 'currency': None}, 0, True, None),
    ])
def test_get_real_amount(
    default_conf_usdt, trades_for_order, buy_order_fee, fee, mocker, caplog,
    fee_par, fee_reduction_amount, use_ticker_usdt_rate, expected_log
):
    """get_real_amount handles the various shapes of the ccxt fee dict (currency / cost / rate)."""
    buy_order = deepcopy(buy_order_fee)
    buy_order['fee'] = fee_par
    trades_for_order[0]['fee'] = fee_par

    mocker.patch(f'{EXMS}.get_trades_for_order', return_value=trades_for_order)
    amount = sum(x['amount'] for x in trades_for_order)
    trade = Trade(
        pair='LTC/ETH',
        amount=amount,
        exchange='binance',
        fee_open=fee.return_value,
        fee_close=fee.return_value,
        open_rate=0.245441,
    )
    freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)

    if not use_ticker_usdt_rate:
        # Simulate an exchange without a usable conversion rate.
        mocker.patch(f'{EXMS}.fetch_ticker', side_effect=ExchangeError)

    caplog.clear()
    order_obj = Order.parse_from_ccxt_object(buy_order_fee, 'LTC/ETH', 'buy')
    res = freqtrade.get_real_amount(trade, buy_order, order_obj)
    if fee_reduction_amount == 0:
        assert res is None
    else:
        assert res == fee_reduction_amount

    if expected_log:
        assert log_has(expected_log, caplog)
@pytest.mark.parametrize(
    'fee_cost, fee_currency, fee_reduction_amount, expected_fee, expected_log_amount', [
        # basic, amount is reduced by fee
        (None, None, 0.001, 0.001, 7.992),
        # different fee currency on both trades, fee is average of both trade's fee
        (0.02, 'BNB', 0.0005, 0.001518575, 7.996),
    ])
def test_get_real_amount_multi(
    default_conf_usdt, trades_for_order2, buy_order_fee, caplog, fee, mocker, markets,
    fee_cost, fee_currency, fee_reduction_amount, expected_fee, expected_log_amount,
):
    """Fees spread over multiple fill trades are combined (averaged) and applied to the amount."""

    trades_for_order = deepcopy(trades_for_order2)
    if fee_cost:
        trades_for_order[0]['fee']['cost'] = fee_cost
    if fee_currency:
        trades_for_order[0]['fee']['currency'] = fee_currency

    mocker.patch(f'{EXMS}.get_trades_for_order', return_value=trades_for_order)
    amount = float(sum(x['amount'] for x in trades_for_order))
    default_conf_usdt['stake_currency'] = "ETH"

    trade = Trade(
        pair='LTC/ETH',
        amount=amount,
        exchange='binance',
        fee_open=fee.return_value,
        fee_close=fee.return_value,
        open_rate=0.245441
    )

    # Fake markets entry to enable fee parsing
    markets['BNB/ETH'] = markets['ETH/USDT']
    freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
    mocker.patch(f'{EXMS}.markets', PropertyMock(return_value=markets))
    mocker.patch(f'{EXMS}.fetch_ticker',
                 return_value={'ask': 0.19, 'last': 0.2})

    # Amount is reduced by "fee"
    expected_amount = amount * fee_reduction_amount
    order_obj = Order.parse_from_ccxt_object(buy_order_fee, 'LTC/ETH', 'buy')
    assert freqtrade.get_real_amount(trade, buy_order_fee, order_obj) == expected_amount
    assert log_has(
        (
            'Applying fee on amount for Trade(id=None, pair=LTC/ETH, amount=8.00000000, '
            'is_short=False, leverage=1.0, open_rate=0.24544100, open_since=closed), '
            f'fee={expected_amount}.'
        ),
        caplog
    )
    # Only the open side of the fee is updated; the close side stays unset.
    assert trade.fee_open == expected_fee
    assert trade.fee_close == expected_fee
    assert trade.fee_open_cost is not None
    assert trade.fee_open_currency is not None
    assert trade.fee_close_cost is None
    assert trade.fee_close_currency is None
def test_get_real_amount_invalid_order(default_conf_usdt, trades_for_order, buy_order_fee, fee,
                                       mocker):
    """A fee dict without currency and no trade details leaves the amount unchanged."""
    limit_buy_order_usdt = deepcopy(buy_order_fee)
    limit_buy_order_usdt['fee'] = {'cost': 0.004}

    mocker.patch(f'{EXMS}.get_trades_for_order', return_value=[])
    amount = float(sum(x['amount'] for x in trades_for_order))
    trade = Trade(
        pair='LTC/ETH',
        amount=amount,
        exchange='binance',
        fee_open=fee.return_value,
        fee_close=fee.return_value,
        open_rate=0.245441,
    )
    freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
    order_obj = Order.parse_from_ccxt_object(buy_order_fee, 'LTC/ETH', 'buy')
    # Amount does not change
    assert freqtrade.get_real_amount(trade, limit_buy_order_usdt, order_obj) is None
def test_get_real_amount_fees_order(default_conf_usdt, market_buy_order_usdt_doublefee,
                                    fee, mocker):
    """Fees are taken from the order's own fee data; get_trades_for_order is never called."""
    tfo_mock = mocker.patch(f'{EXMS}.get_trades_for_order', return_value=[])
    mocker.patch(f'{EXMS}.get_valid_pair_combination', return_value='BNB/USDT')
    mocker.patch(f'{EXMS}.fetch_ticker', return_value={'last': 200})
    trade = Trade(
        pair='LTC/USDT',
        amount=30.0,
        exchange='binance',
        fee_open=fee.return_value,
        fee_close=fee.return_value,
        open_rate=0.245441,
    )
    freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)

    # Amount does not change
    assert trade.fee_open == 0.0025
    order_obj = Order.parse_from_ccxt_object(market_buy_order_usdt_doublefee, 'LTC/ETH', 'buy')
    assert freqtrade.get_real_amount(trade, market_buy_order_usdt_doublefee, order_obj) is None
    assert tfo_mock.call_count == 0
    # Fetch fees from trades dict if available to get "proper" values
    assert round(trade.fee_open, 4) == 0.001
def test_get_real_amount_wrong_amount(default_conf_usdt, trades_for_order, buy_order_fee, fee,
                                      mocker):
    """A mismatch between order amount and summed trade amounts raises DependencyException."""
    limit_buy_order_usdt = deepcopy(buy_order_fee)
    limit_buy_order_usdt['amount'] = limit_buy_order_usdt['amount'] - 0.001

    mocker.patch(f'{EXMS}.get_trades_for_order', return_value=trades_for_order)
    amount = float(sum(x['amount'] for x in trades_for_order))
    trade = Trade(
        pair='LTC/ETH',
        amount=amount,
        exchange='binance',
        open_rate=0.245441,
        fee_open=fee.return_value,
        fee_close=fee.return_value,
    )
    freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
    order_obj = Order.parse_from_ccxt_object(buy_order_fee, 'LTC/ETH', 'buy')
    # Amount does not change
    with pytest.raises(DependencyException, match=r"Half bought\? Amounts don't match"):
        freqtrade.get_real_amount(trade, limit_buy_order_usdt, order_obj)
def test_get_real_amount_wrong_amount_rounding(default_conf_usdt, trades_for_order, buy_order_fee,
                                               fee, mocker):
    """Tiny floating-point drift between trades and order amount must be tolerated."""
    # Floats should not be compared directly.
    limit_buy_order_usdt = deepcopy(buy_order_fee)
    trades_for_order[0]['amount'] = trades_for_order[0]['amount'] + 1e-15

    mocker.patch(f'{EXMS}.get_trades_for_order', return_value=trades_for_order)
    amount = float(sum(x['amount'] for x in trades_for_order))
    trade = Trade(
        pair='LTC/ETH',
        amount=amount,
        exchange='binance',
        fee_open=fee.return_value,
        fee_close=fee.return_value,
        open_rate=0.245441,
    )
    freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
    order_obj = Order.parse_from_ccxt_object(buy_order_fee, 'LTC/ETH', 'buy')
    # Amount changes by fee amount.
    assert pytest.approx(freqtrade.get_real_amount(
        trade, limit_buy_order_usdt, order_obj)) == (amount * 0.001)
def test_get_real_amount_open_trade_usdt(default_conf_usdt, fee, mocker):
    """Orders still in 'open' status are skipped - no fee adjustment is applied."""
    amount = 12345
    trade = Trade(
        pair='LTC/ETH',
        amount=amount,
        exchange='binance',
        open_rate=0.245441,
        fee_open=fee.return_value,
        fee_close=fee.return_value,
    )
    order = {
        'id': 'mocked_order',
        'amount': amount,
        'status': 'open',
        'side': 'buy',
        'price': 0.245441,
    }
    freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
    order_obj = Order.parse_from_ccxt_object(order, 'LTC/ETH', 'buy')
    assert freqtrade.get_real_amount(trade, order, order_obj) is None
def test_get_real_amount_in_point(default_conf_usdt, buy_order_fee, fee, mocker, caplog):
    """A fee in an unknown currency (POINT, no rate) is only applied once
    exchange.unknown_fee_rate is configured."""
    limit_buy_order_usdt = deepcopy(buy_order_fee)
    # Fees amount in "POINT"
    trades = [{
        "info": {
        },
        "id": "some_trade_id",
        "timestamp": 1660092505903,
        "datetime": "2022-08-10T00:48:25.903Z",
        "symbol": "CEL/USDT",
        "order": "some_order_id",
        "type": None,
        "side": "sell",
        "takerOrMaker": "taker",
        "price": 1.83255,
        "amount": 83.126,
        "cost": 152.3325513,
        "fee": {
            "currency": "POINT",
            "cost": 0.3046651026
        },
        "fees": [
            {
                "cost": "0",
                "currency": "USDT"
            },
            {
                "cost": "0",
                "currency": "GT"
            },
            {
                "cost": "0.3046651026",
                "currency": "POINT"
            }
        ]
    }]

    mocker.patch(f'{EXMS}.get_trades_for_order', return_value=trades)
    amount = float(sum(x['amount'] for x in trades))
    trade = Trade(
        pair='CEL/USDT',
        amount=amount,
        exchange='binance',
        fee_open=fee.return_value,
        fee_close=fee.return_value,
        open_rate=0.245441
    )
    limit_buy_order_usdt['amount'] = amount
    freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
    order_obj = Order.parse_from_ccxt_object(buy_order_fee, 'LTC/ETH', 'buy')
    # No rate for POINT -> fee cannot be applied yet.
    res = freqtrade.get_real_amount(trade, limit_buy_order_usdt, order_obj)
    assert res is None
    assert trade.fee_open_currency is None
    assert trade.fee_open_cost is None
    message = "Not updating buy-fee - rate: None, POINT."
    assert log_has(message, caplog)
    caplog.clear()
    # With an explicit unknown_fee_rate the POINT fee becomes applicable.
    freqtrade.config['exchange']['unknown_fee_rate'] = 1
    res = freqtrade.get_real_amount(trade, limit_buy_order_usdt, order_obj)
    assert res is None
    assert trade.fee_open_currency == 'POINT'
    assert pytest.approx(trade.fee_open_cost) == 0.3046651026
    assert trade.fee_open == 0.002
    assert trade.fee_open != fee.return_value
    assert not log_has(message, caplog)
@pytest.mark.parametrize('amount,fee_abs,wallet,amount_exp', [
    (8.0, 0.0, 10, None),
    (8.0, 0.0, 0, None),
    (8.0, 0.1, 0, 0.1),
    (8.0, 0.1, 10, None),
    (8.0, 0.1, 8.0, None),
    (8.0, 0.1, 7.9, 0.1),
])
def test_apply_fee_conditional(default_conf_usdt, fee, mocker, caplog,
                               amount, fee_abs, wallet, amount_exp):
    """apply_fee_conditional deducts the fee only when the wallet balance cannot cover it."""
    walletmock = mocker.patch('freqtrade.wallets.Wallets.update')
    mocker.patch('freqtrade.wallets.Wallets.get_free', return_value=wallet)
    trade = Trade(
        pair='LTC/ETH',
        amount=amount,
        exchange='binance',
        open_rate=0.245441,
        fee_open=fee.return_value,
        fee_close=fee.return_value,
    )
    order = Order(
        ft_order_side='buy',
        order_id='100',
        ft_pair=trade.pair,
        ft_is_open=True,
    )
    freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
    walletmock.reset_mock()
    # Amount is kept as is
    assert freqtrade.apply_fee_conditional(trade, 'LTC', amount, fee_abs, order) == amount_exp
    assert walletmock.call_count == 1
    if fee_abs != 0 and amount_exp is None:
        assert log_has_re(r"Fee amount.*Eating.*dust\.", caplog)
@pytest.mark.parametrize('amount,fee_abs,wallet,amount_exp', [
    (8.0, 0.0, 16, None),
    (8.0, 0.0, 0, None),
    (8.0, 0.1, 8, 0.1),
    (8.0, 0.1, 20, None),
    (8.0, 0.1, 16.0, None),
    (8.0, 0.1, 7.9, 0.1),
    (8.0, 0.1, 12, 0.1),
    (8.0, 0.1, 15.9, 0.1),
])
def test_apply_fee_conditional_multibuy(default_conf_usdt, fee, mocker, caplog,
                                        amount, fee_abs, wallet, amount_exp):
    """Dust-eating considers the full position size when the trade has multiple filled orders."""
    walletmock = mocker.patch('freqtrade.wallets.Wallets.update')
    mocker.patch('freqtrade.wallets.Wallets.get_free', return_value=wallet)
    trade = Trade(
        pair='LTC/ETH',
        amount=amount,
        exchange='binance',
        open_rate=0.245441,
        fee_open=fee.return_value,
        fee_close=fee.return_value
    )
    # One closed order
    order = Order(
        ft_order_side='buy',
        order_id='10',
        ft_pair=trade.pair,
        ft_is_open=False,
        filled=amount,
        status="closed"
    )
    trade.orders.append(order)
    # Add additional order - this should NOT eat into dust unless the wallet was bigger already.
    order1 = Order(
        ft_order_side='buy',
        order_id='100',
        ft_pair=trade.pair,
        ft_is_open=True,
    )
    trade.orders.append(order1)

    freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
    walletmock.reset_mock()
    # The new trade amount will be 2x amount - fee / wallet will have to be adapted to this.
    assert freqtrade.apply_fee_conditional(trade, 'LTC', amount, fee_abs, order1) == amount_exp
    assert walletmock.call_count == 1
    if fee_abs != 0 and amount_exp is None:
        assert log_has_re(r"Fee amount.*Eating.*dust\.", caplog)
@pytest.mark.parametrize("delta, is_high_delta", [
    (0.1, False),
    (100, True),
])
@pytest.mark.parametrize('is_short', [False, True])
def test_order_book_depth_of_market(
    default_conf_usdt, ticker_usdt, limit_order_open,
    fee, mocker, order_book_l2, delta, is_high_delta, is_short
):
    """Entries are blocked when the orderbook bids-to-ask delta check fails (high delta)."""
    ticker_side = 'ask' if is_short else 'bid'

    default_conf_usdt['entry_pricing']['check_depth_of_market']['enabled'] = True
    default_conf_usdt['entry_pricing']['check_depth_of_market']['bids_to_ask_delta'] = delta
    patch_RPCManager(mocker)
    patch_exchange(mocker)
    mocker.patch(f'{EXMS}.fetch_l2_order_book', order_book_l2)
    mocker.patch.multiple(
        EXMS,
        fetch_ticker=ticker_usdt,
        create_order=MagicMock(return_value=limit_order_open[entry_side(is_short)]),
        get_fee=fee,
    )
    # Save state of current whitelist
    whitelist = deepcopy(default_conf_usdt['exchange']['pair_whitelist'])
    freqtrade = FreqtradeBot(default_conf_usdt)
    patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short)
    freqtrade.enter_positions()

    trade = Trade.session.scalars(select(Trade)).first()
    if is_high_delta:
        # Delta of 100 is unreachable -> no trade is opened.
        assert trade is None
    else:
        trade.is_short = is_short
        assert trade is not None
        assert pytest.approx(trade.stake_amount) == 60.0
        assert trade.is_open
        assert trade.open_date is not None
        assert trade.exchange == 'binance'

        assert len(Trade.session.scalars(select(Trade)).all()) == 1

        # Simulate fulfilled LIMIT_BUY order for trade
        oobj = Order.parse_from_ccxt_object(
            limit_order_open[entry_side(is_short)], 'ADA/USDT', entry_side(is_short))
        trade.update_trade(oobj)

        assert trade.open_rate == ticker_usdt.return_value[ticker_side]
        assert whitelist == default_conf_usdt['exchange']['pair_whitelist']
@pytest.mark.parametrize('exception_thrown,ask,last,order_book_top,order_book', [
    (False, 0.045, 0.046, 2, None),
    (True, 0.042, 0.046, 1, {'bids': [[]], 'asks': [[]]})
])
def test_order_book_entry_pricing1(mocker, default_conf_usdt, order_book_l2, exception_thrown,
                                   ask, last, order_book_top, order_book, caplog) -> None:
    """
    get_rate should use the order book price (not the ticker ask) for entry pricing,
    and raise PricingError when the configured order book level is empty.
    """
    patch_exchange(mocker)
    ticker_usdt_mock = MagicMock(return_value={'ask': ask, 'last': last})
    mocker.patch.multiple(
        EXMS,
        fetch_l2_order_book=MagicMock(return_value=order_book) if order_book else order_book_l2,
        fetch_ticker=ticker_usdt_mock,
    )
    default_conf_usdt['exchange']['name'] = 'binance'
    default_conf_usdt['entry_pricing']['use_order_book'] = True
    default_conf_usdt['entry_pricing']['order_book_top'] = order_book_top
    default_conf_usdt['entry_pricing']['price_last_balance'] = 0
    default_conf_usdt['telegram']['enabled'] = False

    freqtrade = FreqtradeBot(default_conf_usdt)
    if exception_thrown:
        with pytest.raises(PricingError):
            freqtrade.exchange.get_rate('ETH/USDT', side="entry", is_short=False, refresh=True)
        assert log_has_re(
            r'ETH/USDT - Entry Price at location 1 from orderbook could not be determined.', caplog)
    else:
        assert freqtrade.exchange.get_rate(
            'ETH/USDT', side="entry", is_short=False, refresh=True) == 0.043935
        # Ticker must not have been consulted when the order book is used.
        assert ticker_usdt_mock.call_count == 0
def test_check_depth_of_market(default_conf_usdt, mocker, order_book_l2) -> None:
    """
    _check_depth_of_market returns False when the bids-to-ask delta requirement
    (here an unreachable 100) is not met.
    """
    patch_exchange(mocker)
    mocker.patch.multiple(
        EXMS,
        fetch_l2_order_book=order_book_l2
    )
    default_conf_usdt['telegram']['enabled'] = False
    default_conf_usdt['exchange']['name'] = 'binance'
    default_conf_usdt['entry_pricing']['check_depth_of_market']['enabled'] = True
    # delta is 100 which is impossible to reach. hence function will return false
    default_conf_usdt['entry_pricing']['check_depth_of_market']['bids_to_ask_delta'] = 100
    freqtrade = FreqtradeBot(default_conf_usdt)

    conf = default_conf_usdt['entry_pricing']['check_depth_of_market']
    assert freqtrade._check_depth_of_market('ETH/BTC', conf, side=SignalDirection.LONG) is False
@pytest.mark.parametrize('is_short', [False, True])
def test_order_book_exit_pricing(
        default_conf_usdt, limit_buy_order_usdt_open, limit_buy_order_usdt, fee, is_short,
        limit_sell_order_usdt_open, mocker, order_book_l2, caplog) -> None:
    """
    Exit pricing uses the top of the order book; an empty order book raises PricingError.
    """
    mocker.patch(f'{EXMS}.fetch_l2_order_book', order_book_l2)
    default_conf_usdt['exchange']['name'] = 'binance'
    default_conf_usdt['exit_pricing']['use_order_book'] = True
    default_conf_usdt['exit_pricing']['order_book_top'] = 1
    default_conf_usdt['telegram']['enabled'] = False
    patch_RPCManager(mocker)
    patch_exchange(mocker)
    mocker.patch.multiple(
        EXMS,
        fetch_ticker=MagicMock(return_value={
            'bid': 1.9,
            'ask': 2.2,
            'last': 1.9
        }),
        create_order=MagicMock(side_effect=[
            limit_buy_order_usdt_open,
            limit_sell_order_usdt_open,
        ]),
        get_fee=fee,
    )
    freqtrade = FreqtradeBot(default_conf_usdt)
    patch_get_signal(freqtrade)

    freqtrade.enter_positions()

    trade = Trade.session.scalars(select(Trade)).first()
    assert trade

    time.sleep(0.01)  # Race condition fix
    oobj = Order.parse_from_ccxt_object(limit_buy_order_usdt, limit_buy_order_usdt['symbol'], 'buy')
    trade.update_trade(oobj)
    freqtrade.wallets.update()
    assert trade.is_open is True

    if is_short:
        patch_get_signal(freqtrade, enter_long=False,  exit_short=True)
    else:
        patch_get_signal(freqtrade, enter_long=False, exit_long=True)
    assert freqtrade.handle_trade(trade) is True
    # Requested close rate must come from the top ask of the mocked order book.
    assert trade.close_rate_requested == order_book_l2.return_value['asks'][0][0]

    mocker.patch(f'{EXMS}.fetch_l2_order_book', return_value={'bids': [[]], 'asks': [[]]})
    with pytest.raises(PricingError):
        freqtrade.handle_trade(trade)
    assert log_has_re(
        r"ETH/USDT - Exit Price at location 1 from orderbook could not be determined\..*",
        caplog)
def test_startup_state(default_conf_usdt, mocker):
    """A freshly started worker should be in the RUNNING state."""
    default_conf_usdt['pairlist'] = {
        'method': 'VolumePairList',
        'config': {'number_assets': 20},
    }
    mocker.patch(f'{EXMS}.exchange_has', MagicMock(return_value=True))
    started_worker = get_patched_worker(mocker, default_conf_usdt)
    assert started_worker.freqtrade.state is State.RUNNING
def test_startup_trade_reinit(default_conf_usdt, edge_conf, mocker):
    """stoploss_reinitialization runs on startup - but not for edge-enabled bots."""
    mocker.patch(f'{EXMS}.exchange_has', MagicMock(return_value=True))
    reinit_mock = MagicMock()
    mocker.patch('freqtrade.persistence.Trade.stoploss_reinitialization', reinit_mock)

    bot = get_patched_freqtradebot(mocker, default_conf_usdt)
    bot.startup()
    assert reinit_mock.call_count == 1

    reinit_mock.reset_mock()

    bot = get_patched_freqtradebot(mocker, edge_conf)
    bot.startup()
    assert reinit_mock.call_count == 0
@pytest.mark.usefixtures("init_persistence")
def test_sync_wallet_dry_run(mocker, default_conf_usdt, ticker_usdt, fee, limit_buy_order_usdt_open,
                             caplog):
    """In dry-run mode the simulated wallet limits how many positions can be opened."""
    default_conf_usdt['dry_run'] = True
    # Initialize to 2 times stake amount
    default_conf_usdt['dry_run_wallet'] = 120.0
    default_conf_usdt['max_open_trades'] = 2
    default_conf_usdt['tradable_balance_ratio'] = 1.0
    patch_exchange(mocker)
    mocker.patch.multiple(
        EXMS,
        fetch_ticker=ticker_usdt,
        create_order=MagicMock(return_value=limit_buy_order_usdt_open),
        get_fee=fee,
    )

    bot = get_patched_freqtradebot(mocker, default_conf_usdt)
    patch_get_signal(bot)
    assert bot.wallets.get_free('USDT') == 120.0

    n = bot.enter_positions()
    assert n == 2
    trades = Trade.session.scalars(select(Trade)).all()
    assert len(trades) == 2

    bot.config['max_open_trades'] = 3
    n = bot.enter_positions()
    # Wallet exhausted - the third entry must fail with a log message.
    assert n == 0
    assert log_has_re(r"Unable to create trade for XRP/USDT: "
                      r"Available balance \(0.0 USDT\) is lower than stake amount \(60.0 USDT\)",
                      caplog)
@pytest.mark.usefixtures("init_persistence")
@pytest.mark.parametrize("is_short,buy_calls,sell_calls", [
    (False, 1, 1),
    (True, 1, 1),
])
def test_cancel_all_open_orders(mocker, default_conf_usdt, fee, limit_order, limit_order_open,
                                is_short, buy_calls, sell_calls):
    """cancel_all_open_orders dispatches to the entry/exit cancel handlers per open order."""
    default_conf_usdt['cancel_open_orders_on_exit'] = True
    # First fetch_order call errors out; the remaining ones return orders in sequence.
    mocker.patch(
        f'{EXMS}.fetch_order',
        side_effect=[
            ExchangeError(),
            limit_order[exit_side(is_short)],
            limit_order_open[entry_side(is_short)],
            limit_order_open[exit_side(is_short)],
        ]
    )
    buy_mock = mocker.patch('freqtrade.freqtradebot.FreqtradeBot.handle_cancel_enter')
    sell_mock = mocker.patch('freqtrade.freqtradebot.FreqtradeBot.handle_cancel_exit')

    freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
    create_mock_trades(fee, is_short=is_short)
    trades = Trade.session.scalars(select(Trade)).all()
    assert len(trades) == MOCK_TRADE_COUNT
    freqtrade.cancel_all_open_orders()
    assert buy_mock.call_count == buy_calls
    assert sell_mock.call_count == sell_calls
@pytest.mark.usefixtures("init_persistence")
@pytest.mark.parametrize("is_short", [False, True])
def test_check_for_open_trades(mocker, default_conf_usdt, fee, is_short):
    """check_for_open_trades sends an RPC warning only when open trades exist."""
    bot = get_patched_freqtradebot(mocker, default_conf_usdt)

    # No trades yet - nothing should be sent.
    bot.check_for_open_trades()
    assert bot.rpc.send_msg.call_count == 0

    create_mock_trades(fee, is_short)
    first_trade = Trade.session.scalars(select(Trade)).first()
    first_trade.is_short = is_short
    first_trade.is_open = True

    bot.check_for_open_trades()
    assert bot.rpc.send_msg.call_count == 1
    assert 'Handle these trades manually' in bot.rpc.send_msg.call_args[0][0]['status']
@pytest.mark.parametrize("is_short", [False, True])
@pytest.mark.usefixtures("init_persistence")
def test_startup_update_open_orders(mocker, default_conf_usdt, fee, caplog, is_short):
    """On startup open orders are refreshed from the exchange; stale/unknown orders are handled."""
    freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
    create_mock_trades(fee, is_short=is_short)

    freqtrade.startup_update_open_orders()
    assert not log_has_re(r"Error updating Order .*", caplog)
    caplog.clear()

    freqtrade.config['dry_run'] = False
    freqtrade.startup_update_open_orders()

    assert len(Order.get_open_orders()) == 4
    matching_buy_order = mock_order_4(is_short=is_short)
    matching_buy_order.update({
        'status': 'closed',
    })
    mocker.patch(f'{EXMS}.fetch_order', return_value=matching_buy_order)
    freqtrade.startup_update_open_orders()
    # Only stoploss and sell orders are kept open
    assert len(Order.get_open_orders()) == 3

    caplog.clear()
    mocker.patch(f'{EXMS}.fetch_order', side_effect=ExchangeError)
    freqtrade.startup_update_open_orders()
    assert log_has_re(r"Error updating Order .*", caplog)

    mocker.patch(f'{EXMS}.fetch_order', side_effect=InvalidOrderException)
    hto_mock = mocker.patch('freqtrade.freqtradebot.FreqtradeBot.handle_cancel_order')
    # Orders which are no longer found after X days should be assumed as canceled.
    freqtrade.startup_update_open_orders()
    assert log_has_re(r"Order is older than \d days.*", caplog)
    assert hto_mock.call_count == 3
    assert hto_mock.call_args_list[0][0][0]['status'] == 'canceled'
    assert hto_mock.call_args_list[1][0][0]['status'] == 'canceled'
@pytest.mark.usefixtures("init_persistence")
def test_startup_backpopulate_precision(mocker, default_conf_usdt, fee, caplog):
    """Startup backfills price/amount precision for trades on the current exchange only."""
    freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
    create_mock_trades_usdt(fee)

    trades = Trade.get_trades().all()
    trades[-1].exchange = 'some_other_exchange'
    for trade in trades:
        assert trade.price_precision is None
        assert trade.amount_precision is None
        assert trade.precision_mode is None

    freqtrade.startup_backpopulate_precision()
    trades = Trade.get_trades().all()
    for trade in trades:
        if trade.exchange == 'some_other_exchange':
            # Foreign-exchange trades must remain untouched.
            assert trade.price_precision is None
            assert trade.amount_precision is None
            assert trade.precision_mode is None
        else:
            assert trade.price_precision is not None
            assert trade.amount_precision is not None
            assert trade.precision_mode is not None
@pytest.mark.usefixtures("init_persistence")
@pytest.mark.parametrize("is_short", [False, True])
def test_update_trades_without_assigned_fees(mocker, default_conf_usdt, fee, is_short):
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
def patch_with_fee(order):
order.update({'fee': {'cost': 0.1, 'rate': 0.01,
'currency': order['symbol'].split('/')[0]}})
return order
mocker.patch(f'{EXMS}.fetch_order_or_stoploss_order',
side_effect=[ | patch_with_fee(mock_order_2_sell(is_short=is_short)), | 16 | 2023-11-07 18:46:03+00:00 | 8k |
awslabs/optimizing-multitask-training-through-dynamic-pipelines | tests/test_dataloader/test_dataloader.py | [
{
"identifier": "TransformerModelSpec",
"path": "dynapipe/model.py",
"snippet": "class TransformerModelSpec:\n # Default setting:\n # * mlp_hidden_size = 4x hidden_dim\n # * kv_channels = hidden_dim // num_attn_heads\n # * use FP16 mixed precision training with Adam optimizer.\n n_enco... | import os
import pytest
import torch
import torch.distributed as dist
from torch.utils.data import Dataset
from dynapipe.model import TransformerModelSpec, get_uniform_cluster
from dynapipe.pipe.data_loader import DynaPipeDataLoader, TrainingSpec
from dynapipe.pipe.instructions import ExecutionPlan, ForwardPass | 4,474 | # torchrun --standalone --nnodes=1 --nproc_per_node=4 test_dataloader.py
# Others:
# DYNAPIPE_DEBUG=DEBUG DYNAPIPE_LOGGING_DEBUG_DIR=./test_debug \
# torchrun --standalone --nnodes=1 --nproc_per_node=2 test_dataloader.py
torch.manual_seed(42)
@pytest.fixture(scope="module", autouse=True)
def init_torch_distributed():
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "12355"
torch.distributed.init_process_group("gloo")
class DummyDataset(Dataset):
def __init__(self, size, inputs_only=False):
self.size = size
torch.manual_seed(42)
# pre-generate all data
self.enc_seqlen = []
self.dec_seqlen = []
self.data = []
for _ in range(size):
enc_seqlen, dec_seqlen = torch.randint(24, 512, (2,))
self.enc_seqlen.append(enc_seqlen)
if not inputs_only:
self.dec_seqlen.append(dec_seqlen)
result = {
"text_enc": list(
torch.randint(0, 100, (enc_seqlen,)).numpy()
),
"text_dec": list(
torch.randint(0, 100, (dec_seqlen,)).numpy()
),
}
else:
result = {
"text": list(torch.randint(0, 100, (enc_seqlen,)).numpy()),
}
self.data.append(result)
def __len__(self):
return self.size
def __getitem__(self, index):
return self.data[index]
def dummy_pack_fn(tensors):
# (input, extra)
if len(tensors) == 0:
return [], 0
if isinstance(tensors[0], list):
concated_list = []
for t in tensors:
concated_list.extend(t)
return concated_list, 0
return torch.cat(tensors, dim=0), 0
def dummy_constructor_fn(
encoder_input,
encoder_extra,
decoder_input,
decoder_extra,
encoder_seqlen,
decoder_seqlen,
):
encoder_padding_len = encoder_seqlen - len(encoder_input)
if decoder_input is not None:
decoder_padding_len = decoder_seqlen - len(decoder_input)
encoder_input = torch.tensor(encoder_input, dtype=torch.long)
if decoder_input is not None:
decoder_input = torch.tensor(decoder_input, dtype=torch.long)
encoder_padded = torch.cat(
[
encoder_input,
torch.zeros(
encoder_padding_len,
dtype=encoder_input.dtype,
device=encoder_input.device,
),
],
dim=0,
)
if decoder_input is not None:
decoder_padded = torch.cat(
[
decoder_input,
torch.zeros(
decoder_padding_len,
dtype=decoder_input.dtype,
device=decoder_input.device,
),
],
dim=0,
)
return {
"text_enc": encoder_padded,
"text_dec": decoder_padded,
}
else:
return {
"text": encoder_padded,
}
def get_mb_shape_from_ep(ep: ExecutionPlan):
fw_shapes = []
for instr in ep.instructions:
if isinstance(instr, ForwardPass):
fw_shapes.append(instr.buffer_shapes)
return fw_shapes
def test_joint_data_loader(inputs_only=False):
| # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# Note: this test requires torch
# to run this test, exec:
# If running hanging tests or multi-node tests:
# DYNAPIPE_DEBUG=DEBUG DYNAPIPE_LOGGING_DEBUG_DIR=./test_debug \
# torchrun --standalone --nnodes=1 --nproc_per_node=4 test_dataloader.py
# Others:
# DYNAPIPE_DEBUG=DEBUG DYNAPIPE_LOGGING_DEBUG_DIR=./test_debug \
# torchrun --standalone --nnodes=1 --nproc_per_node=2 test_dataloader.py
torch.manual_seed(42)
@pytest.fixture(scope="module", autouse=True)
def init_torch_distributed():
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "12355"
torch.distributed.init_process_group("gloo")
class DummyDataset(Dataset):
def __init__(self, size, inputs_only=False):
self.size = size
torch.manual_seed(42)
# pre-generate all data
self.enc_seqlen = []
self.dec_seqlen = []
self.data = []
for _ in range(size):
enc_seqlen, dec_seqlen = torch.randint(24, 512, (2,))
self.enc_seqlen.append(enc_seqlen)
if not inputs_only:
self.dec_seqlen.append(dec_seqlen)
result = {
"text_enc": list(
torch.randint(0, 100, (enc_seqlen,)).numpy()
),
"text_dec": list(
torch.randint(0, 100, (dec_seqlen,)).numpy()
),
}
else:
result = {
"text": list(torch.randint(0, 100, (enc_seqlen,)).numpy()),
}
self.data.append(result)
def __len__(self):
return self.size
def __getitem__(self, index):
return self.data[index]
def dummy_pack_fn(tensors):
# (input, extra)
if len(tensors) == 0:
return [], 0
if isinstance(tensors[0], list):
concated_list = []
for t in tensors:
concated_list.extend(t)
return concated_list, 0
return torch.cat(tensors, dim=0), 0
def dummy_constructor_fn(
encoder_input,
encoder_extra,
decoder_input,
decoder_extra,
encoder_seqlen,
decoder_seqlen,
):
encoder_padding_len = encoder_seqlen - len(encoder_input)
if decoder_input is not None:
decoder_padding_len = decoder_seqlen - len(decoder_input)
encoder_input = torch.tensor(encoder_input, dtype=torch.long)
if decoder_input is not None:
decoder_input = torch.tensor(decoder_input, dtype=torch.long)
encoder_padded = torch.cat(
[
encoder_input,
torch.zeros(
encoder_padding_len,
dtype=encoder_input.dtype,
device=encoder_input.device,
),
],
dim=0,
)
if decoder_input is not None:
decoder_padded = torch.cat(
[
decoder_input,
torch.zeros(
decoder_padding_len,
dtype=decoder_input.dtype,
device=decoder_input.device,
),
],
dim=0,
)
return {
"text_enc": encoder_padded,
"text_dec": decoder_padded,
}
else:
return {
"text": encoder_padded,
}
def get_mb_shape_from_ep(ep: ExecutionPlan):
fw_shapes = []
for instr in ep.instructions:
if isinstance(instr, ForwardPass):
fw_shapes.append(instr.buffer_shapes)
return fw_shapes
def test_joint_data_loader(inputs_only=False): | cluster_spec = get_uniform_cluster(2) | 1 | 2023-11-08 07:58:20+00:00 | 8k |
apple/ml-reed | reed/models/reward_model.py | [
{
"identifier": "get_image_encoder",
"path": "reed/models/image_encoder.py",
"snippet": "def get_image_encoder(architecture: str, obs_dim: t.List[int], out_size: int = 1,\n hidden_dim: int = 128, hidden_depth: int = 3,\n image_hidden_num_channels: int = 32,\n ... | import typing as t
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from abc import abstractmethod
from collections import OrderedDict
from reed.models.image_encoder import get_image_encoder
from reed.models.self_predictive_representations_model import StateActionSelfPredictiveRepresentationsNetworkEnsemble
from reed.data.preference_data_loader import PreferenceTripletEnsembleDataLoader | 4,613 | final_activation=final_activation,
dropout_probability=dropout_probability,
train_with_dropout=train_with_dropout
)
def _build(self):
"""
Build the 4 mini-networks that make up the model:
state encoder
action encoder
state-action encoder
prediction head
"""
# build the network that will encode the state features
self._state_encoder = nn.Sequential(OrderedDict([
('state_dense1', nn.Linear(self._in_size, self._state_embed_size)),
('state_leakyrelu1', nn.LeakyReLU(negative_slope=1e-2)),
('state_dropout1', nn.Dropout(self._dropout_prob))
]))
# build the netowrk that will encode the action features
self._action_encoder = nn.Sequential(OrderedDict([
('action_dense1', nn.Linear(self._action_size, self._action_embed_size)),
('action_leakyrelu1', nn.LeakyReLU(negative_slope=1e-2)),
('action_dropout1', nn.Dropout(self._dropout_prob))
]))
# build the network that models the relationship between the state anc action embeddings
state_action_encoder = []
for i in range(self._num_layers):
state_action_encoder.append((f'trunk_dense{i+1}', nn.Linear((self._hidden_size if i > 0 else self._state_embed_size + self._action_embed_size), self._hidden_size)))
state_action_encoder.append((f'trunk_leakyrelu{i+1}', nn.LeakyReLU(negative_slope=1e-2)))
state_action_encoder.append((f'trunk_dropout{i+1}', nn.Dropout(self._dropout_prob)))
self._state_action_encoder = nn.Sequential(OrderedDict(state_action_encoder))
# build the prediction head and select a final activation
self._prediction_head = nn.Linear(self._hidden_size, self._out_size)
if self._final_activation_type == 'tanh':
self._final_activation = nn.Tanh()
elif self._final_activation_type == 'sig':
self._final_activation = nn.Sigmoid()
else:
self._final_activation = nn.ReLU()
def _forward(self, states_action_pairs: torch.Tensor) -> torch.Tensor:
"""
Assign a reward value to each transition in the trajectory
Args:
states_action_pairs: batch of states-action pairs
expected dimensionality: (batch, state_features+action_features)
** It is expected that indices tie the states and action together
Returns:
the predicted reward for the state-action pair(s)
"""
# encode the state, the action, and the state-action pair
if len(states_action_pairs.size()) == 1:
states_embed = self._state_encoder(states_action_pairs[:self._in_size])
actions_embed = self._action_encoder(states_action_pairs[-self._action_size:])
elif len(states_action_pairs.size()) == 2:
states_embed = self._state_encoder(states_action_pairs[:, :self._in_size])
actions_embed = self._action_encoder(states_action_pairs[:, -self._action_size:])
elif len(states_action_pairs.size()) == 3:
states_embed = self._state_encoder(states_action_pairs[:, :, :self._in_size])
actions_embed = self._action_encoder(states_action_pairs[:, :, -self._action_size:])
else:
raise NotImplementedError()
state_action_embed = self._state_action_encoder(torch.cat([states_embed, actions_embed], dim=-1))
# predict the target values
prediction = self._final_activation(self._prediction_head(state_action_embed))
return prediction
class ImageStateActionNetwork(_BaseModel):
def __init__(self, obs_dim: t.List[int], out_size: int = 1,
hidden_dim: int = 128, hidden_depth: int = 3,
final_activation: str = 'tanh',
dropout_probability: float = 0.0,
train_with_dropout: bool = False,
image_encoder_architecture: str = "pixl2r",
image_hidden_num_channels: int = 32,
*kwargs):
"""
Maps state-action pairs to some type of value where the state is an image
Args:
obs_dim: dimensionality of the state images (height, width, channels)
out_size: the size of the output
hidden_dim: the size of the hidden layer(s)
hidden_depth: the number of hidden layers
final_activation: (default = tanh) the activation to use on the final layer
dropout_probability: (default = 0.) probability with which to set a weight value to 0. during a forward pass
a probability of 0, means no dropout
train_with_dropout: whether to use the dropout layers at train time (if the dropout probability is
greater than 0.)
Another use for the dropout layers is at test time to assess model uncertainty.
image_encoder_architecture: (default = "pixl2r") the architecture that is used for the image encoder
image_hidden_num_channels: (default = 32) the number of channels in the hidden layers of the image encoder
"""
self._image_encoder_architecture = image_encoder_architecture
assert image_encoder_architecture in {"pixl2r", "drqv2"}
self._image_hidden_num_channels = image_hidden_num_channels
super(ImageStateActionNetwork, self).__init__(
in_dim=obs_dim,
out_size=out_size,
hidden_dim=hidden_dim,
hidden_depth=hidden_depth,
final_activation=final_activation,
dropout_probability=dropout_probability,
train_with_dropout=train_with_dropout
)
def _build(self):
"""
"""
# build the image encoder
| #
# For licensing see accompanying LICENSE file.
# Copyright (C) 2023 Apple Inc. All Rights Reserved.
#
def _to_grayscale(img_obs: np.ndarray, batch_states: bool) -> np.ndarray:
"""
Convert the RGB image observations to grayscale
Args:
img_obs: the batch of image observations to convert to grayscale
batch_states: whether a batch of observations or a single observation is to be processed
Returns:
the grayscale batch os images
"""
if batch_states:
obs = img_obs.astype(float)
obs[:, :, :, 0] *= 0.1140
obs[:, :, :, 1] *= 0.587
obs[:, :, :, 2] *= 0.2989
return np.sum(obs, axis=-1, keepdims=True)
else:
obs = img_obs.astype(float)
obs[:, :, 0] *= 0.1140
obs[:, :, 1] *= 0.587
obs[:, :, 2] *= 0.2989
return np.sum(obs, axis=-1, keepdims=True)
class _BaseModel(nn.Module):
"""
A base reward model
"""
def __init__(self, in_dim: t.Union[t.List[int], int], out_size: int = 1,
hidden_dim: int = 128, hidden_depth: int = 3,
final_activation: str = 'tanh',
dropout_probability: float = 0.0,
train_with_dropout: bool = False):
"""
A network to consume the state-based environment observations and actions
Args:
in_dim: dimensionality of the model's input
out_size: the size of the output
hidden_dim: the size of the hidden layer(s)
hidden_depth: the number of hidden layers
final_activation: (default = tanh) the activation to use on the final layer
dropout_probability: (default = 0.) probability with which to set a weight value to 0. during a forward pass
a probability of 0, means no dropout
train_with_dropout: whether to use the dropout layers at train time (if the dropout probability is
greater than 0.)
Another use for the dropout layers is at test time to assess model uncertainty.
"""
super(_BaseModel, self).__init__()
# track the dimensionality of the input, the output, and the hidden dimensions
self._in_size = in_dim
self._out_size = out_size
self._hidden_size = hidden_dim
self._num_layers = hidden_depth
self._final_activation_type = final_activation
self._dropout_prob = dropout_probability
self._train_with_dropout = train_with_dropout
self._dropout_enabled = dropout_probability > 0
self._build()
@abstractmethod
def _build(self):
"""
Build the network
"""
pass
@abstractmethod
def _forward(self, states_action_pairs: torch.Tensor) -> torch.Tensor:
"""
Assign a reward value to each transition in the trajectory
Args:
states_action_pairs: batch of states-action pairs
expected dimensionality: (batch, state_features+action_features)
** It is expected that indices tie the states and action together
Returns:
the predicted reward for the state-action pair(s)
"""
pass
def _enable_dropout(self):
""" Function to enable the dropout layers, e.g. during test-time """
for m in self.modules():
if 'dropout' in m.__class__.__name__:
print(m)
m.train()
self._dropout_enabled = True
def _disable_dropout(self):
""" Function to disable the dropout layers, e.g. during train time"""
for m in self.modules():
if 'dropout' in m.__class__.__name__:
m.eval()
self._dropout_enabled = False
def forward(self, states_action_pairs: torch.Tensor) -> torch.Tensor:
"""
Assign a reward value to each transition in the trajectory
Args:
states_action_pairs: batch of states-action pairs
expected dimensionality: (batch, state_features+action_features)
** It is expected that indices tie the states and action together
Returns:
the predicted reward for the state-action pair(s)
"""
if self.training and not self._train_with_dropout and self._dropout_prob > 0:
self._disable_dropout()
return self._forward(states_action_pairs)
def weight_decay_parameters(self) -> t.Tuple[t.Sequence, t.Sequence]:
"""
Sort the model parameters by whether weight decay can be applied to them
Returns:
with weight decay params
without weight decay params
"""
# need to track which weights will have L2 penalty (weight decay) applied and which won't
params_with_wd = []
params_without_wd = []
for m in self.modules():
# we get the nested Modules in their nested structure
# skip modules until we get the to leaf node modules
if len(list(m.children())) > 0: continue
if isinstance(m, nn.Linear):
params_with_wd.append(m.weight)
params_without_wd.append(m.bias)
else:
params_without_wd.extend(m.parameters())
return params_with_wd, params_without_wd
def from_pretrained(self, state_dict: t.OrderedDict[str, torch.Tensor]):
"""
Load the given state dictionary to the model
Args:
state_dict: the state dictionary to load into memory
Returns:
"""
self.load_state_dict(state_dict)
def estimate_uncertainty(self, states_action_pairs: torch.Tensor, num_samples: int = 100) -> np.ndarray:
"""
Estimate model uncertainty over the given batch of data
Args:
states_action_pairs: batch of states-action pairs
expected dimensionality: (batch, state_features+action_features)
** It is expected that indices tie the states and action together
num_samples: the number of forward passes with different dropout configurations to run to estimate
the uncertainty
Returns:
variance over predictions across the different dropout configurations
"""
with torch.no_grad():
# check how dropout started, because we want to leave it how we started
dropout_start_enabled = self._dropout_enabled
if not dropout_start_enabled:
self._enable_dropout()
# estimate the predicted values num_samples many times
repeat_estimates = []
for _ in range(num_samples):
estimate = self._forward(states_action_pairs).detach().cpu().numpy()
repeat_estimates.append(estimate)
if not dropout_start_enabled:
self._disable_dropout()
# combine the estimations
estimates = np.concatenate(repeat_estimates, axis=-1)
mean_estimation = np.mean(estimates, axis=-1, keepdims=True)
return np.mean(np.square(np.subtract(mean_estimation, estimates)), axis=-1)
def forward_with_dropout(self, states_action_pairs: torch.Tensor, num_samples: int = 100) -> np.ndarray:
"""
Execute a forward pass of the given data with all but the dropout layers in eval mode
Args:
states_action_pairs: batch of states-action pairs
expected dimensionality: (batch, state_features+action_features)
** It is expected that indices tie the states and action together
num_samples: the number of forward passes with different dropout configurations to run to estimate
the uncertainty
Returns:
dropout predictions across the different dropout configurations
"""
with torch.no_grad():
# check how dropout started, because we want to leave it how we started
dropout_start_enabled = self._dropout_enabled
if not dropout_start_enabled:
self._enable_dropout()
# estimate the predicted values num_samples many times
repeat_estimates = []
for _ in range(num_samples):
estimate = self._forward(states_action_pairs).detach().cpu().numpy()
repeat_estimates.append(estimate)
# combine the estimations
estimates = np.hstack(repeat_estimates)
if not dropout_start_enabled:
self._disable_dropout()
return estimates
def random_init_head(self):
"""
Set the final layers to be randomly initialized values
"""
self._prediction_head.reset_parameters()
class StateActionNetwork(_BaseModel):
def __init__(self, in_dim: int, out_size: int = 1,
hidden_dim: int = 128, hidden_depth: int = 3,
final_activation: str = 'tanh',
dropout_probability: float = 0.0,
train_with_dropout: bool = False):
"""
A network to consume the state-based environment observations and actions
Args:
in_dim: dimensionality of the model's input
out_size: the size of the output
hidden_dim: the size of the hidden layer(s)
hidden_depth: the number of hidden layers
final_activation: (default = tanh) the activation to use on the final layer
dropout_probability: (default = 0.) probability with which to set a weight value to 0. during a forward pass
a probability of 0, means no dropout
train_with_dropout: whether to use the dropout layers at train time (if the dropout probability is
greater than 0.)
Another use for the dropout layers is at test time to assess model uncertainty.
"""
super(StateActionNetwork, self).__init__(
in_dim=in_dim,
out_size=out_size,
hidden_dim=hidden_dim,
hidden_depth=hidden_depth,
final_activation=final_activation,
dropout_probability=dropout_probability,
train_with_dropout=train_with_dropout
)
def _build(self):
"""
Build the 4 mini-networks that make up the model:
state encoder
action encoder
state-action encoder
prediction head
"""
# build the network that models the relationship between the state anc action embeddings
network_body = []
for i in range(self._num_layers):
network_body.append((f'trunk_dense{i+1}', nn.Linear((self._hidden_size if i > 0 else self._in_size), self._hidden_size)))
network_body.append((f'trunk_leakyrelu{i+1}', nn.LeakyReLU(negative_slope=1e-2)))
network_body.append((f'trunk_dropout{i+1}', nn.Dropout(self._dropout_prob)))
self._network_body = nn.Sequential(OrderedDict(network_body))
# build the prediction head and select a final activation
self._prediction_head = nn.Linear(self._hidden_size, self._out_size)
if self._final_activation_type == 'tanh':
self._final_activation = nn.Tanh()
elif self._final_activation_type == 'sig':
self._final_activation = nn.Sigmoid()
else:
self._final_activation = nn.ReLU()
def _forward(self, states_action_pairs: torch.Tensor) -> torch.Tensor:
"""
Assign a reward value to each transition in the trajectory
Args:
states_action_pairs: batch of states-action pairs
expected dimensionality: (batch, state_features+action_features)
** It is expected that indices tie the states and action together
Returns:
the predicted reward for the state-action pair(s)
"""
state_action_embed = self._network_body(states_action_pairs)
# predict the target values
prediction = self._final_activation(self._prediction_head(state_action_embed))
return prediction
class StateActionFusionNetwork(_BaseModel):
def __init__(self, obs_dim: int, action_dim: int, out_size: int = 1,
obs_embed_dim: int = 64, action_embed_dim: int = 64,
hidden_dim: int = 128, hidden_depth: int = 3,
final_activation: str = 'tanh',
dropout_probability: float = 0.0,
train_with_dropout: bool = False):
"""
Initial pass at a network used to train state-action representations that are consistent with
the network's encoding of the state that results from applying the given action in the given state
Args:
obs_dim: dimensionality of the states
action_dim: dimensionality of the actions
out_size: the size of the output
obs_embed_dim: the size of the state embedding
action_embed_dim: the size of the action embedding
hidden_dim: the size of the hidden layer(s)
hidden_depth: the number of hidden layers
final_activation: the activation to use on the final layer
dropout_probability: (default = 0.) probability with which to set a weight value to 0. during a forward pass
a probability of 0, means no dropout
train_with_dropout: whether to use the dropout layers at train time (if the dropout probability is
greater than 0.)
Another use for the dropout layers is at test time to assess model uncertainty.
"""
self._action_size = action_dim
self._state_embed_size = obs_embed_dim # int(self._hidden_size/2)
self._action_embed_size = action_embed_dim # int(self._hidden_size/2)
super(StateActionFusionNetwork, self).__init__(
in_dim=obs_dim,
out_size=out_size,
hidden_dim=hidden_dim,
hidden_depth=hidden_depth,
final_activation=final_activation,
dropout_probability=dropout_probability,
train_with_dropout=train_with_dropout
)
def _build(self):
"""
Build the 4 mini-networks that make up the model:
state encoder
action encoder
state-action encoder
prediction head
"""
# build the network that will encode the state features
self._state_encoder = nn.Sequential(OrderedDict([
('state_dense1', nn.Linear(self._in_size, self._state_embed_size)),
('state_leakyrelu1', nn.LeakyReLU(negative_slope=1e-2)),
('state_dropout1', nn.Dropout(self._dropout_prob))
]))
# build the netowrk that will encode the action features
self._action_encoder = nn.Sequential(OrderedDict([
('action_dense1', nn.Linear(self._action_size, self._action_embed_size)),
('action_leakyrelu1', nn.LeakyReLU(negative_slope=1e-2)),
('action_dropout1', nn.Dropout(self._dropout_prob))
]))
# build the network that models the relationship between the state anc action embeddings
state_action_encoder = []
for i in range(self._num_layers):
state_action_encoder.append((f'trunk_dense{i+1}', nn.Linear((self._hidden_size if i > 0 else self._state_embed_size + self._action_embed_size), self._hidden_size)))
state_action_encoder.append((f'trunk_leakyrelu{i+1}', nn.LeakyReLU(negative_slope=1e-2)))
state_action_encoder.append((f'trunk_dropout{i+1}', nn.Dropout(self._dropout_prob)))
self._state_action_encoder = nn.Sequential(OrderedDict(state_action_encoder))
# build the prediction head and select a final activation
self._prediction_head = nn.Linear(self._hidden_size, self._out_size)
if self._final_activation_type == 'tanh':
self._final_activation = nn.Tanh()
elif self._final_activation_type == 'sig':
self._final_activation = nn.Sigmoid()
else:
self._final_activation = nn.ReLU()
def _forward(self, states_action_pairs: torch.Tensor) -> torch.Tensor:
"""
Assign a reward value to each transition in the trajectory
Args:
states_action_pairs: batch of states-action pairs
expected dimensionality: (batch, state_features+action_features)
** It is expected that indices tie the states and action together
Returns:
the predicted reward for the state-action pair(s)
"""
# encode the state, the action, and the state-action pair
if len(states_action_pairs.size()) == 1:
states_embed = self._state_encoder(states_action_pairs[:self._in_size])
actions_embed = self._action_encoder(states_action_pairs[-self._action_size:])
elif len(states_action_pairs.size()) == 2:
states_embed = self._state_encoder(states_action_pairs[:, :self._in_size])
actions_embed = self._action_encoder(states_action_pairs[:, -self._action_size:])
elif len(states_action_pairs.size()) == 3:
states_embed = self._state_encoder(states_action_pairs[:, :, :self._in_size])
actions_embed = self._action_encoder(states_action_pairs[:, :, -self._action_size:])
else:
raise NotImplementedError()
state_action_embed = self._state_action_encoder(torch.cat([states_embed, actions_embed], dim=-1))
# predict the target values
prediction = self._final_activation(self._prediction_head(state_action_embed))
return prediction
class ImageStateActionNetwork(_BaseModel):
def __init__(self, obs_dim: t.List[int], out_size: int = 1,
hidden_dim: int = 128, hidden_depth: int = 3,
final_activation: str = 'tanh',
dropout_probability: float = 0.0,
train_with_dropout: bool = False,
image_encoder_architecture: str = "pixl2r",
image_hidden_num_channels: int = 32,
*kwargs):
"""
Maps state-action pairs to some type of value where the state is an image
Args:
obs_dim: dimensionality of the state images (height, width, channels)
out_size: the size of the output
hidden_dim: the size of the hidden layer(s)
hidden_depth: the number of hidden layers
final_activation: (default = tanh) the activation to use on the final layer
dropout_probability: (default = 0.) probability with which to set a weight value to 0. during a forward pass
a probability of 0, means no dropout
train_with_dropout: whether to use the dropout layers at train time (if the dropout probability is
greater than 0.)
Another use for the dropout layers is at test time to assess model uncertainty.
image_encoder_architecture: (default = "pixl2r") the architecture that is used for the image encoder
image_hidden_num_channels: (default = 32) the number of channels in the hidden layers of the image encoder
"""
self._image_encoder_architecture = image_encoder_architecture
assert image_encoder_architecture in {"pixl2r", "drqv2"}
self._image_hidden_num_channels = image_hidden_num_channels
super(ImageStateActionNetwork, self).__init__(
in_dim=obs_dim,
out_size=out_size,
hidden_dim=hidden_dim,
hidden_depth=hidden_depth,
final_activation=final_activation,
dropout_probability=dropout_probability,
train_with_dropout=train_with_dropout
)
def _build(self):
"""
"""
# build the image encoder | self.convnet = get_image_encoder( | 0 | 2023-11-06 23:14:20+00:00 | 8k |
ApolloAuto/apollo-model-yolox | exps/example/yolox_voc/yolox_voc_l.py | [
{
"identifier": "get_yolox_datadir",
"path": "yolox/data/dataloading.py",
"snippet": "def get_yolox_datadir():\n \"\"\"\n get dataset dir of YOLOX. If environment variable named `YOLOX_DATADIR` is set,\n this function will return value of the environment variable. Otherwise, use data\n \"\"\... | import os
from yolox.data import get_yolox_datadir
from yolox.exp import Exp as MyExp
from yolox.data import VOCDetection, TrainTransform
from yolox.data import VOCDetection, ValTransform
from yolox.evaluators import VOCEvaluator | 3,834 | # encoding: utf-8
class Exp(MyExp):
def __init__(self):
super(Exp, self).__init__()
self.num_classes = 8 # TODO: KITTI class is 6
self.depth = 1.0
self.width = 1.0
self.warmup_epochs = 1
# ---------- transform config ------------ #
self.mosaic_prob = 1.0
self.mixup_prob = 1.0
self.flip_prob = 0.5
self.hsv_prob = 1.0
self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]
def get_dataset(self, cache: bool, cache_type: str = "disk"):
return VOCDetection(
| # encoding: utf-8
class Exp(MyExp):
def __init__(self):
super(Exp, self).__init__()
self.num_classes = 8 # TODO: KITTI class is 6
self.depth = 1.0
self.width = 1.0
self.warmup_epochs = 1
# ---------- transform config ------------ #
self.mosaic_prob = 1.0
self.mixup_prob = 1.0
self.flip_prob = 0.5
self.hsv_prob = 1.0
self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]
def get_dataset(self, cache: bool, cache_type: str = "disk"):
return VOCDetection( | data_dir=os.path.join(get_yolox_datadir(), "CUSTOMER"), # TODO: CUSTOMER to KITTI | 0 | 2023-11-08 07:07:24+00:00 | 8k |
ndiamant/spice | spice/datasets.py | [
{
"identifier": "select_bins",
"path": "spice/conditional_histogram.py",
"snippet": "def select_bins(y: torch.Tensor, n_bins: int) -> torch.Tensor:\n return unique_quantile(y, n_bins, first_bin_zero=False)"
},
{
"identifier": "discretize",
"path": "spice/conditional_histogram.py",
"sn... | import os
import torch
import torch.nn.functional as F
import numpy as np
import pandas as pd
from pathlib import Path
from torch.utils.data import TensorDataset, DataLoader
from pytorch_lightning import LightningDataModule
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from torch.distributions import Gamma
from spice.conditional_histogram import select_bins, discretize
from spice.utils import score_to_q_hat
from sklearn.impute import SimpleImputer | 4,841 | if 'blog_data' in name:
# https://github.com/xinbinhuang/feature-selection_blogfeedback
df = pd.read_csv(os.path.join(base_path, 'blogData_train.csv'), header=None)
X = df.iloc[:, 0:280].values
if name.endswith("_og"):
y = df.iloc[:, -1].values
else:
y = np.log(0.1 + df.iloc[:, -1].values)
if name == "concrete":
dataset = np.loadtxt(open(os.path.join(base_path, 'Concrete_Data.csv'), "rb"), delimiter=",", skiprows=1)
X = dataset[:, :-1]
y = dataset[:, -1:].squeeze()
if name == "bike":
# https://www.kaggle.com/rajmehra03/bike-sharing-demand-rmsle-0-3194
df = pd.read_csv(os.path.join(base_path, 'bike_train.csv'))
# # seperating season as per values. this is bcoz this will enhance features.
season = pd.get_dummies(df['season'], prefix='season')
df = pd.concat([df, season], axis=1)
# # # same for weather. this is bcoz this will enhance features.
weather = pd.get_dummies(df['weather'], prefix='weather')
df = pd.concat([df, weather], axis=1)
# # # now can drop weather and season.
df.drop(['season', 'weather'], inplace=True, axis=1)
df.head()
df["hour"] = [t.hour for t in pd.DatetimeIndex(df.datetime)]
df["day"] = [t.dayofweek for t in pd.DatetimeIndex(df.datetime)]
df["month"] = [t.month for t in pd.DatetimeIndex(df.datetime)]
df['year'] = [t.year for t in pd.DatetimeIndex(df.datetime)]
df['year'] = df['year'].map({2011: 0, 2012: 1})
df.drop('datetime', axis=1, inplace=True)
df.drop(['casual', 'registered'], axis=1, inplace=True)
df.columns.to_series().groupby(df.dtypes).groups
X = df.drop('count', axis=1).values
y = df['count'].values
if name == "community":
# https://github.com/vbordalo/Communities-Crime/blob/master/Crime_v1.ipynb
attrib = pd.read_csv(os.path.join(base_path, 'communities_attributes.csv'), delim_whitespace=True)
data = pd.read_csv(os.path.join(base_path, 'communities.data'), names=attrib['attributes'])
data = data.drop(columns=['state', 'county',
'community', 'communityname',
'fold'], axis=1)
data = data.replace('?', np.nan)
# Impute mean values for samples with missing values
imputer = SimpleImputer(strategy='mean')
imputer.fit(data[['OtherPerCap']])
data[['OtherPerCap']] = imputer.transform(data[['OtherPerCap']])
data = data.dropna(axis=1)
X = data.iloc[:, 0:100].values
y = data.iloc[:, 100].values
if name == "temperature":
df = pd.read_csv(os.path.join(base_path, "temperature.csv"))
df = df.drop(columns=['station', 'Date', 'Next_Tmax'])
df = df.dropna()
X = df.iloc[:, :-1].values
y = df.iloc[:, -1].values
if name == "synth_het":
torch.manual_seed(5)
x = torch.linspace(0, 1, 2000)
noise = x * torch.rand_like(x) + 0.1
indicator = torch.randint_like(x, 2)
y = torch.where(indicator == 1, noise, -noise)
X = x.unsqueeze(1).numpy()
y = y.numpy()
X = X.astype(np.float32)
y = y.astype(np.float32)
return X, y
class RegressionData(LightningDataModule):
def __init__(
self, name: str, y_scaling: str = "min_max",
batch_size: int = 512, discretize_n_bins: int = None,
train_seed: int = 57771, smart_discretize: bool = True,
):
super().__init__()
x, y = get_dataset(name)
y = y.reshape(y.shape[0], 1)
np.random.seed(112123)
n = y.shape[0]
# train, val, calibrate, val calibration, test
dset_idx = np.random.choice(list(range(5)), p=[0.5, 0.1, 0.1, 0.1, 0.2], size=(n,))
test_idx = dset_idx == 4
# shuffle the train split based on the seed
np.random.seed(train_seed)
dset_idx[~test_idx] = np.random.permutation(dset_idx[~test_idx])
train_idx = dset_idx == 0
val_idx = dset_idx == 1
cal_idx = dset_idx == 2
cal_val_idx = dset_idx == 3
# scaling
y_scaler = {
"min_max": MinMaxScaler(feature_range=(0, 1 - 1e-5)),
"std": StandardScaler(),
}[y_scaling]
y_train = y[train_idx]
y_scaler.fit(y_train)
x_train = x[train_idx]
x_scaler = StandardScaler()
x_scaler.fit(x_train)
x = torch.tensor(x_scaler.transform(x), dtype=torch.float32)
y = torch.tensor(y_scaler.transform(y), dtype=torch.float32)
# discretize for histogram case
self.bins = None
if discretize_n_bins is not None:
transformed_train_y = torch.tensor(y_scaler.transform(y_train))
if smart_discretize:
|
DATASET_DIR = os.path.join(
Path(__file__).parent.parent, "datasets",
)
DATASET_NAMES = {
"star", "bio", "concrete", "bike", "community", "temperature",
"meps_19_og", "meps_20_og", "meps_21_og", "blog_data_og",
"synthetic_bimodal", "synth_het",
}
def add_gamma_studies():
for concentration in [6, 3, 1, 0.5, 0.1, 0.02]:
for negative in [False, True]:
neg_str = "neg" if negative else "pos"
DATASET_NAMES.add(f"synthetic_gamma_{concentration}_{neg_str}")
# add_gamma_studies()
def synthetic_bimodal() -> tuple[np.ndarray, np.ndarray]:
torch.manual_seed(5)
d = 8
n = 2000
x = torch.randn((n, d))
w = torch.randn((d, 1)) / d
w_switch = torch.randn((d, 1)) / d
switch = torch.sigmoid(x @ w_switch)
y = x @ w
y = y + torch.randn_like(y) / 5
y = torch.where(torch.rand((n, 1)) > switch, y + 1, y - 1)
y /= y.abs().max() * 2
y += 0.5
return x.numpy(), y.squeeze().numpy()
@torch.no_grad()
def synthetic_gamma(concentration: float, negative: bool = False) -> tuple[np.ndarray, np.ndarray]:
torch.manual_seed(5)
d = 8
n = 2000
x = torch.randn((n, d))
w = torch.randn((d, 1)) / d
y = x @ w
gamma = Gamma(rate=1.0, concentration=concentration)
samples = gamma.rsample(y.shape)
samples /= samples.std()
y = (y - samples) if negative else (y + samples)
return x.numpy(), y.squeeze().numpy()
def get_dataset(name: str, base_path: str = DATASET_DIR):
"""from https://github.com/yromano/cqr/tree/master/datasets"""
""" Load a dataset
Parameters
----------
name : string, dataset name
base_path : string, e.g. "path/to/datasets/directory/"
Returns
-------
X : features (nXp)
y : labels (n)
"""
assert name in DATASET_NAMES
if name == "synthetic_bimodal":
return synthetic_bimodal()
if "synthetic_gamma" in name:
concentration = float(name.split("_")[-2])
negative = name.endswith("_neg")
X, y = synthetic_gamma(concentration, negative)
if "meps_19" in name:
df = pd.read_csv(os.path.join(base_path, 'meps_19_reg.csv'))
column_names = df.columns
response_name = "UTILIZATION_reg"
column_names = column_names[column_names != response_name]
column_names = column_names[column_names != "Unnamed: 0"]
col_names = ['AGE', 'PCS42', 'MCS42', 'K6SUM42', 'PERWT15F', 'REGION=1',
'REGION=2', 'REGION=3', 'REGION=4', 'SEX=1', 'SEX=2', 'MARRY=1',
'MARRY=2', 'MARRY=3', 'MARRY=4', 'MARRY=5', 'MARRY=6', 'MARRY=7',
'MARRY=8', 'MARRY=9', 'MARRY=10', 'FTSTU=-1', 'FTSTU=1', 'FTSTU=2',
'FTSTU=3', 'ACTDTY=1', 'ACTDTY=2', 'ACTDTY=3', 'ACTDTY=4',
'HONRDC=1', 'HONRDC=2', 'HONRDC=3', 'HONRDC=4', 'RTHLTH=-1',
'RTHLTH=1', 'RTHLTH=2', 'RTHLTH=3', 'RTHLTH=4', 'RTHLTH=5',
'MNHLTH=-1', 'MNHLTH=1', 'MNHLTH=2', 'MNHLTH=3', 'MNHLTH=4',
'MNHLTH=5', 'HIBPDX=-1', 'HIBPDX=1', 'HIBPDX=2', 'CHDDX=-1',
'CHDDX=1', 'CHDDX=2', 'ANGIDX=-1', 'ANGIDX=1', 'ANGIDX=2',
'MIDX=-1', 'MIDX=1', 'MIDX=2', 'OHRTDX=-1', 'OHRTDX=1', 'OHRTDX=2',
'STRKDX=-1', 'STRKDX=1', 'STRKDX=2', 'EMPHDX=-1', 'EMPHDX=1',
'EMPHDX=2', 'CHBRON=-1', 'CHBRON=1', 'CHBRON=2', 'CHOLDX=-1',
'CHOLDX=1', 'CHOLDX=2', 'CANCERDX=-1', 'CANCERDX=1', 'CANCERDX=2',
'DIABDX=-1', 'DIABDX=1', 'DIABDX=2', 'JTPAIN=-1', 'JTPAIN=1',
'JTPAIN=2', 'ARTHDX=-1', 'ARTHDX=1', 'ARTHDX=2', 'ARTHTYPE=-1',
'ARTHTYPE=1', 'ARTHTYPE=2', 'ARTHTYPE=3', 'ASTHDX=1', 'ASTHDX=2',
'ADHDADDX=-1', 'ADHDADDX=1', 'ADHDADDX=2', 'PREGNT=-1', 'PREGNT=1',
'PREGNT=2', 'WLKLIM=-1', 'WLKLIM=1', 'WLKLIM=2', 'ACTLIM=-1',
'ACTLIM=1', 'ACTLIM=2', 'SOCLIM=-1', 'SOCLIM=1', 'SOCLIM=2',
'COGLIM=-1', 'COGLIM=1', 'COGLIM=2', 'DFHEAR42=-1', 'DFHEAR42=1',
'DFHEAR42=2', 'DFSEE42=-1', 'DFSEE42=1', 'DFSEE42=2',
'ADSMOK42=-1', 'ADSMOK42=1', 'ADSMOK42=2', 'PHQ242=-1', 'PHQ242=0',
'PHQ242=1', 'PHQ242=2', 'PHQ242=3', 'PHQ242=4', 'PHQ242=5',
'PHQ242=6', 'EMPST=-1', 'EMPST=1', 'EMPST=2', 'EMPST=3', 'EMPST=4',
'POVCAT=1', 'POVCAT=2', 'POVCAT=3', 'POVCAT=4', 'POVCAT=5',
'INSCOV=1', 'INSCOV=2', 'INSCOV=3', 'RACE']
if name.endswith("_og"):
y = df[response_name].values
else:
y = np.log(1 + df[response_name].values)
X = df[col_names].values
if "meps_20" in name:
df = pd.read_csv(os.path.join(base_path, 'meps_20_reg.csv'))
column_names = df.columns
response_name = "UTILIZATION_reg"
column_names = column_names[column_names != response_name]
column_names = column_names[column_names != "Unnamed: 0"]
col_names = ['AGE', 'PCS42', 'MCS42', 'K6SUM42', 'PERWT15F', 'REGION=1',
'REGION=2', 'REGION=3', 'REGION=4', 'SEX=1', 'SEX=2', 'MARRY=1',
'MARRY=2', 'MARRY=3', 'MARRY=4', 'MARRY=5', 'MARRY=6', 'MARRY=7',
'MARRY=8', 'MARRY=9', 'MARRY=10', 'FTSTU=-1', 'FTSTU=1', 'FTSTU=2',
'FTSTU=3', 'ACTDTY=1', 'ACTDTY=2', 'ACTDTY=3', 'ACTDTY=4',
'HONRDC=1', 'HONRDC=2', 'HONRDC=3', 'HONRDC=4', 'RTHLTH=-1',
'RTHLTH=1', 'RTHLTH=2', 'RTHLTH=3', 'RTHLTH=4', 'RTHLTH=5',
'MNHLTH=-1', 'MNHLTH=1', 'MNHLTH=2', 'MNHLTH=3', 'MNHLTH=4',
'MNHLTH=5', 'HIBPDX=-1', 'HIBPDX=1', 'HIBPDX=2', 'CHDDX=-1',
'CHDDX=1', 'CHDDX=2', 'ANGIDX=-1', 'ANGIDX=1', 'ANGIDX=2',
'MIDX=-1', 'MIDX=1', 'MIDX=2', 'OHRTDX=-1', 'OHRTDX=1', 'OHRTDX=2',
'STRKDX=-1', 'STRKDX=1', 'STRKDX=2', 'EMPHDX=-1', 'EMPHDX=1',
'EMPHDX=2', 'CHBRON=-1', 'CHBRON=1', 'CHBRON=2', 'CHOLDX=-1',
'CHOLDX=1', 'CHOLDX=2', 'CANCERDX=-1', 'CANCERDX=1', 'CANCERDX=2',
'DIABDX=-1', 'DIABDX=1', 'DIABDX=2', 'JTPAIN=-1', 'JTPAIN=1',
'JTPAIN=2', 'ARTHDX=-1', 'ARTHDX=1', 'ARTHDX=2', 'ARTHTYPE=-1',
'ARTHTYPE=1', 'ARTHTYPE=2', 'ARTHTYPE=3', 'ASTHDX=1', 'ASTHDX=2',
'ADHDADDX=-1', 'ADHDADDX=1', 'ADHDADDX=2', 'PREGNT=-1', 'PREGNT=1',
'PREGNT=2', 'WLKLIM=-1', 'WLKLIM=1', 'WLKLIM=2', 'ACTLIM=-1',
'ACTLIM=1', 'ACTLIM=2', 'SOCLIM=-1', 'SOCLIM=1', 'SOCLIM=2',
'COGLIM=-1', 'COGLIM=1', 'COGLIM=2', 'DFHEAR42=-1', 'DFHEAR42=1',
'DFHEAR42=2', 'DFSEE42=-1', 'DFSEE42=1', 'DFSEE42=2',
'ADSMOK42=-1', 'ADSMOK42=1', 'ADSMOK42=2', 'PHQ242=-1', 'PHQ242=0',
'PHQ242=1', 'PHQ242=2', 'PHQ242=3', 'PHQ242=4', 'PHQ242=5',
'PHQ242=6', 'EMPST=-1', 'EMPST=1', 'EMPST=2', 'EMPST=3', 'EMPST=4',
'POVCAT=1', 'POVCAT=2', 'POVCAT=3', 'POVCAT=4', 'POVCAT=5',
'INSCOV=1', 'INSCOV=2', 'INSCOV=3', 'RACE']
if name.endswith("_og"):
y = df[response_name].values
else:
y = np.log(1 + df[response_name].values)
X = df[col_names].values
if "meps_21" in name:
df = pd.read_csv(os.path.join(base_path, 'meps_21_reg.csv'))
column_names = df.columns
response_name = "UTILIZATION_reg"
column_names = column_names[column_names != response_name]
column_names = column_names[column_names != "Unnamed: 0"]
col_names = ['AGE', 'PCS42', 'MCS42', 'K6SUM42', 'PERWT16F', 'REGION=1',
'REGION=2', 'REGION=3', 'REGION=4', 'SEX=1', 'SEX=2', 'MARRY=1',
'MARRY=2', 'MARRY=3', 'MARRY=4', 'MARRY=5', 'MARRY=6', 'MARRY=7',
'MARRY=8', 'MARRY=9', 'MARRY=10', 'FTSTU=-1', 'FTSTU=1', 'FTSTU=2',
'FTSTU=3', 'ACTDTY=1', 'ACTDTY=2', 'ACTDTY=3', 'ACTDTY=4',
'HONRDC=1', 'HONRDC=2', 'HONRDC=3', 'HONRDC=4', 'RTHLTH=-1',
'RTHLTH=1', 'RTHLTH=2', 'RTHLTH=3', 'RTHLTH=4', 'RTHLTH=5',
'MNHLTH=-1', 'MNHLTH=1', 'MNHLTH=2', 'MNHLTH=3', 'MNHLTH=4',
'MNHLTH=5', 'HIBPDX=-1', 'HIBPDX=1', 'HIBPDX=2', 'CHDDX=-1',
'CHDDX=1', 'CHDDX=2', 'ANGIDX=-1', 'ANGIDX=1', 'ANGIDX=2',
'MIDX=-1', 'MIDX=1', 'MIDX=2', 'OHRTDX=-1', 'OHRTDX=1', 'OHRTDX=2',
'STRKDX=-1', 'STRKDX=1', 'STRKDX=2', 'EMPHDX=-1', 'EMPHDX=1',
'EMPHDX=2', 'CHBRON=-1', 'CHBRON=1', 'CHBRON=2', 'CHOLDX=-1',
'CHOLDX=1', 'CHOLDX=2', 'CANCERDX=-1', 'CANCERDX=1', 'CANCERDX=2',
'DIABDX=-1', 'DIABDX=1', 'DIABDX=2', 'JTPAIN=-1', 'JTPAIN=1',
'JTPAIN=2', 'ARTHDX=-1', 'ARTHDX=1', 'ARTHDX=2', 'ARTHTYPE=-1',
'ARTHTYPE=1', 'ARTHTYPE=2', 'ARTHTYPE=3', 'ASTHDX=1', 'ASTHDX=2',
'ADHDADDX=-1', 'ADHDADDX=1', 'ADHDADDX=2', 'PREGNT=-1', 'PREGNT=1',
'PREGNT=2', 'WLKLIM=-1', 'WLKLIM=1', 'WLKLIM=2', 'ACTLIM=-1',
'ACTLIM=1', 'ACTLIM=2', 'SOCLIM=-1', 'SOCLIM=1', 'SOCLIM=2',
'COGLIM=-1', 'COGLIM=1', 'COGLIM=2', 'DFHEAR42=-1', 'DFHEAR42=1',
'DFHEAR42=2', 'DFSEE42=-1', 'DFSEE42=1', 'DFSEE42=2',
'ADSMOK42=-1', 'ADSMOK42=1', 'ADSMOK42=2', 'PHQ242=-1', 'PHQ242=0',
'PHQ242=1', 'PHQ242=2', 'PHQ242=3', 'PHQ242=4', 'PHQ242=5',
'PHQ242=6', 'EMPST=-1', 'EMPST=1', 'EMPST=2', 'EMPST=3', 'EMPST=4',
'POVCAT=1', 'POVCAT=2', 'POVCAT=3', 'POVCAT=4', 'POVCAT=5',
'INSCOV=1', 'INSCOV=2', 'INSCOV=3', 'RACE']
if name.endswith("_og"):
y = df[response_name].values
else:
y = np.log(1 + df[response_name].values)
X = df[col_names].values
if name == "star":
df = pd.read_csv(os.path.join(base_path,'STAR.csv'))
df.loc[df['gender'] == 'female', 'gender'] = 0
df.loc[df['gender'] == 'male', 'gender'] = 1
df.loc[df['ethnicity'] == 'cauc', 'ethnicity'] = 0
df.loc[df['ethnicity'] == 'afam', 'ethnicity'] = 1
df.loc[df['ethnicity'] == 'asian', 'ethnicity'] = 2
df.loc[df['ethnicity'] == 'hispanic', 'ethnicity'] = 3
df.loc[df['ethnicity'] == 'amindian', 'ethnicity'] = 4
df.loc[df['ethnicity'] == 'other', 'ethnicity'] = 5
df.loc[df['stark'] == 'regular', 'stark'] = 0
df.loc[df['stark'] == 'small', 'stark'] = 1
df.loc[df['stark'] == 'regular+aide', 'stark'] = 2
df.loc[df['star1'] == 'regular', 'star1'] = 0
df.loc[df['star1'] == 'small', 'star1'] = 1
df.loc[df['star1'] == 'regular+aide', 'star1'] = 2
df.loc[df['star2'] == 'regular', 'star2'] = 0
df.loc[df['star2'] == 'small', 'star2'] = 1
df.loc[df['star2'] == 'regular+aide', 'star2'] = 2
df.loc[df['star3'] == 'regular', 'star3'] = 0
df.loc[df['star3'] == 'small', 'star3'] = 1
df.loc[df['star3'] == 'regular+aide', 'star3'] = 2
df.loc[df['lunchk'] == 'free', 'lunchk'] = 0
df.loc[df['lunchk'] == 'non-free', 'lunchk'] = 1
df.loc[df['lunch1'] == 'free', 'lunch1'] = 0
df.loc[df['lunch1'] == 'non-free', 'lunch1'] = 1
df.loc[df['lunch2'] == 'free', 'lunch2'] = 0
df.loc[df['lunch2'] == 'non-free', 'lunch2'] = 1
df.loc[df['lunch3'] == 'free', 'lunch3'] = 0
df.loc[df['lunch3'] == 'non-free', 'lunch3'] = 1
df.loc[df['schoolk'] == 'inner-city', 'schoolk'] = 0
df.loc[df['schoolk'] == 'suburban', 'schoolk'] = 1
df.loc[df['schoolk'] == 'rural', 'schoolk'] = 2
df.loc[df['schoolk'] == 'urban', 'schoolk'] = 3
df.loc[df['school1'] == 'inner-city', 'school1'] = 0
df.loc[df['school1'] == 'suburban', 'school1'] = 1
df.loc[df['school1'] == 'rural', 'school1'] = 2
df.loc[df['school1'] == 'urban', 'school1'] = 3
df.loc[df['school2'] == 'inner-city', 'school2'] = 0
df.loc[df['school2'] == 'suburban', 'school2'] = 1
df.loc[df['school2'] == 'rural', 'school2'] = 2
df.loc[df['school2'] == 'urban', 'school2'] = 3
df.loc[df['school3'] == 'inner-city', 'school3'] = 0
df.loc[df['school3'] == 'suburban', 'school3'] = 1
df.loc[df['school3'] == 'rural', 'school3'] = 2
df.loc[df['school3'] == 'urban', 'school3'] = 3
df.loc[df['degreek'] == 'bachelor', 'degreek'] = 0
df.loc[df['degreek'] == 'master', 'degreek'] = 1
df.loc[df['degreek'] == 'specialist', 'degreek'] = 2
df.loc[df['degreek'] == 'master+', 'degreek'] = 3
df.loc[df['degree1'] == 'bachelor', 'degree1'] = 0
df.loc[df['degree1'] == 'master', 'degree1'] = 1
df.loc[df['degree1'] == 'specialist', 'degree1'] = 2
df.loc[df['degree1'] == 'phd', 'degree1'] = 3
df.loc[df['degree2'] == 'bachelor', 'degree2'] = 0
df.loc[df['degree2'] == 'master', 'degree2'] = 1
df.loc[df['degree2'] == 'specialist', 'degree2'] = 2
df.loc[df['degree2'] == 'phd', 'degree2'] = 3
df.loc[df['degree3'] == 'bachelor', 'degree3'] = 0
df.loc[df['degree3'] == 'master', 'degree3'] = 1
df.loc[df['degree3'] == 'specialist', 'degree3'] = 2
df.loc[df['degree3'] == 'phd', 'degree3'] = 3
df.loc[df['ladderk'] == 'level1', 'ladderk'] = 0
df.loc[df['ladderk'] == 'level2', 'ladderk'] = 1
df.loc[df['ladderk'] == 'level3', 'ladderk'] = 2
df.loc[df['ladderk'] == 'apprentice', 'ladderk'] = 3
df.loc[df['ladderk'] == 'probation', 'ladderk'] = 4
df.loc[df['ladderk'] == 'pending', 'ladderk'] = 5
df.loc[df['ladderk'] == 'notladder', 'ladderk'] = 6
df.loc[df['ladder1'] == 'level1', 'ladder1'] = 0
df.loc[df['ladder1'] == 'level2', 'ladder1'] = 1
df.loc[df['ladder1'] == 'level3', 'ladder1'] = 2
df.loc[df['ladder1'] == 'apprentice', 'ladder1'] = 3
df.loc[df['ladder1'] == 'probation', 'ladder1'] = 4
df.loc[df['ladder1'] == 'noladder', 'ladder1'] = 5
df.loc[df['ladder1'] == 'notladder', 'ladder1'] = 6
df.loc[df['ladder2'] == 'level1', 'ladder2'] = 0
df.loc[df['ladder2'] == 'level2', 'ladder2'] = 1
df.loc[df['ladder2'] == 'level3', 'ladder2'] = 2
df.loc[df['ladder2'] == 'apprentice', 'ladder2'] = 3
df.loc[df['ladder2'] == 'probation', 'ladder2'] = 4
df.loc[df['ladder2'] == 'noladder', 'ladder2'] = 5
df.loc[df['ladder2'] == 'notladder', 'ladder2'] = 6
df.loc[df['ladder3'] == 'level1', 'ladder3'] = 0
df.loc[df['ladder3'] == 'level2', 'ladder3'] = 1
df.loc[df['ladder3'] == 'level3', 'ladder3'] = 2
df.loc[df['ladder3'] == 'apprentice', 'ladder3'] = 3
df.loc[df['ladder3'] == 'probation', 'ladder3'] = 4
df.loc[df['ladder3'] == 'noladder', 'ladder3'] = 5
df.loc[df['ladder3'] == 'notladder', 'ladder3'] = 6
df.loc[df['tethnicityk'] == 'cauc', 'tethnicityk'] = 0
df.loc[df['tethnicityk'] == 'afam', 'tethnicityk'] = 1
df.loc[df['tethnicity1'] == 'cauc', 'tethnicity1'] = 0
df.loc[df['tethnicity1'] == 'afam', 'tethnicity1'] = 1
df.loc[df['tethnicity2'] == 'cauc', 'tethnicity2'] = 0
df.loc[df['tethnicity2'] == 'afam', 'tethnicity2'] = 1
df.loc[df['tethnicity3'] == 'cauc', 'tethnicity3'] = 0
df.loc[df['tethnicity3'] == 'afam', 'tethnicity3'] = 1
df.loc[df['tethnicity3'] == 'asian', 'tethnicity3'] = 2
df = df.dropna()
grade = df["readk"] + df["read1"] + df["read2"] + df["read3"]
grade += df["mathk"] + df["math1"] + df["math2"] + df["math3"]
names = df.columns
target_names = names[8:16]
data_names = np.concatenate((names[0:8], names[17:]))
X = df.loc[:, data_names].values
y = grade.values
if name == "facebook_1":
df = pd.read_csv(base_path + 'facebook/Features_Variant_1.csv')
y = df.iloc[:, 53].values
X = df.iloc[:, 0:53].values
if name == "facebook_2":
df = pd.read_csv(base_path + 'facebook/Features_Variant_2.csv')
y = df.iloc[:, 53].values
X = df.iloc[:, 0:53].values
if name == "bio":
# https://github.com/joefavergel/TertiaryPhysicochemicalProperties/blob/master/RMSD-ProteinTertiaryStructures.ipynb
df = pd.read_csv(os.path.join(base_path, 'CASP.csv'))
y = df.iloc[:, 0].values
X = df.iloc[:, 1:].values
if 'blog_data' in name:
# https://github.com/xinbinhuang/feature-selection_blogfeedback
df = pd.read_csv(os.path.join(base_path, 'blogData_train.csv'), header=None)
X = df.iloc[:, 0:280].values
if name.endswith("_og"):
y = df.iloc[:, -1].values
else:
y = np.log(0.1 + df.iloc[:, -1].values)
if name == "concrete":
dataset = np.loadtxt(open(os.path.join(base_path, 'Concrete_Data.csv'), "rb"), delimiter=",", skiprows=1)
X = dataset[:, :-1]
y = dataset[:, -1:].squeeze()
if name == "bike":
# https://www.kaggle.com/rajmehra03/bike-sharing-demand-rmsle-0-3194
df = pd.read_csv(os.path.join(base_path, 'bike_train.csv'))
# # seperating season as per values. this is bcoz this will enhance features.
season = pd.get_dummies(df['season'], prefix='season')
df = pd.concat([df, season], axis=1)
# # # same for weather. this is bcoz this will enhance features.
weather = pd.get_dummies(df['weather'], prefix='weather')
df = pd.concat([df, weather], axis=1)
# # # now can drop weather and season.
df.drop(['season', 'weather'], inplace=True, axis=1)
df.head()
df["hour"] = [t.hour for t in pd.DatetimeIndex(df.datetime)]
df["day"] = [t.dayofweek for t in pd.DatetimeIndex(df.datetime)]
df["month"] = [t.month for t in pd.DatetimeIndex(df.datetime)]
df['year'] = [t.year for t in pd.DatetimeIndex(df.datetime)]
df['year'] = df['year'].map({2011: 0, 2012: 1})
df.drop('datetime', axis=1, inplace=True)
df.drop(['casual', 'registered'], axis=1, inplace=True)
df.columns.to_series().groupby(df.dtypes).groups
X = df.drop('count', axis=1).values
y = df['count'].values
if name == "community":
# https://github.com/vbordalo/Communities-Crime/blob/master/Crime_v1.ipynb
attrib = pd.read_csv(os.path.join(base_path, 'communities_attributes.csv'), delim_whitespace=True)
data = pd.read_csv(os.path.join(base_path, 'communities.data'), names=attrib['attributes'])
data = data.drop(columns=['state', 'county',
'community', 'communityname',
'fold'], axis=1)
data = data.replace('?', np.nan)
# Impute mean values for samples with missing values
imputer = SimpleImputer(strategy='mean')
imputer.fit(data[['OtherPerCap']])
data[['OtherPerCap']] = imputer.transform(data[['OtherPerCap']])
data = data.dropna(axis=1)
X = data.iloc[:, 0:100].values
y = data.iloc[:, 100].values
if name == "temperature":
df = pd.read_csv(os.path.join(base_path, "temperature.csv"))
df = df.drop(columns=['station', 'Date', 'Next_Tmax'])
df = df.dropna()
X = df.iloc[:, :-1].values
y = df.iloc[:, -1].values
if name == "synth_het":
torch.manual_seed(5)
x = torch.linspace(0, 1, 2000)
noise = x * torch.rand_like(x) + 0.1
indicator = torch.randint_like(x, 2)
y = torch.where(indicator == 1, noise, -noise)
X = x.unsqueeze(1).numpy()
y = y.numpy()
X = X.astype(np.float32)
y = y.astype(np.float32)
return X, y
class RegressionData(LightningDataModule):
def __init__(
self, name: str, y_scaling: str = "min_max",
batch_size: int = 512, discretize_n_bins: int = None,
train_seed: int = 57771, smart_discretize: bool = True,
):
super().__init__()
x, y = get_dataset(name)
y = y.reshape(y.shape[0], 1)
np.random.seed(112123)
n = y.shape[0]
# train, val, calibrate, val calibration, test
dset_idx = np.random.choice(list(range(5)), p=[0.5, 0.1, 0.1, 0.1, 0.2], size=(n,))
test_idx = dset_idx == 4
# shuffle the train split based on the seed
np.random.seed(train_seed)
dset_idx[~test_idx] = np.random.permutation(dset_idx[~test_idx])
train_idx = dset_idx == 0
val_idx = dset_idx == 1
cal_idx = dset_idx == 2
cal_val_idx = dset_idx == 3
# scaling
y_scaler = {
"min_max": MinMaxScaler(feature_range=(0, 1 - 1e-5)),
"std": StandardScaler(),
}[y_scaling]
y_train = y[train_idx]
y_scaler.fit(y_train)
x_train = x[train_idx]
x_scaler = StandardScaler()
x_scaler.fit(x_train)
x = torch.tensor(x_scaler.transform(x), dtype=torch.float32)
y = torch.tensor(y_scaler.transform(y), dtype=torch.float32)
# discretize for histogram case
self.bins = None
if discretize_n_bins is not None:
transformed_train_y = torch.tensor(y_scaler.transform(y_train))
if smart_discretize: | self.bins = select_bins(transformed_train_y, discretize_n_bins) | 0 | 2023-11-01 18:04:29+00:00 | 8k |
nik-sm/com-hom-emg | com_hom_emg/model.py | [
{
"identifier": "EmbeddingNetwork",
"path": "com_hom_emg/basic_arch.py",
"snippet": "class EmbeddingNetwork(nn.Module):\n # TODO - design the structure of this model.\n # - consider taking ideas from transformer encoders or other domains.\n # - search for papers that extract useful features fro... | from copy import deepcopy
from itertools import chain, product
from pathlib import Path
from loguru import logger
from pytorch_lightning.loggers import TensorBoardLogger
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from torchmetrics.functional import accuracy
from vit_pytorch.simple_vit_1d import SimpleViT
from .basic_arch import EmbeddingNetwork, UnitNormLayer
from .conformer import Conformer
from .data import DataModule, get_per_subj_data, shuffle_together
from .loss import TripletCentroids, TripletLoss, TripletLossHardMining
from .scoring import get_combo_conf_mat
from .utils import PROJECT_PATH
import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn.functional as F | 6,523 | nn.Linear(feature_dim, feature_dim),
)
def forward(self, x1, x2, y1, y2):
y1 = F.one_hot(y1, num_classes=5)
y2 = F.one_hot(y2, num_classes=5)
avg = (x1 + x2) / 2
mlp_out = self.layer(torch.cat((x1, x2, y1, y2), dim=-1))
return avg + mlp_out
class CombinePairs(nn.Module):
def __init__(self, combine_fn: nn.Module, normalized_features: bool):
super().__init__()
self.normalized_features = normalized_features
self.combine_fn = combine_fn
def forward(self, x, y):
# Expects data and labels from single gestures
# Labels have the form (direction, modifier)
# where direction in 0, 1, 2, 3 is active, and 4 is NoDir
# same for modifier
device = x.device
dir_idx = y[:, 1] == 4 # When modifier is NoMod
mod_idx = y[:, 0] == 4 # When direction is NoDir
x_dir = x[dir_idx]
y_dir = y[dir_idx, 0]
x_mod = x[mod_idx]
y_mod = y[mod_idx, 1]
if len(x_dir) * len(x_mod) <= 1:
raise InsufficientDataError()
all_x1, all_x2, all_y1, all_y2 = [], [], [], []
for (x1, y1), (x2, y2) in product(zip(x_dir, y_dir), zip(x_mod, y_mod)):
all_x1.append(x1)
all_x2.append(x2)
all_y1.append(y1)
all_y2.append(y2)
all_x1 = torch.stack(all_x1)
all_x2 = torch.stack(all_x2)
all_y1 = torch.stack(all_y1).to(device)
all_y2 = torch.stack(all_y2).to(device)
x_aug = self.combine_fn(all_x1, all_x2, all_y1, all_y2)
y_aug = torch.stack((all_y1, all_y2), dim=-1)
if self.normalized_features:
x_aug = F.normalize(x_aug, dim=-1)
return x_aug, y_aug
def str2bool(s):
if s.lower() in ("yes", "true", "t", "y", "1"):
return True
elif s.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ValueError("Boolean value expected.")
def get_noise(x, desired_SNR):
x_std = x.std()
# SNR = 10 * log10 ( (signal_power) / (noise_power) )
# where signal_power = data_std**2 and noise_power = noise_std**2,
# and SNR is passed as argparse param
noise_std = x_std / (10 ** (desired_SNR / 20))
return torch.randn_like(x) * noise_std
class LearnedEmbedding(pl.LightningModule):
@staticmethod
def add_argparse_args(parent_parser):
parser = parent_parser.add_argument_group("LearnedEmbedding")
parser.add_argument("--encoder_arch", choices=["basic", "conformer", "vit", "identity"], default="basic")
parser.add_argument("--clf_arch", choices=["small", "large"], default="small")
parser.add_argument("--feature_dim", type=int, default=64)
# Note that with normalized features, we might need to re-normalized after making combinations
parser.add_argument("--data_noise_SNR", type=float, default=None, help="Desired SNR in dB. None for no noise.")
parser.add_argument(
"--feature_noise_SNR", type=float, default=None, help="Desired SNR in dB. None for no noise."
)
parser.add_argument("--normalized_features", type=str2bool, default=False)
parser.add_argument("--feature_combine_type", choices=["avg", "mlp"], default="avg")
parser.add_argument("--lr", type=float, default=3e-4)
parser.add_argument("--lr_decay", type=float, default=1.0)
parser.add_argument("--linearity_loss_coeff", type=float, default=1.0)
parser.add_argument("--real_CE_loss_coeff", type=float, default=1.0)
parser.add_argument("--fake_CE_loss_coeff", type=float, default=1.0)
parser.add_argument("--loss_type", choices=["triplet", "triplet-centroids", "triplet-hard"], default="triplet")
parser.add_argument("--margin", type=float, default=1.0)
parser.add_argument("--centroids_momentum", type=float, default=0.75, help="For `triplet-centroids` loss")
parser.add_argument("--triplets_per_item", type=int, default=1, help="For `triplet` loss")
parser = parent_parser.add_argument_group("LearnedEmbedding - Fine-tuning")
parser.add_argument("--finetune_steps", type=int, default=10_000)
parser.add_argument("--finetune_lr", type=float, default=3e-5)
parser.add_argument("--finetune_lr_decay", type=float, default=1.0)
parser.add_argument("--finetune_batch_size", type=float, default=32)
parser.add_argument("--finetune_test_frac", type=float, default=0.2)
parser.add_argument("--finetune_n_aug_per_class", type=int, default=-1, help="-1 for all, positive for N")
return parent_parser
def __init__(self, **kwargs):
super().__init__()
self.save_hyperparameters() # Access arg from command line "--arg1" at "self.hparams.arg1", etc
# NOTE - self.example_input_array - magic pytorch lightning variable for tboard log_graph
self.example_input_array = torch.ones(1, self.hparams.input_channels, self.hparams.input_time_length)
if self.hparams.encoder_arch == "basic":
self.embedding = EmbeddingNetwork(
input_channels=self.hparams.input_channels,
input_time_length=self.hparams.input_time_length,
feature_dim=self.hparams.feature_dim,
normalized_features=self.hparams.normalized_features,
use_preprocessed_data=self.hparams.use_preprocessed_data,
)
elif self.hparams.encoder_arch == "conformer":
|
class InsufficientDataError(Exception):
...
class DummyIdentity(nn.Module):
# A null embedding. Has a single (unused) parameter to easily use in the same pl training loop
def __init__(self):
super().__init__()
self.param = nn.Parameter(torch.tensor(0.0))
def forward(self, x):
return x.flatten(1)
class MLPClf(nn.Sequential):
def __init__(self, input_dim, output_dim):
layers = [
nn.Linear(input_dim, input_dim * 2, bias=False),
nn.BatchNorm1d(input_dim * 2),
nn.ReLU(inplace=True),
nn.Dropout1d(0.05),
nn.Linear(input_dim * 2, input_dim, bias=False),
nn.BatchNorm1d(input_dim),
nn.ReLU(inplace=True),
nn.Dropout1d(0.05),
nn.Linear(input_dim, output_dim),
]
super().__init__(*layers)
class Avg(nn.Module):
def forward(self, x1, x2, _y1, _y2):
# Note that vector average is elementwise; thus we don't care
# if we have a pair of single vectors or a pair of batches
return (x1 + x2) / 2
class MLPCombine(nn.Module):
def __init__(self, feature_dim):
super().__init__()
self.layer = nn.Sequential(
# Input takes 2 feature vectors, and 2 labels (each one-hot with 5 classes)
nn.Linear(feature_dim * 2 + 5 * 2, feature_dim, bias=False),
nn.BatchNorm1d(feature_dim),
nn.ReLU(inplace=True),
nn.Dropout1d(0.05),
nn.Linear(feature_dim, feature_dim, bias=False),
nn.BatchNorm1d(feature_dim),
nn.ReLU(inplace=True),
nn.Dropout1d(0.05),
nn.Linear(feature_dim, feature_dim),
)
def forward(self, x1, x2, y1, y2):
y1 = F.one_hot(y1, num_classes=5)
y2 = F.one_hot(y2, num_classes=5)
avg = (x1 + x2) / 2
mlp_out = self.layer(torch.cat((x1, x2, y1, y2), dim=-1))
return avg + mlp_out
class CombinePairs(nn.Module):
def __init__(self, combine_fn: nn.Module, normalized_features: bool):
super().__init__()
self.normalized_features = normalized_features
self.combine_fn = combine_fn
def forward(self, x, y):
# Expects data and labels from single gestures
# Labels have the form (direction, modifier)
# where direction in 0, 1, 2, 3 is active, and 4 is NoDir
# same for modifier
device = x.device
dir_idx = y[:, 1] == 4 # When modifier is NoMod
mod_idx = y[:, 0] == 4 # When direction is NoDir
x_dir = x[dir_idx]
y_dir = y[dir_idx, 0]
x_mod = x[mod_idx]
y_mod = y[mod_idx, 1]
if len(x_dir) * len(x_mod) <= 1:
raise InsufficientDataError()
all_x1, all_x2, all_y1, all_y2 = [], [], [], []
for (x1, y1), (x2, y2) in product(zip(x_dir, y_dir), zip(x_mod, y_mod)):
all_x1.append(x1)
all_x2.append(x2)
all_y1.append(y1)
all_y2.append(y2)
all_x1 = torch.stack(all_x1)
all_x2 = torch.stack(all_x2)
all_y1 = torch.stack(all_y1).to(device)
all_y2 = torch.stack(all_y2).to(device)
x_aug = self.combine_fn(all_x1, all_x2, all_y1, all_y2)
y_aug = torch.stack((all_y1, all_y2), dim=-1)
if self.normalized_features:
x_aug = F.normalize(x_aug, dim=-1)
return x_aug, y_aug
def str2bool(s):
if s.lower() in ("yes", "true", "t", "y", "1"):
return True
elif s.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ValueError("Boolean value expected.")
def get_noise(x, desired_SNR):
x_std = x.std()
# SNR = 10 * log10 ( (signal_power) / (noise_power) )
# where signal_power = data_std**2 and noise_power = noise_std**2,
# and SNR is passed as argparse param
noise_std = x_std / (10 ** (desired_SNR / 20))
return torch.randn_like(x) * noise_std
class LearnedEmbedding(pl.LightningModule):
@staticmethod
def add_argparse_args(parent_parser):
parser = parent_parser.add_argument_group("LearnedEmbedding")
parser.add_argument("--encoder_arch", choices=["basic", "conformer", "vit", "identity"], default="basic")
parser.add_argument("--clf_arch", choices=["small", "large"], default="small")
parser.add_argument("--feature_dim", type=int, default=64)
# Note that with normalized features, we might need to re-normalized after making combinations
parser.add_argument("--data_noise_SNR", type=float, default=None, help="Desired SNR in dB. None for no noise.")
parser.add_argument(
"--feature_noise_SNR", type=float, default=None, help="Desired SNR in dB. None for no noise."
)
parser.add_argument("--normalized_features", type=str2bool, default=False)
parser.add_argument("--feature_combine_type", choices=["avg", "mlp"], default="avg")
parser.add_argument("--lr", type=float, default=3e-4)
parser.add_argument("--lr_decay", type=float, default=1.0)
parser.add_argument("--linearity_loss_coeff", type=float, default=1.0)
parser.add_argument("--real_CE_loss_coeff", type=float, default=1.0)
parser.add_argument("--fake_CE_loss_coeff", type=float, default=1.0)
parser.add_argument("--loss_type", choices=["triplet", "triplet-centroids", "triplet-hard"], default="triplet")
parser.add_argument("--margin", type=float, default=1.0)
parser.add_argument("--centroids_momentum", type=float, default=0.75, help="For `triplet-centroids` loss")
parser.add_argument("--triplets_per_item", type=int, default=1, help="For `triplet` loss")
parser = parent_parser.add_argument_group("LearnedEmbedding - Fine-tuning")
parser.add_argument("--finetune_steps", type=int, default=10_000)
parser.add_argument("--finetune_lr", type=float, default=3e-5)
parser.add_argument("--finetune_lr_decay", type=float, default=1.0)
parser.add_argument("--finetune_batch_size", type=float, default=32)
parser.add_argument("--finetune_test_frac", type=float, default=0.2)
parser.add_argument("--finetune_n_aug_per_class", type=int, default=-1, help="-1 for all, positive for N")
return parent_parser
def __init__(self, **kwargs):
super().__init__()
self.save_hyperparameters() # Access arg from command line "--arg1" at "self.hparams.arg1", etc
# NOTE - self.example_input_array - magic pytorch lightning variable for tboard log_graph
self.example_input_array = torch.ones(1, self.hparams.input_channels, self.hparams.input_time_length)
if self.hparams.encoder_arch == "basic":
self.embedding = EmbeddingNetwork(
input_channels=self.hparams.input_channels,
input_time_length=self.hparams.input_time_length,
feature_dim=self.hparams.feature_dim,
normalized_features=self.hparams.normalized_features,
use_preprocessed_data=self.hparams.use_preprocessed_data,
)
elif self.hparams.encoder_arch == "conformer": | self.embedding = Conformer( | 2 | 2023-11-01 21:12:05+00:00 | 8k |
openai/weak-to-strong | train_simple.py | [
{
"identifier": "get_tokenizer",
"path": "weak_to_strong/common.py",
"snippet": "def get_tokenizer(model_name: str):\n \"\"\"\n This function returns a tokenizer based on the model name.\n\n Parameters:\n model_name: The name of the model for which the tokenizer is needed.\n\n Returns:\n ... | import json
import os
import random
import subprocess
import fire
import numpy as np
import torch
import weak_to_strong.logger as logger
from typing import Dict, List, Optional
from datasets import load_dataset, load_from_disk
from weak_to_strong.common import get_tokenizer
from weak_to_strong.datasets import (VALID_DATASETS, load_dataset,
tokenize_dataset)
from weak_to_strong.loss import logconf_loss_fn, product_loss_fn, xent_loss
from weak_to_strong.train import ModelConfig, train_and_save_model | 5,115 | optim: Optional[str] = None,
epochs: int = 2,
force_retrain: bool = False,
seed: int = 0,
minibatch_size_per_device: Optional[float] = None,
train_with_dropout: bool = False,
results_folder: str = "/tmp/results",
linear_probe: bool = False,
lr_schedule: str = "cosine_anneal",
# Note: you can pass either weak_model_size or weak_labels_path. If you pass
# weak_model_size, we will guess the path to the weak labels based on the weak
# model. If you pass weak_labels_path, we will use that path instead.
# If you pass neither, we will train on ground truth.
weak_model_size: Optional[str] = None,
weak_labels_path: Optional[str] = None,
sweep_subfolder: str = "default",
# Set to a very large value so that by default we don't do any intermediate evals but
# still do final evals (which requires eval_every to be set to a non-zero, non-None value)
eval_every: int = 1000000,
sync_command: Optional[str] = None,
):
# this is per device!
if minibatch_size_per_device is None:
minibatch_size_per_device = 1
assert ds_name in VALID_DATASETS, f"Unknown dataset {ds_name} not in {VALID_DATASETS}"
assert (
weak_model_size is None or weak_labels_path is None
), "Can't pass both weak_model_size and weak_labels_path"
model_config = MODELS_DICT[model_size]
use_default_lr = False
if lr is None:
assert (
batch_size == 32
), "Learning rates were tuned on batch size 32, you probably want to sweep LR if you are tuning batch size"
lr = model_config.default_lr
use_default_lr = True
if optim is None:
optim = model_config.default_optimizer
# The commented out terms are the ones that should not change final results
config = {
"batch_size": batch_size,
"max_ctx": max_ctx,
"ds_name": ds_name,
"loss": loss,
"n_docs": n_docs,
"n_test_docs": n_test_docs,
"model_size": model_size,
"lr": lr,
"optim": optim,
"epochs": epochs,
# "force_retrain": force_retrain,
"seed": seed,
# "minibatch_size_per_device": minibatch_size_per_device,
"train_with_dropout": train_with_dropout,
# "results_folder": results_folder,
"linear_probe": linear_probe,
"lr_schedule": lr_schedule,
"eval_every": eval_every,
# "sweep_subfolder": sweep_subfolder,
}
if weak_model_size is not None:
weak_model_config = config.copy()
weak_model_config["model_size"] = weak_model_size
weak_model_config["loss"] = "xent"
if use_default_lr:
weak_model_config["lr"] = MODELS_DICT[weak_model_size].default_lr
weak_model_config_name = get_config_foldername(weak_model_config)
weak_labels_path = (
results_folder + "/" + sweep_subfolder + "/" + weak_model_config_name + "/weak_labels"
)
eval_batch_size = model_config.eval_batch_size
random.seed(seed)
# Load dataset
dataset = load_dataset(ds_name, seed=seed, split_sizes=dict(train=n_docs, test=n_test_docs))
# Split the training dataset in half
train_dataset, test_ds = dataset["train"], dataset["test"]
if weak_labels_path is None:
split_data = train_dataset.train_test_split(test_size=0.5, seed=seed)
train1_ds, train2_ds = split_data["train"], split_data["test"]
print("len(train1):", len(train1_ds), "len(train2):", len(train2_ds))
config_name = get_config_foldername(config)
else:
if not weak_labels_path.endswith("weak_labels"):
weak_labels_path = weak_labels_path + "/weak_labels"
if sync_command is not None:
sync_command_list = sync_command.split(" ")
sync_command_list.extend(
["download", weak_labels_path.replace("/weak_labels", ""), results_folder]
)
print(f"Running sync command: {' '.join(sync_command_list)}")
result = subprocess.run(sync_command_list, check=True)
if result.returncode != 0:
raise RuntimeError(f"Sync command failed with return code {result.returncode}")
train1_ds = load_from_disk(weak_labels_path)
train2_ds = None
weak_model_config = json.load(open(weak_labels_path.replace("weak_labels", "config.json")))
config["weak_model_size"] = weak_model_config["model_size"]
config_name = get_config_foldername(config)
config["weak_model"] = weak_model_config
save_path = os.path.join(results_folder, sweep_subfolder, config_name)
logger.configure(
name="{sweep_subfolder}_{config_name}_{datetime_now}",
save_path=save_path,
sweep_subfolder=sweep_subfolder,
config_name=config_name,
)
# Tokenize datasets
tokenizer = get_tokenizer(model_config.name)
|
# NOTE learning rates are not particularly tuned, work somewhat reasonably at train batch size 32
MODEL_CONFIGS = [
ModelConfig(
name="gpt2",
default_lr=5e-5,
eval_batch_size=32,
),
ModelConfig(
name="gpt2-medium",
default_lr=5e-5,
eval_batch_size=32,
),
ModelConfig(
name="gpt2-large",
default_lr=1e-5,
eval_batch_size=32,
),
ModelConfig(
name="gpt2-xl",
default_lr=1e-5,
eval_batch_size=2,
gradient_checkpointing=True,
# Should use model_parallel on V100s (note: ironically if you have a single V100 it should run,
# but if you have multiple it won't run without model_parallel because of the overhead of data
# parallel training).
model_parallel=(
torch.cuda.get_device_properties(0).total_memory < 35e9
and torch.cuda.device_count() > 1
),
),
ModelConfig(
name="Qwen/Qwen-1_8B",
default_lr=1e-5,
eval_batch_size=2,
gradient_checkpointing=True,
model_parallel=(
torch.cuda.get_device_properties(0).total_memory < 35e9
and torch.cuda.device_count() > 1
),
custom_kwargs={
"trust_remote_code": True,
"bf16": torch.cuda.is_bf16_supported(),
"fp32": not torch.cuda.is_bf16_supported(),
"revision": "5fde88dff770a7d036847211f5d9d9705f0caa69",
},
),
ModelConfig(
name="Qwen/Qwen-7B",
default_lr=1e-5,
eval_batch_size=2,
gradient_checkpointing=True,
model_parallel=True,
# note: you will probably not be able to run this without many gpus
custom_kwargs={
"trust_remote_code": True,
"bf16": torch.cuda.is_bf16_supported(),
"fp32": not torch.cuda.is_bf16_supported(),
"revision": "d4efd21e866b9cb3466cb65b963933f5e98016d1",
},
),
ModelConfig(
name="Qwen/Qwen-14B",
default_lr=1e-5,
eval_batch_size=2,
gradient_checkpointing=True,
model_parallel=True,
# note: you will probably not be able to run this bf16 support and without many gpus
custom_kwargs={
"trust_remote_code": True,
"bf16": torch.cuda.is_bf16_supported(),
"fp32": not torch.cuda.is_bf16_supported(),
"revision": "8be2854218fea9054331e217fd26a06f3fd02004",
},
),
ModelConfig(
name="Qwen/Qwen-72B",
default_lr=1e-5,
eval_batch_size=1,
gradient_checkpointing=True,
model_parallel=True,
# note: you will probably not be able to run this without bf16 support and many gpus
custom_kwargs={
"trust_remote_code": True,
"bf16": torch.cuda.is_bf16_supported(),
"fp32": not torch.cuda.is_bf16_supported(),
"revision": "fec78c0e3b3b10dd9f0ce775c34a686a3255a7d1",
},
# This model is really big, save space by using adafactor.
# Note that even then it will take up ~60GB per GPU on an 8-GPU machine.
default_optimizer="adafactor",
),
]
MODELS_DICT: Dict[str, ModelConfig] = {
model_config.name: model_config for model_config in MODEL_CONFIGS
}
loss_dict = {
"logconf": logconf_loss_fn(),
"product": product_loss_fn(),
"xent": xent_loss(),
}
VALID_LOSSES: List[str] = list(loss_dict.keys())
def get_config_foldername(config: dict) -> str:
def shorten_key(key: str) -> str:
return "".join(word[0] for word in key.split("_"))
def shorten_value(value) -> str:
if isinstance(value, bool):
return "1" if value else "0"
elif isinstance(value, str):
value = value.split("/")[-1]
if "_" in value:
return "_".join(word[:4] for word in value.split("_"))
else:
return value
else:
return str(value)
return "-".join(f"{shorten_key(k)}={shorten_value(v)}" for k, v in sorted(config.items()))
def main(
batch_size: int = 32,
max_ctx: int = 1024,
ds_name: str = "sciq",
loss: str = "xent",
n_docs: int = 20000,
n_test_docs: int = 10000,
model_size: str = "gpt2",
lr: Optional[float] = None,
optim: Optional[str] = None,
epochs: int = 2,
force_retrain: bool = False,
seed: int = 0,
minibatch_size_per_device: Optional[float] = None,
train_with_dropout: bool = False,
results_folder: str = "/tmp/results",
linear_probe: bool = False,
lr_schedule: str = "cosine_anneal",
# Note: you can pass either weak_model_size or weak_labels_path. If you pass
# weak_model_size, we will guess the path to the weak labels based on the weak
# model. If you pass weak_labels_path, we will use that path instead.
# If you pass neither, we will train on ground truth.
weak_model_size: Optional[str] = None,
weak_labels_path: Optional[str] = None,
sweep_subfolder: str = "default",
# Set to a very large value so that by default we don't do any intermediate evals but
# still do final evals (which requires eval_every to be set to a non-zero, non-None value)
eval_every: int = 1000000,
sync_command: Optional[str] = None,
):
# this is per device!
if minibatch_size_per_device is None:
minibatch_size_per_device = 1
assert ds_name in VALID_DATASETS, f"Unknown dataset {ds_name} not in {VALID_DATASETS}"
assert (
weak_model_size is None or weak_labels_path is None
), "Can't pass both weak_model_size and weak_labels_path"
model_config = MODELS_DICT[model_size]
use_default_lr = False
if lr is None:
assert (
batch_size == 32
), "Learning rates were tuned on batch size 32, you probably want to sweep LR if you are tuning batch size"
lr = model_config.default_lr
use_default_lr = True
if optim is None:
optim = model_config.default_optimizer
# The commented out terms are the ones that should not change final results
config = {
"batch_size": batch_size,
"max_ctx": max_ctx,
"ds_name": ds_name,
"loss": loss,
"n_docs": n_docs,
"n_test_docs": n_test_docs,
"model_size": model_size,
"lr": lr,
"optim": optim,
"epochs": epochs,
# "force_retrain": force_retrain,
"seed": seed,
# "minibatch_size_per_device": minibatch_size_per_device,
"train_with_dropout": train_with_dropout,
# "results_folder": results_folder,
"linear_probe": linear_probe,
"lr_schedule": lr_schedule,
"eval_every": eval_every,
# "sweep_subfolder": sweep_subfolder,
}
if weak_model_size is not None:
weak_model_config = config.copy()
weak_model_config["model_size"] = weak_model_size
weak_model_config["loss"] = "xent"
if use_default_lr:
weak_model_config["lr"] = MODELS_DICT[weak_model_size].default_lr
weak_model_config_name = get_config_foldername(weak_model_config)
weak_labels_path = (
results_folder + "/" + sweep_subfolder + "/" + weak_model_config_name + "/weak_labels"
)
eval_batch_size = model_config.eval_batch_size
random.seed(seed)
# Load dataset
dataset = load_dataset(ds_name, seed=seed, split_sizes=dict(train=n_docs, test=n_test_docs))
# Split the training dataset in half
train_dataset, test_ds = dataset["train"], dataset["test"]
if weak_labels_path is None:
split_data = train_dataset.train_test_split(test_size=0.5, seed=seed)
train1_ds, train2_ds = split_data["train"], split_data["test"]
print("len(train1):", len(train1_ds), "len(train2):", len(train2_ds))
config_name = get_config_foldername(config)
else:
if not weak_labels_path.endswith("weak_labels"):
weak_labels_path = weak_labels_path + "/weak_labels"
if sync_command is not None:
sync_command_list = sync_command.split(" ")
sync_command_list.extend(
["download", weak_labels_path.replace("/weak_labels", ""), results_folder]
)
print(f"Running sync command: {' '.join(sync_command_list)}")
result = subprocess.run(sync_command_list, check=True)
if result.returncode != 0:
raise RuntimeError(f"Sync command failed with return code {result.returncode}")
train1_ds = load_from_disk(weak_labels_path)
train2_ds = None
weak_model_config = json.load(open(weak_labels_path.replace("weak_labels", "config.json")))
config["weak_model_size"] = weak_model_config["model_size"]
config_name = get_config_foldername(config)
config["weak_model"] = weak_model_config
save_path = os.path.join(results_folder, sweep_subfolder, config_name)
logger.configure(
name="{sweep_subfolder}_{config_name}_{datetime_now}",
save_path=save_path,
sweep_subfolder=sweep_subfolder,
config_name=config_name,
)
# Tokenize datasets
tokenizer = get_tokenizer(model_config.name) | train1_ds = tokenize_dataset(train1_ds, tokenizer, max_ctx) | 3 | 2023-12-13 23:53:13+00:00 | 8k |
linyiLYi/voice-assistant | whisper/transcribe.py | [
{
"identifier": "FRAMES_PER_SECOND",
"path": "whisper/audio.py",
"snippet": "FRAMES_PER_SECOND = SAMPLE_RATE // HOP_LENGTH # 10ms per audio frame"
},
{
"identifier": "HOP_LENGTH",
"path": "whisper/audio.py",
"snippet": "HOP_LENGTH = 160"
},
{
"identifier": "N_FRAMES",
"path"... | import sys
import warnings
import mlx.core as mx
import numpy as np
import tqdm
from typing import List, Optional, Tuple, Union
from .audio import (
FRAMES_PER_SECOND,
HOP_LENGTH,
N_FRAMES,
N_SAMPLES,
SAMPLE_RATE,
log_mel_spectrogram,
pad_or_trim,
)
from .decoding import DecodingOptions, DecodingResult
from .load_models import load_model
from .timing import add_word_timestamps
from .tokenizer import LANGUAGES, get_tokenizer | 5,388 | model_path = None
@classmethod
def get_model(cls, model_path: str, dtype: mx.Dtype):
if cls.model is None or model_path != cls.model_path:
cls.model = load_model(model_path, dtype=dtype)
cls.model_path = model_path
return cls.model
def transcribe(
audio: Union[str, np.ndarray, mx.array],
*,
path_or_hf_repo: str = "mlx-community/whisper-tiny",
verbose: Optional[bool] = None,
temperature: Union[float, Tuple[float, ...]] = (0.0, 0.2, 0.4, 0.6, 0.8, 1.0),
compression_ratio_threshold: Optional[float] = 2.4,
logprob_threshold: Optional[float] = -1.0,
no_speech_threshold: Optional[float] = 0.6,
condition_on_previous_text: bool = True,
initial_prompt: Optional[str] = None,
word_timestamps: bool = False,
prepend_punctuations: str = "\"'“¿([{-",
append_punctuations: str = "\"'.。,,!!??::”)]}、",
clip_timestamps: Union[str, List[float]] = "0",
hallucination_silence_threshold: Optional[float] = None,
**decode_options,
):
"""
Transcribe an audio file using Whisper
Parameters
----------
audio: Union[str, np.ndarray, mx.array]
The path to the audio file to open, or the audio waveform
path_or_hf_repo: str
The localpath to the Whisper model or HF Hub repo with the MLX converted weights.
verbose: bool
Whether to display the text being decoded to the console. If True, displays all the details,
If False, displays minimal details. If None, does not display anything
temperature: Union[float, Tuple[float, ...]]
Temperature for sampling. It can be a tuple of temperatures, which will be successively used
upon failures according to either `compression_ratio_threshold` or `logprob_threshold`.
compression_ratio_threshold: float
If the gzip compression ratio is above this value, treat as failed
logprob_threshold: float
If the average log probability over sampled tokens is below this value, treat as failed
no_speech_threshold: float
If the no_speech probability is higher than this value AND the average log probability
over sampled tokens is below `logprob_threshold`, consider the segment as silent
condition_on_previous_text: bool
if True, the previous output of the model is provided as a prompt for the next window;
disabling may make the text inconsistent across windows, but the model becomes less prone to
getting stuck in a failure loop, such as repetition looping or timestamps going out of sync.
word_timestamps: bool
Extract word-level timestamps using the cross-attention pattern and dynamic time warping,
and include the timestamps for each word in each segment.
prepend_punctuations: str
If word_timestamps is True, merge these punctuation symbols with the next word
append_punctuations: str
If word_timestamps is True, merge these punctuation symbols with the previous word
initial_prompt: Optional[str]
Optional text to provide as a prompt for the first window. This can be used to provide, or
"prompt-engineer" a context for transcription, e.g. custom vocabularies or proper nouns
to make it more likely to predict those word correctly.
decode_options: dict
Keyword arguments to construct `DecodingOptions` instances
clip_timestamps: Union[str, List[float]]
Comma-separated list start,end,start,end,... timestamps (in seconds) of clips to process.
The last end timestamp defaults to the end of the file.
hallucination_silence_threshold: Optional[float]
When word_timestamps is True, skip silent periods longer than this threshold (in seconds)
when a possible hallucination is detected
Returns
-------
A dictionary containing the resulting text ("text") and segment-level details ("segments"), and
the spoken language ("language"), which is detected when `decode_options["language"]` is None.
"""
dtype = mx.float16 if decode_options.get("fp16", True) else mx.float32
model = ModelHolder.get_model(path_or_hf_repo, dtype)
# Pad 30-seconds of silence to the input audio, for slicing
mel = log_mel_spectrogram(audio, n_mels=model.dims.n_mels, padding=N_SAMPLES)
content_frames = mel.shape[-2] - N_FRAMES
content_duration = float(content_frames * HOP_LENGTH / SAMPLE_RATE)
if verbose:
system_encoding = sys.getdefaultencoding()
if system_encoding != "utf-8":
make_safe = lambda x: x.encode(system_encoding, errors="replace").decode(
system_encoding
)
else:
make_safe = lambda x: x
if decode_options.get("language", None) is None:
if not model.is_multilingual:
decode_options["language"] = "en"
else:
if verbose:
print(
"Detecting language using up to the first 30 seconds. "
"Use the `language` decoding option to specify the language"
)
| # Copyright © 2023 Apple Inc.
def _format_timestamp(seconds: float):
assert seconds >= 0, "non-negative timestamp expected"
milliseconds = round(seconds * 1000.0)
hours = milliseconds // 3_600_000
milliseconds -= hours * 3_600_000
minutes = milliseconds // 60_000
milliseconds -= minutes * 60_000
seconds = milliseconds // 1_000
milliseconds -= seconds * 1_000
hours_marker = f"{hours:02d}:" if hours > 0 else ""
return f"{hours_marker}{minutes:02d}:{seconds:02d}.{milliseconds:03d}"
def _get_end(segments: List[dict]) -> Optional[float]:
return next(
(w["end"] for s in reversed(segments) for w in reversed(s["words"])),
segments[-1]["end"] if segments else None,
)
class ModelHolder:
model = None
model_path = None
@classmethod
def get_model(cls, model_path: str, dtype: mx.Dtype):
if cls.model is None or model_path != cls.model_path:
cls.model = load_model(model_path, dtype=dtype)
cls.model_path = model_path
return cls.model
def transcribe(
audio: Union[str, np.ndarray, mx.array],
*,
path_or_hf_repo: str = "mlx-community/whisper-tiny",
verbose: Optional[bool] = None,
temperature: Union[float, Tuple[float, ...]] = (0.0, 0.2, 0.4, 0.6, 0.8, 1.0),
compression_ratio_threshold: Optional[float] = 2.4,
logprob_threshold: Optional[float] = -1.0,
no_speech_threshold: Optional[float] = 0.6,
condition_on_previous_text: bool = True,
initial_prompt: Optional[str] = None,
word_timestamps: bool = False,
prepend_punctuations: str = "\"'“¿([{-",
append_punctuations: str = "\"'.。,,!!??::”)]}、",
clip_timestamps: Union[str, List[float]] = "0",
hallucination_silence_threshold: Optional[float] = None,
**decode_options,
):
"""
Transcribe an audio file using Whisper
Parameters
----------
audio: Union[str, np.ndarray, mx.array]
The path to the audio file to open, or the audio waveform
path_or_hf_repo: str
The localpath to the Whisper model or HF Hub repo with the MLX converted weights.
verbose: bool
Whether to display the text being decoded to the console. If True, displays all the details,
If False, displays minimal details. If None, does not display anything
temperature: Union[float, Tuple[float, ...]]
Temperature for sampling. It can be a tuple of temperatures, which will be successively used
upon failures according to either `compression_ratio_threshold` or `logprob_threshold`.
compression_ratio_threshold: float
If the gzip compression ratio is above this value, treat as failed
logprob_threshold: float
If the average log probability over sampled tokens is below this value, treat as failed
no_speech_threshold: float
If the no_speech probability is higher than this value AND the average log probability
over sampled tokens is below `logprob_threshold`, consider the segment as silent
condition_on_previous_text: bool
if True, the previous output of the model is provided as a prompt for the next window;
disabling may make the text inconsistent across windows, but the model becomes less prone to
getting stuck in a failure loop, such as repetition looping or timestamps going out of sync.
word_timestamps: bool
Extract word-level timestamps using the cross-attention pattern and dynamic time warping,
and include the timestamps for each word in each segment.
prepend_punctuations: str
If word_timestamps is True, merge these punctuation symbols with the next word
append_punctuations: str
If word_timestamps is True, merge these punctuation symbols with the previous word
initial_prompt: Optional[str]
Optional text to provide as a prompt for the first window. This can be used to provide, or
"prompt-engineer" a context for transcription, e.g. custom vocabularies or proper nouns
to make it more likely to predict those word correctly.
decode_options: dict
Keyword arguments to construct `DecodingOptions` instances
clip_timestamps: Union[str, List[float]]
Comma-separated list start,end,start,end,... timestamps (in seconds) of clips to process.
The last end timestamp defaults to the end of the file.
hallucination_silence_threshold: Optional[float]
When word_timestamps is True, skip silent periods longer than this threshold (in seconds)
when a possible hallucination is detected
Returns
-------
A dictionary containing the resulting text ("text") and segment-level details ("segments"), and
the spoken language ("language"), which is detected when `decode_options["language"]` is None.
"""
dtype = mx.float16 if decode_options.get("fp16", True) else mx.float32
model = ModelHolder.get_model(path_or_hf_repo, dtype)
# Pad 30-seconds of silence to the input audio, for slicing
mel = log_mel_spectrogram(audio, n_mels=model.dims.n_mels, padding=N_SAMPLES)
content_frames = mel.shape[-2] - N_FRAMES
content_duration = float(content_frames * HOP_LENGTH / SAMPLE_RATE)
if verbose:
system_encoding = sys.getdefaultencoding()
if system_encoding != "utf-8":
make_safe = lambda x: x.encode(system_encoding, errors="replace").decode(
system_encoding
)
else:
make_safe = lambda x: x
if decode_options.get("language", None) is None:
if not model.is_multilingual:
decode_options["language"] = "en"
else:
if verbose:
print(
"Detecting language using up to the first 30 seconds. "
"Use the `language` decoding option to specify the language"
) | mel_segment = pad_or_trim(mel, N_FRAMES, axis=-2).astype(dtype) | 6 | 2023-12-09 13:33:46+00:00 | 8k |
SqueezeAILab/LLMCompiler | src/llm_compiler/planner.py | [
{
"identifier": "Plan",
"path": "src/executors/schema.py",
"snippet": "class Plan(BaseModel):\n \"\"\"Plan.\"\"\"\n\n steps: list[Step]\n \"\"\"The steps.\"\"\""
},
{
"identifier": "END_OF_PLAN",
"path": "src/llm_compiler/constants.py",
"snippet": "END_OF_PLAN = \"<END_OF_PLAN>\... | import asyncio
import re
from typing import Any, Optional, Sequence, Union
from uuid import UUID
from langchain.callbacks.base import AsyncCallbackHandler, Callbacks
from langchain.chat_models.base import BaseChatModel
from langchain.schema import LLMResult
from langchain.schema.messages import HumanMessage, SystemMessage
from src.executors.schema import Plan
from src.llm_compiler.constants import END_OF_PLAN
from src.llm_compiler.output_parser import (
ACTION_PATTERN,
THOUGHT_PATTERN,
LLMCompilerPlanParser,
instantiate_task,
)
from src.llm_compiler.task_fetching_unit import Task
from src.tools.base import StructuredTool, Tool
from src.utils.logger_utils import log | 5,415 | args=args,
thought=self.thought,
)
self.buffer = suffix
self.thought = ""
return task
return None
def ingest_token(self, token: str) -> Optional[Task]:
# Append token to buffer
if "\n" in token:
prefix, suffix = token.split("\n", 1)
prefix = prefix.strip()
self.buffer += prefix + "\n"
return self._match_buffer_and_generate_task(suffix)
else:
self.buffer += token
return None
def finalize(self):
self.buffer = self.buffer + "\n"
return self._match_buffer_and_generate_task("")
class LLMCompilerCallback(AsyncCallbackHandler):
_queue: asyncio.Queue[Optional[Task]]
_parser: StreamingGraphParser
_tools: Sequence[Union[Tool, StructuredTool]]
def __init__(
self,
queue: asyncio.Queue[Optional[str]],
tools: Sequence[Union[Tool, StructuredTool]],
):
self._queue = queue
self._parser = StreamingGraphParser(tools=tools)
async def on_llm_start(self, serialized, prompts, **kwargs: Any) -> Any:
"""Run when LLM starts running."""
async def on_llm_new_token(
self,
token: str,
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> None:
parsed_data = self._parser.ingest_token(token)
if parsed_data:
await self._queue.put(parsed_data)
if parsed_data.is_join:
await self._queue.put(None)
async def on_llm_end(
self,
response: LLMResult,
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> None:
parsed_data = self._parser.finalize()
if parsed_data:
await self._queue.put(parsed_data)
await self._queue.put(None)
class Planner:
def __init__(
self,
llm: BaseChatModel,
example_prompt: str,
example_prompt_replan: str,
tools: Sequence[Union[Tool, StructuredTool]],
stop: Optional[list[str]],
):
self.llm = llm
# different system prompt is needed when replanning
# since they have different guidelines, and also examples provided by the user
self.system_prompt = generate_llm_compiler_prompt(
tools=tools,
example_prompt=example_prompt,
is_replan=False,
)
self.system_prompt_replan = generate_llm_compiler_prompt(
tools=tools,
example_prompt=example_prompt_replan,
is_replan=True,
)
self.tools = tools
self.output_parser = LLMCompilerPlanParser(tools=tools)
self.stop = stop
async def run_llm(
self,
inputs: dict[str, Any],
is_replan: bool = False,
callbacks: Callbacks = None,
) -> str:
"""Run the LLM."""
if is_replan:
system_prompt = self.system_prompt_replan
assert "context" in inputs, "If replanning, context must be provided"
human_prompt = f"Question: {inputs['input']}\n{inputs['context']}\n"
else:
system_prompt = self.system_prompt
human_prompt = f"Question: {inputs['input']}"
messages = [
SystemMessage(content=system_prompt),
HumanMessage(content=human_prompt),
]
llm_response = await self.llm._call_async(
messages,
callbacks=callbacks,
stop=self.stop,
)
| """LLM Compiler Planner"""
JOIN_DESCRIPTION = (
"join():\n"
" - Collects and combines results from prior actions.\n"
" - A LLM agent is called upon invoking join to either finalize the user query or wait until the plans are executed.\n"
" - join should always be the last action in the plan, and will be called in two scenarios:\n"
" (a) if the answer can be determined by gathering the outputs from tasks to generate the final response.\n"
" (b) if the answer cannot be determined in the planning phase before you execute the plans. "
)
def generate_llm_compiler_prompt(
tools: Sequence[Union[Tool, StructuredTool]],
example_prompt=str,
is_replan: bool = False,
):
prefix = (
"Given a user query, create a plan to solve it with the utmost parallelizability. "
f"Each plan should comprise an action from the following {len(tools) + 1} types:\n"
)
# Tools
for i, tool in enumerate(tools):
prefix += f"{i+1}. {tool.description}\n"
# Join operation
prefix += f"{i+2}. {JOIN_DESCRIPTION}\n\n"
# Guidelines
prefix += (
"Guidelines:\n"
" - Each action described above contains input/output types and description.\n"
" - You must strictly adhere to the input and output types for each action.\n"
" - The action descriptions contain the guidelines. You MUST strictly follow those guidelines when you use the actions.\n"
" - Each action in the plan should strictly be one of the above types. Follow the Python conventions for each action.\n"
" - Each action MUST have a unique ID, which is strictly increasing.\n"
" - Inputs for actions can either be constants or outputs from preceding actions. "
"In the latter case, use the format $id to denote the ID of the previous action whose output will be the input.\n"
f" - Always call join as the last action in the plan. Say '{END_OF_PLAN}' after you call join\n"
" - Ensure the plan maximizes parallelizability.\n"
" - Only use the provided action types. If a query cannot be addressed using these, invoke the join action for the next steps.\n"
" - Never explain the plan with comments (e.g. #).\n"
" - Never introduce new actions other than the ones provided.\n\n"
)
if is_replan:
prefix += (
' - You are given "Previous Plan" which is the plan that the previous agent created along with the execution results '
"(given as Observation) of each plan and a general thought (given as Thought) about the executed results."
'You MUST use these information to create the next plan under "Current Plan".\n'
' - When starting the Current Plan, you should start with "Thought" that outlines the strategy for the next plan.\n'
" - In the Current Plan, you should NEVER repeat the actions that are already executed in the Previous Plan.\n"
)
# Examples
prefix += "Here are some examples:\n\n"
prefix += example_prompt
return prefix
class StreamingGraphParser:
"""Streaming version of the GraphParser."""
buffer = ""
thought = ""
graph_dict = {}
def __init__(self, tools: Sequence[Union[Tool, StructuredTool]]) -> None:
self.tools = tools
def _match_buffer_and_generate_task(self, suffix: str) -> Optional[Task]:
"""Runs every time "\n" is encountered in the input stream or at the end of the stream.
Matches the buffer against the regex patterns and generates a task if a match is found.
Match patterns include:
1. Thought: <thought>
- this case, the thought is stored in self.thought, and we reset the buffer.
- the thought is then used as the thought for the next action.
2. <idx>. <tool_name>(<args>)
- this case, the tool is instantiated with the idx, tool_name, args, and thought.
- the thought is reset.
- the buffer is reset.
"""
if match := re.match(THOUGHT_PATTERN, self.buffer):
# Optionally, action can be preceded by a thought
self.thought = match.group(1)
self.buffer = suffix
elif match := re.match(ACTION_PATTERN, self.buffer):
# if action is parsed, return the task, and clear the buffer
idx, tool_name, args, _ = match.groups()
idx = int(idx)
task = instantiate_task(
tools=self.tools,
idx=idx,
tool_name=tool_name,
args=args,
thought=self.thought,
)
self.buffer = suffix
self.thought = ""
return task
return None
def ingest_token(self, token: str) -> Optional[Task]:
# Append token to buffer
if "\n" in token:
prefix, suffix = token.split("\n", 1)
prefix = prefix.strip()
self.buffer += prefix + "\n"
return self._match_buffer_and_generate_task(suffix)
else:
self.buffer += token
return None
def finalize(self):
self.buffer = self.buffer + "\n"
return self._match_buffer_and_generate_task("")
class LLMCompilerCallback(AsyncCallbackHandler):
_queue: asyncio.Queue[Optional[Task]]
_parser: StreamingGraphParser
_tools: Sequence[Union[Tool, StructuredTool]]
def __init__(
self,
queue: asyncio.Queue[Optional[str]],
tools: Sequence[Union[Tool, StructuredTool]],
):
self._queue = queue
self._parser = StreamingGraphParser(tools=tools)
async def on_llm_start(self, serialized, prompts, **kwargs: Any) -> Any:
"""Run when LLM starts running."""
async def on_llm_new_token(
self,
token: str,
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> None:
parsed_data = self._parser.ingest_token(token)
if parsed_data:
await self._queue.put(parsed_data)
if parsed_data.is_join:
await self._queue.put(None)
async def on_llm_end(
self,
response: LLMResult,
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> None:
parsed_data = self._parser.finalize()
if parsed_data:
await self._queue.put(parsed_data)
await self._queue.put(None)
class Planner:
def __init__(
self,
llm: BaseChatModel,
example_prompt: str,
example_prompt_replan: str,
tools: Sequence[Union[Tool, StructuredTool]],
stop: Optional[list[str]],
):
self.llm = llm
# different system prompt is needed when replanning
# since they have different guidelines, and also examples provided by the user
self.system_prompt = generate_llm_compiler_prompt(
tools=tools,
example_prompt=example_prompt,
is_replan=False,
)
self.system_prompt_replan = generate_llm_compiler_prompt(
tools=tools,
example_prompt=example_prompt_replan,
is_replan=True,
)
self.tools = tools
self.output_parser = LLMCompilerPlanParser(tools=tools)
self.stop = stop
async def run_llm(
self,
inputs: dict[str, Any],
is_replan: bool = False,
callbacks: Callbacks = None,
) -> str:
"""Run the LLM."""
if is_replan:
system_prompt = self.system_prompt_replan
assert "context" in inputs, "If replanning, context must be provided"
human_prompt = f"Question: {inputs['input']}\n{inputs['context']}\n"
else:
system_prompt = self.system_prompt
human_prompt = f"Question: {inputs['input']}"
messages = [
SystemMessage(content=system_prompt),
HumanMessage(content=human_prompt),
]
llm_response = await self.llm._call_async(
messages,
callbacks=callbacks,
stop=self.stop,
) | log("LLMCompiler planner response: \n", llm_response.content, block=True) | 9 | 2023-12-06 21:12:54+00:00 | 8k |
open-compass/MixtralKit | mixtralkit/layers/moe.py | [
{
"identifier": "ModelArgs",
"path": "mixtralkit/layers/utils.py",
"snippet": "class ModelArgs:\n dim: int = 4096\n n_layers: int = 32\n n_heads: int = 32\n n_kv_heads: Optional[int] = None\n vocab_size: int = -1 # defined later by tokenizer\n multiple_of: int = 256 # make SwiGLU hid... | import math
import torch
import torch.nn.functional as F
from dataclasses import dataclass
from typing import Dict, Optional, Tuple
from torch import nn
from .utils import ModelArgs
from .attention import TorchAttention, FairScaleAttention
from .ffn import TorchFFN, FairScaleFFN
from .transformer import TorchTransformerBlock, TorchTransformer, FairScaleTransformer
from fairscale.nn.model_parallel.layers import (
ColumnParallelLinear,
) | 5,169 | # Copyright (c) OpenMMLab. and affiliates.
# Copyright (c) Meta Platforms, Inc. and affiliates.
class MoETorchFFN(nn.Module):
def __init__(
self,
num_experts: int,
num_experts_per_tok: int,
num_shards: int,
gate_softmax: bool = False,
**kwargs,
):
super().__init__()
self.experts = nn.ModuleList([
TorchFFN(**kwargs).to(f"cuda:{i//num_shards}")
for i in range(num_experts)]
)
self.gate = nn.Linear(
kwargs["dim"], num_experts, bias=False)
self.num_experts_per_tok = num_experts_per_tok
self.gate_softmax = gate_softmax
print("Softmax for Gate:{}".format(str(gate_softmax)))
def forward(self, x):
orig_shape = x.shape
x = x.view(-1, x.shape[-1])
if self.gate_softmax:
scores = self.gate(x).softmax(dim=-1)
else:
scores = self.gate(x)
expert_weights, expert_indices = torch.topk(
scores, self.num_experts_per_tok, dim=-1)
expert_weights = expert_weights.softmax(dim=-1)
flat_expert_indices = expert_indices.view(-1)
x = x.repeat_interleave(self.num_experts_per_tok, dim=0)
y = torch.empty_like(x)
for i, expert in enumerate(self.experts):
y[flat_expert_indices == i] = expert(x[flat_expert_indices == i])
y = (y.view(*expert_weights.shape, -1) * expert_weights.unsqueeze(-1)).sum(dim=1)
return y.view(*orig_shape)
| # Copyright (c) OpenMMLab. and affiliates.
# Copyright (c) Meta Platforms, Inc. and affiliates.
class MoETorchFFN(nn.Module):
def __init__(
self,
num_experts: int,
num_experts_per_tok: int,
num_shards: int,
gate_softmax: bool = False,
**kwargs,
):
super().__init__()
self.experts = nn.ModuleList([
TorchFFN(**kwargs).to(f"cuda:{i//num_shards}")
for i in range(num_experts)]
)
self.gate = nn.Linear(
kwargs["dim"], num_experts, bias=False)
self.num_experts_per_tok = num_experts_per_tok
self.gate_softmax = gate_softmax
print("Softmax for Gate:{}".format(str(gate_softmax)))
def forward(self, x):
orig_shape = x.shape
x = x.view(-1, x.shape[-1])
if self.gate_softmax:
scores = self.gate(x).softmax(dim=-1)
else:
scores = self.gate(x)
expert_weights, expert_indices = torch.topk(
scores, self.num_experts_per_tok, dim=-1)
expert_weights = expert_weights.softmax(dim=-1)
flat_expert_indices = expert_indices.view(-1)
x = x.repeat_interleave(self.num_experts_per_tok, dim=0)
y = torch.empty_like(x)
for i, expert in enumerate(self.experts):
y[flat_expert_indices == i] = expert(x[flat_expert_indices == i])
y = (y.view(*expert_weights.shape, -1) * expert_weights.unsqueeze(-1)).sum(dim=1)
return y.view(*orig_shape)
| class MoETorchTransformerBlock(TorchTransformerBlock): | 5 | 2023-12-09 15:05:26+00:00 | 8k |
aymenfurter/microagents | agents/agent_lifecycle.py | [
{
"identifier": "MicroAgent",
"path": "agents/microagent.py",
"snippet": "class MicroAgent:\n \"\"\"\n The MicroAgent class encapsulates the behavior of a small, purpose-driven agent\n that interacts with the OpenAI API.\n \"\"\"\n\n def __init__(self, initial_prompt, purpose, depth, agen... | import logging
from typing import List
from agents.microagent import MicroAgent
from integrations.openaiwrapper import OpenAIAPIWrapper
from agents.agent_similarity import AgentSimilarity
from agents.agent_persistence_manager import AgentPersistenceManager
from numpy import ndarray
from prompt_management.prompts import (
PRIME_PROMPT, PRIME_NAME,
PROMPT_ENGINEERING_SYSTEM_PROMPT,
PROMPT_ENGINEERING_TEMPLATE, EXAMPLES
) | 4,106 |
logger = logging.getLogger()
DEFAULT_MAX_AGENTS = 20
PRIME_AGENT_WEIGHT = 25
class AgentLifecycle:
def __init__(self, openai_wrapper: OpenAIAPIWrapper, agent_persistence_manager: AgentPersistenceManager, max_agents: int = DEFAULT_MAX_AGENTS):
self.agents: List[MicroAgent] = []
self.openai_wrapper = openai_wrapper
self.agent_persistence = agent_persistence_manager
self.max_agents = max_agents
def cleanup_agents(self):
"""Remove all agents with status stopped = True in an efficient manner."""
self.agents = [agent for agent in self.agents if not agent.stopped]
def create_prime_agent(self) -> None:
"""Creates the prime agent and adds it to the agent list."""
prime_agent = MicroAgent(
PRIME_PROMPT, PRIME_NAME, 0, self,
self.openai_wrapper, PRIME_AGENT_WEIGHT, True, True
)
self.agents.append(prime_agent)
def get_or_create_agent(self, purpose: str, depth: int, sample_input: str, force_new: bool = False) -> MicroAgent:
"""
Retrieves or creates an agent based on the given purpose.
Optionally creates a new agent regardless of similarity if force_new is True.
"""
if not force_new:
agent_similarity = AgentSimilarity(self.openai_wrapper, self.agents)
purpose_embedding = agent_similarity.get_embedding(purpose)
closest_agent, highest_similarity = agent_similarity.find_closest_agent(purpose_embedding)
similarity_threshold = agent_similarity.calculate_similarity_threshold()
if highest_similarity >= similarity_threshold:
closest_agent.usage_count += 1
return closest_agent
return self._create_and_add_agent(purpose, depth, sample_input)
def _create_and_add_agent(self, purpose: str, depth: int, sample_input: str) -> MicroAgent:
"""Helper method to create and add a new agent."""
if len(self.agents) >= self.max_agents:
self._remove_least_used_agent()
new_agent = MicroAgent(self._generate_llm_prompt(purpose, sample_input), purpose, depth, self, self.openai_wrapper)
new_agent.usage_count = 1
self.agents.append(new_agent)
return new_agent
def _remove_least_used_agent(self):
"""Removes the least used agent."""
least_used_agent = min(self.agents, key=lambda agent: agent.usage_count)
self.agents.remove(least_used_agent)
def save_agent(self, agent: MicroAgent) -> None:
"""Saves the given agent with error handling."""
try:
self.agent_persistence.save_agent(agent)
except Exception as e:
logger.exception(f"Error in saving agent: {e}")
raise
def _generate_llm_prompt(self, goal: str, sample_input: str) -> str:
"""
Generates a prompt for the LLM based on the given goal and sample input.
"""
messages = [
{"role": "system", "content": PROMPT_ENGINEERING_SYSTEM_PROMPT},
|
logger = logging.getLogger()
DEFAULT_MAX_AGENTS = 20
PRIME_AGENT_WEIGHT = 25
class AgentLifecycle:
def __init__(self, openai_wrapper: OpenAIAPIWrapper, agent_persistence_manager: AgentPersistenceManager, max_agents: int = DEFAULT_MAX_AGENTS):
self.agents: List[MicroAgent] = []
self.openai_wrapper = openai_wrapper
self.agent_persistence = agent_persistence_manager
self.max_agents = max_agents
def cleanup_agents(self):
"""Remove all agents with status stopped = True in an efficient manner."""
self.agents = [agent for agent in self.agents if not agent.stopped]
def create_prime_agent(self) -> None:
"""Creates the prime agent and adds it to the agent list."""
prime_agent = MicroAgent(
PRIME_PROMPT, PRIME_NAME, 0, self,
self.openai_wrapper, PRIME_AGENT_WEIGHT, True, True
)
self.agents.append(prime_agent)
def get_or_create_agent(self, purpose: str, depth: int, sample_input: str, force_new: bool = False) -> MicroAgent:
"""
Retrieves or creates an agent based on the given purpose.
Optionally creates a new agent regardless of similarity if force_new is True.
"""
if not force_new:
agent_similarity = AgentSimilarity(self.openai_wrapper, self.agents)
purpose_embedding = agent_similarity.get_embedding(purpose)
closest_agent, highest_similarity = agent_similarity.find_closest_agent(purpose_embedding)
similarity_threshold = agent_similarity.calculate_similarity_threshold()
if highest_similarity >= similarity_threshold:
closest_agent.usage_count += 1
return closest_agent
return self._create_and_add_agent(purpose, depth, sample_input)
def _create_and_add_agent(self, purpose: str, depth: int, sample_input: str) -> MicroAgent:
"""Helper method to create and add a new agent."""
if len(self.agents) >= self.max_agents:
self._remove_least_used_agent()
new_agent = MicroAgent(self._generate_llm_prompt(purpose, sample_input), purpose, depth, self, self.openai_wrapper)
new_agent.usage_count = 1
self.agents.append(new_agent)
return new_agent
def _remove_least_used_agent(self):
"""Removes the least used agent."""
least_used_agent = min(self.agents, key=lambda agent: agent.usage_count)
self.agents.remove(least_used_agent)
def save_agent(self, agent: MicroAgent) -> None:
"""Saves the given agent with error handling."""
try:
self.agent_persistence.save_agent(agent)
except Exception as e:
logger.exception(f"Error in saving agent: {e}")
raise
def _generate_llm_prompt(self, goal: str, sample_input: str) -> str:
"""
Generates a prompt for the LLM based on the given goal and sample input.
"""
messages = [
{"role": "system", "content": PROMPT_ENGINEERING_SYSTEM_PROMPT}, | {"role": "user", "content": PROMPT_ENGINEERING_TEMPLATE.format(goal=goal, sample_input=sample_input, examples=EXAMPLES)} | 7 | 2023-12-11 08:17:09+00:00 | 8k |
bytedance/ImageDream | extern/ldm_zero123/models/diffusion/ddim.py | [
{
"identifier": "norm_thresholding",
"path": "extern/ldm_zero123/models/diffusion/sampling_util.py",
"snippet": "def norm_thresholding(x0, value):\n s = append_dims(x0.pow(2).flatten(1).mean(1).sqrt().clamp(min=value), x0.ndim)\n return x0 * (value / s)"
},
{
"identifier": "renorm_threshol... | from functools import partial
from einops import rearrange
from tqdm import tqdm
from extern.ldm_zero123.models.diffusion.sampling_util import (
norm_thresholding,
renorm_thresholding,
spatial_norm_thresholding,
)
from extern.ldm_zero123.modules.diffusionmodules.util import (
extract_into_tensor,
make_ddim_sampling_parameters,
make_ddim_timesteps,
noise_like,
)
import numpy as np
import torch | 3,727 | iterator = tqdm(time_range, desc="DDIM Sampler", total=total_steps)
for i, step in enumerate(iterator):
index = total_steps - i - 1
ts = torch.full((b,), step, device=device, dtype=torch.long)
if mask is not None:
assert x0 is not None
img_orig = self.model.q_sample(
x0, ts
) # TODO: deterministic forward pass?
img = img_orig * mask + (1.0 - mask) * img
outs = self.p_sample_ddim(
img,
cond,
ts,
index=index,
use_original_steps=ddim_use_original_steps,
quantize_denoised=quantize_denoised,
temperature=temperature,
noise_dropout=noise_dropout,
score_corrector=score_corrector,
corrector_kwargs=corrector_kwargs,
unconditional_guidance_scale=unconditional_guidance_scale,
unconditional_conditioning=unconditional_conditioning,
dynamic_threshold=dynamic_threshold,
)
img, pred_x0 = outs
if callback:
img = callback(i, img, pred_x0)
if img_callback:
img_callback(pred_x0, i)
if index % log_every_t == 0 or index == total_steps - 1:
intermediates["x_inter"].append(img)
intermediates["pred_x0"].append(pred_x0)
return img, intermediates
@torch.no_grad()
def p_sample_ddim(
self,
x,
c,
t,
index,
repeat_noise=False,
use_original_steps=False,
quantize_denoised=False,
temperature=1.0,
noise_dropout=0.0,
score_corrector=None,
corrector_kwargs=None,
unconditional_guidance_scale=1.0,
unconditional_conditioning=None,
dynamic_threshold=None,
):
b, *_, device = *x.shape, x.device
if unconditional_conditioning is None or unconditional_guidance_scale == 1.0:
e_t = self.model.apply_model(x, t, c)
else:
x_in = torch.cat([x] * 2)
t_in = torch.cat([t] * 2)
if isinstance(c, dict):
assert isinstance(unconditional_conditioning, dict)
c_in = dict()
for k in c:
if isinstance(c[k], list):
c_in[k] = [
torch.cat([unconditional_conditioning[k][i], c[k][i]])
for i in range(len(c[k]))
]
else:
c_in[k] = torch.cat([unconditional_conditioning[k], c[k]])
else:
c_in = torch.cat([unconditional_conditioning, c])
e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)
e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)
if score_corrector is not None:
assert self.model.parameterization == "eps"
e_t = score_corrector.modify_score(
self.model, e_t, x, t, c, **corrector_kwargs
)
alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas
alphas_prev = (
self.model.alphas_cumprod_prev
if use_original_steps
else self.ddim_alphas_prev
)
sqrt_one_minus_alphas = (
self.model.sqrt_one_minus_alphas_cumprod
if use_original_steps
else self.ddim_sqrt_one_minus_alphas
)
sigmas = (
self.model.ddim_sigmas_for_original_num_steps
if use_original_steps
else self.ddim_sigmas
)
# select parameters corresponding to the currently considered timestep
a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
sqrt_one_minus_at = torch.full(
(b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device
)
# current prediction for x_0
pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
print(t, sqrt_one_minus_at, a_t)
if quantize_denoised:
pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)
if dynamic_threshold is not None:
| """SAMPLING ONLY."""
class DDIMSampler(object):
def __init__(self, model, schedule="linear", **kwargs):
super().__init__()
self.model = model
self.ddpm_num_timesteps = model.num_timesteps
self.schedule = schedule
def to(self, device):
"""Same as to in torch module
Don't really underestand why this isn't a module in the first place"""
for k, v in self.__dict__.items():
if isinstance(v, torch.Tensor):
new_v = getattr(self, k).to(device)
setattr(self, k, new_v)
def register_buffer(self, name, attr):
if type(attr) == torch.Tensor:
if attr.device != torch.device("cuda"):
attr = attr.to(torch.device("cuda"))
setattr(self, name, attr)
def make_schedule(
self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0.0, verbose=True
):
self.ddim_timesteps = make_ddim_timesteps(
ddim_discr_method=ddim_discretize,
num_ddim_timesteps=ddim_num_steps,
num_ddpm_timesteps=self.ddpm_num_timesteps,
verbose=verbose,
)
alphas_cumprod = self.model.alphas_cumprod
assert (
alphas_cumprod.shape[0] == self.ddpm_num_timesteps
), "alphas have to be defined for each timestep"
to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)
self.register_buffer("betas", to_torch(self.model.betas))
self.register_buffer("alphas_cumprod", to_torch(alphas_cumprod))
self.register_buffer(
"alphas_cumprod_prev", to_torch(self.model.alphas_cumprod_prev)
)
# calculations for diffusion q(x_t | x_{t-1}) and others
self.register_buffer(
"sqrt_alphas_cumprod", to_torch(np.sqrt(alphas_cumprod.cpu()))
)
self.register_buffer(
"sqrt_one_minus_alphas_cumprod",
to_torch(np.sqrt(1.0 - alphas_cumprod.cpu())),
)
self.register_buffer(
"log_one_minus_alphas_cumprod", to_torch(np.log(1.0 - alphas_cumprod.cpu()))
)
self.register_buffer(
"sqrt_recip_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod.cpu()))
)
self.register_buffer(
"sqrt_recipm1_alphas_cumprod",
to_torch(np.sqrt(1.0 / alphas_cumprod.cpu() - 1)),
)
# ddim sampling parameters
ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(
alphacums=alphas_cumprod.cpu(),
ddim_timesteps=self.ddim_timesteps,
eta=ddim_eta,
verbose=verbose,
)
self.register_buffer("ddim_sigmas", ddim_sigmas)
self.register_buffer("ddim_alphas", ddim_alphas)
self.register_buffer("ddim_alphas_prev", ddim_alphas_prev)
self.register_buffer("ddim_sqrt_one_minus_alphas", np.sqrt(1.0 - ddim_alphas))
sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(
(1 - self.alphas_cumprod_prev)
/ (1 - self.alphas_cumprod)
* (1 - self.alphas_cumprod / self.alphas_cumprod_prev)
)
self.register_buffer(
"ddim_sigmas_for_original_num_steps", sigmas_for_original_sampling_steps
)
@torch.no_grad()
def sample(
self,
S,
batch_size,
shape,
conditioning=None,
callback=None,
normals_sequence=None,
img_callback=None,
quantize_x0=False,
eta=0.0,
mask=None,
x0=None,
temperature=1.0,
noise_dropout=0.0,
score_corrector=None,
corrector_kwargs=None,
verbose=True,
x_T=None,
log_every_t=100,
unconditional_guidance_scale=1.0,
unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...
dynamic_threshold=None,
**kwargs,
):
if conditioning is not None:
if isinstance(conditioning, dict):
ctmp = conditioning[list(conditioning.keys())[0]]
while isinstance(ctmp, list):
ctmp = ctmp[0]
cbs = ctmp.shape[0]
if cbs != batch_size:
print(
f"Warning: Got {cbs} conditionings but batch-size is {batch_size}"
)
else:
if conditioning.shape[0] != batch_size:
print(
f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}"
)
self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)
# sampling
C, H, W = shape
size = (batch_size, C, H, W)
# print(f'Data shape for DDIM sampling is {size}, eta {eta}')
samples, intermediates = self.ddim_sampling(
conditioning,
size,
callback=callback,
img_callback=img_callback,
quantize_denoised=quantize_x0,
mask=mask,
x0=x0,
ddim_use_original_steps=False,
noise_dropout=noise_dropout,
temperature=temperature,
score_corrector=score_corrector,
corrector_kwargs=corrector_kwargs,
x_T=x_T,
log_every_t=log_every_t,
unconditional_guidance_scale=unconditional_guidance_scale,
unconditional_conditioning=unconditional_conditioning,
dynamic_threshold=dynamic_threshold,
)
return samples, intermediates
@torch.no_grad()
def ddim_sampling(
self,
cond,
shape,
x_T=None,
ddim_use_original_steps=False,
callback=None,
timesteps=None,
quantize_denoised=False,
mask=None,
x0=None,
img_callback=None,
log_every_t=100,
temperature=1.0,
noise_dropout=0.0,
score_corrector=None,
corrector_kwargs=None,
unconditional_guidance_scale=1.0,
unconditional_conditioning=None,
dynamic_threshold=None,
t_start=-1,
):
device = self.model.betas.device
b = shape[0]
if x_T is None:
img = torch.randn(shape, device=device)
else:
img = x_T
if timesteps is None:
timesteps = (
self.ddpm_num_timesteps
if ddim_use_original_steps
else self.ddim_timesteps
)
elif timesteps is not None and not ddim_use_original_steps:
subset_end = (
int(
min(timesteps / self.ddim_timesteps.shape[0], 1)
* self.ddim_timesteps.shape[0]
)
- 1
)
timesteps = self.ddim_timesteps[:subset_end]
timesteps = timesteps[:t_start]
intermediates = {"x_inter": [img], "pred_x0": [img]}
time_range = (
reversed(range(0, timesteps))
if ddim_use_original_steps
else np.flip(timesteps)
)
total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]
# print(f"Running DDIM Sampling with {total_steps} timesteps")
iterator = tqdm(time_range, desc="DDIM Sampler", total=total_steps)
for i, step in enumerate(iterator):
index = total_steps - i - 1
ts = torch.full((b,), step, device=device, dtype=torch.long)
if mask is not None:
assert x0 is not None
img_orig = self.model.q_sample(
x0, ts
) # TODO: deterministic forward pass?
img = img_orig * mask + (1.0 - mask) * img
outs = self.p_sample_ddim(
img,
cond,
ts,
index=index,
use_original_steps=ddim_use_original_steps,
quantize_denoised=quantize_denoised,
temperature=temperature,
noise_dropout=noise_dropout,
score_corrector=score_corrector,
corrector_kwargs=corrector_kwargs,
unconditional_guidance_scale=unconditional_guidance_scale,
unconditional_conditioning=unconditional_conditioning,
dynamic_threshold=dynamic_threshold,
)
img, pred_x0 = outs
if callback:
img = callback(i, img, pred_x0)
if img_callback:
img_callback(pred_x0, i)
if index % log_every_t == 0 or index == total_steps - 1:
intermediates["x_inter"].append(img)
intermediates["pred_x0"].append(pred_x0)
return img, intermediates
@torch.no_grad()
def p_sample_ddim(
self,
x,
c,
t,
index,
repeat_noise=False,
use_original_steps=False,
quantize_denoised=False,
temperature=1.0,
noise_dropout=0.0,
score_corrector=None,
corrector_kwargs=None,
unconditional_guidance_scale=1.0,
unconditional_conditioning=None,
dynamic_threshold=None,
):
b, *_, device = *x.shape, x.device
if unconditional_conditioning is None or unconditional_guidance_scale == 1.0:
e_t = self.model.apply_model(x, t, c)
else:
x_in = torch.cat([x] * 2)
t_in = torch.cat([t] * 2)
if isinstance(c, dict):
assert isinstance(unconditional_conditioning, dict)
c_in = dict()
for k in c:
if isinstance(c[k], list):
c_in[k] = [
torch.cat([unconditional_conditioning[k][i], c[k][i]])
for i in range(len(c[k]))
]
else:
c_in[k] = torch.cat([unconditional_conditioning[k], c[k]])
else:
c_in = torch.cat([unconditional_conditioning, c])
e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)
e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)
if score_corrector is not None:
assert self.model.parameterization == "eps"
e_t = score_corrector.modify_score(
self.model, e_t, x, t, c, **corrector_kwargs
)
alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas
alphas_prev = (
self.model.alphas_cumprod_prev
if use_original_steps
else self.ddim_alphas_prev
)
sqrt_one_minus_alphas = (
self.model.sqrt_one_minus_alphas_cumprod
if use_original_steps
else self.ddim_sqrt_one_minus_alphas
)
sigmas = (
self.model.ddim_sigmas_for_original_num_steps
if use_original_steps
else self.ddim_sigmas
)
# select parameters corresponding to the currently considered timestep
a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
sqrt_one_minus_at = torch.full(
(b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device
)
# current prediction for x_0
pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
print(t, sqrt_one_minus_at, a_t)
if quantize_denoised:
pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)
if dynamic_threshold is not None: | pred_x0 = norm_thresholding(pred_x0, dynamic_threshold) | 0 | 2023-12-13 21:09:37+00:00 | 8k |
TencentARC/MotionCtrl | lvdm/modules/attention_temporal.py | [
{
"identifier": "checkpoint",
"path": "lvdm/common.py",
"snippet": "def checkpoint(func, inputs, params, flag):\n \"\"\"\n Evaluate a function without caching intermediate activations, allowing for\n reduced memory at the expense of extra compute in the backward pass.\n :param func: the func... | import math
import torch
import torch as th
import torch.nn.functional as F
import xformers
import xformers.ops
from inspect import isfunction
from torch import nn, einsum
from einops import rearrange, repeat
from lvdm.common import (
checkpoint,
exists,
uniq,
default,
max_neg_value,
init_
)
from lvdm.basics import (
conv_nd,
zero_module,
normalization
) | 4,083 |
if self.bidirectional_causal_attn:
mask_reverse = torch.triu(torch.ones([1, self.temporal_length, self.temporal_length], device=sim.device))
sim_reverse = sim.float().masked_fill(mask_reverse == 0, max_neg_value)
attn_reverse = sim_reverse.softmax(dim=-1)
out_reverse = einsum('b i j, b j d -> b i d', attn_reverse, v)
out += out_reverse
if self.use_relative_position:
v2 = self.relative_position_v(len_q, len_v)
out2 = einsum('b t s, t s d -> b t d', attn, v2) # TODO check
out += out2 # TODO check:先add还是先merge head?先计算rpr,on split head之后的数据,然后再merge。
out = rearrange(out, '(b h) n d -> b n (h d)', h=nh) # merge head
return self.to_out(out)
class CrossAttention(nn.Module):
def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.,
sa_shared_kv=False, shared_type='only_first', **kwargs,):
super().__init__()
inner_dim = dim_head * heads
context_dim = default(context_dim, query_dim)
self.sa_shared_kv = sa_shared_kv
assert(shared_type in ['only_first', 'all_frames', 'first_and_prev', 'only_prev', 'full', 'causal', 'full_qkv'])
self.shared_type = shared_type
self.dim_head = dim_head
self.scale = dim_head ** -0.5
self.heads = heads
self.to_q = nn.Linear(query_dim, inner_dim, bias=False)
self.to_k = nn.Linear(context_dim, inner_dim, bias=False)
self.to_v = nn.Linear(context_dim, inner_dim, bias=False)
self.to_out = nn.Sequential(
nn.Linear(inner_dim, query_dim),
nn.Dropout(dropout)
)
if XFORMERS_IS_AVAILBLE:
self.forward = self.efficient_forward
def forward(self, x, context=None, mask=None):
h = self.heads
b = x.shape[0]
q = self.to_q(x)
context = default(context, x)
k = self.to_k(context)
v = self.to_v(context)
if self.sa_shared_kv:
if self.shared_type == 'only_first':
k,v = map(lambda xx: rearrange(xx[0].unsqueeze(0), 'b n c -> (b n) c').unsqueeze(0).repeat(b,1,1),
(k,v))
else:
raise NotImplementedError
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))
sim = einsum('b i d, b j d -> b i j', q, k) * self.scale
if exists(mask):
mask = rearrange(mask, 'b ... -> b (...)')
max_neg_value = -torch.finfo(sim.dtype).max
mask = repeat(mask, 'b j -> (b h) () j', h=h)
sim.masked_fill_(~mask, max_neg_value)
# attention, what we cannot get enough of
attn = sim.softmax(dim=-1)
out = einsum('b i j, b j d -> b i d', attn, v)
out = rearrange(out, '(b h) n d -> b n (h d)', h=h)
return self.to_out(out)
def efficient_forward(self, x, context=None, mask=None):
q = self.to_q(x)
context = default(context, x)
k = self.to_k(context)
v = self.to_v(context)
b, _, _ = q.shape
q, k, v = map(
lambda t: t.unsqueeze(3)
.reshape(b, t.shape[1], self.heads, self.dim_head)
.permute(0, 2, 1, 3)
.reshape(b * self.heads, t.shape[1], self.dim_head)
.contiguous(),
(q, k, v),
)
# actually compute the attention, what we cannot get enough of
out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None, op=None)
if exists(mask):
raise NotImplementedError
out = (
out.unsqueeze(0)
.reshape(b, self.heads, out.shape[1], self.dim_head)
.permute(0, 2, 1, 3)
.reshape(b, out.shape[1], self.heads * self.dim_head)
)
return self.to_out(out)
class VideoSpatialCrossAttention(CrossAttention):
def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0):
super().__init__(query_dim, context_dim, heads, dim_head, dropout)
def forward(self, x, context=None, mask=None):
b, c, t, h, w = x.shape
if context is not None:
context = context.repeat(t, 1, 1)
x = super.forward(spatial_attn_reshape(x), context=context) + x
return spatial_attn_reshape_back(x,b,h)
class BasicTransformerBlockST(nn.Module):
def __init__(self,
# Spatial Stuff
dim,
n_heads,
d_head,
dropout=0.,
context_dim=None,
gated_ff=True,
|
try:
XFORMERS_IS_AVAILBLE = True
except:
XFORMERS_IS_AVAILBLE = False
class GEGLU(nn.Module):
def __init__(self, dim_in, dim_out):
super().__init__()
self.proj = nn.Linear(dim_in, dim_out * 2)
def forward(self, x):
x, gate = self.proj(x).chunk(2, dim=-1)
return x * F.gelu(gate)
class FeedForward(nn.Module):
def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.):
super().__init__()
inner_dim = int(dim * mult)
dim_out = default(dim_out, dim)
project_in = nn.Sequential(
nn.Linear(dim, inner_dim),
nn.GELU()
) if not glu else GEGLU(dim, inner_dim)
self.net = nn.Sequential(
project_in,
nn.Dropout(dropout),
nn.Linear(inner_dim, dim_out)
)
def forward(self, x):
return self.net(x)
def Normalize(in_channels):
return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True)
# ---------------------------------------------------------------------------------------------------
class RelativePosition(nn.Module):
""" https://github.com/evelinehong/Transformer_Relative_Position_PyTorch/blob/master/relative_position.py """
def __init__(self, num_units, max_relative_position):
super().__init__()
self.num_units = num_units
self.max_relative_position = max_relative_position
self.embeddings_table = nn.Parameter(th.Tensor(max_relative_position * 2 + 1, num_units))
nn.init.xavier_uniform_(self.embeddings_table)
def forward(self, length_q, length_k):
device = self.embeddings_table.device
range_vec_q = th.arange(length_q, device=device)
range_vec_k = th.arange(length_k, device=device)
distance_mat = range_vec_k[None, :] - range_vec_q[:, None]
distance_mat_clipped = th.clamp(distance_mat, -self.max_relative_position, self.max_relative_position)
final_mat = distance_mat_clipped + self.max_relative_position
# final_mat = th.LongTensor(final_mat).to(self.embeddings_table.device)
# final_mat = th.tensor(final_mat, device=self.embeddings_table.device, dtype=torch.long)
final_mat = final_mat.long()
embeddings = self.embeddings_table[final_mat]
return embeddings
class TemporalCrossAttention(nn.Module):
def __init__(self,
query_dim,
context_dim=None,
heads=8,
dim_head=64,
dropout=0.,
temporal_length=None, # For relative positional representation and image-video joint training.
image_length=None, # For image-video joint training.
use_relative_position=False, # whether use relative positional representation in temporal attention.
img_video_joint_train=False, # For image-video joint training.
use_tempoal_causal_attn=False,
bidirectional_causal_attn=False,
tempoal_attn_type=None,
joint_train_mode="same_batch",
**kwargs,
):
super().__init__()
inner_dim = dim_head * heads
context_dim = default(context_dim, query_dim)
self.context_dim = context_dim
self.scale = dim_head ** -0.5
self.heads = heads
self.temporal_length = temporal_length
self.use_relative_position = use_relative_position
self.img_video_joint_train = img_video_joint_train
self.bidirectional_causal_attn = bidirectional_causal_attn
self.joint_train_mode = joint_train_mode
assert(joint_train_mode in ["same_batch", "diff_batch"])
self.tempoal_attn_type = tempoal_attn_type
if bidirectional_causal_attn:
assert use_tempoal_causal_attn
if tempoal_attn_type:
assert(tempoal_attn_type in ['sparse_causal', 'sparse_causal_first'])
assert(not use_tempoal_causal_attn)
assert(not (img_video_joint_train and (self.joint_train_mode == "same_batch")))
self.to_q = nn.Linear(query_dim, inner_dim, bias=False)
self.to_k = nn.Linear(context_dim, inner_dim, bias=False)
self.to_v = nn.Linear(context_dim, inner_dim, bias=False)
assert(not (img_video_joint_train and (self.joint_train_mode == "same_batch") and use_tempoal_causal_attn))
if img_video_joint_train:
if self.joint_train_mode == "same_batch":
mask = torch.ones([1, temporal_length+image_length, temporal_length+image_length])
# mask[:, image_length:, :] = 0
# mask[:, :, image_length:] = 0
mask[:, temporal_length:, :] = 0
mask[:, :, temporal_length:] = 0
self.mask = mask
else:
self.mask = None
elif use_tempoal_causal_attn:
# normal causal attn
self.mask = torch.tril(torch.ones([1, temporal_length, temporal_length]))
elif tempoal_attn_type == 'sparse_causal':
# all frames interact with only the `prev` & self frame
mask1 = torch.tril(torch.ones([1, temporal_length, temporal_length])).bool() # true indicates keeping
mask2 = torch.zeros([1, temporal_length, temporal_length]) # initialize to same shape with mask1
mask2[:,2:temporal_length, :temporal_length-2] = torch.tril(torch.ones([1,temporal_length-2, temporal_length-2]))
mask2=(1-mask2).bool() # false indicates masking
self.mask = mask1 & mask2
elif tempoal_attn_type == 'sparse_causal_first':
# all frames interact with only the `first` & self frame
mask1 = torch.tril(torch.ones([1, temporal_length, temporal_length])).bool() # true indicates keeping
mask2 = torch.zeros([1, temporal_length, temporal_length])
mask2[:,2:temporal_length, 1:temporal_length-1] = torch.tril(torch.ones([1,temporal_length-2, temporal_length-2]))
mask2=(1-mask2).bool() # false indicates masking
self.mask = mask1 & mask2
else:
self.mask = None
if use_relative_position:
assert(temporal_length is not None)
self.relative_position_k = RelativePosition(num_units=dim_head, max_relative_position=temporal_length)
self.relative_position_v = RelativePosition(num_units=dim_head, max_relative_position=temporal_length)
self.to_out = nn.Sequential(
nn.Linear(inner_dim, query_dim),
nn.Dropout(dropout)
)
nn.init.constant_(self.to_q.weight, 0)
nn.init.constant_(self.to_k.weight, 0)
nn.init.constant_(self.to_v.weight, 0)
nn.init.constant_(self.to_out[0].weight, 0)
nn.init.constant_(self.to_out[0].bias, 0)
def forward(self, x, context=None, mask=None):
# if context is None:
# print(f'[Temp Attn] x={x.shape},context=None')
# else:
# print(f'[Temp Attn] x={x.shape},context={context.shape}')
nh = self.heads
out = x
q = self.to_q(out)
# if context is not None:
# print(f'temporal context 1 ={context.shape}')
# print(f'x={x.shape}')
context = default(context, x)
# print(f'temporal context 2 ={context.shape}')
k = self.to_k(context)
v = self.to_v(context)
# print(f'q ={q.shape},k={k.shape}')
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=nh), (q, k, v))
sim = einsum('b i d, b j d -> b i j', q, k) * self.scale
if self.use_relative_position:
len_q, len_k, len_v = q.shape[1], k.shape[1], v.shape[1]
k2 = self.relative_position_k(len_q, len_k)
sim2 = einsum('b t d, t s d -> b t s', q, k2) * self.scale # TODO check
sim += sim2
# print('mask',mask)
if exists(self.mask):
if mask is None:
mask = self.mask.to(sim.device)
else:
mask = self.mask.to(sim.device).bool() & mask #.to(sim.device)
else:
mask = mask
# if self.img_video_joint_train:
# # process mask (make mask same shape with sim)
# c, h, w = mask.shape
# c, t, s = sim.shape
# # assert(h == w and t == s),f"mask={mask.shape}, sim={sim.shape}, h={h}, w={w}, t={t}, s={s}"
# if h > t:
# mask = mask[:, :t, :]
# elif h < t: # pad zeros to mask (no attention) only initial mask =1 area compute weights
# mask_ = torch.zeros([c,t,w]).to(mask.device)
# mask_[:, :h, :] = mask
# mask = mask_
# c, h, w = mask.shape
# if w > s:
# mask = mask[:, :, :s]
# elif w < s: # pad zeros to mask
# mask_ = torch.zeros([c,h,s]).to(mask.device)
# mask_[:, :, :w] = mask
# mask = mask_
# max_neg_value = -torch.finfo(sim.dtype).max
# sim = sim.float().masked_fill(mask == 0, max_neg_value)
if mask is not None:
max_neg_value = -1e9
sim = sim + (1-mask.float()) * max_neg_value # 1=masking,0=no masking
# print('sim after masking: ', sim)
# if torch.isnan(sim).any() or torch.isinf(sim).any() or (not sim.any()):
# print(f'sim [after masking], isnan={torch.isnan(sim).any()}, isinf={torch.isinf(sim).any()}, allzero={not sim.any()}')
attn = sim.softmax(dim=-1)
# print('attn after softmax: ', attn)
# if torch.isnan(attn).any() or torch.isinf(attn).any() or (not attn.any()):
# print(f'attn [after softmax], isnan={torch.isnan(attn).any()}, isinf={torch.isinf(attn).any()}, allzero={not attn.any()}')
# attn = torch.where(torch.isnan(attn), torch.full_like(attn,0), attn)
# if torch.isinf(attn.detach()).any():
# import pdb;pdb.set_trace()
# if torch.isnan(attn.detach()).any():
# import pdb;pdb.set_trace()
out = einsum('b i j, b j d -> b i d', attn, v)
if self.bidirectional_causal_attn:
mask_reverse = torch.triu(torch.ones([1, self.temporal_length, self.temporal_length], device=sim.device))
sim_reverse = sim.float().masked_fill(mask_reverse == 0, max_neg_value)
attn_reverse = sim_reverse.softmax(dim=-1)
out_reverse = einsum('b i j, b j d -> b i d', attn_reverse, v)
out += out_reverse
if self.use_relative_position:
v2 = self.relative_position_v(len_q, len_v)
out2 = einsum('b t s, t s d -> b t d', attn, v2) # TODO check
out += out2 # TODO check:先add还是先merge head?先计算rpr,on split head之后的数据,然后再merge。
out = rearrange(out, '(b h) n d -> b n (h d)', h=nh) # merge head
return self.to_out(out)
class CrossAttention(nn.Module):
    """Multi-head attention over `context` (cross) or `x` itself (self).

    When xformers is available, the instance's ``forward`` is replaced at
    construction time by ``efficient_forward``.
    """

    def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.,
                 sa_shared_kv=False, shared_type='only_first', **kwargs,):
        super().__init__()
        inner_dim = dim_head * heads
        # Self-attention when no context dimension is provided.
        context_dim = default(context_dim, query_dim)
        self.sa_shared_kv = sa_shared_kv
        # NOTE: only 'only_first' is actually implemented in forward(); the
        # other values pass this assert but raise NotImplementedError later.
        assert(shared_type in ['only_first', 'all_frames', 'first_and_prev', 'only_prev', 'full', 'causal', 'full_qkv'])
        self.shared_type = shared_type
        self.dim_head = dim_head
        self.scale = dim_head ** -0.5
        self.heads = heads
        self.to_q = nn.Linear(query_dim, inner_dim, bias=False)
        self.to_k = nn.Linear(context_dim, inner_dim, bias=False)
        self.to_v = nn.Linear(context_dim, inner_dim, bias=False)
        self.to_out = nn.Sequential(
            nn.Linear(inner_dim, query_dim),
            nn.Dropout(dropout)
        )
        if XFORMERS_IS_AVAILBLE:
            # Instance-level override: all calls route to the xformers path.
            self.forward = self.efficient_forward

    def forward(self, x, context=None, mask=None):
        """Plain attention path (used when xformers is unavailable).

        mask: boolean, True = keep; masked positions are filled with -inf.
        """
        h = self.heads
        b = x.shape[0]
        q = self.to_q(x)
        context = default(context, x)
        k = self.to_k(context)
        v = self.to_v(context)
        if self.sa_shared_kv:
            if self.shared_type == 'only_first':
                # Share the first batch element's keys/values across the batch.
                k,v = map(lambda xx: rearrange(xx[0].unsqueeze(0), 'b n c -> (b n) c').unsqueeze(0).repeat(b,1,1),
                          (k,v))
            else:
                raise NotImplementedError
        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))
        sim = einsum('b i d, b j d -> b i j', q, k) * self.scale
        if exists(mask):
            mask = rearrange(mask, 'b ... -> b (...)')
            max_neg_value = -torch.finfo(sim.dtype).max
            mask = repeat(mask, 'b j -> (b h) () j', h=h)
            sim.masked_fill_(~mask, max_neg_value)
        # attention, what we cannot get enough of
        attn = sim.softmax(dim=-1)
        out = einsum('b i j, b j d -> b i d', attn, v)
        out = rearrange(out, '(b h) n d -> b n (h d)', h=h)
        return self.to_out(out)

    def efficient_forward(self, x, context=None, mask=None):
        """xformers memory-efficient attention path (masking unsupported)."""
        q = self.to_q(x)
        context = default(context, x)
        k = self.to_k(context)
        v = self.to_v(context)
        b, _, _ = q.shape
        # (b, n, h*d) -> (b*h, n, d) layout expected by xformers.
        q, k, v = map(
            lambda t: t.unsqueeze(3)
            .reshape(b, t.shape[1], self.heads, self.dim_head)
            .permute(0, 2, 1, 3)
            .reshape(b * self.heads, t.shape[1], self.dim_head)
            .contiguous(),
            (q, k, v),
        )
        # actually compute the attention, what we cannot get enough of
        out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None, op=None)
        if exists(mask):
            raise NotImplementedError
        out = (
            out.unsqueeze(0)
            .reshape(b, self.heads, out.shape[1], self.dim_head)
            .permute(0, 2, 1, 3)
            .reshape(b, out.shape[1], self.heads * self.dim_head)
        )
        return self.to_out(out)
class VideoSpatialCrossAttention(CrossAttention):
    """Spatial cross-attention applied per-frame to a 5-D video tensor."""

    def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0):
        super().__init__(query_dim, context_dim, heads, dim_head, dropout)

    def forward(self, x, context=None, mask=None):
        """x: video features of shape (b, c, t, h, w)."""
        b, c, t, h, w = x.shape
        if context is not None:
            # Frames are folded into the batch axis by spatial_attn_reshape,
            # so the per-video context must be replicated once per frame.
            context = context.repeat(t, 1, 1)
        # BUG FIX: the original called `super.forward(...)`, which accesses the
        # builtin `super` type itself and raises AttributeError at runtime;
        # the bound parent method is obtained via `super().forward(...)`.
        x = super().forward(spatial_attn_reshape(x), context=context) + x
        # NOTE(review): the residual adds the original 5-D `x` to the reshaped
        # attention output — confirm spatial_attn_reshape's output shape makes
        # this broadcast-compatible; otherwise the residual should be taken on
        # the reshaped tensor instead.
        return spatial_attn_reshape_back(x, b, h)
class BasicTransformerBlockST(nn.Module):
def __init__(self,
# Spatial Stuff
dim,
n_heads,
d_head,
dropout=0.,
context_dim=None,
gated_ff=True, | checkpoint=True, | 0 | 2023-12-06 07:27:45+00:00 | 8k |
TianxingWu/FreeInit | examples/AnimateDiff/animatediff/models/unet_blocks.py | [
{
"identifier": "Transformer3DModel",
"path": "examples/AnimateDiff/animatediff/models/attention.py",
"snippet": "class Transformer3DModel(ModelMixin, ConfigMixin):\n @register_to_config\n def __init__(\n self,\n num_attention_heads: int = 16,\n attention_head_dim: int = 88,\n... | import torch
import pdb
from torch import nn
from .attention import Transformer3DModel
from .resnet import Downsample3D, ResnetBlock3D, Upsample3D
from .motion_module import get_motion_module | 5,160 |
for resnet, motion_module in zip(self.resnets, self.motion_modules):
if self.training and self.gradient_checkpointing:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs)
return custom_forward
hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)
if motion_module is not None:
hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states)
else:
hidden_states = resnet(hidden_states, temb)
# add motion module
hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states
output_states += (hidden_states,)
if self.downsamplers is not None:
for downsampler in self.downsamplers:
hidden_states = downsampler(hidden_states)
output_states += (hidden_states,)
return hidden_states, output_states
class CrossAttnUpBlock3D(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
prev_output_channel: int,
temb_channels: int,
dropout: float = 0.0,
num_layers: int = 1,
resnet_eps: float = 1e-6,
resnet_time_scale_shift: str = "default",
resnet_act_fn: str = "swish",
resnet_groups: int = 32,
resnet_pre_norm: bool = True,
attn_num_head_channels=1,
cross_attention_dim=1280,
output_scale_factor=1.0,
add_upsample=True,
dual_cross_attention=False,
use_linear_projection=False,
only_cross_attention=False,
upcast_attention=False,
unet_use_cross_frame_attention=None,
unet_use_temporal_attention=None,
use_inflated_groupnorm=None,
use_motion_module=None,
motion_module_type=None,
motion_module_kwargs=None,
):
super().__init__()
resnets = []
attentions = []
motion_modules = []
self.has_cross_attention = True
self.attn_num_head_channels = attn_num_head_channels
for i in range(num_layers):
res_skip_channels = in_channels if (i == num_layers - 1) else out_channels
resnet_in_channels = prev_output_channel if i == 0 else out_channels
resnets.append(
ResnetBlock3D(
in_channels=resnet_in_channels + res_skip_channels,
out_channels=out_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=resnet_groups,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
use_inflated_groupnorm=use_inflated_groupnorm,
)
)
if dual_cross_attention:
raise NotImplementedError
attentions.append(
Transformer3DModel(
attn_num_head_channels,
out_channels // attn_num_head_channels,
in_channels=out_channels,
num_layers=1,
cross_attention_dim=cross_attention_dim,
norm_num_groups=resnet_groups,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention,
upcast_attention=upcast_attention,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
)
)
motion_modules.append(
get_motion_module(
in_channels=out_channels,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
) if use_motion_module else None
)
self.attentions = nn.ModuleList(attentions)
self.resnets = nn.ModuleList(resnets)
self.motion_modules = nn.ModuleList(motion_modules)
if add_upsample:
| # Adapted from https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/unet_2d_blocks.py
def get_down_block(
down_block_type,
num_layers,
in_channels,
out_channels,
temb_channels,
add_downsample,
resnet_eps,
resnet_act_fn,
attn_num_head_channels,
resnet_groups=None,
cross_attention_dim=None,
downsample_padding=None,
dual_cross_attention=False,
use_linear_projection=False,
only_cross_attention=False,
upcast_attention=False,
resnet_time_scale_shift="default",
unet_use_cross_frame_attention=None,
unet_use_temporal_attention=None,
use_inflated_groupnorm=None,
use_motion_module=None,
motion_module_type=None,
motion_module_kwargs=None,
):
down_block_type = down_block_type[7:] if down_block_type.startswith("UNetRes") else down_block_type
if down_block_type == "DownBlock3D":
return DownBlock3D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
temb_channels=temb_channels,
add_downsample=add_downsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
downsample_padding=downsample_padding,
resnet_time_scale_shift=resnet_time_scale_shift,
use_inflated_groupnorm=use_inflated_groupnorm,
use_motion_module=use_motion_module,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
elif down_block_type == "CrossAttnDownBlock3D":
if cross_attention_dim is None:
raise ValueError("cross_attention_dim must be specified for CrossAttnDownBlock3D")
return CrossAttnDownBlock3D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
temb_channels=temb_channels,
add_downsample=add_downsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
downsample_padding=downsample_padding,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=attn_num_head_channels,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention,
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
use_inflated_groupnorm=use_inflated_groupnorm,
use_motion_module=use_motion_module,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
raise ValueError(f"{down_block_type} does not exist.")
def get_up_block(
    up_block_type,
    num_layers,
    in_channels,
    out_channels,
    prev_output_channel,
    temb_channels,
    add_upsample,
    resnet_eps,
    resnet_act_fn,
    attn_num_head_channels,
    resnet_groups=None,
    cross_attention_dim=None,
    dual_cross_attention=False,
    use_linear_projection=False,
    only_cross_attention=False,
    upcast_attention=False,
    resnet_time_scale_shift="default",
    unet_use_cross_frame_attention=None,
    unet_use_temporal_attention=None,
    use_inflated_groupnorm=None,
    use_motion_module=None,
    motion_module_type=None,
    motion_module_kwargs=None,
):
    """Factory mapping a block-type name to a constructed up block.

    A legacy "UNetRes" prefix on ``up_block_type`` is stripped and ignored.
    Supported types are "UpBlock3D" and "CrossAttnUpBlock3D"; any other name
    raises ValueError. "CrossAttnUpBlock3D" additionally requires
    ``cross_attention_dim``.
    """
    if up_block_type.startswith("UNetRes"):
        up_block_type = up_block_type[len("UNetRes"):]

    # Arguments accepted by every up-block variant.
    shared_kwargs = dict(
        num_layers=num_layers,
        in_channels=in_channels,
        out_channels=out_channels,
        prev_output_channel=prev_output_channel,
        temb_channels=temb_channels,
        add_upsample=add_upsample,
        resnet_eps=resnet_eps,
        resnet_act_fn=resnet_act_fn,
        resnet_groups=resnet_groups,
        resnet_time_scale_shift=resnet_time_scale_shift,
        use_inflated_groupnorm=use_inflated_groupnorm,
        use_motion_module=use_motion_module,
        motion_module_type=motion_module_type,
        motion_module_kwargs=motion_module_kwargs,
    )

    if up_block_type == "UpBlock3D":
        return UpBlock3D(**shared_kwargs)

    if up_block_type == "CrossAttnUpBlock3D":
        if cross_attention_dim is None:
            raise ValueError("cross_attention_dim must be specified for CrossAttnUpBlock3D")
        return CrossAttnUpBlock3D(
            cross_attention_dim=cross_attention_dim,
            attn_num_head_channels=attn_num_head_channels,
            dual_cross_attention=dual_cross_attention,
            use_linear_projection=use_linear_projection,
            only_cross_attention=only_cross_attention,
            upcast_attention=upcast_attention,
            unet_use_cross_frame_attention=unet_use_cross_frame_attention,
            unet_use_temporal_attention=unet_use_temporal_attention,
            **shared_kwargs,
        )

    raise ValueError(f"{up_block_type} does not exist.")
class UNetMidBlock3DCrossAttn(nn.Module):
    """Middle block of the 3D UNet.

    Layout: resnet, then ``num_layers`` repetitions of
    (cross-attention, optional motion module, resnet). Channel count is
    preserved (input and output both use ``in_channels``).
    """

    def __init__(
        self,
        in_channels: int,
        temb_channels: int,
        dropout: float = 0.0,
        num_layers: int = 1,
        resnet_eps: float = 1e-6,
        resnet_time_scale_shift: str = "default",
        resnet_act_fn: str = "swish",
        resnet_groups: int = 32,
        resnet_pre_norm: bool = True,
        attn_num_head_channels=1,
        output_scale_factor=1.0,
        cross_attention_dim=1280,
        dual_cross_attention=False,
        use_linear_projection=False,
        upcast_attention=False,
        unet_use_cross_frame_attention=None,
        unet_use_temporal_attention=None,
        use_inflated_groupnorm=None,
        use_motion_module=None,
        motion_module_type=None,
        motion_module_kwargs=None,
    ):
        super().__init__()

        self.has_cross_attention = True
        self.attn_num_head_channels = attn_num_head_channels
        # Fall back to a group count derived from the channel width.
        resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)

        # there is always at least one resnet
        resnets = [
            ResnetBlock3D(
                in_channels=in_channels,
                out_channels=in_channels,
                temb_channels=temb_channels,
                eps=resnet_eps,
                groups=resnet_groups,
                dropout=dropout,
                time_embedding_norm=resnet_time_scale_shift,
                non_linearity=resnet_act_fn,
                output_scale_factor=output_scale_factor,
                pre_norm=resnet_pre_norm,
                use_inflated_groupnorm=use_inflated_groupnorm,
            )
        ]
        attentions = []
        motion_modules = []

        for _ in range(num_layers):
            if dual_cross_attention:
                raise NotImplementedError
            attentions.append(
                Transformer3DModel(
                    attn_num_head_channels,
                    in_channels // attn_num_head_channels,
                    in_channels=in_channels,
                    num_layers=1,
                    cross_attention_dim=cross_attention_dim,
                    norm_num_groups=resnet_groups,
                    use_linear_projection=use_linear_projection,
                    upcast_attention=upcast_attention,
                    unet_use_cross_frame_attention=unet_use_cross_frame_attention,
                    unet_use_temporal_attention=unet_use_temporal_attention,
                )
            )
            # Optional temporal (motion) module; entry is None when disabled so
            # the per-layer zip in forward() stays aligned.
            motion_modules.append(
                get_motion_module(
                    in_channels=in_channels,
                    motion_module_type=motion_module_type,
                    motion_module_kwargs=motion_module_kwargs,
                ) if use_motion_module else None
            )
            resnets.append(
                ResnetBlock3D(
                    in_channels=in_channels,
                    out_channels=in_channels,
                    temb_channels=temb_channels,
                    eps=resnet_eps,
                    groups=resnet_groups,
                    dropout=dropout,
                    time_embedding_norm=resnet_time_scale_shift,
                    non_linearity=resnet_act_fn,
                    output_scale_factor=output_scale_factor,
                    pre_norm=resnet_pre_norm,
                    use_inflated_groupnorm=use_inflated_groupnorm,
                )
            )

        self.attentions = nn.ModuleList(attentions)
        self.resnets = nn.ModuleList(resnets)
        self.motion_modules = nn.ModuleList(motion_modules)

    def forward(self, hidden_states, temb=None, encoder_hidden_states=None, attention_mask=None):
        """Run the mid block.

        Args:
            hidden_states: input feature map (assumed (b, c, f, h, w) video
                features — TODO confirm against ResnetBlock3D).
            temb: optional timestep embedding consumed by the resnets.
            encoder_hidden_states: conditioning tokens for cross-attention.
            attention_mask: accepted for interface parity but unused here.
        """
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet, motion_module in zip(self.attentions, self.resnets[1:], self.motion_modules):
            hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states).sample
            hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states
            hidden_states = resnet(hidden_states, temb)
        return hidden_states
class CrossAttnDownBlock3D(nn.Module):
    """Down-sampling UNet block with cross-attention.

    Layout: ``num_layers`` x (resnet -> cross-attention -> optional motion
    module), followed by an optional Downsample3D. ``forward`` also returns the
    intermediate features used as skip connections by the up blocks.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        temb_channels: int,
        dropout: float = 0.0,
        num_layers: int = 1,
        resnet_eps: float = 1e-6,
        resnet_time_scale_shift: str = "default",
        resnet_act_fn: str = "swish",
        resnet_groups: int = 32,
        resnet_pre_norm: bool = True,
        attn_num_head_channels=1,
        cross_attention_dim=1280,
        output_scale_factor=1.0,
        downsample_padding=1,
        add_downsample=True,
        dual_cross_attention=False,
        use_linear_projection=False,
        only_cross_attention=False,
        upcast_attention=False,
        unet_use_cross_frame_attention=None,
        unet_use_temporal_attention=None,
        use_inflated_groupnorm=None,
        use_motion_module=None,
        motion_module_type=None,
        motion_module_kwargs=None,
    ):
        super().__init__()
        resnets = []
        attentions = []
        motion_modules = []

        self.has_cross_attention = True
        self.attn_num_head_channels = attn_num_head_channels

        for i in range(num_layers):
            # Only the first resnet changes the channel count.
            in_channels = in_channels if i == 0 else out_channels
            resnets.append(
                ResnetBlock3D(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    temb_channels=temb_channels,
                    eps=resnet_eps,
                    groups=resnet_groups,
                    dropout=dropout,
                    time_embedding_norm=resnet_time_scale_shift,
                    non_linearity=resnet_act_fn,
                    output_scale_factor=output_scale_factor,
                    pre_norm=resnet_pre_norm,
                    use_inflated_groupnorm=use_inflated_groupnorm,
                )
            )
            if dual_cross_attention:
                raise NotImplementedError
            attentions.append(
                Transformer3DModel(
                    attn_num_head_channels,
                    out_channels // attn_num_head_channels,
                    in_channels=out_channels,
                    num_layers=1,
                    cross_attention_dim=cross_attention_dim,
                    norm_num_groups=resnet_groups,
                    use_linear_projection=use_linear_projection,
                    only_cross_attention=only_cross_attention,
                    upcast_attention=upcast_attention,
                    unet_use_cross_frame_attention=unet_use_cross_frame_attention,
                    unet_use_temporal_attention=unet_use_temporal_attention,
                )
            )
            # Optional temporal (motion) module; entry is None when disabled so
            # the per-layer zip in forward() stays aligned.
            motion_modules.append(
                get_motion_module(
                    in_channels=out_channels,
                    motion_module_type=motion_module_type,
                    motion_module_kwargs=motion_module_kwargs,
                ) if use_motion_module else None
            )

        self.attentions = nn.ModuleList(attentions)
        self.resnets = nn.ModuleList(resnets)
        self.motion_modules = nn.ModuleList(motion_modules)

        if add_downsample:
            self.downsamplers = nn.ModuleList(
                [
                    Downsample3D(
                        out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op"
                    )
                ]
            )
        else:
            self.downsamplers = None

        # Toggled externally (e.g. by the parent UNet) to trade compute for memory.
        self.gradient_checkpointing = False

    def forward(self, hidden_states, temb=None, encoder_hidden_states=None, attention_mask=None):
        """Run the block.

        Returns:
            (hidden_states, output_states): final features plus a tuple of the
            per-layer (and post-downsample) features for skip connections.
        Note: ``attention_mask`` is accepted for interface parity but unused.
        """
        output_states = ()

        for resnet, attn, motion_module in zip(self.resnets, self.attentions, self.motion_modules):
            if self.training and self.gradient_checkpointing:
                # Recompute activations during backward to save memory.
                def create_custom_forward(module, return_dict=None):
                    def custom_forward(*inputs):
                        if return_dict is not None:
                            return module(*inputs, return_dict=return_dict)
                        else:
                            return module(*inputs)

                    return custom_forward

                hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)
                hidden_states = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(attn, return_dict=False),
                    hidden_states,
                    encoder_hidden_states,
                )[0]
                if motion_module is not None:
                    # .requires_grad_() — presumably so checkpoint sees a
                    # grad-requiring tensor input; TODO confirm.
                    hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states)
            else:
                hidden_states = resnet(hidden_states, temb)
                hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states).sample

                # add motion module
                hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states

            output_states += (hidden_states,)

        if self.downsamplers is not None:
            for downsampler in self.downsamplers:
                hidden_states = downsampler(hidden_states)

            output_states += (hidden_states,)

        return hidden_states, output_states
class DownBlock3D(nn.Module):
    """Down-sampling UNet block without cross-attention.

    Layout: ``num_layers`` x (resnet -> optional motion module), followed by an
    optional Downsample3D. ``forward`` also returns the intermediate features
    used as skip connections by the up blocks.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        temb_channels: int,
        dropout: float = 0.0,
        num_layers: int = 1,
        resnet_eps: float = 1e-6,
        resnet_time_scale_shift: str = "default",
        resnet_act_fn: str = "swish",
        resnet_groups: int = 32,
        resnet_pre_norm: bool = True,
        output_scale_factor=1.0,
        add_downsample=True,
        downsample_padding=1,
        use_inflated_groupnorm=None,
        use_motion_module=None,
        motion_module_type=None,
        motion_module_kwargs=None,
    ):
        super().__init__()
        resnets = []
        motion_modules = []

        for i in range(num_layers):
            # Only the first resnet changes the channel count.
            in_channels = in_channels if i == 0 else out_channels
            resnets.append(
                ResnetBlock3D(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    temb_channels=temb_channels,
                    eps=resnet_eps,
                    groups=resnet_groups,
                    dropout=dropout,
                    time_embedding_norm=resnet_time_scale_shift,
                    non_linearity=resnet_act_fn,
                    output_scale_factor=output_scale_factor,
                    pre_norm=resnet_pre_norm,
                    use_inflated_groupnorm=use_inflated_groupnorm,
                )
            )
            # Optional temporal (motion) module; entry is None when disabled so
            # the per-layer zip in forward() stays aligned.
            motion_modules.append(
                get_motion_module(
                    in_channels=out_channels,
                    motion_module_type=motion_module_type,
                    motion_module_kwargs=motion_module_kwargs,
                ) if use_motion_module else None
            )

        self.resnets = nn.ModuleList(resnets)
        self.motion_modules = nn.ModuleList(motion_modules)

        if add_downsample:
            self.downsamplers = nn.ModuleList(
                [
                    Downsample3D(
                        out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op"
                    )
                ]
            )
        else:
            self.downsamplers = None

        # Toggled externally (e.g. by the parent UNet) to trade compute for memory.
        self.gradient_checkpointing = False

    def forward(self, hidden_states, temb=None, encoder_hidden_states=None):
        """Run the block.

        Returns:
            (hidden_states, output_states): final features plus a tuple of the
            per-layer (and post-downsample) features for skip connections.
        """
        output_states = ()

        for resnet, motion_module in zip(self.resnets, self.motion_modules):
            if self.training and self.gradient_checkpointing:
                # Recompute activations during backward to save memory.
                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs)

                    return custom_forward

                hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)
                if motion_module is not None:
                    # .requires_grad_() — presumably so checkpoint sees a
                    # grad-requiring tensor input; TODO confirm.
                    hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states)
            else:
                hidden_states = resnet(hidden_states, temb)

                # add motion module
                hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states

            output_states += (hidden_states,)

        if self.downsamplers is not None:
            for downsampler in self.downsamplers:
                hidden_states = downsampler(hidden_states)

            output_states += (hidden_states,)

        return hidden_states, output_states
class CrossAttnUpBlock3D(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
prev_output_channel: int,
temb_channels: int,
dropout: float = 0.0,
num_layers: int = 1,
resnet_eps: float = 1e-6,
resnet_time_scale_shift: str = "default",
resnet_act_fn: str = "swish",
resnet_groups: int = 32,
resnet_pre_norm: bool = True,
attn_num_head_channels=1,
cross_attention_dim=1280,
output_scale_factor=1.0,
add_upsample=True,
dual_cross_attention=False,
use_linear_projection=False,
only_cross_attention=False,
upcast_attention=False,
unet_use_cross_frame_attention=None,
unet_use_temporal_attention=None,
use_inflated_groupnorm=None,
use_motion_module=None,
motion_module_type=None,
motion_module_kwargs=None,
):
super().__init__()
resnets = []
attentions = []
motion_modules = []
self.has_cross_attention = True
self.attn_num_head_channels = attn_num_head_channels
for i in range(num_layers):
res_skip_channels = in_channels if (i == num_layers - 1) else out_channels
resnet_in_channels = prev_output_channel if i == 0 else out_channels
resnets.append(
ResnetBlock3D(
in_channels=resnet_in_channels + res_skip_channels,
out_channels=out_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=resnet_groups,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
use_inflated_groupnorm=use_inflated_groupnorm,
)
)
if dual_cross_attention:
raise NotImplementedError
attentions.append(
Transformer3DModel(
attn_num_head_channels,
out_channels // attn_num_head_channels,
in_channels=out_channels,
num_layers=1,
cross_attention_dim=cross_attention_dim,
norm_num_groups=resnet_groups,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention,
upcast_attention=upcast_attention,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
)
)
motion_modules.append(
get_motion_module(
in_channels=out_channels,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
) if use_motion_module else None
)
self.attentions = nn.ModuleList(attentions)
self.resnets = nn.ModuleList(resnets)
self.motion_modules = nn.ModuleList(motion_modules)
if add_upsample: | self.upsamplers = nn.ModuleList([Upsample3D(out_channels, use_conv=True, out_channels=out_channels)]) | 3 | 2023-12-12 13:11:24+00:00 | 8k |
allenai/unified-io-2 | t5x/examples/unified_io/data/nlp_instruction_following.py | [
{
"identifier": "MULTITASK_TFDS_DATA_DIR",
"path": "t5x/examples/unified_io/config.py",
"snippet": "MULTITASK_TFDS_DATA_DIR = None"
},
{
"identifier": "get_default_vocabulary",
"path": "t5x/examples/unified_io/data/data_utils.py",
"snippet": "def get_default_vocabulary():\n if config.TO... | import functools
import re
import seqio
import tensorflow as tf
from seqio import TaskRegistry
from seqio.preprocessors import rekey
from t5x.examples.unified_io.config import MULTITASK_TFDS_DATA_DIR
from t5x.examples.unified_io.data.data_utils import get_default_vocabulary, apply_keyword_prompt, \
random_element
from t5x.examples.unified_io.data.prompt_definition import Prompt
from t5x.examples.unified_io.data.prompt_dict import TRUNCATE
from t5x.examples.unified_io.modality_processing import unified_io_preprocessor, OUTPUT_FEATURES | 4,964 | 1100, 1101, 1102, 1103, 1104, 1105, 1106, 1107, 1108, 1109, 1110, 1111, 1112, 1113, 1114, 1115,
1116, 1117, 1118, 1119, 1120, 1121, 1122, 1123, 1124, 1125, 1126, 1127, 1128, 1129, 1130, 1131,
1132, 1133, 1134, 1136, 1137, 1138, 1139, 1140, 1141, 1142, 1143, 1144, 1145, 1168, 1169, 1170,
1171, 1172, 1173, 1174, 1175, 1176, 1177, 1178, 1179, 1180, 1181, 1182, 1183, 1184, 1185, 1218,
1219, 1220, 1221, 1222, 1223, 1224, 1225, 1226, 1227, 1228, 1229, 1230, 1231, 1232, 1233, 1234,
1235, 1236, 1237, 1238, 1239, 1240, 1241, 1242, 1243, 1244, 1245, 1246, 1247, 1248, 1249, 1250,
1251, 1252, 1253, 1254, 1255, 1256, 1257, 1258, 1259, 1260, 1261, 1262, 1263, 1264, 1265, 1266,
1267, 1268, 1269, 1270, 1271, 1272, 1273, 1274, 1275, 1276, 1277, 1278, 1279, 1280, 1281, 1282,
1323, 1324, 1329, 1330, 1334, 1335, 1350, 1351, 1352, 1353, 1365, 1367, 1370, 1371, 1373, 1374,
1375, 1376, 1377, 1395, 1396, 1397, 1402, 1414, 1432, 1433, 1435, 1436, 1490, 1491, 1492, 1493,
1494, 1496, 1497, 1514, 1537, 1538, 1539, 1543, 1544, 1545, 1546, 1561, 1569, 1570, 1571, 1574,
1575, 1576, 1577, 1588, 1591, 1610, 1611, 1616, 1617, 1618, 1619, 1620, 1621, 1626, 1627, 1628,
1629, 1646, 1647, 1648, 1649, 1650, 1651, 1652, 1654, 1655, 1662, 1663, 1666, 1667, 1676, 1677,
1685, 1686, 1689, 1690, 1691, 1692
]
def filter_by_len(ds, sequence_length):
  """Tokenize the text fields and drop examples that exceed the length budget.

  Args:
    ds: tf.data.Dataset with string "text_inputs"/"text_targets" fields.
    sequence_length: dict mapping those field names to max token counts.

  Returns:
    Dataset with tokenized fields, restricted to examples that fit.
  """
  @seqio.map_over_dataset
  def _encode(ex):
    vocab = get_default_vocabulary()
    for key in ("text_inputs", "text_targets"):
      ex[key] = vocab.encode_tf(ex[key])
    return ex

  def _fits(ex):
    # The -1 reserves one position in each field for the EOS token.
    inputs_ok = len(ex["text_inputs"]) <= sequence_length["text_inputs"] - 1
    targets_ok = len(ex["text_targets"]) <= sequence_length["text_targets"] - 1
    return inputs_ok and targets_ok

  return _encode(ds).filter(_fits)
@seqio.map_over_dataset
def tokenize_with_truncate(x, sequence_length):
  """Tokenize x but truncate from the special TRUNCATE symbol not the end.

  The text *before* the TRUNCATE marker is the portion that may be shortened;
  the text *after* it (e.g. a question or prompt suffix) is always kept whole.

  NOTE(review): maxsplit=2 permits up to three parts but only the first two
  are used below, so any text after a second TRUNCATE marker would be dropped
  silently — presumably the marker occurs at most once; confirm.
  """
  voc = get_default_vocabulary()
  text_inputs = x["text_inputs"]
  parts = tf.strings.split(text_inputs, TRUNCATE, maxsplit=2)
  if tf.shape(parts)[0] == 1:
    # No TRUNCATE marker present: tokenize the whole string as-is.
    x["text_inputs_pretokenized"] = text_inputs
    x["text_inputs"] = voc.encode_tf(parts[0])
  else:
    # Human-readable copy is the input with the marker removed.
    x["text_inputs_pretokenized"] = tf.strings.join([parts[0], parts[1]], "")
    to_truncate = voc.encode_tf(parts[0])
    suffix = voc.encode_tf(parts[1])
    max_input_len = sequence_length["text_inputs"]
    # Keep the suffix intact and clip the prefix to whatever room remains.
    n = max_input_len - tf.shape(suffix)[0] - 1  # -1 for the EOS
    x["text_inputs"] = tf.concat([to_truncate[:n], suffix], 0)
  return x
def filter_non_english(ds, source):
  """Drop non-English examples for data sources known to contain them.

  For "NIv2" this removes tasks whose ids appear in NI_NON_ENGLISH_TASKS;
  for "Flan2021" it removes translation tasks. Other sources pass through.
  """
  if source == "NIv2":
    task_ids = '|'.join(str(x) for x in NI_NON_ENGLISH_TASKS)
    pattern = f"task({task_ids})_.*"
  elif source == "Flan2021":
    pattern = "(wmt[0-9]*_.*)|para_crawl_enes"
  else:
    # Remaining sources are treated as English-only.
    return ds

  def _is_english(ex):
    return not tf.strings.regex_full_match(ex["task_name"], pattern)

  return ds.filter(_is_english)
@seqio.map_over_dataset
def preprocess_flan(ex, name):
  """Convert a raw FLAN example into UIO text fields with a unique example id."""
  # The prefix marks this as a pure-text task in the UIO prompt format.
  text_inputs = tf.strings.join(["[Text] [S] ", ex["inputs"]])
  example_num = tf.strings.as_string(ex["example_num"])
  return {
      "text_inputs": text_inputs,
      "text_targets": ex["targets"],
      "example_id": tf.strings.join([name, example_num], "-"),
  }
def add_flan(name):
  """Register one FLAN-2 sub-mixture (e.g. "T0", "NIv2") as a seqio Task."""
  task_name = f"flan2_{name.lower()}"
  # Hold out the first 2000 training examples as a validation split.
  data_source = seqio.TfdsDataSource(
      tfds_name=f"{task_name}:1.0.0",
      tfds_data_dir=MULTITASK_TFDS_DATA_DIR,
      splits={"train": "train[2000:]", "validation": "train[:2000]"},
  )
  preprocessors = [
      functools.partial(filter_non_english, source=name),
      functools.partial(preprocess_flan, name=task_name),
      filter_by_len,
      unified_io_preprocessor,
  ]
  TaskRegistry.add(
      task_name,
      source=data_source,
      preprocessors=preprocessors,
      output_features=OUTPUT_FEATURES,
  )
# The five FLAN-2 data collections; each is registered as its own seqio task.
FLAN_DATASETS = ["Flan2021", "T0", "NIv2", "CoT", "Dialog"]

for dataset in FLAN_DATASETS:
    add_flan(dataset)

# Weights from https://github.com/google-research/FLAN/blob/main/flan/v2/run_example.py#L65-L73
# git commit 7b33ac0
seqio.MixtureRegistry.add(
    'flan2',
    tasks=[
        ('flan2_flan2021', 0.4), # mixing weight = 40%
        ('flan2_t0', 0.32), # mixing weight = 32%
        ('flan2_niv2', 0.2), # mixing weight = 20%
        ('flan2_cot', 0.05), # mixing weight = 5%
        ('flan2_dialog', 0.03), # mixing weight = 3%
    ])
def preprocess_instruction_context(ds, dataset_name):
|
# Task ids whose language label is not English, extracted from the
# Natural Instructions (NIv2) README; consumed by filter_non_english.
NI_NON_ENGLISH_TASKS = [
    86, 117, 171, 172, 173, 174, 175, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261,
    262, 263, 264, 265, 266, 271, 272, 273, 312, 313, 314, 315, 334, 336, 338, 394, 395, 396, 406,
    407, 408, 409, 410, 411, 412, 414, 415, 416, 417, 424, 425, 426, 427, 432, 433, 434, 435, 436,
    437, 438, 439, 440, 441, 446, 447, 448, 449, 450, 451, 452, 463, 464, 465, 466, 467, 468, 473,
    474, 479, 480, 481, 482, 483, 484, 485, 486, 487, 524, 525, 526, 527, 528, 529, 530, 531, 532,
    533, 534, 535, 536, 537, 538, 539, 540, 541, 542, 543, 544, 545, 546, 548, 549, 551, 552, 553,
    554, 555, 556, 557, 558, 559, 561, 562, 601, 604, 612, 634, 635, 643, 644, 650, 651, 652, 653,
    654, 655, 656, 657, 658, 659, 660, 661, 662, 663, 680, 762, 763, 764, 765, 771, 772, 773, 774,
    775, 776, 777, 778, 779, 780, 781, 782, 783, 784, 785, 786, 787, 788, 789, 790, 791, 792, 793,
    794, 795, 796, 797, 798, 799, 800, 801, 802, 803, 804, 805, 806, 807, 808, 809, 810, 811, 812,
    813, 814, 815, 816, 817, 818, 829, 830, 831, 832, 836, 837, 838, 839, 840, 841, 842, 872, 873,
    877, 878, 896, 910, 911, 912, 913, 914, 938, 939, 940, 941, 942, 943, 944, 945, 946, 947, 948,
    949, 950, 951, 952, 953, 954, 960, 961, 962, 968, 969, 974, 975, 976, 977, 978, 979, 980, 981,
    982, 983, 984, 985, 986, 987, 988, 989, 990, 991, 992, 993, 994, 995, 996, 997, 998, 999, 1000,
    1001, 1002, 1003, 1004, 1005, 1006, 1007, 1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015, 1016,
    1017, 1018, 1019, 1020, 1021, 1022, 1023, 1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031, 1032,
    1033, 1034, 1035, 1036, 1037, 1038, 1039, 1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047, 1048,
    1049, 1050, 1051, 1052, 1053, 1054, 1055, 1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063, 1064,
    1065, 1066, 1067, 1068, 1069, 1070, 1071, 1072, 1073, 1074, 1075, 1076, 1077, 1078, 1079, 1080,
    1081, 1082, 1083, 1084, 1085, 1086, 1090, 1091, 1092, 1093, 1094, 1095, 1096, 1097, 1098, 1099,
    1100, 1101, 1102, 1103, 1104, 1105, 1106, 1107, 1108, 1109, 1110, 1111, 1112, 1113, 1114, 1115,
    1116, 1117, 1118, 1119, 1120, 1121, 1122, 1123, 1124, 1125, 1126, 1127, 1128, 1129, 1130, 1131,
    1132, 1133, 1134, 1136, 1137, 1138, 1139, 1140, 1141, 1142, 1143, 1144, 1145, 1168, 1169, 1170,
    1171, 1172, 1173, 1174, 1175, 1176, 1177, 1178, 1179, 1180, 1181, 1182, 1183, 1184, 1185, 1218,
    1219, 1220, 1221, 1222, 1223, 1224, 1225, 1226, 1227, 1228, 1229, 1230, 1231, 1232, 1233, 1234,
    1235, 1236, 1237, 1238, 1239, 1240, 1241, 1242, 1243, 1244, 1245, 1246, 1247, 1248, 1249, 1250,
    1251, 1252, 1253, 1254, 1255, 1256, 1257, 1258, 1259, 1260, 1261, 1262, 1263, 1264, 1265, 1266,
    1267, 1268, 1269, 1270, 1271, 1272, 1273, 1274, 1275, 1276, 1277, 1278, 1279, 1280, 1281, 1282,
    1323, 1324, 1329, 1330, 1334, 1335, 1350, 1351, 1352, 1353, 1365, 1367, 1370, 1371, 1373, 1374,
    1375, 1376, 1377, 1395, 1396, 1397, 1402, 1414, 1432, 1433, 1435, 1436, 1490, 1491, 1492, 1493,
    1494, 1496, 1497, 1514, 1537, 1538, 1539, 1543, 1544, 1545, 1546, 1561, 1569, 1570, 1571, 1574,
    1575, 1576, 1577, 1588, 1591, 1610, 1611, 1616, 1617, 1618, 1619, 1620, 1621, 1626, 1627, 1628,
    1629, 1646, 1647, 1648, 1649, 1650, 1651, 1652, 1654, 1655, 1662, 1663, 1666, 1667, 1676, 1677,
    1685, 1686, 1689, 1690, 1691, 1692
]
def filter_by_len(ds, sequence_length):
    """Tokenize text features and drop examples that would be truncated.

    Args:
        ds: tf.data.Dataset with string "text_inputs"/"text_targets" features.
        sequence_length: dict giving the max token budget per feature.

    Returns:
        The tokenized dataset, restricted to examples whose tokenized inputs
        and targets both fit the budget (one slot is reserved for EOS).
    """
    @seqio.map_over_dataset
    def tokenize(ex):
        voc = get_default_vocabulary()
        ex["text_inputs"] = voc.encode_tf(ex["text_inputs"])
        ex["text_targets"] = voc.encode_tf(ex["text_targets"])
        return ex
    ds = tokenize(ds)

    def _filter(ex):
        # Use tf.shape(...)[0] rather than len(): inside the traced filter
        # predicate the token dimension is dynamic, and len()/Python `and`
        # on symbolic tensors fail at trace time. Leave one slot for EOS.
        return tf.logical_and(
            tf.shape(ex["text_inputs"])[0] <= sequence_length["text_inputs"] - 1,
            tf.shape(ex["text_targets"])[0] <= sequence_length["text_targets"] - 1,
        )
    return ds.filter(_filter)
@seqio.map_over_dataset
def tokenize_with_truncate(x, sequence_length):
    """Tokenize x but truncate from the special TRUNCATE symbol not the end"""
    voc = get_default_vocabulary()
    text_inputs = x["text_inputs"]
    # Split once on the sentinel: parts[0] is the truncatable prefix,
    # parts[1] (when present) is the suffix that must be kept intact.
    parts = tf.strings.split(text_inputs, TRUNCATE, maxsplit=2)
    # NOTE(review): tensor-valued `if`; this relies on autograph converting it
    # into a graph conditional inside map_over_dataset — confirm when porting.
    if tf.shape(parts)[0] == 1:
        # No TRUNCATE marker: keep and tokenize the whole input unchanged.
        x["text_inputs_pretokenized"] = text_inputs
        x["text_inputs"] = voc.encode_tf(parts[0])
    else:
        # Human-readable copy is the input re-joined without the marker.
        x["text_inputs_pretokenized"] = tf.strings.join([parts[0], parts[1]], "")
        to_truncate = voc.encode_tf(parts[0])
        suffix = voc.encode_tf(parts[1])
        max_input_len = sequence_length["text_inputs"]
        # Token budget left for the prefix after reserving the suffix and EOS.
        n = max_input_len - tf.shape(suffix)[0] - 1 # -1 for the EOS
        x["text_inputs"] = tf.concat([to_truncate[:n], suffix], 0)
    return x
def filter_non_english(ds, source):
    """Drop non-English examples from `ds`, keyed by the source mixture name.

    Args:
        ds: tf.data.Dataset whose elements carry a string "task_name" feature.
        source: name of the originating collection ("NIv2", "Flan2021", ...).

    Returns:
        The filtered dataset; datasets from other sources pass through unchanged.
    """
    if source == "NIv2":
        # NIv2 task names look like "task<id>_..."; exclude the ids listed as
        # non-English in NI_NON_ENGLISH_TASKS. Build the pattern once, outside
        # the traced predicate.
        pattern = f"task({'|'.join(str(x) for x in NI_NON_ENGLISH_TASKS)})_.*"

        def _fn(ex):
            # tf.math.logical_not instead of Python `not`: the predicate is
            # traced into a graph, where `not <Tensor>` is not allowed.
            return tf.math.logical_not(
                tf.strings.regex_full_match(ex["task_name"], pattern))
    elif source == "Flan2021":
        def _fn(ex):
            # WMT translation tasks and para_crawl_enes are not English-only.
            return tf.math.logical_not(
                tf.strings.regex_full_match(ex["task_name"], "(wmt[0-9]*_.*)|para_crawl_enes"))
    else:
        return ds
    return ds.filter(_fn)
@seqio.map_over_dataset
def preprocess_flan(ex, name):
    """Map a raw FLAN example onto the unified text-to-text feature layout."""
    # Prefix the raw input with the prompt markers expected downstream.
    prompted_inputs = tf.strings.join(["[Text] [S] ", ex["inputs"]])
    # Unique id of the form "<task_name>-<example_num>".
    example_id = tf.strings.join(
        [name, tf.strings.as_string(ex["example_num"])], "-")
    return {
        "text_inputs": prompted_inputs,
        "text_targets": ex["targets"],
        "example_id": example_id,
    }
def add_flan(name):
    """Register the FLAN-v2 TFDS collection `name` as a seqio task."""
    full_name = f"flan2_{name.lower()}"
    # Hold out the first 2000 training examples as a validation split.
    split_spec = {
        "train": "train[2000:]",
        "validation": "train[:2000]"
    }
    data_source = seqio.TfdsDataSource(
        tfds_name=f"{full_name}:1.0.0",
        tfds_data_dir=MULTITASK_TFDS_DATA_DIR,
        splits=split_spec,
    )
    # Filter out non-English tasks, rename features, enforce length limits,
    # then hand off to the shared unified-io preprocessor.
    preprocessor_chain = [
        functools.partial(filter_non_english, source=name),
        functools.partial(preprocess_flan, name=full_name),
        filter_by_len,
        unified_io_preprocessor,
    ]
    TaskRegistry.add(
        full_name,
        source=data_source,
        preprocessors=preprocessor_chain,
        output_features=OUTPUT_FEATURES,
    )
# The five FLAN-v2 sub-collections; each is registered as task "flan2_<name>"
# via add_flan above.
FLAN_DATASETS = ["Flan2021", "T0", "NIv2", "CoT", "Dialog"]
for dataset in FLAN_DATASETS:
    add_flan(dataset)
# Mixture over all five tasks using the official FLAN-v2 proportions.
# Weights from https://github.com/google-research/FLAN/blob/main/flan/v2/run_example.py#L65-L73
# git commit 7b33ac0
seqio.MixtureRegistry.add(
    'flan2',
    tasks=[
        ('flan2_flan2021', 0.4), # mixing weight = 40%
        ('flan2_t0', 0.32), # mixing weight = 32%
        ('flan2_niv2', 0.2), # mixing weight = 20%
        ('flan2_cot', 0.05), # mixing weight = 5%
        ('flan2_dialog', 0.03), # mixing weight = 3%
    ])
def preprocess_instruction_context(ds, dataset_name): | context_prompts = Prompt().get_prompt_list("NLP Instruction Context", dataset_name) | 4 | 2023-12-12 20:23:33+00:00 | 8k |
SafeAILab/EAGLE | evaluation/gen_ea_answer_vicuna.py | [
{
"identifier": "EaModel",
"path": "model/ea_model.py",
"snippet": "class EaModel(nn.Module):\n\n def __init__(\n self,\n base_model,\n base_model_name_or_path,\n ea_model_path,\n ):\n\n super().__init__()\n self.base_model = base_model\n ... | import argparse
import json
import os
import time
import shortuuid
import ray
from fastchat.llm_judge.common import load_questions
from fastchat.model import get_conversation_template
from tqdm import tqdm
from model.ea_model import EaModel
from model.kv_cache import initialize_past_key_values
from model.utils import *
from model.choices import * | 5,371 |
for idx in range(max_steps):
candidates, cart_candidates_prob, tree_candidates = generate_candidates(
tree_logits,
tree_buffers["tree_indices"],
tree_buffers["retrieve_indices"],
sample_token,
logits_processor
)
logits, hidden_state_new, outputs = tree_decoding(
model,
tree_candidates,
past_key_values,
tree_buffers["tree_position_ids"],
input_ids,
tree_buffers["retrieve_indices_head"],
)
best_candidate, accept_length, sample_p = evaluate_posterior(
logits, candidates, logits_processor, cart_candidates_prob, tree_logits[2], tree_buffers["p_indices"],
tree_candidates, tree_buffers["b_indices"]
)
input_ids, tree_logits, new_token, hidden_state, sample_token = update_inference_inputs(
input_ids,
candidates,
best_candidate,
accept_length,
tree_buffers["retrieve_indices"],
logits_processor,
logits,
tree_logits,
new_token,
past_key_values_data,
current_length_data,
model,
hidden_state,
hidden_state_new,
sample_p
)
if tokenizer.eos_token_id in input_ids[0, input_len:].tolist():
break
if new_token > 1024:
break
if input_ids.shape[1] > 1960:
break
return input_ids, new_token, idx
def run_eval(
        base_model_path,
        ea_model_path,
        model_id,
        question_file,
        question_begin,
        question_end,
        answer_file,
        max_new_token,
        num_choices,
        num_gpus_per_model,
        num_gpus_total,
        max_gpu_memory,
        temperature,
        tree_choices,
):
    """Split the benchmark questions into chunks and dispatch answer
    generation, either in-process or across ray workers (one per model
    replica of `num_gpus_per_model` GPUs)."""
    questions = load_questions(question_file, question_begin, question_end)
    # Kept for parity with the (commented-out) shuffled-id dump below.
    shuffled_ids = [q["question_id"] for q in questions]
    # with open(f"data/{args.bench_name}/model_ids/{args.model_id}.shuffled_ids", "w") as fout:
    #     json.dump(shuffled_ids, fout)

    assert num_gpus_total % num_gpus_per_model == 0
    num_replicas = num_gpus_total // num_gpus_per_model
    use_ray = num_replicas > 1

    if use_ray:
        get_answers_func = ray.remote(num_gpus=num_gpus_per_model)(get_model_answers).remote
    else:
        get_answers_func = get_model_answers

    # One contiguous chunk of questions per replica.
    chunk_size = len(questions) // num_replicas
    ans_handles = [
        get_answers_func(
            base_model_path,
            ea_model_path,
            model_id,
            questions[start: start + chunk_size],
            answer_file,
            max_new_token,
            num_choices,
            num_gpus_per_model,
            max_gpu_memory,
            temperature,
            tree_choices,
        )
        for start in range(0, len(questions), chunk_size)
    ]

    if use_ray:
        # Block until every remote worker has finished writing its answers.
        ray.get(ans_handles)
@torch.inference_mode()
def get_model_answers(
base_model_path,
ea_model_path,
model_id,
questions,
answer_file,
max_new_token,
num_choices,
num_gpus_per_model,
max_gpu_memory,
temperature,
tree_choices,
):
# temperature = 0.0
| """Generate answers with local models.
Usage:
python3 gen_model_answer.py --model-path lmsys/fastchat-t5-3b-v1.0 --model-id fastchat-t5-3b-v1.0
"""
# os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3,4,5,6,7"
def ea_forward(input_ids, model, tokenizer, tree_choices, logits_processor=None, max_steps=512):
    """Run EAGLE speculative decoding for a single prompt.

    Each iteration drafts a token tree with the EA head, verifies all
    candidates against the base model in one tree-attention pass, and commits
    the longest accepted prefix.

    Returns:
        (input_ids, new_token, idx): full sequence including the prompt, the
        number of generated tokens, and the number of draft/verify iterations.
    """
    assert input_ids.shape[0] == 1, "Only support batch size 1 for now!!"
    # Avoid modifying the input_ids in-place
    input_ids = input_ids.clone()
    model.ea_layer.reset_kv()

    # Reuse the cached tree buffers when the tree topology is unchanged.
    if hasattr(model, "tree_choices") and model.tree_choices == tree_choices:
        tree_buffers = model.tree_buffers
    else:
        tree_buffers = generate_tree_buffers(
            tree_choices, device=model.base_model.model.layers[-1].self_attn.q_proj.weight.device
        )
        # Retrieval indices must live on the lm_head device for gathering logits.
        tree_buffers["retrieve_indices_head"] = tree_buffers["retrieve_indices"].to(
            model.base_model.lm_head.weight.device)
        model.tree_buffers = tree_buffers
        model.tree_choices = tree_choices

    # Initialize the past key and value states
    if hasattr(model, "past_key_values"):
        past_key_values = model.past_key_values
        past_key_values_data = model.past_key_values_data
        current_length_data = model.current_length_data
        # Reset the past key and value states
        current_length_data.zero_()
    else:
        (
            past_key_values,
            past_key_values_data,
            current_length_data,
        ) = initialize_past_key_values(model.base_model)
        model.past_key_values = past_key_values
        model.past_key_values_data = past_key_values_data
        model.current_length_data = current_length_data

    input_len = input_ids.shape[1]
    reset_tree_mode(model)
    # Prefill: encode the prompt and draft the first token tree.
    tree_logits, logits, hidden_state, sample_token = initialize_tree(
        input_ids, model, tree_buffers["tree_attn_mask"], past_key_values, logits_processor
    )
    new_token = 0

    for idx in range(max_steps):
        # Expand the drafted tree logits into concrete candidate sequences.
        candidates, cart_candidates_prob, tree_candidates = generate_candidates(
            tree_logits,
            tree_buffers["tree_indices"],
            tree_buffers["retrieve_indices"],
            sample_token,
            logits_processor
        )
        # Verify every candidate with the base model in a single tree pass.
        logits, hidden_state_new, outputs = tree_decoding(
            model,
            tree_candidates,
            past_key_values,
            tree_buffers["tree_position_ids"],
            input_ids,
            tree_buffers["retrieve_indices_head"],
        )
        # Choose the longest draft prefix the base model agrees with.
        best_candidate, accept_length, sample_p = evaluate_posterior(
            logits, candidates, logits_processor, cart_candidates_prob, tree_logits[2], tree_buffers["p_indices"],
            tree_candidates, tree_buffers["b_indices"]
        )
        # Commit accepted tokens, roll the KV cache forward, draft next tree.
        input_ids, tree_logits, new_token, hidden_state, sample_token = update_inference_inputs(
            input_ids,
            candidates,
            best_candidate,
            accept_length,
            tree_buffers["retrieve_indices"],
            logits_processor,
            logits,
            tree_logits,
            new_token,
            past_key_values_data,
            current_length_data,
            model,
            hidden_state,
            hidden_state_new,
            sample_p
        )
        # Stop on EOS, or when the generation/context-length budgets are hit.
        if tokenizer.eos_token_id in input_ids[0, input_len:].tolist():
            break
        if new_token > 1024:
            break
        if input_ids.shape[1] > 1960:
            break
    return input_ids, new_token, idx
def run_eval(
        base_model_path,
        ea_model_path,
        model_id,
        question_file,
        question_begin,
        question_end,
        answer_file,
        max_new_token,
        num_choices,
        num_gpus_per_model,
        num_gpus_total,
        max_gpu_memory,
        temperature,
        tree_choices,
):
    """Split the benchmark questions into chunks and dispatch answer
    generation, either in-process or across ray workers (one per model
    replica of `num_gpus_per_model` GPUs)."""
    questions = load_questions(question_file, question_begin, question_end)
    # Kept for parity with the (commented-out) shuffled-id dump below.
    shuffled_ids = [q["question_id"] for q in questions]
    # with open(f"data/{args.bench_name}/model_ids/{args.model_id}.shuffled_ids", "w") as fout:
    #     json.dump(shuffled_ids, fout)

    assert num_gpus_total % num_gpus_per_model == 0
    num_replicas = num_gpus_total // num_gpus_per_model
    use_ray = num_replicas > 1

    if use_ray:
        get_answers_func = ray.remote(num_gpus=num_gpus_per_model)(get_model_answers).remote
    else:
        get_answers_func = get_model_answers

    # One contiguous chunk of questions per replica.
    chunk_size = len(questions) // num_replicas
    ans_handles = [
        get_answers_func(
            base_model_path,
            ea_model_path,
            model_id,
            questions[start: start + chunk_size],
            answer_file,
            max_new_token,
            num_choices,
            num_gpus_per_model,
            max_gpu_memory,
            temperature,
            tree_choices,
        )
        for start in range(0, len(questions), chunk_size)
    ]

    if use_ray:
        # Block until every remote worker has finished writing its answers.
        ray.get(ans_handles)
@torch.inference_mode()
def get_model_answers(
base_model_path,
ea_model_path,
model_id,
questions,
answer_file,
max_new_token,
num_choices,
num_gpus_per_model,
max_gpu_memory,
temperature,
tree_choices,
):
# temperature = 0.0
| model = EaModel.from_pretrained( | 0 | 2023-12-07 19:08:39+00:00 | 8k |
zju3dv/EasyVolcap | scripts/torchxpbd/extract_unclothed_body.py | [
{
"identifier": "export_mesh",
"path": "easyvolcap/utils/data_utils.py",
"snippet": "def export_mesh(verts: torch.Tensor, faces: torch.Tensor, uv: torch.Tensor = None, img: torch.Tensor = None, uvfaces: torch.Tensor = None, colors: torch.Tensor = None, normals: torch.Tensor = None, filename: str = \"def... | import torch
import argparse
import numpy as np
import sys
from easyvolcap.utils.data_utils import export_mesh, load_mesh
from easyvolcap.utils.mesh_utils import laplacian_smoothing, hierarchical_winding_distance_remesh, get_edges, adjacency, winding_number_nooom, segment_mesh, bidirectional_icp_fitting, loop_subdivision
from easyvolcap.utils.sem_utils import semantic_dim, semantic_list | 4,804 |
# fmt: off
sys.path.append('.')
# fmt: on
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--clothed_input', default='data/xuzhen36/talk/registration/deformation/semantic_mesh.npz')
parser.add_argument('--body_input', default='data/xuzhen36/talk/registration/deformation/semantic_smplh.npz')
parser.add_argument('--body_output', default='data/xuzhen36/talk/registration/deformation/body_mesh.ply')
parser.add_argument('--cloth_output', default='data/xuzhen36/talk/registration/deformation/cloth_mesh.ply')
parser.add_argument('--cloth_list', nargs='+', default=['upper_cloth'])
args = parser.parse_args()
# global arguments
device = 'cuda'
# maybe perform subdivision before hand? and use catmull clark instead of simple subdivision provided by trimesh
# https://onrendering.com/data/papers/catmark/HalfedgeCatmullClark.pdf
# https://github.com/jdupuy/HalfedgeCatmullClark
v0, f0 = load_mesh(args.clothed_input, device)
vs0 = torch.tensor(np.load(args.clothed_input)['verts_semantics'], device=v0.device)
i1 = list(map(lambda x: semantic_list.index(x), args.cloth_list))
i1 = torch.tensor(i1, device=v0.device, dtype=torch.long)
# segment based on vertex semantices
v, f = segment_mesh(v0, f0, vs0, i1, smoothing='edge')
v, f = loop_subdivision(v, f, 1)
# save the results
export_mesh(v, f, filename=args.cloth_output)
# extract body mesh
i0 = list(map(lambda x: semantic_list.index(x), [s for s in semantic_list if s not in args.cloth_list]))
i0 = torch.tensor(i0, device=v.device, dtype=torch.long)
v0, f0 = segment_mesh(v0, f0, vs0, i0, smoothing='edge', dilate=-1)
v0, f0 = loop_subdivision(v0, f0, 1)
v1, f1 = load_mesh(args.body_input, device)
vs1 = torch.tensor(np.load(args.body_input)['verts_semantics'], device=v.device)
v1, f1 = segment_mesh(v1, f1, vs1, i1, smoothing='edge', dilate=3)
v1, f1 = loop_subdivision(v1, f1, 2)
v0, v1 = bidirectional_icp_fitting(v0, f0, v1, f1)
level_set = 0.334
v2, f2 = torch.cat([v0, v1]), torch.cat([f0, f1+len(v0)])
# v, f = v2, f2
v, f = hierarchical_winding_distance_remesh(v2, f2, level_set=level_set)
# 0.334 will produce ripple effects on perfectly normal mesh if thresh of winding number is too low
# (1 - th) / 2 + (level_set - 0.5).abs() > 0.5 - maximum_error
# 0.5 - th / 2 + 0.5 - level_set - 0.5 > - maximum_error
# th / 2 + level_set - 0.5 > maximum_error
# 0.225 + 0.334 - 0.5 = 0.059 > maximum_error
# conditional laplacian smoothing
th = 0.45 # 1-max_error
wn = winding_number_nooom(v, v2, f2) # TODO: use grid sample from previous computation to make this faster
vm = (wn - level_set).abs() < (th / 2)
# compute edges
|
# fmt: off
sys.path.append('.')
# fmt: on
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--clothed_input', default='data/xuzhen36/talk/registration/deformation/semantic_mesh.npz')
parser.add_argument('--body_input', default='data/xuzhen36/talk/registration/deformation/semantic_smplh.npz')
parser.add_argument('--body_output', default='data/xuzhen36/talk/registration/deformation/body_mesh.ply')
parser.add_argument('--cloth_output', default='data/xuzhen36/talk/registration/deformation/cloth_mesh.ply')
parser.add_argument('--cloth_list', nargs='+', default=['upper_cloth'])
args = parser.parse_args()
# global arguments
device = 'cuda'
# maybe perform subdivision before hand? and use catmull clark instead of simple subdivision provided by trimesh
# https://onrendering.com/data/papers/catmark/HalfedgeCatmullClark.pdf
# https://github.com/jdupuy/HalfedgeCatmullClark
v0, f0 = load_mesh(args.clothed_input, device)
vs0 = torch.tensor(np.load(args.clothed_input)['verts_semantics'], device=v0.device)
i1 = list(map(lambda x: semantic_list.index(x), args.cloth_list))
i1 = torch.tensor(i1, device=v0.device, dtype=torch.long)
# segment based on vertex semantices
v, f = segment_mesh(v0, f0, vs0, i1, smoothing='edge')
v, f = loop_subdivision(v, f, 1)
# save the results
export_mesh(v, f, filename=args.cloth_output)
# extract body mesh
i0 = list(map(lambda x: semantic_list.index(x), [s for s in semantic_list if s not in args.cloth_list]))
i0 = torch.tensor(i0, device=v.device, dtype=torch.long)
v0, f0 = segment_mesh(v0, f0, vs0, i0, smoothing='edge', dilate=-1)
v0, f0 = loop_subdivision(v0, f0, 1)
v1, f1 = load_mesh(args.body_input, device)
vs1 = torch.tensor(np.load(args.body_input)['verts_semantics'], device=v.device)
v1, f1 = segment_mesh(v1, f1, vs1, i1, smoothing='edge', dilate=3)
v1, f1 = loop_subdivision(v1, f1, 2)
v0, v1 = bidirectional_icp_fitting(v0, f0, v1, f1)
level_set = 0.334
v2, f2 = torch.cat([v0, v1]), torch.cat([f0, f1+len(v0)])
# v, f = v2, f2
v, f = hierarchical_winding_distance_remesh(v2, f2, level_set=level_set)
# 0.334 will produce ripple effects on perfectly normal mesh if thresh of winding number is too low
# (1 - th) / 2 + (level_set - 0.5).abs() > 0.5 - maximum_error
# 0.5 - th / 2 + 0.5 - level_set - 0.5 > - maximum_error
# th / 2 + level_set - 0.5 > maximum_error
# 0.225 + 0.334 - 0.5 = 0.059 > maximum_error
# conditional laplacian smoothing
th = 0.45 # 1-max_error
wn = winding_number_nooom(v, v2, f2) # TODO: use grid sample from previous computation to make this faster
vm = (wn - level_set).abs() < (th / 2)
# compute edges | e, i, c = get_edges(f) | 4 | 2023-12-07 08:53:42+00:00 | 8k |
minghanqin/LangSplat | scene/dataset_readers.py | [
{
"identifier": "read_extrinsics_text",
"path": "scene/colmap_loader.py",
"snippet": "def read_extrinsics_text(path):\n \"\"\"\n Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py\n \"\"\"\n images = {}\n with open(path, \"r\") as fid:\n while T... | import os
import sys
import numpy as np
import json
from PIL import Image
from typing import NamedTuple
from scene.colmap_loader import read_extrinsics_text, read_intrinsics_text, qvec2rotmat, \
read_extrinsics_binary, read_intrinsics_binary, read_points3D_binary, read_points3D_text
from utils.graphics_utils import getWorld2View2, focal2fov, fov2focal
from pathlib import Path
from plyfile import PlyData, PlyElement
from utils.sh_utils import SH2RGB
from scene.gaussian_model import BasicPointCloud | 4,325 | class CameraInfo(NamedTuple):
uid: int
R: np.array
T: np.array
FovY: np.array
FovX: np.array
image: np.array
image_path: str
image_name: str
width: int
height: int
class SceneInfo(NamedTuple):
    """Aggregated scene description handed from a dataset reader to the Scene."""
    point_cloud: BasicPointCloud  # initial points used to seed the gaussians
    train_cameras: list  # list of CameraInfo for training views
    test_cameras: list  # list of CameraInfo for held-out views
    nerf_normalization: dict  # {"translate", "radius"} from getNerfppNorm
    ply_path: str  # on-disk ply backing point_cloud
def getNerfppNorm(cam_info):
    """Compute the NeRF++-style scene normalization from the camera centers.

    Returns a dict with "translate" (moves the mean camera center to the
    origin) and "radius" (max center-to-mean distance, padded by 10%).
    """
    # Camera center in world space is the translation column of C2W.
    centers = [
        np.linalg.inv(getWorld2View2(cam.R, cam.T))[:3, 3:4]
        for cam in cam_info
    ]

    stacked = np.hstack(centers)  # shape (3, num_cameras)
    mean_center = np.mean(stacked, axis=1, keepdims=True)
    distances = np.linalg.norm(stacked - mean_center, axis=0, keepdims=True)
    diagonal = np.max(distances)

    return {"translate": -mean_center.flatten(), "radius": diagonal * 1.1}
def readColmapCameras(cam_extrinsics, cam_intrinsics, images_folder):
    """Build one CameraInfo per COLMAP image, pairing extrinsics with intrinsics.

    Args:
        cam_extrinsics: dict of COLMAP Image records (qvec/tvec/camera_id/name).
        cam_intrinsics: dict of COLMAP Camera records keyed by camera id.
        images_folder: directory containing the actual image files.

    Returns:
        list of CameraInfo with loaded PIL images and FoVs derived from focals.
    """
    cam_infos = []
    for idx, key in enumerate(cam_extrinsics):
        # In-place progress line on stdout.
        sys.stdout.write('\r')
        # the exact output you're looking for:
        sys.stdout.write("Reading camera {}/{}".format(idx+1, len(cam_extrinsics)))
        sys.stdout.flush()
        extr = cam_extrinsics[key]
        intr = cam_intrinsics[extr.camera_id]
        height = intr.height
        width = intr.width
        uid = intr.id
        # COLMAP stores world-to-camera; the rotation is transposed here for
        # downstream use (see getWorld2View2).
        R = np.transpose(qvec2rotmat(extr.qvec))
        T = np.array(extr.tvec)
        if intr.model=="SIMPLE_PINHOLE":
            # Single shared focal length.
            focal_length_x = intr.params[0]
            FovY = focal2fov(focal_length_x, height)
            FovX = focal2fov(focal_length_x, width)
        elif intr.model=="PINHOLE":
            # Separate focal lengths per axis.
            focal_length_x = intr.params[0]
            focal_length_y = intr.params[1]
            FovY = focal2fov(focal_length_y, height)
            FovX = focal2fov(focal_length_x, width)
        elif intr.model=="SIMPLE_RADIAL":
            # NOTE(review): treated exactly like SIMPLE_PINHOLE — the radial
            # distortion coefficient is silently ignored; confirm the input
            # images are undistorted before relying on this branch.
            focal_length_x = intr.params[0]
            FovY = focal2fov(focal_length_x, height)
            FovX = focal2fov(focal_length_x, width)
        else:
            assert False, "Colmap camera model not handled: only undistorted datasets (PINHOLE or SIMPLE_PINHOLE cameras) supported!"
        image_path = os.path.join(images_folder, os.path.basename(extr.name))
        image_name = os.path.basename(image_path).split(".")[0]
        image = Image.open(image_path)
        cam_info = CameraInfo(uid=uid, R=R, T=T, FovY=FovY, FovX=FovX, image=image, image_path=image_path, image_name=image_name, width=width, height=height)
        cam_infos.append(cam_info)
    sys.stdout.write('\n')
    return cam_infos
def fetchPly(path):
    """Load a PLY point cloud as a BasicPointCloud (colors rescaled to [0, 1])."""
    vertex_data = PlyData.read(path)['vertex']
    xyz = np.vstack([vertex_data['x'], vertex_data['y'], vertex_data['z']]).T
    # Colors are stored as uint8; rescale to [0, 1] floats.
    rgb = np.vstack([vertex_data['red'], vertex_data['green'], vertex_data['blue']]).T / 255.0
    nrm = np.vstack([vertex_data['nx'], vertex_data['ny'], vertex_data['nz']]).T
    return BasicPointCloud(points=xyz, colors=rgb, normals=nrm)
def storePly(path, xyz, rgb):
    """Write an (N, 3) point cloud with uint8 colors to a PLY file.

    Normals are written as zeros since none are available here.
    """
    # Structured dtype matching the vertex layout fetchPly expects back.
    vertex_dtype = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'),
                    ('nx', 'f4'), ('ny', 'f4'), ('nz', 'f4'),
                    ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')]

    normals = np.zeros_like(xyz)
    rows = np.concatenate((xyz, normals, rgb), axis=1)

    vertices = np.empty(xyz.shape[0], dtype=vertex_dtype)
    vertices[:] = list(map(tuple, rows))

    # Create the PlyData object and write to file
    PlyData([PlyElement.describe(vertices, 'vertex')]).write(path)
def readColmapSceneInfo(path, images, eval, llffhold=8):
try:
cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.bin")
cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.bin")
cam_extrinsics = read_extrinsics_binary(cameras_extrinsic_file)
cam_intrinsics = read_intrinsics_binary(cameras_intrinsic_file)
except:
cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.txt")
cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.txt")
cam_extrinsics = read_extrinsics_text(cameras_extrinsic_file)
| #
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use
# under the terms of the LICENSE.md file.
#
# For inquiries contact george.drettakis@inria.fr
#
class CameraInfo(NamedTuple):
    """Per-camera record assembled from a COLMAP reconstruction."""
    uid: int  # camera (intrinsics) id
    R: np.ndarray  # 3x3 rotation (transposed from the COLMAP quaternion)
    T: np.ndarray  # translation vector
    # np.ndarray replaces the original `np.array` annotations: np.array is a
    # function, not a type, so it was meaningless to type checkers.
    FovY: np.ndarray  # vertical FoV; NOTE(review): focal2fov appears to return a float — confirm
    FovX: np.ndarray  # horizontal FoV
    image: np.ndarray  # NOTE(review): readColmapCameras actually stores a PIL.Image here
    image_path: str
    image_name: str  # basename without extension
    width: int
    height: int
class SceneInfo(NamedTuple):
    """Aggregated scene description handed from a dataset reader to the Scene."""
    point_cloud: BasicPointCloud  # initial points used to seed the gaussians
    train_cameras: list  # list of CameraInfo for training views
    test_cameras: list  # list of CameraInfo for held-out views
    nerf_normalization: dict  # {"translate", "radius"} from getNerfppNorm
    ply_path: str  # on-disk ply backing point_cloud
def getNerfppNorm(cam_info):
    """Compute the NeRF++-style scene normalization from the camera centers.

    Returns a dict with "translate" (moves the mean camera center to the
    origin) and "radius" (max center-to-mean distance, padded by 10%).
    """
    # Camera center in world space is the translation column of C2W.
    centers = [
        np.linalg.inv(getWorld2View2(cam.R, cam.T))[:3, 3:4]
        for cam in cam_info
    ]

    stacked = np.hstack(centers)  # shape (3, num_cameras)
    mean_center = np.mean(stacked, axis=1, keepdims=True)
    distances = np.linalg.norm(stacked - mean_center, axis=0, keepdims=True)
    diagonal = np.max(distances)

    return {"translate": -mean_center.flatten(), "radius": diagonal * 1.1}
def readColmapCameras(cam_extrinsics, cam_intrinsics, images_folder):
    """Build one CameraInfo per COLMAP image, pairing extrinsics with intrinsics.

    Args:
        cam_extrinsics: dict of COLMAP Image records (qvec/tvec/camera_id/name).
        cam_intrinsics: dict of COLMAP Camera records keyed by camera id.
        images_folder: directory containing the actual image files.

    Returns:
        list of CameraInfo with loaded PIL images and FoVs derived from focals.
    """
    cam_infos = []
    for idx, key in enumerate(cam_extrinsics):
        # In-place progress line on stdout.
        sys.stdout.write('\r')
        # the exact output you're looking for:
        sys.stdout.write("Reading camera {}/{}".format(idx+1, len(cam_extrinsics)))
        sys.stdout.flush()
        extr = cam_extrinsics[key]
        intr = cam_intrinsics[extr.camera_id]
        height = intr.height
        width = intr.width
        uid = intr.id
        # COLMAP stores world-to-camera; the rotation is transposed here for
        # downstream use (see getWorld2View2).
        R = np.transpose(qvec2rotmat(extr.qvec))
        T = np.array(extr.tvec)
        if intr.model=="SIMPLE_PINHOLE":
            # Single shared focal length.
            focal_length_x = intr.params[0]
            FovY = focal2fov(focal_length_x, height)
            FovX = focal2fov(focal_length_x, width)
        elif intr.model=="PINHOLE":
            # Separate focal lengths per axis.
            focal_length_x = intr.params[0]
            focal_length_y = intr.params[1]
            FovY = focal2fov(focal_length_y, height)
            FovX = focal2fov(focal_length_x, width)
        elif intr.model=="SIMPLE_RADIAL":
            # NOTE(review): treated exactly like SIMPLE_PINHOLE — the radial
            # distortion coefficient is silently ignored; confirm the input
            # images are undistorted before relying on this branch.
            focal_length_x = intr.params[0]
            FovY = focal2fov(focal_length_x, height)
            FovX = focal2fov(focal_length_x, width)
        else:
            assert False, "Colmap camera model not handled: only undistorted datasets (PINHOLE or SIMPLE_PINHOLE cameras) supported!"
        image_path = os.path.join(images_folder, os.path.basename(extr.name))
        image_name = os.path.basename(image_path).split(".")[0]
        image = Image.open(image_path)
        cam_info = CameraInfo(uid=uid, R=R, T=T, FovY=FovY, FovX=FovX, image=image, image_path=image_path, image_name=image_name, width=width, height=height)
        cam_infos.append(cam_info)
    sys.stdout.write('\n')
    return cam_infos
def fetchPly(path):
    """Load a PLY point cloud as a BasicPointCloud (colors rescaled to [0, 1])."""
    vertex_data = PlyData.read(path)['vertex']
    xyz = np.vstack([vertex_data['x'], vertex_data['y'], vertex_data['z']]).T
    # Colors are stored as uint8; rescale to [0, 1] floats.
    rgb = np.vstack([vertex_data['red'], vertex_data['green'], vertex_data['blue']]).T / 255.0
    nrm = np.vstack([vertex_data['nx'], vertex_data['ny'], vertex_data['nz']]).T
    return BasicPointCloud(points=xyz, colors=rgb, normals=nrm)
def storePly(path, xyz, rgb):
    """Write an (N, 3) point cloud with uint8 colors to a PLY file.

    Normals are written as zeros since none are available here.
    """
    # Structured dtype matching the vertex layout fetchPly expects back.
    vertex_dtype = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'),
                    ('nx', 'f4'), ('ny', 'f4'), ('nz', 'f4'),
                    ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')]

    normals = np.zeros_like(xyz)
    rows = np.concatenate((xyz, normals, rgb), axis=1)

    vertices = np.empty(xyz.shape[0], dtype=vertex_dtype)
    vertices[:] = list(map(tuple, rows))

    # Create the PlyData object and write to file
    PlyData([PlyElement.describe(vertices, 'vertex')]).write(path)
def readColmapSceneInfo(path, images, eval, llffhold=8):
try:
cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.bin")
cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.bin")
cam_extrinsics = read_extrinsics_binary(cameras_extrinsic_file)
cam_intrinsics = read_intrinsics_binary(cameras_intrinsic_file)
except:
cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.txt")
cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.txt")
cam_extrinsics = read_extrinsics_text(cameras_extrinsic_file) | cam_intrinsics = read_intrinsics_text(cameras_intrinsic_file) | 1 | 2023-12-11 06:33:35+00:00 | 8k |
alibaba/animate-anything | app_svd.py | [
{
"identifier": "tensor_to_vae_latent",
"path": "utils/common.py",
"snippet": "def tensor_to_vae_latent(t, vae):\n video_length = t.shape[1]\n\n t = rearrange(t, \"b f c h w -> (b f) c h w\")\n latents = vae.encode(t).latent_dist.sample()\n latents = rearrange(latents, \"(b f) c h w -> b c f... | import os
import random
import math
import gradio as gr
import torch
import torchvision.transforms as T
import imageio
from argparse import ArgumentParser
from diffusers.image_processor import VaeImageProcessor
from omegaconf import OmegaConf
from PIL import Image
from diffusers import StableVideoDiffusionPipeline
from utils.common import tensor_to_vae_latent, DDPM_forward_timesteps
from models.pipeline import MaskStableVideoDiffusionPipeline | 4,270 |
# Gradio custom CSS: compact square "toolbutton" styling with zeroed margins.
# Fix: the original property "margin-buttom" is invalid CSS and was silently
# ignored; the four-value form shows the intent was the `margin` shorthand.
css = """
.toolbutton {
    margin: 0em 0em 0em 0em;
    max-width: 2.5em;
    min-width: 2.5em !important;
    height: 2.5em;
}
"""
class AnimateController:
def __init__(self, pretrained_model_path: str, validation_data,
output_dir, motion_mask = False, motion_strength = False):
# For mixed precision training we cast the text_encoder and vae weights to half-precision
# as these models are only used for inference, keeping weights in full precision is not required.
device=torch.device("cuda")
self.validation_data = validation_data
self.output_dir = output_dir
# self.pipeline = StableVideoDiffusionPipeline.from_pretrained(pretrained_model_path,
# torch_dtype=torch.float16, variant="fp16").to(device)
self.pipeline = StableVideoDiffusionPipeline.from_pretrained(pretrained_model_path).to(device)
self.sample_idx = 0
def animate(
self,
init_img,
sample_step_slider,
seed_textbox,
fps_textbox,
num_frames_textbox,
motion_bucket_id_slider,
progress=gr.Progress(),
):
if seed_textbox != -1 and seed_textbox != "":
torch.manual_seed(int(seed_textbox))
else:
torch.seed()
seed = torch.initial_seed()
vae = self.pipeline.vae
diffusion_scheduler = self.pipeline.scheduler
validation_data = self.validation_data
validation_data.fps = int(fps_textbox)
validation_data.num_frames = int(num_frames_textbox)
validation_data.motion_bucket_id = int(motion_bucket_id_slider)
vae_processor = VaeImageProcessor()
device = vae.device
dtype = vae.dtype
pimg = Image.fromarray(init_img["background"]).convert('RGB')
width, height = pimg.size
scale = math.sqrt(width*height / (validation_data.height*validation_data.width))
block_size=64
height = round(height/scale/block_size)*block_size
width = round(width/scale/block_size)*block_size
input_image = vae_processor.preprocess(pimg, height, width)
input_image = input_image.unsqueeze(0).to(dtype).to(device)
input_image_latents = tensor_to_vae_latent(input_image, vae)
np_mask = init_img["layers"][0][:,:,3]
np_mask[np_mask!=0] = 255
if np_mask.sum() == 0:
np_mask[:] = 255
b, c, _, h, w = input_image_latents.shape
initial_latents, timesteps = DDPM_forward_timesteps(input_image_latents,
sample_step_slider, validation_data.num_frames, diffusion_scheduler)
mask = T.ToTensor()(np_mask).to(dtype).to(device)
b, c, f, h, w = initial_latents.shape
mask = T.Resize([h, w], antialias=False)(mask)
motion_mask = self.pipeline.unet.config.in_channels == 9
with torch.no_grad():
if motion_mask:
|
# Gradio custom CSS: compact square "toolbutton" styling with zeroed margins.
# Fix: the original property "margin-buttom" is invalid CSS and was silently
# ignored; the four-value form shows the intent was the `margin` shorthand.
css = """
.toolbutton {
    margin: 0em 0em 0em 0em;
    max-width: 2.5em;
    min-width: 2.5em !important;
    height: 2.5em;
}
"""
class AnimateController:
def __init__(self, pretrained_model_path: str, validation_data,
output_dir, motion_mask = False, motion_strength = False):
# For mixed precision training we cast the text_encoder and vae weights to half-precision
# as these models are only used for inference, keeping weights in full precision is not required.
device=torch.device("cuda")
self.validation_data = validation_data
self.output_dir = output_dir
# self.pipeline = StableVideoDiffusionPipeline.from_pretrained(pretrained_model_path,
# torch_dtype=torch.float16, variant="fp16").to(device)
self.pipeline = StableVideoDiffusionPipeline.from_pretrained(pretrained_model_path).to(device)
self.sample_idx = 0
def animate(
self,
init_img,
sample_step_slider,
seed_textbox,
fps_textbox,
num_frames_textbox,
motion_bucket_id_slider,
progress=gr.Progress(),
):
if seed_textbox != -1 and seed_textbox != "":
torch.manual_seed(int(seed_textbox))
else:
torch.seed()
seed = torch.initial_seed()
vae = self.pipeline.vae
diffusion_scheduler = self.pipeline.scheduler
validation_data = self.validation_data
validation_data.fps = int(fps_textbox)
validation_data.num_frames = int(num_frames_textbox)
validation_data.motion_bucket_id = int(motion_bucket_id_slider)
vae_processor = VaeImageProcessor()
device = vae.device
dtype = vae.dtype
pimg = Image.fromarray(init_img["background"]).convert('RGB')
width, height = pimg.size
scale = math.sqrt(width*height / (validation_data.height*validation_data.width))
block_size=64
height = round(height/scale/block_size)*block_size
width = round(width/scale/block_size)*block_size
input_image = vae_processor.preprocess(pimg, height, width)
input_image = input_image.unsqueeze(0).to(dtype).to(device)
input_image_latents = tensor_to_vae_latent(input_image, vae)
np_mask = init_img["layers"][0][:,:,3]
np_mask[np_mask!=0] = 255
if np_mask.sum() == 0:
np_mask[:] = 255
b, c, _, h, w = input_image_latents.shape
initial_latents, timesteps = DDPM_forward_timesteps(input_image_latents,
sample_step_slider, validation_data.num_frames, diffusion_scheduler)
mask = T.ToTensor()(np_mask).to(dtype).to(device)
b, c, f, h, w = initial_latents.shape
mask = T.Resize([h, w], antialias=False)(mask)
motion_mask = self.pipeline.unet.config.in_channels == 9
with torch.no_grad():
if motion_mask: | video_frames = MaskStableVideoDiffusionPipeline.__call__( | 2 | 2023-12-07 08:26:29+00:00 | 8k |
yohanshin/WHAM | lib/models/wham.py | [
{
"identifier": "constants",
"path": "configs/constants.py",
"snippet": "IMG_FEAT_DIM = {\n 'resnet': 2048,\n 'vit': 1024\n}\nN_JOINTS = 17\n PARSED_DATA = f'{root}/parsed_data'\n THREEDPW_PTH = f'{root}/3DPW'\n RICH_PTH = f'{root}/RICH'\n EMDB_PTH = f'{root}/EMDB'\n NUM_JOINTS = N_... | import torch
from torch import nn
from configs import constants as _C
from lib.utils import transforms
from lib.models.layers import (MotionEncoder, MotionDecoder, TrajectoryDecoder, TrajectoryRefiner, Integrator,
rollout_global_motion, compute_camera_pose, reset_root_velocity, compute_camera_motion) | 4,909 | from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
class Network(nn.Module):
def __init__(self,
smpl,
pose_dr=0.1,
d_embed=512,
n_layers=3,
d_feat=2048,
rnn_type='LSTM',
**kwargs
):
super().__init__()
n_joints = _C.KEYPOINTS.NUM_JOINTS
self.smpl = smpl
in_dim = n_joints * 2 + 3
d_context = d_embed + n_joints * 3
self.mask_embedding = nn.Parameter(torch.zeros(1, 1, n_joints, 2))
# Module 1. Motion Encoder
self.motion_encoder = MotionEncoder(in_dim=in_dim,
d_embed=d_embed,
pose_dr=pose_dr,
rnn_type=rnn_type,
n_layers=n_layers,
n_joints=n_joints)
self.trajectory_decoder = TrajectoryDecoder(d_embed=d_context,
rnn_type=rnn_type,
n_layers=n_layers)
# Module 3. Feature Integrator
self.integrator = Integrator(in_channel=d_feat + d_context,
out_channel=d_context)
# Module 4. Motion Decoder
self.motion_decoder = MotionDecoder(d_embed=d_context,
rnn_type=rnn_type,
n_layers=n_layers)
# Module 5. Trajectory Refiner
self.trajectory_refiner = TrajectoryRefiner(d_embed=d_context,
d_hidden=d_embed,
rnn_type=rnn_type,
n_layers=2)
@property
def __version__(self, ):
return 'v07'
def compute_global_feet(self, duplicate=False):
# Global motion
init_trans = None# if self.training else self.output.full_cam.reshape(self.b, self.f, 3)[:, [0]]
root_world, trans = rollout_global_motion(self.pred_root, self.pred_vel, init_trans)
# # Compute world-coordinate motion
# if not duplicate:
# self.global_output = self.smpl.get_output(
# global_orient=root_world.reshape(self.b * self.f, 1, 3, 3), body_pose=self.output.body_pose,
# betas=self.output.betas, pose2rot=False
# )
# feet_world = self.global_output.feet.reshape(self.b, self.f, 4, 3) + trans.unsqueeze(-2)
cam_R, cam_T = compute_camera_motion(self.output, self.pred_pose[:, :, :6], root_world, trans, self.pred_cam)
feet_cam = self.output.feet.reshape(self.b, self.f, -1, 3) + self.output.full_cam.reshape(self.b, self.f, 1, 3)
feet_world = (cam_R.mT @ (feet_cam - cam_T.unsqueeze(-2)).mT).mT
return feet_world
def forward_smpl(self, **kwargs):
self.output = self.smpl(self.pred_pose,
self.pred_shape,
cam=self.pred_cam,
return_full_pose=not self.training,
**kwargs,
)
kp3d = self.output.joints
# Feet location in global coordinate
feet_world = self.compute_global_feet()
# Return output
output = {'feet': feet_world,
'contact': self.pred_contact,
'pose': self.pred_pose,
'betas': self.pred_shape,
'poses_root_cam': self.output.global_orient,
'verts_cam': self.output.vertices}
if self.training:
pass # TODO: Update training code
else:
| from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
class Network(nn.Module):
def __init__(self,
smpl,
pose_dr=0.1,
d_embed=512,
n_layers=3,
d_feat=2048,
rnn_type='LSTM',
**kwargs
):
super().__init__()
n_joints = _C.KEYPOINTS.NUM_JOINTS
self.smpl = smpl
in_dim = n_joints * 2 + 3
d_context = d_embed + n_joints * 3
self.mask_embedding = nn.Parameter(torch.zeros(1, 1, n_joints, 2))
# Module 1. Motion Encoder
self.motion_encoder = MotionEncoder(in_dim=in_dim,
d_embed=d_embed,
pose_dr=pose_dr,
rnn_type=rnn_type,
n_layers=n_layers,
n_joints=n_joints)
self.trajectory_decoder = TrajectoryDecoder(d_embed=d_context,
rnn_type=rnn_type,
n_layers=n_layers)
# Module 3. Feature Integrator
self.integrator = Integrator(in_channel=d_feat + d_context,
out_channel=d_context)
# Module 4. Motion Decoder
self.motion_decoder = MotionDecoder(d_embed=d_context,
rnn_type=rnn_type,
n_layers=n_layers)
# Module 5. Trajectory Refiner
self.trajectory_refiner = TrajectoryRefiner(d_embed=d_context,
d_hidden=d_embed,
rnn_type=rnn_type,
n_layers=2)
@property
def __version__(self, ):
return 'v07'
def compute_global_feet(self, duplicate=False):
# Global motion
init_trans = None# if self.training else self.output.full_cam.reshape(self.b, self.f, 3)[:, [0]]
root_world, trans = rollout_global_motion(self.pred_root, self.pred_vel, init_trans)
# # Compute world-coordinate motion
# if not duplicate:
# self.global_output = self.smpl.get_output(
# global_orient=root_world.reshape(self.b * self.f, 1, 3, 3), body_pose=self.output.body_pose,
# betas=self.output.betas, pose2rot=False
# )
# feet_world = self.global_output.feet.reshape(self.b, self.f, 4, 3) + trans.unsqueeze(-2)
cam_R, cam_T = compute_camera_motion(self.output, self.pred_pose[:, :, :6], root_world, trans, self.pred_cam)
feet_cam = self.output.feet.reshape(self.b, self.f, -1, 3) + self.output.full_cam.reshape(self.b, self.f, 1, 3)
feet_world = (cam_R.mT @ (feet_cam - cam_T.unsqueeze(-2)).mT).mT
return feet_world
def forward_smpl(self, **kwargs):
self.output = self.smpl(self.pred_pose,
self.pred_shape,
cam=self.pred_cam,
return_full_pose=not self.training,
**kwargs,
)
kp3d = self.output.joints
# Feet location in global coordinate
feet_world = self.compute_global_feet()
# Return output
output = {'feet': feet_world,
'contact': self.pred_contact,
'pose': self.pred_pose,
'betas': self.pred_shape,
'poses_root_cam': self.output.global_orient,
'verts_cam': self.output.vertices}
if self.training:
pass # TODO: Update training code
else: | pose = transforms.matrix_to_axis_angle(self.output.full_pose).reshape(-1, 72) | 1 | 2023-12-08 09:17:54+00:00 | 8k |
octo-models/octo | octo/data/dataset.py | [
{
"identifier": "obs_transforms",
"path": "octo/data/obs_transforms.py",
"snippet": "def augment(\n obs: dict, seed: tf.Tensor, augment_kwargs: Union[dict, Mapping[str, dict]]\n) -> dict:\ndef decode_and_resize(\n obs: dict,\n resize_size: Union[Tuple[int, int], Mapping[str, Tuple[int, int]]],\... | from functools import partial
from typing import Callable, Mapping, Optional, Sequence, Tuple, Union
from absl import logging
from octo.data import obs_transforms, traj_transforms
from octo.data.utils import goal_relabeling, task_augmentation
from octo.data.utils.data_utils import (
allocate_threads,
get_dataset_statistics,
NormalizationType,
normalize_action_and_proprio,
pprint_data_mixture,
tree_map,
)
import inspect
import json
import dlimp as dl
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds | 4,055 | this length (after goal relabeling and chunking).
skip_unlabeled (bool, optional): Whether to skip trajectories with no language labels.
max_action: (float, optional): If provided, trajectories in which *any* action dimension
of *any* transition has an absolute value larger than this will be skipped.
max_proprio: (float, optional): If provided, trajectories in which *any* proprio dimension
of *any* transition has an absolute value larger than this will be skipped.
task_augment_strategy (str, optional): The task augmentation strategy to use, or None for no task
augmentation. See `task_augmentation.py`.
task_augment_kwargs (dict, optional): Additional keyword arguments to pass to the task augmentation
function.
num_parallel_calls (int, optional): number of parallel calls for map operations. Default to AUTOTUNE.
"""
if skip_unlabeled:
if "language_instruction" not in dataset.element_spec["task"]:
raise ValueError(
"skip_unlabeled=True but dataset does not have language labels."
)
dataset = dataset.filter(
lambda x: tf.math.reduce_any(x["task"]["language_instruction"] != "")
)
if max_action is not None:
dataset = dataset.filter(
lambda x: tf.math.reduce_all(tf.math.abs(x["action"]) <= max_action)
)
if max_proprio is not None and "proprio" in dataset.element_spec["observation"]:
dataset = dataset.filter(
lambda x: tf.math.reduce_all(
tf.math.abs(x["observation"]["proprio"]) <= max_proprio
)
)
# marks which entires of the observation and task dicts are padding
dataset = dataset.traj_map(traj_transforms.add_pad_mask_dict, num_parallel_calls)
# updates the "task" dict
if goal_relabeling_strategy is not None:
dataset = dataset.traj_map(
partial(
getattr(goal_relabeling, goal_relabeling_strategy),
**goal_relabeling_kwargs,
),
num_parallel_calls,
)
# must run task augmentation before chunking, in case it changes goal timesteps
if train and task_augment_strategy is not None:
# perform task augmentation (e.g., dropping keys)
dataset = dataset.traj_map(
partial(
getattr(task_augmentation, task_augment_strategy),
**task_augment_kwargs,
),
num_parallel_calls,
)
# chunks observations and actions, giving them a new axis at index 1 of size `window_size` and
# `window_size + future_action_window_size`, respectively
dataset = dataset.traj_map(
partial(
traj_transforms.chunk_act_obs,
window_size=window_size,
future_action_window_size=future_action_window_size,
),
num_parallel_calls,
)
if train and subsample_length is not None:
dataset = dataset.traj_map(
partial(traj_transforms.subsample, subsample_length=subsample_length),
num_parallel_calls,
)
return dataset
def apply_frame_transforms(
dataset: dl.DLataset,
*,
train: bool,
image_augment_kwargs: Union[dict, Mapping[str, dict]] = {},
resize_size: Union[Tuple[int, int], Mapping[str, Tuple[int, int]]] = {},
depth_resize_size: Union[Tuple[int, int], Mapping[str, Tuple[int, int]]] = {},
num_parallel_calls: int = tf.data.AUTOTUNE,
) -> dl.DLataset:
"""Applies common transforms that happen at a frame level. These transforms are usually more
CPU-intensive, (e.g. decoding or resizing images).
Args:
train (bool): Whether the dataset is for training (affects image augmentation).
dataset (dl.DLataset): The dataset to transform.
image_augment_kwargs (dict|Mapping[str, dict]): Keyword arguments to pass to the image augmentation
function. See `dlimp.transforms.augment_image` for documentation of these kwargs. If a dict of
dicts is provided, then key "k" will be used for "image_{k}" (names determined by `image_obs_keys`
in `make_dataset_from_rlds`). Augmentation will be skipped for missing keys (so pass an empty dict
to skip augmentation for all images).
resize_size (Tuple[int, int]|Mapping[str, Tuple[int, int]]): If provided, images will be resized to
this size. If a dict of tuples is provided, then key "k" will be used for "image_{k}" (names
determined by `image_obs_keys` in `make_dataset_from_rlds`). Resizing will be skipped for missing
keys (so pass an empty dict to skip resizing for all images).
depth_resize_size (Tuple[int, int]|Mapping[str, Tuple[int, int]]): Same as resize_size, but for depth
images.
num_parallel_calls (int): number of parallel calls for frame_map operations. Default to AUTOTUNE.
"""
# convenience wrapper that takes a function that operates on a non-chunked "observation" dict and applies
# it to the chunked "observation" dict as well as the non-chunked "task" dict
def apply_obs_transform(fn: Callable[[dict], dict], frame: dict) -> dict:
# task is not chunked -- apply fn directly
frame["task"] = fn(frame["task"])
# observation is chunked -- apply fn along first axis
frame["observation"] = dl.vmap(fn)(frame["observation"])
return frame
# decode + resize images (and depth images)
dataset = dataset.frame_map(
partial(
apply_obs_transform,
partial(
|
def apply_trajectory_transforms(
dataset: dl.DLataset,
*,
train: bool,
goal_relabeling_strategy: Optional[str] = None,
goal_relabeling_kwargs: dict = {},
window_size: int = 1,
future_action_window_size: int = 0,
subsample_length: Optional[int] = None,
skip_unlabeled: bool = False,
max_action: Optional[float] = None,
max_proprio: Optional[float] = None,
task_augment_strategy: Optional[str] = None,
task_augment_kwargs: dict = {},
num_parallel_calls: int = tf.data.AUTOTUNE,
) -> dl.DLataset:
"""Applies common transforms that happen at a trajectory level. Such transforms are usually some sort of
"relabeling" (e.g. filtering, chunking, adding goals, dropping keys). Transforms that happen in this
function should have the following properties:
- They require access to an entire trajectory (i.e. they cannot be applied in a frame-wise manner).
- They are generally not CPU-intensive, mostly involving moving and copying data.
- They do not require decoded images.
Args:
dataset (dl.DLataset): The dataset to transform.
train (bool): Whether the dataset is for training (affects subsampling).
goal_relabeling_strategy (str, optional): The goal relabeling strategy to use, or None for
no goal relabeling. See `goal_relabeling.py`.
goal_relabeling_kwargs (dict, optional): Additional keyword arguments to pass to the goal relabeling function.
window_size (int, optional): The length of the snippets that trajectories are chunked into.
future_action_window_size (int, optional): The number of future actions beyond window_size to include
in the chunked actions.
subsample_length (int, optional): If provided, trajectories longer than this will be subsampled to
this length (after goal relabeling and chunking).
skip_unlabeled (bool, optional): Whether to skip trajectories with no language labels.
max_action: (float, optional): If provided, trajectories in which *any* action dimension
of *any* transition has an absolute value larger than this will be skipped.
max_proprio: (float, optional): If provided, trajectories in which *any* proprio dimension
of *any* transition has an absolute value larger than this will be skipped.
task_augment_strategy (str, optional): The task augmentation strategy to use, or None for no task
augmentation. See `task_augmentation.py`.
task_augment_kwargs (dict, optional): Additional keyword arguments to pass to the task augmentation
function.
num_parallel_calls (int, optional): number of parallel calls for map operations. Default to AUTOTUNE.
"""
if skip_unlabeled:
if "language_instruction" not in dataset.element_spec["task"]:
raise ValueError(
"skip_unlabeled=True but dataset does not have language labels."
)
dataset = dataset.filter(
lambda x: tf.math.reduce_any(x["task"]["language_instruction"] != "")
)
if max_action is not None:
dataset = dataset.filter(
lambda x: tf.math.reduce_all(tf.math.abs(x["action"]) <= max_action)
)
if max_proprio is not None and "proprio" in dataset.element_spec["observation"]:
dataset = dataset.filter(
lambda x: tf.math.reduce_all(
tf.math.abs(x["observation"]["proprio"]) <= max_proprio
)
)
# marks which entires of the observation and task dicts are padding
dataset = dataset.traj_map(traj_transforms.add_pad_mask_dict, num_parallel_calls)
# updates the "task" dict
if goal_relabeling_strategy is not None:
dataset = dataset.traj_map(
partial(
getattr(goal_relabeling, goal_relabeling_strategy),
**goal_relabeling_kwargs,
),
num_parallel_calls,
)
# must run task augmentation before chunking, in case it changes goal timesteps
if train and task_augment_strategy is not None:
# perform task augmentation (e.g., dropping keys)
dataset = dataset.traj_map(
partial(
getattr(task_augmentation, task_augment_strategy),
**task_augment_kwargs,
),
num_parallel_calls,
)
# chunks observations and actions, giving them a new axis at index 1 of size `window_size` and
# `window_size + future_action_window_size`, respectively
dataset = dataset.traj_map(
partial(
traj_transforms.chunk_act_obs,
window_size=window_size,
future_action_window_size=future_action_window_size,
),
num_parallel_calls,
)
if train and subsample_length is not None:
dataset = dataset.traj_map(
partial(traj_transforms.subsample, subsample_length=subsample_length),
num_parallel_calls,
)
return dataset
def apply_frame_transforms(
dataset: dl.DLataset,
*,
train: bool,
image_augment_kwargs: Union[dict, Mapping[str, dict]] = {},
resize_size: Union[Tuple[int, int], Mapping[str, Tuple[int, int]]] = {},
depth_resize_size: Union[Tuple[int, int], Mapping[str, Tuple[int, int]]] = {},
num_parallel_calls: int = tf.data.AUTOTUNE,
) -> dl.DLataset:
"""Applies common transforms that happen at a frame level. These transforms are usually more
CPU-intensive, (e.g. decoding or resizing images).
Args:
train (bool): Whether the dataset is for training (affects image augmentation).
dataset (dl.DLataset): The dataset to transform.
image_augment_kwargs (dict|Mapping[str, dict]): Keyword arguments to pass to the image augmentation
function. See `dlimp.transforms.augment_image` for documentation of these kwargs. If a dict of
dicts is provided, then key "k" will be used for "image_{k}" (names determined by `image_obs_keys`
in `make_dataset_from_rlds`). Augmentation will be skipped for missing keys (so pass an empty dict
to skip augmentation for all images).
resize_size (Tuple[int, int]|Mapping[str, Tuple[int, int]]): If provided, images will be resized to
this size. If a dict of tuples is provided, then key "k" will be used for "image_{k}" (names
determined by `image_obs_keys` in `make_dataset_from_rlds`). Resizing will be skipped for missing
keys (so pass an empty dict to skip resizing for all images).
depth_resize_size (Tuple[int, int]|Mapping[str, Tuple[int, int]]): Same as resize_size, but for depth
images.
num_parallel_calls (int): number of parallel calls for frame_map operations. Default to AUTOTUNE.
"""
# convenience wrapper that takes a function that operates on a non-chunked "observation" dict and applies
# it to the chunked "observation" dict as well as the non-chunked "task" dict
def apply_obs_transform(fn: Callable[[dict], dict], frame: dict) -> dict:
# task is not chunked -- apply fn directly
frame["task"] = fn(frame["task"])
# observation is chunked -- apply fn along first axis
frame["observation"] = dl.vmap(fn)(frame["observation"])
return frame
# decode + resize images (and depth images)
dataset = dataset.frame_map(
partial(
apply_obs_transform,
partial( | obs_transforms.decode_and_resize, | 0 | 2023-12-13 09:58:56+00:00 | 8k |
LinShan-Bin/OccNeRF | networks/occupancy_decoder.py | [
{
"identifier": "geom",
"path": "utils/geom.py",
"snippet": "def eye_4x4(B, device='cuda'):\ndef safe_inverse(a): #parallel version\ndef safe_inverse_single(a):\ndef apply_4x4(RT, xyz):\ndef get_camM_T_camXs(origin_T_camXs, ind=0):\ndef split_rt_single(rt):\ndef split_rt(rt):\ndef merge_rt(r, t):\ndef x... | import pdb
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torch_efficient_distloss import eff_distloss, eff_distloss_native
from utils import geom
from utils import vox
from utils import basic
from utils import render
from ._3DCNN import S3DCNN | 5,390 | self.Z_final = self.Z
self.Y_final = self.Y
self.X_final = self.X
self.stepsize = self.opt.stepsize # voxel
self.num_voxels = self.Z_final * self.Y_final * self.X_final
self.stepsize_log = self.stepsize
self.interval = self.stepsize
if self.opt.contracted_coord:
# Sampling strategy for contracted coordinate
contracted_rate = self.opt.contracted_ratio
num_id_voxels = int(self.num_voxels * (contracted_rate)**3)
self.voxel_size = ((self.xyz_max - self.xyz_min).prod() / num_id_voxels).pow(1 / 3)
diagonal = (self.xyz_max - self.xyz_min).pow(2).sum().pow(1 / 2)
self.N_samples = int(diagonal / 2 / self.stepsize / self.voxel_size / contracted_rate)
if self.opt.infinite_range:
# depth_roi = [-self.far] * 3 + [self.far] * 3
zval_roi = [-diagonal] * 3 + [diagonal] * 3
fc = 1 - 0.5 / self.X # avoid NaN
zs_contracted = torch.linspace(0.0, fc, steps=self.N_samples)
zs_world = vox.contracted2world(
zs_contracted[None, :, None].repeat(1, 1, 3),
# pc_range_roi=depth_roi,
pc_range_roi=zval_roi,
ratio=self.opt.contracted_ratio)[:, :, 0]
else:
zs_world = torch.linspace(0.0, self.N_samples - 1, steps=self.N_samples)[None] * self.stepsize * self.voxel_size
self.register_buffer('Zval', zs_world)
pc_range_roi = self.xyz_min.tolist() + self.xyz_max.tolist()
self.norm_func = lambda xyz: vox.world2contracted(xyz, pc_range_roi=pc_range_roi, ratio=self.opt.contracted_ratio)
else:
self.N_samples = int(np.linalg.norm(np.array([self.Z_final // 2, self.Y_final // 2, self.X_final // 2]) + 1) / self.stepsize) + 1
self.voxel_size = ((self.xyz_max - self.xyz_min).prod() / self.num_voxels).pow(1 / 3)
zs_world = torch.linspace(0.0, self.N_samples - 1, steps=self.N_samples)[None] * self.stepsize * self.voxel_size
self.register_buffer('Zval', zs_world)
self.norm_func = lambda xyz: (xyz - self.xyz_min.to(xyz)) / (self.xyz_max.to(xyz) - self.xyz_min.to(xyz)) * 2.0 - 1.0
length_pose_encoding = 3
if self.opt.position == 'embedding':
input_channel = self.opt.input_channel
self.pos_embedding = torch.nn.Parameter(torch.ones(
[1, input_channel, self.opt.voxels_size[1], self.opt.voxels_size[2], self.opt.voxels_size[0]]))
elif self.opt.position == 'embedding1':
input_channel = self.opt.input_channel
xyz_in_channels = 1 + 3
embedding_width = 192
embedding_depth = 5
self.embeddingnet = nn.Sequential(
nn.Linear(xyz_in_channels, embedding_width), nn.ReLU(inplace=True),
*[nn.Sequential(nn.Linear(embedding_width, embedding_width), nn.ReLU(inplace=True))
for _ in range(embedding_depth - 2)], nn.Linear(embedding_width, self.opt.input_channel),)
nn.init.constant_(self.embeddingnet[-1].bias, 0)
self.pos_embedding1 = None
self.pos_embedding_save = torch.nn.Parameter(torch.zeros([1, input_channel, self.opt.voxels_size[1], self.opt.voxels_size[2], self.opt.voxels_size[0]]), requires_grad= False)
else:
self.pos_embedding = None
self.pos_embedding1 = None
input_channel = self.opt.input_channel
scene_centroid_x = 0.0
scene_centroid_y = 0.0
scene_centroid_z = 0.0
scene_centroid = np.array([scene_centroid_x,
scene_centroid_y,
scene_centroid_z]).reshape([1, 3])
self.register_buffer('scene_centroid', torch.from_numpy(scene_centroid).float())
self.bounds = (self.opt.real_size[0], self.opt.real_size[1],
self.opt.real_size[2], self.opt.real_size[3],
self.opt.real_size[4], self.opt.real_size[5])
# bounds = (-40, 40, -40, 40, -1, 5.4)
self.vox_util = vox.Vox_util(
self.Z, self.Y, self.X,
scene_centroid=self.scene_centroid,
bounds=self.bounds, position = self.opt.position, length_pose_encoding = length_pose_encoding, opt = self.opt,
assert_cube=False)
if self.opt.position != 'No' and self.opt.position != 'embedding':
self.meta_data = self.vox_util.get_meta_data(cam_center=torch.Tensor([[1.2475, 0.0673, 1.5356]]), camB_T_camA=None).to('cuda')
activate_fun = nn.ReLU(inplace=True)
if self.opt.aggregation == '3dcnn':
out_channel = self.opt.out_channel
self._3DCNN = S3DCNN(input_planes=input_channel, out_planes=out_channel, planes=self.opt.con_channel,
activate_fun=activate_fun, opt=opt)
else:
print('please define the aggregation')
exit()
def feature2vox_simple(self, features, pix_T_cams, cam0_T_camXs, __p, __u):
pix_T_cams_ = pix_T_cams
camXs_T_cam0_ = geom.safe_inverse(cam0_T_camXs)
_, C, Hf, Wf = features.shape
sy = Hf / float(self.opt.height)
sx = Wf / float(self.opt.width)
# unproject image feature to 3d grid
featpix_T_cams_ = geom.scale_intrinsics(pix_T_cams_, sx, sy)
# pix_T_cams_ shape: [6,4,4] feature down sample -> featpix_T_cams_
feat_mems_ = self.vox_util.unproject_image_to_mem(
features,
| # Copyright Niantic 2019. Patent Pending. All rights reserved.
#
# This software is licensed under the terms of the Monodepth2 licence
# which allows for non-commercial use only, the full terms of which are made
# available in the LICENSE file.
from __future__ import absolute_import, division, print_function
class VolumeDecoder(nn.Module):
def __init__(self, opt):
super(VolumeDecoder, self).__init__()
self.opt = opt
self.use_semantic = self.opt.use_semantic
self.semantic_classes = self.opt.semantic_classes
self.batch = self.opt.batch_size // self.opt.cam_N
self.near = self.opt.min_depth
self.far = self.opt.max_depth
self.register_buffer('xyz_min', torch.from_numpy(
np.array([self.opt.real_size[0], self.opt.real_size[2], self.opt.real_size[4]])))
self.register_buffer('xyz_max', torch.from_numpy(
np.array([self.opt.real_size[1], self.opt.real_size[3], self.opt.real_size[5]])))
self.ZMAX = self.opt.real_size[1]
self.Z = self.opt.voxels_size[0]
self.Y = self.opt.voxels_size[1]
self.X = self.opt.voxels_size[2]
self.Z_final = self.Z
self.Y_final = self.Y
self.X_final = self.X
self.stepsize = self.opt.stepsize # voxel
self.num_voxels = self.Z_final * self.Y_final * self.X_final
self.stepsize_log = self.stepsize
self.interval = self.stepsize
if self.opt.contracted_coord:
# Sampling strategy for contracted coordinate
contracted_rate = self.opt.contracted_ratio
num_id_voxels = int(self.num_voxels * (contracted_rate)**3)
self.voxel_size = ((self.xyz_max - self.xyz_min).prod() / num_id_voxels).pow(1 / 3)
diagonal = (self.xyz_max - self.xyz_min).pow(2).sum().pow(1 / 2)
self.N_samples = int(diagonal / 2 / self.stepsize / self.voxel_size / contracted_rate)
if self.opt.infinite_range:
# depth_roi = [-self.far] * 3 + [self.far] * 3
zval_roi = [-diagonal] * 3 + [diagonal] * 3
fc = 1 - 0.5 / self.X # avoid NaN
zs_contracted = torch.linspace(0.0, fc, steps=self.N_samples)
zs_world = vox.contracted2world(
zs_contracted[None, :, None].repeat(1, 1, 3),
# pc_range_roi=depth_roi,
pc_range_roi=zval_roi,
ratio=self.opt.contracted_ratio)[:, :, 0]
else:
zs_world = torch.linspace(0.0, self.N_samples - 1, steps=self.N_samples)[None] * self.stepsize * self.voxel_size
self.register_buffer('Zval', zs_world)
pc_range_roi = self.xyz_min.tolist() + self.xyz_max.tolist()
self.norm_func = lambda xyz: vox.world2contracted(xyz, pc_range_roi=pc_range_roi, ratio=self.opt.contracted_ratio)
else:
self.N_samples = int(np.linalg.norm(np.array([self.Z_final // 2, self.Y_final // 2, self.X_final // 2]) + 1) / self.stepsize) + 1
self.voxel_size = ((self.xyz_max - self.xyz_min).prod() / self.num_voxels).pow(1 / 3)
zs_world = torch.linspace(0.0, self.N_samples - 1, steps=self.N_samples)[None] * self.stepsize * self.voxel_size
self.register_buffer('Zval', zs_world)
self.norm_func = lambda xyz: (xyz - self.xyz_min.to(xyz)) / (self.xyz_max.to(xyz) - self.xyz_min.to(xyz)) * 2.0 - 1.0
length_pose_encoding = 3
if self.opt.position == 'embedding':
input_channel = self.opt.input_channel
self.pos_embedding = torch.nn.Parameter(torch.ones(
[1, input_channel, self.opt.voxels_size[1], self.opt.voxels_size[2], self.opt.voxels_size[0]]))
elif self.opt.position == 'embedding1':
input_channel = self.opt.input_channel
xyz_in_channels = 1 + 3
embedding_width = 192
embedding_depth = 5
self.embeddingnet = nn.Sequential(
nn.Linear(xyz_in_channels, embedding_width), nn.ReLU(inplace=True),
*[nn.Sequential(nn.Linear(embedding_width, embedding_width), nn.ReLU(inplace=True))
for _ in range(embedding_depth - 2)], nn.Linear(embedding_width, self.opt.input_channel),)
nn.init.constant_(self.embeddingnet[-1].bias, 0)
self.pos_embedding1 = None
self.pos_embedding_save = torch.nn.Parameter(torch.zeros([1, input_channel, self.opt.voxels_size[1], self.opt.voxels_size[2], self.opt.voxels_size[0]]), requires_grad= False)
else:
self.pos_embedding = None
self.pos_embedding1 = None
input_channel = self.opt.input_channel
scene_centroid_x = 0.0
scene_centroid_y = 0.0
scene_centroid_z = 0.0
scene_centroid = np.array([scene_centroid_x,
scene_centroid_y,
scene_centroid_z]).reshape([1, 3])
self.register_buffer('scene_centroid', torch.from_numpy(scene_centroid).float())
self.bounds = (self.opt.real_size[0], self.opt.real_size[1],
self.opt.real_size[2], self.opt.real_size[3],
self.opt.real_size[4], self.opt.real_size[5])
# bounds = (-40, 40, -40, 40, -1, 5.4)
self.vox_util = vox.Vox_util(
self.Z, self.Y, self.X,
scene_centroid=self.scene_centroid,
bounds=self.bounds, position = self.opt.position, length_pose_encoding = length_pose_encoding, opt = self.opt,
assert_cube=False)
if self.opt.position != 'No' and self.opt.position != 'embedding':
self.meta_data = self.vox_util.get_meta_data(cam_center=torch.Tensor([[1.2475, 0.0673, 1.5356]]), camB_T_camA=None).to('cuda')
activate_fun = nn.ReLU(inplace=True)
if self.opt.aggregation == '3dcnn':
out_channel = self.opt.out_channel
self._3DCNN = S3DCNN(input_planes=input_channel, out_planes=out_channel, planes=self.opt.con_channel,
activate_fun=activate_fun, opt=opt)
else:
print('please define the aggregation')
exit()
def feature2vox_simple(self, features, pix_T_cams, cam0_T_camXs, __p, __u):
pix_T_cams_ = pix_T_cams
camXs_T_cam0_ = geom.safe_inverse(cam0_T_camXs)
_, C, Hf, Wf = features.shape
sy = Hf / float(self.opt.height)
sx = Wf / float(self.opt.width)
# unproject image feature to 3d grid
featpix_T_cams_ = geom.scale_intrinsics(pix_T_cams_, sx, sy)
# pix_T_cams_ shape: [6,4,4] feature down sample -> featpix_T_cams_
feat_mems_ = self.vox_util.unproject_image_to_mem(
features, | basic.matmul2(featpix_T_cams_, camXs_T_cam0_), | 2 | 2023-12-14 15:00:21+00:00 | 8k |
Kevin-thu/DiffMorpher | model.py | [
{
"identifier": "get_img",
"path": "utils/model_utils.py",
"snippet": "def get_img(img, resolution=512):\n norm_mean = [0.5, 0.5, 0.5]\n norm_std = [0.5, 0.5, 0.5]\n transform = transforms.Compose([\n transforms.Resize((resolution, resolution)),\n transforms.ToTensor(),\n t... | import os
import torch
import torch.nn.functional as F
import tqdm
import numpy as np
import safetensors
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.models.attention_processor import AttnProcessor
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import KarrasDiffusionSchedulers
from PIL import Image
from torchvision import transforms
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import StableDiffusionPipeline
from argparse import ArgumentParser
from utils.model_utils import get_img, slerp, do_replace_attn
from utils.lora_utils import train_lora, load_lora
from utils.alpha_scheduler import AlphaScheduler | 5,856 | unconditional_input.input_ids.to(DEVICE))[0]
text_embeddings = torch.cat(
[unconditional_embeddings, text_embeddings], dim=0)
print("latents shape: ", latents.shape)
# interative sampling
self.scheduler.set_timesteps(num_inference_steps)
print("Valid timesteps: ", reversed(self.scheduler.timesteps))
# print("attributes: ", self.scheduler.__dict__)
latents_list = [latents]
pred_x0_list = [latents]
for i, t in enumerate(tqdm.tqdm(reversed(self.scheduler.timesteps), desc="DDIM Inversion")):
if num_actual_inference_steps is not None and i >= num_actual_inference_steps:
continue
if guidance_scale > 1.:
model_inputs = torch.cat([latents] * 2)
else:
model_inputs = latents
# predict the noise
noise_pred = self.unet(
model_inputs, t, encoder_hidden_states=text_embeddings).sample
if guidance_scale > 1.:
noise_pred_uncon, noise_pred_con = noise_pred.chunk(2, dim=0)
noise_pred = noise_pred_uncon + guidance_scale * \
(noise_pred_con - noise_pred_uncon)
# compute the previous noise sample x_t-1 -> x_t
latents, pred_x0 = self.inv_step(noise_pred, t, latents)
latents_list.append(latents)
pred_x0_list.append(pred_x0)
return latents
@torch.no_grad()
def ddim_inversion(self, latent, cond):
timesteps = reversed(self.scheduler.timesteps)
with torch.autocast(device_type='cuda', dtype=torch.float32):
for i, t in enumerate(tqdm.tqdm(timesteps, desc="DDIM inversion")):
cond_batch = cond.repeat(latent.shape[0], 1, 1)
alpha_prod_t = self.scheduler.alphas_cumprod[t]
alpha_prod_t_prev = (
self.scheduler.alphas_cumprod[timesteps[i - 1]]
if i > 0 else self.scheduler.final_alpha_cumprod
)
mu = alpha_prod_t ** 0.5
mu_prev = alpha_prod_t_prev ** 0.5
sigma = (1 - alpha_prod_t) ** 0.5
sigma_prev = (1 - alpha_prod_t_prev) ** 0.5
eps = self.unet(
latent, t, encoder_hidden_states=cond_batch).sample
pred_x0 = (latent - sigma_prev * eps) / mu_prev
latent = mu * pred_x0 + sigma * eps
# if save_latents:
# torch.save(latent, os.path.join(save_path, f'noisy_latents_{t}.pt'))
# torch.save(latent, os.path.join(save_path, f'noisy_latents_{t}.pt'))
return latent
def step(
self,
model_output: torch.FloatTensor,
timestep: int,
x: torch.FloatTensor,
):
"""
predict the sample of the next step in the denoise process.
"""
prev_timestep = timestep - \
self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
alpha_prod_t_prev = self.scheduler.alphas_cumprod[
prev_timestep] if prev_timestep > 0 else self.scheduler.final_alpha_cumprod
beta_prod_t = 1 - alpha_prod_t
pred_x0 = (x - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
pred_dir = (1 - alpha_prod_t_prev)**0.5 * model_output
x_prev = alpha_prod_t_prev**0.5 * pred_x0 + pred_dir
return x_prev, pred_x0
@torch.no_grad()
def image2latent(self, image):
DEVICE = torch.device(
"cuda") if torch.cuda.is_available() else torch.device("cpu")
if type(image) is Image:
image = np.array(image)
image = torch.from_numpy(image).float() / 127.5 - 1
image = image.permute(2, 0, 1).unsqueeze(0)
# input image density range [-1, 1]
latents = self.vae.encode(image.to(DEVICE))['latent_dist'].mean
latents = latents * 0.18215
return latents
@torch.no_grad()
def latent2image(self, latents, return_type='np'):
latents = 1 / 0.18215 * latents.detach()
image = self.vae.decode(latents)['sample']
if return_type == 'np':
image = (image / 2 + 0.5).clamp(0, 1)
image = image.cpu().permute(0, 2, 3, 1).numpy()[0]
image = (image * 255).astype(np.uint8)
elif return_type == "pt":
image = (image / 2 + 0.5).clamp(0, 1)
return image
def latent2image_grad(self, latents):
latents = 1 / 0.18215 * latents
image = self.vae.decode(latents)['sample']
return image # range [-1, 1]
@torch.no_grad()
def cal_latent(self, num_inference_steps, guidance_scale, unconditioning, img_noise_0, img_noise_1, text_embeddings_0, text_embeddings_1, lora_0, lora_1, alpha, use_lora, fix_lora=None):
# latents = torch.cos(alpha * torch.pi / 2) * img_noise_0 + \
# torch.sin(alpha * torch.pi / 2) * img_noise_1
# latents = (1 - alpha) * img_noise_0 + alpha * img_noise_1
# latents = latents / ((1 - alpha) ** 2 + alpha ** 2)
|
class StoreProcessor():
def __init__(self, original_processor, value_dict, name):
self.original_processor = original_processor
self.value_dict = value_dict
self.name = name
self.value_dict[self.name] = dict()
self.id = 0
def __call__(self, attn, hidden_states, *args, encoder_hidden_states=None, attention_mask=None, **kwargs):
# Is self attention
if encoder_hidden_states is None:
self.value_dict[self.name][self.id] = hidden_states.detach()
self.id += 1
res = self.original_processor(attn, hidden_states, *args,
encoder_hidden_states=encoder_hidden_states,
attention_mask=attention_mask,
**kwargs)
return res
class LoadProcessor():
def __init__(self, original_processor, name, img0_dict, img1_dict, alpha, beta=0, lamd=0.6):
super().__init__()
self.original_processor = original_processor
self.name = name
self.img0_dict = img0_dict
self.img1_dict = img1_dict
self.alpha = alpha
self.beta = beta
self.lamd = lamd
self.id = 0
def parent_call(
self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None, scale=1.0
):
residual = hidden_states
if attn.spatial_norm is not None:
hidden_states = attn.spatial_norm(hidden_states)
input_ndim = hidden_states.ndim
if input_ndim == 4:
batch_size, channel, height, width = hidden_states.shape
hidden_states = hidden_states.view(
batch_size, channel, height * width).transpose(1, 2)
batch_size, sequence_length, _ = (
hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
)
attention_mask = attn.prepare_attention_mask(
attention_mask, sequence_length, batch_size)
if attn.group_norm is not None:
hidden_states = attn.group_norm(
hidden_states.transpose(1, 2)).transpose(1, 2)
query = attn.to_q(hidden_states) + scale * \
self.original_processor.to_q_lora(hidden_states)
query = attn.head_to_batch_dim(query)
if encoder_hidden_states is None:
encoder_hidden_states = hidden_states
elif attn.norm_cross:
encoder_hidden_states = attn.norm_encoder_hidden_states(
encoder_hidden_states)
key = attn.to_k(encoder_hidden_states) + scale * \
self.original_processor.to_k_lora(encoder_hidden_states)
value = attn.to_v(encoder_hidden_states) + scale * \
self.original_processor.to_v_lora(encoder_hidden_states)
key = attn.head_to_batch_dim(key)
value = attn.head_to_batch_dim(value)
attention_probs = attn.get_attention_scores(
query, key, attention_mask)
hidden_states = torch.bmm(attention_probs, value)
hidden_states = attn.batch_to_head_dim(hidden_states)
# linear proj
hidden_states = attn.to_out[0](
hidden_states) + scale * self.original_processor.to_out_lora(hidden_states)
# dropout
hidden_states = attn.to_out[1](hidden_states)
if input_ndim == 4:
hidden_states = hidden_states.transpose(
-1, -2).reshape(batch_size, channel, height, width)
if attn.residual_connection:
hidden_states = hidden_states + residual
hidden_states = hidden_states / attn.rescale_output_factor
return hidden_states
def __call__(self, attn, hidden_states, *args, encoder_hidden_states=None, attention_mask=None, **kwargs):
# Is self attention
if encoder_hidden_states is None:
# hardcode timestep
if self.id < 50 * self.lamd:
map0 = self.img0_dict[self.name][self.id]
map1 = self.img1_dict[self.name][self.id]
cross_map = self.beta * hidden_states + \
(1 - self.beta) * ((1 - self.alpha) * map0 + self.alpha * map1)
# cross_map = self.beta * hidden_states + \
# (1 - self.beta) * slerp(map0, map1, self.alpha)
# cross_map = slerp(slerp(map0, map1, self.alpha),
# hidden_states, self.beta)
# cross_map = hidden_states
# cross_map = torch.cat(
# ((1 - self.alpha) * map0, self.alpha * map1), dim=1)
# res = self.original_processor(attn, hidden_states, *args,
# encoder_hidden_states=cross_map,
# attention_mask=attention_mask,
# temb=temb, **kwargs)
res = self.parent_call(attn, hidden_states, *args,
encoder_hidden_states=cross_map,
attention_mask=attention_mask,
**kwargs)
else:
res = self.original_processor(attn, hidden_states, *args,
encoder_hidden_states=encoder_hidden_states,
attention_mask=attention_mask,
**kwargs)
self.id += 1
# if self.id == len(self.img0_dict[self.name]):
if self.id == len(self.img0_dict[self.name]):
self.id = 0
else:
res = self.original_processor(attn, hidden_states, *args,
encoder_hidden_states=encoder_hidden_states,
attention_mask=attention_mask,
**kwargs)
return res
class DiffMorpherPipeline(StableDiffusionPipeline):
def __init__(self,
vae: AutoencoderKL,
text_encoder: CLIPTextModel,
tokenizer: CLIPTokenizer,
unet: UNet2DConditionModel,
scheduler: KarrasDiffusionSchedulers,
safety_checker: StableDiffusionSafetyChecker,
feature_extractor: CLIPImageProcessor,
requires_safety_checker: bool = True,
):
super().__init__(vae, text_encoder, tokenizer, unet, scheduler,
safety_checker, feature_extractor, requires_safety_checker)
self.img0_dict = dict()
self.img1_dict = dict()
def inv_step(
self,
model_output: torch.FloatTensor,
timestep: int,
x: torch.FloatTensor,
eta=0.,
verbose=False
):
"""
Inverse sampling for DDIM Inversion
"""
if verbose:
print("timestep: ", timestep)
next_step = timestep
timestep = min(timestep - self.scheduler.config.num_train_timesteps //
self.scheduler.num_inference_steps, 999)
alpha_prod_t = self.scheduler.alphas_cumprod[
timestep] if timestep >= 0 else self.scheduler.final_alpha_cumprod
alpha_prod_t_next = self.scheduler.alphas_cumprod[next_step]
beta_prod_t = 1 - alpha_prod_t
pred_x0 = (x - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
pred_dir = (1 - alpha_prod_t_next)**0.5 * model_output
x_next = alpha_prod_t_next**0.5 * pred_x0 + pred_dir
return x_next, pred_x0
@torch.no_grad()
def invert(
self,
image: torch.Tensor,
prompt,
num_inference_steps=50,
num_actual_inference_steps=None,
guidance_scale=1.,
eta=0.0,
**kwds):
"""
invert a real image into noise map with determinisc DDIM inversion
"""
DEVICE = torch.device(
"cuda") if torch.cuda.is_available() else torch.device("cpu")
batch_size = image.shape[0]
if isinstance(prompt, list):
if batch_size == 1:
image = image.expand(len(prompt), -1, -1, -1)
elif isinstance(prompt, str):
if batch_size > 1:
prompt = [prompt] * batch_size
# text embeddings
text_input = self.tokenizer(
prompt,
padding="max_length",
max_length=77,
return_tensors="pt"
)
text_embeddings = self.text_encoder(text_input.input_ids.to(DEVICE))[0]
print("input text embeddings :", text_embeddings.shape)
# define initial latents
latents = self.image2latent(image)
# unconditional embedding for classifier free guidance
if guidance_scale > 1.:
max_length = text_input.input_ids.shape[-1]
unconditional_input = self.tokenizer(
[""] * batch_size,
padding="max_length",
max_length=77,
return_tensors="pt"
)
unconditional_embeddings = self.text_encoder(
unconditional_input.input_ids.to(DEVICE))[0]
text_embeddings = torch.cat(
[unconditional_embeddings, text_embeddings], dim=0)
print("latents shape: ", latents.shape)
# interative sampling
self.scheduler.set_timesteps(num_inference_steps)
print("Valid timesteps: ", reversed(self.scheduler.timesteps))
# print("attributes: ", self.scheduler.__dict__)
latents_list = [latents]
pred_x0_list = [latents]
for i, t in enumerate(tqdm.tqdm(reversed(self.scheduler.timesteps), desc="DDIM Inversion")):
if num_actual_inference_steps is not None and i >= num_actual_inference_steps:
continue
if guidance_scale > 1.:
model_inputs = torch.cat([latents] * 2)
else:
model_inputs = latents
# predict the noise
noise_pred = self.unet(
model_inputs, t, encoder_hidden_states=text_embeddings).sample
if guidance_scale > 1.:
noise_pred_uncon, noise_pred_con = noise_pred.chunk(2, dim=0)
noise_pred = noise_pred_uncon + guidance_scale * \
(noise_pred_con - noise_pred_uncon)
# compute the previous noise sample x_t-1 -> x_t
latents, pred_x0 = self.inv_step(noise_pred, t, latents)
latents_list.append(latents)
pred_x0_list.append(pred_x0)
return latents
@torch.no_grad()
def ddim_inversion(self, latent, cond):
timesteps = reversed(self.scheduler.timesteps)
with torch.autocast(device_type='cuda', dtype=torch.float32):
for i, t in enumerate(tqdm.tqdm(timesteps, desc="DDIM inversion")):
cond_batch = cond.repeat(latent.shape[0], 1, 1)
alpha_prod_t = self.scheduler.alphas_cumprod[t]
alpha_prod_t_prev = (
self.scheduler.alphas_cumprod[timesteps[i - 1]]
if i > 0 else self.scheduler.final_alpha_cumprod
)
mu = alpha_prod_t ** 0.5
mu_prev = alpha_prod_t_prev ** 0.5
sigma = (1 - alpha_prod_t) ** 0.5
sigma_prev = (1 - alpha_prod_t_prev) ** 0.5
eps = self.unet(
latent, t, encoder_hidden_states=cond_batch).sample
pred_x0 = (latent - sigma_prev * eps) / mu_prev
latent = mu * pred_x0 + sigma * eps
# if save_latents:
# torch.save(latent, os.path.join(save_path, f'noisy_latents_{t}.pt'))
# torch.save(latent, os.path.join(save_path, f'noisy_latents_{t}.pt'))
return latent
def step(
self,
model_output: torch.FloatTensor,
timestep: int,
x: torch.FloatTensor,
):
"""
predict the sample of the next step in the denoise process.
"""
prev_timestep = timestep - \
self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
alpha_prod_t_prev = self.scheduler.alphas_cumprod[
prev_timestep] if prev_timestep > 0 else self.scheduler.final_alpha_cumprod
beta_prod_t = 1 - alpha_prod_t
pred_x0 = (x - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
pred_dir = (1 - alpha_prod_t_prev)**0.5 * model_output
x_prev = alpha_prod_t_prev**0.5 * pred_x0 + pred_dir
return x_prev, pred_x0
@torch.no_grad()
def image2latent(self, image):
DEVICE = torch.device(
"cuda") if torch.cuda.is_available() else torch.device("cpu")
if type(image) is Image:
image = np.array(image)
image = torch.from_numpy(image).float() / 127.5 - 1
image = image.permute(2, 0, 1).unsqueeze(0)
# input image density range [-1, 1]
latents = self.vae.encode(image.to(DEVICE))['latent_dist'].mean
latents = latents * 0.18215
return latents
@torch.no_grad()
def latent2image(self, latents, return_type='np'):
latents = 1 / 0.18215 * latents.detach()
image = self.vae.decode(latents)['sample']
if return_type == 'np':
image = (image / 2 + 0.5).clamp(0, 1)
image = image.cpu().permute(0, 2, 3, 1).numpy()[0]
image = (image * 255).astype(np.uint8)
elif return_type == "pt":
image = (image / 2 + 0.5).clamp(0, 1)
return image
def latent2image_grad(self, latents):
latents = 1 / 0.18215 * latents
image = self.vae.decode(latents)['sample']
return image # range [-1, 1]
@torch.no_grad()
def cal_latent(self, num_inference_steps, guidance_scale, unconditioning, img_noise_0, img_noise_1, text_embeddings_0, text_embeddings_1, lora_0, lora_1, alpha, use_lora, fix_lora=None):
# latents = torch.cos(alpha * torch.pi / 2) * img_noise_0 + \
# torch.sin(alpha * torch.pi / 2) * img_noise_1
# latents = (1 - alpha) * img_noise_0 + alpha * img_noise_1
# latents = latents / ((1 - alpha) ** 2 + alpha ** 2) | latents = slerp(img_noise_0, img_noise_1, alpha, self.use_adain) | 1 | 2023-12-11 15:19:07+00:00 | 8k |
modelscope/richdreamer | threestudio/models/renderers/nerf_volume_renderer.py | [
{
"identifier": "BaseBackground",
"path": "threestudio/models/background/base.py",
"snippet": "class BaseBackground(BaseModule):\n @dataclass\n class Config(BaseModule.Config):\n pass\n\n cfg: Config\n\n def configure(self):\n pass\n\n def forward(self, dirs: Float[Tensor, \... | import math
import nerfacc
import torch
import torch.nn.functional as F
import threestudio
from dataclasses import dataclass, field
from functools import partial
from threestudio.models.background.base import BaseBackground
from threestudio.models.estimators import ImportanceEstimator
from threestudio.models.geometry.base import BaseImplicitGeometry
from threestudio.models.materials.base import BaseMaterial
from threestudio.models.networks import create_network_with_input_encoding
from threestudio.models.renderers.base import VolumeRenderer
from threestudio.systems.utils import (parse_optimizer,
parse_scheduler_to_instance,)
from threestudio.utils.ops import (chunk_batch, get_activation,
validate_empty_rays,)
from threestudio.utils.typing import * | 4,822 |
@threestudio.register("nerf-volume-renderer")
class NeRFVolumeRenderer(VolumeRenderer):
@dataclass
class Config(VolumeRenderer.Config):
num_samples_per_ray: int = 512
eval_chunk_size: int = 160000
randomized: bool = True
near_plane: float = 0.0
far_plane: float = 1e10
return_comp_normal: bool = False
return_normal_perturb: bool = False
# in ["occgrid", "proposal", "importance"]
estimator: str = "occgrid"
# for occgrid
grid_prune: bool = True
prune_alpha_threshold: bool = True
# for proposal
proposal_network_config: Optional[dict] = None
prop_optimizer_config: Optional[dict] = None
prop_scheduler_config: Optional[dict] = None
num_samples_per_ray_proposal: int = 64
# for importance
num_samples_per_ray_importance: int = 64
occ_grid_res: int = 32
depth_norm_radius: float = 1.0
cfg: Config
def configure(
self,
geometry: BaseImplicitGeometry,
material: BaseMaterial,
background: BaseBackground,
) -> None:
super().configure(geometry, material, background)
if self.cfg.estimator == "occgrid":
self.estimator = nerfacc.OccGridEstimator(
roi_aabb=self.bbox.view(-1), resolution=self.cfg.occ_grid_res, levels=1
)
if not self.cfg.grid_prune:
self.estimator.occs.fill_(True)
self.estimator.binaries.fill_(True)
self.render_step_size = (
1.732 * 2 * self.cfg.radius / self.cfg.num_samples_per_ray
)
self.randomized = self.cfg.randomized
elif self.cfg.estimator == "importance":
|
@threestudio.register("nerf-volume-renderer")
class NeRFVolumeRenderer(VolumeRenderer):
@dataclass
class Config(VolumeRenderer.Config):
num_samples_per_ray: int = 512
eval_chunk_size: int = 160000
randomized: bool = True
near_plane: float = 0.0
far_plane: float = 1e10
return_comp_normal: bool = False
return_normal_perturb: bool = False
# in ["occgrid", "proposal", "importance"]
estimator: str = "occgrid"
# for occgrid
grid_prune: bool = True
prune_alpha_threshold: bool = True
# for proposal
proposal_network_config: Optional[dict] = None
prop_optimizer_config: Optional[dict] = None
prop_scheduler_config: Optional[dict] = None
num_samples_per_ray_proposal: int = 64
# for importance
num_samples_per_ray_importance: int = 64
occ_grid_res: int = 32
depth_norm_radius: float = 1.0
cfg: Config
def configure(
self,
geometry: BaseImplicitGeometry,
material: BaseMaterial,
background: BaseBackground,
) -> None:
super().configure(geometry, material, background)
if self.cfg.estimator == "occgrid":
self.estimator = nerfacc.OccGridEstimator(
roi_aabb=self.bbox.view(-1), resolution=self.cfg.occ_grid_res, levels=1
)
if not self.cfg.grid_prune:
self.estimator.occs.fill_(True)
self.estimator.binaries.fill_(True)
self.render_step_size = (
1.732 * 2 * self.cfg.radius / self.cfg.num_samples_per_ray
)
self.randomized = self.cfg.randomized
elif self.cfg.estimator == "importance": | self.estimator = ImportanceEstimator() | 1 | 2023-12-06 07:53:11+00:00 | 8k |
rehg-lab/RAVE | annotator/oneformer/detectron2/data/datasets/cityscapes.py | [
{
"identifier": "BoxMode",
"path": "annotator/oneformer/detectron2/structures/boxes.py",
"snippet": "class BoxMode(IntEnum):\r\n \"\"\"\r\n Enum of different ways to represent a box.\r\n \"\"\"\r\n\r\n XYXY_ABS = 0\r\n \"\"\"\r\n (x0, y0, x1, y1) in absolute floating points coordinates... | import functools
import json
import logging
import multiprocessing as mp
import numpy as np
import os
import annotator.oneformer.pycocotools.mask as mask_util
import cv2 # noqa
import argparse
from itertools import chain
from PIL import Image
from annotator.oneformer.detectron2.structures import BoxMode
from annotator.oneformer.detectron2.utils.comm import get_world_size
from annotator.oneformer.detectron2.utils.file_io import PathManager
from annotator.oneformer.detectron2.utils.logger import setup_logger
from cityscapesscripts.helpers.labels import labels
from cityscapesscripts.helpers.labels import id2label, name2label
from shapely.geometry import MultiPolygon, Polygon
from annotator.oneformer.detectron2.data.catalog import Metadata
from annotator.oneformer.detectron2.utils.visualizer import Visualizer
from cityscapesscripts.helpers.labels import labels
| 4,762 | label = name2label[label_name[: -len("group")]]
else:
raise
if label.id < 0: # cityscapes data format
continue
# Cityscapes's raw annotations uses integer coordinates
# Therefore +0.5 here
poly_coord = np.asarray(obj["polygon"], dtype="f4") + 0.5
# CityscapesScript uses PIL.ImageDraw.polygon to rasterize
# polygons for evaluation. This function operates in integer space
# and draws each pixel whose center falls into the polygon.
# Therefore it draws a polygon which is 0.5 "fatter" in expectation.
# We therefore dilate the input polygon by 0.5 as our input.
poly = Polygon(poly_coord).buffer(0.5, resolution=4)
if not label.hasInstances or label.ignoreInEval:
# even if we won't store the polygon it still contributes to overlaps resolution
polygons_union = polygons_union.union(poly)
continue
# Take non-overlapping part of the polygon
poly_wo_overlaps = poly.difference(polygons_union)
if poly_wo_overlaps.is_empty:
continue
polygons_union = polygons_union.union(poly)
anno = {}
anno["iscrowd"] = label_name.endswith("group")
anno["category_id"] = label.id
if isinstance(poly_wo_overlaps, Polygon):
poly_list = [poly_wo_overlaps]
elif isinstance(poly_wo_overlaps, MultiPolygon):
poly_list = poly_wo_overlaps.geoms
else:
raise NotImplementedError("Unknown geometric structure {}".format(poly_wo_overlaps))
poly_coord = []
for poly_el in poly_list:
# COCO API can work only with exterior boundaries now, hence we store only them.
# TODO: store both exterior and interior boundaries once other parts of the
# codebase support holes in polygons.
poly_coord.append(list(chain(*poly_el.exterior.coords)))
anno["segmentation"] = poly_coord
(xmin, ymin, xmax, ymax) = poly_wo_overlaps.bounds
anno["bbox"] = (xmin, ymin, xmax, ymax)
anno["bbox_mode"] = BoxMode.XYXY_ABS
annos.append(anno)
else:
# See also the official annotation parsing scripts at
# https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/instances2dict.py # noqa
with PathManager.open(instance_id_file, "rb") as f:
inst_image = np.asarray(Image.open(f), order="F")
# ids < 24 are stuff labels (filtering them first is about 5% faster)
flattened_ids = np.unique(inst_image[inst_image >= 24])
ret = {
"file_name": image_file,
"image_id": os.path.basename(image_file),
"height": inst_image.shape[0],
"width": inst_image.shape[1],
}
for instance_id in flattened_ids:
# For non-crowd annotations, instance_id // 1000 is the label_id
# Crowd annotations have <1000 instance ids
label_id = instance_id // 1000 if instance_id >= 1000 else instance_id
label = id2label[label_id]
if not label.hasInstances or label.ignoreInEval:
continue
anno = {}
anno["iscrowd"] = instance_id < 1000
anno["category_id"] = label.id
mask = np.asarray(inst_image == instance_id, dtype=np.uint8, order="F")
inds = np.nonzero(mask)
ymin, ymax = inds[0].min(), inds[0].max()
xmin, xmax = inds[1].min(), inds[1].max()
anno["bbox"] = (xmin, ymin, xmax, ymax)
if xmax <= xmin or ymax <= ymin:
continue
anno["bbox_mode"] = BoxMode.XYXY_ABS
if to_polygons:
# This conversion comes from D4809743 and D5171122,
# when Mask-RCNN was first developed.
contours = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[
-2
]
polygons = [c.reshape(-1).tolist() for c in contours if len(c) >= 3]
# opencv's can produce invalid polygons
if len(polygons) == 0:
continue
anno["segmentation"] = polygons
else:
anno["segmentation"] = mask_util.encode(mask[:, :, None])[0]
annos.append(anno)
ret["annotations"] = annos
return ret
if __name__ == "__main__":
"""
Test the cityscapes dataset loader.
Usage:
python -m detectron2.data.datasets.cityscapes \
cityscapes/leftImg8bit/train cityscapes/gtFine/train
"""
parser = argparse.ArgumentParser()
parser.add_argument("image_dir")
parser.add_argument("gt_dir")
parser.add_argument("--type", choices=["instance", "semantic"], default="instance")
args = parser.parse_args()
| # Copyright (c) Facebook, Inc. and its affiliates.
try:
except ImportError:
# OpenCV is an optional dependency at the moment
pass
logger = logging.getLogger(__name__)
def _get_cityscapes_files(image_dir, gt_dir):
files = []
# scan through the directory
cities = PathManager.ls(image_dir)
logger.info(f"{len(cities)} cities found in '{image_dir}'.")
for city in cities:
city_img_dir = os.path.join(image_dir, city)
city_gt_dir = os.path.join(gt_dir, city)
for basename in PathManager.ls(city_img_dir):
image_file = os.path.join(city_img_dir, basename)
suffix = "leftImg8bit.png"
assert basename.endswith(suffix), basename
basename = basename[: -len(suffix)]
instance_file = os.path.join(city_gt_dir, basename + "gtFine_instanceIds.png")
label_file = os.path.join(city_gt_dir, basename + "gtFine_labelIds.png")
json_file = os.path.join(city_gt_dir, basename + "gtFine_polygons.json")
files.append((image_file, instance_file, label_file, json_file))
assert len(files), "No images found in {}".format(image_dir)
for f in files[0]:
assert PathManager.isfile(f), f
return files
def load_cityscapes_instances(image_dir, gt_dir, from_json=True, to_polygons=True):
"""
Args:
image_dir (str): path to the raw dataset. e.g., "~/cityscapes/leftImg8bit/train".
gt_dir (str): path to the raw annotations. e.g., "~/cityscapes/gtFine/train".
from_json (bool): whether to read annotations from the raw json file or the png files.
to_polygons (bool): whether to represent the segmentation as polygons
(COCO's format) instead of masks (cityscapes's format).
Returns:
list[dict]: a list of dicts in Detectron2 standard format. (See
`Using Custom Datasets </tutorials/datasets.html>`_ )
"""
if from_json:
assert to_polygons, (
"Cityscapes's json annotations are in polygon format. "
"Converting to mask format is not supported now."
)
files = _get_cityscapes_files(image_dir, gt_dir)
logger.info("Preprocessing cityscapes annotations ...")
# This is still not fast: all workers will execute duplicate works and will
# take up to 10m on a 8GPU server.
pool = mp.Pool(processes=max(mp.cpu_count() // get_world_size() // 2, 4))
ret = pool.map(
functools.partial(_cityscapes_files_to_dict, from_json=from_json, to_polygons=to_polygons),
files,
)
logger.info("Loaded {} images from {}".format(len(ret), image_dir))
# Map cityscape ids to contiguous ids
labels = [l for l in labels if l.hasInstances and not l.ignoreInEval]
dataset_id_to_contiguous_id = {l.id: idx for idx, l in enumerate(labels)}
for dict_per_image in ret:
for anno in dict_per_image["annotations"]:
anno["category_id"] = dataset_id_to_contiguous_id[anno["category_id"]]
return ret
def load_cityscapes_semantic(image_dir, gt_dir):
"""
Args:
image_dir (str): path to the raw dataset. e.g., "~/cityscapes/leftImg8bit/train".
gt_dir (str): path to the raw annotations. e.g., "~/cityscapes/gtFine/train".
Returns:
list[dict]: a list of dict, each has "file_name" and
"sem_seg_file_name".
"""
ret = []
# gt_dir is small and contain many small files. make sense to fetch to local first
gt_dir = PathManager.get_local_path(gt_dir)
for image_file, _, label_file, json_file in _get_cityscapes_files(image_dir, gt_dir):
label_file = label_file.replace("labelIds", "labelTrainIds")
with PathManager.open(json_file, "r") as f:
jsonobj = json.load(f)
ret.append(
{
"file_name": image_file,
"sem_seg_file_name": label_file,
"height": jsonobj["imgHeight"],
"width": jsonobj["imgWidth"],
}
)
assert len(ret), f"No images found in {image_dir}!"
assert PathManager.isfile(
ret[0]["sem_seg_file_name"]
), "Please generate labelTrainIds.png with cityscapesscripts/preparation/createTrainIdLabelImgs.py" # noqa
return ret
def _cityscapes_files_to_dict(files, from_json, to_polygons):
"""
Parse cityscapes annotation files to a instance segmentation dataset dict.
Args:
files (tuple): consists of (image_file, instance_id_file, label_id_file, json_file)
from_json (bool): whether to read annotations from the raw json file or the png files.
to_polygons (bool): whether to represent the segmentation as polygons
(COCO's format) instead of masks (cityscapes's format).
Returns:
A dict in Detectron2 Dataset format.
"""
image_file, instance_id_file, _, json_file = files
annos = []
if from_json:
with PathManager.open(json_file, "r") as f:
jsonobj = json.load(f)
ret = {
"file_name": image_file,
"image_id": os.path.basename(image_file),
"height": jsonobj["imgHeight"],
"width": jsonobj["imgWidth"],
}
# `polygons_union` contains the union of all valid polygons.
polygons_union = Polygon()
# CityscapesScripts draw the polygons in sequential order
# and each polygon *overwrites* existing ones. See
# (https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/preparation/json2instanceImg.py) # noqa
# We use reverse order, and each polygon *avoids* early ones.
# This will resolve the ploygon overlaps in the same way as CityscapesScripts.
for obj in jsonobj["objects"][::-1]:
if "deleted" in obj: # cityscapes data format specific
continue
label_name = obj["label"]
try:
label = name2label[label_name]
except KeyError:
if label_name.endswith("group"): # crowd area
label = name2label[label_name[: -len("group")]]
else:
raise
if label.id < 0: # cityscapes data format
continue
# Cityscapes's raw annotations uses integer coordinates
# Therefore +0.5 here
poly_coord = np.asarray(obj["polygon"], dtype="f4") + 0.5
# CityscapesScript uses PIL.ImageDraw.polygon to rasterize
# polygons for evaluation. This function operates in integer space
# and draws each pixel whose center falls into the polygon.
# Therefore it draws a polygon which is 0.5 "fatter" in expectation.
# We therefore dilate the input polygon by 0.5 as our input.
poly = Polygon(poly_coord).buffer(0.5, resolution=4)
if not label.hasInstances or label.ignoreInEval:
# even if we won't store the polygon it still contributes to overlaps resolution
polygons_union = polygons_union.union(poly)
continue
# Take non-overlapping part of the polygon
poly_wo_overlaps = poly.difference(polygons_union)
if poly_wo_overlaps.is_empty:
continue
polygons_union = polygons_union.union(poly)
anno = {}
anno["iscrowd"] = label_name.endswith("group")
anno["category_id"] = label.id
if isinstance(poly_wo_overlaps, Polygon):
poly_list = [poly_wo_overlaps]
elif isinstance(poly_wo_overlaps, MultiPolygon):
poly_list = poly_wo_overlaps.geoms
else:
raise NotImplementedError("Unknown geometric structure {}".format(poly_wo_overlaps))
poly_coord = []
for poly_el in poly_list:
# COCO API can work only with exterior boundaries now, hence we store only them.
# TODO: store both exterior and interior boundaries once other parts of the
# codebase support holes in polygons.
poly_coord.append(list(chain(*poly_el.exterior.coords)))
anno["segmentation"] = poly_coord
(xmin, ymin, xmax, ymax) = poly_wo_overlaps.bounds
anno["bbox"] = (xmin, ymin, xmax, ymax)
anno["bbox_mode"] = BoxMode.XYXY_ABS
annos.append(anno)
else:
# See also the official annotation parsing scripts at
# https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/instances2dict.py # noqa
with PathManager.open(instance_id_file, "rb") as f:
inst_image = np.asarray(Image.open(f), order="F")
# ids < 24 are stuff labels (filtering them first is about 5% faster)
flattened_ids = np.unique(inst_image[inst_image >= 24])
ret = {
"file_name": image_file,
"image_id": os.path.basename(image_file),
"height": inst_image.shape[0],
"width": inst_image.shape[1],
}
for instance_id in flattened_ids:
# For non-crowd annotations, instance_id // 1000 is the label_id
# Crowd annotations have <1000 instance ids
label_id = instance_id // 1000 if instance_id >= 1000 else instance_id
label = id2label[label_id]
if not label.hasInstances or label.ignoreInEval:
continue
anno = {}
anno["iscrowd"] = instance_id < 1000
anno["category_id"] = label.id
mask = np.asarray(inst_image == instance_id, dtype=np.uint8, order="F")
inds = np.nonzero(mask)
ymin, ymax = inds[0].min(), inds[0].max()
xmin, xmax = inds[1].min(), inds[1].max()
anno["bbox"] = (xmin, ymin, xmax, ymax)
if xmax <= xmin or ymax <= ymin:
continue
anno["bbox_mode"] = BoxMode.XYXY_ABS
if to_polygons:
# This conversion comes from D4809743 and D5171122,
# when Mask-RCNN was first developed.
contours = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[
-2
]
polygons = [c.reshape(-1).tolist() for c in contours if len(c) >= 3]
# opencv's can produce invalid polygons
if len(polygons) == 0:
continue
anno["segmentation"] = polygons
else:
anno["segmentation"] = mask_util.encode(mask[:, :, None])[0]
annos.append(anno)
ret["annotations"] = annos
return ret
if __name__ == "__main__":
"""
Test the cityscapes dataset loader.
Usage:
python -m detectron2.data.datasets.cityscapes \
cityscapes/leftImg8bit/train cityscapes/gtFine/train
"""
parser = argparse.ArgumentParser()
parser.add_argument("image_dir")
parser.add_argument("gt_dir")
parser.add_argument("--type", choices=["instance", "semantic"], default="instance")
args = parser.parse_args()
| logger = setup_logger(name=__name__)
| 3 | 2023-12-05 02:51:53+00:00 | 8k |
DiffusionLight/DiffusionLight | relighting/pipeline_xl.py | [
{
"identifier": "custom_prepare_latents",
"path": "relighting/pipeline_utils.py",
"snippet": "def custom_prepare_latents(\n self,\n batch_size,\n num_channels_latents,\n height,\n width,\n dtype,\n device,\n generator,\n latents=None,\n ... | import torch
from typing import List, Union, Dict, Any, Callable, Optional, Tuple
from diffusers.utils.torch_utils import is_compiled_module
from diffusers.models import ControlNetModel
from diffusers.pipelines.controlnet import MultiControlNetModel
from diffusers import StableDiffusionXLControlNetInpaintPipeline
from diffusers.image_processor import PipelineImageInput
from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput
from relighting.pipeline_utils import custom_prepare_latents, custom_prepare_mask_latents, rescale_noise_cfg | 4,892 | crops_coords_top_left,
target_size,
aesthetic_score,
negative_aesthetic_score,
dtype=prompt_embeds.dtype,
)
add_time_ids = add_time_ids.repeat(batch_size * num_images_per_prompt, 1)
if do_classifier_free_guidance:
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
add_neg_time_ids = add_neg_time_ids.repeat(batch_size * num_images_per_prompt, 1)
add_time_ids = torch.cat([add_neg_time_ids, add_time_ids], dim=0)
prompt_embeds = prompt_embeds.to(device)
add_text_embeds = add_text_embeds.to(device)
add_time_ids = add_time_ids.to(device)
# 11. Denoising loop
num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
if (
denoising_end is not None
and denoising_start is not None
and denoising_value_valid(denoising_end)
and denoising_value_valid(denoising_start)
and denoising_start >= denoising_end
):
raise ValueError(
f"`denoising_start`: {denoising_start} cannot be larger than or equal to `denoising_end`: "
+ f" {denoising_end} when using type float."
)
elif denoising_end is not None and denoising_value_valid(denoising_end):
discrete_timestep_cutoff = int(
round(
self.scheduler.config.num_train_timesteps
- (denoising_end * self.scheduler.config.num_train_timesteps)
)
)
num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))
timesteps = timesteps[:num_inference_steps]
with self.progress_bar(total=num_inference_steps) as progress_bar:
for i, t in enumerate(timesteps):
# expand the latents if we are doing classifier free guidance
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
# concat latents, mask, masked_image_latents in the channel dimension
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
# controlnet(s) inference
if guess_mode and do_classifier_free_guidance:
# Infer ControlNet only for the conditional batch.
control_model_input = latents
control_model_input = self.scheduler.scale_model_input(control_model_input, t)
controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
controlnet_added_cond_kwargs = {
"text_embeds": add_text_embeds.chunk(2)[1],
"time_ids": add_time_ids.chunk(2)[1],
}
else:
control_model_input = latent_model_input
controlnet_prompt_embeds = prompt_embeds
controlnet_added_cond_kwargs = added_cond_kwargs
if isinstance(controlnet_keep[i], list):
cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
else:
controlnet_cond_scale = controlnet_conditioning_scale
if isinstance(controlnet_cond_scale, list):
controlnet_cond_scale = controlnet_cond_scale[0]
cond_scale = controlnet_cond_scale * controlnet_keep[i]
# # Resize control_image to match the size of the input to the controlnet
# if control_image.shape[-2:] != control_model_input.shape[-2:]:
# control_image = F.interpolate(control_image, size=control_model_input.shape[-2:], mode="bilinear", align_corners=False)
down_block_res_samples, mid_block_res_sample = self.controlnet(
control_model_input,
t,
encoder_hidden_states=controlnet_prompt_embeds,
controlnet_cond=control_image,
conditioning_scale=cond_scale,
guess_mode=guess_mode,
added_cond_kwargs=controlnet_added_cond_kwargs,
return_dict=False,
)
if guess_mode and do_classifier_free_guidance:
# Infered ControlNet only for the conditional batch.
# To apply the output of ControlNet to both the unconditional and conditional batches,
# add 0 to the unconditional batch to keep it unchanged.
down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])
if num_channels_unet == 9:
latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1)
# predict the noise residual
noise_pred = self.unet(
latent_model_input,
t,
encoder_hidden_states=prompt_embeds,
cross_attention_kwargs=cross_attention_kwargs,
down_block_additional_residuals=down_block_res_samples,
mid_block_additional_residual=mid_block_res_sample,
added_cond_kwargs=added_cond_kwargs,
return_dict=False,
)[0]
# perform guidance
if do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
if do_classifier_free_guidance and guidance_rescale > 0.0:
print("rescale: ", guidance_rescale)
# Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
|
class CustomStableDiffusionXLControlNetInpaintPipeline(StableDiffusionXLControlNetInpaintPipeline):
@torch.no_grad()
def __call__(
self,
prompt: Union[str, List[str]] = None,
prompt_2: Optional[Union[str, List[str]]] = None,
image: PipelineImageInput = None,
mask_image: PipelineImageInput = None,
control_image: Union[
PipelineImageInput,
List[PipelineImageInput],
] = None,
height: Optional[int] = None,
width: Optional[int] = None,
strength: float = 0.9999,
num_inference_steps: int = 50,
denoising_start: Optional[float] = None,
denoising_end: Optional[float] = None,
guidance_scale: float = 5.0,
negative_prompt: Optional[Union[str, List[str]]] = None,
negative_prompt_2: Optional[Union[str, List[str]]] = None,
num_images_per_prompt: Optional[int] = 1,
eta: float = 0.0,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.FloatTensor] = None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
callback_steps: int = 1,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
controlnet_conditioning_scale: Union[float, List[float]] = 1.0,
guess_mode: bool = False,
control_guidance_start: Union[float, List[float]] = 0.0,
control_guidance_end: Union[float, List[float]] = 1.0,
guidance_rescale: float = 0.0,
original_size: Tuple[int, int] = None,
crops_coords_top_left: Tuple[int, int] = (0, 0),
target_size: Tuple[int, int] = None,
aesthetic_score: float = 6.0,
negative_aesthetic_score: float = 2.5,
newx: int = 0,
newy: int = 0,
newr: int = 256,
current_seed=0,
use_noise_moving=True,
):
# OVERWRITE METHODS
self.prepare_mask_latents = custom_prepare_mask_latents.__get__(self, CustomStableDiffusionXLControlNetInpaintPipeline)
self.prepare_latents = custom_prepare_latents.__get__(self, CustomStableDiffusionXLControlNetInpaintPipeline)
controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
# align format for control guidance
if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
control_guidance_start = len(control_guidance_end) * [control_guidance_start]
elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
control_guidance_end = len(control_guidance_start) * [control_guidance_end]
elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1
control_guidance_start, control_guidance_end = mult * [control_guidance_start], mult * [
control_guidance_end
]
# # 0.0 Default height and width to unet
# height = height or self.unet.config.sample_size * self.vae_scale_factor
# width = width or self.unet.config.sample_size * self.vae_scale_factor
# 0.1 align format for control guidance
if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
control_guidance_start = len(control_guidance_end) * [control_guidance_start]
elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
control_guidance_end = len(control_guidance_start) * [control_guidance_end]
elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1
control_guidance_start, control_guidance_end = mult * [control_guidance_start], mult * [
control_guidance_end
]
# 1. Check inputs
self.check_inputs(
prompt,
prompt_2,
control_image,
strength,
num_inference_steps,
callback_steps,
negative_prompt,
negative_prompt_2,
prompt_embeds,
negative_prompt_embeds,
pooled_prompt_embeds,
negative_pooled_prompt_embeds,
controlnet_conditioning_scale,
control_guidance_start,
control_guidance_end,
)
# 2. Define call parameters
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
device = self._execution_device
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
do_classifier_free_guidance = guidance_scale > 1.0
if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)
# 3. Encode input prompt
text_encoder_lora_scale = (
cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
)
(
prompt_embeds,
negative_prompt_embeds,
pooled_prompt_embeds,
negative_pooled_prompt_embeds,
) = self.encode_prompt(
prompt=prompt,
prompt_2=prompt_2,
device=device,
num_images_per_prompt=num_images_per_prompt,
do_classifier_free_guidance=do_classifier_free_guidance,
negative_prompt=negative_prompt,
negative_prompt_2=negative_prompt_2,
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_prompt_embeds,
pooled_prompt_embeds=pooled_prompt_embeds,
negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
lora_scale=text_encoder_lora_scale,
)
# 4. set timesteps
def denoising_value_valid(dnv):
return isinstance(denoising_end, float) and 0 < dnv < 1
self.scheduler.set_timesteps(num_inference_steps, device=device)
timesteps, num_inference_steps = self.get_timesteps(
num_inference_steps, strength, device, denoising_start=denoising_start if denoising_value_valid else None
)
# check that number of inference steps is not < 1 - as this doesn't make sense
if num_inference_steps < 1:
raise ValueError(
f"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline"
f"steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline."
)
# at which timestep to set the initial noise (n.b. 50% if strength is 0.5)
latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
# create a boolean to check if the strength is set to 1. if so then initialise the latents with pure noise
is_strength_max = strength == 1.0
# 5. Preprocess mask and image - resizes image and mask w.r.t height and width
# 5.1 Prepare init image
init_image = self.image_processor.preprocess(image, height=height, width=width)
init_image = init_image.to(dtype=torch.float32)
# 5.2 Prepare control images
if isinstance(controlnet, ControlNetModel):
control_image = self.prepare_control_image(
image=control_image,
width=width,
height=height,
batch_size=batch_size * num_images_per_prompt,
num_images_per_prompt=num_images_per_prompt,
device=device,
dtype=controlnet.dtype,
do_classifier_free_guidance=do_classifier_free_guidance,
guess_mode=guess_mode,
)
elif isinstance(controlnet, MultiControlNetModel):
control_images = []
for control_image_ in control_image:
control_image_ = self.prepare_control_image(
image=control_image_,
width=width,
height=height,
batch_size=batch_size * num_images_per_prompt,
num_images_per_prompt=num_images_per_prompt,
device=device,
dtype=controlnet.dtype,
do_classifier_free_guidance=do_classifier_free_guidance,
guess_mode=guess_mode,
)
control_images.append(control_image_)
control_image = control_images
else:
raise ValueError(f"{controlnet.__class__} is not supported.")
# 5.3 Prepare mask
mask = self.mask_processor.preprocess(mask_image, height=height, width=width)
masked_image = init_image * (mask < 0.5)
_, _, height, width = init_image.shape
# 6. Prepare latent variables
num_channels_latents = self.vae.config.latent_channels
num_channels_unet = self.unet.config.in_channels
return_image_latents = num_channels_unet == 4
add_noise = True if denoising_start is None else False
latents_outputs = self.prepare_latents(
batch_size * num_images_per_prompt,
num_channels_latents,
height,
width,
prompt_embeds.dtype,
device,
generator,
latents,
image=init_image,
timestep=latent_timestep,
is_strength_max=is_strength_max,
return_noise=True,
return_image_latents=return_image_latents,
newx=newx,
newy=newy,
newr=newr,
current_seed=current_seed,
use_noise_moving=use_noise_moving,
)
if return_image_latents:
latents, noise, image_latents = latents_outputs
else:
latents, noise = latents_outputs
# 7. Prepare mask latent variables
mask, masked_image_latents = self.prepare_mask_latents(
mask,
masked_image,
batch_size * num_images_per_prompt,
height,
width,
prompt_embeds.dtype,
device,
generator,
do_classifier_free_guidance,
)
# 8. Check that sizes of mask, masked image and latents match
if num_channels_unet == 9:
# default case for runwayml/stable-diffusion-inpainting
num_channels_mask = mask.shape[1]
num_channels_masked_image = masked_image_latents.shape[1]
if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels:
raise ValueError(
f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects"
f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +"
f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}"
f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of"
" `pipeline.unet` or your `mask_image` or `image` input."
)
elif num_channels_unet != 4:
raise ValueError(
f"The unet {self.unet.__class__} should have either 4 or 9 input channels, not {self.unet.config.in_channels}."
)
# 8.1 Prepare extra step kwargs.
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
# 8.2 Create tensor stating which controlnets to keep
controlnet_keep = []
for i in range(len(timesteps)):
keeps = [
1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
for s, e in zip(control_guidance_start, control_guidance_end)
]
if isinstance(self.controlnet, MultiControlNetModel):
controlnet_keep.append(keeps)
else:
controlnet_keep.append(keeps[0])
# 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
height, width = latents.shape[-2:]
height = height * self.vae_scale_factor
width = width * self.vae_scale_factor
original_size = original_size or (height, width)
target_size = target_size or (height, width)
# 10. Prepare added time ids & embeddings
add_text_embeds = pooled_prompt_embeds
add_time_ids, add_neg_time_ids = self._get_add_time_ids(
original_size,
crops_coords_top_left,
target_size,
aesthetic_score,
negative_aesthetic_score,
dtype=prompt_embeds.dtype,
)
add_time_ids = add_time_ids.repeat(batch_size * num_images_per_prompt, 1)
if do_classifier_free_guidance:
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
add_neg_time_ids = add_neg_time_ids.repeat(batch_size * num_images_per_prompt, 1)
add_time_ids = torch.cat([add_neg_time_ids, add_time_ids], dim=0)
prompt_embeds = prompt_embeds.to(device)
add_text_embeds = add_text_embeds.to(device)
add_time_ids = add_time_ids.to(device)
# 11. Denoising loop
num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
if (
denoising_end is not None
and denoising_start is not None
and denoising_value_valid(denoising_end)
and denoising_value_valid(denoising_start)
and denoising_start >= denoising_end
):
raise ValueError(
f"`denoising_start`: {denoising_start} cannot be larger than or equal to `denoising_end`: "
+ f" {denoising_end} when using type float."
)
elif denoising_end is not None and denoising_value_valid(denoising_end):
discrete_timestep_cutoff = int(
round(
self.scheduler.config.num_train_timesteps
- (denoising_end * self.scheduler.config.num_train_timesteps)
)
)
num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))
timesteps = timesteps[:num_inference_steps]
with self.progress_bar(total=num_inference_steps) as progress_bar:
for i, t in enumerate(timesteps):
# expand the latents if we are doing classifier free guidance
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
# concat latents, mask, masked_image_latents in the channel dimension
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
# controlnet(s) inference
if guess_mode and do_classifier_free_guidance:
# Infer ControlNet only for the conditional batch.
control_model_input = latents
control_model_input = self.scheduler.scale_model_input(control_model_input, t)
controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
controlnet_added_cond_kwargs = {
"text_embeds": add_text_embeds.chunk(2)[1],
"time_ids": add_time_ids.chunk(2)[1],
}
else:
control_model_input = latent_model_input
controlnet_prompt_embeds = prompt_embeds
controlnet_added_cond_kwargs = added_cond_kwargs
if isinstance(controlnet_keep[i], list):
cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
else:
controlnet_cond_scale = controlnet_conditioning_scale
if isinstance(controlnet_cond_scale, list):
controlnet_cond_scale = controlnet_cond_scale[0]
cond_scale = controlnet_cond_scale * controlnet_keep[i]
# # Resize control_image to match the size of the input to the controlnet
# if control_image.shape[-2:] != control_model_input.shape[-2:]:
# control_image = F.interpolate(control_image, size=control_model_input.shape[-2:], mode="bilinear", align_corners=False)
down_block_res_samples, mid_block_res_sample = self.controlnet(
control_model_input,
t,
encoder_hidden_states=controlnet_prompt_embeds,
controlnet_cond=control_image,
conditioning_scale=cond_scale,
guess_mode=guess_mode,
added_cond_kwargs=controlnet_added_cond_kwargs,
return_dict=False,
)
if guess_mode and do_classifier_free_guidance:
# Infered ControlNet only for the conditional batch.
# To apply the output of ControlNet to both the unconditional and conditional batches,
# add 0 to the unconditional batch to keep it unchanged.
down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])
if num_channels_unet == 9:
latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1)
# predict the noise residual
noise_pred = self.unet(
latent_model_input,
t,
encoder_hidden_states=prompt_embeds,
cross_attention_kwargs=cross_attention_kwargs,
down_block_additional_residuals=down_block_res_samples,
mid_block_additional_residual=mid_block_res_sample,
added_cond_kwargs=added_cond_kwargs,
return_dict=False,
)[0]
# perform guidance
if do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
if do_classifier_free_guidance and guidance_rescale > 0.0:
print("rescale: ", guidance_rescale)
# Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf | noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale) | 2 | 2023-12-07 14:03:31+00:00 | 8k |
eliphatfs/zerorf | lib/core/ssdnerf_gui.py | [
{
"identifier": "vdb_utils",
"path": "lib/core/utils/vdb_utils.py",
"snippet": "def bit_index(xyz, ratio, level):\ndef write_lenstr(buffer: list, s: str):\ndef getlen(buffer: list):\ndef coo_to_mask(nelem: int, coo: list):\ndef coo_to_dense(nelem: int, coo: list, vals: list):\ndef write_inter_node(buffe... | import os
import random
import math
import copy
import json
import numpy as np
import trimesh
import torch
import torch.nn.functional as F
import cv2
import mmcv
import dearpygui.dearpygui as dpg
import matplotlib.pyplot as plotlib
from scipy.spatial.transform import Rotation as R
from mmgen.models.builder import build_module
from mmgen.models.architectures.common import get_module_device
from mmgen.apis import set_random_seed # isort:skip # noqa
from .utils import extract_geometry, surround_views, vdb_utils, rgetattr, rsetattr
from .utils.nerf_utils import extract_fields
from lib.datasets.shapenet_srn import load_pose, load_intrinsics
from videoio import VideoWriter | 5,012 | self.model.test_cfg['extra_scene_step'] = app_data - 1
with dpg.group(horizontal=True):
dpg.add_button(label='load input img', callback=lambda: dpg.show_item('guide_image_dialog'))
dpg.add_slider_float(
label='overlay', min_value=0.0, max_value=1.0, width=170,
default_value=self.overlay_opacity, callback=callback_set_guide_overlay)
dpg.add_text('Guidance params:')
dpg.add_input_float(
label='guidance gain', width=130, default_value=self.guide_gain, callback=callback_set_guide_gain)
dpg.add_input_float(
label='SNR power', width=100,
default_value=self.model_diffusion.test_cfg.get(
'snr_weight_power', self.model_diffusion.timestep_sampler.power),
format='%.3f', callback=callback_set_snr_power)
with dpg.group(horizontal=True):
dpg.add_input_int(
label='langevin steps', width=90, default_value=self.model_diffusion.test_cfg.get('langevin_steps', 0),
min_value=0, max_value=100, min_clamped=True, callback=callback_set_langevin_steps)
dpg.add_input_float(
label='delta', width=100, default_value=self.model_diffusion.test_cfg.get('langevin_delta', 0.4),
format='%.2f', callback=callback_set_langevin_delta)
dpg.add_text('Finetuning optim params:')
dpg.add_input_float(
label='ddpm loss gain', width=130,
default_value=rgetattr(self.model, self.ddpm_loss_key) / self.train_ddpm_weight,
callback=callback_set_ddpm_loss_gain)
dpg.add_input_float(
label='learning rate', width=130, default_value=self.ft_optimizer['lr'], format='%.2e',
callback=callback_set_learning_rate)
with dpg.group(horizontal=True):
dpg.add_input_int(
label='Outer steps', width=90, default_value=self.model.test_cfg.get('n_inverse_steps', 25),
min_value=0, max_value=1000, min_clamped=True, callback=callback_set_outer_loop_steps)
dpg.add_input_int(
label='Inner steps', width=90, default_value=self.model.test_cfg.get('extra_scene_step', 3) + 1,
min_value=1, max_value=100, min_clamped=True, callback=callback_set_inner_loop_steps)
with dpg.collapsing_header(label='Camera options', default_open=True):
def callback_set_cam(sender, app_data):
self.active_cam = getattr(self, app_data + '_cam')
update_camera_status()
self.need_update = True
def callback_reset_camera(sender, app_data):
self.active_cam.fovy = self.default_cam_fovy
self.active_cam.radius = self.default_cam_radius
self.active_cam.set_euler(self.default_cam_euler)
self.active_cam.center = np.array([0, 0, 0], dtype=np.float32)
update_camera_status()
self.need_update = True
with dpg.group(horizontal=True):
dpg.add_combo(
['default', 'guide'], label='camera', width=150,
default_value=self.active_cam.name, callback=callback_set_cam, tag='cam_combo')
dpg.add_button(label='Reset camera', callback=callback_reset_camera)
def callback_set_fovy(sender, app_data):
self.active_cam.fovy = app_data
update_camera_status()
self.need_update = True
def callback_set_cam_r(sender, app_data):
self.active_cam.radius = app_data
update_camera_status()
self.need_update = True
def callback_set_euler(sender, app_data, axis):
euler = self.active_cam.euler
euler[axis] = app_data
self.active_cam.set_euler(euler)
update_camera_status()
self.need_update = True
def callback_set_center(sender, app_data, axis):
self.active_cam.center[axis] = app_data
update_camera_status()
self.need_update = True
dpg.add_slider_float(
label='FoV (vertical)', min_value=1, max_value=120, clamped=True, format='%.1f deg',
default_value=self.active_cam.fovy, callback=callback_set_fovy, tag='fov')
dpg.add_slider_float(
label='radius', min_value=1.0, max_value=5.0, format='%.2f',
default_value=self.active_cam.radius, callback=callback_set_cam_r, tag='radius')
dpg.add_slider_float(
label='azimuth', min_value=-180, max_value=180, clamped=True, format='%.1f deg',
default_value=self.active_cam.euler[2],
callback=lambda x, y: callback_set_euler(x, y, 2), tag='azimuth')
dpg.add_slider_float(
label='elevation', min_value=-89, max_value=89, clamped=True, format='%.1f deg',
default_value=self.active_cam.euler[1],
callback=lambda x, y: callback_set_euler(x, y, 1), tag='elevation')
dpg.add_slider_float(
label='roll', min_value=-180, max_value=180, clamped=True, format='%.1f deg',
default_value=self.active_cam.euler[0],
callback=lambda x, y: callback_set_euler(x, y, 0), tag='roll')
dpg.add_text('Orbit center:')
with dpg.group(horizontal=True):
dpg.add_input_float(
width=110, format='x: %.2f', tag='center_x',
default_value=self.active_cam.center[0], callback=lambda x, y: callback_set_center(x, y, 0))
dpg.add_input_float(
width=110, format='y: %.2f', tag='center_y',
default_value=self.active_cam.center[1], callback=lambda x, y: callback_set_center(x, y, 1))
dpg.add_input_float(
width=110, format='z: %.2f', tag='center_z',
default_value=self.active_cam.center[2], callback=lambda x, y: callback_set_center(x, y, 2))
def callback_load_intrinsic(sender, app_data):
fx, fy, cx, cy, h, w = load_intrinsics(app_data['file_path_name'])
assert fx == fy and cx == w / 2 and cy == h / 2, 'GUI supports only rectified images'
self.active_cam.fovy = np.rad2deg(2 * np.arctan2(h / 2, fy))
update_camera_status()
self.need_update = True
def callback_load_extrinsic(sender, app_data):
| # modified from torch-ngp
def load_img(path, background=(1., 1., 1.)):
    """Load an image as float32 RGB in [0, 1], compositing any alpha
    channel over a solid background color.

    Args:
        path (str): Image file path readable by ``mmcv.imread``.
        background (sequence of 3 floats): RGB background used where the
            image is transparent. Defaults to white. (A tuple rather than a
            mutable list, so the default cannot be shared or mutated.)

    Returns:
        np.ndarray: Contiguous (H, W, 3) float32 RGB array.
    """
    # flag='unchanged' keeps the alpha channel (if present); mmcv yields BGR(A).
    bgra = mmcv.imread(
        path, flag='unchanged', channel_order='bgr'
    ).astype(np.float32) / 255
    bgr = bgra[:, :, :3]
    rgb = bgr[:, :, ::-1]  # BGR -> RGB by reversing the channel axis
    if bgra.shape[2] == 4:
        # Alpha-composite the image over the requested background color.
        alpha = bgra[:, :, 3:4]
        rgb = rgb * alpha + np.array(background, dtype=np.float32) * (1 - alpha)
    # The channel flip above produced a negative-stride view; make it contiguous.
    return np.ascontiguousarray(rgb)
class OrbitCamera:
    """Turntable-style GUI camera: an orientation orbiting a look-at
    ``center`` at distance ``radius``, with a vertical FoV in degrees."""

    def __init__(self, name, W, H, r=2., fovy=60., euler=[0, 0, 0]):
        self.name = name
        self.W = W
        self.H = H
        self.radius = r  # distance between the camera and the orbit center
        self.fovy = fovy  # vertical field of view, degrees
        self.center = np.array([0, 0, 0], dtype=np.float32)  # look-at target
        self.default_rot = R.from_quat([0.5, -0.5, 0.5, -0.5])
        self.rot = copy.deepcopy(self.default_rot)
        self.up = np.array([0, 0, 1], dtype=np.float32)  # world up; unit length
        self.set_euler(euler)

    @property
    def pose(self):
        """Camera-to-world transform as a 4x4 float32 matrix."""
        # Rotate a camera placed ``radius`` units along -z, then shift the
        # whole frame by the negated orbit center.
        c2w = np.eye(4, dtype=np.float32)
        c2w[:3, :3] = self.rot.as_matrix()
        c2w[:3, 3] = c2w[:3, :3] @ np.array(
            [0., 0., -self.radius], dtype=np.float32) - self.center
        return c2w

    def set_pose(self, pose):
        """Adopt an external camera-to-world pose; the orbit center is
        re-derived assuming it lies ``radius`` units along the view axis."""
        self.rot = R.from_matrix(pose[:3, :3])
        view_axis = self.rot.as_matrix()[:3, 2]
        self.center = -pose[:3, 3] - view_axis * self.radius

    @property
    def intrinsics(self):
        """Pinhole intrinsics ``[fx, fy, cx, cy]`` with a centered principal point."""
        f = 0.5 * self.H / np.tan(0.5 * np.radians(self.fovy))
        return np.array([f, f, 0.5 * self.W, 0.5 * self.H])

    @property
    def euler(self):
        """Euler angles (xyz order, degrees) relative to the default rotation."""
        return (self.rot * self.default_rot.inv()).as_euler('xyz', degrees=True)

    def set_euler(self, euler):
        """Set the orientation from xyz Euler angles given in degrees."""
        self.rot = R.from_euler('xyz', euler, degrees=True) * self.default_rot

    def orbit(self, dx, dy):
        """Rotate about the target: ``dx`` spins around world up, ``dy``
        around the camera side axis (0.1 degree per unit of drag)."""
        side_axis = self.rot.as_matrix()[:3, 0]  # already unit length
        spin = R.from_rotvec(self.up * np.radians(-0.1 * dx))
        tilt = R.from_rotvec(side_axis * np.radians(-0.1 * dy))
        self.rot = spin * tilt * self.rot

    def scale(self, delta):
        """Dolly in/out: each positive unit of ``delta`` shrinks the radius by 1.1x."""
        self.radius *= 1.1 ** (-delta)

    def pan(self, dx, dy, dz=0):
        """Translate the orbit center in the camera frame (low sensitivity)."""
        shift = np.array([dx, dy, dz])
        self.center += 0.0005 * self.rot.as_matrix()[:3, :3] @ shift

    def pose2str(self):
        """Pretty-print the current pose with compact float formatting."""
        with np.printoptions(precision=3, suppress=True):
            return str(self.pose)
class SSDNeRFGUI:
    """Interactive dearpygui viewer/editor for SSDNeRF models.

    Wraps a trained model and exposes rendering, diffusion sampling and
    finetuning controls. Construction immediately creates the dearpygui
    context, builds the UI and renders a first frame.
    """

    # Initial orbit-camera parameters shared by the default and guide cameras.
    default_cam_fovy = 52.0
    default_cam_radius = 2.6
    default_cam_euler = [0.0, 23.0, -47.4]

    def __init__(self, model, W=512, H=512, max_spp=1, debug=True):
        """
        Args:
            model: SSDNeRF model wrapper; expected to expose decoder/diffusion
                (optionally EMA copies), ``test_cfg``/``train_cfg``, a latent
                ``init_code`` and density/render helpers.
            W, H (int): viewport resolution in pixels.
            H (int): viewport height in pixels.
            max_spp (int): maximum samples per pixel accumulated per view.
            debug (bool): if True, dearpygui uses manual callback management.
        """
        self.W = W
        self.H = H
        self.max_spp = max_spp
        # Two cameras: the free-view default camera and the guide camera used
        # to align an input guidance image.
        self.default_cam = OrbitCamera(
            'default', W, H, r=self.default_cam_radius, fovy=self.default_cam_fovy, euler=self.default_cam_euler)
        self.guide_cam = OrbitCamera(
            'guide', W, H, r=self.default_cam_radius, fovy=self.default_cam_fovy, euler=self.default_cam_euler)
        self.active_cam = self.default_cam
        self.debug = debug
        self.bg_color = torch.ones(3, dtype=torch.float32) # default white bg
        self.step = 0 # training step
        self.model = model
        # Prefer EMA weights when the model keeps them.
        self.model_decoder = model.decoder_ema if model.decoder_use_ema else model.decoder
        self.model_diffusion = model.diffusion_ema if model.diffusion_use_ema else model.diffusion
        # Video export defaults (length in seconds, frame rate, resolution).
        self.video_sec = 4
        self.video_fps = 30
        self.video_res = 256
        self.render_buffer = np.zeros((self.H, self.W, 3), dtype=np.float32)
        self.need_update = True # camera moved, should reset accumulation
        self.spp = 1 # sample per pixel
        self.dt_gamma_scale = 0.0
        self.density_thresh = 0.1
        self.mode = 'image' # choose from ['image', 'depth']
        # Marching-cubes mesh extraction settings.
        self.mesh_resolution = 256
        self.mesh_threshold = 10
        self.scene_name = 'model_default'
        # Diffusion sampling controls exposed in the UI.
        self.sampling_mode = 'text'
        self.pos_prompt = ''
        self.neg_prompt = ''
        self.diffusion_seed = -1
        self.diffusion_steps = model.test_cfg.get('num_timesteps', 20)
        self.diffusion_sampler = 'DDIM'
        self.cfg_scale = 1.0
        self.embed_guidance_scale = 0.0
        self.clip_denoised = True
        # The latent code buffer holds the scene currently being edited/viewed.
        dtype = next(self.model_decoder.parameters()).dtype
        if self.model.init_code is None:
            self.code_buffer = torch.zeros(
                self.model.code_size, device=get_module_device(self.model), dtype=dtype)
        else:
            self.code_buffer = self.model.init_code.clone().to(dtype)
        # Occupancy bitfield for empty-space skipping during ray marching.
        _, self.density_bitfield = self.model.get_density(
            self.model_decoder, self.code_buffer[None],
            cfg=dict(density_thresh=self.density_thresh, density_step=16))
        self.dynamic_resolution = False
        self.downscale = 1
        # Real-ESRGAN compact SR network used as an optional display enhancer.
        self.image_enhancer = build_module(dict(
            type='SRVGGNetCompact',
            # num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=4, act_type='prelu',
            num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=32, upscale=4, act_type='prelu',
            # pretrained='https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-animevideov3.pth'
            pretrained='https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth'
        )).half().eval().requires_grad_(False)
        if torch.cuda.is_available():
            self.image_enhancer.cuda()
        self.use_image_enhancer = False
        # Guidance-image state (set when the user loads an input image).
        self.guide_image = None
        self.guide_image_overlay = None
        if 'guidance_gain' in model.test_cfg and 'n_inverse_rays' in model.test_cfg:
            # Normalize the configured gain by the configured ray batch size.
            self.guide_gain = model.test_cfg['guidance_gain'] / model.test_cfg['n_inverse_rays']
        else:
            self.guide_gain = 1.0
        self.overlay_opacity = 0.3
        self.code_viz_range = model.test_cfg.get('clip_range', [-1, 1])
        # Dotted attribute path to the DDPM loss weight, resolved via rgetattr.
        self.ddpm_loss_key = 'diffusion_ema.ddpm_loss.weight_scale' if model.diffusion_use_ema else 'diffusion.ddpm_loss.weight_scale'
        self.train_ddpm_weight = model.train_cfg_backup.get(
            self.ddpm_loss_key, rgetattr(model, self.ddpm_loss_key))
        self.loss_coef = 0.1 # ignore model's test cfg
        # Finetuning optimizer/scheduler configs; fall back to half the
        # training learning rate when test_cfg does not override them.
        self.ft_optimizer = model.test_cfg.get(
            'optimizer', dict(type='Adam', lr=model.train_cfg['optimizer']['lr'] / 2, weight_decay=0.))
        self.ft_lr_scheduler = model.test_cfg.get(
            'lr_scheduler', dict(type='ExponentialLR', gamma=0.998))
        self.extrinsic_ndc_scale = 2.0 # default shapenet dataset value
        # Build the UI and render the first frame.
        dpg.create_context()
        if self.debug:
            dpg.configure_app(manual_callback_management=True)
        self.register_dpg()
        self.test_step()
    def __del__(self):
        # Tear down the dearpygui context created in __init__ so window/GPU
        # resources are released when the viewer object is collected.
        dpg.destroy_context()
def prepare_buffer(self, outputs):
if self.mode == 'image':
return outputs['image']
else:
return np.expand_dims(outputs['depth'], -1).repeat(3, -1)
    def test_gui(self, pose, intrinsics, W, H, bg_color, spp, dt_gamma_scale, downscale):
        """Render a single frame for the GUI and return numpy image/depth.

        Renders at a possibly reduced resolution (half size when the image
        enhancer is active, and/or scaled by ``downscale``), then upsamples
        the result back to (H, W). Returns a dict with 'image' (H, W, 3) and
        'depth' (H, W) numpy arrays.
        """
        with torch.no_grad():
            self.model.bg_color = bg_color.to(self.code_buffer.device)
            # When super-resolution is on, render at half resolution and let
            # the enhancer upscale afterwards.
            if self.use_image_enhancer and self.mode == 'image':
                rH, rW = H // 2, W // 2
                intrinsics = intrinsics / 2
            else:
                rH, rW = H, W
            image, depth = self.model.render(
                self.model_decoder,
                self.code_buffer[None],
                self.density_bitfield[None], rH, rW,
                self.code_buffer.new_tensor(intrinsics * downscale, dtype=torch.float32)[None, None],
                self.code_buffer.new_tensor(pose, dtype=torch.float32)[None, None],
                cfg=dict(dt_gamma_scale=dt_gamma_scale))
            if self.use_image_enhancer and self.mode == 'image':
                # Enhance in fp16, then resize back to the requested (H, W).
                image = self.image_enhancer(image[0].half().permute(0, 3, 1, 2))
                image = F.interpolate(image, size=(H, W), mode='area').permute(0, 2, 3, 1)[None].float()
            results = dict(
                image=image[0, 0],
                depth=depth[0, 0])
            # Nearest-neighbor upsample when dynamic resolution rendered at a
            # reduced size.
            if downscale != 1:
                results['image'] = F.interpolate(
                    results['image'].permute(2, 0, 1)[None], size=(H, W), mode='nearest'
                ).permute(0, 2, 3, 1).reshape(H, W, 3)
                results['depth'] = F.interpolate(results['depth'][None, None], size=(H, W), mode='nearest').reshape(H, W)
            # Blend the guidance image on top when viewing through the guide
            # camera and the overlay is effectively visible.
            if self.overlay_opacity > 0.003 and self.guide_image is not None and self.active_cam.name == 'guide':
                results['image'] = self.guide_image_overlay * self.overlay_opacity + results['image'] * (1 - self.overlay_opacity)
            results['image'] = results['image'].cpu().numpy()
            results['depth'] = results['depth'].cpu().numpy()
            return results
def update_params(self):
with torch.no_grad():
self.density_bitfield = self.model.get_density(
self.model_decoder, self.code_buffer[None],
cfg=dict(density_thresh=self.density_thresh, density_step=16))[1].squeeze(0)
    def test_step(self):
        """Render one frame if needed and push it to the dearpygui texture.

        Accumulates samples-per-pixel (spp) across frames for progressive
        refinement, optionally adapts the render resolution to hit the frame
        budget, and updates the GUI statistics widgets.
        """
        # TODO: seems we have to move data from GPU --> CPU --> GPU?
        if self.need_update or self.spp < self.max_spp:
            # Time the render on-GPU with CUDA events.
            starter, ender = torch.cuda.Event(enable_timing=True), torch.cuda.Event(enable_timing=True)
            starter.record()
            outputs = self.test_gui(
                self.active_cam.pose, self.active_cam.intrinsics,
                self.W, self.H, self.bg_color, self.spp, self.dt_gamma_scale, self.downscale)
            ender.record()
            torch.cuda.synchronize()
            t = starter.elapsed_time(ender)  # milliseconds
            # update dynamic resolution
            if self.dynamic_resolution:
                # max allowed infer time per-frame is 200 ms
                full_t = t / (self.downscale ** 2)
                downscale = min(1, max(1 / 4, math.sqrt(200 / full_t)))
                # Only change resolution on a significant (>20%) deviation to
                # avoid oscillation.
                if downscale > self.downscale * 1.2 or downscale < self.downscale * 0.8:
                    self.downscale = downscale
            if self.need_update:
                # Camera or scene changed: restart the progressive average.
                self.render_buffer = np.ascontiguousarray(self.prepare_buffer(outputs))
                self.spp = 1
                self.need_update = False
            else:
                # Running average of frames for progressive refinement.
                self.render_buffer = (self.render_buffer * self.spp + self.prepare_buffer(outputs)) / (self.spp + 1)
                self.spp += 1
            dpg.set_value('_log_infer_time', f'{t:.4f}ms ({int(1000 / t)} FPS)')
            dpg.set_value('_log_resolution', f'{int(self.downscale * self.W)}x{int(self.downscale * self.H)}')
            dpg.set_value('_log_spp', self.spp)
            dpg.set_value('_log_scene_name', self.scene_name)
            dpg.set_value('_texture', self.render_buffer)
def register_dpg(self):
### register texture
with dpg.texture_registry(show=False):
dpg.add_raw_texture(self.W, self.H, self.render_buffer, format=dpg.mvFormat_Float_rgb, tag='_texture')
### register window
# the rendered image, as the primary window
with dpg.window(tag='_primary_window', width=self.W, height=self.H):
# add the texture
dpg.add_image('_texture')
dpg.set_primary_window('_primary_window', True)
def update_camera_status():
if self.debug:
dpg.set_value('_log_pose', self.active_cam.pose2str())
dpg.set_value('fov', self.active_cam.fovy)
dpg.set_value('radius', self.active_cam.radius)
euler = self.active_cam.euler
dpg.set_value('roll', euler[0])
dpg.set_value('elevation', euler[1])
dpg.set_value('azimuth', euler[2])
center = self.active_cam.center
dpg.set_value('center_x', center[0])
dpg.set_value('center_y', center[1])
dpg.set_value('center_z', center[2])
# control window
with dpg.window(label='Control', tag='_control_window', width=380, height=self.H, pos=[self.W, 0]):
# button theme
with dpg.theme() as theme_button:
with dpg.theme_component(dpg.mvButton):
dpg.add_theme_color(dpg.mvThemeCol_Button, (23, 3, 18))
dpg.add_theme_color(dpg.mvThemeCol_ButtonHovered, (51, 3, 47))
dpg.add_theme_color(dpg.mvThemeCol_ButtonActive, (83, 18, 83))
dpg.add_theme_style(dpg.mvStyleVar_FrameRounding, 5)
dpg.add_theme_style(dpg.mvStyleVar_FramePadding, 3, 3)
# time
with dpg.group(horizontal=True):
dpg.add_text('Infer time: ')
dpg.add_text('no data', tag='_log_infer_time')
with dpg.group(horizontal=True):
dpg.add_text('SPP: ')
dpg.add_text('1', tag='_log_spp')
with dpg.collapsing_header(label='SSDNeRF', default_open=True):
def callback_diffusion_generate(sender, app_data):
diffusion_seed = random.randint(0, 2**31) if self.diffusion_seed == -1 else self.diffusion_seed
set_random_seed(diffusion_seed, deterministic=True)
noise = torch.randn((1,) + self.model.code_size)
self.model_diffusion.test_cfg['num_timesteps'] = self.diffusion_steps
self.model_diffusion.sample_method = self.diffusion_sampler
self.model_diffusion.test_cfg['cfg_scale'] = self.cfg_scale
self.model_diffusion.test_cfg['embed_guidance_scale'] = self.embed_guidance_scale
self.model_diffusion.test_cfg['clip_denoised'] = self.clip_denoised
device = get_module_device(self.model)
data = dict(
noise=noise.to(device),
scene_id=[0],
scene_name=['seed_{}'.format(diffusion_seed)],
prompts=[self.pos_prompt],
neg_prompts=[self.neg_prompt])
if self.sampling_mode == 'image_text':
data['extra_cond_img'] = self.extra_cond_image
data['extra_pose_cond'] = torch.tensor(self.guide_cam.pose[:3].reshape(1, 12)).to(device).float()
if self.sampling_mode in ['guide', 'optim']:
scale = max(self.guide_image.shape[1] / self.W, self.guide_image.shape[0] / self.H)
data['cond_imgs'] = self.guide_image[None, None]
data['cond_intrinsics'] = torch.tensor(
self.guide_cam.intrinsics[None, None] * np.array([
scale, scale,
self.guide_image.size(1) / self.W, self.guide_image.size(0) / self.H])
).to(device).float()
data['cond_poses'] = torch.tensor(self.guide_cam.pose[None, None]).to(device).float()
self.model_diffusion.test_cfg['n_inverse_rays'] = self.guide_image.numel()
self.model.test_cfg['loss_coef'] = self.loss_coef / self.guide_image.numel()
if self.sampling_mode == 'guide':
self.model_diffusion.test_cfg['guidance_gain'] = self.guide_gain * self.guide_image.numel()
if self.sampling_mode == 'optim':
self.model.test_cfg['optimizer'] = self.ft_optimizer
self.model.test_cfg['lr_scheduler'] = self.ft_lr_scheduler
optim_kwargs = dict(
code_=self.model.code_activation.inverse(self.code_buffer[None]))
else:
optim_kwargs = dict()
with torch.no_grad():
sample_fun = getattr(self.model, 'val_' + self.sampling_mode)
code, density_grid, density_bitfield = sample_fun(
data, show_pbar=True, **optim_kwargs)
self.code_buffer = code[0].to(self.code_buffer)
self.density_bitfield = density_bitfield[0]
self.scene_name = 'seed_{}'.format(diffusion_seed)
self.need_update = True
print("Peak VRAM usage:", int(torch.cuda.max_memory_allocated() / 1024 ** 2 + 1), "(M)")
def callback_change_mode(sender, app_data):
self.sampling_mode = app_data
def callback_change_sampler(sender, app_data):
self.diffusion_sampler = app_data
with dpg.group(horizontal=True):
dpg.add_combo(
('text', 'image_text', 'uncond', 'guide', 'optim'), label='mode', default_value=self.sampling_mode,
width=75, callback=callback_change_mode)
dpg.add_combo(
self.model_diffusion.available_samplers, label='sampler', default_value=self.diffusion_sampler,
width=190, callback=callback_change_sampler)
def callback_set_pos_prompt(sender, app_data):
self.pos_prompt = app_data
dpg.add_input_text(
label='prompt', width=290, default_value=self.pos_prompt, callback=callback_set_pos_prompt)
def callback_set_neg_prompt(sender, app_data):
self.neg_prompt = app_data
dpg.add_input_text(
label='neg prompt', width=290, default_value=self.neg_prompt, callback=callback_set_neg_prompt)
def callback_set_cfg_scale(sender, app_data):
self.cfg_scale = app_data
dpg.add_input_float(
label='prompt scale', width=100, default_value=self.cfg_scale, callback=callback_set_cfg_scale)
def callback_set_embed_guidance_scale(sender, app_data):
self.embed_guidance_scale = app_data
dpg.add_input_float(
label='embed guidance', width=100, default_value=self.embed_guidance_scale, callback=callback_set_embed_guidance_scale)
def callback_set_diffusion_seed(sender, app_data):
self.diffusion_seed = app_data
def callback_set_diffusion_steps(sender, app_data):
self.diffusion_steps = app_data
def callback_set_clip_denoised(sender, app_data):
self.clip_denoised = app_data
dpg.add_checkbox(label='clip denoised', callback=callback_set_clip_denoised,
default_value=self.clip_denoised)
with dpg.group(horizontal=True):
dpg.add_button(label='Generate', callback=callback_diffusion_generate)
dpg.add_input_int(
label='seed', width=130, min_value=-1, max_value=2**31 - 1, min_clamped=True, max_clamped=True,
default_value=self.diffusion_seed, callback=callback_set_diffusion_seed, tag='seed')
dpg.add_input_int(
label='steps', width=80, min_value=1, max_value=1000, min_clamped=True, max_clamped=True,
default_value=self.diffusion_steps, callback=callback_set_diffusion_steps)
def callback_save_scene(sender, app_data):
path = app_data['file_path_name']
out = dict(
param=dict(
code=self.code_buffer.cpu(),
density_bitfield=self.density_bitfield.cpu()))
torch.save(out, path)
with dpg.file_dialog(directory_selector=False, show=False, width=450, height=400,
callback=callback_save_scene, tag='save_scene_dialog'):
dpg.add_file_extension('.pth')
with dpg.group(horizontal=True):
dpg.add_button(label='Save scene', callback=lambda: dpg.show_item('save_scene_dialog'))
# scene selector
def callback_load_scene(sender, app_data):
self.scene_name = os.path.splitext(app_data['file_name'])[0]
scene = torch.load(app_data['file_path_name'], map_location='cpu')
self.code_buffer = (
scene['param']['code'] if 'code' in scene['param']
else self.model.code_activation(scene['param']['code_'])).to(self.code_buffer)
self.update_params()
print('Loaded scene: ' + self.scene_name)
self.need_update = True
def callback_recover_seed(sender, app_data):
if self.scene_name.startswith('seed_'):
seed = int(self.scene_name[5:])
self.diffusion_seed = seed
dpg.set_value('seed', seed)
print('Recovered seed: ' + str(seed))
else:
print('Failed to recover seed: ' + self.scene_name)
with dpg.file_dialog(directory_selector=False, show=False, width=450, height=400,
callback=callback_load_scene, tag='scene_selector_dialog'):
dpg.add_file_extension('.pth')
with dpg.group(horizontal=True):
dpg.add_button(label='Load scene', callback=lambda: dpg.show_item('scene_selector_dialog'))
dpg.add_text(tag='_log_scene_name')
dpg.add_button(label='Recover seed', callback=callback_recover_seed)
# save geometry
def callback_export_mesh(sender, app_data):
self.export_mesh(app_data['file_path_name'])
def callback_export_vdb(sender, app_data):
self.export_vdb(app_data['file_path_name'])
def callback_save_code(sender, app_data):
dir_path = app_data['file_path_name']
assert os.path.isdir(dir_path), dir_path + ' is not a directory'
self.model_decoder.visualize(
self.code_buffer[None], [self.scene_name], dir_path, code_range=self.code_viz_range)
def callback_set_vmin(sender, app_data):
self.code_viz_range[0] = app_data
def callback_set_vmax(sender, app_data):
self.code_viz_range[1] = app_data
def callback_set_mesh_resolution(sender, app_data):
self.mesh_resolution = app_data
def callback_set_mesh_threshold(sender, app_data):
self.mesh_threshold = app_data
def callback_set_video_resolution(sender, app_data):
self.video_res = app_data
def callback_set_video_sec(sender, app_data):
self.video_sec = app_data
def callback_export_screenshot(sender, app_data):
path = app_data['file_path_name']
cv2.imwrite(path, np.round(self.render_buffer[..., ::-1] * 255).astype(np.uint8))
def callback_export_multi_view(sender, app_data):
dir_path = app_data['file_path_name']
assert os.path.isdir(dir_path), dir_path + ' is not a directory'
self.export_multi_view_data(dir_path)
def callback_export_video(sender, app_data):
path = app_data['file_path_name']
num_frames = int(round(self.video_fps * self.video_sec))
tmp_cam = OrbitCamera(
'tmp', self.video_res, self.video_res,
r=self.default_cam_radius, fovy=self.default_cam_fovy, euler=self.default_cam_euler)
camera_poses = surround_views(
self.code_buffer.new_tensor(tmp_cam.pose, dtype=torch.float32), num_frames=num_frames)
writer = VideoWriter(
path,
resolution=(self.video_res, self.video_res),
lossless=False,
fps=self.video_fps)
bs = 4
device = self.code_buffer.device
with torch.no_grad():
prog = mmcv.ProgressBar(num_frames)
prog.start()
for pose_batch in camera_poses.split(bs, dim=0):
intrinsics = self.code_buffer.new_tensor(
tmp_cam.intrinsics[None], dtype=torch.float32).expand(pose_batch.size(0), -1)[None]
res = self.video_res
if self.use_image_enhancer:
res = res // 2
intrinsics = intrinsics * (res / self.video_res)
image_batch, depth = self.model.render(
self.model_decoder,
self.code_buffer[None],
self.density_bitfield[None], res, res,
intrinsics,
pose_batch.to(device)[None])
if self.use_image_enhancer:
image_batch = self.image_enhancer(image_batch[0].half().permute(0, 3, 1, 2).clamp(min=0, max=1))
image_batch = F.interpolate(
image_batch, size=(self.video_res, self.video_res), mode='area'
).permute(0, 2, 3, 1)[None]
for image in torch.round(image_batch[0].clamp(min=0, max=1) * 255).to(torch.uint8).cpu().numpy():
writer.write(image)
prog.update(bs)
writer.close()
with dpg.file_dialog(directory_selector=False, show=False, width=450, height=400,
callback=callback_export_mesh, tag='export_mesh_dialog'):
dpg.add_file_extension('.stl')
dpg.add_file_extension('.dict')
dpg.add_file_extension('.json')
dpg.add_file_extension('.glb')
dpg.add_file_extension('.obj')
dpg.add_file_extension('.gltf')
dpg.add_file_extension('.dict64')
dpg.add_file_extension('.msgpack')
dpg.add_file_extension('.stl_ascii')
with dpg.file_dialog(directory_selector=False, show=False, width=450, height=400,
callback=callback_export_vdb, tag='export_vdb_dialog'):
dpg.add_file_extension('.vdb')
with dpg.file_dialog(directory_selector=False, show=False, width=450, height=400,
callback=callback_save_code, tag='save_code_dialog'):
dpg.add_file_extension('.')
with dpg.file_dialog(directory_selector=False, show=False, width=450, height=400,
callback=callback_export_screenshot, tag='export_screenshot_dialog'):
dpg.add_file_extension('.png')
with dpg.file_dialog(directory_selector=False, show=False, width=450, height=400,
callback=callback_export_multi_view, tag='export_multi_view_dialog'):
dpg.add_file_extension('.')
with dpg.file_dialog(directory_selector=False, show=False, width=450, height=400,
callback=callback_export_video, tag='export_video_dialog'):
dpg.add_file_extension('.mp4')
with dpg.group(horizontal=True):
dpg.add_button(label='Export screenshot', callback=lambda: dpg.show_item('export_screenshot_dialog'))
dpg.add_button(label='Export multi-view', callback=lambda: dpg.show_item('export_multi_view_dialog'))
with dpg.group(horizontal=True):
dpg.add_button(label='Export video', callback=lambda: dpg.show_item('export_video_dialog'))
dpg.add_input_int(
label='res', width=90, min_value=4, max_value=1024, min_clamped=True, max_clamped=True,
default_value=self.video_res, callback=callback_set_video_resolution)
dpg.add_input_float(
label='len', width=100, min_value=0, max_value=10, min_clamped=True, max_clamped=True,
default_value=self.video_sec, callback=callback_set_video_sec, format='%.1f sec')
with dpg.group(horizontal=True):
dpg.add_button(label='Export mesh', callback=lambda: dpg.show_item('export_mesh_dialog'))
dpg.add_input_int(
label='res', width=90, min_value=4, max_value=1024, min_clamped=True, max_clamped=True,
default_value=self.mesh_resolution, callback=callback_set_mesh_resolution)
dpg.add_input_float(
label='thr', width=100, min_value=0, max_value=1000, min_clamped=True, max_clamped=True,
format='%.2f', default_value=self.mesh_threshold, callback=callback_set_mesh_threshold)
dpg.add_button(label='Export volume', callback=lambda: dpg.show_item('export_vdb_dialog'))
with dpg.group(horizontal=True):
dpg.add_button(label='Export code viz', callback=lambda: dpg.show_item('save_code_dialog'))
dpg.add_input_float(
label='vmin', width=85, format='%.1f',
default_value=self.code_viz_range[0], callback=callback_set_vmin)
dpg.add_input_float(
label='vmax', width=85, format='%.1f',
default_value=self.code_viz_range[1], callback=callback_set_vmax)
with dpg.collapsing_header(label='Guidance/finetuning options', default_open=False):
def callback_load_guide_image(sender, app_data):
img = load_img(app_data['file_path_name'], [0.5, 0.5, 0.5])
img = (img - 0.5) * 1.2
self.extra_cond_image = torch.tensor(
cv2.resize(img, [384, 384], interpolation=cv2.INTER_LINEAR)
)[None].float().to(self.code_buffer.device)
self.guide_image = torch.tensor(
load_img(app_data['file_path_name'])).float().to(self.code_buffer.device)
bg = self.bg_color.to(self.guide_image.device)[:, None, None]
scale = min(self.W / self.guide_image.shape[1], self.H / self.guide_image.shape[0])
grid = F.affine_grid(
torch.tensor(
[[self.W / (self.guide_image.shape[1] * scale), 0, 0],
[0, self.H / (self.guide_image.shape[0] * scale), 0]],
dtype=self.guide_image.dtype, device=self.guide_image.device)[None],
[1, 3, self.H, self.W], align_corners=False)
self.guide_image_overlay = (F.grid_sample(
self.guide_image.permute(2, 0, 1)[None] - bg,
grid, mode='nearest', padding_mode='zeros', align_corners=False,
) + bg).squeeze(0).permute(1, 2, 0)
self.active_cam = self.guide_cam
update_camera_status()
dpg.set_value('cam_combo', 'guide')
self.need_update = True
with dpg.file_dialog(directory_selector=False, show=False, width=450, height=400,
callback=callback_load_guide_image, tag='guide_image_dialog'):
dpg.add_file_extension('.png')
def callback_set_guide_gain(sender, app_data):
self.guide_gain = app_data
def callback_set_guide_overlay(sender, app_data):
self.overlay_opacity = app_data
self.need_update = True
def callback_set_snr_power(sender, app_data):
self.model_diffusion.test_cfg['snr_weight_power'] = app_data
def callback_set_langevin_steps(sender, app_data):
self.model_diffusion.test_cfg['langevin_steps'] = app_data
def callback_set_langevin_delta(sender, app_data):
self.model_diffusion.test_cfg['langevin_delta'] = app_data
def callback_set_ddpm_loss_gain(sender, app_data):
rsetattr(self.model, self.ddpm_loss_key, app_data * self.train_ddpm_weight)
def callback_set_learning_rate(sender, app_data):
self.ft_optimizer['lr'] = app_data
def callback_set_outer_loop_steps(sender, app_data):
self.model.test_cfg['n_inverse_steps'] = app_data
def callback_set_inner_loop_steps(sender, app_data):
self.model.test_cfg['extra_scene_step'] = app_data - 1
with dpg.group(horizontal=True):
dpg.add_button(label='load input img', callback=lambda: dpg.show_item('guide_image_dialog'))
dpg.add_slider_float(
label='overlay', min_value=0.0, max_value=1.0, width=170,
default_value=self.overlay_opacity, callback=callback_set_guide_overlay)
dpg.add_text('Guidance params:')
dpg.add_input_float(
label='guidance gain', width=130, default_value=self.guide_gain, callback=callback_set_guide_gain)
dpg.add_input_float(
label='SNR power', width=100,
default_value=self.model_diffusion.test_cfg.get(
'snr_weight_power', self.model_diffusion.timestep_sampler.power),
format='%.3f', callback=callback_set_snr_power)
with dpg.group(horizontal=True):
dpg.add_input_int(
label='langevin steps', width=90, default_value=self.model_diffusion.test_cfg.get('langevin_steps', 0),
min_value=0, max_value=100, min_clamped=True, callback=callback_set_langevin_steps)
dpg.add_input_float(
label='delta', width=100, default_value=self.model_diffusion.test_cfg.get('langevin_delta', 0.4),
format='%.2f', callback=callback_set_langevin_delta)
dpg.add_text('Finetuning optim params:')
dpg.add_input_float(
label='ddpm loss gain', width=130,
default_value=rgetattr(self.model, self.ddpm_loss_key) / self.train_ddpm_weight,
callback=callback_set_ddpm_loss_gain)
dpg.add_input_float(
label='learning rate', width=130, default_value=self.ft_optimizer['lr'], format='%.2e',
callback=callback_set_learning_rate)
with dpg.group(horizontal=True):
dpg.add_input_int(
label='Outer steps', width=90, default_value=self.model.test_cfg.get('n_inverse_steps', 25),
min_value=0, max_value=1000, min_clamped=True, callback=callback_set_outer_loop_steps)
dpg.add_input_int(
label='Inner steps', width=90, default_value=self.model.test_cfg.get('extra_scene_step', 3) + 1,
min_value=1, max_value=100, min_clamped=True, callback=callback_set_inner_loop_steps)
with dpg.collapsing_header(label='Camera options', default_open=True):
def callback_set_cam(sender, app_data):
self.active_cam = getattr(self, app_data + '_cam')
update_camera_status()
self.need_update = True
def callback_reset_camera(sender, app_data):
self.active_cam.fovy = self.default_cam_fovy
self.active_cam.radius = self.default_cam_radius
self.active_cam.set_euler(self.default_cam_euler)
self.active_cam.center = np.array([0, 0, 0], dtype=np.float32)
update_camera_status()
self.need_update = True
with dpg.group(horizontal=True):
dpg.add_combo(
['default', 'guide'], label='camera', width=150,
default_value=self.active_cam.name, callback=callback_set_cam, tag='cam_combo')
dpg.add_button(label='Reset camera', callback=callback_reset_camera)
def callback_set_fovy(sender, app_data):
self.active_cam.fovy = app_data
update_camera_status()
self.need_update = True
def callback_set_cam_r(sender, app_data):
self.active_cam.radius = app_data
update_camera_status()
self.need_update = True
def callback_set_euler(sender, app_data, axis):
euler = self.active_cam.euler
euler[axis] = app_data
self.active_cam.set_euler(euler)
update_camera_status()
self.need_update = True
def callback_set_center(sender, app_data, axis):
self.active_cam.center[axis] = app_data
update_camera_status()
self.need_update = True
dpg.add_slider_float(
label='FoV (vertical)', min_value=1, max_value=120, clamped=True, format='%.1f deg',
default_value=self.active_cam.fovy, callback=callback_set_fovy, tag='fov')
dpg.add_slider_float(
label='radius', min_value=1.0, max_value=5.0, format='%.2f',
default_value=self.active_cam.radius, callback=callback_set_cam_r, tag='radius')
dpg.add_slider_float(
label='azimuth', min_value=-180, max_value=180, clamped=True, format='%.1f deg',
default_value=self.active_cam.euler[2],
callback=lambda x, y: callback_set_euler(x, y, 2), tag='azimuth')
dpg.add_slider_float(
label='elevation', min_value=-89, max_value=89, clamped=True, format='%.1f deg',
default_value=self.active_cam.euler[1],
callback=lambda x, y: callback_set_euler(x, y, 1), tag='elevation')
dpg.add_slider_float(
label='roll', min_value=-180, max_value=180, clamped=True, format='%.1f deg',
default_value=self.active_cam.euler[0],
callback=lambda x, y: callback_set_euler(x, y, 0), tag='roll')
dpg.add_text('Orbit center:')
with dpg.group(horizontal=True):
dpg.add_input_float(
width=110, format='x: %.2f', tag='center_x',
default_value=self.active_cam.center[0], callback=lambda x, y: callback_set_center(x, y, 0))
dpg.add_input_float(
width=110, format='y: %.2f', tag='center_y',
default_value=self.active_cam.center[1], callback=lambda x, y: callback_set_center(x, y, 1))
dpg.add_input_float(
width=110, format='z: %.2f', tag='center_z',
default_value=self.active_cam.center[2], callback=lambda x, y: callback_set_center(x, y, 2))
def callback_load_intrinsic(sender, app_data):
fx, fy, cx, cy, h, w = load_intrinsics(app_data['file_path_name'])
assert fx == fy and cx == w / 2 and cy == h / 2, 'GUI supports only rectified images'
self.active_cam.fovy = np.rad2deg(2 * np.arctan2(h / 2, fy))
update_camera_status()
self.need_update = True
def callback_load_extrinsic(sender, app_data): | c2w = load_pose(app_data['file_path_name']) | 6 | 2023-12-14 03:29:28+00:00 | 8k |
geopavlakos/hamer | hamer/datasets/image_dataset.py | [
{
"identifier": "Dataset",
"path": "hamer/datasets/dataset.py",
"snippet": "class Dataset(metaclass=DatasetRegistration):\n \"\"\"\n Base Dataset class\n \"\"\"\n def __init__(self, *args, **kwargs):\n pass"
},
{
"identifier": "get_example",
"path": "hamer/datasets/utils.p... | import copy
import os
import numpy as np
import torch
import braceexpand
import cv2
import webdataset as wds
from typing import List
from yacs.config import CfgNode
from .dataset import Dataset
from .utils import get_example, expand_to_aspect_ratio | 4,568 | # Load the dataset
if epoch_size is not None:
resampled = True
#corrupt_filter = lambda sample: (sample['__key__'] not in CORRUPT_KEYS)
dataset = wds.WebDataset(expand_urls(urls),
nodesplitter=wds.split_by_node,
shardshuffle=True,
resampled=resampled,
cache_dir=cache_dir,
) #.select(corrupt_filter)
if train:
dataset = dataset.shuffle(100)
dataset = dataset.decode('rgb8').rename(jpg='jpg;jpeg;png')
# Process the dataset
dataset = dataset.compose(split_data)
# Filter/clean the dataset
SUPPRESS_KP_CONF_THRESH = cfg.DATASETS.get('SUPPRESS_KP_CONF_THRESH', 0.0)
SUPPRESS_BETAS_THRESH = cfg.DATASETS.get('SUPPRESS_BETAS_THRESH', 0.0)
SUPPRESS_BAD_POSES = cfg.DATASETS.get('SUPPRESS_BAD_POSES', False)
POSES_BETAS_SIMULTANEOUS = cfg.DATASETS.get('POSES_BETAS_SIMULTANEOUS', False)
BETAS_REG = cfg.DATASETS.get('BETAS_REG', False)
FILTER_NO_POSES = cfg.DATASETS.get('FILTER_NO_POSES', False)
FILTER_NUM_KP = cfg.DATASETS.get('FILTER_NUM_KP', 4)
FILTER_NUM_KP_THRESH = cfg.DATASETS.get('FILTER_NUM_KP_THRESH', 0.0)
FILTER_REPROJ_THRESH = cfg.DATASETS.get('FILTER_REPROJ_THRESH', 0.0)
FILTER_MIN_BBOX_SIZE = cfg.DATASETS.get('FILTER_MIN_BBOX_SIZE', 0.0)
if SUPPRESS_KP_CONF_THRESH > 0:
dataset = dataset.map(lambda x: suppress_bad_kps(x, thresh=SUPPRESS_KP_CONF_THRESH))
if SUPPRESS_BETAS_THRESH > 0:
dataset = dataset.map(lambda x: supress_bad_betas(x, thresh=SUPPRESS_BETAS_THRESH))
if SUPPRESS_BAD_POSES:
dataset = dataset.map(lambda x: supress_bad_poses(x))
if POSES_BETAS_SIMULTANEOUS:
dataset = dataset.map(lambda x: poses_betas_simultaneous(x))
if FILTER_NO_POSES:
dataset = dataset.select(lambda x: filter_no_poses(x))
if FILTER_NUM_KP > 0:
dataset = dataset.select(lambda x: filter_numkp(x, numkp=FILTER_NUM_KP, thresh=FILTER_NUM_KP_THRESH))
if FILTER_REPROJ_THRESH > 0:
dataset = dataset.select(lambda x: filter_reproj_error(x, thresh=FILTER_REPROJ_THRESH))
if FILTER_MIN_BBOX_SIZE > 0:
dataset = dataset.select(lambda x: filter_bbox_size(x, thresh=FILTER_MIN_BBOX_SIZE))
if BETAS_REG:
dataset = dataset.map(lambda x: set_betas_for_reg(x)) # NOTE: Must be at the end
use_skimage_antialias = cfg.DATASETS.get('USE_SKIMAGE_ANTIALIAS', False)
border_mode = {
'constant': cv2.BORDER_CONSTANT,
'replicate': cv2.BORDER_REPLICATE,
}[cfg.DATASETS.get('BORDER_MODE', 'constant')]
# Process the dataset further
dataset = dataset.map(lambda x: ImageDataset.process_webdataset_tar_item(x, train,
augm_config=cfg.DATASETS.CONFIG,
MEAN=MEAN, STD=STD, IMG_SIZE=IMG_SIZE,
BBOX_SHAPE=BBOX_SHAPE,
use_skimage_antialias=use_skimage_antialias,
border_mode=border_mode,
))
if epoch_size is not None:
dataset = dataset.with_epoch(epoch_size)
return dataset
@staticmethod
def process_webdataset_tar_item(item, train,
augm_config=None,
MEAN=DEFAULT_MEAN,
STD=DEFAULT_STD,
IMG_SIZE=DEFAULT_IMG_SIZE,
BBOX_SHAPE=None,
use_skimage_antialias=False,
border_mode=cv2.BORDER_CONSTANT,
):
# Read data from item
key = item['__key__']
image = item['jpg']
data = item['data.pyd']
mask = item['mask']
keypoints_2d = data['keypoints_2d']
keypoints_3d = data['keypoints_3d']
center = data['center']
scale = data['scale']
hand_pose = data['hand_pose']
betas = data['betas']
right = data['right']
has_hand_pose = data['has_hand_pose']
has_betas = data['has_betas']
# image_file = data['image_file']
# Process data
orig_keypoints_2d = keypoints_2d.copy()
center_x = center[0]
center_y = center[1]
bbox_size = expand_to_aspect_ratio(scale*200, target_aspect_ratio=BBOX_SHAPE).max()
if bbox_size < 1:
breakpoint()
mano_params = {'global_orient': hand_pose[:3],
'hand_pose': hand_pose[3:],
'betas': betas
}
has_mano_params = {'global_orient': has_hand_pose,
'hand_pose': has_hand_pose,
'betas': has_betas
}
mano_params_is_axis_angle = {'global_orient': True,
'hand_pose': True,
'betas': False
}
augm_config = copy.deepcopy(augm_config)
# Crop image and (possibly) perform data augmentation
img_rgba = np.concatenate([image, mask.astype(np.uint8)[:,:,None]*255], axis=2)
|
def expand(s):
    """Expand ``$VAR``/``${VAR}`` environment references, then a leading ``~``, in *s*."""
    with_vars = os.path.expandvars(s)
    return os.path.expanduser(with_vars)
def expand_urls(urls: str|List[str]):
    """Normalize *urls* to a flat list of shard URLs.

    Each pattern gets environment/user expansion (via :func:`expand`) followed
    by brace expansion (e.g. ``shard-{000..099}.tar``).
    """
    patterns = [urls] if isinstance(urls, str) else urls
    expanded = []
    for pattern in patterns:
        expanded.extend(braceexpand.braceexpand(expand(pattern)))
    return expanded
# Identity permutation over the 21 hand keypoints: horizontally flipping an
# image maps each keypoint index to itself (handedness is tracked separately
# via the per-sample `right` flag).
FLIP_KEYPOINT_PERMUTATION = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
# Standard ImageNet normalization statistics, scaled to [0, 255] pixel values.
DEFAULT_MEAN = 255. * np.array([0.485, 0.456, 0.406])
DEFAULT_STD = 255. * np.array([0.229, 0.224, 0.225])
# Default side length (pixels) of the square crop fed to the network.
DEFAULT_IMG_SIZE = 256
class ImageDataset(Dataset):
@staticmethod
def load_tars_as_webdataset(cfg: CfgNode, urls: str|List[str], train: bool,
resampled=False,
epoch_size=None,
cache_dir=None,
**kwargs) -> Dataset:
"""
Loads the dataset from a webdataset tar file.
"""
IMG_SIZE = cfg.MODEL.IMAGE_SIZE
BBOX_SHAPE = cfg.MODEL.get('BBOX_SHAPE', None)
MEAN = 255. * np.array(cfg.MODEL.IMAGE_MEAN)
STD = 255. * np.array(cfg.MODEL.IMAGE_STD)
def split_data(source):
for item in source:
datas = item['data.pyd']
for data in datas:
if 'detection.npz' in item:
det_idx = data['extra_info']['detection_npz_idx']
mask = item['detection.npz']['masks'][det_idx]
else:
mask = np.ones_like(item['jpg'][:,:,0], dtype=bool)
yield {
'__key__': item['__key__'],
'jpg': item['jpg'],
'data.pyd': data,
'mask': mask,
}
def suppress_bad_kps(item, thresh=0.0):
if thresh > 0:
kp2d = item['data.pyd']['keypoints_2d']
kp2d_conf = np.where(kp2d[:, 2] < thresh, 0.0, kp2d[:, 2])
item['data.pyd']['keypoints_2d'] = np.concatenate([kp2d[:,:2], kp2d_conf[:,None]], axis=1)
return item
def filter_numkp(item, numkp=4, thresh=0.0):
kp_conf = item['data.pyd']['keypoints_2d'][:, 2]
return (kp_conf > thresh).sum() > numkp
        def filter_reproj_error(item, thresh=10**4.5):
            # Keep samples whose recorded fitting reprojection loss is below
            # *thresh*. `fitting_loss` appears to be stored as a 0-d object
            # ndarray wrapping a dict (the `np.array({})` default exists so
            # `.item()` is valid when the key is absent) — TODO confirm the
            # stored representation against the dataset writer.
            losses = item['data.pyd'].get('extra_info', {}).get('fitting_loss', np.array({})).item()
            reproj_loss = losses.get('reprojection_loss', None)
            # Samples with no recorded reprojection loss are accepted.
            return reproj_loss is None or reproj_loss < thresh
def filter_bbox_size(item, thresh=1):
bbox_size_min = item['data.pyd']['scale'].min().item() * 200.
return bbox_size_min > thresh
def filter_no_poses(item):
return (item['data.pyd']['has_hand_pose'] > 0)
def supress_bad_betas(item, thresh=3):
has_betas = item['data.pyd']['has_betas']
if thresh > 0 and has_betas:
betas_abs = np.abs(item['data.pyd']['betas'])
if (betas_abs > thresh).any():
item['data.pyd']['has_betas'] = False
return item
        def supress_bad_poses(item):
            # Invalidate annotations whose hand pose fails a plausibility
            # check. The first 3 pose values (global orientation, cf. the
            # mano_params split elsewhere in this file) are excluded via the
            # [None, 3:] index. NOTE(review): relies on poses_check_probable /
            # amass_poses_hist100_smooth defined elsewhere in this module;
            # appears to score poses against an AMASS pose histogram — confirm.
            has_hand_pose = item['data.pyd']['has_hand_pose']
            if has_hand_pose:
                hand_pose = item['data.pyd']['hand_pose']
                pose_is_probable = poses_check_probable(torch.from_numpy(hand_pose)[None, 3:], amass_poses_hist100_smooth).item()
                if not pose_is_probable:
                    item['data.pyd']['has_hand_pose'] = False
            return item
def poses_betas_simultaneous(item):
# We either have both hand_pose and betas, or neither
has_betas = item['data.pyd']['has_betas']
has_hand_pose = item['data.pyd']['has_hand_pose']
item['data.pyd']['has_betas'] = item['data.pyd']['has_hand_pose'] = np.array(float((has_hand_pose>0) and (has_betas>0)))
return item
def set_betas_for_reg(item):
# Always have betas set to true
has_betas = item['data.pyd']['has_betas']
betas = item['data.pyd']['betas']
if not (has_betas>0):
item['data.pyd']['has_betas'] = np.array(float((True)))
item['data.pyd']['betas'] = betas * 0
return item
# Load the dataset
if epoch_size is not None:
resampled = True
#corrupt_filter = lambda sample: (sample['__key__'] not in CORRUPT_KEYS)
dataset = wds.WebDataset(expand_urls(urls),
nodesplitter=wds.split_by_node,
shardshuffle=True,
resampled=resampled,
cache_dir=cache_dir,
) #.select(corrupt_filter)
if train:
dataset = dataset.shuffle(100)
dataset = dataset.decode('rgb8').rename(jpg='jpg;jpeg;png')
# Process the dataset
dataset = dataset.compose(split_data)
# Filter/clean the dataset
SUPPRESS_KP_CONF_THRESH = cfg.DATASETS.get('SUPPRESS_KP_CONF_THRESH', 0.0)
SUPPRESS_BETAS_THRESH = cfg.DATASETS.get('SUPPRESS_BETAS_THRESH', 0.0)
SUPPRESS_BAD_POSES = cfg.DATASETS.get('SUPPRESS_BAD_POSES', False)
POSES_BETAS_SIMULTANEOUS = cfg.DATASETS.get('POSES_BETAS_SIMULTANEOUS', False)
BETAS_REG = cfg.DATASETS.get('BETAS_REG', False)
FILTER_NO_POSES = cfg.DATASETS.get('FILTER_NO_POSES', False)
FILTER_NUM_KP = cfg.DATASETS.get('FILTER_NUM_KP', 4)
FILTER_NUM_KP_THRESH = cfg.DATASETS.get('FILTER_NUM_KP_THRESH', 0.0)
FILTER_REPROJ_THRESH = cfg.DATASETS.get('FILTER_REPROJ_THRESH', 0.0)
FILTER_MIN_BBOX_SIZE = cfg.DATASETS.get('FILTER_MIN_BBOX_SIZE', 0.0)
if SUPPRESS_KP_CONF_THRESH > 0:
dataset = dataset.map(lambda x: suppress_bad_kps(x, thresh=SUPPRESS_KP_CONF_THRESH))
if SUPPRESS_BETAS_THRESH > 0:
dataset = dataset.map(lambda x: supress_bad_betas(x, thresh=SUPPRESS_BETAS_THRESH))
if SUPPRESS_BAD_POSES:
dataset = dataset.map(lambda x: supress_bad_poses(x))
if POSES_BETAS_SIMULTANEOUS:
dataset = dataset.map(lambda x: poses_betas_simultaneous(x))
if FILTER_NO_POSES:
dataset = dataset.select(lambda x: filter_no_poses(x))
if FILTER_NUM_KP > 0:
dataset = dataset.select(lambda x: filter_numkp(x, numkp=FILTER_NUM_KP, thresh=FILTER_NUM_KP_THRESH))
if FILTER_REPROJ_THRESH > 0:
dataset = dataset.select(lambda x: filter_reproj_error(x, thresh=FILTER_REPROJ_THRESH))
if FILTER_MIN_BBOX_SIZE > 0:
dataset = dataset.select(lambda x: filter_bbox_size(x, thresh=FILTER_MIN_BBOX_SIZE))
if BETAS_REG:
dataset = dataset.map(lambda x: set_betas_for_reg(x)) # NOTE: Must be at the end
use_skimage_antialias = cfg.DATASETS.get('USE_SKIMAGE_ANTIALIAS', False)
border_mode = {
'constant': cv2.BORDER_CONSTANT,
'replicate': cv2.BORDER_REPLICATE,
}[cfg.DATASETS.get('BORDER_MODE', 'constant')]
# Process the dataset further
dataset = dataset.map(lambda x: ImageDataset.process_webdataset_tar_item(x, train,
augm_config=cfg.DATASETS.CONFIG,
MEAN=MEAN, STD=STD, IMG_SIZE=IMG_SIZE,
BBOX_SHAPE=BBOX_SHAPE,
use_skimage_antialias=use_skimage_antialias,
border_mode=border_mode,
))
if epoch_size is not None:
dataset = dataset.with_epoch(epoch_size)
return dataset
@staticmethod
def process_webdataset_tar_item(item, train,
augm_config=None,
MEAN=DEFAULT_MEAN,
STD=DEFAULT_STD,
IMG_SIZE=DEFAULT_IMG_SIZE,
BBOX_SHAPE=None,
use_skimage_antialias=False,
border_mode=cv2.BORDER_CONSTANT,
):
# Read data from item
key = item['__key__']
image = item['jpg']
data = item['data.pyd']
mask = item['mask']
keypoints_2d = data['keypoints_2d']
keypoints_3d = data['keypoints_3d']
center = data['center']
scale = data['scale']
hand_pose = data['hand_pose']
betas = data['betas']
right = data['right']
has_hand_pose = data['has_hand_pose']
has_betas = data['has_betas']
# image_file = data['image_file']
# Process data
orig_keypoints_2d = keypoints_2d.copy()
center_x = center[0]
center_y = center[1]
bbox_size = expand_to_aspect_ratio(scale*200, target_aspect_ratio=BBOX_SHAPE).max()
if bbox_size < 1:
breakpoint()
mano_params = {'global_orient': hand_pose[:3],
'hand_pose': hand_pose[3:],
'betas': betas
}
has_mano_params = {'global_orient': has_hand_pose,
'hand_pose': has_hand_pose,
'betas': has_betas
}
mano_params_is_axis_angle = {'global_orient': True,
'hand_pose': True,
'betas': False
}
augm_config = copy.deepcopy(augm_config)
# Crop image and (possibly) perform data augmentation
img_rgba = np.concatenate([image, mask.astype(np.uint8)[:,:,None]*255], axis=2) | img_patch_rgba, keypoints_2d, keypoints_3d, mano_params, has_mano_params, img_size, trans = get_example(img_rgba, | 1 | 2023-12-08 09:07:07+00:00 | 8k |
baidubce/app-builder | appbuilder/core/components/llms/base.py | [
{
"identifier": "GATEWAY_URL",
"path": "appbuilder/core/constants.py",
"snippet": "GATEWAY_URL = \"https://appbuilder.baidu.com\""
},
{
"identifier": "GATEWAY_INNER_URL",
"path": "appbuilder/core/constants.py",
"snippet": "GATEWAY_INNER_URL = \"http://appbuilder.sdns.baidu.com\""
},
... | import itertools
import json
import uuid
import requests
from enum import Enum
from appbuilder.core.constants import GATEWAY_URL, GATEWAY_INNER_URL
from pydantic import BaseModel, Field, ValidationError, HttpUrl, validator
from pydantic.types import confloat
from appbuilder.core.component import Component
from appbuilder.core.message import Message, _T
from appbuilder.utils.logger_util import logger
from typing import Dict, List, Optional, Any
from appbuilder.core.component import ComponentArguments
from appbuilder.core._exception import AppBuilderServerException
from appbuilder.core.utils import ModelInfo
from appbuilder.utils.sse_util import SSEClient
from appbuilder.core._exception import AppBuilderServerException, ModelNotSupportedException
from collections.abc import Generator | 4,620 |
if "code" in data and "message" in data and "requestId" in data:
raise AppBuilderServerException(self.log_id, data["code"], data["message"])
if "code" in data and "message" in data and "status" in data:
raise AppBuilderServerException(self.log_id, data["code"], data["message"])
self.result = data.get("answer", None)
trace_log_list = data.get("trace_log", None)
if trace_log_list is not None:
for trace_log in trace_log_list:
key = trace_log["tool"]
result_list = trace_log["result"]
self.extra[key] = result_list
def parse_stream_data(self, parsed_str):
"""解析流式数据块并提取answer字段"""
try:
data = json.loads(parsed_str)
if "code" in data and "message" in data and "requestId" in data:
raise AppBuilderServerException(self.log_id, data["code"], data["message"])
if "code" in data and "message" in data and "status" in data:
raise AppBuilderServerException(self.log_id, data["code"], data["message"])
return data
except json.JSONDecodeError:
# 处理可能的解析错误
print("error: " + parsed_str)
raise AppBuilderServerException("unknown", "unknown", parsed_str)
def get_stream_data(self):
"""获取处理过的流式数据的迭代器"""
return self.result
def to_message(self):
"""将响应结果转换为Message对象。
Returns:
Message: Message对象。
"""
message = LLMMessage()
message.id = self.log_id
message.content = self.result
message.extra = self.extra
return self.message_iterable_wrapper(message)
def message_iterable_wrapper(self, message):
"""
对模型输出的 Message 对象进行包装。
当 Message 是流式数据时,数据被迭代完后,将重新更新 content 为 blocking 的字符串。
"""
class IterableWrapper:
def __init__(self, stream_content):
self._content = stream_content
self._concat = ""
self._extra = {}
def __iter__(self):
return self
def __next__(self):
try:
result_json = next(self._content)
char = result_json.get("answer", "")
result_list = result_json.get("result")
key = result_json.get("tool")
if result_list is not None:
self._extra[key] = result_list
message.extra = self._extra # Update the original extra
self._concat += char
return char
except StopIteration:
message.content = self._concat # Update the original content
raise
if isinstance(message.content, Generator):
# Replace the original content with the custom iterable
message.content = IterableWrapper(message.content)
return message
class CompletionBaseComponent(Component):
name: str
version: str
base_url: str = "/rpc/2.0/cloud_hub/v1/ai_engine/copilot_engine"
model_name: str = ""
model_url: str = ""
model_type: str = "chat"
excluded_models: List[str] = ["Yi-34B-Chat", "ChatLaw"]
model_info: ModelInfo = None
model_config: Dict[str, Any] = {
"model": {
"provider": "baidu",
"name": "ERNIE-Bot",
"completion_params": {
"temperature": 1e-10,
"top_p": 0,
"presence_penalty": 0,
"frequency_penalty": 0
}
}
}
def __init__(self, meta: ComponentArguments, model=None, secret_key: Optional[str] = None,
gateway: str = ""):
"""
Args:
meta (ComponentArguments): 组件参数信息
model (str, optional): 模型名称. Defaults to None.
secret_key (Optional[str], optional): 可选的密钥. Defaults to None.
gateway (str, optional): 网关地址. Defaults to "".
"""
super().__init__(meta=meta, secret_key=secret_key, gateway=gateway)
if model and model in self.excluded_models:
| # Copyright (c) 2023 Baidu, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class LLMMessage(Message):
content: Optional[_T] = {}
extra: Optional[Dict] = {}
def __str__(self):
return f"Message(name={self.name}, content={self.content}, mtype={self.mtype}, extra={self.extra})"
class CompletionRequest(object):
r"""ShortSpeechRecognitionRequest."""
params = None
response_mode = "blocking"
def __init__(self, params: Dict[str, Any] = None, response_mode: str = None, **kwargs):
r""" __init__ the client state.
"""
self.params = params
self.response_mode = response_mode
class ModelArgsConfig(BaseModel):
stream: bool = Field(default=False, description="是否流式响应。默认为 False。")
temperature: confloat(gt=0.0, le=1.0) = Field(default=1e-10, description="模型的温度参数,范围从 0.0 到 1.0。")
top_p: confloat(gt=0.0, le=1.0) = Field(default=1e-10, description="模型的top_p参数,范围从 0.0 到 1.0。")
class CompletionResponse(object):
r"""ShortSpeechRecognitionResponse."""
error_no = 0
error_msg = ""
result = None
log_id = ""
extra = None
def __init__(self, response, stream: bool = False):
"""初始化客户端状态。"""
self.error_no = 0
self.error_msg = ""
self.log_id = response.headers.get("X-Appbuilder-Request-Id", None)
self.extra = {}
if stream:
# 流式数据处理
def stream_data():
sse_client = SSEClient(response)
for event in sse_client.events():
if not event:
continue
answer = self.parse_stream_data(event.data)
if answer is not None:
yield answer
self.result = stream_data()
else:
# 非流式数据的处理
if response.status_code != 200:
self.error_no = response.status_code
self.error_msg = "error"
self.result = response.text
raise AppBuilderServerException(self.log_id, self.error_no, self.result)
else:
data = response.json()
if "code" in data and "message" in data and "requestId" in data:
raise AppBuilderServerException(self.log_id, data["code"], data["message"])
if "code" in data and "message" in data and "status" in data:
raise AppBuilderServerException(self.log_id, data["code"], data["message"])
self.result = data.get("answer", None)
trace_log_list = data.get("trace_log", None)
if trace_log_list is not None:
for trace_log in trace_log_list:
key = trace_log["tool"]
result_list = trace_log["result"]
self.extra[key] = result_list
def parse_stream_data(self, parsed_str):
"""解析流式数据块并提取answer字段"""
try:
data = json.loads(parsed_str)
if "code" in data and "message" in data and "requestId" in data:
raise AppBuilderServerException(self.log_id, data["code"], data["message"])
if "code" in data and "message" in data and "status" in data:
raise AppBuilderServerException(self.log_id, data["code"], data["message"])
return data
except json.JSONDecodeError:
# 处理可能的解析错误
print("error: " + parsed_str)
raise AppBuilderServerException("unknown", "unknown", parsed_str)
def get_stream_data(self):
"""获取处理过的流式数据的迭代器"""
return self.result
def to_message(self):
"""将响应结果转换为Message对象。
Returns:
Message: Message对象。
"""
message = LLMMessage()
message.id = self.log_id
message.content = self.result
message.extra = self.extra
return self.message_iterable_wrapper(message)
def message_iterable_wrapper(self, message):
"""
对模型输出的 Message 对象进行包装。
当 Message 是流式数据时,数据被迭代完后,将重新更新 content 为 blocking 的字符串。
"""
class IterableWrapper:
def __init__(self, stream_content):
self._content = stream_content
self._concat = ""
self._extra = {}
def __iter__(self):
return self
def __next__(self):
try:
result_json = next(self._content)
char = result_json.get("answer", "")
result_list = result_json.get("result")
key = result_json.get("tool")
if result_list is not None:
self._extra[key] = result_list
message.extra = self._extra # Update the original extra
self._concat += char
return char
except StopIteration:
message.content = self._concat # Update the original content
raise
if isinstance(message.content, Generator):
# Replace the original content with the custom iterable
message.content = IterableWrapper(message.content)
return message
class CompletionBaseComponent(Component):
name: str
version: str
base_url: str = "/rpc/2.0/cloud_hub/v1/ai_engine/copilot_engine"
model_name: str = ""
model_url: str = ""
model_type: str = "chat"
excluded_models: List[str] = ["Yi-34B-Chat", "ChatLaw"]
model_info: ModelInfo = None
model_config: Dict[str, Any] = {
"model": {
"provider": "baidu",
"name": "ERNIE-Bot",
"completion_params": {
"temperature": 1e-10,
"top_p": 0,
"presence_penalty": 0,
"frequency_penalty": 0
}
}
}
def __init__(self, meta: ComponentArguments, model=None, secret_key: Optional[str] = None,
gateway: str = ""):
"""
Args:
meta (ComponentArguments): 组件参数信息
model (str, optional): 模型名称. Defaults to None.
secret_key (Optional[str], optional): 可选的密钥. Defaults to None.
gateway (str, optional): 网关地址. Defaults to "".
"""
super().__init__(meta=meta, secret_key=secret_key, gateway=gateway)
if model and model in self.excluded_models: | raise ModelNotSupportedException(f"Model {model} not supported") | 11 | 2023-12-05 01:48:12+00:00 | 8k |
corfyi/UCMCTrack | util/run_ucmc.py | [
{
"identifier": "Detector",
"path": "detector/detector.py",
"snippet": "class Detector:\n def __init__(self):\n self.seq_length = 0\n self.gmc = None\n\n def load(self,cam_para_file, det_file, gmc_file = None):\n self.mapper = Mapper(cam_para_file,\"MOT17\")\n self.load... | from detector.detector import Detector, Detection
from tracker.ucmc import UCMCTrack
from tracker.kalman import TrackStatus
from eval.interpolation import interpolate
import os,time
import argparse | 4,011 |
class Tracklet():
def __init__(self,frame_id,box):
self.is_active = False
self.boxes = dict()
self.boxes[frame_id] = box
def add_box(self, frame_id, box):
self.boxes[frame_id] = box
def activate(self):
self.is_active = True
def make_args():
parser = argparse.ArgumentParser(description='Process some arguments.')
parser.add_argument('--seq', type=str, default = "MOT17-02", help='seq name')
parser.add_argument('--fps', type=float, default=30.0, help='fps')
parser.add_argument('--wx', type=float, default=0.1, help='wx')
parser.add_argument('--wy', type=float, default=0.1, help='wy')
parser.add_argument('--vmax', type=float, default=0.5, help='vmax')
parser.add_argument('--a', type=float, default=10.0, help='assignment threshold')
parser.add_argument('--cdt', type=float, default=30.0, help='coasted deletion time')
parser.add_argument('--high_score', type=float, default=0.6, help='high score threshold')
parser.add_argument('--conf_thresh', type=float, default=0.5, help='detection confidence threshold')
parser.add_argument("--cmc", action="store_true", help="use cmc or not.")
parser.add_argument("--hp", action="store_true", help="use head padding or not.")
args = parser.parse_args()
return args
def run_ucmc(args, det_path = "det_results/mot17/yolox_x_ablation",
cam_path = "cam_para/mot17",
gmc_path = "gmc/mot17",
out_path = "output/mot17",
exp_name = "val",
dataset = "MOT17"):
seq_name = args.seq
eval_path = os.path.join(out_path,exp_name)
orig_save_path = os.path.join(eval_path,seq_name)
if not os.path.exists(orig_save_path):
os.makedirs(orig_save_path)
if dataset == "MOT17":
det_file = os.path.join(det_path, f"{seq_name}-SDP.txt")
cam_para = os.path.join(cam_path, f"{seq_name}-SDP.txt")
result_file = os.path.join(orig_save_path,f"{seq_name}-SDP.txt")
elif dataset == "MOT20":
det_file = os.path.join(det_path, f"{seq_name}.txt")
cam_para = os.path.join(cam_path, f"{seq_name}.txt")
result_file = os.path.join(orig_save_path,f"{seq_name}.txt")
gmc_file = os.path.join(gmc_path, f"GMC-{seq_name}.txt")
print(det_file)
print(cam_para)
detector = Detector()
detector.load(cam_para, det_file,gmc_file)
print(f"seq_length = {detector.seq_length}")
a1 = args.a
a2 = args.a
high_score = args.high_score
conf_thresh = args.conf_thresh
fps = args.fps
cdt = args.cdt
wx = args.wx
wy = args.wy
vmax = args.vmax
|
class Tracklet():
def __init__(self,frame_id,box):
self.is_active = False
self.boxes = dict()
self.boxes[frame_id] = box
def add_box(self, frame_id, box):
self.boxes[frame_id] = box
def activate(self):
self.is_active = True
def make_args():
parser = argparse.ArgumentParser(description='Process some arguments.')
parser.add_argument('--seq', type=str, default = "MOT17-02", help='seq name')
parser.add_argument('--fps', type=float, default=30.0, help='fps')
parser.add_argument('--wx', type=float, default=0.1, help='wx')
parser.add_argument('--wy', type=float, default=0.1, help='wy')
parser.add_argument('--vmax', type=float, default=0.5, help='vmax')
parser.add_argument('--a', type=float, default=10.0, help='assignment threshold')
parser.add_argument('--cdt', type=float, default=30.0, help='coasted deletion time')
parser.add_argument('--high_score', type=float, default=0.6, help='high score threshold')
parser.add_argument('--conf_thresh', type=float, default=0.5, help='detection confidence threshold')
parser.add_argument("--cmc", action="store_true", help="use cmc or not.")
parser.add_argument("--hp", action="store_true", help="use head padding or not.")
args = parser.parse_args()
return args
def run_ucmc(args, det_path = "det_results/mot17/yolox_x_ablation",
cam_path = "cam_para/mot17",
gmc_path = "gmc/mot17",
out_path = "output/mot17",
exp_name = "val",
dataset = "MOT17"):
seq_name = args.seq
eval_path = os.path.join(out_path,exp_name)
orig_save_path = os.path.join(eval_path,seq_name)
if not os.path.exists(orig_save_path):
os.makedirs(orig_save_path)
if dataset == "MOT17":
det_file = os.path.join(det_path, f"{seq_name}-SDP.txt")
cam_para = os.path.join(cam_path, f"{seq_name}-SDP.txt")
result_file = os.path.join(orig_save_path,f"{seq_name}-SDP.txt")
elif dataset == "MOT20":
det_file = os.path.join(det_path, f"{seq_name}.txt")
cam_para = os.path.join(cam_path, f"{seq_name}.txt")
result_file = os.path.join(orig_save_path,f"{seq_name}.txt")
gmc_file = os.path.join(gmc_path, f"GMC-{seq_name}.txt")
print(det_file)
print(cam_para)
detector = Detector()
detector.load(cam_para, det_file,gmc_file)
print(f"seq_length = {detector.seq_length}")
a1 = args.a
a2 = args.a
high_score = args.high_score
conf_thresh = args.conf_thresh
fps = args.fps
cdt = args.cdt
wx = args.wx
wy = args.wy
vmax = args.vmax
| tracker = UCMCTrack(a1, a2, wx,wy,vmax, cdt, fps, dataset, high_score,args.cmc,detector) | 2 | 2023-12-12 07:29:20+00:00 | 8k |
Subsets and Splits
SQL Console for tianyang/repobench_python_v1.1
Identifies repositories that have consistent code formatting levels across multiple scales (2k, 4k, 8k, 12k) and reveals the structured formatting patterns within these repositories.
SQL Console for tianyang/repobench_python_v1.1
Compares cross-file and in-file code structure patterns across different complexity levels, revealing how file organization strategies vary with code size and potentially informing better code architecture decisions.
SQL Console for tianyang/repobench_python_v1.1
Identifies repositories that have complete performance data across all seven code complexity levels, revealing consistent benchmarking patterns across different code sizes.
SQL Console for tianyang/repobench_python_v1.1
Identifies repositories that contain all 7 distinct quality levels (2k through 32k), revealing complete datasets that might be useful for comprehensive analysis.