Dataset schema (one row per record):

    repo_name           string (length 7-71)
    file_path           string (length 5-118)
    context             list
    import_statement    string (length 45-12.5k)
    token_num           int64 (641-99.4k)
    cropped_code        string (length 44-17k)
    all_code            string (length 43-754k)
    next_line           string (length 2-330)
    gold_snippet_index  int64 (0-68)
    created_at          string (length 25)
    level               string (9 classes)
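The fields combine into a repository-level next-line completion task: a model is shown the file's imports, the cropped in-file prefix, and the cross-file context snippets, and must produce next_line. Below is a minimal sketch of consuming one record, assuming the rows are loaded with the Hugging Face datasets library; the dataset path and the prompt construction are placeholders, not taken from the source.

    from datasets import load_dataset

    ds = load_dataset('org/repo-level-completion', split='train')  # hypothetical path
    record = ds[0]

    # One plausible prompt: the file's imports plus the cropped in-file prefix.
    # gold_snippet_index marks the context entry holding the definition that
    # the target line depends on.
    prompt = record['import_statement'] + '\n' + record['cropped_code']
    gold = record['context'][record['gold_snippet_index']]
    print(gold['identifier'], gold['path'])
    print('target:', record['next_line'])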
Record 1

repo_name: ZS-YANG/FemtoDet-v3
file_path: mmdet/datasets/transforms/transforms.py
context: [ { "identifier": "TRANSFORMS", "path": "mmdet/registry.py", "snippet": "TRANSFORMS = Registry(\n 'transform',\n parent=MMENGINE_TRANSFORMS,\n locations=['mmdet.datasets.transforms'])" }, { "identifier": "autocast_box_type", "path": "mmdet/structures/bbox/box_type.py", "snippet": ...
import_statement:
    import copy
    import inspect
    import math
    import warnings
    import cv2
    import mmcv
    import numpy as np
    import albumentations
    from typing import List, Optional, Sequence, Tuple, Union
    from mmcv.image import imresize
    from mmcv.image.geometric import _scale_size
    from mmcv.transforms import BaseTransform
    from mmcv.transforms import Pad as MMCV_Pad
    from mmcv.transforms import RandomFlip as MMCV_RandomFlip
    from mmcv.transforms import Resize as MMCV_Resize
    from mmcv.transforms.utils import avoid_cache_randomness, cache_randomness
    from mmengine.dataset import BaseDataset
    from mmengine.utils import is_str
    from numpy import random
    from mmdet.registry import TRANSFORMS
    from mmdet.structures.bbox import HorizontalBoxes, autocast_box_type
    from mmdet.structures.mask import BitmapMasks, PolygonMasks
    from mmdet.utils import log_img_scale
    from imagecorruptions import corrupt
    from albumentations import Compose
token_num: 17,550

cropped_code:
results['gt_bboxes'].translate_([left, top]) # expand masks if results.get('gt_masks', None) is not None: results['gt_masks'] = results['gt_masks'].expand( int(h * ratio), int(w * ratio), top, left) # expand segmentation map if results.get('gt_seg_map', None) is not None: gt_seg = results['gt_seg_map'] expand_gt_seg = np.full((int(h * ratio), int(w * ratio)), self.seg_ignore_label, dtype=gt_seg.dtype) expand_gt_seg[top:top + h, left:left + w] = gt_seg results['gt_seg_map'] = expand_gt_seg return results def __repr__(self) -> str: repr_str = self.__class__.__name__ repr_str += f'(mean={self.mean}, to_rgb={self.to_rgb}, ' repr_str += f'ratio_range={self.ratio_range}, ' repr_str += f'seg_ignore_label={self.seg_ignore_label}, ' repr_str += f'prob={self.prob})' return repr_str @TRANSFORMS.register_module() class MinIoURandomCrop(BaseTransform): """Random crop the image & bboxes & masks & segmentation map, the cropped patches have minimum IoU requirement with original image & bboxes & masks. & segmentation map, the IoU threshold is randomly selected from min_ious. Required Keys: - img - img_shape - gt_bboxes (BaseBoxes[torch.float32]) (optional) - gt_bboxes_labels (np.int64) (optional) - gt_masks (BitmapMasks | PolygonMasks) (optional) - gt_ignore_flags (bool) (optional) - gt_seg_map (np.uint8) (optional) Modified Keys: - img - img_shape - gt_bboxes - gt_bboxes_labels - gt_masks - gt_ignore_flags - gt_seg_map Args: min_ious (Sequence[float]): minimum IoU threshold for all intersections with bounding boxes. min_crop_size (float): minimum crop's size (i.e. h,w := a*h, a*w, where a >= min_crop_size). bbox_clip_border (bool, optional): Whether clip the objects outside the border of the image. Defaults to True. """ def __init__(self, min_ious: Sequence[float] = (0.1, 0.3, 0.5, 0.7, 0.9), min_crop_size: float = 0.3, bbox_clip_border: bool = True) -> None: self.min_ious = min_ious self.sample_mode = (1, *min_ious, 0) self.min_crop_size = min_crop_size self.bbox_clip_border = bbox_clip_border @cache_randomness def _random_mode(self) -> Number: return random.choice(self.sample_mode) @autocast_box_type() def transform(self, results: dict) -> dict: """Transform function to crop images and bounding boxes with minimum IoU constraint. Args: results (dict): Result dict from loading pipeline. Returns: dict: Result dict with images and bounding boxes cropped, \ 'img_shape' key is updated. """ assert 'img' in results, '`img` is not found in results' assert 'gt_bboxes' in results, '`gt_bboxes` is not found in results' img = results['img'] boxes = results['gt_bboxes'] h, w, c = img.shape while True: mode = self._random_mode() self.mode = mode if mode == 1: return results min_iou = self.mode for i in range(50): new_w = random.uniform(self.min_crop_size * w, w) new_h = random.uniform(self.min_crop_size * h, h) # h / w in [0.5, 2] if new_h / new_w < 0.5 or new_h / new_w > 2: continue left = random.uniform(w - new_w) top = random.uniform(h - new_h) patch = np.array( (int(left), int(top), int(left + new_w), int(top + new_h))) # Line or point crop is not allowed if patch[2] == patch[0] or patch[3] == patch[1]: continue overlaps = boxes.overlaps(
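all_code: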
# Copyright (c) OpenMMLab. All rights reserved. try: except ImportError: corrupt = None try: except ImportError: albumentations = None Compose = None Number = Union[int, float] def _fixed_scale_size( size: Tuple[int, int], scale: Union[float, int, tuple], ) -> Tuple[int, int]: """Rescale a size by a ratio. Args: size (tuple[int]): (w, h). scale (float | tuple(float)): Scaling factor. Returns: tuple[int]: scaled size. """ if isinstance(scale, (float, int)): scale = (scale, scale) w, h = size # don't need o.5 offset return int(w * float(scale[0])), int(h * float(scale[1])) def rescale_size(old_size: tuple, scale: Union[float, int, tuple], return_scale: bool = False) -> tuple: """Calculate the new size to be rescaled to. Args: old_size (tuple[int]): The old size (w, h) of image. scale (float | tuple[int]): The scaling factor or maximum size. If it is a float number, then the image will be rescaled by this factor, else if it is a tuple of 2 integers, then the image will be rescaled as large as possible within the scale. return_scale (bool): Whether to return the scaling factor besides the rescaled image size. Returns: tuple[int]: The new rescaled image size. """ w, h = old_size if isinstance(scale, (float, int)): if scale <= 0: raise ValueError(f'Invalid scale {scale}, must be positive.') scale_factor = scale elif isinstance(scale, tuple): max_long_edge = max(scale) max_short_edge = min(scale) scale_factor = min(max_long_edge / max(h, w), max_short_edge / min(h, w)) else: raise TypeError( f'Scale must be a number or tuple of int, but got {type(scale)}') # only change this new_size = _fixed_scale_size((w, h), scale_factor) if return_scale: return new_size, scale_factor else: return new_size def imrescale( img: np.ndarray, scale: Union[float, Tuple[int, int]], return_scale: bool = False, interpolation: str = 'bilinear', backend: Optional[str] = None ) -> Union[np.ndarray, Tuple[np.ndarray, float]]: """Resize image while keeping the aspect ratio. Args: img (ndarray): The input image. scale (float | tuple[int]): The scaling factor or maximum size. If it is a float number, then the image will be rescaled by this factor, else if it is a tuple of 2 integers, then the image will be rescaled as large as possible within the scale. return_scale (bool): Whether to return the scaling factor besides the rescaled image. interpolation (str): Same as :func:`resize`. backend (str | None): Same as :func:`resize`. Returns: ndarray: The rescaled image. """ h, w = img.shape[:2] new_size, scale_factor = rescale_size((w, h), scale, return_scale=True) rescaled_img = imresize( img, new_size, interpolation=interpolation, backend=backend) if return_scale: return rescaled_img, scale_factor else: return rescaled_img @TRANSFORMS.register_module() class Resize(MMCV_Resize): """Resize images & bbox & seg. This transform resizes the input image according to ``scale`` or ``scale_factor``. Bboxes, masks, and seg map are then resized with the same scale factor. if ``scale`` and ``scale_factor`` are both set, it will use ``scale`` to resize. Required Keys: - img - gt_bboxes (BaseBoxes[torch.float32]) (optional) - gt_masks (BitmapMasks | PolygonMasks) (optional) - gt_seg_map (np.uint8) (optional) Modified Keys: - img - img_shape - gt_bboxes - gt_masks - gt_seg_map Added Keys: - scale - scale_factor - keep_ratio - homography_matrix Args: scale (int or tuple): Images scales for resizing. Defaults to None scale_factor (float or tuple[float]): Scale factors for resizing. Defaults to None. 
keep_ratio (bool): Whether to keep the aspect ratio when resizing the image. Defaults to False. clip_object_border (bool): Whether to clip the objects outside the border of the image. In some dataset like MOT17, the gt bboxes are allowed to cross the border of images. Therefore, we don't need to clip the gt bboxes in these cases. Defaults to True. backend (str): Image resize backend, choices are 'cv2' and 'pillow'. These two backends generates slightly different results. Defaults to 'cv2'. interpolation (str): Interpolation method, accepted values are "nearest", "bilinear", "bicubic", "area", "lanczos" for 'cv2' backend, "nearest", "bilinear" for 'pillow' backend. Defaults to 'bilinear'. """ def _resize_masks(self, results: dict) -> None: """Resize masks with ``results['scale']``""" if results.get('gt_masks', None) is not None: if self.keep_ratio: results['gt_masks'] = results['gt_masks'].rescale( results['scale']) else: results['gt_masks'] = results['gt_masks'].resize( results['img_shape']) def _resize_bboxes(self, results: dict) -> None: """Resize bounding boxes with ``results['scale_factor']``.""" if results.get('gt_bboxes', None) is not None: results['gt_bboxes'].rescale_(results['scale_factor']) if self.clip_object_border: results['gt_bboxes'].clip_(results['img_shape']) def _record_homography_matrix(self, results: dict) -> None: """Record the homography matrix for the Resize.""" w_scale, h_scale = results['scale_factor'] homography_matrix = np.array( [[w_scale, 0, 0], [0, h_scale, 0], [0, 0, 1]], dtype=np.float32) if results.get('homography_matrix', None) is None: results['homography_matrix'] = homography_matrix else: results['homography_matrix'] = homography_matrix @ results[ 'homography_matrix'] @autocast_box_type() def transform(self, results: dict) -> dict: """Transform function to resize images, bounding boxes and semantic segmentation map. Args: results (dict): Result dict from loading pipeline. Returns: dict: Resized results, 'img', 'gt_bboxes', 'gt_seg_map', 'scale', 'scale_factor', 'height', 'width', and 'keep_ratio' keys are updated in result dict. 
""" if self.scale: results['scale'] = self.scale else: img_shape = results['img'].shape[:2] results['scale'] = _scale_size(img_shape[::-1], self.scale_factor) self._resize_img(results) self._resize_bboxes(results) self._resize_masks(results) self._resize_seg(results) self._record_homography_matrix(results) return results def __repr__(self) -> str: repr_str = self.__class__.__name__ repr_str += f'(scale={self.scale}, ' repr_str += f'scale_factor={self.scale_factor}, ' repr_str += f'keep_ratio={self.keep_ratio}, ' repr_str += f'clip_object_border={self.clip_object_border}), ' repr_str += f'backend={self.backend}), ' repr_str += f'interpolation={self.interpolation})' return repr_str @TRANSFORMS.register_module() class FixScaleResize(Resize): """Compared to Resize, FixScaleResize fixes the scaling issue when `keep_ratio=true`.""" def _resize_img(self, results): """Resize images with ``results['scale']``.""" if results.get('img', None) is not None: if self.keep_ratio: img, scale_factor = imrescale( results['img'], results['scale'], interpolation=self.interpolation, return_scale=True, backend=self.backend) new_h, new_w = img.shape[:2] h, w = results['img'].shape[:2] w_scale = new_w / w h_scale = new_h / h else: img, w_scale, h_scale = mmcv.imresize( results['img'], results['scale'], interpolation=self.interpolation, return_scale=True, backend=self.backend) results['img'] = img results['img_shape'] = img.shape[:2] results['scale_factor'] = (w_scale, h_scale) results['keep_ratio'] = self.keep_ratio @TRANSFORMS.register_module() class ResizeShortestEdge(BaseTransform): """Resize the image and mask while keeping the aspect ratio unchanged. Modified from https://github.com/facebookresearch/detectron2/blob/main/detectron2/data/transforms/augmentation_impl.py#L130 # noqa:E501 This transform attempts to scale the shorter edge to the given `scale`, as long as the longer edge does not exceed `max_size`. If `max_size` is reached, then downscale so that the longer edge does not exceed `max_size`. Required Keys: - img - gt_seg_map (optional) Modified Keys: - img - img_shape - gt_seg_map (optional)) Added Keys: - scale - scale_factor - keep_ratio Args: scale (Union[int, Tuple[int, int]]): The target short edge length. If it's tuple, will select the min value as the short edge length. max_size (int): The maximum allowed longest edge length. """ def __init__(self, scale: Union[int, Tuple[int, int]], max_size: Optional[int] = None, resize_type: str = 'Resize', **resize_kwargs) -> None: super().__init__() self.scale = scale self.max_size = max_size self.resize_cfg = dict(type=resize_type, **resize_kwargs) self.resize = TRANSFORMS.build({'scale': 0, **self.resize_cfg}) def _get_output_shape( self, img: np.ndarray, short_edge_length: Union[int, Tuple[int, int]]) -> Tuple[int, int]: """Compute the target image shape with the given `short_edge_length`. Args: img (np.ndarray): The input image. short_edge_length (Union[int, Tuple[int, int]]): The target short edge length. If it's tuple, will select the min value as the short edge length. 
""" h, w = img.shape[:2] if isinstance(short_edge_length, int): size = short_edge_length * 1.0 elif isinstance(short_edge_length, tuple): size = min(short_edge_length) * 1.0 scale = size / min(h, w) if h < w: new_h, new_w = size, scale * w else: new_h, new_w = scale * h, size if self.max_size and max(new_h, new_w) > self.max_size: scale = self.max_size * 1.0 / max(new_h, new_w) new_h *= scale new_w *= scale new_h = int(new_h + 0.5) new_w = int(new_w + 0.5) return new_w, new_h def transform(self, results: dict) -> dict: self.resize.scale = self._get_output_shape(results['img'], self.scale) return self.resize(results) @TRANSFORMS.register_module() class FixShapeResize(Resize): """Resize images & bbox & seg to the specified size. This transform resizes the input image according to ``width`` and ``height``. Bboxes, masks, and seg map are then resized with the same parameters. Required Keys: - img - gt_bboxes (BaseBoxes[torch.float32]) (optional) - gt_masks (BitmapMasks | PolygonMasks) (optional) - gt_seg_map (np.uint8) (optional) Modified Keys: - img - img_shape - gt_bboxes - gt_masks - gt_seg_map Added Keys: - scale - scale_factor - keep_ratio - homography_matrix Args: width (int): width for resizing. height (int): height for resizing. Defaults to None. pad_val (Number | dict[str, Number], optional): Padding value for if the pad_mode is "constant". If it is a single number, the value to pad the image is the number and to pad the semantic segmentation map is 255. If it is a dict, it should have the following keys: - img: The value to pad the image. - seg: The value to pad the semantic segmentation map. Defaults to dict(img=0, seg=255). keep_ratio (bool): Whether to keep the aspect ratio when resizing the image. Defaults to False. clip_object_border (bool): Whether to clip the objects outside the border of the image. In some dataset like MOT17, the gt bboxes are allowed to cross the border of images. Therefore, we don't need to clip the gt bboxes in these cases. Defaults to True. backend (str): Image resize backend, choices are 'cv2' and 'pillow'. These two backends generates slightly different results. Defaults to 'cv2'. interpolation (str): Interpolation method, accepted values are "nearest", "bilinear", "bicubic", "area", "lanczos" for 'cv2' backend, "nearest", "bilinear" for 'pillow' backend. Defaults to 'bilinear'. """ def __init__(self, width: int, height: int, pad_val: Union[Number, dict] = dict(img=0, seg=255), keep_ratio: bool = False, clip_object_border: bool = True, backend: str = 'cv2', interpolation: str = 'bilinear') -> None: assert width is not None and height is not None, ( '`width` and' '`height` can not be `None`') self.width = width self.height = height self.scale = (width, height) self.backend = backend self.interpolation = interpolation self.keep_ratio = keep_ratio self.clip_object_border = clip_object_border if keep_ratio is True: # padding to the fixed size when keep_ratio=True self.pad_transform = Pad(size=self.scale, pad_val=pad_val) @autocast_box_type() def transform(self, results: dict) -> dict: """Transform function to resize images, bounding boxes and semantic segmentation map. Args: results (dict): Result dict from loading pipeline. Returns: dict: Resized results, 'img', 'gt_bboxes', 'gt_seg_map', 'scale', 'scale_factor', 'height', 'width', and 'keep_ratio' keys are updated in result dict. 
""" img = results['img'] h, w = img.shape[:2] if self.keep_ratio: scale_factor = min(self.width / w, self.height / h) results['scale_factor'] = (scale_factor, scale_factor) real_w, real_h = int(w * float(scale_factor) + 0.5), int(h * float(scale_factor) + 0.5) img, scale_factor = mmcv.imrescale( results['img'], (real_w, real_h), interpolation=self.interpolation, return_scale=True, backend=self.backend) # the w_scale and h_scale has minor difference # a real fix should be done in the mmcv.imrescale in the future results['img'] = img results['img_shape'] = img.shape[:2] results['keep_ratio'] = self.keep_ratio results['scale'] = (real_w, real_h) else: results['scale'] = (self.width, self.height) results['scale_factor'] = (self.width / w, self.height / h) super()._resize_img(results) self._resize_bboxes(results) self._resize_masks(results) self._resize_seg(results) self._record_homography_matrix(results) if self.keep_ratio: self.pad_transform(results) return results def __repr__(self) -> str: repr_str = self.__class__.__name__ repr_str += f'(width={self.width}, height={self.height}, ' repr_str += f'keep_ratio={self.keep_ratio}, ' repr_str += f'clip_object_border={self.clip_object_border}), ' repr_str += f'backend={self.backend}), ' repr_str += f'interpolation={self.interpolation})' return repr_str @TRANSFORMS.register_module() class RandomFlip(MMCV_RandomFlip): """Flip the image & bbox & mask & segmentation map. Added or Updated keys: flip, flip_direction, img, gt_bboxes, and gt_seg_map. There are 3 flip modes: - ``prob`` is float, ``direction`` is string: the image will be ``direction``ly flipped with probability of ``prob`` . E.g., ``prob=0.5``, ``direction='horizontal'``, then image will be horizontally flipped with probability of 0.5. - ``prob`` is float, ``direction`` is list of string: the image will be ``direction[i]``ly flipped with probability of ``prob/len(direction)``. E.g., ``prob=0.5``, ``direction=['horizontal', 'vertical']``, then image will be horizontally flipped with probability of 0.25, vertically with probability of 0.25. - ``prob`` is list of float, ``direction`` is list of string: given ``len(prob) == len(direction)``, the image will be ``direction[i]``ly flipped with probability of ``prob[i]``. E.g., ``prob=[0.3, 0.5]``, ``direction=['horizontal', 'vertical']``, then image will be horizontally flipped with probability of 0.3, vertically with probability of 0.5. Required Keys: - img - gt_bboxes (BaseBoxes[torch.float32]) (optional) - gt_masks (BitmapMasks | PolygonMasks) (optional) - gt_seg_map (np.uint8) (optional) Modified Keys: - img - gt_bboxes - gt_masks - gt_seg_map Added Keys: - flip - flip_direction - homography_matrix Args: prob (float | list[float], optional): The flipping probability. Defaults to None. direction(str | list[str]): The flipping direction. Options If input is a list, the length must equal ``prob``. Each element in ``prob`` indicates the flip probability of corresponding direction. Defaults to 'horizontal'. 
""" def _record_homography_matrix(self, results: dict) -> None: """Record the homography matrix for the RandomFlip.""" cur_dir = results['flip_direction'] h, w = results['img'].shape[:2] if cur_dir == 'horizontal': homography_matrix = np.array([[-1, 0, w], [0, 1, 0], [0, 0, 1]], dtype=np.float32) elif cur_dir == 'vertical': homography_matrix = np.array([[1, 0, 0], [0, -1, h], [0, 0, 1]], dtype=np.float32) elif cur_dir == 'diagonal': homography_matrix = np.array([[-1, 0, w], [0, -1, h], [0, 0, 1]], dtype=np.float32) else: homography_matrix = np.eye(3, dtype=np.float32) if results.get('homography_matrix', None) is None: results['homography_matrix'] = homography_matrix else: results['homography_matrix'] = homography_matrix @ results[ 'homography_matrix'] @autocast_box_type() def _flip(self, results: dict) -> None: """Flip images, bounding boxes, and semantic segmentation map.""" # flip image results['img'] = mmcv.imflip( results['img'], direction=results['flip_direction']) img_shape = results['img'].shape[:2] # flip bboxes if results.get('gt_bboxes', None) is not None: results['gt_bboxes'].flip_(img_shape, results['flip_direction']) # flip masks if results.get('gt_masks', None) is not None: results['gt_masks'] = results['gt_masks'].flip( results['flip_direction']) # flip segs if results.get('gt_seg_map', None) is not None: results['gt_seg_map'] = mmcv.imflip( results['gt_seg_map'], direction=results['flip_direction']) # record homography matrix for flip self._record_homography_matrix(results) @TRANSFORMS.register_module() class RandomShift(BaseTransform): """Shift the image and box given shift pixels and probability. Required Keys: - img - gt_bboxes (BaseBoxes[torch.float32]) - gt_bboxes_labels (np.int64) - gt_ignore_flags (bool) (optional) Modified Keys: - img - gt_bboxes - gt_bboxes_labels - gt_ignore_flags (bool) (optional) Args: prob (float): Probability of shifts. Defaults to 0.5. max_shift_px (int): The max pixels for shifting. Defaults to 32. filter_thr_px (int): The width and height threshold for filtering. The bbox and the rest of the targets below the width and height threshold will be filtered. Defaults to 1. """ def __init__(self, prob: float = 0.5, max_shift_px: int = 32, filter_thr_px: int = 1) -> None: assert 0 <= prob <= 1 assert max_shift_px >= 0 self.prob = prob self.max_shift_px = max_shift_px self.filter_thr_px = int(filter_thr_px) @cache_randomness def _random_prob(self) -> float: return random.uniform(0, 1) @autocast_box_type() def transform(self, results: dict) -> dict: """Transform function to random shift images, bounding boxes. Args: results (dict): Result dict from loading pipeline. Returns: dict: Shift results. """ if self._random_prob() < self.prob: img_shape = results['img'].shape[:2] random_shift_x = random.randint(-self.max_shift_px, self.max_shift_px) random_shift_y = random.randint(-self.max_shift_px, self.max_shift_px) new_x = max(0, random_shift_x) ori_x = max(0, -random_shift_x) new_y = max(0, random_shift_y) ori_y = max(0, -random_shift_y) # TODO: support mask and semantic segmentation maps. bboxes = results['gt_bboxes'].clone() bboxes.translate_([random_shift_x, random_shift_y]) # clip border bboxes.clip_(img_shape) # remove invalid bboxes valid_inds = (bboxes.widths > self.filter_thr_px).numpy() & ( bboxes.heights > self.filter_thr_px).numpy() # If the shift does not contain any gt-bbox area, skip this # image. 
if not valid_inds.any(): return results bboxes = bboxes[valid_inds] results['gt_bboxes'] = bboxes results['gt_bboxes_labels'] = results['gt_bboxes_labels'][ valid_inds] if results.get('gt_ignore_flags', None) is not None: results['gt_ignore_flags'] = \ results['gt_ignore_flags'][valid_inds] # shift img img = results['img'] new_img = np.zeros_like(img) img_h, img_w = img.shape[:2] new_h = img_h - np.abs(random_shift_y) new_w = img_w - np.abs(random_shift_x) new_img[new_y:new_y + new_h, new_x:new_x + new_w] \ = img[ori_y:ori_y + new_h, ori_x:ori_x + new_w] results['img'] = new_img return results def __repr__(self): repr_str = self.__class__.__name__ repr_str += f'(prob={self.prob}, ' repr_str += f'max_shift_px={self.max_shift_px}, ' repr_str += f'filter_thr_px={self.filter_thr_px})' return repr_str @TRANSFORMS.register_module() class Pad(MMCV_Pad): """Pad the image & segmentation map. There are three padding modes: (1) pad to a fixed size and (2) pad to the minimum size that is divisible by some number. and (3)pad to square. Also, pad to square and pad to the minimum size can be used as the same time. Required Keys: - img - gt_bboxes (BaseBoxes[torch.float32]) (optional) - gt_masks (BitmapMasks | PolygonMasks) (optional) - gt_seg_map (np.uint8) (optional) Modified Keys: - img - img_shape - gt_masks - gt_seg_map Added Keys: - pad_shape - pad_fixed_size - pad_size_divisor Args: size (tuple, optional): Fixed padding size. Expected padding shape (width, height). Defaults to None. size_divisor (int, optional): The divisor of padded size. Defaults to None. pad_to_square (bool): Whether to pad the image into a square. Currently only used for YOLOX. Defaults to False. pad_val (Number | dict[str, Number], optional) - Padding value for if the pad_mode is "constant". If it is a single number, the value to pad the image is the number and to pad the semantic segmentation map is 255. If it is a dict, it should have the following keys: - img: The value to pad the image. - seg: The value to pad the semantic segmentation map. Defaults to dict(img=0, seg=255). padding_mode (str): Type of padding. Should be: constant, edge, reflect or symmetric. Defaults to 'constant'. - constant: pads with a constant value, this value is specified with pad_val. - edge: pads with the last value at the edge of the image. - reflect: pads with reflection of image without repeating the last value on the edge. For example, padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode will result in [3, 2, 1, 2, 3, 4, 3, 2]. - symmetric: pads with reflection of image repeating the last value on the edge. For example, padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode will result in [2, 1, 1, 2, 3, 4, 4, 3] """ def _pad_masks(self, results: dict) -> None: """Pad masks according to ``results['pad_shape']``.""" if results.get('gt_masks', None) is not None: pad_val = self.pad_val.get('masks', 0) pad_shape = results['pad_shape'][:2] results['gt_masks'] = results['gt_masks'].pad( pad_shape, pad_val=pad_val) def transform(self, results: dict) -> dict: """Call function to pad images, masks, semantic segmentation maps. Args: results (dict): Result dict from loading pipeline. Returns: dict: Updated result dict. """ self._pad_img(results) self._pad_seg(results) self._pad_masks(results) return results @TRANSFORMS.register_module() class RandomCrop(BaseTransform): """Random crop the image & bboxes & masks. 
The absolute ``crop_size`` is sampled based on ``crop_type`` and ``image_size``, then the cropped results are generated. Required Keys: - img - gt_bboxes (BaseBoxes[torch.float32]) (optional) - gt_bboxes_labels (np.int64) (optional) - gt_masks (BitmapMasks | PolygonMasks) (optional) - gt_ignore_flags (bool) (optional) - gt_seg_map (np.uint8) (optional) Modified Keys: - img - img_shape - gt_bboxes (optional) - gt_bboxes_labels (optional) - gt_masks (optional) - gt_ignore_flags (optional) - gt_seg_map (optional) - gt_instances_ids (options, only used in MOT/VIS) Added Keys: - homography_matrix Args: crop_size (tuple): The relative ratio or absolute pixels of (width, height). crop_type (str, optional): One of "relative_range", "relative", "absolute", "absolute_range". "relative" randomly crops (h * crop_size[0], w * crop_size[1]) part from an input of size (h, w). "relative_range" uniformly samples relative crop size from range [crop_size[0], 1] and [crop_size[1], 1] for height and width respectively. "absolute" crops from an input with absolute size (crop_size[0], crop_size[1]). "absolute_range" uniformly samples crop_h in range [crop_size[0], min(h, crop_size[1])] and crop_w in range [crop_size[0], min(w, crop_size[1])]. Defaults to "absolute". allow_negative_crop (bool, optional): Whether to allow a crop that does not contain any bbox area. Defaults to False. recompute_bbox (bool, optional): Whether to re-compute the boxes based on cropped instance masks. Defaults to False. bbox_clip_border (bool, optional): Whether clip the objects outside the border of the image. Defaults to True. Note: - If the image is smaller than the absolute crop size, return the original image. - The keys for bboxes, labels and masks must be aligned. That is, ``gt_bboxes`` corresponds to ``gt_labels`` and ``gt_masks``, and ``gt_bboxes_ignore`` corresponds to ``gt_labels_ignore`` and ``gt_masks_ignore``. - If the crop does not contain any gt-bbox region and ``allow_negative_crop`` is set to False, skip this image. """ def __init__(self, crop_size: tuple, crop_type: str = 'absolute', allow_negative_crop: bool = False, recompute_bbox: bool = False, bbox_clip_border: bool = True) -> None: if crop_type not in [ 'relative_range', 'relative', 'absolute', 'absolute_range' ]: raise ValueError(f'Invalid crop_type {crop_type}.') if crop_type in ['absolute', 'absolute_range']: assert crop_size[0] > 0 and crop_size[1] > 0 assert isinstance(crop_size[0], int) and isinstance( crop_size[1], int) if crop_type == 'absolute_range': assert crop_size[0] <= crop_size[1] else: assert 0 < crop_size[0] <= 1 and 0 < crop_size[1] <= 1 self.crop_size = crop_size self.crop_type = crop_type self.allow_negative_crop = allow_negative_crop self.bbox_clip_border = bbox_clip_border self.recompute_bbox = recompute_bbox def _crop_data(self, results: dict, crop_size: Tuple[int, int], allow_negative_crop: bool) -> Union[dict, None]: """Function to randomly crop images, bounding boxes, masks, semantic segmentation maps. Args: results (dict): Result dict from loading pipeline. crop_size (Tuple[int, int]): Expected absolute size after cropping, (h, w). allow_negative_crop (bool): Whether to allow a crop that does not contain any bbox area. Returns: results (Union[dict, None]): Randomly cropped results, 'img_shape' key in result dict is updated according to crop size. None will be returned when there is no valid bbox after cropping. 
""" assert crop_size[0] > 0 and crop_size[1] > 0 img = results['img'] margin_h = max(img.shape[0] - crop_size[0], 0) margin_w = max(img.shape[1] - crop_size[1], 0) offset_h, offset_w = self._rand_offset((margin_h, margin_w)) crop_y1, crop_y2 = offset_h, offset_h + crop_size[0] crop_x1, crop_x2 = offset_w, offset_w + crop_size[1] # Record the homography matrix for the RandomCrop homography_matrix = np.array( [[1, 0, -offset_w], [0, 1, -offset_h], [0, 0, 1]], dtype=np.float32) if results.get('homography_matrix', None) is None: results['homography_matrix'] = homography_matrix else: results['homography_matrix'] = homography_matrix @ results[ 'homography_matrix'] # crop the image img = img[crop_y1:crop_y2, crop_x1:crop_x2, ...] img_shape = img.shape results['img'] = img results['img_shape'] = img_shape[:2] # crop bboxes accordingly and clip to the image boundary if results.get('gt_bboxes', None) is not None: bboxes = results['gt_bboxes'] bboxes.translate_([-offset_w, -offset_h]) if self.bbox_clip_border: bboxes.clip_(img_shape[:2]) valid_inds = bboxes.is_inside(img_shape[:2]).numpy() # If the crop does not contain any gt-bbox area and # allow_negative_crop is False, skip this image. if (not valid_inds.any() and not allow_negative_crop): return None results['gt_bboxes'] = bboxes[valid_inds] if results.get('gt_ignore_flags', None) is not None: results['gt_ignore_flags'] = \ results['gt_ignore_flags'][valid_inds] if results.get('gt_bboxes_labels', None) is not None: results['gt_bboxes_labels'] = \ results['gt_bboxes_labels'][valid_inds] if results.get('gt_masks', None) is not None: results['gt_masks'] = results['gt_masks'][ valid_inds.nonzero()[0]].crop( np.asarray([crop_x1, crop_y1, crop_x2, crop_y2])) if self.recompute_bbox: results['gt_bboxes'] = results['gt_masks'].get_bboxes( type(results['gt_bboxes'])) # We should remove the instance ids corresponding to invalid boxes. if results.get('gt_instances_ids', None) is not None: results['gt_instances_ids'] = \ results['gt_instances_ids'][valid_inds] # crop semantic seg if results.get('gt_seg_map', None) is not None: results['gt_seg_map'] = results['gt_seg_map'][crop_y1:crop_y2, crop_x1:crop_x2] return results @cache_randomness def _rand_offset(self, margin: Tuple[int, int]) -> Tuple[int, int]: """Randomly generate crop offset. Args: margin (Tuple[int, int]): The upper bound for the offset generated randomly. Returns: Tuple[int, int]: The random offset for the crop. """ margin_h, margin_w = margin offset_h = np.random.randint(0, margin_h + 1) offset_w = np.random.randint(0, margin_w + 1) return offset_h, offset_w @cache_randomness def _get_crop_size(self, image_size: Tuple[int, int]) -> Tuple[int, int]: """Randomly generates the absolute crop size based on `crop_type` and `image_size`. Args: image_size (Tuple[int, int]): (h, w). Returns: crop_size (Tuple[int, int]): (crop_h, crop_w) in absolute pixels. 
""" h, w = image_size if self.crop_type == 'absolute': return min(self.crop_size[1], h), min(self.crop_size[0], w) elif self.crop_type == 'absolute_range': crop_h = np.random.randint( min(h, self.crop_size[0]), min(h, self.crop_size[1]) + 1) crop_w = np.random.randint( min(w, self.crop_size[0]), min(w, self.crop_size[1]) + 1) return crop_h, crop_w elif self.crop_type == 'relative': crop_w, crop_h = self.crop_size return int(h * crop_h + 0.5), int(w * crop_w + 0.5) else: # 'relative_range' crop_size = np.asarray(self.crop_size, dtype=np.float32) crop_h, crop_w = crop_size + np.random.rand(2) * (1 - crop_size) return int(h * crop_h + 0.5), int(w * crop_w + 0.5) @autocast_box_type() def transform(self, results: dict) -> Union[dict, None]: """Transform function to randomly crop images, bounding boxes, masks, semantic segmentation maps. Args: results (dict): Result dict from loading pipeline. Returns: results (Union[dict, None]): Randomly cropped results, 'img_shape' key in result dict is updated according to crop size. None will be returned when there is no valid bbox after cropping. """ image_size = results['img'].shape[:2] crop_size = self._get_crop_size(image_size) results = self._crop_data(results, crop_size, self.allow_negative_crop) return results def __repr__(self) -> str: repr_str = self.__class__.__name__ repr_str += f'(crop_size={self.crop_size}, ' repr_str += f'crop_type={self.crop_type}, ' repr_str += f'allow_negative_crop={self.allow_negative_crop}, ' repr_str += f'recompute_bbox={self.recompute_bbox}, ' repr_str += f'bbox_clip_border={self.bbox_clip_border})' return repr_str @TRANSFORMS.register_module() class SegRescale(BaseTransform): """Rescale semantic segmentation maps. This transform rescale the ``gt_seg_map`` according to ``scale_factor``. Required Keys: - gt_seg_map Modified Keys: - gt_seg_map Args: scale_factor (float): The scale factor of the final output. Defaults to 1. backend (str): Image rescale backend, choices are 'cv2' and 'pillow'. These two backends generates slightly different results. Defaults to 'cv2'. """ def __init__(self, scale_factor: float = 1, backend: str = 'cv2') -> None: self.scale_factor = scale_factor self.backend = backend def transform(self, results: dict) -> dict: """Transform function to scale the semantic segmentation map. Args: results (dict): Result dict from loading pipeline. Returns: dict: Result dict with semantic segmentation map scaled. """ if self.scale_factor != 1: results['gt_seg_map'] = mmcv.imrescale( results['gt_seg_map'], self.scale_factor, interpolation='nearest', backend=self.backend) return results def __repr__(self) -> str: repr_str = self.__class__.__name__ repr_str += f'(scale_factor={self.scale_factor}, ' repr_str += f'backend={self.backend})' return repr_str @TRANSFORMS.register_module() class PhotoMetricDistortion(BaseTransform): """Apply photometric distortion to image sequentially, every transformation is applied with a probability of 0.5. The position of random contrast is in second or second to last. 1. random brightness 2. random contrast (mode 0) 3. convert color from BGR to HSV 4. random saturation 5. random hue 6. convert color from HSV to BGR 7. random contrast (mode 1) 8. randomly swap channels Required Keys: - img (np.uint8) Modified Keys: - img (np.float32) Args: brightness_delta (int): delta of brightness. contrast_range (sequence): range of contrast. saturation_range (sequence): range of saturation. hue_delta (int): delta of hue. 
""" def __init__(self, brightness_delta: int = 32, contrast_range: Sequence[Number] = (0.5, 1.5), saturation_range: Sequence[Number] = (0.5, 1.5), hue_delta: int = 18) -> None: self.brightness_delta = brightness_delta self.contrast_lower, self.contrast_upper = contrast_range self.saturation_lower, self.saturation_upper = saturation_range self.hue_delta = hue_delta @cache_randomness def _random_flags(self) -> Sequence[Number]: mode = random.randint(2) brightness_flag = random.randint(2) contrast_flag = random.randint(2) saturation_flag = random.randint(2) hue_flag = random.randint(2) swap_flag = random.randint(2) delta_value = random.uniform(-self.brightness_delta, self.brightness_delta) alpha_value = random.uniform(self.contrast_lower, self.contrast_upper) saturation_value = random.uniform(self.saturation_lower, self.saturation_upper) hue_value = random.uniform(-self.hue_delta, self.hue_delta) swap_value = random.permutation(3) return (mode, brightness_flag, contrast_flag, saturation_flag, hue_flag, swap_flag, delta_value, alpha_value, saturation_value, hue_value, swap_value) def transform(self, results: dict) -> dict: """Transform function to perform photometric distortion on images. Args: results (dict): Result dict from loading pipeline. Returns: dict: Result dict with images distorted. """ assert 'img' in results, '`img` is not found in results' img = results['img'] img = img.astype(np.float32) (mode, brightness_flag, contrast_flag, saturation_flag, hue_flag, swap_flag, delta_value, alpha_value, saturation_value, hue_value, swap_value) = self._random_flags() # random brightness if brightness_flag: img += delta_value # mode == 0 --> do random contrast first # mode == 1 --> do random contrast last if mode == 1: if contrast_flag: img *= alpha_value # convert color from BGR to HSV img = mmcv.bgr2hsv(img) # random saturation if saturation_flag: img[..., 1] *= saturation_value # For image(type=float32), after convert bgr to hsv by opencv, # valid saturation value range is [0, 1] if saturation_value > 1: img[..., 1] = img[..., 1].clip(0, 1) # random hue if hue_flag: img[..., 0] += hue_value img[..., 0][img[..., 0] > 360] -= 360 img[..., 0][img[..., 0] < 0] += 360 # convert color from HSV to BGR img = mmcv.hsv2bgr(img) # random contrast if mode == 0: if contrast_flag: img *= alpha_value # randomly swap channels if swap_flag: img = img[..., swap_value] results['img'] = img return results def __repr__(self) -> str: repr_str = self.__class__.__name__ repr_str += f'(brightness_delta={self.brightness_delta}, ' repr_str += 'contrast_range=' repr_str += f'{(self.contrast_lower, self.contrast_upper)}, ' repr_str += 'saturation_range=' repr_str += f'{(self.saturation_lower, self.saturation_upper)}, ' repr_str += f'hue_delta={self.hue_delta})' return repr_str @TRANSFORMS.register_module() class Expand(BaseTransform): """Random expand the image & bboxes & masks & segmentation map. Randomly place the original image on a canvas of ``ratio`` x original image size filled with mean values. The ratio is in the range of ratio_range. Required Keys: - img - img_shape - gt_bboxes (BaseBoxes[torch.float32]) (optional) - gt_masks (BitmapMasks | PolygonMasks) (optional) - gt_seg_map (np.uint8) (optional) Modified Keys: - img - img_shape - gt_bboxes - gt_masks - gt_seg_map Args: mean (sequence): mean value of dataset. to_rgb (bool): if need to convert the order of mean to align with RGB. ratio_range (sequence)): range of expand ratio. seg_ignore_label (int): label of ignore segmentation map. 
prob (float): probability of applying this transformation """ def __init__(self, mean: Sequence[Number] = (0, 0, 0), to_rgb: bool = True, ratio_range: Sequence[Number] = (1, 4), seg_ignore_label: int = None, prob: float = 0.5) -> None: self.to_rgb = to_rgb self.ratio_range = ratio_range if to_rgb: self.mean = mean[::-1] else: self.mean = mean self.min_ratio, self.max_ratio = ratio_range self.seg_ignore_label = seg_ignore_label self.prob = prob @cache_randomness def _random_prob(self) -> float: return random.uniform(0, 1) @cache_randomness def _random_ratio(self) -> float: return random.uniform(self.min_ratio, self.max_ratio) @cache_randomness def _random_left_top(self, ratio: float, h: int, w: int) -> Tuple[int, int]: left = int(random.uniform(0, w * ratio - w)) top = int(random.uniform(0, h * ratio - h)) return left, top @autocast_box_type() def transform(self, results: dict) -> dict: """Transform function to expand images, bounding boxes, masks, segmentation map. Args: results (dict): Result dict from loading pipeline. Returns: dict: Result dict with images, bounding boxes, masks, segmentation map expanded. """ if self._random_prob() > self.prob: return results assert 'img' in results, '`img` is not found in results' img = results['img'] h, w, c = img.shape ratio = self._random_ratio() # speedup expand when meets large image if np.all(self.mean == self.mean[0]): expand_img = np.empty((int(h * ratio), int(w * ratio), c), img.dtype) expand_img.fill(self.mean[0]) else: expand_img = np.full((int(h * ratio), int(w * ratio), c), self.mean, dtype=img.dtype) left, top = self._random_left_top(ratio, h, w) expand_img[top:top + h, left:left + w] = img results['img'] = expand_img results['img_shape'] = expand_img.shape[:2] # expand bboxes if results.get('gt_bboxes', None) is not None: results['gt_bboxes'].translate_([left, top]) # expand masks if results.get('gt_masks', None) is not None: results['gt_masks'] = results['gt_masks'].expand( int(h * ratio), int(w * ratio), top, left) # expand segmentation map if results.get('gt_seg_map', None) is not None: gt_seg = results['gt_seg_map'] expand_gt_seg = np.full((int(h * ratio), int(w * ratio)), self.seg_ignore_label, dtype=gt_seg.dtype) expand_gt_seg[top:top + h, left:left + w] = gt_seg results['gt_seg_map'] = expand_gt_seg return results def __repr__(self) -> str: repr_str = self.__class__.__name__ repr_str += f'(mean={self.mean}, to_rgb={self.to_rgb}, ' repr_str += f'ratio_range={self.ratio_range}, ' repr_str += f'seg_ignore_label={self.seg_ignore_label}, ' repr_str += f'prob={self.prob})' return repr_str @TRANSFORMS.register_module() class MinIoURandomCrop(BaseTransform): """Random crop the image & bboxes & masks & segmentation map, the cropped patches have minimum IoU requirement with original image & bboxes & masks. & segmentation map, the IoU threshold is randomly selected from min_ious. Required Keys: - img - img_shape - gt_bboxes (BaseBoxes[torch.float32]) (optional) - gt_bboxes_labels (np.int64) (optional) - gt_masks (BitmapMasks | PolygonMasks) (optional) - gt_ignore_flags (bool) (optional) - gt_seg_map (np.uint8) (optional) Modified Keys: - img - img_shape - gt_bboxes - gt_bboxes_labels - gt_masks - gt_ignore_flags - gt_seg_map Args: min_ious (Sequence[float]): minimum IoU threshold for all intersections with bounding boxes. min_crop_size (float): minimum crop's size (i.e. h,w := a*h, a*w, where a >= min_crop_size). bbox_clip_border (bool, optional): Whether clip the objects outside the border of the image. Defaults to True. 
""" def __init__(self, min_ious: Sequence[float] = (0.1, 0.3, 0.5, 0.7, 0.9), min_crop_size: float = 0.3, bbox_clip_border: bool = True) -> None: self.min_ious = min_ious self.sample_mode = (1, *min_ious, 0) self.min_crop_size = min_crop_size self.bbox_clip_border = bbox_clip_border @cache_randomness def _random_mode(self) -> Number: return random.choice(self.sample_mode) @autocast_box_type() def transform(self, results: dict) -> dict: """Transform function to crop images and bounding boxes with minimum IoU constraint. Args: results (dict): Result dict from loading pipeline. Returns: dict: Result dict with images and bounding boxes cropped, \ 'img_shape' key is updated. """ assert 'img' in results, '`img` is not found in results' assert 'gt_bboxes' in results, '`gt_bboxes` is not found in results' img = results['img'] boxes = results['gt_bboxes'] h, w, c = img.shape while True: mode = self._random_mode() self.mode = mode if mode == 1: return results min_iou = self.mode for i in range(50): new_w = random.uniform(self.min_crop_size * w, w) new_h = random.uniform(self.min_crop_size * h, h) # h / w in [0.5, 2] if new_h / new_w < 0.5 or new_h / new_w > 2: continue left = random.uniform(w - new_w) top = random.uniform(h - new_h) patch = np.array( (int(left), int(top), int(left + new_w), int(top + new_h))) # Line or point crop is not allowed if patch[2] == patch[0] or patch[3] == patch[1]: continue overlaps = boxes.overlaps(
next_line: HorizontalBoxes(patch.reshape(-1, 4).astype(np.float32)),
gold_snippet_index: 2
created_at: 2023-12-11 15:23:03+00:00
level: 24k
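Record 1's target file implements mmdetection's data-augmentation transforms, each registered in the TRANSFORMS registry shown in the record's first context snippet. As a hedged illustration of how these classes are meant to be used (assuming an installed mmdetection 3.x stack with mmcv and mmengine; the config values below are illustrative, not taken from the record), transforms are built from config dicts and chained:

    import numpy as np
    from mmdet.registry import TRANSFORMS
    from mmdet.structures.bbox import HorizontalBoxes
    from mmengine.dataset import Compose

    # Build each transform through the registry, then chain them.
    pipeline = Compose([
        TRANSFORMS.build(dict(type='Expand', mean=(123.675, 116.28, 103.53), prob=0.5)),
        TRANSFORMS.build(dict(type='MinIoURandomCrop')),
        TRANSFORMS.build(dict(type='Resize', scale=(1333, 800), keep_ratio=True)),
        TRANSFORMS.build(dict(type='RandomFlip', prob=0.5)),
    ])

    # A toy results dict carrying the Required Keys named in the docstrings.
    results = dict(
        img=np.zeros((480, 640, 3), dtype=np.uint8),
        img_shape=(480, 640),
        gt_bboxes=HorizontalBoxes(np.array([[10., 10., 100., 120.]], dtype=np.float32)),
        gt_bboxes_labels=np.array([0], dtype=np.int64),
        gt_ignore_flags=np.array([False]),
    )
    results = pipeline(results)  # updates img, img_shape, gt_bboxes, ...

Each transform consumes and returns the results dict, updating exactly the keys listed in the Required/Modified Keys sections of its docstring.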
Record 2

repo_name: open-mmlab/PIA
file_path: animatediff/pipelines/validation_pipeline.py
context: [ { "identifier": "UNet3DConditionModel", "path": "animatediff/models/unet.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Opti...
import_statement:
    import inspect
    import random
    import argparse
    import numpy as np
    import torch
    import os
    from typing import Callable, List, Optional, Union
    from dataclasses import dataclass
    from tqdm import tqdm
    from omegaconf import OmegaConf
    from diffusers.utils import is_accelerate_available
    from packaging import version
    from transformers import CLIPTextModel, CLIPTokenizer
    from safetensors import safe_open
    from diffusers.configuration_utils import FrozenDict
    from diffusers.models import AutoencoderKL
    from diffusers.pipelines import DiffusionPipeline
    from diffusers.schedulers import (
        DDIMScheduler,
        DPMSolverMultistepScheduler,
        EulerAncestralDiscreteScheduler,
        EulerDiscreteScheduler,
        LMSDiscreteScheduler,
        PNDMScheduler,
    )
    from diffusers.utils import deprecate, logging, BaseOutput
    from einops import rearrange
    from animatediff.models.unet import UNet3DConditionModel
    from animatediff.utils.convert_from_ckpt import convert_ldm_unet_checkpoint, convert_ldm_clip_checkpoint, convert_ldm_vae_checkpoint
    from animatediff.utils.convert_lora_safetensor_to_diffusers import convert_lora
    from animatediff.utils.util import prepare_mask_coef, save_videos_grid
    from animatediff.models.resnet import InflatedConv3d
    from PIL import Image
    from accelerate import cpu_offload
token_num: 14,614

cropped_code:
raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) def prepare_latents(self, batch_size, num_channels_latents, video_length, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, video_length, height // self.vae_scale_factor, width // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if latents is None: rand_device = "cpu" if device.type == "mps" else device if isinstance(generator, list): shape = shape # shape = (1,) + shape[1:] latents = [ torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype) for i in range(batch_size) ] latents = torch.cat(latents, dim=0).to(device) else: latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype).to(device) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents @torch.no_grad() def __call__( self, prompt: Union[str, List[str]], use_image: bool, video_length: Optional[int], height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_videos_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "tensor", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: Optional[int] = 1, **kwargs, ): # Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # Check inputs. Raise error if not correct self.check_inputs(prompt, height, width, callback_steps) # Define call parameters # batch_size = 1 if isinstance(prompt, str) else len(prompt) batch_size = 1 if latents is not None: batch_size = latents.shape[0] if isinstance(prompt, list): batch_size = len(prompt) device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
do_classifier_free_guidance = guidance_scale > 1.0 # Encode input prompt prompt = prompt if isinstance(prompt, list) else [prompt] * batch_size if negative_prompt is not None: negative_prompt = negative_prompt if isinstance(negative_prompt, list) else [negative_prompt] * batch_size text_embeddings = self._encode_prompt( prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt ) # Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # Prepare latent variables num_channels_latents = self.unet.in_channels latents = self.prepare_latents( batch_size * num_videos_per_prompt, num_channels_latents, video_length, height, width, text_embeddings.dtype, device, generator, latents, ) latents_dtype = latents.dtype if use_image != False: shape = (batch_size, num_channels_latents, video_length, height // self.vae_scale_factor, width // self.vae_scale_factor) image = Image.open(f'test_image/init_image{use_image}.png').convert('RGB') image = preprocess_image(image).to(device) if isinstance(generator, list): image_latent = [ self.vae.encode(image[k : k + 1]).latent_dist.sample(generator[k]) for k in range(batch_size) ] image_latent = torch.cat(image_latent, dim=0).to(device=device) else: image_latent = self.vae.encode(image).latent_dist.sample(generator).to(device=device) image_latent = torch.nn.functional.interpolate(image_latent, size=[shape[-2], shape[-1]]) image_latent_padding = image_latent.clone() * 0.18215 mask = torch.zeros((shape[0], 1, shape[2], shape[3], shape[4])).to(device)
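all_code: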
# Adapted from https://github.com/showlab/Tune-A-Video/blob/main/tuneavideo/pipelines/pipeline_tuneavideo.py PIL_INTERPOLATION = { "linear": Image.Resampling.BILINEAR, "bilinear": Image.Resampling.BILINEAR, "bicubic": Image.Resampling.BICUBIC, "lanczos": Image.Resampling.LANCZOS, "nearest": Image.Resampling.NEAREST, } def preprocess_image(image): if isinstance(image, torch.Tensor): return image elif isinstance(image, Image.Image): image = [image] if isinstance(image[0], Image.Image): w, h = image[0].size w, h = map(lambda x: x - x % 8, (w, h)) # resize to integer multiple of 8 image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image] image = np.concatenate(image, axis=0) if len(image.shape) == 3: image = image.reshape(image.shape[0], image.shape[1], image.shape[2], 1) image = np.array(image).astype(np.float32) / 255.0 image = image.transpose(0, 3, 1, 2) image = 2.0 * image - 1.0 image = torch.from_numpy(image) elif isinstance(image[0], torch.Tensor): image = torch.cat(image, dim=0) return image logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class AnimationPipelineOutput(BaseOutput): videos: Union[torch.Tensor, np.ndarray] class ValidationPipeline(DiffusionPipeline): _optional_components = [] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet3DConditionModel, scheduler: Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ], ): super().__init__() if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["steps_offset"] = 1 scheduler._internal_dict = FrozenDict(new_config) if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." " `clip_sample` should be set to False in the configuration file. Please make sure to update the" " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" " future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" ) deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["clip_sample"] = False scheduler._internal_dict = FrozenDict(new_config) is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( version.parse(unet.config._diffusers_version).base_version ) < version.parse("0.9.0.dev0") is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = ( "The configuration file of the unet has set the default `sample_size` to smaller than" " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" " in the config might lead to incorrect results in future versions. If you have downloaded this" " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" " the `unet/config.json` file" ) deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config["sample_size"] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) def enable_vae_slicing(self): self.vae.enable_slicing() def disable_vae_slicing(self): self.vae.disable_slicing() def enable_sequential_cpu_offload(self, gpu_id=0): if is_accelerate_available(): else: raise ImportError("Please install accelerate via `pip install accelerate`") device = torch.device(f"cuda:{gpu_id}") for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]: if cpu_offloaded_model is not None: cpu_offload(cpu_offloaded_model, device) @property def _execution_device(self): if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"): return self.device for module in self.unet.modules(): if ( hasattr(module, "_hf_hook") and hasattr(module._hf_hook, "execution_device") and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device) return self.device def _encode_prompt(self, prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt): batch_size = len(prompt) if isinstance(prompt, list) else 1 text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" 
{self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None text_embeddings = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, ) text_embeddings = text_embeddings[0] # duplicate text embeddings for each generation per prompt, using mps friendly method bs_embed, seq_len, _ = text_embeddings.shape text_embeddings = text_embeddings.repeat(1, num_videos_per_prompt, 1) text_embeddings = text_embeddings.view(bs_embed * num_videos_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt max_length = text_input_ids.shape[-1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None uncond_embeddings = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) uncond_embeddings = uncond_embeddings[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = uncond_embeddings.shape[1] uncond_embeddings = uncond_embeddings.repeat(1, num_videos_per_prompt, 1) uncond_embeddings = uncond_embeddings.view(batch_size * num_videos_per_prompt, seq_len, -1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) return text_embeddings def decode_latents(self, latents): video_length = latents.shape[2] latents = 1 / 0.18215 * latents latents = rearrange(latents, "b c f h w -> (b f) c h w") # video = self.vae.decode(latents).sample video = [] for frame_idx in tqdm(range(latents.shape[0])): video.append(self.vae.decode(latents[frame_idx:frame_idx+1]).sample) video = torch.cat(video) video = rearrange(video, "(b f) c h w -> b c f h w", f=video_length) video = (video / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16 video = video.cpu().float().numpy() return video def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, callback_steps): if not isinstance(prompt, str) and not isinstance(prompt, list): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) def prepare_latents(self, batch_size, num_channels_latents, video_length, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, video_length, height // self.vae_scale_factor, width // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if latents is None: rand_device = "cpu" if device.type == "mps" else device if isinstance(generator, list): shape = shape # shape = (1,) + shape[1:] latents = [ torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype) for i in range(batch_size) ] latents = torch.cat(latents, dim=0).to(device) else: latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype).to(device) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents @torch.no_grad() def __call__( self, prompt: Union[str, List[str]], use_image: bool, video_length: Optional[int], height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_videos_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "tensor", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: Optional[int] = 1, **kwargs, ): # Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # Check inputs. 
Raise error if not correct self.check_inputs(prompt, height, width, callback_steps) # Define call parameters # batch_size = 1 if isinstance(prompt, str) else len(prompt) batch_size = 1 if latents is not None: batch_size = latents.shape[0] if isinstance(prompt, list): batch_size = len(prompt) device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # Encode input prompt prompt = prompt if isinstance(prompt, list) else [prompt] * batch_size if negative_prompt is not None: negative_prompt = negative_prompt if isinstance(negative_prompt, list) else [negative_prompt] * batch_size text_embeddings = self._encode_prompt( prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt ) # Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # Prepare latent variables num_channels_latents = self.unet.in_channels latents = self.prepare_latents( batch_size * num_videos_per_prompt, num_channels_latents, video_length, height, width, text_embeddings.dtype, device, generator, latents, ) latents_dtype = latents.dtype if use_image != False: shape = (batch_size, num_channels_latents, video_length, height // self.vae_scale_factor, width // self.vae_scale_factor) image = Image.open(f'test_image/init_image{use_image}.png').convert('RGB') image = preprocess_image(image).to(device) if isinstance(generator, list): image_latent = [ self.vae.encode(image[k : k + 1]).latent_dist.sample(generator[k]) for k in range(batch_size) ] image_latent = torch.cat(image_latent, dim=0).to(device=device) else: image_latent = self.vae.encode(image).latent_dist.sample(generator).to(device=device) image_latent = torch.nn.functional.interpolate(image_latent, size=[shape[-2], shape[-1]]) image_latent_padding = image_latent.clone() * 0.18215 mask = torch.zeros((shape[0], 1, shape[2], shape[3], shape[4])).to(device)
mask_coef = prepare_mask_coef(video_length, 0, kwargs['mask_sim_range'])
5
2023-12-21 03:29:34+00:00
24k
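In the ValidationPipeline record above, enable_sequential_cpu_offload has an empty branch between its if and else because the dataset factors import statements out of all_code; the completed method below restores the missing line, matching the upstream Tune-A-Video pipeline this file adapts (everything except the reconstructed import is verbatim from the record):

import torch
from diffusers.utils import is_accelerate_available  # assumed to come from the record's stripped imports

def enable_sequential_cpu_offload(self, gpu_id=0):
    if is_accelerate_available():
        from accelerate import cpu_offload  # reconstructed: the only line the crop removed
    else:
        raise ImportError("Please install accelerate via `pip install accelerate`")
    device = torch.device(f"cuda:{gpu_id}")
    for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]:
        if cpu_offloaded_model is not None:
            cpu_offload(cpu_offloaded_model, device)

The record's preprocess_image maps PIL inputs into the VAE's expected range; the same transform as a self-contained sketch (the function name to_diffusion_tensor is illustrative, not from the record):

import numpy as np
import torch
from PIL import Image

def to_diffusion_tensor(img: Image.Image) -> torch.Tensor:
    # Crop width/height down to multiples of 8 (the VAE downsamples by 8),
    # rescale pixels from [0, 255] to [-1, 1], and return NCHW layout.
    w, h = img.size
    w, h = w - w % 8, h - h % 8
    arr = np.asarray(img.resize((w, h), resample=Image.Resampling.LANCZOS), dtype=np.float32) / 255.0
    if arr.ndim == 2:  # grayscale input: add a trailing channel axis
        arr = arr[..., None]
    tensor = torch.from_numpy(arr).permute(2, 0, 1).unsqueeze(0)
    return 2.0 * tensor - 1.0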
chinhsuanwu/ifusion
model/zero123.py
[ { "identifier": "inject_trainable_lora_extended", "path": "ldm/lora.py", "snippet": "def inject_trainable_lora_extended(\n model: nn.Module,\n target_replace_module: Set[str] = UNET_EXTENDED_TARGET_REPLACE,\n r: int = 4,\n loras=None, # path to lora .pt\n eval=True,\n):\n \"\"\"\n ...
import itertools import torch import torch.nn as nn from dataclasses import dataclass from diffusers import DDIMScheduler from einops import rearrange from omegaconf import OmegaConf from ldm.lora import ( inject_trainable_lora_extended, monkeypatch_remove_lora, save_lora_weight, ) from ldm.models.diffusion.ddpm import LatentDiffusion from ldm.util import load_model_from_config from util.pose import make_T from util.typing import * from util.util import default
15,731
@dataclass class Config: pretrained_model_name_or_path: str = "ldm/ckpt/zero123-xl.ckpt" pretrained_config: str = "ldm/ckpt/sd-objaverse-finetune-c_concat-256.yaml" vram_O: bool = False min_step_percent: float = 0.02 max_step_percent: float = 0.98 config: Config def __init__(self, **kwargs) -> None: super().__init__() self.config = OmegaConf.structured(self.Config(**kwargs)) self.device = "cuda" self.require_grad_params = [] self.configure() def configure(self) -> None: print("[INFO] Loading Zero123...") self.pretrained_config = OmegaConf.load(self.config.pretrained_config) self.weights_dtype = torch.float32 self.model: LatentDiffusion = load_model_from_config( self.pretrained_config, self.config.pretrained_model_name_or_path, device=self.device, vram_O=self.config.vram_O, ) for p in self.model.parameters(): p.requires_grad_(False) self.num_train_timesteps = self.pretrained_config.model.params.timesteps self.scheduler = DDIMScheduler( self.num_train_timesteps, self.pretrained_config.model.params.linear_start, self.pretrained_config.model.params.linear_end, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, steps_offset=1, ) self.num_train_timesteps = self.scheduler.config.num_train_timesteps self.set_min_max_steps( min_step_percent=self.config.min_step_percent, max_step_percent=self.config.max_step_percent, ) print("[INFO] Loaded Zero123") @torch.cuda.amp.autocast(enabled=False) def set_min_max_steps( self, min_step_percent: float = 0.02, max_step_percent: float = 0.98 ): self.min_step = int(self.num_train_timesteps * min_step_percent) self.max_step = int(self.num_train_timesteps * max_step_percent) @torch.cuda.amp.autocast(enabled=False) @torch.no_grad() def get_image_embeds( self, image: Float[Tensor, "B 3 256 256"] ) -> Tuple[Float[Tensor, "B 1 768"], Float[Tensor, "B 4 32 32"]]: c_crossattn = self.model.get_learned_conditioning(image.to(self.weights_dtype)) c_concat = self.model.encode_first_stage(image.to(self.weights_dtype)).mode() return c_crossattn, c_concat @torch.cuda.amp.autocast(enabled=False) def encode_image( self, image: Float[Tensor, "B 3 256 256"] ) -> Float[Tensor, "B 4 32 32"]: input_dtype = image.dtype latent = self.model.get_first_stage_encoding( self.model.encode_first_stage(image.to(self.weights_dtype)) ) return latent.to(input_dtype) # [B, 4, 32, 32] Latent space image @torch.cuda.amp.autocast(enabled=False) def decode_latent( self, latent: Float[Tensor, "B 4 H W"], ) -> Float[Tensor, "B 3 512 512"]: input_dtype = latent.dtype image = self.model.decode_first_stage(latent) image = (image * 0.5 + 0.5).clamp(0, 1) return image.to(input_dtype) @staticmethod @torch.no_grad() def make_cond(cond): """Add zeros to the beginning of cond""" return {k: [torch.cat([torch.zeros_like(v), v])] for k, v in cond.items()} @torch.cuda.amp.autocast(enabled=False) @torch.no_grad() def clip_camera_projection( self, theta: Float[Tensor, "B"], azimuth: Float[Tensor, "B"], distance: Float[Tensor, "B"], c_crossattn: Float[Tensor, "B 1 768"], in_deg: bool = False, ): T = make_T(theta, azimuth, distance, in_deg=in_deg).T[:, None, :] clip_emb = self.model.cc_projection(torch.cat([c_crossattn, T], dim=-1)) return clip_emb def inject_lora( self, ckpt_fp: str = None, rank: int = 12, target_replace_module: List[str] = ["CrossAttention", "GEGLU"], eval: bool = False, ): print( f"[INFO] Injecting LoRA from " + (str(ckpt_fp) if ckpt_fp is not None else "scratch"), )
class Zero123(nn.Module): @dataclass class Config: pretrained_model_name_or_path: str = "ldm/ckpt/zero123-xl.ckpt" pretrained_config: str = "ldm/ckpt/sd-objaverse-finetune-c_concat-256.yaml" vram_O: bool = False min_step_percent: float = 0.02 max_step_percent: float = 0.98 config: Config def __init__(self, **kwargs) -> None: super().__init__() self.config = OmegaConf.structured(self.Config(**kwargs)) self.device = "cuda" self.require_grad_params = [] self.configure() def configure(self) -> None: print("[INFO] Loading Zero123...") self.pretrained_config = OmegaConf.load(self.config.pretrained_config) self.weights_dtype = torch.float32 self.model: LatentDiffusion = load_model_from_config( self.pretrained_config, self.config.pretrained_model_name_or_path, device=self.device, vram_O=self.config.vram_O, ) for p in self.model.parameters(): p.requires_grad_(False) self.num_train_timesteps = self.pretrained_config.model.params.timesteps self.scheduler = DDIMScheduler( self.num_train_timesteps, self.pretrained_config.model.params.linear_start, self.pretrained_config.model.params.linear_end, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, steps_offset=1, ) self.num_train_timesteps = self.scheduler.config.num_train_timesteps self.set_min_max_steps( min_step_percent=self.config.min_step_percent, max_step_percent=self.config.max_step_percent, ) print("[INFO] Loaded Zero123") @torch.cuda.amp.autocast(enabled=False) def set_min_max_steps( self, min_step_percent: float = 0.02, max_step_percent: float = 0.98 ): self.min_step = int(self.num_train_timesteps * min_step_percent) self.max_step = int(self.num_train_timesteps * max_step_percent) @torch.cuda.amp.autocast(enabled=False) @torch.no_grad() def get_image_embeds( self, image: Float[Tensor, "B 3 256 256"] ) -> Tuple[Float[Tensor, "B 1 768"], Float[Tensor, "B 4 32 32"]]: c_crossattn = self.model.get_learned_conditioning(image.to(self.weights_dtype)) c_concat = self.model.encode_first_stage(image.to(self.weights_dtype)).mode() return c_crossattn, c_concat @torch.cuda.amp.autocast(enabled=False) def encode_image( self, image: Float[Tensor, "B 3 256 256"] ) -> Float[Tensor, "B 4 32 32"]: input_dtype = image.dtype latent = self.model.get_first_stage_encoding( self.model.encode_first_stage(image.to(self.weights_dtype)) ) return latent.to(input_dtype) # [B, 4, 32, 32] Latent space image @torch.cuda.amp.autocast(enabled=False) def decode_latent( self, latent: Float[Tensor, "B 4 H W"], ) -> Float[Tensor, "B 3 512 512"]: input_dtype = latent.dtype image = self.model.decode_first_stage(latent) image = (image * 0.5 + 0.5).clamp(0, 1) return image.to(input_dtype) @staticmethod @torch.no_grad() def make_cond(cond): """Add zeros to the beginning of cond""" return {k: [torch.cat([torch.zeros_like(v), v])] for k, v in cond.items()} @torch.cuda.amp.autocast(enabled=False) @torch.no_grad() def clip_camera_projection( self, theta: Float[Tensor, "B"], azimuth: Float[Tensor, "B"], distance: Float[Tensor, "B"], c_crossattn: Float[Tensor, "B 1 768"], in_deg: bool = False, ): T = make_T(theta, azimuth, distance, in_deg=in_deg).T[:, None, :] clip_emb = self.model.cc_projection(torch.cat([c_crossattn, T], dim=-1)) return clip_emb def inject_lora( self, ckpt_fp: str = None, rank: int = 12, target_replace_module: List[str] = ["CrossAttention", "GEGLU"], eval: bool = False, ): print( f"[INFO] Injecting LoRA from " + (str(ckpt_fp) if ckpt_fp is not None else "scratch"), )
lora_params, _ = inject_trainable_lora_extended(
0
2023-12-17 12:45:38+00:00
24k
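Zero123.make_cond in the record above prepares classifier-free guidance by stacking a zeroed copy in front of each conditioning tensor, so one forward pass covers both the unconditional and conditional branches; the trick in isolation, runnable as-is:

import torch

def make_cond(cond: dict) -> dict:
    # Prepend a zeroed "unconditional" copy to each conditioning tensor,
    # mirroring Zero123.make_cond from the record.
    return {k: [torch.cat([torch.zeros_like(v), v])] for k, v in cond.items()}

cond = {"c_crossattn": torch.randn(2, 1, 768), "c_concat": torch.randn(2, 4, 32, 32)}
uc = make_cond(cond)
assert uc["c_crossattn"][0].shape == (4, 1, 768)  # uncond batch stacked before cond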
penghao-wu/vstar
VisualSearch/utils/dataset.py
[ { "identifier": "conversation", "path": "VisualSearch/model/llava/conversation.py", "snippet": "class SeparatorStyle(Enum):\nclass Conversation:\n SINGLE = auto()\n TWO = auto()\n MPT = auto()\n PLAIN = auto()\n LLAMA_2 = auto()\n W, H = image.size\n ...
import glob import os import random import cv2 import numpy as np import torch import torch.nn.functional as F from PIL import Image from pycocotools import mask from transformers import CLIPImageProcessor from transformers import OwlViTProcessor from VisualSearch.model.llava import conversation as conversation_lib from VisualSearch.model.llava.constants import (IGNORE_INDEX, IMAGE_TOKEN_INDEX) from VisualSearch.model.llava.mm_utils import tokenizer_image_token from VisualSearch.utils.data_processing import get_mask_from_json from VisualSearch.utils.refer import REFER from VisualSearch.utils.refer_seg_dataset import ReferSegDataset from VisualSearch.utils.general_segdet_dataset import SegDetDataset from VisualSearch.utils.mixed_grounding_dataset import MixedGroundingDataset from VisualSearch.utils.vqa_dataset import VQADataset from VisualSearch.utils.utils import (DEFAULT_IM_END_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IMAGE_TOKEN) from VisualSearch.utils.utils import box_xyxy_to_cxcywh, expand2square
16,388
def __getitem__(self, idx): ind = np.random.choice(list(range(len(self.datasets))), p=self.sample_rate) data = self.all_datasets[ind] inference = False return *data[0], inference class ValDataset(torch.utils.data.Dataset): pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1) pixel_std = torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1) img_size = 1024 ignore_label = 255 def __init__( self, base_dir, tokenizer, vision_tower, val_dataset, ): self.base_dir = base_dir splits = val_dataset.split("|") if len(splits) == 2: ds, split = splits images = glob.glob( os.path.join(self.base_dir, "reason_seg", ds, split, "*.jpg") ) self.images = images self.data_type = "reason_seg" elif len(splits) == 3: self.base_dir = os.path.join(self.base_dir, 'refer_seg') ds, splitBy, split = splits refer_api = REFER(self.base_dir, ds, splitBy) ref_ids_val = refer_api.getRefIds(split=split) images_ids_val = refer_api.getImgIds(ref_ids=ref_ids_val) refs_val = refer_api.loadRefs(ref_ids=ref_ids_val) refer_seg_ds = {} refer_seg_ds["images"] = [] loaded_images = refer_api.loadImgs(image_ids=images_ids_val) for item in loaded_images: item = item.copy() if ds == "refclef": item["file_name"] = os.path.join( self.base_dir, "images/saiapr_tc-12", item["file_name"] ) elif ds in ["refcoco", "refcoco+", "refcocog", "grefcoco"]: item["file_name"] = os.path.join( self.base_dir, "images/mscoco/images/train2014", item["file_name"], ) refer_seg_ds["images"].append(item) refer_seg_ds["annotations"] = refer_api.Anns # anns_val img2refs = {} for ref in refs_val: image_id = ref["image_id"] img2refs[image_id] = img2refs.get(image_id, []) + [ ref, ] refer_seg_ds["img2refs"] = img2refs self.refer_seg_ds = refer_seg_ds self.data_type = "refer_seg" self.ds = ds self.tokenizer = tokenizer self.transform = OwlViTProcessor.from_pretrained("google/owlvit-base-patch16") self.clip_image_processor = CLIPImageProcessor.from_pretrained(vision_tower) def __len__(self): if self.data_type == "refer_seg": return len(self.refer_seg_ds["images"]) else: return len(self.images) def preprocess(self, x: torch.Tensor) -> torch.Tensor: """Normalize pixel values and pad to a square input.""" # Normalize colors x = (x - self.pixel_mean) / self.pixel_std # Pad h, w = x.shape[-2:] padh = self.img_size - h padw = self.img_size - w x = F.pad(x, (0, padw, 0, padh)) return x def __getitem__(self, idx): if self.data_type == "refer_seg": refer_seg_ds = self.refer_seg_ds images = refer_seg_ds["images"] annotations = refer_seg_ds["annotations"] img2refs = refer_seg_ds["img2refs"] image_info = images[idx] image_path = image_info["file_name"] image_id = image_info["id"] refs = img2refs[image_id] if len(refs) == 0: raise ValueError("image {} has no refs".format(image_id)) sents = [] ann_ids = [] for ref in refs: for sent in ref["sentences"]: sents.append(sent["sent"].strip().lower()) ann_ids.append(ref["ann_id"]) sampled_sents = sents sampled_ann_ids = ann_ids image = cv2.imread(image_path) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) is_sentence = False else: image_path = self.images[idx] image = cv2.imread(image_path) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) json_path = image_path.replace(".jpg", ".json")
cv2.setNumThreads(1) def collate_fn( batch, tokenizer=None, conv_type="llava_v1", use_mm_start_end=True, local_rank=-1 ): image_path_list = [] images_list = [] images_clip_list = [] conversation_list = [] masks_list = [] label_list = [] bboxes_labels_list = [] bboxes_valid_list = [] masks_valid_list = [] resize_list = [] questions_list = [] sampled_classes_list = [] offset_list = [0] cnt = 0 inferences = [] for ( image_path, images, images_clip, conversations, masks, label, bboxes_labels, bboxes_valid, masks_valid, resize, questions, sampled_classes, inference, ) in batch: image_path_list.append(image_path) images_list.append(images) images_clip_list.append(images_clip) conversation_list.extend(conversations) label_list.append(label) masks_list.append(masks.float()) bboxes_labels_list.extend(bboxes_labels) bboxes_valid_list.extend(bboxes_valid) masks_valid_list.append(torch.tensor(masks_valid)) resize_list.append(resize) questions_list.append(questions) sampled_classes_list.append(sampled_classes) cnt += len(conversations) offset_list.append(cnt) inferences.append(inference) if use_mm_start_end: # replace <image> token for i in range(len(conversation_list)): replace_token = DEFAULT_IMAGE_TOKEN replace_token = ( DEFAULT_IM_START_TOKEN + replace_token + DEFAULT_IM_END_TOKEN ) conversation_list[i] = conversation_list[i].replace( DEFAULT_IMAGE_TOKEN, replace_token ) input_ids = [ tokenizer_image_token(prompt, tokenizer, return_tensors="pt") for prompt in conversation_list ] input_ids = torch.nn.utils.rnn.pad_sequence( input_ids, batch_first=True, padding_value=tokenizer.pad_token_id ) attention_masks = input_ids.ne(tokenizer.pad_token_id) for i in range(len(bboxes_valid_list)): bboxes_valid = bboxes_valid_list[i] attention_mask = attention_masks[i] if not bboxes_valid: attention_mask = attention_mask & input_ids[i].ne(tokenizer("[LOC]", add_special_tokens=False).input_ids[0]) attention_masks[i] = attention_mask conv = conversation_lib.default_conversation.copy() targets = input_ids.clone() if conv_type == "llava_v1": sep = conv.sep + conv.roles[1] + ": " else: sep = "[/INST] " for conversation, target in zip(conversation_list, targets): total_len = int(target.ne(tokenizer.pad_token_id).sum()) rounds = conversation.split(conv.sep2) cur_len = 1 target[:cur_len] = IGNORE_INDEX for i, rou in enumerate(rounds): if rou == "": break parts = rou.split(sep) # if len(parts) != 2: # break assert len(parts) == 2, (len(parts), rou) parts[0] += sep if DEFAULT_IMAGE_TOKEN in conversation: round_len = len(tokenizer_image_token(rou, tokenizer)) instruction_len = len(tokenizer_image_token(parts[0], tokenizer)) - 2 else: round_len = len(tokenizer(rou).input_ids) instruction_len = len(tokenizer(parts[0]).input_ids) - 2 target[cur_len : cur_len + instruction_len] = IGNORE_INDEX cur_len += round_len target[cur_len:] = IGNORE_INDEX if False: z = target.clone() z = torch.where(z == IGNORE_INDEX, tokenizer.unk_token_id, z) if local_rank == 0: print( "conversation: ", conversation, "tokenizer.decode(z): ", tokenizer.decode(z), ) if cur_len < tokenizer.model_max_length: assert cur_len == total_len if inferences[0] == False: truncate_len = tokenizer.model_max_length - 255 if input_ids.shape[1] > truncate_len: input_ids = input_ids[:, :truncate_len] targets = targets[:, :truncate_len] attention_masks = attention_masks[:, :truncate_len] return { "image_paths": image_path_list, "images": torch.stack(images_list, dim=0), "images_clip": torch.stack(images_clip_list, dim=0), "input_ids": input_ids, "labels": targets, 
"bboxes_labels_list": bboxes_labels_list, "bboxes_valid_list": torch.tensor(bboxes_valid_list), "masks_valid_list": masks_valid_list, "attention_masks": attention_masks, "masks_list": masks_list, "label_list": label_list, "resize_list": resize_list, "offset": torch.LongTensor(offset_list), "questions_list": questions_list, "sampled_classes_list": sampled_classes_list, "inference": inferences[0], "conversation_list": conversation_list, } class HybridDataset(torch.utils.data.Dataset): pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1) pixel_std = torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1) img_size = 1024 ignore_label = 255 def __init__( self, base_dir, tokenizer, vision_tower, samples_per_epoch=500 * 8 * 2 * 10, precision: str = "fp32", num_classes_per_sample: int = 3, exclude_val=False, dataset="general_segdet||refer_seg||vqa||reason_seg", sample_rate=[9, 3, 3, 1], general_segdet_data="objects365||cocostuff||paco_lvis", general_segdet_sample_rate=[2,1,1], refer_seg_data="refclef||refcoco||refcoco+||refcocog", vqa_data="possible_locations_conv_86k||llava_instruct_80k", vqa_sample_rate=[2,1], ): self.exclude_val = exclude_val self.dataset = dataset self.samples_per_epoch = samples_per_epoch self.num_classes_per_sample = num_classes_per_sample sample_rate = np.array(sample_rate) self.sample_rate = sample_rate / sample_rate.sum() self.base_dir = base_dir self.tokenizer = tokenizer self.precision = precision self.datasets = dataset.split("||") self.all_datasets = [] for dataset in self.datasets: if dataset == "general_segdet": self.all_datasets.append( SegDetDataset( base_dir, tokenizer, vision_tower, samples_per_epoch, precision, num_classes_per_sample, exclude_val, general_segdet_data, general_segdet_sample_rate, ) ) elif dataset == "refer_seg": self.all_datasets.append( ReferSegDataset( base_dir, tokenizer, vision_tower, samples_per_epoch, precision, num_classes_per_sample, exclude_val, refer_seg_data, ) ) elif dataset == "vqa": self.all_datasets.append( VQADataset( base_dir, tokenizer, vision_tower, samples_per_epoch, precision, num_classes_per_sample, exclude_val, vqa_data, vqa_sample_rate, ) ) elif dataset == "mixed_grounding": self.all_datasets.append( MixedGroundingDataset( base_dir, tokenizer, vision_tower, samples_per_epoch, precision, num_classes_per_sample, exclude_val, ) ) def __len__(self): return self.samples_per_epoch def __getitem__(self, idx): ind = np.random.choice(list(range(len(self.datasets))), p=self.sample_rate) data = self.all_datasets[ind] inference = False return *data[0], inference class ValDataset(torch.utils.data.Dataset): pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1) pixel_std = torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1) img_size = 1024 ignore_label = 255 def __init__( self, base_dir, tokenizer, vision_tower, val_dataset, ): self.base_dir = base_dir splits = val_dataset.split("|") if len(splits) == 2: ds, split = splits images = glob.glob( os.path.join(self.base_dir, "reason_seg", ds, split, "*.jpg") ) self.images = images self.data_type = "reason_seg" elif len(splits) == 3: self.base_dir = os.path.join(self.base_dir, 'refer_seg') ds, splitBy, split = splits refer_api = REFER(self.base_dir, ds, splitBy) ref_ids_val = refer_api.getRefIds(split=split) images_ids_val = refer_api.getImgIds(ref_ids=ref_ids_val) refs_val = refer_api.loadRefs(ref_ids=ref_ids_val) refer_seg_ds = {} refer_seg_ds["images"] = [] loaded_images = refer_api.loadImgs(image_ids=images_ids_val) for item in loaded_images: item = item.copy() 
if ds == "refclef": item["file_name"] = os.path.join( self.base_dir, "images/saiapr_tc-12", item["file_name"] ) elif ds in ["refcoco", "refcoco+", "refcocog", "grefcoco"]: item["file_name"] = os.path.join( self.base_dir, "images/mscoco/images/train2014", item["file_name"], ) refer_seg_ds["images"].append(item) refer_seg_ds["annotations"] = refer_api.Anns # anns_val img2refs = {} for ref in refs_val: image_id = ref["image_id"] img2refs[image_id] = img2refs.get(image_id, []) + [ ref, ] refer_seg_ds["img2refs"] = img2refs self.refer_seg_ds = refer_seg_ds self.data_type = "refer_seg" self.ds = ds self.tokenizer = tokenizer self.transform = OwlViTProcessor.from_pretrained("google/owlvit-base-patch16") self.clip_image_processor = CLIPImageProcessor.from_pretrained(vision_tower) def __len__(self): if self.data_type == "refer_seg": return len(self.refer_seg_ds["images"]) else: return len(self.images) def preprocess(self, x: torch.Tensor) -> torch.Tensor: """Normalize pixel values and pad to a square input.""" # Normalize colors x = (x - self.pixel_mean) / self.pixel_std # Pad h, w = x.shape[-2:] padh = self.img_size - h padw = self.img_size - w x = F.pad(x, (0, padw, 0, padh)) return x def __getitem__(self, idx): if self.data_type == "refer_seg": refer_seg_ds = self.refer_seg_ds images = refer_seg_ds["images"] annotations = refer_seg_ds["annotations"] img2refs = refer_seg_ds["img2refs"] image_info = images[idx] image_path = image_info["file_name"] image_id = image_info["id"] refs = img2refs[image_id] if len(refs) == 0: raise ValueError("image {} has no refs".format(image_id)) sents = [] ann_ids = [] for ref in refs: for sent in ref["sentences"]: sents.append(sent["sent"].strip().lower()) ann_ids.append(ref["ann_id"]) sampled_sents = sents sampled_ann_ids = ann_ids image = cv2.imread(image_path) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) is_sentence = False else: image_path = self.images[idx] image = cv2.imread(image_path) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) json_path = image_path.replace(".jpg", ".json")
mask_json, sampled_sents, is_sentence = get_mask_from_json(json_path, image)
5
2023-12-15 14:58:24+00:00
24k
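HybridDataset in the record above mixes its sub-datasets by normalizing sample_rate into a probability vector and drawing one dataset index per __getitem__ call; a short empirical check of the resulting mix, using the record's default rates:

import numpy as np

# sample_rate [9, 3, 3, 1] normalizes to [0.5625, 0.1875, 0.1875, 0.0625],
# so roughly 56% of training samples come from the first sub-dataset.
sample_rate = np.array([9, 3, 3, 1], dtype=float)
p = sample_rate / sample_rate.sum()
rng = np.random.default_rng(0)
draws = rng.choice(len(p), size=100_000, p=p)
print(np.bincount(draws, minlength=len(p)) / draws.size)  # empirically close to p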
sinoyou/nelf-pro
nerfstudio/data/utils/dataloaders.py
[ { "identifier": "Cameras", "path": "nerfstudio/cameras/cameras.py", "snippet": "class Cameras(TensorDataclass):\n \"\"\"Dataparser outputs for the image dataset and the ray generator.\n\n Note: currently only supports cameras with the same principal points and types. The reason we type\n the fo...
import concurrent.futures import multiprocessing import random import torch from abc import abstractmethod from typing import Dict, Optional, Tuple, Union from rich.progress import Console, track from torch.utils.data import Dataset from torch.utils.data.dataloader import DataLoader from nerfstudio.cameras.cameras import Cameras from nerfstudio.cameras.rays import RayBundle from nerfstudio.data.datasets.base_dataset import InputDataset from nerfstudio.data.utils.nerfstudio_collate import nerfstudio_collate from nerfstudio.utils.misc import get_dict_to_torch
16,976
self.first_time = True self.cached_collated_batch = None if self.cache_all_images: CONSOLE.print(f"Caching all {len(self.dataset)} images.") if len(self.dataset) > 500: CONSOLE.print( "[bold yellow]Warning: If you run out of memory, try reducing the number of images to sample from." ) self.cached_collated_batch = self._get_collated_batch() elif self.num_times_to_repeat_images == -1: CONSOLE.print( f"Caching {self.num_images_to_sample_from} out of {len(self.dataset)} images, without resampling." ) else: CONSOLE.print( f"Caching {self.num_images_to_sample_from} out of {len(self.dataset)} images, " f"resampling every {self.num_times_to_repeat_images} iters." ) def __getitem__(self, idx): return self.dataset.__getitem__(idx) def _get_batch_list(self): """Returns a list of batches from the dataset attribute.""" # todo: to enable local image caching, the samples indices should be consecutive rather than random. # each time when we switches the batch size, we should print out the image name lists. (in a sorting manner) # indices = random.sample(range(len(self.dataset)), k=self.num_images_to_sample_from) # consecutive sampling start_indices = random.sample(range(len(self.dataset)), 1) indices_circle_list = list(range(len(self.dataset))) + list(range(len(self.dataset))) indices = indices_circle_list[start_indices[0]:start_indices[0]+self.num_images_to_sample_from] random.shuffle(indices) # start_or_end_indices = random.sample(range(len(self.dataset) - self.num_images_to_sample_from + 1), 1)[0] # indices_list = list(range(len(self.dataset))) # indices = indices_list[start_or_end_indices:start_or_end_indices+self.num_images_to_sample_from] # # random.shuffle(indices) batch_list = [] results = [] num_threads = int(self.num_workers) * 4 num_threads = min(num_threads, multiprocessing.cpu_count() - 1) num_threads = max(num_threads, 1) with concurrent.futures.ThreadPoolExecutor(max_workers=num_threads) as executor: for idx in indices: res = executor.submit(self.dataset.__getitem__, idx) results.append(res) for res in track( results, description="Loading data batch", transient=True, disable=(self.num_images_to_sample_from == 1) ): batch_list.append(res.result()) # print out filenames cached_image_filenames = [batch["image_filename"] for batch in batch_list] sorted_cached_image_filenames = sorted(cached_image_filenames) CONSOLE.print(f"New Loaded Image filenames: {sorted_cached_image_filenames}") return batch_list def _get_collated_batch(self): """Returns a collated batch.""" batch_list = self._get_batch_list() collated_batch = self.collate_fn(batch_list) collated_batch = get_dict_to_torch(collated_batch, device=self.device, exclude=["image"]) return collated_batch def __iter__(self): while True: if self.cache_all_images: collated_batch = self.cached_collated_batch elif self.first_time or ( self.num_times_to_repeat_images != -1 and self.num_repeated >= self.num_times_to_repeat_images ): # trigger a reset self.num_repeated = 0 collated_batch = self._get_collated_batch() # possibly save a cached item self.cached_collated_batch = collated_batch if self.num_times_to_repeat_images != 0 else None self.first_time = False else: collated_batch = self.cached_collated_batch self.num_repeated += 1 yield collated_batch class EvalDataloader(DataLoader): """Evaluation dataloader base class Args: input_dataset: InputDataset to load data from device: Device to load data to """ def __init__( self, input_dataset: InputDataset, device: Union[torch.device, str] = "cpu", **kwargs, ): self.input_dataset = input_dataset 
self.cameras = input_dataset.cameras.to(device) self.device = device self.kwargs = kwargs super().__init__(dataset=input_dataset) @abstractmethod def __iter__(self): """Iterates over the dataset""" return self @abstractmethod def __next__(self) -> Tuple[RayBundle, Dict]: """Returns the next batch of data"""
# Copyright 2022 The Nerfstudio Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Code for sampling images from a dataset of images. """ # for multithreading CONSOLE = Console(width=120) class CacheDataloader(DataLoader): """Collated image dataset that implements caching of default-pytorch-collatable data. Creates batches of the InputDataset return type. Args: dataset: Dataset to sample from. num_samples_to_collate: How many images to sample rays for each batch. -1 for all images. num_times_to_repeat_images: How often to collate new images. -1 to never pick new images. device: Device to perform computation. collate_fn: The function we will use to collate our training data """ def __init__( self, dataset: Dataset, num_images_to_sample_from: int = -1, num_times_to_repeat_images: int = -1, device: Union[torch.device, str] = "cpu", collate_fn=nerfstudio_collate, **kwargs, ): self.dataset = dataset super().__init__(dataset=dataset, **kwargs) # This will set self.dataset self.num_times_to_repeat_images = num_times_to_repeat_images self.cache_all_images = (num_images_to_sample_from == -1) or (num_images_to_sample_from >= len(self.dataset)) self.num_images_to_sample_from = len(self.dataset) if self.cache_all_images else num_images_to_sample_from self.device = device self.collate_fn = collate_fn self.num_workers = kwargs.get("num_workers", 0) self.num_repeated = self.num_times_to_repeat_images # starting value self.first_time = True self.cached_collated_batch = None if self.cache_all_images: CONSOLE.print(f"Caching all {len(self.dataset)} images.") if len(self.dataset) > 500: CONSOLE.print( "[bold yellow]Warning: If you run out of memory, try reducing the number of images to sample from." ) self.cached_collated_batch = self._get_collated_batch() elif self.num_times_to_repeat_images == -1: CONSOLE.print( f"Caching {self.num_images_to_sample_from} out of {len(self.dataset)} images, without resampling." ) else: CONSOLE.print( f"Caching {self.num_images_to_sample_from} out of {len(self.dataset)} images, " f"resampling every {self.num_times_to_repeat_images} iters." ) def __getitem__(self, idx): return self.dataset.__getitem__(idx) def _get_batch_list(self): """Returns a list of batches from the dataset attribute.""" # todo: to enable local image caching, the samples indices should be consecutive rather than random. # each time when we switches the batch size, we should print out the image name lists. 
(in a sorting manner) # indices = random.sample(range(len(self.dataset)), k=self.num_images_to_sample_from) # consecutive sampling start_indices = random.sample(range(len(self.dataset)), 1) indices_circle_list = list(range(len(self.dataset))) + list(range(len(self.dataset))) indices = indices_circle_list[start_indices[0]:start_indices[0]+self.num_images_to_sample_from] random.shuffle(indices) # start_or_end_indices = random.sample(range(len(self.dataset) - self.num_images_to_sample_from + 1), 1)[0] # indices_list = list(range(len(self.dataset))) # indices = indices_list[start_or_end_indices:start_or_end_indices+self.num_images_to_sample_from] # # random.shuffle(indices) batch_list = [] results = [] num_threads = int(self.num_workers) * 4 num_threads = min(num_threads, multiprocessing.cpu_count() - 1) num_threads = max(num_threads, 1) with concurrent.futures.ThreadPoolExecutor(max_workers=num_threads) as executor: for idx in indices: res = executor.submit(self.dataset.__getitem__, idx) results.append(res) for res in track( results, description="Loading data batch", transient=True, disable=(self.num_images_to_sample_from == 1) ): batch_list.append(res.result()) # print out filenames cached_image_filenames = [batch["image_filename"] for batch in batch_list] sorted_cached_image_filenames = sorted(cached_image_filenames) CONSOLE.print(f"New Loaded Image filenames: {sorted_cached_image_filenames}") return batch_list def _get_collated_batch(self): """Returns a collated batch.""" batch_list = self._get_batch_list() collated_batch = self.collate_fn(batch_list) collated_batch = get_dict_to_torch(collated_batch, device=self.device, exclude=["image"]) return collated_batch def __iter__(self): while True: if self.cache_all_images: collated_batch = self.cached_collated_batch elif self.first_time or ( self.num_times_to_repeat_images != -1 and self.num_repeated >= self.num_times_to_repeat_images ): # trigger a reset self.num_repeated = 0 collated_batch = self._get_collated_batch() # possibly save a cached item self.cached_collated_batch = collated_batch if self.num_times_to_repeat_images != 0 else None self.first_time = False else: collated_batch = self.cached_collated_batch self.num_repeated += 1 yield collated_batch class EvalDataloader(DataLoader): """Evaluation dataloader base class Args: input_dataset: InputDataset to load data from device: Device to load data to """ def __init__( self, input_dataset: InputDataset, device: Union[torch.device, str] = "cpu", **kwargs, ): self.input_dataset = input_dataset self.cameras = input_dataset.cameras.to(device) self.device = device self.kwargs = kwargs super().__init__(dataset=input_dataset) @abstractmethod def __iter__(self): """Iterates over the dataset""" return self @abstractmethod def __next__(self) -> Tuple[RayBundle, Dict]: """Returns the next batch of data"""
def get_camera(self, image_idx: int = 0) -> Cameras:
0
2023-12-15 20:07:22+00:00
24k
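CacheDataloader._get_batch_list in the record above replaces uniform random sampling with a consecutive window that may wrap around the end of the dataset (the record's comment ties this to local image caching); the index logic in isolation, with random.randrange standing in for the record's random.sample(..., 1)[0]:

import random

def consecutive_indices(n_images: int, n_sample: int) -> list:
    # Pick a random start, take a contiguous window over a doubled index
    # list so the window can wrap past the dataset end, then shuffle
    # within the window, as in the record's _get_batch_list.
    start = random.randrange(n_images)
    circle = list(range(n_images)) * 2
    window = circle[start:start + n_sample]
    random.shuffle(window)
    return window

random.seed(0)
print(consecutive_indices(10, 4))  # four consecutive ids (mod 10), shuffled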
amazon-science/c2f-seg
data/dataloader_transformer.py
[ { "identifier": "FishBowl", "path": "data/dataloader_Fishbowl.py", "snippet": "class FishBowl(object):\n def __init__(self, config, mode, subtest=None):\n self.datatype = mode\n data_dir = config.root_path\n\n self.img_path = os.path.join(data_dir, self.datatype+\"_data\", self.d...
from data.dataloader_Fishbowl import FishBowl from data.dataloader_MOViD_A import MOViD_A from data.dataloader_KINS import Kins_Fusion_dataset, KINS_Aisformer_VRSP_Intersection from data.dataloader_COCOA import COCOA_Fusion_dataset, COCOA_VRSP
21,312
def load_dataset(config, args, mode): if mode=="train": if args.dataset=="KINS": train_dataset = Kins_Fusion_dataset(config, mode='train') test_dataset = Kins_Fusion_dataset(config, mode='test') elif args.dataset=="COCOA": train_dataset = COCOA_Fusion_dataset(config, mode='train') test_dataset = COCOA_Fusion_dataset(config, mode='test') elif args.dataset=="Fishbowl": train_dataset = FishBowl(config, mode='train') test_dataset = FishBowl(config, mode='test') elif args.dataset=="MOViD_A": train_dataset = MOViD_A(config, mode='train') test_dataset = MOViD_A(config, mode='test') return train_dataset, test_dataset else: if args.dataset=="KINS":
def load_dataset(config, args, mode): if mode=="train": if args.dataset=="KINS": train_dataset = Kins_Fusion_dataset(config, mode='train') test_dataset = Kins_Fusion_dataset(config, mode='test') elif args.dataset=="COCOA": train_dataset = COCOA_Fusion_dataset(config, mode='train') test_dataset = COCOA_Fusion_dataset(config, mode='test') elif args.dataset=="Fishbowl": train_dataset = FishBowl(config, mode='train') test_dataset = FishBowl(config, mode='test') elif args.dataset=="MOViD_A": train_dataset = MOViD_A(config, mode='train') test_dataset = MOViD_A(config, mode='test') return train_dataset, test_dataset else: if args.dataset=="KINS":
test_dataset = KINS_Aisformer_VRSP_Intersection(config, mode='test')
3
2023-12-21 04:25:47+00:00
24k
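load_dataset in the record above is a plain if/elif dispatch over dataset names; an equivalent table-driven sketch (a refactoring illustration, not the repo's code, with lambdas standing in for the real dataset classes):

# Map dataset names to constructors, then build train/test pairs uniformly.
registry = {
    "KINS": lambda cfg, mode: ("Kins_Fusion_dataset", mode),
    "COCOA": lambda cfg, mode: ("COCOA_Fusion_dataset", mode),
    "Fishbowl": lambda cfg, mode: ("FishBowl", mode),
    "MOViD_A": lambda cfg, mode: ("MOViD_A", mode),
}
ctor = registry["KINS"]
train_dataset, test_dataset = ctor(None, "train"), ctor(None, "test")
print(train_dataset, test_dataset)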
alipay/PainlessInferenceAcceleration
pia/lookahead/models/llama/modeling_llama_batch.py
[ { "identifier": "LookaheadPreTrainedModel", "path": "pia/lookahead/common/pretrained_model_batch.py", "snippet": "class LookaheadPreTrainedModel(PreTrainedModel):\n _batch_generation = True\n _stream_generation = False\n\n def __init__(self, config):\n super().__init__(config=config)\n\n...
import math import os import torch import torch.nn.functional as F import torch.utils.checkpoint from typing import List, Optional, Tuple, Union from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from transformers.activations import ACT2FN from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, \ SequenceClassifierOutputWithPast from transformers.models.llama.configuration_llama import LlamaConfig from transformers.utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, \ replace_return_docstrings from pia.lookahead.common.pretrained_model_batch import LookaheadPreTrainedModel from pia.lookahead.csrc.triton.rms_norm import rmsnorm_wrapper
16,207
# coding=utf-8 # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. # # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX # and OPT implementations in this library. It has been modified from its # original forms to accommodate minor architectural differences compared # to GPT-NeoX and OPT used by the Meta AI team that trained the model. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch LLaMA model.""" # from transformers.modeling_utils import PreTrainedModel os.environ['TOKENIZERS_PARALLELISM'] = 'false' logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "LlamaConfig" # Copied from transformers.models.bart.modeling_bart._make_causal_mask def _make_causal_mask( input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0 ): """ Make causal mask used for bi-directional self-attention. """ bsz, tgt_len = input_ids_shape mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device) mask_cond = torch.arange(mask.size(-1), device=device) mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) mask = mask.to(dtype) if past_key_values_length > 0: mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1) return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) # Copied from transformers.models.bart.modeling_bart._expand_mask def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. """ bsz, src_len = mask.size() tgt_len = tgt_len if tgt_len is not None else src_len expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) inverted_mask = 1.0 - expanded_mask return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) class LlamaRMSNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): """ LlamaRMSNorm is equivalent to T5LayerNorm """ super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states):
# coding=utf-8 # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. # # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX # and OPT implementations in this library. It has been modified from its # original forms to accommodate minor architectural differences compared # to GPT-NeoX and OPT used by the Meta AI team that trained the model. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch LLaMA model.""" # from transformers.modeling_utils import PreTrainedModel os.environ['TOKENIZERS_PARALLELISM'] = 'false' logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "LlamaConfig" # Copied from transformers.models.bart.modeling_bart._make_causal_mask def _make_causal_mask( input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0 ): """ Make causal mask used for bi-directional self-attention. """ bsz, tgt_len = input_ids_shape mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device) mask_cond = torch.arange(mask.size(-1), device=device) mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) mask = mask.to(dtype) if past_key_values_length > 0: mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1) return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) # Copied from transformers.models.bart.modeling_bart._expand_mask def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. """ bsz, src_len = mask.size() tgt_len = tgt_len if tgt_len is not None else src_len expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) inverted_mask = 1.0 - expanded_mask return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) class LlamaRMSNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): """ LlamaRMSNorm is equivalent to T5LayerNorm """ super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states):
return rmsnorm_wrapper(hidden_states, self.weight, eps=self.variance_epsilon)
1
2023-12-19 13:11:38+00:00
24k
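The next_line of the record above routes LlamaRMSNorm.forward through a fused Triton kernel (rmsnorm_wrapper); an eager reference implementation is handy for validating such a kernel. The formulation below is the standard T5/LLaMA RMSNorm and is an assumption, not copied from the record:

import torch

def rms_norm_reference(x: torch.Tensor, weight: torch.Tensor, eps: float = 1e-6) -> torch.Tensor:
    # Scale by the reciprocal root-mean-square of the last dimension,
    # computed in float32 for stability, then apply the learned weight.
    variance = x.to(torch.float32).pow(2).mean(-1, keepdim=True)
    normed = x.to(torch.float32) * torch.rsqrt(variance + eps)
    return weight * normed.to(x.dtype)

x = torch.randn(2, 5, 8, dtype=torch.float16)
w = torch.ones(8, dtype=torch.float16)
print(rms_norm_reference(x, w).shape)  # torch.Size([2, 5, 8])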
MingtaoGuo/AnimateAnyone_unofficial
aldm/aldm.py
[ { "identifier": "conv_nd", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def conv_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D convolution module.\n \"\"\"\n if dims == 1:\n return nn.Conv1d(*args, **kwargs)\n elif dims == 2:\n return nn.Conv2d(...
import einops import torch import torch as th import torch.nn as nn from ldm.modules.diffusionmodules.util import ( conv_nd, linear, zero_module, timestep_embedding, ) from einops import rearrange, repeat from torchvision.utils import make_grid from ldm.modules.attention import SpatialTransformer, SpatialTransformerPlus from ldm.modules.diffusionmodules.openaimodel import ResBlock, TimestepEmbedSequential, Downsample, AttentionBlock, Upsample, normalization, checkpoint, convert_module_to_f16, convert_module_to_f32 from ldm.models.diffusion.ddpm import LatentDiffusion from ldm.util import log_txt_as_img, exists, instantiate_from_config from ldm.models.diffusion.ddim import DDIMSampler from omegaconf.listconfig import ListConfig
19,184
use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ), AttentionBlock( ch, use_checkpoint=use_checkpoint, num_heads=num_heads, num_head_channels=dim_head, use_new_attention_order=use_new_attention_order, ) if not use_spatial_transformer else SpatialTransformer( # always uses a self-attn ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim, disable_self_attn=disable_middle_self_attn, use_linear=use_linear_in_transformer, use_checkpoint=use_checkpoint ), ResBlock( ch, time_embed_dim, dropout, out_channels=ch, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ), ) self._feature_size += ch self.output_blocks = nn.ModuleList([]) for level, mult in list(enumerate(channel_mult))[::-1]: for i in range(self.num_res_blocks[level] + 1): ich = input_block_chans.pop() layers = [ ResBlock( ch + ich, time_embed_dim, dropout, out_channels=model_channels * mult, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ) ] ch = model_channels * mult if ds in attention_resolutions: if num_head_channels == -1: dim_head = ch // num_heads else: num_heads = ch // num_head_channels dim_head = num_head_channels if legacy: #num_heads = 1 dim_head = ch // num_heads if use_spatial_transformer else num_head_channels if exists(disable_self_attentions): disabled_sa = disable_self_attentions[level] else: disabled_sa = False if not exists(num_attention_blocks) or i < num_attention_blocks[level]: layers.append( AttentionBlock( ch, use_checkpoint=use_checkpoint, num_heads=num_heads_upsample, num_head_channels=dim_head, use_new_attention_order=use_new_attention_order, ) if not use_spatial_transformer else SpatialTransformer( ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim, disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer, use_checkpoint=use_checkpoint ) ) if level and i == self.num_res_blocks[level]: out_ch = ch layers.append( ResBlock( ch, time_embed_dim, dropout, out_channels=out_ch, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, up=True, ) if resblock_updown else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch) ) ds //= 2 self.output_blocks.append(TimestepEmbedSequential(*layers)) self._feature_size += ch def convert_to_fp16(self): """ Convert the torso of the model to float16. """ self.input_blocks.apply(convert_module_to_f16) self.middle_block.apply(convert_module_to_f16) self.output_blocks.apply(convert_module_to_f16) def convert_to_fp32(self): """ Convert the torso of the model to float32. """ self.input_blocks.apply(convert_module_to_f32) self.middle_block.apply(convert_module_to_f32) self.output_blocks.apply(convert_module_to_f32) def forward(self, x, timesteps=None, context=None, y=None,**kwargs): """ Apply the model to an input batch. :param x: an [N x C x ...] Tensor of inputs. :param timesteps: a 1-D batch of timesteps. :param context: conditioning plugged in via crossattn :param y: an [N] Tensor of labels, if class-conditional. :return: an [N x C x ...] Tensor of outputs. """ assert (y is not None) == ( self.num_classes is not None ), "must specify y if and only if the model is class-conditional" refs = [] hs = []
class ReferenceNet(nn.Module): """ The full UNet model with attention and timestep embedding. :param in_channels: channels in the input Tensor. :param model_channels: base channel count for the model. :param out_channels: channels in the output Tensor. :param num_res_blocks: number of residual blocks per downsample. :param attention_resolutions: a collection of downsample rates at which attention will take place. May be a set, list, or tuple. For example, if this contains 4, then at 4x downsampling, attention will be used. :param dropout: the dropout probability. :param channel_mult: channel multiplier for each level of the UNet. :param conv_resample: if True, use learned convolutions for upsampling and downsampling. :param dims: determines if the signal is 1D, 2D, or 3D. :param num_classes: if specified (as an int), then this model will be class-conditional with `num_classes` classes. :param use_checkpoint: use gradient checkpointing to reduce memory usage. :param num_heads: the number of attention heads in each attention layer. :param num_heads_channels: if specified, ignore num_heads and instead use a fixed channel width per attention head. :param num_heads_upsample: works with num_heads to set a different number of heads for upsampling. Deprecated. :param use_scale_shift_norm: use a FiLM-like conditioning mechanism. :param resblock_updown: use residual blocks for up/downsampling. :param use_new_attention_order: use a different attention pattern for potentially increased efficiency. """ def __init__( self, image_size, in_channels, model_channels, num_res_blocks, attention_resolutions, dropout=0, channel_mult=(1, 2, 4, 8), conv_resample=True, dims=2, num_classes=None, use_checkpoint=False, use_fp16=False, num_heads=-1, num_head_channels=-1, num_heads_upsample=-1, use_scale_shift_norm=False, resblock_updown=False, use_new_attention_order=False, use_spatial_transformer=False, # custom transformer support transformer_depth=1, # custom transformer support context_dim=None, # custom transformer support n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model legacy=True, disable_self_attentions=None, num_attention_blocks=None, disable_middle_self_attn=False, use_linear_in_transformer=False, ): super().__init__() if use_spatial_transformer: assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...' if context_dim is not None: assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...' 
if type(context_dim) == ListConfig: context_dim = list(context_dim) if num_heads_upsample == -1: num_heads_upsample = num_heads if num_heads == -1: assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set' if num_head_channels == -1: assert num_heads != -1, 'Either num_heads or num_head_channels has to be set' self.image_size = image_size self.in_channels = in_channels self.model_channels = model_channels if isinstance(num_res_blocks, int): self.num_res_blocks = len(channel_mult) * [num_res_blocks] else: if len(num_res_blocks) != len(channel_mult): raise ValueError("provide num_res_blocks either as an int (globally constant) or " "as a list/tuple (per-level) with the same length as channel_mult") self.num_res_blocks = num_res_blocks if disable_self_attentions is not None: # should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not assert len(disable_self_attentions) == len(channel_mult) if num_attention_blocks is not None: assert len(num_attention_blocks) == len(self.num_res_blocks) assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks)))) print(f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. " f"This option has LESS priority than attention_resolutions {attention_resolutions}, " f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, " f"attention will still not be set.") self.attention_resolutions = attention_resolutions self.dropout = dropout self.channel_mult = channel_mult self.conv_resample = conv_resample self.num_classes = num_classes self.use_checkpoint = use_checkpoint self.dtype = th.float16 if use_fp16 else th.float32 self.num_heads = num_heads self.num_head_channels = num_head_channels self.num_heads_upsample = num_heads_upsample self.predict_codebook_ids = n_embed is not None time_embed_dim = model_channels * 4 self.time_embed = nn.Sequential( linear(model_channels, time_embed_dim), nn.SiLU(), linear(time_embed_dim, time_embed_dim), ) self.input_blocks = nn.ModuleList( [ TimestepEmbedSequential( conv_nd(dims, in_channels, model_channels, 3, padding=1) ) ] ) self._feature_size = model_channels input_block_chans = [model_channels] ch = model_channels ds = 1 for level, mult in enumerate(channel_mult): for nr in range(self.num_res_blocks[level]): layers = [ ResBlock( ch, time_embed_dim, dropout, out_channels=mult * model_channels, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ) ] ch = mult * model_channels if ds in attention_resolutions: if num_head_channels == -1: dim_head = ch // num_heads else: num_heads = ch // num_head_channels dim_head = num_head_channels if legacy: #num_heads = 1 dim_head = ch // num_heads if use_spatial_transformer else num_head_channels if exists(disable_self_attentions): disabled_sa = disable_self_attentions[level] else: disabled_sa = False if not exists(num_attention_blocks) or nr < num_attention_blocks[level]: layers.append( AttentionBlock( ch, use_checkpoint=use_checkpoint, num_heads=num_heads, num_head_channels=dim_head, use_new_attention_order=use_new_attention_order, ) if not use_spatial_transformer else SpatialTransformer( ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim, disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer, use_checkpoint=use_checkpoint ) ) self.input_blocks.append(TimestepEmbedSequential(*layers)) self._feature_size += ch input_block_chans.append(ch) if level != 
len(channel_mult) - 1: out_ch = ch self.input_blocks.append( TimestepEmbedSequential( ResBlock( ch, time_embed_dim, dropout, out_channels=out_ch, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, down=True, ) if resblock_updown else Downsample( ch, conv_resample, dims=dims, out_channels=out_ch ) ) ) ch = out_ch input_block_chans.append(ch) ds *= 2 self._feature_size += ch if num_head_channels == -1: dim_head = ch // num_heads else: num_heads = ch // num_head_channels dim_head = num_head_channels if legacy: #num_heads = 1 dim_head = ch // num_heads if use_spatial_transformer else num_head_channels self.middle_block = TimestepEmbedSequential( ResBlock( ch, time_embed_dim, dropout, out_channels=ch, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ), AttentionBlock( ch, use_checkpoint=use_checkpoint, num_heads=num_heads, num_head_channels=dim_head, use_new_attention_order=use_new_attention_order, ) if not use_spatial_transformer else SpatialTransformer( # always uses a self-attn ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim, disable_self_attn=disable_middle_self_attn, use_linear=use_linear_in_transformer, use_checkpoint=use_checkpoint ), ResBlock( ch, time_embed_dim, dropout, out_channels=ch, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ), ) self._feature_size += ch self.output_blocks = nn.ModuleList([]) for level, mult in list(enumerate(channel_mult))[::-1]: for i in range(self.num_res_blocks[level] + 1): ich = input_block_chans.pop() layers = [ ResBlock( ch + ich, time_embed_dim, dropout, out_channels=model_channels * mult, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ) ] ch = model_channels * mult if ds in attention_resolutions: if num_head_channels == -1: dim_head = ch // num_heads else: num_heads = ch // num_head_channels dim_head = num_head_channels if legacy: #num_heads = 1 dim_head = ch // num_heads if use_spatial_transformer else num_head_channels if exists(disable_self_attentions): disabled_sa = disable_self_attentions[level] else: disabled_sa = False if not exists(num_attention_blocks) or i < num_attention_blocks[level]: layers.append( AttentionBlock( ch, use_checkpoint=use_checkpoint, num_heads=num_heads_upsample, num_head_channels=dim_head, use_new_attention_order=use_new_attention_order, ) if not use_spatial_transformer else SpatialTransformer( ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim, disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer, use_checkpoint=use_checkpoint ) ) if level and i == self.num_res_blocks[level]: out_ch = ch layers.append( ResBlock( ch, time_embed_dim, dropout, out_channels=out_ch, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, up=True, ) if resblock_updown else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch) ) ds //= 2 self.output_blocks.append(TimestepEmbedSequential(*layers)) self._feature_size += ch def convert_to_fp16(self): """ Convert the torso of the model to float16. """ self.input_blocks.apply(convert_module_to_f16) self.middle_block.apply(convert_module_to_f16) self.output_blocks.apply(convert_module_to_f16) def convert_to_fp32(self): """ Convert the torso of the model to float32. 
""" self.input_blocks.apply(convert_module_to_f32) self.middle_block.apply(convert_module_to_f32) self.output_blocks.apply(convert_module_to_f32) def forward(self, x, timesteps=None, context=None, y=None,**kwargs): """ Apply the model to an input batch. :param x: an [N x C x ...] Tensor of inputs. :param timesteps: a 1-D batch of timesteps. :param context: conditioning plugged in via crossattn :param y: an [N] Tensor of labels, if class-conditional. :return: an [N x C x ...] Tensor of outputs. """ assert (y is not None) == ( self.num_classes is not None ), "must specify y if and only if the model is class-conditional" refs = [] hs = []
t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)
3
2023-12-16 03:31:33+00:00
24k
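For reference, a minimal sketch of the sinusoidal embedding that a call like `timestep_embedding(timesteps, self.model_channels, repeat_only=False)` conventionally computes in diffusion UNets. The cos/sin ordering, the odd-dimension padding, and the omission of the `repeat_only` branch are assumptions here, not details recovered from the record above.

import math
import torch

def timestep_embedding(timesteps, dim, max_period=10000):
    # Sketch: map a 1-D tensor of N timestep indices to an [N x dim] tensor
    # of sinusoidal features with geometrically spaced frequencies.
    half = dim // 2
    freqs = torch.exp(
        -math.log(max_period) * torch.arange(half, dtype=torch.float32) / half
    ).to(timesteps.device)
    args = timesteps[:, None].float() * freqs[None]
    emb = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
    if dim % 2 == 1:
        emb = torch.cat([emb, torch.zeros_like(emb[:, :1])], dim=-1)  # pad odd dims
    return emb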
yasserben/CLOUDS
train_net.py
[ { "identifier": "add_maskformer2_config", "path": "clouds/config.py", "snippet": "def add_maskformer2_config(cfg):\n \"\"\"\n Add config for MASK_FORMER.\n \"\"\"\n # NOTE: configs from original maskformer\n # data config\n # select the dataset mapper\n cfg.INPUT.DATASET_MAPPER_NAME...
from shapely.errors import ShapelyDeprecationWarning from collections import OrderedDict from typing import Any, Dict, List, Set from detectron2.checkpoint import DetectionCheckpointer from detectron2.config import get_cfg from detectron2.data import ( MetadataCatalog, build_detection_train_loader, build_detection_test_loader, ) from detectron2.engine import ( DefaultTrainer, default_argument_parser, default_setup, launch, ) from detectron2.modeling import build_model from detectron2.evaluation import ( CityscapesInstanceEvaluator, CityscapesSemSegEvaluator, COCOEvaluator, COCOPanopticEvaluator, DatasetEvaluators, LVISEvaluator, SemSegEvaluator, verify_results, inference_on_dataset, print_csv_format, DatasetEvaluator, ) from detectron2.projects.deeplab import add_deeplab_config, build_lr_scheduler from detectron2.solver.build import maybe_add_gradient_clipping from detectron2.utils.logger import setup_logger from detectron2.engine import hooks from fvcore.nn.precise_bn import get_bn_modules from clouds import ( CityscapesSemSegEvaluator, ClassicalSemSegEvaluator, MapperTrain, MapperTest, add_maskformer2_config, add_clouds_config, add_wandb_config, add_prerocessing_training_set_config, PersoEvalHook, add_repeat_factors, ) from clouds.utils import setup_wandb, WandbWriter import warnings import copy import itertools import logging import os import ast import torch import detectron2.utils.comm as comm
14,536
""" Evaluate the given model. The given model is expected to already contain weights to evaluate. Args: cfg (CfgNode): model (nn.Module): evaluators (list[DatasetEvaluator] or None): if None, will call :meth:`build_evaluator`. Otherwise, must have the same length as ``cfg.DATASETS.TEST``. Returns: dict: a dict of result metrics """ logger = logging.getLogger(__name__) if isinstance(evaluators, DatasetEvaluator): evaluators = [evaluators] if evaluators is not None: assert len(cfg.DATASETS.TEST) == len(evaluators), "{} != {}".format( len(cfg.DATASETS.TEST), len(evaluators) ) results = OrderedDict() for idx, dataset_name in enumerate(cfg.DATASETS.TEST): data_loader = cls.build_test_loader(cfg, dataset_name) # When evaluators are passed in as arguments, # implicitly assume that evaluators can be created before data_loader. if evaluators is not None: evaluator = evaluators[idx] else: try: evaluator = cls.build_evaluator( cfg, dataset_name, output_folder=output_folder ) except NotImplementedError: logger.warn( "No evaluator found. Use `DefaultTrainer.test(evaluators=)`, " "or implement its `build_evaluator` method." ) results[dataset_name] = {} continue results_i = inference_on_dataset(model, data_loader, evaluator) results[dataset_name] = results_i if comm.is_main_process(): assert isinstance( results_i, dict ), "Evaluator must return a dict on the main process. Got {} instead.".format( results_i ) logger.info( "Evaluation results for {} in csv format:".format(dataset_name) ) print_csv_format(results_i) if len(results) == 1: results = list(results.values())[0] return results def build_hooks(self): """ Build a list of default hooks, including timing, evaluation, checkpointing, lr scheduling, precise BN, writing events. Returns: list[HookBase]: """ cfg = self.cfg.clone() cfg.defrost() cfg.DATALOADER.NUM_WORKERS = 0 # save some memory and time for PreciseBN ret = [ hooks.IterationTimer(), hooks.LRScheduler(), hooks.PreciseBN( # Run at the same freq as (but before) evaluation. cfg.TEST.EVAL_PERIOD, self.model, # Build a new data loader to not affect training self.build_train_loader(cfg), cfg.TEST.PRECISE_BN.NUM_ITER, ) if cfg.TEST.PRECISE_BN.ENABLED and get_bn_modules(self.model) else None, ] # Do PreciseBN before checkpointer, because it updates the model and need to # be saved by checkpointer. # This is not always the best: if checkpointing has a different frequency, # some checkpoints may have more precise statistics than others. if comm.is_main_process(): ret.append( hooks.PeriodicCheckpointer(self.checkpointer, cfg.TEST.EVAL_PERIOD * 5) ) def test_and_save_results(): self._last_eval_results = self.test(self.cfg, self.model) return self._last_eval_results # Do evaluation after checkpointer, because then if it fails, # we can use the saved checkpoint to debug. # ret.append(hooks.EvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results)) ret.append(PersoEvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results)) if comm.is_main_process(): # Here the default print/log frequency of each writer is used. # run writers in the end, so that evaluation metrics are written ret.append(hooks.PeriodicWriter(self.build_writers(), period=20)) return ret def setup(args): """ Create configs and perform basic setups. """ cfg = get_cfg() # for poly lr schedule add_deeplab_config(cfg) add_maskformer2_config(cfg) add_clouds_config(cfg) add_wandb_config(cfg)
""" Copyright 2023 Telecom Paris, Yasser BENIGMIM. All rights reserved. Licensed under the Apache License, Version 2.0 Reference: https://github.com/facebookresearch/Mask2Former/blob/main/train_net.py CLOUDS Training Script. This script is a simplified version of the training script in detectron2/tools. """ try: # ignore ShapelyDeprecationWarning from fvcore warnings.filterwarnings("ignore", category=ShapelyDeprecationWarning) except: pass class Trainer(DefaultTrainer): """ Extension of the Trainer class adapted to CLOUDS. """ def build_writers(self): writers = super().build_writers() # use wandb writer instead. writers[-1] = WandbWriter() return writers @classmethod def build_model(cls, cfg): """ Returns: torch.nn.Module: It now calls :func:`detectron2.modeling.build_model`. Overwrite it if you'd like a different model. """ model = build_model(cfg) # logger = logging.getLogger(__name__) # logger.info("Model:\n{}".format(model)) return model # @classmethod # def build_model(cls, cfg): # """ # Returns: # torch.nn.Module: # # It now calls :func:`detectron2.modeling.build_model`. # Overwrite it if you'd like a different model. # """ # model = build_model(cfg) # # logger = logging.getLogger(__name__) # # logger.info("Model:\n{}".format(model)) # return model @classmethod def build_evaluator(cls, cfg, dataset_name, output_folder=None): """ Create evaluator(s) for a given dataset. This uses the special metadata "evaluator_type" associated with each builtin dataset. For your own dataset, you can simply create an evaluator manually in your script and do not have to worry about the hacky if-else logic here. """ if output_folder is None: output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") else: output_folder = os.path.join(cfg.OUTPUT_DIR, output_folder, "inference") evaluator_list = [] evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type # semantic segmentation if ( evaluator_type == "bdd_sem_seg" or evaluator_type == "mapillary_sem_seg" or evaluator_type == "acdc_sem_seg" ): evaluator_list.append( ClassicalSemSegEvaluator( dataset_name, distributed=True, output_dir=output_folder, save_pl=cfg.MODEL.SAVE_PSEUDO_LABELS, ) ) # Cityscapes if evaluator_type == "cityscapes_sem_seg": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." # return CityscapesSemSegEvaluator(dataset_name) if cfg.MODEL.SAVE_PSEUDO_LABELS: return CityscapesSemSegEvaluator( dataset_name, save_pl=True, output_dir=output_folder ) else: return CityscapesSemSegEvaluator(dataset_name) if len(evaluator_list) == 0: raise NotImplementedError( "no Evaluator for the dataset {} with the type {}".format( dataset_name, evaluator_type ) ) elif len(evaluator_list) == 1: return evaluator_list[0] return DatasetEvaluators(evaluator_list) @classmethod def build_train_loader(cls, cfg): # Semantic segmentation dataset mapper mapper = MapperTrain(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) @classmethod def build_test_loader(cls, cfg, dataset_name): mapper = MapperTest(cfg, False) return build_detection_test_loader( cfg, dataset_name, batch_size=1, mapper=mapper ) @classmethod def build_lr_scheduler(cls, cfg, optimizer): """ It now calls :func:`detectron2.solver.build_lr_scheduler`. Overwrite it if you'd like a different scheduler. 
""" return build_lr_scheduler(cfg, optimizer) @classmethod def build_optimizer(cls, cfg, model): weight_decay_norm = cfg.SOLVER.WEIGHT_DECAY_NORM weight_decay_embed = cfg.SOLVER.WEIGHT_DECAY_EMBED defaults = {} defaults["lr"] = cfg.SOLVER.BASE_LR defaults["weight_decay"] = cfg.SOLVER.WEIGHT_DECAY norm_module_types = ( torch.nn.BatchNorm1d, torch.nn.BatchNorm2d, torch.nn.BatchNorm3d, torch.nn.SyncBatchNorm, # NaiveSyncBatchNorm inherits from BatchNorm2d torch.nn.GroupNorm, torch.nn.InstanceNorm1d, torch.nn.InstanceNorm2d, torch.nn.InstanceNorm3d, torch.nn.LayerNorm, torch.nn.LocalResponseNorm, ) params: List[Dict[str, Any]] = [] memo: Set[torch.nn.parameter.Parameter] = set() for module_name, module in model.named_modules(): for module_param_name, value in module.named_parameters(recurse=False): if not value.requires_grad: continue if cfg.MODEL.CLOUDS.OVERWRITING: if any( ignored_module in module_name for ignored_module in ["sem_seg_head_ema.", "sam.sam."] ): continue # Avoid duplicating parameters if value in memo: continue memo.add(value) hyperparams = copy.copy(defaults) if "backbone" in module_name: hyperparams["lr"] = ( hyperparams["lr"] * cfg.SOLVER.BACKBONE_MULTIPLIER ) if ( "relative_position_bias_table" in module_param_name or "absolute_pos_embed" in module_param_name ): print(module_param_name) hyperparams["weight_decay"] = 0.0 if isinstance(module, norm_module_types): hyperparams["weight_decay"] = weight_decay_norm if isinstance(module, torch.nn.Embedding): hyperparams["weight_decay"] = weight_decay_embed params.append({"params": [value], **hyperparams}) def maybe_add_full_model_gradient_clipping(optim): # detectron2 doesn't have full model gradient clipping now clip_norm_val = cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE enable = ( cfg.SOLVER.CLIP_GRADIENTS.ENABLED and cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model" and clip_norm_val > 0.0 ) class FullModelGradientClippingOptimizer(optim): def step(self, closure=None): all_params = itertools.chain( *[x["params"] for x in self.param_groups] ) torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val) super().step(closure=closure) return FullModelGradientClippingOptimizer if enable else optim optimizer_type = cfg.SOLVER.OPTIMIZER if optimizer_type == "SGD": optimizer = maybe_add_full_model_gradient_clipping(torch.optim.SGD)( params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM ) elif optimizer_type == "ADAMW": optimizer = maybe_add_full_model_gradient_clipping(torch.optim.AdamW)( params, cfg.SOLVER.BASE_LR ) else: raise NotImplementedError(f"no optimizer type {optimizer_type}") if not cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model": optimizer = maybe_add_gradient_clipping(cfg, optimizer) return optimizer @classmethod def test(cls, cfg, model, output_folder=None, evaluators=None): """ Evaluate the given model. The given model is expected to already contain weights to evaluate. Args: cfg (CfgNode): model (nn.Module): evaluators (list[DatasetEvaluator] or None): if None, will call :meth:`build_evaluator`. Otherwise, must have the same length as ``cfg.DATASETS.TEST``. 
Returns: dict: a dict of result metrics """ logger = logging.getLogger(__name__) if isinstance(evaluators, DatasetEvaluator): evaluators = [evaluators] if evaluators is not None: assert len(cfg.DATASETS.TEST) == len(evaluators), "{} != {}".format( len(cfg.DATASETS.TEST), len(evaluators) ) results = OrderedDict() for idx, dataset_name in enumerate(cfg.DATASETS.TEST): data_loader = cls.build_test_loader(cfg, dataset_name) # When evaluators are passed in as arguments, # implicitly assume that evaluators can be created before data_loader. if evaluators is not None: evaluator = evaluators[idx] else: try: evaluator = cls.build_evaluator( cfg, dataset_name, output_folder=output_folder ) except NotImplementedError: logger.warn( "No evaluator found. Use `DefaultTrainer.test(evaluators=)`, " "or implement its `build_evaluator` method." ) results[dataset_name] = {} continue results_i = inference_on_dataset(model, data_loader, evaluator) results[dataset_name] = results_i if comm.is_main_process(): assert isinstance( results_i, dict ), "Evaluator must return a dict on the main process. Got {} instead.".format( results_i ) logger.info( "Evaluation results for {} in csv format:".format(dataset_name) ) print_csv_format(results_i) if len(results) == 1: results = list(results.values())[0] return results def build_hooks(self): """ Build a list of default hooks, including timing, evaluation, checkpointing, lr scheduling, precise BN, writing events. Returns: list[HookBase]: """ cfg = self.cfg.clone() cfg.defrost() cfg.DATALOADER.NUM_WORKERS = 0 # save some memory and time for PreciseBN ret = [ hooks.IterationTimer(), hooks.LRScheduler(), hooks.PreciseBN( # Run at the same freq as (but before) evaluation. cfg.TEST.EVAL_PERIOD, self.model, # Build a new data loader to not affect training self.build_train_loader(cfg), cfg.TEST.PRECISE_BN.NUM_ITER, ) if cfg.TEST.PRECISE_BN.ENABLED and get_bn_modules(self.model) else None, ] # Do PreciseBN before checkpointer, because it updates the model and need to # be saved by checkpointer. # This is not always the best: if checkpointing has a different frequency, # some checkpoints may have more precise statistics than others. if comm.is_main_process(): ret.append( hooks.PeriodicCheckpointer(self.checkpointer, cfg.TEST.EVAL_PERIOD * 5) ) def test_and_save_results(): self._last_eval_results = self.test(self.cfg, self.model) return self._last_eval_results # Do evaluation after checkpointer, because then if it fails, # we can use the saved checkpoint to debug. # ret.append(hooks.EvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results)) ret.append(PersoEvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results)) if comm.is_main_process(): # Here the default print/log frequency of each writer is used. # run writers in the end, so that evaluation metrics are written ret.append(hooks.PeriodicWriter(self.build_writers(), period=20)) return ret def setup(args): """ Create configs and perform basic setups. """ cfg = get_cfg() # for poly lr schedule add_deeplab_config(cfg) add_maskformer2_config(cfg) add_clouds_config(cfg) add_wandb_config(cfg)
add_prerocessing_training_set_config(cfg)
3
2023-12-15 15:40:58+00:00
24k
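The `setup()` in the record above is cut off right after the config-extension calls (its next line adds `add_prerocessing_training_set_config(cfg)`). In a standard detectron2 training script the function conventionally finishes as sketched below; this tail follows the usual Mask2Former pattern and is an assumption, not text recovered from the record.

from detectron2.engine import default_setup
from detectron2.utils.logger import setup_logger

def finish_setup(cfg, args):
    # Conventional tail of a detectron2 setup(): overlay the experiment config
    # and CLI overrides, freeze, then run the framework's default bookkeeping.
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    default_setup(cfg, args)
    setup_logger(output=cfg.OUTPUT_DIR, name="clouds")  # project-specific logger
    return cfg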
modelscope/scepter
scepter/studio/inference/inference.py
[ { "identifier": "Config", "path": "scepter/modules/utils/config.py", "snippet": "class Config(object):\n def __init__(self,\n cfg_dict={},\n load=True,\n cfg_file=None,\n logger=None,\n parser_ins=None):\n '''\n ...
import os import gradio as gr import scepter from glob import glob from scepter.modules.utils.config import Config from scepter.modules.utils.file_system import FS from scepter.studio.inference.inference_manager.infer_runer import \ PipelineManager from scepter.studio.inference.inference_ui.component_names import \ InferenceUIName from scepter.studio.inference.inference_ui.control_ui import ControlUI from scepter.studio.inference.inference_ui.diffusion_ui import DiffusionUI from scepter.studio.inference.inference_ui.gallery_ui import GalleryUI from scepter.studio.inference.inference_ui.mantra_ui import MantraUI from scepter.studio.inference.inference_ui.model_manage_ui import ModelManageUI from scepter.studio.inference.inference_ui.refiner_ui import RefinerUI from scepter.studio.inference.inference_ui.tuner_ui import TunerUI from scepter.studio.utils.env import init_env
16,268
# -*- coding: utf-8 -*- class InferenceUI(): def __init__(self, cfg_general_file, is_debug=False, language='en', root_work_dir='./'): config_dir = os.path.dirname(cfg_general_file) cfg_general = Config(cfg_file=cfg_general_file) cfg_general.WORK_DIR = os.path.join(root_work_dir, cfg_general.WORK_DIR) if not FS.exists(cfg_general.WORK_DIR): FS.make_dir(cfg_general.WORK_DIR) cfg_general = init_env(cfg_general) # official mantra mantra_book = Config( cfg_file=os.path.join(os.path.dirname(scepter.dirname), cfg_general.EXTENSION_PARAS.MANTRA_BOOK)) cfg_general.MANTRAS = mantra_book.MANTRAS # official tuners official_tuners = Config( cfg_file=os.path.join(os.path.dirname(scepter.dirname), cfg_general.EXTENSION_PARAS.OFFICIAL_TUNERS)) cfg_general.TUNERS = official_tuners.TUNERS official_controllers = Config(cfg_file=os.path.join( os.path.dirname(scepter.dirname), cfg_general.EXTENSION_PARAS.OFFICIAL_CONTROLLERS)) cfg_general.CONTROLLERS = official_controllers.CONTROLLERS pipe_manager = PipelineManager() config_list = glob(os.path.join(config_dir, '*/*_pro.yaml'), recursive=True) for config_file in config_list: pipe_manager.register_pipeline(Config(cfg_file=config_file)) for one_tuner in cfg_general.TUNERS: pipe_manager.register_tuner( one_tuner, name=one_tuner.NAME_ZH if language == 'zh' else one_tuner.NAME) for one_controller in cfg_general.CONTROLLERS: pipe_manager.register_controllers(one_controller) self.model_manage_ui = ModelManageUI(cfg_general, pipe_manager, is_debug=is_debug, language=language)
# -*- coding: utf-8 -*- class InferenceUI(): def __init__(self, cfg_general_file, is_debug=False, language='en', root_work_dir='./'): config_dir = os.path.dirname(cfg_general_file) cfg_general = Config(cfg_file=cfg_general_file) cfg_general.WORK_DIR = os.path.join(root_work_dir, cfg_general.WORK_DIR) if not FS.exists(cfg_general.WORK_DIR): FS.make_dir(cfg_general.WORK_DIR) cfg_general = init_env(cfg_general) # official mantra mantra_book = Config( cfg_file=os.path.join(os.path.dirname(scepter.dirname), cfg_general.EXTENSION_PARAS.MANTRA_BOOK)) cfg_general.MANTRAS = mantra_book.MANTRAS # official tuners official_tuners = Config( cfg_file=os.path.join(os.path.dirname(scepter.dirname), cfg_general.EXTENSION_PARAS.OFFICIAL_TUNERS)) cfg_general.TUNERS = official_tuners.TUNERS official_controllers = Config(cfg_file=os.path.join( os.path.dirname(scepter.dirname), cfg_general.EXTENSION_PARAS.OFFICIAL_CONTROLLERS)) cfg_general.CONTROLLERS = official_controllers.CONTROLLERS pipe_manager = PipelineManager() config_list = glob(os.path.join(config_dir, '*/*_pro.yaml'), recursive=True) for config_file in config_list: pipe_manager.register_pipeline(Config(cfg_file=config_file)) for one_tuner in cfg_general.TUNERS: pipe_manager.register_tuner( one_tuner, name=one_tuner.NAME_ZH if language == 'zh' else one_tuner.NAME) for one_controller in cfg_general.CONTROLLERS: pipe_manager.register_controllers(one_controller) self.model_manage_ui = ModelManageUI(cfg_general, pipe_manager, is_debug=is_debug, language=language)
self.gallery_ui = GalleryUI(cfg_general,
6
2023-12-21 02:01:48+00:00
24k
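As a usage note, the discovery loop in `InferenceUI.__init__` above reduces to a small helper. The sketch below restates the glob-and-register pattern from the snippet; `PipelineManager` and `Config` come from the imports shown, while the helper itself is hypothetical.

import os
from glob import glob

from scepter.modules.utils.config import Config
from scepter.studio.inference.inference_manager.infer_runer import \
    PipelineManager

def register_pipelines(config_dir):
    # Register every */*_pro.yaml pipeline config found under config_dir,
    # exactly as the constructor above does.
    manager = PipelineManager()
    for cfg_file in glob(os.path.join(config_dir, '*/*_pro.yaml'), recursive=True):
        manager.register_pipeline(Config(cfg_file=cfg_file))
    return manager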
Ruiyuan-Zhang/CCS
multi_part_assembly/utils/wx_transformer_utilities/transformer_layer.py
[ { "identifier": "LayerNorm", "path": "multi_part_assembly/utils/wx_transformer_utilities/layer_norm.py", "snippet": "def LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True, export=False):\n if not export and torch.cuda.is_available() and has_fused_layernorm:\n return FusedLayerNorm(...
from typing import Dict, List, Optional from .layer_norm import LayerNorm from .multihead_attention import MultiheadAttention from .relational_memory import RelationalMemory from .group_linear_layer import GroupLinearLayer from .basic_mha import MemoryAttention from .quant_noise import quant_noise from .fairseq_dropout import FairseqDropout from torch import Tensor import torch import torch.nn as nn import multi_part_assembly.utils.wx_transformer_utilities.fairseq_utils as utils import random import torch.nn.functional as F
18,016
#should return these q,k,v and save to a big list. Also pull in from the list passed in and concat along dim=3, i.e. so that it's nblocks * nlayers. #print('running comm attention with shapes', q.shape, k.shape, v.shape) score = torch.matmul(q, k.transpose(3,4)) #print('score shape', score.shape) score = F.softmax(score, dim=-1) out = torch.matmul(score, v).transpose(2,3) #print('out shape', out.shape) score = score.mean(dim=2) out = out.reshape(seq_len, bsz, self.n_blocks * self.head_dim * self.n_heads) out = self.final(out) out = out.view(seq_len, bsz, self.dim) return out, score class NormLayer(nn.Module): def __init__(self, num_rims, dim, export=False): super(NormLayer, self).__init__() self.num_rims = num_rims self.dim = dim self.weight = nn.Parameter(torch.ones(1,1,dim*num_rims,)) self.bias = nn.Parameter(torch.zeros(1,1,dim*num_rims,)) self.norm = LayerNorm(dim, export=export, elementwise_affine=False) def forward(self, x): seq_len, bsz, _ = x.shape x = x.view(seq_len, bsz, self.num_rims, self.dim) x = self.norm(x) x = x.view(seq_len, bsz, self.num_rims * self.dim) weight_use = self.weight.repeat(seq_len, bsz, 1) bias_use = self.bias.repeat(seq_len, bsz, 1) x = x * weight_use + bias_use return x class TransformerEncoderLayer(nn.Module): """Encoder layer block. In the original paper each operation (multi-head attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.encoder_normalize_before* to ``True``. Args: args (argparse.Namespace): parsed command-line arguments """ def __init__(self, args, nb, blockatt, blockatt_memory, use_nfm, out_proj_dim=None): super().__init__() self.blockatt = blockatt self.blockatt_memory = blockatt_memory self.embed_dim = args.encoder_embed_dim self.quant_noise = getattr(args, "quant_noise_pq", 0) self.quant_noise_block_size = getattr(args, "quant_noise_pq_block_size", 8) self.use_nfm = use_nfm print('using nfm?', self.use_nfm) self.nb = nb self.norm_blocks = self.nb self.self_attn = self.build_self_attention(self.embed_dim, args) #should divide embed_dim by nb. 
Then raise embed_dim in args self.self_attn_layer_norm = NormLayer(self.norm_blocks, self.embed_dim // self.norm_blocks) self.dropout_module = FairseqDropout(args.dropout, module_name=self.__class__.__name__) self.activation_fn = utils.get_activation_fn( activation=getattr(args, "activation_fn", "relu") ) print("SETUP TRANSFORMER LAYER", 'blocks', self.nb) activation_dropout_p = getattr(args, "activation_dropout", 0) if activation_dropout_p == 0: # for backwards compatibility with models that use args.relu_dropout activation_dropout_p = getattr(args, "relu_dropout", 0) self.activation_dropout_module = FairseqDropout( float(activation_dropout_p), module_name=self.__class__.__name__ ) self.normalize_before = args.encoder_normalize_before self.fc1 = self.build_fc1( self.embed_dim, args.encoder_ffn_embed_dim, self.quant_noise, self.quant_noise_block_size ) self.fc2 = self.build_fc2( args.encoder_ffn_embed_dim, self.embed_dim, self.quant_noise, self.quant_noise_block_size ) self.final_layer_norm = NormLayer(self.norm_blocks, self.embed_dim // self.norm_blocks) if self.blockatt: self.comm = Attention(args.encoder_attention_heads, self.nb, self.embed_dim, self.use_nfm) self.comm_norm = NormLayer(self.norm_blocks, self.embed_dim // self.norm_blocks) if self.blockatt_memory: memory_slots = 4 memory_head_size = 128 memory_num_heads = 1 gate_style = 'memory' print('not using special key size gate_style is', gate_style, memory_slots, memory_num_heads, memory_head_size) self.memory_layer = RelationalMemory(mem_slots=memory_slots, head_size=memory_head_size, input_size=self.embed_dim, output_size=self.embed_dim, num_heads=memory_num_heads, num_blocks=1, forget_bias=1., input_bias=0., attention_mlp_layers=5, gate_style=gate_style) #self.n_blocks_val * self.block_dim_val #self.block_dim_val = dim_val // self.n_blocks_val
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. #from fairseq.modules.shared_group_linear_layer import SharedGroupLinearLayer class TransformerEncoderLayerVanilla(nn.Module): """Encoder layer block. In the original paper each operation (multi-head attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.encoder_normalize_before* to ``True``. Args: args (argparse.Namespace): parsed command-line arguments """ def __init__(self, args, out_proj = None): super().__init__() self.embed_dim = args.encoder_embed_dim self.self_attn = self.build_self_attention(self.embed_dim, args) self.self_attn_layer_norm = LayerNorm(self.embed_dim, eps=1e-5) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, "activation_fn", "relu") ) self.activation_dropout = getattr(args, "activation_dropout", 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, "relu_dropout", 0) self.normalize_before = args.encoder_normalize_before self.fc1 = self.build_fc1(self.embed_dim, args.encoder_ffn_embed_dim) self.fc2 = self.build_fc2(args.encoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim, eps=1e-5) if out_proj is not None: self.final_linear = nn.Linear(args.encoder_embed_dim, out_proj) else: self.final_linear = None def build_fc1(self, input_dim, output_dim): return nn.Linear(input_dim, output_dim) def build_fc2(self, input_dim, output_dim): return nn.Linear(input_dim, output_dim) def build_self_attention(self, embed_dim, args): return MultiheadAttention( embed_dim, args.encoder_attention_heads, dropout=args.attention_dropout, self_attention=args.self_attention, shared_memory_attention = args.shared_memory_attention, use_topk = args.use_topk, topk = args.topk, num_steps = args.num_steps, mem_slots = args.mem_slots, null_attention = args.null_attention, regressive = args.regressive ) def upgrade_state_dict_named(self, state_dict, name): """ Rename layer norm states from `...layer_norms.0.weight` to `...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to `...final_layer_norm.weight` """ layer_norm_map = {"0": "self_attn_layer_norm", "1": "final_layer_norm"} for old, new in layer_norm_map.items(): for m in ("weight", "bias"): k = "{}.layer_norms.{}.{}".format(name, old, m) if k in state_dict: state_dict["{}.{}.{}".format(name, new, m)] = state_dict[k] del state_dict[k] def forward(self, x, encoder_padding_mask, attn_mask: Optional[Tensor] = None, state = None, memory = None): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. 
attn_mask (ByteTensor): binary tensor of shape (T_tgt, T_src), where T_tgt is the length of query, while T_src is the length of key, though here both query and key is x here, attn_mask[t_tgt, t_src] = 1 means when calculating embedding for t_tgt, t_src is excluded (or masked out), =0 means it is included in attention Returns: encoded output of shape `(seq_len, batch, embed_dim)` """ residual = x if self.normalize_before: x = self.self_attn_layer_norm(x) if attn_mask is not None: attn_mask = attn_mask.masked_fill(attn_mask.to(torch.bool), -1e8) # anything in original attn_mask = 1, becomes -1e8 # anything in original attn_mask = 0, becomes 0 # Note that we cannot use -inf here, because at some edge cases, # the attention weight (before softmax) for some padded element in query # will become -inf, which results in NaN in model parameters # TODO: to formally solve this problem, we need to change fairseq's # MultiheadAttention. We will do this later on. #print(state is not None) x, memory, _ = self.self_attn( query=state if state is not None else x, key=x, value=x, key_padding_mask=encoder_padding_mask, attn_mask=attn_mask, memory = memory ) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x if not self.normalize_before: x = self.self_attn_layer_norm(x) residual = x if self.normalize_before: x = self.final_layer_norm(x) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=float(self.activation_dropout), training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x if not self.normalize_before: x = self.final_layer_norm(x) if self.final_linear is not None: x = self.final_linear(x) return x, memory class Attention(nn.Module): def __init__(self, n_heads, n_blocks, dim, use_nfm): super(Attention, self).__init__() self.use_nfm = use_nfm #self.n_heads = n_heads self.n_heads = 12 self.n_blocks = n_blocks self.dim = dim self.block_dim = dim // self.n_blocks #self.head_dim = self.block_dim // self.n_heads self.head_dim = 64 self.scale = self.head_dim ** -0.5 self.query_net = GroupLinearLayer(self.block_dim, self.head_dim * self.n_heads, n_blocks) self.key_net = GroupLinearLayer(self.block_dim, self.head_dim * self.n_heads, n_blocks) self.value_net = GroupLinearLayer(self.block_dim, self.head_dim * self.n_heads, n_blocks) self.final = GroupLinearLayer(self.head_dim * self.n_heads, self.block_dim, n_blocks) def forward(self, x, qkv=None): use_exshare = False if qkv is not None: klst, vlst = qkv seq_len, bsz, _ = x.shape if use_exshare: x = x.view(seq_len, bsz, self.n_blocks * self.block_dim) q = self.query_net(x).view(seq_len, 1, bsz*self.n_blocks, self.n_heads, self.head_dim) k = self.key_net(x).view(seq_len, 1, bsz*self.n_blocks, self.n_heads, self.head_dim) v = self.value_net(x).view(seq_len, 1, bsz*self.n_blocks, self.n_heads, self.head_dim) else: x = x.view(seq_len, bsz, self.n_blocks * self.block_dim) q = self.query_net(x).view(seq_len, bsz, self.n_blocks, self.n_heads, self.head_dim) k = self.key_net(x).view(seq_len, bsz, self.n_blocks, self.n_heads, self.head_dim) v = self.value_net(x).view(seq_len, bsz, self.n_blocks, self.n_heads, self.head_dim) q = q.transpose(2,3) * self.scale k = k.transpose(2,3) v = v.transpose(2,3) if random.uniform(0,1) < 0.00001: print('use NFM?', self.use_nfm) if self.use_nfm: if qkv is not None: klst.append(k) vlst.append(v) #print('len qlst', len(qlst)) #for kval in klst: # print(kval.shape) k = torch.cat(klst, dim=3) v = torch.cat(vlst, dim=3) #should return these q,k,v and save to a big 
list. Also pull in from the list passed in and concat along dim=3, i.e. so that it's nblocks * nlayers. #print('running comm attention with shapes', q.shape, k.shape, v.shape) score = torch.matmul(q, k.transpose(3,4)) #print('score shape', score.shape) score = F.softmax(score, dim=-1) out = torch.matmul(score, v).transpose(2,3) #print('out shape', out.shape) score = score.mean(dim=2) out = out.reshape(seq_len, bsz, self.n_blocks * self.head_dim * self.n_heads) out = self.final(out) out = out.view(seq_len, bsz, self.dim) return out, score class NormLayer(nn.Module): def __init__(self, num_rims, dim, export=False): super(NormLayer, self).__init__() self.num_rims = num_rims self.dim = dim self.weight = nn.Parameter(torch.ones(1,1,dim*num_rims,)) self.bias = nn.Parameter(torch.zeros(1,1,dim*num_rims,)) self.norm = LayerNorm(dim, export=export, elementwise_affine=False) def forward(self, x): seq_len, bsz, _ = x.shape x = x.view(seq_len, bsz, self.num_rims, self.dim) x = self.norm(x) x = x.view(seq_len, bsz, self.num_rims * self.dim) weight_use = self.weight.repeat(seq_len, bsz, 1) bias_use = self.bias.repeat(seq_len, bsz, 1) x = x * weight_use + bias_use return x class TransformerEncoderLayer(nn.Module): """Encoder layer block. In the original paper each operation (multi-head attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.encoder_normalize_before* to ``True``. Args: args (argparse.Namespace): parsed command-line arguments """ def __init__(self, args, nb, blockatt, blockatt_memory, use_nfm, out_proj_dim=None): super().__init__() self.blockatt = blockatt self.blockatt_memory = blockatt_memory self.embed_dim = args.encoder_embed_dim self.quant_noise = getattr(args, "quant_noise_pq", 0) self.quant_noise_block_size = getattr(args, "quant_noise_pq_block_size", 8) self.use_nfm = use_nfm print('using nfm?', self.use_nfm) self.nb = nb self.norm_blocks = self.nb self.self_attn = self.build_self_attention(self.embed_dim, args) #should divide embed_dim by nb. 
Then raise embed_dim in args self.self_attn_layer_norm = NormLayer(self.norm_blocks, self.embed_dim // self.norm_blocks) self.dropout_module = FairseqDropout(args.dropout, module_name=self.__class__.__name__) self.activation_fn = utils.get_activation_fn( activation=getattr(args, "activation_fn", "relu") ) print("SETUP TRANSFORMER LAYER", 'blocks', self.nb) activation_dropout_p = getattr(args, "activation_dropout", 0) if activation_dropout_p == 0: # for backwards compatibility with models that use args.relu_dropout activation_dropout_p = getattr(args, "relu_dropout", 0) self.activation_dropout_module = FairseqDropout( float(activation_dropout_p), module_name=self.__class__.__name__ ) self.normalize_before = args.encoder_normalize_before self.fc1 = self.build_fc1( self.embed_dim, args.encoder_ffn_embed_dim, self.quant_noise, self.quant_noise_block_size ) self.fc2 = self.build_fc2( args.encoder_ffn_embed_dim, self.embed_dim, self.quant_noise, self.quant_noise_block_size ) self.final_layer_norm = NormLayer(self.norm_blocks, self.embed_dim // self.norm_blocks) if self.blockatt: self.comm = Attention(args.encoder_attention_heads, self.nb, self.embed_dim, self.use_nfm) self.comm_norm = NormLayer(self.norm_blocks, self.embed_dim // self.norm_blocks) if self.blockatt_memory: memory_slots = 4 memory_head_size = 128 memory_num_heads = 1 gate_style = 'memory' print('not using special key size gate_style is', gate_style, memory_slots, memory_num_heads, memory_head_size) self.memory_layer = RelationalMemory(mem_slots=memory_slots, head_size=memory_head_size, input_size=self.embed_dim, output_size=self.embed_dim, num_heads=memory_num_heads, num_blocks=1, forget_bias=1., input_bias=0., attention_mlp_layers=5, gate_style=gate_style) #self.n_blocks_val * self.block_dim_val #self.block_dim_val = dim_val // self.n_blocks_val
self.memory_attention = MemoryAttention(n_blocks_query=self.nb, n_blocks_val=8, dim_query=self.embed_dim, dim_val=memory_head_size*memory_num_heads*memory_slots)
4
2023-12-15 13:13:01+00:00
24k
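Both encoder classes in the record above carry the same docstring switch: post-norm (`dropout -> add residual -> layernorm`, the paper's default) versus pre-norm (layernorm first, enabled by `args.encoder_normalize_before`). Below is a minimal sketch of that residual wiring, distilled from the `forward()` shown above; the helper name is hypothetical.

import torch.nn.functional as F

def residual_sublayer(x, sublayer, norm, dropout_p, normalize_before, training=True):
    # Pre-norm:  x -> norm -> sublayer -> dropout -> add residual
    # Post-norm: x -> sublayer -> dropout -> add residual -> norm
    residual = x
    if normalize_before:
        x = norm(x)
    x = sublayer(x)
    x = F.dropout(x, p=dropout_p, training=training)
    x = residual + x
    if not normalize_before:
        x = norm(x)
    return x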
camenduru/FreeInit-hf
app.py
[ { "identifier": "UNet3DConditionModel", "path": "animatediff/models/unet.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,\n in...
import os import torch import random import gradio as gr from glob import glob from omegaconf import OmegaConf from safetensors import safe_open from diffusers import AutoencoderKL from diffusers import EulerDiscreteScheduler, DDIMScheduler from diffusers.utils.import_utils import is_xformers_available from transformers import CLIPTextModel, CLIPTokenizer from animatediff.models.unet import UNet3DConditionModel from animatediff.pipelines.pipeline_animation import AnimationFreeInitPipeline from animatediff.utils.util import save_videos_grid from animatediff.utils.convert_from_ckpt import convert_ldm_unet_checkpoint, convert_ldm_clip_checkpoint, convert_ldm_vae_checkpoint from diffusers.training_utils import set_seed from animatediff.utils.freeinit_utils import get_freq_filter from collections import namedtuple
14,510
# ckpts = [ # "realisticVisionV40_v20Novae.safetensors", # "majicmixRealistic_v5Preview.safetensors", # "rcnzCartoon3d_v10.safetensors", # "lyriel_v16.safetensors", # "toonyou_beta3.safetensors" # ] # for path in glob(os.path.join("models", "DreamBooth_LoRA", "*.safetensors")): # for ckpt in ckpts: # if path.endswith(ckpt): break # else: # print(f"### Cleaning {path} ...") # os.system(f"rm -rf {path}") # os.system(f"rm -rf {os.path.join('models', 'DreamBooth_LoRA', '*.safetensors')}") # os.system(f"bash download_bashscripts/1-ToonYou.sh") # os.system(f"bash download_bashscripts/2-Lyriel.sh") # os.system(f"bash download_bashscripts/3-RcnzCartoon.sh") # os.system(f"bash download_bashscripts/4-MajicMix.sh") # os.system(f"bash download_bashscripts/5-RealisticVision.sh") # # clean Gradio cache # print(f"### Cleaning cached examples ...") # os.system(f"rm -rf gradio_cached_examples/") class AnimateController: def __init__(self): # config dirs self.basedir = os.getcwd() self.stable_diffusion_dir = os.path.join(self.basedir, "models", "StableDiffusion") self.motion_module_dir = os.path.join(self.basedir, "models", "Motion_Module") self.personalized_model_dir = os.path.join(self.basedir, "models", "DreamBooth_LoRA") self.savedir = os.path.join(self.basedir, "samples") os.makedirs(self.savedir, exist_ok=True) self.base_model_list = [] self.motion_module_list = [] self.filter_type_list = [ "butterworth", "gaussian", "box", "ideal" ] self.selected_base_model = None self.selected_motion_module = None self.selected_filter_type = None self.set_width = None self.set_height = None self.set_d_s = None self.set_d_t = None self.refresh_motion_module() self.refresh_personalized_model() # config models self.inference_config = OmegaConf.load(inference_config_path) self.tokenizer = CLIPTokenizer.from_pretrained(pretrained_model_path, subfolder="tokenizer") self.text_encoder = CLIPTextModel.from_pretrained(pretrained_model_path, subfolder="text_encoder").cuda() self.vae = AutoencoderKL.from_pretrained(pretrained_model_path, subfolder="vae").cuda() self.unet = UNet3DConditionModel.from_pretrained_2d(pretrained_model_path, subfolder="unet", unet_additional_kwargs=OmegaConf.to_container(self.inference_config.unet_additional_kwargs)).cuda() self.freq_filter = None self.update_base_model(self.base_model_list[-2]) self.update_motion_module(self.motion_module_list[0]) self.update_filter(512, 512, self.filter_type_list[0], 0.25, 0.25) def refresh_motion_module(self): motion_module_list = glob(os.path.join(self.motion_module_dir, "*.ckpt")) self.motion_module_list = sorted([os.path.basename(p) for p in motion_module_list]) def refresh_personalized_model(self): base_model_list = glob(os.path.join(self.personalized_model_dir, "*.safetensors")) self.base_model_list = sorted([os.path.basename(p) for p in base_model_list]) def update_base_model(self, base_model_dropdown): self.selected_base_model = base_model_dropdown base_model_dropdown = os.path.join(self.personalized_model_dir, base_model_dropdown) base_model_state_dict = {} with safe_open(base_model_dropdown, framework="pt", device="cpu") as f: for key in f.keys(): base_model_state_dict[key] = f.get_tensor(key) converted_vae_checkpoint = convert_ldm_vae_checkpoint(base_model_state_dict, self.vae.config) self.vae.load_state_dict(converted_vae_checkpoint) converted_unet_checkpoint = convert_ldm_unet_checkpoint(base_model_state_dict, self.unet.config) self.unet.load_state_dict(converted_unet_checkpoint, strict=False) self.text_encoder = 
convert_ldm_clip_checkpoint(base_model_state_dict) return gr.Dropdown.update() def update_motion_module(self, motion_module_dropdown): self.selected_motion_module = motion_module_dropdown motion_module_dropdown = os.path.join(self.motion_module_dir, motion_module_dropdown) motion_module_state_dict = torch.load(motion_module_dropdown, map_location="cpu") _, unexpected = self.unet.load_state_dict(motion_module_state_dict, strict=False) assert len(unexpected) == 0 return gr.Dropdown.update() # def update_filter(self, shape, method, n, d_s, d_t): def update_filter(self, width_slider, height_slider, filter_type_dropdown, d_s_slider, d_t_slider): self.set_width = width_slider self.set_height = height_slider self.selected_filter_type = filter_type_dropdown self.set_d_s = d_s_slider self.set_d_t = d_t_slider vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) shape = [1, 4, 16, self.set_width//vae_scale_factor, self.set_height//vae_scale_factor]
pretrained_model_path = "models/StableDiffusion/stable-diffusion-v1-5" inference_config_path = "configs/inference/inference-v1.yaml" css = """ .toolbutton { margin-buttom: 0em 0em 0em 0em; max-width: 2.5em; min-width: 2.5em !important; height: 2.5em; } """ examples = [ # 0-RealisticVision [ "realisticVisionV51_v20Novae.safetensors", "mm_sd_v14.ckpt", "A panda standing on a surfboard in the ocean under moonlight.", "worst quality, low quality, nsfw, logo", 512, 512, "2005563494988190", "butterworth", 0.25, 0.25, 3, ["use_fp16"] ], # 1-ToonYou [ "toonyou_beta3.safetensors", "mm_sd_v14.ckpt", "(best quality, masterpiece), 1girl, looking at viewer, blurry background, upper body, contemporary, dress", "(worst quality, low quality)", 512, 512, "478028150728261", "butterworth", 0.25, 0.25, 3, ["use_fp16"] ], # 2-Lyriel [ "lyriel_v16.safetensors", "mm_sd_v14.ckpt", "hypercars cyberpunk moving, muted colors, swirling color smokes, legend, cityscape, space", "3d, cartoon, anime, sketches, worst quality, low quality, nsfw, logo", 512, 512, "1566149281915957", "butterworth", 0.25, 0.25, 3, ["use_fp16"] ], # 3-RCNZ [ "rcnzCartoon3d_v10.safetensors", "mm_sd_v14.ckpt", "A cute raccoon playing guitar in a boat on the ocean", "worst quality, low quality, nsfw, logo", 512, 512, "1566149281915957", "butterworth", 0.25, 0.25, 3, ["use_fp16"] ], # 4-MajicMix [ "majicmixRealistic_v5Preview.safetensors", "mm_sd_v14.ckpt", "1girl, reading book", "(ng_deepnegative_v1_75t:1.2), (badhandv4:1), (worst quality:2), (low quality:2), (normal quality:2), lowres, bad anatomy, bad hands, watermark, moles", 512, 512, "2005563494988190", "butterworth", 0.25, 0.25, 3, ["use_fp16"] ], # # 5-RealisticVision # [ # "realisticVisionV51_v20Novae.safetensors", # "mm_sd_v14.ckpt", # "A panda standing on a surfboard in the ocean in sunset.", # "worst quality, low quality, nsfw, logo", # 512, 512, "2005563494988190", # "butterworth", 0.25, 0.25, 3, # ["use_fp16"] # ] ] # clean unrelated ckpts # ckpts = [ # "realisticVisionV40_v20Novae.safetensors", # "majicmixRealistic_v5Preview.safetensors", # "rcnzCartoon3d_v10.safetensors", # "lyriel_v16.safetensors", # "toonyou_beta3.safetensors" # ] # for path in glob(os.path.join("models", "DreamBooth_LoRA", "*.safetensors")): # for ckpt in ckpts: # if path.endswith(ckpt): break # else: # print(f"### Cleaning {path} ...") # os.system(f"rm -rf {path}") # os.system(f"rm -rf {os.path.join('models', 'DreamBooth_LoRA', '*.safetensors')}") # os.system(f"bash download_bashscripts/1-ToonYou.sh") # os.system(f"bash download_bashscripts/2-Lyriel.sh") # os.system(f"bash download_bashscripts/3-RcnzCartoon.sh") # os.system(f"bash download_bashscripts/4-MajicMix.sh") # os.system(f"bash download_bashscripts/5-RealisticVision.sh") # # clean Gradio cache # print(f"### Cleaning cached examples ...") # os.system(f"rm -rf gradio_cached_examples/") class AnimateController: def __init__(self): # config dirs self.basedir = os.getcwd() self.stable_diffusion_dir = os.path.join(self.basedir, "models", "StableDiffusion") self.motion_module_dir = os.path.join(self.basedir, "models", "Motion_Module") self.personalized_model_dir = os.path.join(self.basedir, "models", "DreamBooth_LoRA") self.savedir = os.path.join(self.basedir, "samples") os.makedirs(self.savedir, exist_ok=True) self.base_model_list = [] self.motion_module_list = [] self.filter_type_list = [ "butterworth", "gaussian", "box", "ideal" ] self.selected_base_model = None self.selected_motion_module = None self.selected_filter_type = None self.set_width = None 
self.set_height = None self.set_d_s = None self.set_d_t = None self.refresh_motion_module() self.refresh_personalized_model() # config models self.inference_config = OmegaConf.load(inference_config_path) self.tokenizer = CLIPTokenizer.from_pretrained(pretrained_model_path, subfolder="tokenizer") self.text_encoder = CLIPTextModel.from_pretrained(pretrained_model_path, subfolder="text_encoder").cuda() self.vae = AutoencoderKL.from_pretrained(pretrained_model_path, subfolder="vae").cuda() self.unet = UNet3DConditionModel.from_pretrained_2d(pretrained_model_path, subfolder="unet", unet_additional_kwargs=OmegaConf.to_container(self.inference_config.unet_additional_kwargs)).cuda() self.freq_filter = None self.update_base_model(self.base_model_list[-2]) self.update_motion_module(self.motion_module_list[0]) self.update_filter(512, 512, self.filter_type_list[0], 0.25, 0.25) def refresh_motion_module(self): motion_module_list = glob(os.path.join(self.motion_module_dir, "*.ckpt")) self.motion_module_list = sorted([os.path.basename(p) for p in motion_module_list]) def refresh_personalized_model(self): base_model_list = glob(os.path.join(self.personalized_model_dir, "*.safetensors")) self.base_model_list = sorted([os.path.basename(p) for p in base_model_list]) def update_base_model(self, base_model_dropdown): self.selected_base_model = base_model_dropdown base_model_dropdown = os.path.join(self.personalized_model_dir, base_model_dropdown) base_model_state_dict = {} with safe_open(base_model_dropdown, framework="pt", device="cpu") as f: for key in f.keys(): base_model_state_dict[key] = f.get_tensor(key) converted_vae_checkpoint = convert_ldm_vae_checkpoint(base_model_state_dict, self.vae.config) self.vae.load_state_dict(converted_vae_checkpoint) converted_unet_checkpoint = convert_ldm_unet_checkpoint(base_model_state_dict, self.unet.config) self.unet.load_state_dict(converted_unet_checkpoint, strict=False) self.text_encoder = convert_ldm_clip_checkpoint(base_model_state_dict) return gr.Dropdown.update() def update_motion_module(self, motion_module_dropdown): self.selected_motion_module = motion_module_dropdown motion_module_dropdown = os.path.join(self.motion_module_dir, motion_module_dropdown) motion_module_state_dict = torch.load(motion_module_dropdown, map_location="cpu") _, unexpected = self.unet.load_state_dict(motion_module_state_dict, strict=False) assert len(unexpected) == 0 return gr.Dropdown.update() # def update_filter(self, shape, method, n, d_s, d_t): def update_filter(self, width_slider, height_slider, filter_type_dropdown, d_s_slider, d_t_slider): self.set_width = width_slider self.set_height = height_slider self.selected_filter_type = filter_type_dropdown self.set_d_s = d_s_slider self.set_d_t = d_t_slider vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) shape = [1, 4, 16, self.set_width//vae_scale_factor, self.set_height//vae_scale_factor]
self.freq_filter = get_freq_filter(
6
2023-12-19 21:06:32+00:00
24k
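`update_filter()` in the record above derives the latent shape `[1, 4, 16, width // s, height // s]` with `s = 2 ** (len(vae.config.block_out_channels) - 1)` and hands it to `get_freq_filter` along with the filter type and the `d_s`/`d_t` cutoffs. Below is a hypothetical sketch of the kind of spatio-temporal Gaussian low-pass mask such a call could construct; the actual FreeInit implementation may normalize or parameterize the frequencies differently.

import math
import torch

def gaussian_low_pass_mask(shape, d_s=0.25, d_t=0.25):
    # shape = [..., T, H, W]; coordinates are normalized to [-1, 1] per axis,
    # so d_t and d_s act as temporal and spatial cutoffs of the Gaussian.
    T, H, W = shape[-3], shape[-2], shape[-1]
    mask = torch.zeros(shape)
    for t in range(T):
        for h in range(H):
            for w in range(W):
                d2 = ((2 * t / T - 1) / d_t) ** 2 \
                     + ((2 * h / H - 1) / d_s) ** 2 \
                     + ((2 * w / W - 1) / d_s) ** 2
                mask[..., t, h, w] = math.exp(-0.5 * d2)
    return mask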
m-abr/FCPCodebase
world/Robot.py
[ { "identifier": "Math_Ops", "path": "math_ops/Math_Ops.py", "snippet": "class Math_Ops():\n '''\n This class provides general mathematical operations that are not directly available through numpy \n '''\n \n @staticmethod\n def deg_sph2cart(spherical_vec):\n ''' Converts SimSpark'...
from collections import deque from math import atan, pi, sqrt, tan from math_ops.Math_Ops import Math_Ops as M from math_ops.Matrix_3x3 import Matrix_3x3 from math_ops.Matrix_4x4 import Matrix_4x4 from world.commons.Body_Part import Body_Part from world.commons.Joint_Info import Joint_Info import numpy as np import xml.etree.ElementTree as xmlp
14,724
        self.feet_toes_last_touch = {"lf":0,"rf":0,"lf1":0,"rf1":0} # foot "lf"/"rf", toe "lf1"/"rf1" World.time_local_ms when foot/toe last touched any surface
        self.feet_toes_are_touching = {"lf":False,"rf":False,"lf1":False,"rf1":False} # foot "lf"/"rf", toe "lf1"/"rf1" True if touching in last received server message
        self.fwd_kinematics_list = None # List of body parts, ordered according to dependencies
        self.rel_cart_CoM_position = np.zeros(3) # Center of Mass position, relative to head, in cartesian coordinates (m)

        # Joint variables are optimized for performance / array operations
        self.joints_position = np.zeros(self.no_of_joints) # Joints' angular position (deg)
        self.joints_speed = np.zeros(self.no_of_joints) # Joints' angular speed (rad/s)
        self.joints_target_speed = np.zeros(self.no_of_joints) # Joints' target speed (rad/s) (max: 6.1395 rad/s, see rcssserver3d/data/rsg/agent/nao/hingejoint.rsg)
        self.joints_target_last_speed = np.zeros(self.no_of_joints) # Joints' last target speed (rad/s) (max: 6.1395 rad/s, see rcssserver3d/data/rsg/agent/nao/hingejoint.rsg)
        self.joints_info = [None] * self.no_of_joints # Joints' constant information (see class Joint_Info)
        self.joints_transform = [Matrix_4x4() for _ in range(self.no_of_joints)] # Joints' transformation matrix

        # Localization variables relative to head
        self.loc_head_to_field_transform = Matrix_4x4() # Transformation matrix from head to field
        self.loc_field_to_head_transform = Matrix_4x4() # Transformation matrix from field to head
        self.loc_rotation_head_to_field = Matrix_3x3() # Rotation matrix from head to field
        self.loc_rotation_field_to_head = Matrix_3x3() # Rotation matrix from field to head
        self.loc_head_position = np.zeros(3) # Absolute head position (m)
        self.loc_head_position_history = deque(maxlen=40) # Absolute head position history (queue with up to 40 old positions at intervals of 0.04s, where index 0 is the previous position)
        self.loc_head_velocity = np.zeros(3) # Absolute head velocity (m/s) (Warning: possibly noisy)
        self.loc_head_orientation = 0 # Head orientation (deg)
        self.loc_is_up_to_date = False # False if this is not a visual step, or not enough elements are visible
        self.loc_last_update = 0 # World.time_local_ms when the localization was last updated
        self.loc_head_position_last_update = 0 # World.time_local_ms when loc_head_position was last updated by vision or radio
        self.radio_fallen_state = False # True if (radio says we fell) and (radio is significantly more recent than loc)
        self.radio_last_update = 0 # World.time_local_ms when radio_fallen_state was last updated (and possibly loc_head_position)

        # Localization variables relative to torso
        self.loc_torso_to_field_rotation = Matrix_3x3() # Rotation matrix from torso to field
        self.loc_torso_to_field_transform = Matrix_4x4() # Transformation matrix from torso to field
        self.loc_torso_roll = 0 # Torso roll (deg)
        self.loc_torso_pitch = 0 # Torso pitch (deg)
        self.loc_torso_orientation = 0 # Torso orientation (deg)
        self.loc_torso_inclination = 0 # Torso inclination (deg) (inclination of z-axis in relation to field z-axis)
        self.loc_torso_position = np.zeros(3) # Absolute torso position (m)
        self.loc_torso_velocity = np.zeros(3) # Absolute torso velocity (m/s)
        self.loc_torso_acceleration = np.zeros(3) # Absolute Coordinate acceleration (m/s2)

        # Other localization variables
        self.cheat_abs_pos = np.zeros(3) # Absolute head position provided by the server as cheat (m)
        self.cheat_ori = 0.0 # Absolute head orientation provided by the server as cheat (deg)
        self.loc_CoM_position = np.zeros(3) # Absolute CoM position (m)
        self.loc_CoM_velocity = np.zeros(3) # Absolute CoM velocity (m/s)

        # Localization special variables
        '''
        self.loc_head_z is often equivalent to self.loc_head_position[2], but sometimes it differs.
        There are situations in which the rotation and translation cannot be computed,
        but the z-coordinate can still be found through vision, in which case:
            self.loc_is_up_to_date is False
            self.loc_head_z_is_up_to_date is True
        It should be used in applications which rely on z as an independent coordinate, such
        as detecting if the robot has fallen, or as an observation for machine learning.
        It should NEVER be used for 3D transformations.
        '''
        self.loc_head_z = 0 # Absolute head position (z) - see above for explanation (m)
        self.loc_head_z_is_up_to_date = False # False if this is not a visual step, or not enough elements are visible
        self.loc_head_z_last_update = 0 # World.time_local_ms when loc_head_z was last computed
        self.loc_head_z_vel = 0 # Absolute head velocity (z) (m/s)

        # Localization + Gyroscope
        # These variables are reliable. The gyroscope is used to update the rotation when waiting for the next visual cycle
        self.imu_torso_roll = 0 # Torso roll (deg) (src: Localization + Gyro)
        self.imu_torso_pitch = 0 # Torso pitch (deg) (src: Localization + Gyro)
        self.imu_torso_orientation = 0 # Torso orientation (deg) (src: Localization + Gyro)
        self.imu_torso_inclination = 0 # Torso inclination (deg) (src: Localization + Gyro)
        self.imu_torso_to_field_rotation = Matrix_3x3() # Rotation matrix from torso to field (src: Localization + Gyro)
        self.imu_last_visual_update = 0 # World.time_local_ms when the IMU data was last updated with visual information

        # Localization + Gyroscope + Accelerometer
        # Warning: these variables are unreliable, since small errors in the Localization Orientation lead to
        # wrong acceleration -> wrong velocity -> wrong position
        self.imu_weak_torso_to_field_transform = Matrix_4x4() # Transformation matrix from torso to field (src: Localization + Gyro + Acc)
        self.imu_weak_head_to_field_transform = Matrix_4x4() # Transformation matrix from head to field (src: Localization + Gyro + Acc)
        self.imu_weak_field_to_head_transform = Matrix_4x4() # Transformation matrix from field to head (src: Localization + Gyro + Acc)
        self.imu_weak_torso_position = np.zeros(3) # Absolute torso position (m) (src: Localization + Gyro + Acc)
        self.imu_weak_torso_velocity = np.zeros(3) # Absolute torso velocity (m/s) (src: Localization + Gyro + Acc)
        self.imu_weak_torso_acceleration = np.zeros(3) # Absolute torso acceleration (m/s2) (src: Localization + Gyro + Acc)
        self.imu_weak_torso_next_position = np.zeros(3) # Absolute position in next step estimate (m) (src: Localization + Gyro + Acc)
        self.imu_weak_torso_next_velocity = np.zeros(3) # Absolute velocity in next step estimate (m/s) (src: Localization + Gyro + Acc)
        self.imu_weak_CoM_position = np.zeros(3) # Absolute CoM position (m) (src: Localization + Gyro + Acc)
        self.imu_weak_CoM_velocity = np.zeros(3) # Absolute CoM velocity (m/s) (src: Localization + Gyro + Acc)

        # Using explicit variables to enable IDE suggestions
        self.J_HEAD_YAW = 0
        self.J_HEAD_PITCH = 1
        self.J_LLEG_YAW_PITCH = 2
        self.J_RLEG_YAW_PITCH = 3
        self.J_LLEG_ROLL = 4
        self.J_RLEG_ROLL = 5
        self.J_LLEG_PITCH = 6
        self.J_RLEG_PITCH = 7
        self.J_LKNEE = 8
        self.J_RKNEE = 9
        self.J_LFOOT_PITCH = 10
        self.J_RFOOT_PITCH = 11
        self.J_LFOOT_ROLL = 12
        self.J_RFOOT_ROLL = 13
        self.J_LARM_PITCH = 14
        self.J_RARM_PITCH = 15
        self.J_LARM_ROLL = 16
        self.J_RARM_ROLL = 17
        self.J_LELBOW_YAW = 18
        self.J_RELBOW_YAW = 19
        self.J_LELBOW_ROLL = 20
        self.J_RELBOW_ROLL = 21
        self.J_LTOE_PITCH = 22
        self.J_RTOE_PITCH = 23

        # ------------------ parse robot xml
        dir = M.get_active_directory("/world/commons/robots/")
        robot_xml_root = xmlp.parse(dir + robot_xml).getroot()

        joint_no = 0
        for child in robot_xml_root:
            if child.tag == "bodypart":
class Robot():
    STEPTIME = 0.02   # Fixed step time
    VISUALSTEP = 0.04 # Fixed visual step time
    SQ_STEPTIME = STEPTIME * STEPTIME
    GRAVITY = np.array([0,0,-9.81])
    IMU_DECAY = 0.996 # IMU's velocity decay

    # ------------------ constants to force symmetry in joints/effectors
    MAP_PERCEPTOR_TO_INDEX = {"hj1":0, "hj2":1, "llj1":2, "rlj1":3,
                              "llj2":4, "rlj2":5, "llj3":6, "rlj3":7,
                              "llj4":8, "rlj4":9, "llj5":10,"rlj5":11,
                              "llj6":12,"rlj6":13,"laj1":14,"raj1":15,
                              "laj2":16,"raj2":17,"laj3":18,"raj3":19,
                              "laj4":20,"raj4":21,"llj7":22,"rlj7":23}

    # Fix symmetry issues 1a/4 (identification)
    FIX_PERCEPTOR_SET = {'rlj2','rlj6','raj2','laj3','laj4'}
    FIX_INDICES_LIST = [5,13,17,18,20]

    # Recommended height for unofficial beam (near ground)
    BEAM_HEIGHTS = [0.4, 0.43, 0.4, 0.46, 0.4]

    def __init__(self, unum:int, robot_type:int) -> None:
        robot_xml = "nao"+str(robot_type)+".xml" # Typical NAO file name
        self.type = robot_type
        self.beam_height = Robot.BEAM_HEIGHTS[robot_type]
        self.no_of_joints = 24 if robot_type == 4 else 22

        # Fix symmetry issues 1b/4 (identification)
        self.FIX_EFFECTOR_MASK = np.ones(self.no_of_joints)
        self.FIX_EFFECTOR_MASK[Robot.FIX_INDICES_LIST] = -1

        self.body_parts = dict() # keys='body part names' (given by the robot's XML), values='Body_Part objects'
        self.unum = unum # Robot's uniform number
        self.gyro = np.zeros(3) # Angular velocity along the three axes of freedom of the robot's torso (deg/s)
        self.acc = np.zeros(3) # Proper acceleration along the three axes of freedom of the robot's torso (m/s2)
        self.frp = dict() # foot "lf"/"rf", toe "lf1"/"rf1" resistance perceptor (relative [p]oint of origin + [f]orce vector) e.g. {"lf":(px,py,pz,fx,fy,fz)}
        self.feet_toes_last_touch = {"lf":0,"rf":0,"lf1":0,"rf1":0} # foot "lf"/"rf", toe "lf1"/"rf1" World.time_local_ms when foot/toe last touched any surface
        self.feet_toes_are_touching = {"lf":False,"rf":False,"lf1":False,"rf1":False} # foot "lf"/"rf", toe "lf1"/"rf1" True if touching in last received server message
        self.fwd_kinematics_list = None # List of body parts, ordered according to dependencies
        self.rel_cart_CoM_position = np.zeros(3) # Center of Mass position, relative to head, in cartesian coordinates (m)

        # Joint variables are optimized for performance / array operations
        self.joints_position = np.zeros(self.no_of_joints) # Joints' angular position (deg)
        self.joints_speed = np.zeros(self.no_of_joints) # Joints' angular speed (rad/s)
        self.joints_target_speed = np.zeros(self.no_of_joints) # Joints' target speed (rad/s) (max: 6.1395 rad/s, see rcssserver3d/data/rsg/agent/nao/hingejoint.rsg)
        self.joints_target_last_speed = np.zeros(self.no_of_joints) # Joints' last target speed (rad/s) (max: 6.1395 rad/s, see rcssserver3d/data/rsg/agent/nao/hingejoint.rsg)
        self.joints_info = [None] * self.no_of_joints # Joints' constant information (see class Joint_Info)
        self.joints_transform = [Matrix_4x4() for _ in range(self.no_of_joints)] # Joints' transformation matrix

        # Localization variables relative to head
        self.loc_head_to_field_transform = Matrix_4x4() # Transformation matrix from head to field
        self.loc_field_to_head_transform = Matrix_4x4() # Transformation matrix from field to head
        self.loc_rotation_head_to_field = Matrix_3x3() # Rotation matrix from head to field
        self.loc_rotation_field_to_head = Matrix_3x3() # Rotation matrix from field to head
        self.loc_head_position = np.zeros(3) # Absolute head position (m)
        self.loc_head_position_history = deque(maxlen=40) # Absolute head position history (queue with up to 40 old positions at intervals of 0.04s, where index 0 is the previous position)
        self.loc_head_velocity = np.zeros(3) # Absolute head velocity (m/s) (Warning: possibly noisy)
        self.loc_head_orientation = 0 # Head orientation (deg)
        self.loc_is_up_to_date = False # False if this is not a visual step, or not enough elements are visible
        self.loc_last_update = 0 # World.time_local_ms when the localization was last updated
        self.loc_head_position_last_update = 0 # World.time_local_ms when loc_head_position was last updated by vision or radio
        self.radio_fallen_state = False # True if (radio says we fell) and (radio is significantly more recent than loc)
        self.radio_last_update = 0 # World.time_local_ms when radio_fallen_state was last updated (and possibly loc_head_position)

        # Localization variables relative to torso
        self.loc_torso_to_field_rotation = Matrix_3x3() # Rotation matrix from torso to field
        self.loc_torso_to_field_transform = Matrix_4x4() # Transformation matrix from torso to field
        self.loc_torso_roll = 0 # Torso roll (deg)
        self.loc_torso_pitch = 0 # Torso pitch (deg)
        self.loc_torso_orientation = 0 # Torso orientation (deg)
        self.loc_torso_inclination = 0 # Torso inclination (deg) (inclination of z-axis in relation to field z-axis)
        self.loc_torso_position = np.zeros(3) # Absolute torso position (m)
        self.loc_torso_velocity = np.zeros(3) # Absolute torso velocity (m/s)
        self.loc_torso_acceleration = np.zeros(3) # Absolute Coordinate acceleration (m/s2)

        # Other localization variables
        self.cheat_abs_pos = np.zeros(3) # Absolute head position provided by the server as cheat (m)
        self.cheat_ori = 0.0 # Absolute head orientation provided by the server as cheat (deg)
        self.loc_CoM_position = np.zeros(3) # Absolute CoM position (m)
        self.loc_CoM_velocity = np.zeros(3) # Absolute CoM velocity (m/s)

        # Localization special variables
        '''
        self.loc_head_z is often equivalent to self.loc_head_position[2], but sometimes it differs.
        There are situations in which the rotation and translation cannot be computed,
        but the z-coordinate can still be found through vision, in which case:
            self.loc_is_up_to_date is False
            self.loc_head_z_is_up_to_date is True
        It should be used in applications which rely on z as an independent coordinate, such
        as detecting if the robot has fallen, or as an observation for machine learning.
        It should NEVER be used for 3D transformations.
        '''
        self.loc_head_z = 0 # Absolute head position (z) - see above for explanation (m)
        self.loc_head_z_is_up_to_date = False # False if this is not a visual step, or not enough elements are visible
        self.loc_head_z_last_update = 0 # World.time_local_ms when loc_head_z was last computed
        self.loc_head_z_vel = 0 # Absolute head velocity (z) (m/s)

        # Localization + Gyroscope
        # These variables are reliable. The gyroscope is used to update the rotation when waiting for the next visual cycle
        self.imu_torso_roll = 0 # Torso roll (deg) (src: Localization + Gyro)
        self.imu_torso_pitch = 0 # Torso pitch (deg) (src: Localization + Gyro)
        self.imu_torso_orientation = 0 # Torso orientation (deg) (src: Localization + Gyro)
        self.imu_torso_inclination = 0 # Torso inclination (deg) (src: Localization + Gyro)
        self.imu_torso_to_field_rotation = Matrix_3x3() # Rotation matrix from torso to field (src: Localization + Gyro)
        self.imu_last_visual_update = 0 # World.time_local_ms when the IMU data was last updated with visual information

        # Localization + Gyroscope + Accelerometer
        # Warning: these variables are unreliable, since small errors in the Localization Orientation lead to
        # wrong acceleration -> wrong velocity -> wrong position
        self.imu_weak_torso_to_field_transform = Matrix_4x4() # Transformation matrix from torso to field (src: Localization + Gyro + Acc)
        self.imu_weak_head_to_field_transform = Matrix_4x4() # Transformation matrix from head to field (src: Localization + Gyro + Acc)
        self.imu_weak_field_to_head_transform = Matrix_4x4() # Transformation matrix from field to head (src: Localization + Gyro + Acc)
        self.imu_weak_torso_position = np.zeros(3) # Absolute torso position (m) (src: Localization + Gyro + Acc)
        self.imu_weak_torso_velocity = np.zeros(3) # Absolute torso velocity (m/s) (src: Localization + Gyro + Acc)
        self.imu_weak_torso_acceleration = np.zeros(3) # Absolute torso acceleration (m/s2) (src: Localization + Gyro + Acc)
        self.imu_weak_torso_next_position = np.zeros(3) # Absolute position in next step estimate (m) (src: Localization + Gyro + Acc)
        self.imu_weak_torso_next_velocity = np.zeros(3) # Absolute velocity in next step estimate (m/s) (src: Localization + Gyro + Acc)
        self.imu_weak_CoM_position = np.zeros(3) # Absolute CoM position (m) (src: Localization + Gyro + Acc)
        self.imu_weak_CoM_velocity = np.zeros(3) # Absolute CoM velocity (m/s) (src: Localization + Gyro + Acc)

        # Using explicit variables to enable IDE suggestions
        self.J_HEAD_YAW = 0
        self.J_HEAD_PITCH = 1
        self.J_LLEG_YAW_PITCH = 2
        self.J_RLEG_YAW_PITCH = 3
        self.J_LLEG_ROLL = 4
        self.J_RLEG_ROLL = 5
        self.J_LLEG_PITCH = 6
        self.J_RLEG_PITCH = 7
        self.J_LKNEE = 8
        self.J_RKNEE = 9
        self.J_LFOOT_PITCH = 10
        self.J_RFOOT_PITCH = 11
        self.J_LFOOT_ROLL = 12
        self.J_RFOOT_ROLL = 13
        self.J_LARM_PITCH = 14
        self.J_RARM_PITCH = 15
        self.J_LARM_ROLL = 16
        self.J_RARM_ROLL = 17
        self.J_LELBOW_YAW = 18
        self.J_RELBOW_YAW = 19
        self.J_LELBOW_ROLL = 20
        self.J_RELBOW_ROLL = 21
        self.J_LTOE_PITCH = 22
        self.J_RTOE_PITCH = 23

        # ------------------ parse robot xml
        dir = M.get_active_directory("/world/commons/robots/")
        robot_xml_root = xmlp.parse(dir + robot_xml).getroot()

        joint_no = 0
        for child in robot_xml_root:
            if child.tag == "bodypart":
self.body_parts[child.attrib['name']] = Body_Part(child.attrib['mass'])
3
2023-12-16 23:40:23+00:00
24k
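The symmetry constants in the record above (FIX_PERCEPTOR_SET, FIX_INDICES_LIST, and the derived FIX_EFFECTOR_MASK) exist because a handful of joints are mirrored in the scene description, so their perceptor readings and effector commands must be sign-flipped. A minimal sketch of how such a mask might be applied; the helper names store_perceptor and effector_commands are illustrative, not part of the record:

import numpy as np

FIX_PERCEPTOR_SET = {"rlj2", "rlj6", "raj2", "laj3", "laj4"}
FIX_INDICES_LIST = [5, 13, 17, 18, 20]
MAP_PERCEPTOR_TO_INDEX = {"rlj2": 5, "rlj6": 13, "raj2": 17, "laj3": 18, "laj4": 20}  # abbreviated

no_of_joints = 24
fix_effector_mask = np.ones(no_of_joints)
fix_effector_mask[FIX_INDICES_LIST] = -1

def store_perceptor(joints_position, name, value):
    # Mirrored joints report a negated angle, so flip the reading on input.
    idx = MAP_PERCEPTOR_TO_INDEX[name]
    joints_position[idx] = -value if name in FIX_PERCEPTOR_SET else value

def effector_commands(joints_target_speed):
    # Flip the commanded speed for the same mirrored joints on output.
    return joints_target_speed * fix_effector_mask

joints = np.zeros(no_of_joints)
store_perceptor(joints, "rlj2", 30.0)                # stored as -30.0
speeds = effector_commands(np.ones(no_of_joints))    # indices 5, 13, 17, 18, 20 become -1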
Sam-Izdat/tinycio
src/tinycio/tonemapping.py
[ { "identifier": "applyAgX", "path": "src/tinycio/np_agx/agx.py", "snippet": "def applyAgX(array):\n # type: (numpy.ndarray) -> numpy.ndarray\n \"\"\"\n -> take linear - sRGB image data as input\n - apply custom grading if any\n - apply the AgX Punchy view-transform\n - return a display...
import torch
import numpy as np
import typing
from enum import IntEnum
from .np_agx.agx import applyAgX, applyAgXPunchy
from .colorspace import ColorSpace, TransferFunction
17,663
class ToneMapping:
    """
    Map high-dynamic-range values to low-dynamic-range. LDR is typically sRGB in [0, 1] range.

    Example:

    .. highlight:: python
    .. code-block:: python

        tm = ToneMapping.Variant.HABLE
        tonemapped_image = ToneMapping.apply(input_im, tone_mapper=tm)
    """

    class Variant(IntEnum):
        """
        Tone mapper enum. Available options are:

        .. highlight:: text
        .. code-block:: text

            - NONE
            - CLAMP
            - AGX
            - AGX_PUNCHY
            - HABLE
            - REINHARD
            - ACESCG
        """
        NONE        = 1<<0
        CLAMP       = 1<<1
        AGX         = 1<<2
        AGX_PUNCHY  = 1<<3
        HABLE       = 1<<4
        REINHARD    = 1<<5
        ACESCG      = 1<<6

        IP_SRGB_LIN = CLAMP | AGX | AGX_PUNCHY | HABLE | REINHARD
        IP_ACESCG   = ACESCG
        OP_SRGB_LIN = CLAMP | AGX | AGX_PUNCHY | HABLE | REINHARD
        OP_ACESCG   = ACESCG

        DISABLED    = 0

    @classmethod
    def apply(cls, im:torch.Tensor, tone_mapper:Variant):
        """
        Apply tone mapping to HDR image tensor. Input data is expected to be
        in the correct color space for the chosen tone mapper.

        .. note::
            :code:`ACESCG` tone mapping is performed on AP1 primaries and expects input
            in the :code:`ACESCG` color space. All other tone mappers expect :code:`SRGB_LIN`.
            The :code:`tone_map()` method of :class:`ColorImage` handles this conversion automatically.

        :param torch.Tensor im: [C=3, H, W] sized image tensor
        :param ToneMapping.Variant tone_mapper: tonemapper to be used
        :return: image tensor
        :rtype: torch.Tensor
        """
        assert im.dim() == 3 and im.size(0) == 3, f"expected [C=3, H, W] image tensor, got {im.size()}"
        op, tm = tone_mapper, cls.Variant
        err_not_supported, err_disabled = f"ToneMapping {op.name} is not supported", f"ToneMapping {op.name} is disabled"
        if op & tm.DISABLED: raise Exception(err_disabled)
        if op == tm.NONE:           return im
        elif op == tm.CLAMP:        return im.clamp(0., 1.)
        elif op == tm.AGX:          return cls._agx(im)
        elif op == tm.AGX_PUNCHY:   return cls._agx_punchy(im)
        elif op == tm.HABLE:        return cls._hable(im)
        elif op == tm.REINHARD:     return cls._reinhard_extended_luminance(im)
        elif op == tm.ACESCG:       return cls._aces_fitted(im)
        else: raise Exception(err_not_supported)
        # NOTE: the original trailing `return out` was unreachable dead code
        # (every branch above returns or raises) and has been removed.

    @classmethod
    def _agx(cls, im:torch.Tensor):
        device = im.device
        out = applyAgX(im.permute(1, 2, 0).cpu().numpy())
        out = torch.from_numpy(out).permute(2, 0, 1).to(device)
        return TransferFunction.srgb_eotf(out.clamp(0., 1.))

    @classmethod
    def _agx_punchy(cls, im:torch.Tensor):
        device = im.device
class ToneMapping:
    """
    Map high-dynamic-range values to low-dynamic-range. LDR is typically sRGB in [0, 1] range.

    Example:

    .. highlight:: python
    .. code-block:: python

        tm = ToneMapping.Variant.HABLE
        tonemapped_image = ToneMapping.apply(input_im, tone_mapper=tm)
    """

    class Variant(IntEnum):
        """
        Tone mapper enum. Available options are:

        .. highlight:: text
        .. code-block:: text

            - NONE
            - CLAMP
            - AGX
            - AGX_PUNCHY
            - HABLE
            - REINHARD
            - ACESCG
        """
        NONE        = 1<<0
        CLAMP       = 1<<1
        AGX         = 1<<2
        AGX_PUNCHY  = 1<<3
        HABLE       = 1<<4
        REINHARD    = 1<<5
        ACESCG      = 1<<6

        IP_SRGB_LIN = CLAMP | AGX | AGX_PUNCHY | HABLE | REINHARD
        IP_ACESCG   = ACESCG
        OP_SRGB_LIN = CLAMP | AGX | AGX_PUNCHY | HABLE | REINHARD
        OP_ACESCG   = ACESCG

        DISABLED    = 0

    @classmethod
    def apply(cls, im:torch.Tensor, tone_mapper:Variant):
        """
        Apply tone mapping to HDR image tensor. Input data is expected to be
        in the correct color space for the chosen tone mapper.

        .. note::
            :code:`ACESCG` tone mapping is performed on AP1 primaries and expects input
            in the :code:`ACESCG` color space. All other tone mappers expect :code:`SRGB_LIN`.
            The :code:`tone_map()` method of :class:`ColorImage` handles this conversion automatically.

        :param torch.Tensor im: [C=3, H, W] sized image tensor
        :param ToneMapping.Variant tone_mapper: tonemapper to be used
        :return: image tensor
        :rtype: torch.Tensor
        """
        assert im.dim() == 3 and im.size(0) == 3, f"expected [C=3, H, W] image tensor, got {im.size()}"
        op, tm = tone_mapper, cls.Variant
        err_not_supported, err_disabled = f"ToneMapping {op.name} is not supported", f"ToneMapping {op.name} is disabled"
        if op & tm.DISABLED: raise Exception(err_disabled)
        if op == tm.NONE:           return im
        elif op == tm.CLAMP:        return im.clamp(0., 1.)
        elif op == tm.AGX:          return cls._agx(im)
        elif op == tm.AGX_PUNCHY:   return cls._agx_punchy(im)
        elif op == tm.HABLE:        return cls._hable(im)
        elif op == tm.REINHARD:     return cls._reinhard_extended_luminance(im)
        elif op == tm.ACESCG:       return cls._aces_fitted(im)
        else: raise Exception(err_not_supported)
        # NOTE: the original trailing `return out` was unreachable dead code
        # (every branch above returns or raises) and has been removed.

    @classmethod
    def _agx(cls, im:torch.Tensor):
        device = im.device
        out = applyAgX(im.permute(1, 2, 0).cpu().numpy())
        out = torch.from_numpy(out).permute(2, 0, 1).to(device)
        return TransferFunction.srgb_eotf(out.clamp(0., 1.))

    @classmethod
    def _agx_punchy(cls, im:torch.Tensor):
        device = im.device
out = applyAgXPunchy(im.permute(1, 2, 0).cpu().numpy())
1
2023-12-15 15:39:08+00:00
24k
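The ToneMapping.apply docstring above doubles as a usage recipe. A short sketch of that usage, assuming ToneMapping is importable from the package top level (an assumption based on the repo layout) and using a synthetic tensor in place of real linear-sRGB HDR data:

import torch
from tinycio import ToneMapping  # assumed public import path

input_im = torch.rand(3, 256, 256) * 4.0  # synthetic HDR values in [0, 4)
tonemapped = ToneMapping.apply(input_im, tone_mapper=ToneMapping.Variant.HABLE)
assert tonemapped.shape == (3, 256, 256)  # LDR output, same [C=3, H, W] layout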
quocanh34/magic-animate-modified
magicanimate/pipelines/pipeline_animation.py
[ { "identifier": "UNet3DConditionModel", "path": "magicanimate/models/unet_controlnet.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,...
import inspect, math
import numpy as np
import torch
import torch.distributed as dist
from typing import Callable, List, Optional, Union
from dataclasses import dataclass
from PIL import Image
from tqdm import tqdm
from diffusers.utils import is_accelerate_available
from packaging import version
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL
from diffusers.pipeline_utils import DiffusionPipeline
from diffusers.schedulers import (
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
)
from diffusers.utils import deprecate, logging, BaseOutput
from einops import rearrange
from magicanimate.models.unet_controlnet import UNet3DConditionModel
from magicanimate.models.multicontrolnet import ControlNetProcessor  # fix
from magicanimate.models.mutual_self_attention import ReferenceAttentionControl
from magicanimate.pipelines.context import (
    get_context_scheduler,
    get_total_steps
)
from magicanimate.utils.util import get_tensor_interpolation_method
from accelerate import cpu_offload
15,845
""" Adapted from: https://github.com/Yujun-Shi/DragDiffusion/blob/main/drag_pipeline.py#L440 invert a real image into noise map with determinisc DDIM inversion """ device = self._execution_device batch_size = image.shape[0] if isinstance(prompt, list): if batch_size == 1: image = image.expand(len(prompt), -1, -1, -1) elif isinstance(prompt, str): if batch_size > 1: prompt = [prompt] * batch_size # text embeddings text_input = self.tokenizer( prompt, padding="max_length", max_length=77, return_tensors="pt" ) text_embeddings = self.text_encoder(text_input.input_ids.to(device))[0] print("input text embeddings :", text_embeddings.shape) # define initial latents latents = self.images2latents(image) print("latents shape: ", latents.shape) # interative sampling self.scheduler.set_timesteps(num_inference_steps) print("Valid timesteps: ", reversed(self.scheduler.timesteps)) latents_list = [latents] pred_x0_list = [latents] for i, t in enumerate(tqdm(reversed(self.scheduler.timesteps), desc="DDIM Inversion")): if num_actual_inference_steps is not None and i >= num_actual_inference_steps: continue model_inputs = latents # predict the noise # NOTE: the u-net here is UNet3D, therefore the model_inputs need to be of shape (b c f h w) model_inputs = rearrange(model_inputs, "f c h w -> 1 c f h w") noise_pred = self.unet(model_inputs, t, encoder_hidden_states=text_embeddings).sample noise_pred = rearrange(noise_pred, "b c f h w -> (b f) c h w") # compute the previous noise sample x_t-1 -> x_t latents, pred_x0 = self.next_step(noise_pred, t, latents) latents_list.append(latents) pred_x0_list.append(pred_x0) if return_intermediates: # return the intermediate laters during inversion return latents, latents_list return latents def interpolate_latents(self, latents: torch.Tensor, interpolation_factor:int, device ): if interpolation_factor < 2: return latents new_latents = torch.zeros( (latents.shape[0],latents.shape[1],((latents.shape[2]-1) * interpolation_factor)+1, latents.shape[3],latents.shape[4]), device=latents.device, dtype=latents.dtype, ) org_video_length = latents.shape[2] rate = [i/interpolation_factor for i in range(interpolation_factor)][1:] new_index = 0 v0 = None v1 = None for i0,i1 in zip( range( org_video_length ),range( org_video_length )[1:] ): v0 = latents[:,:,i0,:,:] v1 = latents[:,:,i1,:,:] new_latents[:,:,new_index,:,:] = v0 new_index += 1 for f in rate: v = get_tensor_interpolation_method()(v0.to(device=device),v1.to(device=device),f) new_latents[:,:,new_index,:,:] = v.to(latents.device) new_index += 1 new_latents[:,:,new_index,:,:] = v1 new_index += 1 return new_latents def select_controlnet_res_samples(self, controlnet_res_samples_cache_dict, context, do_classifier_free_guidance, b, f): _down_block_res_samples = [] _mid_block_res_sample = [] for i in np.concatenate(np.array(context)): _down_block_res_samples.append(controlnet_res_samples_cache_dict[i][0]) _mid_block_res_sample.append(controlnet_res_samples_cache_dict[i][1]) down_block_res_samples = [[] for _ in range(len(controlnet_res_samples_cache_dict[i][0]))] for res_t in _down_block_res_samples: for i, res in enumerate(res_t): down_block_res_samples[i].append(res) down_block_res_samples = [torch.cat(res) for res in down_block_res_samples] mid_block_res_sample = torch.cat(_mid_block_res_sample) # reshape controlnet output to match the unet3d inputs b = b // 2 if do_classifier_free_guidance else b _down_block_res_samples = [] for sample in down_block_res_samples: sample = rearrange(sample, '(b f) c h w -> b c f h w', b=b, f=f) if 
do_classifier_free_guidance: sample = sample.repeat(2, 1, 1, 1, 1) _down_block_res_samples.append(sample) down_block_res_samples = _down_block_res_samples mid_block_res_sample = rearrange(mid_block_res_sample, '(b f) c h w -> b c f h w', b=b, f=f) if do_classifier_free_guidance: mid_block_res_sample = mid_block_res_sample.repeat(2, 1, 1, 1, 1) return down_block_res_samples, mid_block_res_sample @torch.no_grad() def __call__( self, prompt: Union[str, List[str]],
# *************************************************************************
# This file may have been modified by Bytedance Inc. (“Bytedance Inc.'s Mo-
# difications”). All Bytedance Inc.'s Modifications are Copyright (2023) B-
# ytedance Inc..
# *************************************************************************

# Adapted from https://github.com/showlab/Tune-A-Video/blob/main/tuneavideo/pipelines/pipeline_tuneavideo.py

# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
TODO:
1. support multi-controlnet
2. [DONE] support DDIM inversion
3. support Prompt-to-prompt
"""

# from magicanimate.models.controlnet import ControlNetModel

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


@dataclass
class AnimationPipelineOutput(BaseOutput):
    videos: Union[torch.Tensor, np.ndarray]


class AnimationPipeline(DiffusionPipeline):
    _optional_components = []

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet3DConditionModel,
        # controlnet: ControlNetModel,
        # processors: List[ControlNetProcessor],
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        super().__init__()

        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might lead to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
                " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
                " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
                " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
                " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
            )
            deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["clip_sample"] = False
            scheduler._internal_dict = FrozenDict(new_config)

        is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
            version.parse(unet.config._diffusers_version).base_version
        ) < version.parse("0.9.0.dev0")
        is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
        if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
            deprecation_message = (
                "The configuration file of the unet has set the default `sample_size` to smaller than"
                " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
                " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
                " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
                " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
                " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
                " in the config might lead to incorrect results in future versions. If you have downloaded this"
                " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
                " the `unet/config.json` file"
            )
            deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(unet.config)
            new_config["sample_size"] = 64
            unet._internal_dict = FrozenDict(new_config)

        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            # controlnet1=processors[0],
            scheduler=scheduler,
        )
        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)

    def enable_vae_slicing(self):
        self.vae.enable_slicing()

    def disable_vae_slicing(self):
        self.vae.disable_slicing()

    def enable_sequential_cpu_offload(self, gpu_id=0):
        # (fixed: the original `if is_accelerate_available(): else:` had an empty body,
        # since `from accelerate import cpu_offload` was hoisted into the import list)
        if not is_accelerate_available():
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    def _encode_prompt(self, prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt):
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )

        if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
            attention_mask = text_inputs.attention_mask.to(device)
        else:
            attention_mask = None

        text_embeddings = self.text_encoder(
            text_input_ids.to(device),
            attention_mask=attention_mask,
        )
        text_embeddings = text_embeddings[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_videos_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_videos_per_prompt, seq_len, -1)

        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )

            if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
                attention_mask = uncond_input.attention_mask.to(device)
            else:
                attention_mask = None

            uncond_embeddings = self.text_encoder(
                uncond_input.input_ids.to(device),
                attention_mask=attention_mask,
            )
            uncond_embeddings = uncond_embeddings[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_videos_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_videos_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        return text_embeddings

    def decode_latents(self, latents, rank, decoder_consistency=None):
        video_length = latents.shape[2]
        latents = 1 / 0.18215 * latents
        latents = rearrange(latents, "b c f h w -> (b f) c h w")
        # video = self.vae.decode(latents).sample
        video = []
        for frame_idx in tqdm(range(latents.shape[0]), disable=(rank != 0)):
            if decoder_consistency is not None:
                video.append(decoder_consistency(latents[frame_idx:frame_idx+1]))
            else:
                video.append(self.vae.decode(latents[frame_idx:frame_idx+1]).sample)
        video = torch.cat(video)
        video = rearrange(video, "(b f) c h w -> b c f h w", f=video_length)
        video = (video / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        video = video.cpu().float().numpy()
        return video

    def prepare_extra_step_kwargs(self, generator, eta):
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator
        return extra_step_kwargs

    def check_inputs(self, prompt, height, width, callback_steps):
        if not isinstance(prompt, str) and not isinstance(prompt, list):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

    def prepare_latents(self, batch_size, num_channels_latents, video_length, height, width, dtype, device, generator, latents=None, clip_length=16):
        shape = (batch_size, num_channels_latents, clip_length, height // self.vae_scale_factor, width // self.vae_scale_factor)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )
        if latents is None:
            rand_device = "cpu" if device.type == "mps" else device
            if isinstance(generator, list):
                latents = [
                    torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype)
                    for i in range(batch_size)
                ]
                latents = torch.cat(latents, dim=0).to(device)
            else:
                latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype).to(device)

            latents = latents.repeat(1, 1, video_length//clip_length, 1, 1)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        return latents

    def prepare_condition(self, condition1, condition2, num_videos_per_prompt, device, dtype, do_classifier_free_guidance):
        # Prepare first condition
        condition1 = torch.from_numpy(condition1.copy()).to(device=device, dtype=dtype) / 255.0
        condition1 = torch.stack([condition1 for _ in range(num_videos_per_prompt)], dim=0)
        condition1 = rearrange(condition1, 'b f h w c -> (b f) c h w').clone()

        # Prepare second condition
        condition2 = torch.from_numpy(condition2.copy()).to(device=device, dtype=dtype) / 255.0
        condition2 = torch.stack([condition2 for _ in range(num_videos_per_prompt)], dim=0)
        condition2 = rearrange(condition2, 'b f h w c -> (b f) c h w').clone()

        # Weighted combination of the two conditions (0.8 / 0.2)
        combined_condition = (condition1*8 + condition2*2) / 10

        if do_classifier_free_guidance:
            combined_condition = torch.cat([combined_condition] * 2)
        # combined_condition = torch.from_numpy(combined_condition.copy()).to(device=device, dtype=dtype)
        return combined_condition

    def next_step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        x: torch.FloatTensor,
        eta=0.,
        verbose=False
    ):
        """
        Inverse sampling for DDIM Inversion
        """
        if verbose:
            print("timestep: ", timestep)
        next_step = timestep
        timestep = min(timestep - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps, 999)
        alpha_prod_t = self.scheduler.alphas_cumprod[timestep] if timestep >= 0 else self.scheduler.final_alpha_cumprod
        alpha_prod_t_next = self.scheduler.alphas_cumprod[next_step]
        beta_prod_t = 1 - alpha_prod_t
        pred_x0 = (x - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
        pred_dir = (1 - alpha_prod_t_next)**0.5 * model_output
        x_next = alpha_prod_t_next**0.5 * pred_x0 + pred_dir
        return x_next, pred_x0

    @torch.no_grad()
    def images2latents(self, images, dtype):
        """
        Convert RGB image to VAE latents
        """
        device = self._execution_device
        images = torch.from_numpy(images).float().to(dtype) / 127.5 - 1
        images = rearrange(images, "f h w c -> f c h w").to(device)
        latents = []
        for frame_idx in range(images.shape[0]):
            latents.append(self.vae.encode(images[frame_idx:frame_idx+1])['latent_dist'].mean * 0.18215)
        latents = torch.cat(latents)
        return latents

    @torch.no_grad()
    def invert(
        self,
        image: torch.Tensor,
        prompt,
        num_inference_steps=20,
        num_actual_inference_steps=10,
        eta=0.0,
        return_intermediates=False,
        **kwargs):
        """
        Adapted from: https://github.com/Yujun-Shi/DragDiffusion/blob/main/drag_pipeline.py#L440
        invert a real image into noise map with deterministic DDIM inversion
        """
        device = self._execution_device
        batch_size = image.shape[0]
        if isinstance(prompt, list):
            if batch_size == 1:
                image = image.expand(len(prompt), -1, -1, -1)
        elif isinstance(prompt, str):
            if batch_size > 1:
                prompt = [prompt] * batch_size

        # text embeddings
        text_input = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=77,
            return_tensors="pt"
        )
        text_embeddings = self.text_encoder(text_input.input_ids.to(device))[0]
        print("input text embeddings :", text_embeddings.shape)

        # define initial latents
        # (fixed: images2latents requires a dtype argument; the embedding dtype is assumed here)
        latents = self.images2latents(image, text_embeddings.dtype)
        print("latents shape: ", latents.shape)

        # iterative sampling
        self.scheduler.set_timesteps(num_inference_steps)
        print("Valid timesteps: ", reversed(self.scheduler.timesteps))
        latents_list = [latents]
        pred_x0_list = [latents]
        for i, t in enumerate(tqdm(reversed(self.scheduler.timesteps), desc="DDIM Inversion")):
            if num_actual_inference_steps is not None and i >= num_actual_inference_steps:
                continue
            model_inputs = latents

            # predict the noise
            # NOTE: the u-net here is UNet3D, therefore the model_inputs need to be of shape (b c f h w)
            model_inputs = rearrange(model_inputs, "f c h w -> 1 c f h w")
            noise_pred = self.unet(model_inputs, t, encoder_hidden_states=text_embeddings).sample
            noise_pred = rearrange(noise_pred, "b c f h w -> (b f) c h w")

            # compute the next (noisier) sample x_t -> x_t+1 for DDIM inversion
            latents, pred_x0 = self.next_step(noise_pred, t, latents)
            latents_list.append(latents)
            pred_x0_list.append(pred_x0)

        if return_intermediates:
            # return the intermediate latents during inversion
            return latents, latents_list
        return latents

    def interpolate_latents(self, latents: torch.Tensor, interpolation_factor: int, device):
        if interpolation_factor < 2:
            return latents

        new_latents = torch.zeros(
            (latents.shape[0], latents.shape[1], ((latents.shape[2]-1) * interpolation_factor)+1, latents.shape[3], latents.shape[4]),
            device=latents.device,
            dtype=latents.dtype,
        )

        org_video_length = latents.shape[2]
        rate = [i/interpolation_factor for i in range(interpolation_factor)][1:]

        new_index = 0
        v0 = None
        v1 = None

        for i0, i1 in zip(range(org_video_length), range(org_video_length)[1:]):
            v0 = latents[:,:,i0,:,:]
            v1 = latents[:,:,i1,:,:]

            new_latents[:,:,new_index,:,:] = v0
            new_index += 1

            for f in rate:
                v = get_tensor_interpolation_method()(v0.to(device=device), v1.to(device=device), f)
                new_latents[:,:,new_index,:,:] = v.to(latents.device)
                new_index += 1

        new_latents[:,:,new_index,:,:] = v1
        new_index += 1

        return new_latents

    def select_controlnet_res_samples(self, controlnet_res_samples_cache_dict, context, do_classifier_free_guidance, b, f):
        _down_block_res_samples = []
        _mid_block_res_sample = []
        for i in np.concatenate(np.array(context)):
            _down_block_res_samples.append(controlnet_res_samples_cache_dict[i][0])
            _mid_block_res_sample.append(controlnet_res_samples_cache_dict[i][1])
        down_block_res_samples = [[] for _ in range(len(controlnet_res_samples_cache_dict[i][0]))]
        for res_t in _down_block_res_samples:
            for i, res in enumerate(res_t):
                down_block_res_samples[i].append(res)
        down_block_res_samples = [torch.cat(res) for res in down_block_res_samples]
        mid_block_res_sample = torch.cat(_mid_block_res_sample)

        # reshape controlnet output to match the unet3d inputs
        b = b // 2 if do_classifier_free_guidance else b
        _down_block_res_samples = []
        for sample in down_block_res_samples:
            sample = rearrange(sample, '(b f) c h w -> b c f h w', b=b, f=f)
            if do_classifier_free_guidance:
                sample = sample.repeat(2, 1, 1, 1, 1)
            _down_block_res_samples.append(sample)
        down_block_res_samples = _down_block_res_samples
        mid_block_res_sample = rearrange(mid_block_res_sample, '(b f) c h w -> b c f h w', b=b, f=f)
        if do_classifier_free_guidance:
            mid_block_res_sample = mid_block_res_sample.repeat(2, 1, 1, 1, 1)

        return down_block_res_samples, mid_block_res_sample

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
processors: List[ControlNetProcessor], #fix
1
2023-12-15 01:22:37+00:00
24k
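next_step() in the record above implements one step of deterministic DDIM inversion: it recovers the clean-image estimate pred_x0 from the noise prediction at timestep t, then re-noises it toward the next (noisier) timestep. A minimal standalone restatement of that update rule, with the cumulative alphas passed in as plain floats instead of being read from a scheduler:

import torch

def ddim_inversion_step(eps: torch.Tensor, x: torch.Tensor,
                        alpha_prod_t: float, alpha_prod_t_next: float):
    # Clean-image estimate implied by the noise prediction eps at step t.
    pred_x0 = (x - (1 - alpha_prod_t) ** 0.5 * eps) / alpha_prod_t ** 0.5
    # Deterministic (eta = 0) move to the next, noisier timestep.
    pred_dir = (1 - alpha_prod_t_next) ** 0.5 * eps
    x_next = alpha_prod_t_next ** 0.5 * pred_x0 + pred_dir
    return x_next, pred_x0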
Azure-Samples/functions-python-web-crawler
.venv/Lib/site-packages/urllib3/connection.py
[ { "identifier": "HTTPHeaderDict", "path": ".venv/Lib/site-packages/urllib3/_collections.py", "snippet": "class HTTPHeaderDict(typing.MutableMapping[str, str]):\n \"\"\"\n :param headers:\n An iterable of field-value pairs. Must not contain multiple field names\n when compared case-in...
import datetime
import logging
import os
import re
import socket
import sys
import typing
import warnings
import ssl
from http.client import HTTPConnection as _HTTPConnection
from http.client import HTTPException as HTTPException  # noqa: F401
from http.client import ResponseNotReady
from socket import timeout as SocketTimeout
from typing import Literal
from .response import HTTPResponse
from .util.ssl_ import _TYPE_PEER_CERT_RET_DICT
from .util.ssltransport import SSLTransport
from ._collections import HTTPHeaderDict
from .util.response import assert_header_parsing
from .util.timeout import _DEFAULT_TIMEOUT, _TYPE_TIMEOUT, Timeout
from .util.util import to_str
from .util.wait import wait_for_read
from ._base_connection import _TYPE_BODY
from ._base_connection import ProxyConfig as ProxyConfig
from ._base_connection import _ResponseOptions as _ResponseOptions
from ._version import __version__
from .exceptions import (
    ConnectTimeoutError,
    HeaderParsingError,
    NameResolutionError,
    NewConnectionError,
    ProxyError,
    SystemTimeWarning,
)
from .util import SKIP_HEADER, SKIPPABLE_HEADERS, connection, ssl_
from .util.request import body_to_chunks
from .util.ssl_ import assert_fingerprint as _assert_fingerprint
from .util.ssl_ import (
    create_urllib3_context,
    is_ipaddress,
    resolve_cert_reqs,
    resolve_ssl_version,
    ssl_wrap_socket,
)
from .util.ssl_match_hostname import CertificateError, match_hostname
from .util.url import Url
from .response import HTTPResponse
15,905
        self._response_options = _ResponseOptions(
            request_method=method,
            request_url=url,
            preload_content=preload_content,
            decode_content=decode_content,
            enforce_content_length=enforce_content_length,
        )

        if headers is None:
            headers = {}
        header_keys = frozenset(to_str(k.lower()) for k in headers)
        skip_accept_encoding = "accept-encoding" in header_keys
        skip_host = "host" in header_keys
        self.putrequest(
            method, url, skip_accept_encoding=skip_accept_encoding, skip_host=skip_host
        )

        # Transform the body into an iterable of sendall()-able chunks
        # and detect if an explicit Content-Length is doable.
        chunks_and_cl = body_to_chunks(body, method=method, blocksize=self.blocksize)
        chunks = chunks_and_cl.chunks
        content_length = chunks_and_cl.content_length

        # When chunked is explicitly set to 'True' we respect that.
        if chunked:
            if "transfer-encoding" not in header_keys:
                self.putheader("Transfer-Encoding", "chunked")
        else:
            # Detect whether a framing mechanism is already in use. If so
            # we respect that value, otherwise we pick chunked vs content-length
            # depending on the type of 'body'.
            if "content-length" in header_keys:
                chunked = False
            elif "transfer-encoding" in header_keys:
                chunked = True
            # Otherwise we go off the recommendation of 'body_to_chunks()'.
            else:
                chunked = False
                if content_length is None:
                    if chunks is not None:
                        chunked = True
                        self.putheader("Transfer-Encoding", "chunked")
                else:
                    self.putheader("Content-Length", str(content_length))

        # Now that framing headers are out of the way we send all the other headers.
        if "user-agent" not in header_keys:
            self.putheader("User-Agent", _get_default_user_agent())
        for header, value in headers.items():
            self.putheader(header, value)
        self.endheaders()

        # If we're given a body we start sending that in chunks.
        if chunks is not None:
            for chunk in chunks:
                # Sending empty chunks isn't allowed for TE: chunked
                # as it indicates the end of the body.
                if not chunk:
                    continue
                if isinstance(chunk, str):
                    chunk = chunk.encode("utf-8")
                if chunked:
                    self.send(b"%x\r\n%b\r\n" % (len(chunk), chunk))
                else:
                    self.send(chunk)

        # Regardless of whether we have a body or not, if we're in
        # chunked mode we want to send an explicit empty chunk.
        if chunked:
            self.send(b"0\r\n\r\n")

    def request_chunked(
        self,
        method: str,
        url: str,
        body: _TYPE_BODY | None = None,
        headers: typing.Mapping[str, str] | None = None,
    ) -> None:
        """
        Alternative to the common request method, which sends the
        body with chunked encoding and not as one block
        """
        warnings.warn(
            "HTTPConnection.request_chunked() is deprecated and will be removed "
            "in urllib3 v2.1.0. Instead use HTTPConnection.request(..., chunked=True).",
            category=DeprecationWarning,
            stacklevel=2,
        )
        self.request(method, url, body=body, headers=headers, chunked=True)

    def getresponse(  # type: ignore[override]
        self,
    ) -> HTTPResponse:
        """
        Get the response from the server.

        If the HTTPConnection is in the correct state, returns an instance of
        HTTPResponse or of whatever object is returned by the response_class variable.

        If a request has not been sent or if a previous response has not been handled,
        ResponseNotReady is raised.

        If the HTTP response indicates that the connection should be closed, then it
        will be closed before the response is returned. When the connection is closed,
        the underlying socket is closed.
        """
        # Raise the same error as http.client.HTTPConnection
        if self._response_options is None:
            raise ResponseNotReady()

        # Reset this attribute for being used again.
        resp_options = self._response_options
        self._response_options = None

        # Since the connection's timeout value may have been updated
        # we need to set the timeout on the socket.
        self.sock.settimeout(self.timeout)

        # This is needed here to avoid circular import errors

        # Get the response from http.client.HTTPConnection
        httplib_response = super().getresponse()

        try:
            assert_header_parsing(httplib_response.msg)
from __future__ import annotations

if typing.TYPE_CHECKING:
    # (the guarded imports were hoisted into the import list above)
    pass

try:  # Compiled with SSL?
    BaseSSLError = ssl.SSLError
except (ImportError, AttributeError):
    ssl = None  # type: ignore[assignment]

    class BaseSSLError(BaseException):  # type: ignore[no-redef]
        pass

# Not a no-op, we're adding this to the namespace so it can be imported.
ConnectionError = ConnectionError
BrokenPipeError = BrokenPipeError

log = logging.getLogger(__name__)

port_by_scheme = {"http": 80, "https": 443}

# When it comes time to update this value as a part of regular maintenance
# (i.e. test_recent_date is failing) update it to ~6 months before the current date.
RECENT_DATE = datetime.date(2022, 1, 1)

_CONTAINS_CONTROL_CHAR_RE = re.compile(r"[^-!#$%&'*+.^_`|~0-9a-zA-Z]")

_HAS_SYS_AUDIT = hasattr(sys, "audit")


class HTTPConnection(_HTTPConnection):
    """
    Based on :class:`http.client.HTTPConnection` but provides an extra constructor
    backwards-compatibility layer between older and newer Pythons.

    Additional keyword parameters are used to configure attributes of the connection.
    Accepted parameters include:

    - ``source_address``: Set the source address for the current connection.
    - ``socket_options``: Set specific options on the underlying socket. If not specified, then
      defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling
      Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy.

      For example, if you wish to enable TCP Keep Alive in addition to the defaults,
      you might pass:

      .. code-block:: python

          HTTPConnection.default_socket_options + [
              (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
          ]

      Or you may want to disable the defaults by passing an empty list (e.g., ``[]``).
    """

    default_port: typing.ClassVar[int] = port_by_scheme["http"]  # type: ignore[misc]

    #: Disable Nagle's algorithm by default.
    #: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]``
    default_socket_options: typing.ClassVar[connection._TYPE_SOCKET_OPTIONS] = [
        (socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
    ]

    #: Whether this connection verifies the host's certificate.
    is_verified: bool = False

    #: Whether this proxy connection verified the proxy host's certificate.
    # If no proxy is currently connected to the value will be ``None``.
    proxy_is_verified: bool | None = None

    blocksize: int
    source_address: tuple[str, int] | None
    socket_options: connection._TYPE_SOCKET_OPTIONS | None

    _has_connected_to_proxy: bool
    _response_options: _ResponseOptions | None
    _tunnel_host: str | None
    _tunnel_port: int | None
    _tunnel_scheme: str | None

    def __init__(
        self,
        host: str,
        port: int | None = None,
        *,
        timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,
        source_address: tuple[str, int] | None = None,
        blocksize: int = 16384,
        socket_options: None | (connection._TYPE_SOCKET_OPTIONS) = default_socket_options,
        proxy: Url | None = None,
        proxy_config: ProxyConfig | None = None,
    ) -> None:
        super().__init__(
            host=host,
            port=port,
            timeout=Timeout.resolve_default_timeout(timeout),
            source_address=source_address,
            blocksize=blocksize,
        )
        self.socket_options = socket_options
        self.proxy = proxy
        self.proxy_config = proxy_config

        self._has_connected_to_proxy = False
        self._response_options = None
        self._tunnel_host: str | None = None
        self._tunnel_port: int | None = None
        self._tunnel_scheme: str | None = None

    # https://github.com/python/mypy/issues/4125
    # Mypy treats this as LSP violation, which is considered a bug.
    # If `host` is made a property it violates LSP, because a writeable attribute is overridden with a read-only one.
    # However, there is also a `host` setter so LSP is not violated.
    # Potentially, a `@host.deleter` might be needed depending on how this issue will be fixed.
    @property
    def host(self) -> str:
        """
        Getter method to remove any trailing dots that indicate the hostname is an FQDN.

        In general, SSL certificates don't include the trailing dot indicating a
        fully-qualified domain name, and thus, they don't validate properly when
        checked against a domain name that includes the dot. In addition, some
        servers may not expect to receive the trailing dot when provided.

        However, the hostname with trailing dot is critical to DNS resolution; doing a
        lookup with the trailing dot will properly only resolve the appropriate FQDN,
        whereas a lookup without a trailing dot will search the system's search domain
        list. Thus, it's important to keep the original host around for use only in
        those cases where it's appropriate (i.e., when doing DNS lookup to establish the
        actual TCP connection across which we're going to send HTTP requests).
        """
        return self._dns_host.rstrip(".")

    @host.setter
    def host(self, value: str) -> None:
        """
        Setter for the `host` property.

        We assume that only urllib3 uses the _dns_host attribute; httplib itself
        only uses `host`, and it seems reasonable that other libraries follow suit.
        """
        self._dns_host = value

    def _new_conn(self) -> socket.socket:
        """Establish a socket connection and set nodelay settings on it.

        :return: New socket connection.
        """
        try:
            sock = connection.create_connection(
                (self._dns_host, self.port),
                self.timeout,
                source_address=self.source_address,
                socket_options=self.socket_options,
            )
        except socket.gaierror as e:
            raise NameResolutionError(self.host, self, e) from e
        except SocketTimeout as e:
            raise ConnectTimeoutError(
                self,
                f"Connection to {self.host} timed out. (connect timeout={self.timeout})",
            ) from e
        except OSError as e:
            raise NewConnectionError(
                self, f"Failed to establish a new connection: {e}"
            ) from e

        # Audit hooks are only available in Python 3.8+
        if _HAS_SYS_AUDIT:
            sys.audit("http.client.connect", self, self.host, self.port)

        return sock

    def set_tunnel(
        self,
        host: str,
        port: int | None = None,
        headers: typing.Mapping[str, str] | None = None,
        scheme: str = "http",
    ) -> None:
        if scheme not in ("http", "https"):
            raise ValueError(
                f"Invalid proxy scheme for tunneling: {scheme!r}, must be either 'http' or 'https'"
            )
        super().set_tunnel(host, port=port, headers=headers)
        self._tunnel_scheme = scheme

    def connect(self) -> None:
        self.sock = self._new_conn()
        if self._tunnel_host:
            # If we're tunneling it means we're connected to our proxy.
            self._has_connected_to_proxy = True

            # TODO: Fix tunnel so it doesn't depend on self.sock state.
            self._tunnel()  # type: ignore[attr-defined]

        # If there's a proxy to be connected to we are fully connected.
        # This is set twice (once above and here) due to forwarding proxies
        # not using tunnelling.
        self._has_connected_to_proxy = bool(self.proxy)

    @property
    def is_closed(self) -> bool:
        return self.sock is None

    @property
    def is_connected(self) -> bool:
        if self.sock is None:
            return False
        return not wait_for_read(self.sock, timeout=0.0)

    @property
    def has_connected_to_proxy(self) -> bool:
        return self._has_connected_to_proxy

    def close(self) -> None:
        try:
            super().close()
        finally:
            # Reset all stateful properties so connection
            # can be re-used without leaking prior configs.
            self.sock = None
            self.is_verified = False
            self.proxy_is_verified = None
            self._has_connected_to_proxy = False
            self._response_options = None
            self._tunnel_host = None
            self._tunnel_port = None
            self._tunnel_scheme = None

    def putrequest(
        self,
        method: str,
        url: str,
        skip_host: bool = False,
        skip_accept_encoding: bool = False,
    ) -> None:
        """"""
        # Empty docstring because the indentation of CPython's implementation
        # is broken but we don't want this method in our documentation.
        match = _CONTAINS_CONTROL_CHAR_RE.search(method)
        if match:
            raise ValueError(
                f"Method cannot contain non-token characters {method!r} (found at least {match.group()!r})"
            )

        return super().putrequest(
            method, url, skip_host=skip_host, skip_accept_encoding=skip_accept_encoding
        )

    def putheader(self, header: str, *values: str) -> None:
        """"""
        if not any(isinstance(v, str) and v == SKIP_HEADER for v in values):
            super().putheader(header, *values)
        elif to_str(header.lower()) not in SKIPPABLE_HEADERS:
            skippable_headers = "', '".join(
                [str.title(header) for header in sorted(SKIPPABLE_HEADERS)]
            )
            raise ValueError(
                f"urllib3.util.SKIP_HEADER only supports '{skippable_headers}'"
            )

    # `request` method's signature intentionally violates LSP.
    # urllib3's API is different from `http.client.HTTPConnection` and the subclassing is only incidental.
    def request(  # type: ignore[override]
        self,
        method: str,
        url: str,
        body: _TYPE_BODY | None = None,
        headers: typing.Mapping[str, str] | None = None,
        *,
        chunked: bool = False,
        preload_content: bool = True,
        decode_content: bool = True,
        enforce_content_length: bool = True,
    ) -> None:
        # Update the inner socket's timeout value to send the request.
        # This only triggers if the connection is re-used.
        if self.sock is not None:
            self.sock.settimeout(self.timeout)

        # Store these values to be fed into the HTTPResponse
        # object later. TODO: Remove this in favor of a real
        # HTTP lifecycle mechanism.

        # We have to store these before we call .request()
        # because sometimes we can still salvage a response
        # off the wire even if we aren't able to completely
        # send the request body.
        self._response_options = _ResponseOptions(
            request_method=method,
            request_url=url,
            preload_content=preload_content,
            decode_content=decode_content,
            enforce_content_length=enforce_content_length,
        )

        if headers is None:
            headers = {}
        header_keys = frozenset(to_str(k.lower()) for k in headers)
        skip_accept_encoding = "accept-encoding" in header_keys
        skip_host = "host" in header_keys
        self.putrequest(
            method, url, skip_accept_encoding=skip_accept_encoding, skip_host=skip_host
        )

        # Transform the body into an iterable of sendall()-able chunks
        # and detect if an explicit Content-Length is doable.
        chunks_and_cl = body_to_chunks(body, method=method, blocksize=self.blocksize)
        chunks = chunks_and_cl.chunks
        content_length = chunks_and_cl.content_length

        # When chunked is explicitly set to 'True' we respect that.
        if chunked:
            if "transfer-encoding" not in header_keys:
                self.putheader("Transfer-Encoding", "chunked")
        else:
            # Detect whether a framing mechanism is already in use. If so
            # we respect that value, otherwise we pick chunked vs content-length
            # depending on the type of 'body'.
            if "content-length" in header_keys:
                chunked = False
            elif "transfer-encoding" in header_keys:
                chunked = True
            # Otherwise we go off the recommendation of 'body_to_chunks()'.
            else:
                chunked = False
                if content_length is None:
                    if chunks is not None:
                        chunked = True
                        self.putheader("Transfer-Encoding", "chunked")
                else:
                    self.putheader("Content-Length", str(content_length))

        # Now that framing headers are out of the way we send all the other headers.
        if "user-agent" not in header_keys:
            self.putheader("User-Agent", _get_default_user_agent())
        for header, value in headers.items():
            self.putheader(header, value)
        self.endheaders()

        # If we're given a body we start sending that in chunks.
        if chunks is not None:
            for chunk in chunks:
                # Sending empty chunks isn't allowed for TE: chunked
                # as it indicates the end of the body.
                if not chunk:
                    continue
                if isinstance(chunk, str):
                    chunk = chunk.encode("utf-8")
                if chunked:
                    self.send(b"%x\r\n%b\r\n" % (len(chunk), chunk))
                else:
                    self.send(chunk)

        # Regardless of whether we have a body or not, if we're in
        # chunked mode we want to send an explicit empty chunk.
        if chunked:
            self.send(b"0\r\n\r\n")

    def request_chunked(
        self,
        method: str,
        url: str,
        body: _TYPE_BODY | None = None,
        headers: typing.Mapping[str, str] | None = None,
    ) -> None:
        """
        Alternative to the common request method, which sends the
        body with chunked encoding and not as one block
        """
        warnings.warn(
            "HTTPConnection.request_chunked() is deprecated and will be removed "
            "in urllib3 v2.1.0. Instead use HTTPConnection.request(..., chunked=True).",
            category=DeprecationWarning,
            stacklevel=2,
        )
        self.request(method, url, body=body, headers=headers, chunked=True)

    def getresponse(  # type: ignore[override]
        self,
    ) -> HTTPResponse:
        """
        Get the response from the server.

        If the HTTPConnection is in the correct state, returns an instance of
        HTTPResponse or of whatever object is returned by the response_class variable.

        If a request has not been sent or if a previous response has not been handled,
        ResponseNotReady is raised.

        If the HTTP response indicates that the connection should be closed, then it
        will be closed before the response is returned. When the connection is closed,
        the underlying socket is closed.
        """
        # Raise the same error as http.client.HTTPConnection
        if self._response_options is None:
            raise ResponseNotReady()

        # Reset this attribute for being used again.
        resp_options = self._response_options
        self._response_options = None

        # Since the connection's timeout value may have been updated
        # we need to set the timeout on the socket.
        self.sock.settimeout(self.timeout)

        # This is needed here to avoid circular import errors

        # Get the response from http.client.HTTPConnection
        httplib_response = super().getresponse()

        try:
            assert_header_parsing(httplib_response.msg)
except (HeaderParsingError, TypeError) as hpe:
12
2023-12-16 04:12:01+00:00
24k
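The cropped `request()` implementation in the record above frames the body with HTTP/1.1 chunked transfer-encoding whenever no Content-Length can be determined. A minimal, self-contained sketch of that framing follows; `frame_chunked` is a hypothetical helper written for illustration and is not part of urllib3.

def frame_chunked(chunks):
    # Mirrors the send loop above: skip empty chunks (an empty chunk
    # would terminate the body early), encode str chunks as UTF-8,
    # prefix each chunk with its hex length, and end with "0\r\n\r\n".
    framed = bytearray()
    for chunk in chunks:
        if not chunk:
            continue
        if isinstance(chunk, str):
            chunk = chunk.encode("utf-8")
        framed += b"%x\r\n%b\r\n" % (len(chunk), chunk)
    framed += b"0\r\n\r\n"  # explicit terminating chunk
    return bytes(framed)

print(frame_chunked([b"hello", "", " world"]))
# b'5\r\nhello\r\n6\r\n world\r\n0\r\n\r\n'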
YaoFANGUK/video-subtitle-remover
backend/scenedetect/scene_manager.py
[ { "identifier": "SimpleTableCell", "path": "backend/scenedetect/_thirdparty/simpletable.py", "snippet": "class SimpleTableCell(object):\n \"\"\"A table class to create table cells.\n\n Example:\n cell = SimpleTableCell('Hello, world!')\n \"\"\"\n\n def __init__(self, text, header=False):\...
import csv import threading import queue import logging import math import sys import cv2 import numpy as np from enum import Enum from typing import Iterable, List, Tuple, Optional, Dict, Callable, Union, TextIO from backend.scenedetect._thirdparty.simpletable import (SimpleTableCell, SimpleTableImage, SimpleTableRow, SimpleTable, HTMLPage) from backend.scenedetect.platform import (tqdm, get_and_create_path, get_cv2_imwrite_params, Template) from backend.scenedetect.frame_timecode import FrameTimecode from backend.scenedetect.video_stream import VideoStream from backend.scenedetect.scene_detector import SceneDetector, SparseSceneDetector from backend.scenedetect.stats_manager import StatsManager, FrameMetricRegistered
14,719
if not completed: logger.error('Could not generate all output images.') return image_filenames ## ## SceneManager Class Implementation ## class SceneManager: """The SceneManager facilitates detection of scenes (:meth:`detect_scenes`) on a video (:class:`VideoStream <scenedetect.video_stream.VideoStream>`) using a detector (:meth:`add_detector`). Video decoding is done in parallel in a background thread. """ def __init__( self, stats_manager: Optional[StatsManager] = None, ): """ Arguments: stats_manager: :class:`StatsManager` to bind to this `SceneManager`. Can be accessed via the `stats_manager` property of the resulting object to save to disk. """ self._cutting_list = [] self._event_list = [] self._detector_list = [] self._sparse_detector_list = [] # TODO(v1.0): This class should own a StatsManager instead of taking an optional one. # Expose a new `stats_manager` @property from the SceneManager, and either change the # `stats_manager` argument to to `store_stats: bool=False`, or lazy-init one. # TODO(v1.0): This class should own a VideoStream as well, instead of passing one # to the detect_scenes method. If concatenation is required, it can be implemented as # a generic VideoStream wrapper. self._stats_manager: Optional[StatsManager] = stats_manager # Position of video that was first passed to detect_scenes. self._start_pos: FrameTimecode = None # Position of video on the last frame processed by detect_scenes. self._last_pos: FrameTimecode = None self._base_timecode: Optional[FrameTimecode] = None self._downscale: int = 1 self._auto_downscale: bool = True # Interpolation method to use when downscaling. Defaults to linear interpolation # as a good balance between quality and performance. self._interpolation: Interpolation = Interpolation.LINEAR # Boolean indicating if we have only seen EventType.CUT events so far. self._only_cuts: bool = True # Set by decode thread when an exception occurs. self._exception_info = None self._stop = threading.Event() self._frame_buffer = [] self._frame_buffer_size = 0 @property def interpolation(self) -> Interpolation: """Interpolation method to use when downscaling frames. Must be one of cv2.INTER_*.""" return self._interpolation @interpolation.setter def interpolation(self, value: Interpolation): self._interpolation = value @property def stats_manager(self) -> Optional[StatsManager]: """Getter for the StatsManager associated with this SceneManager, if any.""" return self._stats_manager @property def downscale(self) -> int: """Factor to downscale each frame by. Will always be >= 1, where 1 indicates no scaling. Will be ignored if auto_downscale=True.""" return self._downscale @downscale.setter def downscale(self, value: int): """Set to 1 for no downscaling, 2 for 2x downscaling, 3 for 3x, etc...""" if value < 1: raise ValueError("Downscale factor must be a positive integer >= 1!") if self.auto_downscale: logger.warning("Downscale factor will be ignored because auto_downscale=True!") if value is not None and not isinstance(value, int): logger.warning("Downscale factor will be truncated to integer!") value = int(value) self._downscale = value @property def auto_downscale(self) -> bool: """If set to True, will automatically downscale based on video frame size. Overrides `downscale` if set.""" return self._auto_downscale @auto_downscale.setter def auto_downscale(self, value: bool): self._auto_downscale = value def add_detector(self, detector: SceneDetector) -> None: """Add/register a SceneDetector (e.g. 
ContentDetector, ThresholdDetector) to run when detect_scenes is called. The SceneManager owns the detector object, so a temporary may be passed. Arguments: detector (SceneDetector): Scene detector to add to the SceneManager. """ if self._stats_manager is None and detector.stats_manager_required(): # Make sure the lists are empty so that the detectors don't get # out of sync (require an explicit statsmanager instead) assert not self._detector_list and not self._sparse_detector_list self._stats_manager = StatsManager() detector.stats_manager = self._stats_manager if self._stats_manager is not None: try: self._stats_manager.register_metrics(detector.get_metrics())
# -*- coding: utf-8 -*- # # PySceneDetect: Python-Based Video Scene Detector # ------------------------------------------------------------------- # [ Site: https://scenedetect.com ] # [ Docs: https://scenedetect.com/docs/ ] # [ Github: https://github.com/Breakthrough/PySceneDetect/ ] # # Copyright (C) 2014-2023 Brandon Castellano <http://www.bcastell.com>. # PySceneDetect is licensed under the BSD 3-Clause License; see the # included LICENSE file, or visit one of the above pages for details. # """``scenedetect.scene_manager`` Module This module implements :class:`SceneManager`, coordinates running a :mod:`SceneDetector <scenedetect.detectors>` over the frames of a video (:mod:`VideoStream <scenedetect.video_stream>`). Video decoding is done in a separate thread to improve performance. This module also contains other helper functions (e.g. :func:`save_images`) which can be used to process the resulting scene list. =============================================================== Usage =============================================================== The following example shows basic usage of a :class:`SceneManager`: .. code:: python from scenedetect import open_video, SceneManager, ContentDetector video = open_video(video_path) scene_manager = SceneManager() scene_manager.add_detector(ContentDetector()) # Detect all scenes in video from current position to end. scene_manager.detect_scenes(video) # `get_scene_list` returns a list of start/end timecode pairs # for each scene that was found. scenes = scene_manager.get_scene_list() An optional callback can also be invoked on each detected scene, for example: .. code:: python from scenedetect import open_video, SceneManager, ContentDetector # Callback to invoke on the first frame of every new scene detection. def on_new_scene(frame_img: numpy.ndarray, frame_num: int): print("New scene found at frame %d." % frame_num) video = open_video(test_video_file) scene_manager = SceneManager() scene_manager.add_detector(ContentDetector()) scene_manager.detect_scenes(video=video, callback=on_new_scene) To use a `SceneManager` with a webcam/device or existing `cv2.VideoCapture` device, use the :class:`VideoCaptureAdapter <scenedetect.backends.opencv.VideoCaptureAdapter>` instead of `open_video`. ======================================================================= Storing Per-Frame Statistics ======================================================================= `SceneManager` can use an optional :class:`StatsManager <scenedetect.stats_manager.StatsManager>` to save frame statistics to disk: .. code:: python from scenedetect import open_video, ContentDetector, SceneManager, StatsManager video = open_video(test_video_file) scene_manager = SceneManager(stats_manager=StatsManager()) scene_manager.add_detector(ContentDetector()) scene_manager.detect_scenes(video=video) scene_list = scene_manager.get_scene_list() print_scenes(scene_list=scene_list) # Save per-frame statistics to disk. scene_manager.stats_manager.save_to_csv(csv_file=STATS_FILE_PATH) The statsfile can be used to find a better threshold for certain inputs, or perform statistical analysis of the video. """ logger = logging.getLogger('pyscenedetect') # TODO: This value can and should be tuned for performance improvements as much as possible, # until accuracy falls, on a large enough dataset. This has yet to be done, but the current # value doesn't seem to have caused any issues at least. 
DEFAULT_MIN_WIDTH: int = 256 """The default minimum width a frame will be downscaled to when calculating a downscale factor.""" MAX_FRAME_QUEUE_LENGTH: int = 4 """Maximum number of decoded frames which can be buffered while waiting to be processed.""" PROGRESS_BAR_DESCRIPTION = 'Detected: %d | Progress' """Template to use for progress bar.""" class Interpolation(Enum): """Interpolation method used for image resizing. Based on constants defined in OpenCV.""" NEAREST = cv2.INTER_NEAREST """Nearest neighbor interpolation.""" LINEAR = cv2.INTER_LINEAR """Bilinear interpolation.""" CUBIC = cv2.INTER_CUBIC """Bicubic interpolation.""" AREA = cv2.INTER_AREA """Pixel area relation resampling. Provides moire'-free downscaling.""" LANCZOS4 = cv2.INTER_LANCZOS4 """Lanczos interpolation over 8x8 neighborhood.""" def compute_downscale_factor(frame_width: int, effective_width: int = DEFAULT_MIN_WIDTH) -> int: """Get the optimal default downscale factor based on a video's resolution (currently only the width in pixels is considered). The resulting effective width of the video will be between frame_width and 1.5 * frame_width pixels (e.g. if frame_width is 200, the range of effective widths will be between 200 and 300). Arguments: frame_width: Actual width of the video frame in pixels. effective_width: Desired minimum width in pixels. Returns: int: The default downscale factor to use to achieve at least the target effective_width. """ assert not (frame_width < 1 or effective_width < 1) if frame_width < effective_width: return 1 return frame_width // effective_width def get_scenes_from_cuts( cut_list: Iterable[FrameTimecode], start_pos: Union[int, FrameTimecode], end_pos: Union[int, FrameTimecode], base_timecode: Optional[FrameTimecode] = None, ) -> List[Tuple[FrameTimecode, FrameTimecode]]: """Returns a list of tuples of start/end FrameTimecodes for each scene based on a list of detected scene cuts/breaks. This function is called when using the :meth:`SceneManager.get_scene_list` method. The scene list is generated from a cutting list (:meth:`SceneManager.get_cut_list`), noting that each scene is contiguous, starting from the first to last frame of the input. If `cut_list` is empty, the resulting scene will span from `start_pos` to `end_pos`. Arguments: cut_list: List of FrameTimecode objects where scene cuts/breaks occur. base_timecode: The base_timecode of which all FrameTimecodes in the cut_list are based on. num_frames: The number of frames, or FrameTimecode representing duration, of the video that was processed (used to generate last scene's end time). start_frame: The start frame or FrameTimecode of the cut list. Used to generate the first scene's start time. base_timecode: [DEPRECATED] DO NOT USE. For backwards compatibility only. Returns: List of tuples in the form (start_time, end_time), where both start_time and end_time are FrameTimecode objects representing the exact time/frame where each scene occupies based on the input cut_list. """ # TODO(v0.7): Use the warnings module to turn this into a warning. if base_timecode is not None: logger.error('`base_timecode` argument is deprecated has no effect.') # Scene list, where scenes are tuples of (Start FrameTimecode, End FrameTimecode). scene_list = [] if not cut_list: scene_list.append((start_pos, end_pos)) return scene_list # Initialize last_cut to the first frame we processed,as it will be # the start timecode for the first scene in the list. 
last_cut = start_pos for cut in cut_list: scene_list.append((last_cut, cut)) last_cut = cut # Last scene is from last cut to end of video. scene_list.append((last_cut, end_pos)) return scene_list def write_scene_list(output_csv_file: TextIO, scene_list: Iterable[Tuple[FrameTimecode, FrameTimecode]], include_cut_list: bool = True, cut_list: Optional[Iterable[FrameTimecode]] = None) -> None: """Writes the given list of scenes to an output file handle in CSV format. Arguments: output_csv_file: Handle to open file in write mode. scene_list: List of pairs of FrameTimecodes denoting each scene's start/end FrameTimecode. include_cut_list: Bool indicating if the first row should include the timecodes where each scene starts. Should be set to False if RFC 4180 compliant CSV output is required. cut_list: Optional list of FrameTimecode objects denoting the cut list (i.e. the frames in the video that need to be split to generate individual scenes). If not specified, the cut list is generated using the start times of each scene following the first one. """ csv_writer = csv.writer(output_csv_file, lineterminator='\n') # If required, output the cutting list as the first row (i.e. before the header row). if include_cut_list: csv_writer.writerow( ["Timecode List:"] + cut_list if cut_list else [start.get_timecode() for start, _ in scene_list[1:]]) csv_writer.writerow([ "Scene Number", "Start Frame", "Start Timecode", "Start Time (seconds)", "End Frame", "End Timecode", "End Time (seconds)", "Length (frames)", "Length (timecode)", "Length (seconds)" ]) for i, (start, end) in enumerate(scene_list): duration = end - start csv_writer.writerow([ '%d' % (i + 1), '%d' % (start.get_frames() + 1), start.get_timecode(), '%.3f' % start.get_seconds(), '%d' % end.get_frames(), end.get_timecode(), '%.3f' % end.get_seconds(), '%d' % duration.get_frames(), duration.get_timecode(), '%.3f' % duration.get_seconds() ]) def write_scene_list_html(output_html_filename, scene_list, cut_list=None, css=None, css_class='mytable', image_filenames=None, image_width=None, image_height=None): """Writes the given list of scenes to an output file handle in html format. Arguments: output_html_filename: filename of output html file scene_list: List of pairs of FrameTimecodes denoting each scene's start/end FrameTimecode. cut_list: Optional list of FrameTimecode objects denoting the cut list (i.e. the frames in the video that need to be split to generate individual scenes). If not passed, the start times of each scene (besides the 0th scene) is used instead. css: String containing all the css information for the resulting html page. 
css_class: String containing the named css class image_filenames: dict where key i contains a list with n elements (filenames of the n saved images from that scene) image_width: Optional desired width of images in table in pixels image_height: Optional desired height of images in table in pixels """ if not css: css = """ table.mytable { font-family: times; font-size:12px; color:#000000; border-width: 1px; border-color: #eeeeee; border-collapse: collapse; background-color: #ffffff; width=100%; max-width:550px; table-layout:fixed; } table.mytable th { border-width: 1px; padding: 8px; border-style: solid; border-color: #eeeeee; background-color: #e6eed6; color:#000000; } table.mytable td { border-width: 1px; padding: 8px; border-style: solid; border-color: #eeeeee; } #code { display:inline; font-family: courier; color: #3d9400; } #string { display:inline; font-weight: bold; } """ # Output Timecode list timecode_table = SimpleTable( [["Timecode List:"] + (cut_list if cut_list else [start.get_timecode() for start, _ in scene_list[1:]])], css_class=css_class) # Output list of scenes header_row = [ "Scene Number", "Start Frame", "Start Timecode", "Start Time (seconds)", "End Frame", "End Timecode", "End Time (seconds)", "Length (frames)", "Length (timecode)", "Length (seconds)" ] for i, (start, end) in enumerate(scene_list): duration = end - start row = SimpleTableRow([ '%d' % (i + 1), '%d' % (start.get_frames() + 1), start.get_timecode(), '%.3f' % start.get_seconds(), '%d' % end.get_frames(), end.get_timecode(), '%.3f' % end.get_seconds(), '%d' % duration.get_frames(), duration.get_timecode(), '%.3f' % duration.get_seconds() ]) if image_filenames: for image in image_filenames[i]: row.add_cell( SimpleTableCell( SimpleTableImage(image, width=image_width, height=image_height))) if i == 0: scene_table = SimpleTable(rows=[row], header_row=header_row, css_class=css_class) else: scene_table.add_row(row=row) # Write html file page = HTMLPage() page.add_table(timecode_table) page.add_table(scene_table) page.css = css page.save(output_html_filename) # # TODO(v1.0): Refactor to take a SceneList object; consider moving this and save scene list # to a better spot, or just move them to scene_list.py. # def save_images(scene_list: List[Tuple[FrameTimecode, FrameTimecode]], video: VideoStream, num_images: int = 3, frame_margin: int = 1, image_extension: str = 'jpg', encoder_param: int = 95, image_name_template: str = '$VIDEO_NAME-Scene-$SCENE_NUMBER-$IMAGE_NUMBER', output_dir: Optional[str] = None, show_progress: Optional[bool] = False, scale: Optional[float] = None, height: Optional[int] = None, width: Optional[int] = None, interpolation: Interpolation = Interpolation.CUBIC, video_manager=None) -> Dict[int, List[str]]: """Save a set number of images from each scene, given a list of scenes and the associated video/frame source. Arguments: scene_list: A list of scenes (pairs of FrameTimecode objects) returned from calling a SceneManager's detect_scenes() method. video: A VideoStream object corresponding to the scene list. Note that the video will be closed/re-opened and seeked through. num_images: Number of images to generate for each scene. Minimum is 1. frame_margin: Number of frames to pad each scene around the beginning and end (e.g. moves the first/last image into the scene by N frames). Can set to 0, but will result in some video files failing to extract the very last frame. image_extension: Type of image to save (must be one of 'jpg', 'png', or 'webp'). 
encoder_param: Quality/compression efficiency, based on type of image: 'jpg' / 'webp': Quality 0-100, higher is better quality. 100 is lossless for webp. 'png': Compression from 1-9, where 9 achieves best filesize but is slower to encode. image_name_template: Template to use when creating the images on disk. Can use the macros $VIDEO_NAME, $SCENE_NUMBER, and $IMAGE_NUMBER. The image extension is applied automatically as per the argument image_extension. output_dir: Directory to output the images into. If not set, the output is created in the working directory. show_progress: If True, shows a progress bar if tqdm is installed. scale: Optional factor by which to rescale saved images. A scaling factor of 1 would not result in rescaling. A value < 1 results in a smaller saved image, while a value > 1 results in an image larger than the original. This value is ignored if either the height or width values are specified. height: Optional value for the height of the saved images. Specifying both the height and width will resize images to an exact size, regardless of aspect ratio. Specifying only height will rescale the image to that number of pixels in height while preserving the aspect ratio. width: Optional value for the width of the saved images. Specifying both the width and height will resize images to an exact size, regardless of aspect ratio. Specifying only width will rescale the image to that number of pixels wide while preserving the aspect ratio. interpolation: Type of interpolation to use when resizing images. video_manager: [DEPRECATED] DO NOT USE. For backwards compatibility only. Returns: Dictionary of the format { scene_num : [image_paths] }, where scene_num is the number of the scene in scene_list (starting from 1), and image_paths is a list of the paths to the newly saved/created images. Raises: ValueError: Raised if any arguments are invalid or out of range (e.g. if num_images is negative). """ # TODO(v0.7): Add DeprecationWarning that `video_manager` will be removed in v0.8. if video_manager is not None: logger.error('`video_manager` argument is deprecated, use `video` instead.') video = video_manager if not scene_list: return {} if num_images <= 0 or frame_margin < 0: raise ValueError() # TODO: Validate that encoder_param is within the proper range. # Should be between 0 and 100 (inclusive) for jpg/webp, and 1-9 for png. imwrite_param = [get_cv2_imwrite_params()[image_extension], encoder_param ] if encoder_param is not None else [] video.reset() # Setup flags and init progress bar if available. completed = True logger.info('Generating output images (%d per scene)...', num_images) progress_bar = None if show_progress: progress_bar = tqdm(total=len(scene_list) * num_images, unit='images', dynamic_ncols=True) filename_template = Template(image_name_template) scene_num_format = '%0' scene_num_format += str(max(3, math.floor(math.log(len(scene_list), 10)) + 1)) + 'd' image_num_format = '%0' image_num_format += str(math.floor(math.log(num_images, 10)) + 2) + 'd' framerate = scene_list[0][0].framerate # TODO(v1.0): Split up into multiple sub-expressions so auto-formatter works correctly. 
timecode_list = [ [ FrameTimecode(int(f), fps=framerate) for f in [ # middle frames a[len(a) // 2] if (0 < j < num_images - 1) or num_images == 1 # first frame else min(a[0] + frame_margin, a[-1]) if j == 0 # last frame else max(a[-1] - frame_margin, a[0]) # for each evenly-split array of frames in the scene list for j, a in enumerate(np.array_split(r, num_images)) ] ] for i, r in enumerate([ # pad ranges to number of images r if 1 + r[-1] - r[0] >= num_images else list(r) + [r[-1]] * (num_images - len(r)) # create range of frames in scene for r in ( range( start.get_frames(), start.get_frames() + max( 1, # guard against zero length scenes end.get_frames() - start.get_frames())) # for each scene in scene list for start, end in scene_list) ]) ] image_filenames = {i: [] for i in range(len(timecode_list))} aspect_ratio = video.aspect_ratio if abs(aspect_ratio - 1.0) < 0.01: aspect_ratio = None logger.debug('Writing images with template %s', filename_template.template) for i, scene_timecodes in enumerate(timecode_list): for j, image_timecode in enumerate(scene_timecodes): video.seek(image_timecode) frame_im = video.read() if frame_im is not None: # TODO: Allow NUM to be a valid suffix in addition to NUMBER. file_path = '%s.%s' % (filename_template.safe_substitute( VIDEO_NAME=video.name, SCENE_NUMBER=scene_num_format % (i + 1), IMAGE_NUMBER=image_num_format % (j + 1), FRAME_NUMBER=image_timecode.get_frames()), image_extension) image_filenames[i].append(file_path) # TODO(0.6.3): Combine this resize with the ones below. if aspect_ratio is not None: frame_im = cv2.resize( frame_im, (0, 0), fx=aspect_ratio, fy=1.0, interpolation=interpolation.value) frame_height = frame_im.shape[0] frame_width = frame_im.shape[1] # Figure out what kind of resizing needs to be done if height or width: if height and not width: factor = height / float(frame_height) width = int(factor * frame_width) if width and not height: factor = width / float(frame_width) height = int(factor * frame_height) assert height > 0 and width > 0 frame_im = cv2.resize( frame_im, (width, height), interpolation=interpolation.value) elif scale: frame_im = cv2.resize( frame_im, (0, 0), fx=scale, fy=scale, interpolation=interpolation.value) cv2.imwrite(get_and_create_path(file_path, output_dir), frame_im, imwrite_param) else: completed = False break if progress_bar is not None: progress_bar.update(1) if progress_bar is not None: progress_bar.close() if not completed: logger.error('Could not generate all output images.') return image_filenames ## ## SceneManager Class Implementation ## class SceneManager: """The SceneManager facilitates detection of scenes (:meth:`detect_scenes`) on a video (:class:`VideoStream <scenedetect.video_stream.VideoStream>`) using a detector (:meth:`add_detector`). Video decoding is done in parallel in a background thread. """ def __init__( self, stats_manager: Optional[StatsManager] = None, ): """ Arguments: stats_manager: :class:`StatsManager` to bind to this `SceneManager`. Can be accessed via the `stats_manager` property of the resulting object to save to disk. """ self._cutting_list = [] self._event_list = [] self._detector_list = [] self._sparse_detector_list = [] # TODO(v1.0): This class should own a StatsManager instead of taking an optional one. # Expose a new `stats_manager` @property from the SceneManager, and either change the # `stats_manager` argument to to `store_stats: bool=False`, or lazy-init one. 
# TODO(v1.0): This class should own a VideoStream as well, instead of passing one # to the detect_scenes method. If concatenation is required, it can be implemented as # a generic VideoStream wrapper. self._stats_manager: Optional[StatsManager] = stats_manager # Position of video that was first passed to detect_scenes. self._start_pos: FrameTimecode = None # Position of video on the last frame processed by detect_scenes. self._last_pos: FrameTimecode = None self._base_timecode: Optional[FrameTimecode] = None self._downscale: int = 1 self._auto_downscale: bool = True # Interpolation method to use when downscaling. Defaults to linear interpolation # as a good balance between quality and performance. self._interpolation: Interpolation = Interpolation.LINEAR # Boolean indicating if we have only seen EventType.CUT events so far. self._only_cuts: bool = True # Set by decode thread when an exception occurs. self._exception_info = None self._stop = threading.Event() self._frame_buffer = [] self._frame_buffer_size = 0 @property def interpolation(self) -> Interpolation: """Interpolation method to use when downscaling frames. Must be one of cv2.INTER_*.""" return self._interpolation @interpolation.setter def interpolation(self, value: Interpolation): self._interpolation = value @property def stats_manager(self) -> Optional[StatsManager]: """Getter for the StatsManager associated with this SceneManager, if any.""" return self._stats_manager @property def downscale(self) -> int: """Factor to downscale each frame by. Will always be >= 1, where 1 indicates no scaling. Will be ignored if auto_downscale=True.""" return self._downscale @downscale.setter def downscale(self, value: int): """Set to 1 for no downscaling, 2 for 2x downscaling, 3 for 3x, etc...""" if value < 1: raise ValueError("Downscale factor must be a positive integer >= 1!") if self.auto_downscale: logger.warning("Downscale factor will be ignored because auto_downscale=True!") if value is not None and not isinstance(value, int): logger.warning("Downscale factor will be truncated to integer!") value = int(value) self._downscale = value @property def auto_downscale(self) -> bool: """If set to True, will automatically downscale based on video frame size. Overrides `downscale` if set.""" return self._auto_downscale @auto_downscale.setter def auto_downscale(self, value: bool): self._auto_downscale = value def add_detector(self, detector: SceneDetector) -> None: """Add/register a SceneDetector (e.g. ContentDetector, ThresholdDetector) to run when detect_scenes is called. The SceneManager owns the detector object, so a temporary may be passed. Arguments: detector (SceneDetector): Scene detector to add to the SceneManager. """ if self._stats_manager is None and detector.stats_manager_required(): # Make sure the lists are empty so that the detectors don't get # out of sync (require an explicit statsmanager instead) assert not self._detector_list and not self._sparse_detector_list self._stats_manager = StatsManager() detector.stats_manager = self._stats_manager if self._stats_manager is not None: try: self._stats_manager.register_metrics(detector.get_metrics())
except FrameMetricRegistered:
11
2023-10-25 02:50:01+00:00
24k
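The scene_manager record above includes `get_scenes_from_cuts`, which turns a flat list of cut points into contiguous (start, end) scene pairs. Below is a hypothetical standalone sketch of the same logic, using plain frame numbers instead of FrameTimecode objects; the function name is invented for illustration.

def scenes_from_cuts(cuts, start, end):
    # Each scene spans from the previous cut (or `start`) up to the next
    # cut; the last scene runs to `end`. With no cuts, the whole range
    # is a single scene.
    if not cuts:
        return [(start, end)]
    scenes, last_cut = [], start
    for cut in cuts:
        scenes.append((last_cut, cut))
        last_cut = cut
    scenes.append((last_cut, end))
    return scenes

print(scenes_from_cuts([120, 480], 0, 900))
# [(0, 120), (120, 480), (480, 900)]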
EulerSearch/embedding_studio
embedding_studio/embeddings/training/embeddings_finetuner.py
[ { "identifier": "QueryRetriever", "path": "embedding_studio/embeddings/data/clickstream/query_retriever.py", "snippet": "class QueryRetriever(object):\n \"\"\"As we can't exactly predict a schema of storing queries:\n 1. As text exceptly in clickstream service\n 2. As ID of a record with a text...
import logging import numpy as np import pytorch_lightning as pl import torch from collections import defaultdict from typing import Callable, List, Optional, Tuple, Union from datasets import DatasetDict from torch import FloatTensor, Tensor from torch.optim import SGD, Optimizer from torch.optim.lr_scheduler import LRScheduler, StepLR from embedding_studio.embeddings.data.clickstream.query_retriever import ( QueryRetriever, ) from embedding_studio.embeddings.data.clickstream.raw_session import ( ClickstreamSession, ) from embedding_studio.embeddings.features.event_confidences import ( dummy_confidences, ) from embedding_studio.embeddings.features.extractor import ( COSINE_SIMILARITY, FeaturesExtractor, ) from embedding_studio.embeddings.features.session_features import ( SessionFeatures, ) from embedding_studio.embeddings.losses.ranking_loss_interface import ( RankingLossInterface, ) from embedding_studio.embeddings.metrics.distance_shift import DistanceShift from embedding_studio.embeddings.metrics.metric import MetricCalculator from embedding_studio.embeddings.models.interface import ( EmbeddingsModelInterface, ) from embedding_studio.workers.fine_tuning.experiments.experiments_tracker import ( ExperimentsManager, ) from embedding_studio.workers.fine_tuning.experiments.finetuning_params import ( FineTuningParams, ) from embedding_studio.workers.fine_tuning.experiments.finetuning_settings import ( FineTuningSettings, ) from embedding_studio.workers.fine_tuning.experiments.metrics_accumulator import ( MetricValue, )
15,373
logger = logging.getLogger(__name__) class EmbeddingsFineTuner(pl.LightningModule): def __init__( self, model: EmbeddingsModelInterface, items_storages: DatasetDict, query_retriever: QueryRetriever,
logger = logging.getLogger(__name__) class EmbeddingsFineTuner(pl.LightningModule): def __init__( self, model: EmbeddingsModelInterface, items_storages: DatasetDict, query_retriever: QueryRetriever,
loss_func: RankingLossInterface,
6
2023-10-31 00:33:13+00:00
24k
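The cropped code above defines `EmbeddingsFineTuner` as a `pytorch_lightning.LightningModule`. A minimal sketch of that pattern follows, assuming placeholder model and loss components; none of the names below come from the embedding_studio repo.

import pytorch_lightning as pl
import torch
from torch import nn

class TinyFineTuner(pl.LightningModule):
    """Skeleton illustrating the LightningModule structure the record
    above builds on; the linear layer and MSE loss are stand-ins for the
    repo's embeddings model and ranking loss."""

    def __init__(self, in_dim: int = 16, lr: float = 1e-3):
        super().__init__()
        self.model = nn.Linear(in_dim, 8)  # stand-in for the embeddings model
        self.loss_fn = nn.MSELoss()        # stand-in for the ranking loss
        self.lr = lr

    def training_step(self, batch, batch_idx):
        x, y = batch
        loss = self.loss_fn(self.model(x), y)
        self.log("train_loss", loss)
        return loss

    def configure_optimizers(self):
        return torch.optim.SGD(self.parameters(), lr=self.lr)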
facebookresearch/minimax
src/minimax/runners/xp_runner.py
[ { "identifier": "EvalRunner", "path": "src/minimax/runners/eval_runner.py", "snippet": "class EvalRunner:\n def __init__(\n self,\n pop,\n env_names,\n env_kwargs=None,\n n_episodes=10,\n agent_idxs='*',\n render_mode=None):\n\n self.pop = pop\n...
import copy import time import numpy as np import jax import minimax.envs as envs import minimax.models as models import minimax.agents as agents from functools import partial from collections import defaultdict from jax.sharding import Mesh, PartitionSpec as P from jax.experimental import mesh_utils from jax.experimental.shard_map import shard_map from .eval_runner import EvalRunner from .dr_runner import DRRunner from .paired_runner import PAIREDRunner from .plr_runner import PLRRunner from minimax.util.rl import UEDScore, PopPLRManager
16,396
""" Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved. This source code is licensed under the license found in the LICENSE file in the root directory of this source tree. """ class RunnerInfo: def __init__( self, runner_cls, is_ued=False): self.runner_cls = runner_cls self.is_ued = is_ued RUNNER_INFO = { 'dr': RunnerInfo( runner_cls=DRRunner, ), 'plr': RunnerInfo( runner_cls=PLRRunner, ), 'paired': RunnerInfo(
""" Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved. This source code is licensed under the license found in the LICENSE file in the root directory of this source tree. """ class RunnerInfo: def __init__( self, runner_cls, is_ued=False): self.runner_cls = runner_cls self.is_ued = is_ued RUNNER_INFO = { 'dr': RunnerInfo( runner_cls=DRRunner, ), 'plr': RunnerInfo( runner_cls=PLRRunner, ), 'paired': RunnerInfo(
runner_cls=PAIREDRunner,
2
2023-10-28 12:12:01+00:00
24k
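The minimax record above registers runner classes in a `RUNNER_INFO` dict keyed by algorithm name. Here is a hypothetical standalone sketch of that registry pattern, with placeholder classes instead of the repo's actual runners.

class DummyDRRunner: ...
class DummyPLRRunner: ...

class RunnerInfo:
    def __init__(self, runner_cls, is_ued=False):
        self.runner_cls = runner_cls
        self.is_ued = is_ued  # flags UED (teacher/student) algorithms

REGISTRY = {
    "dr": RunnerInfo(DummyDRRunner),
    "plr": RunnerInfo(DummyPLRRunner),
}

def make_runner(name, **kwargs):
    # Look up an algorithm by name and instantiate its runner class.
    return REGISTRY[name].runner_cls(**kwargs)

runner = make_runner("dr")
print(type(runner).__name__, REGISTRY["plr"].is_ued)  # DummyDRRunner False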
nv-tlabs/vid2player3d
uhc/smpllib/smpl_local_robot.py
[ { "identifier": "Skeleton", "path": "uhc/khrylib/mocap/skeleton_local.py", "snippet": "class Skeleton:\n def __init__(\n self, template_dir=\"/hdd/zen/dev/copycat/Copycat/assets/bigfoot_template_v1.pkl\"\n ):\n self.bones = []\n self.name2bone = {}\n self.mass_scale = 1...
import os import sys import time import argparse import torch import os.path as osp import mujoco_py import numpy as np import math import uuid import atexit import shutil from copy import deepcopy from lxml.etree import XMLParser, parse, Element, SubElement from lxml import etree from io import BytesIO from scipy.spatial import ConvexHull from stl import mesh from mujoco_py import load_model_from_path, MjSim, MjViewer from uhc.khrylib.mocap.skeleton_local import Skeleton from uhc.khrylib.mocap.skeleton_mesh_local import Skeleton as SkeletonMesh from uhc.smpllib.smpl_parser import ( SMPL_Parser, SMPLH_Parser, SMPLX_Parser, ) from uhc.utils.geom import quadric_mesh_decimation from uhc.utils.flags import flags
14,571
self.param_specs["bone_ang"]["lb"] = max( self.param_specs["bone_ang"]["lb"], self.param_specs["bone_ang"].get("min", -np.inf), ) self.param_specs["bone_ang"]["ub"] = min( self.param_specs["bone_ang"]["ub"], self.param_specs["bone_ang"].get("max", np.inf), ) bone_ang = normalize_range( bone_ang, self.param_specs["bone_ang"]["lb"], self.param_specs["bone_ang"]["ub"], ) param_list.append(np.array([bone_ang])) for joint in self.joints: joint.get_params(param_list, get_name, pad_zeros) for geom in self.geoms: geom.get_params(param_list, get_name, pad_zeros) if not get_name: self.param_inited = True if demap_params and not get_name and len(param_list) > 0: params = self.robot.demap_params(np.concatenate(param_list)) return params def set_params(self, params, pad_zeros=False, map_params=False): if map_params: params = self.robot.map_params(params) if self.bone_offset is not None and "offset" in self.param_specs: if self.param_specs["offset"]["type"] in {"xz", "xy"}: offset = denormalize_range( params[:2], self.param_specs["offset"]["lb"], self.param_specs["offset"]["ub"], ) if np.all(offset == 0.0): offset[0] += 1e-8 if self.param_specs["offset"]["type"] == "xz": self.bone_offset[[0, 2]] = offset elif self.param_specs["offset"]["type"] == "xy": self.bone_offset[[0, 1]] = offset params = params[2:] else: offset = denormalize_range( params[:3], self.param_specs["offset"]["lb"], self.param_specs["offset"]["ub"], ) if np.all(offset == 0.0): offset[0] += 1e-8 self.bone_offset[:] = offset params = params[3:] if self.bone_offset is not None and "bone_len" in self.param_specs: bone_len = denormalize_range( params[0].item(), self.param_specs["bone_len"]["lb"], self.param_specs["bone_len"]["ub"], ) bone_len = max(bone_len, 1e-4) params = params[1:] elif self.bone_offset is not None: bone_len = np.linalg.norm(self.bone_offset) if self.bone_offset is not None and "bone_ang" in self.param_specs: bone_ang = denormalize_range( params[0].item(), self.param_specs["bone_ang"]["lb"], self.param_specs["bone_ang"]["ub"], ) params = params[1:] elif self.bone_offset is not None: bone_ang = math.atan2(self.bone_offset[2], self.bone_offset[0]) if "bone_len" in self.param_specs or "bone_ang" in self.param_specs: self.bone_offset = np.array( [bone_len * math.cos(bone_ang), 0, bone_len * math.sin(bone_ang)] ) for joint in self.joints: params = joint.set_params(params, pad_zeros) for geom in self.geoms: params = geom.set_params(params, pad_zeros) # rebuild bone, geom, joint self.rebuild() return params class Robot: def __init__(self, cfg, data_dir="data/smpl", model_xml_path=None, masterfoot=False, create_default_skeleton=False, clean_up=False): self.bodies = [] self.weight = 0 self.height = 0 self.cfg = cfg if model_xml_path is not None: self.set_model_xml_path(model_xml_path) else: self.model_xml_path = None self.param_mapping = cfg.get("param_mapping", "clip") self.smpl_model = cfg.get("model", "smpl") self.mesh = cfg.get("mesh", False) self.gender = cfg.get("gender", "neutral") self.flatfoot = cfg.get("flatfoot", True) self.rel_joint_lm = cfg.get( "rel_joint_lm", True ) # Rolling this out worldwide!! self.masterfoot = masterfoot self.param_specs = self.cfg.get("body_params", {}) self.hull_dict = {} self.beta = ( torch.zeros((1, 10)).float() if self.smpl_model == "smpl" else torch.zeros((1, 16)).float() ) if self.smpl_model == "smpl":
sys.path.append(os.getcwd()) def parse_vec(string): return np.fromstring(string, sep=" ") def parse_fromto(string): fromto = np.fromstring(string, sep=" ") return fromto[:3], fromto[3:] def normalize_range(value, lb, ub): return (value - lb) / (ub - lb) * 2 - 1 def denormalize_range(value, lb, ub): return (value + 1) * 0.5 * (ub - lb) + lb def vec_to_polar(v): phi = math.atan2(v[1], v[0]) theta = math.acos(v[2]) return np.array([theta, phi]) def polar_to_vec(p): v = np.zeros(3) v[0] = math.sin(p[0]) * math.cos(p[1]) v[1] = math.sin(p[0]) * math.sin(p[1]) v[2] = math.cos(p[0]) return v def in_hull(hull, queries): tolerance = 1e-3 if len(queries.shape) == 1: queries = queries[ None, ] return np.all( np.add(np.dot(queries, hull.equations[:, :-1].T), hull.equations[:, -1]) <= tolerance, axis=1, ) def get_joint_geometries( smpl_verts, smpl_jts, skin_weights, joint_names, geom_dir, scale_dict={}, suffix = None, verbose=False, min_num_vert = 50, ): vert_to_joint = skin_weights.argmax(axis=1) hull_dict = {} # create joint geometries os.makedirs(geom_dir, exist_ok=True) for jind, jname in enumerate(joint_names): vind = np.where(vert_to_joint == jind)[0] if len(vind) == 0: print(f"{jname} has no vertices!") continue vert = (smpl_verts[vind] - smpl_jts[jind]) * scale_dict.get(jname, 1) hull = ConvexHull(vert) norm_verts = vert - smpl_jts[jind] norm_hull = ConvexHull(norm_verts) hull_dict[jname] = { "norm_hull": norm_hull, "norm_verts": norm_verts, "verts": vert, "hull": hull, } # print(jname, hull.simplices.shape[0]) center = vert[hull.vertices].mean(axis=0) jgeom = mesh.Mesh(np.zeros(hull.simplices.shape[0], dtype=mesh.Mesh.dtype)) for i, f in enumerate(hull.simplices): for j in range(3): jgeom.vectors[i][j] = vert[f[j], :] # check if the face's normal is facing outward normal = np.cross( jgeom.vectors[i][1] - jgeom.vectors[i][0], jgeom.vectors[i][2] - jgeom.vectors[i][0], ) out_vec = jgeom.vectors[i].mean(axis=0) - center if np.dot(normal, out_vec) < 0: jgeom.vectors[i] = jgeom.vectors[i][[0, 2, 1]] # flip the face if suffix is None: fname = f"{geom_dir}/{jname}.stl" else: fname = f"{geom_dir}/{jname}_{suffix}.stl" jgeom.save(fname) # mesh simplification with vtk # min_num_vert = 50 min_num_vert = 50 cur_num_vert = len(hull.vertices) reduction_rate = min(0.9, 1.0 - min_num_vert / cur_num_vert) quadric_mesh_decimation(fname, reduction_rate, verbose=verbose) return hull_dict class Joint: def __init__(self, node, body): self.node = node self.body = body self.cfg = body.cfg self.local_coord = body.local_coord self.name = node.attrib["name"] self.type = node.attrib["type"] if "type" in node.attrib else "free" if self.type == "hinge": self.range = np.deg2rad(parse_vec(node.attrib.get("range", "-360 360"))) actu_node = ( body.tree.getroot().find("actuator").find(f'motor[@joint="{self.name}"]') ) if actu_node is not None: self.actuator = Actuator(actu_node, self) else: self.actuator = None self.parse_param_specs() self.param_inited = False # tunable parameters self.pos = parse_vec("0 0 0") if self.type == "hinge": self.axis = vec_to_polar(parse_vec(node.attrib["axis"])) if self.local_coord: self.pos += body.pos self.damping = ( parse_vec(node.attrib["damping"]) if "damping" in node.attrib else np.array([0]) ) self.stiffness = ( parse_vec(node.attrib["stiffness"]) if "stiffness" in node.attrib else np.array([0]) ) self.armature = ( parse_vec(node.attrib["armature"]) if "armature" in node.attrib else np.array([0.01]) ) self.frictionloss = ( parse_vec(node.attrib["frictionloss"]) if "frictionloss" in 
node.attrib else np.array([0]) ) # import ipdb; ipdb.set_trace() # assert np.all(self.pos == body.pos) def __repr__(self): return "joint_" + self.name def parse_param_specs(self): self.param_specs = deepcopy(self.cfg.get("joint_params", {})) for name, specs in self.param_specs.items(): if "lb" in specs and isinstance(specs["lb"], list): specs["lb"] = np.array(specs["lb"]) if "ub" in specs and isinstance(specs["ub"], list): specs["ub"] = np.array(specs["ub"]) def sync_node(self, rename=False, index=0): pos = self.pos - self.body.pos if self.local_coord else self.pos if rename: self.name = self.body.name + "_joint_" + str(index) self.node.attrib["name"] = self.name if self.type == "hinge": axis_vec = polar_to_vec(self.axis) self.node.attrib["axis"] = " ".join( [f"{x:.6f}".rstrip("0").rstrip(".") for x in axis_vec] ) self.node.attrib["pos"] = " ".join( [f"{x:.6f}".rstrip("0").rstrip(".") for x in pos] ) self.node.attrib["damping"] = " ".join( [f"{x:.6f}".rstrip("0").rstrip(".") for x in self.damping] ) self.node.attrib["stiffness"] = " ".join( [f"{x:.6f}".rstrip("0").rstrip(".") for x in self.stiffness] ) self.node.attrib["armature"] = " ".join( [f"{x:.6f}".rstrip("0").rstrip(".") for x in self.armature] ) elif self.type == "free": pass if self.actuator is not None: self.actuator.sync_node() # if self.name != "Pelvis": # self.node.attrib["frictionloss"] = " ".join( # [f"{x:.6f}".rstrip("0").rstrip(".") for x in self.frictionloss] # ) # if np.sum([self.name.startswith(i) for i in ["L_Knee", "R_Knee", "L_Ankle", "R_Ankle", "L_Toe", "R_Toe"]]): # self.node.attrib["frictionloss"] = "500" # self.node.attrib["stiffness"] = "5" # self.node.attrib["damping"] = "5" # if self.name != "Pelvis": # self.node.attrib["frictionloss"] = "5000" def get_params(self, param_list, get_name=False, pad_zeros=False): if "axis" in self.param_specs: if self.type == "hinge": if get_name: param_list += ["axis_theta", "axis_phi"] else: axis = normalize_range( self.axis, np.array([0, -2 * np.pi]), np.array([np.pi, 2 * np.pi]), ) param_list.append(axis) elif pad_zeros: param_list.append(np.zeros(2)) if self.actuator is not None: self.actuator.get_params(param_list, get_name) elif pad_zeros: param_list.append( np.zeros(3 if self.type == "free" else 1) ) # ZL currently a workaround for supporting 3D joints if "damping" in self.param_specs: if get_name: param_list.append("damping") else: if not self.param_inited and self.param_specs["damping"].get( "rel", False ): self.param_specs["damping"]["lb"] += self.damping self.param_specs["damping"]["ub"] += self.damping self.param_specs["damping"]["lb"] = max( self.param_specs["damping"]["lb"], self.param_specs["damping"].get("min", -np.inf), ) self.param_specs["damping"]["ub"] = min( self.param_specs["damping"]["ub"], self.param_specs["damping"].get("max", np.inf), ) damping = normalize_range( self.damping, self.param_specs["damping"]["lb"], self.param_specs["damping"]["ub"], ) param_list.append(damping.flatten()) if "armature" in self.param_specs: if get_name: param_list.append("armature") else: if not self.param_inited and self.param_specs["armature"].get( "rel", False ): self.param_specs["armature"]["lb"] += self.armature self.param_specs["armature"]["ub"] += self.armature self.param_specs["armature"]["lb"] = max( self.param_specs["armature"]["lb"], self.param_specs["armature"].get("min", -np.inf), ) self.param_specs["armature"]["ub"] = min( self.param_specs["armature"]["ub"], self.param_specs["armature"].get("max", np.inf), ) armature = normalize_range( self.armature, 
self.param_specs["armature"]["lb"], self.param_specs["armature"]["ub"], ) param_list.append(armature.flatten()) if "stiffness" in self.param_specs: if get_name: param_list.append("stiffness") else: if not self.param_inited and self.param_specs["stiffness"].get( "rel", False ): self.param_specs["stiffness"]["lb"] += self.stiffness self.param_specs["stiffness"]["ub"] += self.stiffness self.param_specs["stiffness"]["lb"] = max( self.param_specs["stiffness"]["lb"], self.param_specs["stiffness"].get("min", -np.inf), ) self.param_specs["stiffness"]["ub"] = min( self.param_specs["stiffness"]["ub"], self.param_specs["stiffness"].get("max", np.inf), ) stiffness = normalize_range( self.stiffness, self.param_specs["stiffness"]["lb"], self.param_specs["stiffness"]["ub"], ) param_list.append(stiffness.flatten()) if "frictionloss" in self.param_specs: if get_name: param_list.append("frictionloss") else: if not self.param_inited and self.param_specs["frictionloss"].get( "rel", False ): self.param_specs["frictionloss"]["lb"] += self.frictionloss self.param_specs["frictionloss"]["ub"] += self.frictionloss self.param_specs["frictionloss"]["lb"] = max( self.param_specs["frictionloss"]["lb"], self.param_specs["frictionloss"].get("min", -np.inf), ) self.param_specs["frictionloss"]["ub"] = min( self.param_specs["frictionloss"]["ub"], self.param_specs["frictionloss"].get("max", np.inf), ) frictionloss = normalize_range( self.frictionloss, self.param_specs["frictionloss"]["lb"], self.param_specs["frictionloss"]["ub"], ) param_list.append(frictionloss.flatten()) if not get_name: self.param_inited = True # import ipdb; ipdb.set_trace() def set_params(self, params, pad_zeros=False): if "axis" in self.param_specs: if self.type == "hinge": self.axis = denormalize_range( params[:2], np.array([0, -2 * np.pi]), np.array([np.pi, 2 * np.pi]) ) params = params[2:] elif pad_zeros: params = params[2:] if self.actuator is not None: params = self.actuator.set_params(params) elif pad_zeros: params = params[1:] # Order of this matters!!! 
Should always be damping, aramature, stiffness (the order they are read) if "damping" in self.param_specs: self.damping = denormalize_range( params[[0]], self.param_specs["damping"]["lb"], self.param_specs["damping"]["ub"], ) params = params[1:] if "armature" in self.param_specs: self.armature = denormalize_range( params[[0]], self.param_specs["armature"]["lb"], self.param_specs["armature"]["ub"], ) params = params[1:] if "stiffness" in self.param_specs: self.stiffness = denormalize_range( params[[0]], self.param_specs["stiffness"]["lb"], self.param_specs["stiffness"]["ub"], ) params = params[1:] if "frictionloss" in self.param_specs: self.frictionloss = denormalize_range( params[[0]], self.param_specs["frictionloss"]["lb"], self.param_specs["frictionloss"]["ub"], ) params = params[1:] return params class Geom: def __init__(self, node, body): self.node = node self.body = body self.cfg = body.cfg self.local_coord = body.local_coord self.name = node.attrib.get("name", "") self.type = node.attrib["type"] self.density = ( parse_vec(node.attrib["density"]) / 1000 if "density" in node.attrib else np.array([1]) ) self.parse_param_specs() self.param_inited = False # tunable parameters # self.size = ( # parse_vec(node.attrib["size"]) if "size" in node.attrib else np.array([0]) # ) self.size = ( parse_vec(node.attrib["size"]) if "size" in node.attrib else np.array([1, 1, 1]) ) if self.type == "box": self.start = self.end = self.pos = parse_vec(node.attrib["pos"]) self.pos_delta = np.array([0, 0, 0]) self.rot = parse_vec(node.attrib["quat"]) elif self.type == "sphere": self.pos_delta = np.array([0, 0, 0]) self.start = self.end = self.pos = parse_vec(node.attrib["pos"]) elif self.type == "capsule": self.start, self.end = parse_fromto(node.attrib["fromto"]) elif self.type == "mesh": self.start, self.end = body.pos.copy(), body.pos.copy() if self.local_coord: self.start += body.pos self.end += body.pos if body.bone_start is None: self.bone_start = self.start.copy() body.bone_start = self.bone_start.copy() else: self.bone_start = body.bone_start.copy() self.ext_start = np.linalg.norm( self.bone_start - self.start ) ## Geom extension from bone start def __repr__(self): return "geom_" + self.name def parse_param_specs(self): self.param_specs = deepcopy(self.cfg.get("geom_params", {})) for name, specs in self.param_specs.items(): if "lb" in specs and isinstance(specs["lb"], list): if self.type == "box": specs["lb"] = np.array([specs["lb"]] * 3) elif self.type == "capsule": specs["lb"] = np.array(specs["lb"]) if "ub" in specs and isinstance(specs["ub"], list): if self.type == "box": specs["lb"] = np.array([specs["lb"]] * 3) elif self.type == "capsule": specs["lb"] = np.array(specs["lb"]) def update_start(self): if self.type == "capsule": vec = self.bone_start - self.end self.start = self.bone_start + vec * (self.ext_start / np.linalg.norm(vec)) def sync_node(self): # self.node.attrib['name'] = self.name self.node.attrib.pop("name", None) if not self.size is None: self.node.attrib["size"] = " ".join( [f"{x:.6f}".rstrip("0").rstrip(".") for x in self.size] ) self.node.attrib["density"] = " ".join( [f"{x * 1000:.6f}".rstrip("0").rstrip(".") for x in self.density] ) # if self.type == "capsule": # start = self.start - self.body.pos if self.local_coord else self.start # end = self.end - self.body.pos if self.local_coord else self.end # self.node.attrib["fromto"] = " ".join( # [ # f"{x:.6f}".rstrip("0").rstrip(".") # for x in np.concatenate([start, end]) # ] # ) # elif self.type == "box" or self.type == "sphere": # 
# self.node.attrib["pos"] = " ".join( # # [f"{x:.6f}".rstrip("0").rstrip(".") for x in self.pos + self.pos_delta] # # ) # import ipdb; ipdb.set_trace() # pass def get_params(self, param_list, get_name=False, pad_zeros=False): if "size" in self.param_specs: if get_name: param_list.append("size") else: if ( self.type == "capsule" or self.type == "box" or self.type == "sphere" or self.type == "mesh" ): if not self.param_inited and self.param_specs["size"].get( "rel", False ): self.param_specs["size"]["lb"] += self.size self.param_specs["size"]["ub"] += self.size self.param_specs["size"]["lb"] = max( self.param_specs["size"]["lb"], self.param_specs["size"].get("min", -np.inf), ) self.param_specs["size"]["ub"] = min( self.param_specs["size"]["ub"], self.param_specs["size"].get("max", np.inf), ) size = normalize_range( self.size, self.param_specs["size"]["lb"], self.param_specs["size"]["ub"], ) param_list.append(size.flatten()) if pad_zeros and self.type == "capsule": param_list.append( np.zeros(2) ) # capsule has needs to be 3 for GNN elif pad_zeros: param_list.append(np.zeros(self.size.shape)) if "ext_start" in self.param_specs: if get_name: param_list.append("ext_start") else: if ( self.type == "capsule" or self.type == "box" or self.type == "sphere" ): if not self.param_inited and self.param_specs["ext_start"].get( "rel", False ): self.param_specs["ext_start"]["lb"] += self.ext_start self.param_specs["ext_start"]["ub"] += self.ext_start self.param_specs["ext_start"]["lb"] = max( self.param_specs["ext_start"]["lb"], self.param_specs["ext_start"].get("min", -np.inf), ) self.param_specs["ext_start"]["ub"] = min( self.param_specs["ext_start"]["ub"], self.param_specs["ext_start"].get("max", np.inf), ) ext_start = normalize_range( self.ext_start, self.param_specs["ext_start"]["lb"], self.param_specs["ext_start"]["ub"], ) param_list.append(ext_start.flatten()) elif pad_zeros: param_list.append(np.zeros(self.size.shape)) if "density" in self.param_specs: if get_name: param_list.append("density") else: if not self.param_inited and self.param_specs["density"].get( "rel", False ): self.param_specs["density"]["lb"] += self.density self.param_specs["density"]["ub"] += self.density self.param_specs["density"]["lb"] = max( self.param_specs["density"]["lb"], self.param_specs["density"].get("min", -np.inf), ) self.param_specs["density"]["ub"] = min( self.param_specs["density"]["ub"], self.param_specs["density"].get("max", np.inf), ) density = normalize_range( self.density, self.param_specs["density"]["lb"], self.param_specs["density"]["ub"], ) param_list.append(density.flatten()) # if pad_zeros: # param_list.append(np.zeros(self.density.shape)) if "pos_delta" in self.param_specs: if get_name: param_list.append("pos_delta") else: if self.type == "box" or self.type == "sphere": if not self.param_inited and self.param_specs["pos_delta"].get( "rel", False ): self.param_specs["pos_delta"]["lb"] += self.density self.param_specs["pos_delta"]["ub"] += self.density self.param_specs["pos_delta"]["lb"] = max( self.param_specs["pos_delta"]["lb"], self.param_specs["pos_delta"].get("min", -np.inf), ) self.param_specs["pos_delta"]["ub"] = min( self.param_specs["pos_delta"]["ub"], self.param_specs["pos_delta"].get("max", np.inf), ) pos_delta = normalize_range( self.pos_delta, self.param_specs["pos_delta"]["lb"], self.param_specs["pos_delta"]["ub"], ) param_list.append(pos_delta.flatten()) elif pad_zeros: param_list.append(np.zeros(3)) if not get_name: self.param_inited = True def set_params(self, params, pad_zeros=False): 
if "size" in self.param_specs: if ( self.type == "capsule" or self.type == "box" or self.type == "sphere" or self.type == "mesh" ): if len(self.size) == 1: self.size = denormalize_range( params[[0]], self.param_specs["size"]["lb"], self.param_specs["size"]["ub"], ) params = params[1:] elif len(self.size) == 3: self.size = denormalize_range( np.array(params[:3]), self.param_specs["size"]["lb"], self.param_specs["size"]["ub"], ) params = params[3:] elif pad_zeros: params = params[1:] if "ext_start" in self.param_specs: if self.type == "capsule" or self.type == "box" or self.type == "sphere": self.ext_start = denormalize_range( params[[0]], self.param_specs["ext_start"]["lb"], self.param_specs["ext_start"]["ub"], ) params = params[1:] elif pad_zeros: params = params[1:] if "density" in self.param_specs: if ( self.type == "capsule" or self.type == "box" or self.type == "sphere" or self.type == "mesh" ): self.density = denormalize_range( params[[0]], self.param_specs["density"]["lb"], self.param_specs["density"]["ub"], ) params = params[1:] elif pad_zeros: params = params[1:] if "pos_delta" in self.param_specs: if self.type == "box" or self.type == "sphere": self.pos_delta = denormalize_range( np.array(params[:3]), self.param_specs["pos_delta"]["lb"], self.param_specs["pos_delta"]["ub"], ) params = params[3:] elif pad_zeros: params = params[3:] return params class Actuator: def __init__(self, node, joint): self.node = node self.joint = joint self.cfg = joint.cfg self.joint_name = node.attrib["joint"] self.name = self.joint_name self.parse_param_specs() self.param_inited = False # tunable parameters self.gear = float(node.attrib["gear"]) def parse_param_specs(self): self.param_specs = deepcopy(self.cfg.get("actuator_params", {})) for name, specs in self.param_specs.items(): if "lb" in specs and isinstance(specs["lb"], list): specs["lb"] = np.array(specs["lb"]) if "ub" in specs and isinstance(specs["ub"], list): specs["ub"] = np.array(specs["ub"]) def sync_node(self): self.node.attrib["gear"] = f"{self.gear:.6f}".rstrip("0").rstrip(".") self.name = self.joint.name self.node.attrib["name"] = self.name self.node.attrib["joint"] = self.joint.name def get_params(self, param_list, get_name=False): if "gear" in self.param_specs: if get_name: param_list.append("gear") else: if not self.param_inited and self.param_specs["gear"].get("rel", False): self.param_specs["gear"]["lb"] += self.gear self.param_specs["gear"]["ub"] += self.gear self.param_specs["gear"]["lb"] = max( self.param_specs["gear"]["lb"], self.param_specs["gear"].get("min", -np.inf), ) self.param_specs["gear"]["ub"] = min( self.param_specs["gear"]["ub"], self.param_specs["gear"].get("max", np.inf), ) gear = normalize_range( self.gear, self.param_specs["gear"]["lb"], self.param_specs["gear"]["ub"], ) param_list.append(np.array([gear])) if not get_name: self.param_inited = True def set_params(self, params): if "gear" in self.param_specs: self.gear = denormalize_range( params[0].item(), self.param_specs["gear"]["lb"], self.param_specs["gear"]["ub"], ) params = params[1:] return params class Body: def __init__(self, node, parent_body, robot, cfg, new_body=False): self.node = node self.parent = parent_body self.new_body = new_body if parent_body is not None: parent_body.child.append(self) parent_body.cind += 1 self.depth = parent_body.depth + 1 else: self.depth = 0 self.robot = robot self.cfg = cfg self.tree = robot.tree self.local_coord = robot.local_coord self.name = ( node.attrib["name"] if "name" in node.attrib else self.parent.name + 
f"_child{len(self.parent.child)}" ) self.child = [] self.cind = 0 self.pos = parse_vec(node.attrib["pos"]) if self.local_coord and parent_body is not None: self.pos += parent_body.pos if cfg.get("init_root_from_geom", False): self.bone_start = None if parent_body is None else self.pos.copy() else: self.bone_start = self.pos.copy() self.joints = [Joint(x, self) for x in node.findall('joint[@type="hinge"]')] + \ [Joint(x, self) for x in node.findall('joint[@type="free"]')] + \ [Joint(x, self) for x in node.findall('freejoint')] # self.geoms = [Geom(x, self) for x in node.findall('geom[@type="capsule"]')] supported_geoms = self.cfg.get("supported_geoms", ["capsule", "box"]) self.geoms = [ Geom(x, self) for geom_type in supported_geoms for x in node.findall(f'geom[@type="{geom_type}"]') ] # self.geoms = [Geom(x, self) for x in node.findall('geom[@type="capsule"]')] + [Geom(x, self) for x in node.findall('geom[@type="sphere"]')] + [Geom(x, self) for x in node.findall('geom[@type="box"]')] self.parse_param_specs() self.param_inited = False # parameters self.bone_end = None self.bone_offset = None def __repr__(self): return "body_" + self.name def parse_param_specs(self): self.param_specs = deepcopy(self.cfg.get("body_params", {})) for name, specs in self.param_specs.items(): if "lb" in specs and isinstance(specs["lb"], list): specs["lb"] = np.array(specs["lb"]) if "ub" in specs and isinstance(specs["ub"], list): specs["ub"] = np.array(specs["ub"]) if name == "bone_ang": specs["lb"] = np.deg2rad(specs["lb"]) specs["ub"] = np.deg2rad(specs["ub"]) def reindex(self): if self.parent is None: self.index = "0" else: ind = self.parent.child.index(self) + 1 pname = "" if self.parent.index == "0" else self.parent.index self.index = str(ind) + pname if self.new_body: self.name = self.index def init(self): if len(self.child) > 0: bone_ends = [x.bone_start for x in self.child] else: bone_ends = [x.end for x in self.geoms] if len(bone_ends) > 0: self.bone_end = np.mean(np.stack(bone_ends), axis=0) self.bone_offset = self.bone_end - self.bone_start def get_actuator_name(self): for joint in self.joints: if joint.actuator is not None: return joint.actuator.name def get_joint_range(self): assert len(self.joints) == 1 return self.joints[0].range def sync_node(self): pos = ( self.pos - self.parent.pos if self.local_coord and self.parent is not None else self.pos ) self.node.attrib["name"] = self.name self.node.attrib["pos"] = " ".join( [f"{x:.6f}".rstrip("0").rstrip(".") for x in pos] ) for idx, joint in enumerate(self.joints): joint.sync_node(rename=self.new_body, index=idx) for geom in self.geoms: geom.sync_node() def sync_geom(self): for geom in self.geoms: geom.bone_start = self.bone_start.copy() # geom.end = self.bone_end.copy() # geom.update_start() def sync_joint(self): if self.parent is not None: for joint in self.joints: joint.pos = self.pos.copy() def rebuild(self): if self.parent is not None: # self.bone_start = self.parent.bone_end.copy() self.pos = self.bone_start.copy() if self.bone_offset is not None: self.bone_end = self.bone_start + self.bone_offset if self.parent is None and self.cfg.get("no_root_offset", False): self.bone_end = self.bone_start self.sync_geom() self.sync_joint() def get_params( self, param_list, get_name=False, pad_zeros=False, demap_params=False ): if self.bone_offset is not None and "offset" in self.param_specs: if get_name: if self.param_specs["offset"]["type"] == "xz": param_list += ["offset_x", "offset_z"] elif self.param_specs["offset"]["type"] == "xy": param_list += 
["offset_x", "offset_y"] else: param_list += ["offset_x", "offset_y", "offset_z"] else: if self.param_specs["offset"]["type"] == "xz": offset = self.bone_offset[[0, 2]] elif self.param_specs["offset"]["type"] == "xy": offset = self.bone_offset[[0, 1]] else: offset = self.bone_offset if not self.param_inited and self.param_specs["offset"].get( "rel", False ): self.param_specs["offset"]["lb"] += offset self.param_specs["offset"]["ub"] += offset self.param_specs["offset"]["lb"] = np.maximum( self.param_specs["offset"]["lb"], self.param_specs["offset"].get( "min", np.full_like(offset, -np.inf) ), ) self.param_specs["offset"]["ub"] = np.minimum( self.param_specs["offset"]["ub"], self.param_specs["offset"].get( "max", np.full_like(offset, np.inf) ), ) offset = normalize_range( offset, self.param_specs["offset"]["lb"], self.param_specs["offset"]["ub"], ) param_list.append(offset.flatten()) if self.bone_offset is not None and "bone_len" in self.param_specs: if get_name: param_list += ["bone_len"] else: bone_len = np.linalg.norm(self.bone_offset) if not self.param_inited and self.param_specs["bone_len"].get( "rel", False ): self.param_specs["bone_len"]["lb"] += bone_len self.param_specs["bone_len"]["ub"] += bone_len self.param_specs["bone_len"]["lb"] = max( self.param_specs["bone_len"]["lb"], self.param_specs["bone_len"].get("min", -np.inf), ) self.param_specs["bone_len"]["ub"] = min( self.param_specs["bone_len"]["ub"], self.param_specs["bone_len"].get("max", np.inf), ) bone_len = normalize_range( bone_len, self.param_specs["bone_len"]["lb"], self.param_specs["bone_len"]["ub"], ) param_list.append(np.array([bone_len])) if self.bone_offset is not None and "bone_ang" in self.param_specs: if get_name: param_list += ["bone_ang"] else: bone_ang = math.atan2(self.bone_offset[2], self.bone_offset[0]) if not self.param_inited and self.param_specs["bone_ang"].get( "rel", False ): self.param_specs["bone_ang"]["lb"] += bone_ang self.param_specs["bone_ang"]["ub"] += bone_ang self.param_specs["bone_ang"]["lb"] = max( self.param_specs["bone_ang"]["lb"], self.param_specs["bone_ang"].get("min", -np.inf), ) self.param_specs["bone_ang"]["ub"] = min( self.param_specs["bone_ang"]["ub"], self.param_specs["bone_ang"].get("max", np.inf), ) bone_ang = normalize_range( bone_ang, self.param_specs["bone_ang"]["lb"], self.param_specs["bone_ang"]["ub"], ) param_list.append(np.array([bone_ang])) for joint in self.joints: joint.get_params(param_list, get_name, pad_zeros) for geom in self.geoms: geom.get_params(param_list, get_name, pad_zeros) if not get_name: self.param_inited = True if demap_params and not get_name and len(param_list) > 0: params = self.robot.demap_params(np.concatenate(param_list)) return params def set_params(self, params, pad_zeros=False, map_params=False): if map_params: params = self.robot.map_params(params) if self.bone_offset is not None and "offset" in self.param_specs: if self.param_specs["offset"]["type"] in {"xz", "xy"}: offset = denormalize_range( params[:2], self.param_specs["offset"]["lb"], self.param_specs["offset"]["ub"], ) if np.all(offset == 0.0): offset[0] += 1e-8 if self.param_specs["offset"]["type"] == "xz": self.bone_offset[[0, 2]] = offset elif self.param_specs["offset"]["type"] == "xy": self.bone_offset[[0, 1]] = offset params = params[2:] else: offset = denormalize_range( params[:3], self.param_specs["offset"]["lb"], self.param_specs["offset"]["ub"], ) if np.all(offset == 0.0): offset[0] += 1e-8 self.bone_offset[:] = offset params = params[3:] if self.bone_offset is not None and 
"bone_len" in self.param_specs: bone_len = denormalize_range( params[0].item(), self.param_specs["bone_len"]["lb"], self.param_specs["bone_len"]["ub"], ) bone_len = max(bone_len, 1e-4) params = params[1:] elif self.bone_offset is not None: bone_len = np.linalg.norm(self.bone_offset) if self.bone_offset is not None and "bone_ang" in self.param_specs: bone_ang = denormalize_range( params[0].item(), self.param_specs["bone_ang"]["lb"], self.param_specs["bone_ang"]["ub"], ) params = params[1:] elif self.bone_offset is not None: bone_ang = math.atan2(self.bone_offset[2], self.bone_offset[0]) if "bone_len" in self.param_specs or "bone_ang" in self.param_specs: self.bone_offset = np.array( [bone_len * math.cos(bone_ang), 0, bone_len * math.sin(bone_ang)] ) for joint in self.joints: params = joint.set_params(params, pad_zeros) for geom in self.geoms: params = geom.set_params(params, pad_zeros) # rebuild bone, geom, joint self.rebuild() return params class Robot: def __init__(self, cfg, data_dir="data/smpl", model_xml_path=None, masterfoot=False, create_default_skeleton=False, clean_up=False): self.bodies = [] self.weight = 0 self.height = 0 self.cfg = cfg if model_xml_path is not None: self.set_model_xml_path(model_xml_path) else: self.model_xml_path = None self.param_mapping = cfg.get("param_mapping", "clip") self.smpl_model = cfg.get("model", "smpl") self.mesh = cfg.get("mesh", False) self.gender = cfg.get("gender", "neutral") self.flatfoot = cfg.get("flatfoot", True) self.rel_joint_lm = cfg.get( "rel_joint_lm", True ) # Rolling this out worldwide!! self.masterfoot = masterfoot self.param_specs = self.cfg.get("body_params", {}) self.hull_dict = {} self.beta = ( torch.zeros((1, 10)).float() if self.smpl_model == "smpl" else torch.zeros((1, 16)).float() ) if self.smpl_model == "smpl":
self.smpl_parser_n = SMPL_Parser(model_path=data_dir, gender="neutral", create_transl=False)
2
2023-10-30 20:43:43+00:00
24k
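The get_params/set_params methods in the row above all funnel through the same [-1, 1] normalization convention. For reference, a minimal runnable sketch of that round trip; the bounds and the gear value below are illustrative only, not taken from any real config:

import numpy as np

def normalize_range(value, lb, ub):
    # map a value in [lb, ub] onto [-1, 1], as get_params does before appending
    return (value - lb) / (ub - lb) * 2 - 1

def denormalize_range(value, lb, ub):
    # inverse map from [-1, 1] back to [lb, ub], as set_params does when reading
    return (value + 1) * 0.5 * (ub - lb) + lb

# illustrative values only: a gear of 300 with assumed bounds [50, 500]
z = normalize_range(300.0, 50.0, 500.0)            # ~0.111
assert np.isclose(denormalize_range(z, 50.0, 500.0), 300.0)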
masked-spacetime-hashing/msth
nerfstudio/data/datamanagers/base_datamanager.py
[ { "identifier": "CameraOptimizerConfig", "path": "nerfstudio/cameras/camera_optimizers.py", "snippet": "class CameraOptimizerConfig(InstantiateConfig):\n \"\"\"Configuration of optimization for camera poses.\"\"\"\n\n _target: Type = field(default_factory=lambda: CameraOptimizer)\n\n mode: Lite...
from abc import abstractmethod from dataclasses import dataclass, field from pathlib import Path from typing import Any, Dict, List, Optional, Tuple, Type, Union from rich.progress import Console from torch import nn from torch.nn import Parameter from torch.utils.data import Dataset from torch.utils.data.distributed import DistributedSampler from typing_extensions import Literal from nerfstudio.cameras.camera_optimizers import CameraOptimizerConfig from nerfstudio.cameras.cameras import CameraType from nerfstudio.cameras.rays import RayBundle from nerfstudio.configs.base_config import InstantiateConfig from nerfstudio.data.dataparsers.arkitscenes_dataparser import ( ARKitScenesDataParserConfig, ) from nerfstudio.data.dataparsers.base_dataparser import DataparserOutputs from nerfstudio.data.dataparsers.blender_dataparser import BlenderDataParserConfig from nerfstudio.data.dataparsers.dnerf_dataparser import DNeRFDataParserConfig from nerfstudio.data.dataparsers.dycheck_dataparser import DycheckDataParserConfig from nerfstudio.data.dataparsers.instant_ngp_dataparser import ( InstantNGPDataParserConfig, ) from nerfstudio.data.dataparsers.minimal_dataparser import MinimalDataParserConfig from nerfstudio.data.dataparsers.nerfstudio_dataparser import NerfstudioDataParserConfig from nerfstudio.data.dataparsers.nuscenes_dataparser import NuScenesDataParserConfig from nerfstudio.data.dataparsers.phototourism_dataparser import ( PhototourismDataParserConfig, ) from nerfstudio.data.dataparsers.scannet_dataparser import ScanNetDataParserConfig from nerfstudio.data.dataparsers.sdfstudio_dataparser import SDFStudioDataParserConfig from nerfstudio.data.dataparsers.sitcoms3d_dataparser import Sitcoms3DDataParserConfig from nerfstudio.data.datasets.base_dataset import InputDataset from nerfstudio.data.pixel_samplers import ( EquirectangularPixelSampler, PatchPixelSampler, PixelSampler, ) from nerfstudio.data.utils.dataloaders import ( CacheDataloader, FixedIndicesEvalDataloader, RandIndicesEvalDataloader, ) from nerfstudio.data.utils.nerfstudio_collate import nerfstudio_collate from nerfstudio.engine.callbacks import TrainingCallback, TrainingCallbackAttributes from nerfstudio.model_components.ray_generators import RayGenerator from nerfstudio.utils.misc import IterableWrapper import torch import tyro
17,392
def setup_eval(self): """Sets up the data manager for evaluation""" @abstractmethod def next_train(self, step: int) -> Tuple[RayBundle, Dict]: """Returns the next batch of data from the train data manager. Args: step: the step number of the eval image to retrieve Returns: A tuple of the ray bundle for the image, and a dictionary of additional batch information such as the groudtruth image. """ raise NotImplementedError @abstractmethod def next_eval(self, step: int) -> Tuple[RayBundle, Dict]: """Returns the next batch of data from the eval data manager. Args: step: the step number of the eval image to retrieve Returns: A tuple of the ray bundle for the image, and a dictionary of additional batch information such as the groudtruth image. """ raise NotImplementedError @abstractmethod def next_eval_image(self, step: int) -> Tuple[int, RayBundle, Dict]: """Retreive the next eval image. Args: step: the step number of the eval image to retrieve Returns: A tuple of the step number, the ray bundle for the image, and a dictionary of additional batch information such as the groudtruth image. """ raise NotImplementedError @abstractmethod def get_train_rays_per_batch(self) -> int: """Returns the number of rays per batch for training.""" raise NotImplementedError @abstractmethod def get_eval_rays_per_batch(self) -> int: """Returns the number of rays per batch for evaluation.""" raise NotImplementedError def get_datapath(self) -> Optional[Path]: # pylint:disable=no-self-use """Returns the path to the data. This is used to determine where to save camera paths.""" return None def get_training_callbacks( # pylint:disable=no-self-use self, training_callback_attributes: TrainingCallbackAttributes # pylint: disable=unused-argument ) -> List[TrainingCallback]: """Returns a list of callbacks to be used during training.""" return [] @abstractmethod def get_param_groups(self) -> Dict[str, List[Parameter]]: # pylint: disable=no-self-use """Get the param groups for the data manager. Returns: A list of dictionaries containing the data manager's param groups. """ return {} @dataclass class VanillaDataManagerConfig(DataManagerConfig): """A basic data manager""" _target: Type = field(default_factory=lambda: VanillaDataManager) """Target class to instantiate.""" dataparser: AnnotatedDataParserUnion = BlenderDataParserConfig() """Specifies the dataparser used to unpack the data.""" train_num_rays_per_batch: int = 1024 """Number of rays per batch to use per training iteration.""" train_num_images_to_sample_from: int = -1 """Number of images to sample during training iteration.""" train_num_times_to_repeat_images: int = -1 """When not training on all images, number of iterations before picking new images. If -1, never pick new images.""" eval_num_rays_per_batch: int = 1024 """Number of rays per batch to use per eval iteration.""" eval_num_images_to_sample_from: int = -1 """Number of images to sample during eval iteration.""" eval_num_times_to_repeat_images: int = -1 """When not evaluating on all images, number of iterations before picking new images. If -1, never pick new images.""" eval_image_indices: Optional[Tuple[int, ...]] = (0,) """Specifies the image indices to use during eval; if None, uses all.""" camera_optimizer: CameraOptimizerConfig = CameraOptimizerConfig() """Specifies the camera pose optimizer used during training. 
Helpful if poses are noisy, such as for data from Record3D.""" collate_fn = staticmethod(nerfstudio_collate) """Specifies the collate function to use for the train and eval dataloaders.""" camera_res_scale_factor: float = 1.0 """The scale factor for scaling spatial data such as images, mask, semantics along with relevant information about camera intrinsics """ patch_size: int = 1 """Size of patch to sample from. If >1, patch-based sampling will be used.""" class VanillaDataManager(DataManager): # pylint: disable=abstract-method """Basic stored data manager implementation. This is pretty much a port over from our old dataloading utilities, and is a little jank under the hood. We may clean this up a little bit under the hood with more standard dataloading components that can be strung together, but it can be just used as a black box for now since only the constructor is likely to change in the future, or maybe passing in step number to the next_train and next_eval functions. Args: config: the DataManagerConfig used to instantiate class """ config: VanillaDataManagerConfig
# Copyright 2022 The Nerfstudio Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Datamanager. """ from __future__ import annotations CONSOLE = Console(width=120) AnnotatedDataParserUnion = tyro.conf.OmitSubcommandPrefixes[ # Omit prefixes of flags in subcommands. tyro.extras.subcommand_type_from_defaults( { "nerfstudio-data": NerfstudioDataParserConfig(), "minimal-parser": MinimalDataParserConfig(), "arkit-data": ARKitScenesDataParserConfig(), "blender-data": BlenderDataParserConfig(), "instant-ngp-data": InstantNGPDataParserConfig(), "nuscenes-data": NuScenesDataParserConfig(), "dnerf-data": DNeRFDataParserConfig(), "phototourism-data": PhototourismDataParserConfig(), "dycheck-data": DycheckDataParserConfig(), "scannet-data": ScanNetDataParserConfig(), "sdfstudio-data": SDFStudioDataParserConfig(), "sitcoms3d-data": Sitcoms3DDataParserConfig(), }, prefix_names=False, # Omit prefixes in subcommands themselves. ) ] """Union over possible dataparser types, annotated with metadata for tyro. This is the same as the vanilla union, but results in shorter subcommand names.""" @dataclass class DataManagerConfig(InstantiateConfig): """Configuration for data manager instantiation; DataManager is in charge of keeping the train/eval dataparsers; After instantiation, data manager holds both train/eval datasets and is in charge of returning unpacked train/eval data at each iteration """ _target: Type = field(default_factory=lambda: DataManager) """Target class to instantiate.""" data: Optional[Path] = None """Source of data, may not be used by all models.""" camera_optimizer: Optional[CameraOptimizerConfig] = None """Specifies the camera pose optimizer used during training. Helpful if poses are noisy.""" class DataManager(nn.Module): """Generic data manager's abstract class This version of the data manager is designed be a monolithic way to load data and latents, especially since this may contain learnable parameters which need to be shared across the train and test data managers. The idea is that we have setup methods for train and eval separately and this can be a combined train/eval if you want. Usage: To get data, use the next_train and next_eval functions. This data manager's next_train and next_eval methods will return 2 things: 1. A Raybundle: This will contain the rays we are sampling, with latents and conditionals attached (everything needed at inference) 2. A "batch" of auxiliary information: This will contain the mask, the ground truth pixels, etc needed to actually train, score, etc the model Rationale: Because of this abstraction we've added, we can support more NeRF paradigms beyond the vanilla nerf paradigm of single-scene, fixed-images, no-learnt-latents. We can now support variable scenes, variable number of images, and arbitrary latents. 
Train Methods: setup_train: sets up for being used as train iter_train: will be called on __iter__() for the train iterator next_train: will be called on __next__() for the training iterator get_train_iterable: utility that gets a clean pythonic iterator for your training data Eval Methods: setup_eval: sets up for being used as eval iter_eval: will be called on __iter__() for the eval iterator next_eval: will be called on __next__() for the eval iterator get_eval_iterable: utility that gets a clean pythonic iterator for your eval data Attributes: train_count (int): the step number of our train iteration, needs to be incremented manually eval_count (int): the step number of our eval iteration, needs to be incremented manually train_dataset (Dataset): the dataset for the train dataset eval_dataset (Dataset): the dataset for the eval dataset Additional attributes specific to each subclass are defined in the setup_train and setup_eval functions. """ train_dataset: Optional[Dataset] = None eval_dataset: Optional[Dataset] = None train_sampler: Optional[DistributedSampler] = None eval_sampler: Optional[DistributedSampler] = None def __init__(self): """Constructor for the DataManager class. Subclassed DataManagers will likely need to override this constructor. If you aren't manually calling the setup_train and setup_eval functions from an overriden constructor, that you call super().__init__() BEFORE you initialize any nn.Modules or nn.Parameters, but AFTER you've already set all the attributes you need for the setup functions.""" super().__init__() self.train_count = 0 self.eval_count = 0 if self.train_dataset and self.test_mode != "inference": self.setup_train() if self.eval_dataset and self.test_mode != "inference": self.setup_eval() def forward(self): """Blank forward method This is an nn.Module, and so requires a forward() method normally, although in our case we do not need a forward() method""" raise NotImplementedError def iter_train(self): """The __iter__ function for the train iterator. This only exists to assist the get_train_iterable function, since we need to pass in an __iter__ function for our trivial iterable that we are making.""" self.train_count = 0 def iter_eval(self): """The __iter__ function for the eval iterator. This only exists to assist the get_eval_iterable function, since we need to pass in an __iter__ function for our trivial iterable that we are making.""" self.eval_count = 0 def get_train_iterable(self, length=-1) -> IterableWrapper: """Gets a trivial pythonic iterator that will use the iter_train and next_train functions as __iter__ and __next__ methods respectively. This basically is just a little utility if you want to do something like: | for ray_bundle, batch in datamanager.get_train_iterable(): | <eval code here> since the returned IterableWrapper is just an iterator with the __iter__ and __next__ methods (methods bound to our DataManager instance in this case) specified in the constructor. """ return IterableWrapper(self.iter_train, self.next_train, length) def get_eval_iterable(self, length=-1) -> IterableWrapper: """Gets a trivial pythonic iterator that will use the iter_eval and next_eval functions as __iter__ and __next__ methods respectively. 
This basically is just a little utility if you want to do something like: | for ray_bundle, batch in datamanager.get_eval_iterable(): | <eval code here> since the returned IterableWrapper is just an iterator with the __iter__ and __next__ methods (methods bound to our DataManager instance in this case) specified in the constructor. """ return IterableWrapper(self.iter_eval, self.next_eval, length) @abstractmethod def setup_train(self): """Sets up the data manager for training. Here you will define any subclass specific object attributes from the attribute""" @abstractmethod def setup_eval(self): """Sets up the data manager for evaluation""" @abstractmethod def next_train(self, step: int) -> Tuple[RayBundle, Dict]: """Returns the next batch of data from the train data manager. Args: step: the step number of the eval image to retrieve Returns: A tuple of the ray bundle for the image, and a dictionary of additional batch information such as the groudtruth image. """ raise NotImplementedError @abstractmethod def next_eval(self, step: int) -> Tuple[RayBundle, Dict]: """Returns the next batch of data from the eval data manager. Args: step: the step number of the eval image to retrieve Returns: A tuple of the ray bundle for the image, and a dictionary of additional batch information such as the groudtruth image. """ raise NotImplementedError @abstractmethod def next_eval_image(self, step: int) -> Tuple[int, RayBundle, Dict]: """Retreive the next eval image. Args: step: the step number of the eval image to retrieve Returns: A tuple of the step number, the ray bundle for the image, and a dictionary of additional batch information such as the groudtruth image. """ raise NotImplementedError @abstractmethod def get_train_rays_per_batch(self) -> int: """Returns the number of rays per batch for training.""" raise NotImplementedError @abstractmethod def get_eval_rays_per_batch(self) -> int: """Returns the number of rays per batch for evaluation.""" raise NotImplementedError def get_datapath(self) -> Optional[Path]: # pylint:disable=no-self-use """Returns the path to the data. This is used to determine where to save camera paths.""" return None def get_training_callbacks( # pylint:disable=no-self-use self, training_callback_attributes: TrainingCallbackAttributes # pylint: disable=unused-argument ) -> List[TrainingCallback]: """Returns a list of callbacks to be used during training.""" return [] @abstractmethod def get_param_groups(self) -> Dict[str, List[Parameter]]: # pylint: disable=no-self-use """Get the param groups for the data manager. Returns: A list of dictionaries containing the data manager's param groups. """ return {} @dataclass class VanillaDataManagerConfig(DataManagerConfig): """A basic data manager""" _target: Type = field(default_factory=lambda: VanillaDataManager) """Target class to instantiate.""" dataparser: AnnotatedDataParserUnion = BlenderDataParserConfig() """Specifies the dataparser used to unpack the data.""" train_num_rays_per_batch: int = 1024 """Number of rays per batch to use per training iteration.""" train_num_images_to_sample_from: int = -1 """Number of images to sample during training iteration.""" train_num_times_to_repeat_images: int = -1 """When not training on all images, number of iterations before picking new images. 
If -1, never pick new images.""" eval_num_rays_per_batch: int = 1024 """Number of rays per batch to use per eval iteration.""" eval_num_images_to_sample_from: int = -1 """Number of images to sample during eval iteration.""" eval_num_times_to_repeat_images: int = -1 """When not evaluating on all images, number of iterations before picking new images. If -1, never pick new images.""" eval_image_indices: Optional[Tuple[int, ...]] = (0,) """Specifies the image indices to use during eval; if None, uses all.""" camera_optimizer: CameraOptimizerConfig = CameraOptimizerConfig() """Specifies the camera pose optimizer used during training. Helpful if poses are noisy, such as for data from Record3D.""" collate_fn = staticmethod(nerfstudio_collate) """Specifies the collate function to use for the train and eval dataloaders.""" camera_res_scale_factor: float = 1.0 """The scale factor for scaling spatial data such as images, mask, semantics along with relevant information about camera intrinsics """ patch_size: int = 1 """Size of patch to sample from. If >1, patch-based sampling will be used.""" class VanillaDataManager(DataManager): # pylint: disable=abstract-method """Basic stored data manager implementation. This is pretty much a port over from our old dataloading utilities, and is a little jank under the hood. We may clean this up a little bit under the hood with more standard dataloading components that can be strung together, but it can be just used as a black box for now since only the constructor is likely to change in the future, or maybe passing in step number to the next_train and next_eval functions. Args: config: the DataManagerConfig used to instantiate class """ config: VanillaDataManagerConfig
train_dataset: InputDataset
17
2023-10-26 04:39:15+00:00
24k
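The get_train_iterable/get_eval_iterable docstrings in the row above describe wrapping the bound iter_*/next_* methods into a trivial Python iterable. A self-contained sketch of that pattern follows; it is an illustration of the idea, not nerfstudio's actual IterableWrapper:

class SimpleIterableWrapper:
    def __init__(self, iter_fn, next_fn, length=-1):
        self.iter_fn = iter_fn  # bound method called on __iter__ (e.g. iter_train)
        self.next_fn = next_fn  # bound method called on __next__ (e.g. next_train)
        self.length = length    # -1 means iterate indefinitely
        self.count = 0

    def __iter__(self):
        self.iter_fn()   # reset the manager's counter
        self.count = 0
        return self

    def __next__(self):
        if self.length != -1 and self.count >= self.length:
            raise StopIteration
        self.count += 1
        return self.next_fn(self.count)

# usage mirroring the docstring:
# for ray_bundle, batch in SimpleIterableWrapper(dm.iter_train, dm.next_train, 10):
#     ...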
chenruduan/OAReactDiff
oa_reactdiff/trainer/pl_trainer.py
[ { "identifier": "ProcessedQM9", "path": "oa_reactdiff/dataset/qm9.py", "snippet": "class ProcessedQM9(BaseQM9):\n def __init__(\n self,\n npz_path,\n center=True,\n pad_fragments=2,\n device=\"cpu\",\n zero_charge=False,\n remove_h=False,\n **kw...
from typing import Dict, List, Optional, Tuple from pathlib import Path from torch import nn from torch.utils.data import DataLoader from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts, StepLR from pytorch_lightning import LightningModule from torchmetrics.classification import ( BinaryAccuracy, BinaryAUROC, BinaryF1Score, BinaryPrecision, BinaryCohenKappa, ) from torchmetrics import PearsonCorrCoef, SpearmanCorrCoef, MeanAbsoluteError from oa_reactdiff.dataset import ( ProcessedQM9, ProcessedDoubleQM9, ProcessedTripleQM9, ProcessedTS1x, ) from oa_reactdiff.dynamics import EGNNDynamics, Confidence from oa_reactdiff.diffusion._schedule import DiffSchedule, PredefinedNoiseSchedule from oa_reactdiff.diffusion._normalizer import Normalizer, FEATURE_MAPPING from oa_reactdiff.diffusion.en_diffusion import EnVariationalDiffusion from oa_reactdiff.trainer._metrics import average_over_batch_metrics, pretty_print from oa_reactdiff.analyze.rmsd import batch_rmsd import torch import copy import torch.nn.functional as F import numpy as np import pandas as pd import oa_reactdiff.utils.training_tools as utils
20,008
PROCESS_FUNC = { "QM9": ProcessedQM9, "DoubleQM9": ProcessedDoubleQM9, "TripleQM9": ProcessedTripleQM9, "TS1x": ProcessedTS1x, } FILE_TYPE = { "QM9": ".npz", "DoubleQM9": ".npz", "TripleQM9": ".npz", "TS1x": ".pkl", } LR_SCHEDULER = { "cos": CosineAnnealingWarmRestarts, "step": StepLR, } class DDPMModule(LightningModule): def __init__( self, model_config: Dict, optimizer_config: Dict, training_config: Dict, node_nfs: List[int] = [9] * 3, edge_nf: int = 4, condition_nf: int = 3, fragment_names: List[str] = ["inorg_node", "org_edge", "org_node"], pos_dim: int = 3, update_pocket_coords: bool = True, condition_time: bool = True, edge_cutoff: Optional[float] = None, norm_values: Tuple = (1.0, 1.0, 1.0), norm_biases: Tuple = (0.0, 0.0, 0.0), noise_schedule: str = "polynomial_2", timesteps: int = 1000, precision: float = 1e-5, loss_type: str = "l2", pos_only: bool = False, process_type: Optional[str] = None, model: nn.Module = None, enforce_same_encoding: Optional[List] = None, scales: List[float] = [1.0, 1.0, 1.0], eval_epochs: int = 20, source: Optional[Dict] = None, fixed_idx: Optional[List] = None, ) -> None: super().__init__() egnn_dynamics = EGNNDynamics( model_config=model_config, node_nfs=node_nfs, edge_nf=edge_nf, condition_nf=condition_nf, fragment_names=fragment_names, pos_dim=pos_dim, update_pocket_coords=update_pocket_coords, condition_time=condition_time, edge_cutoff=edge_cutoff, model=model, enforce_same_encoding=enforce_same_encoding, source=source, ) normalizer = Normalizer( norm_values=norm_values, norm_biases=norm_biases, pos_dim=pos_dim, )
PROCESS_FUNC = { "QM9": ProcessedQM9, "DoubleQM9": ProcessedDoubleQM9, "TripleQM9": ProcessedTripleQM9, "TS1x": ProcessedTS1x, } FILE_TYPE = { "QM9": ".npz", "DoubleQM9": ".npz", "TripleQM9": ".npz", "TS1x": ".pkl", } LR_SCHEDULER = { "cos": CosineAnnealingWarmRestarts, "step": StepLR, } class DDPMModule(LightningModule): def __init__( self, model_config: Dict, optimizer_config: Dict, training_config: Dict, node_nfs: List[int] = [9] * 3, edge_nf: int = 4, condition_nf: int = 3, fragment_names: List[str] = ["inorg_node", "org_edge", "org_node"], pos_dim: int = 3, update_pocket_coords: bool = True, condition_time: bool = True, edge_cutoff: Optional[float] = None, norm_values: Tuple = (1.0, 1.0, 1.0), norm_biases: Tuple = (0.0, 0.0, 0.0), noise_schedule: str = "polynomial_2", timesteps: int = 1000, precision: float = 1e-5, loss_type: str = "l2", pos_only: bool = False, process_type: Optional[str] = None, model: nn.Module = None, enforce_same_encoding: Optional[List] = None, scales: List[float] = [1.0, 1.0, 1.0], eval_epochs: int = 20, source: Optional[Dict] = None, fixed_idx: Optional[List] = None, ) -> None: super().__init__() egnn_dynamics = EGNNDynamics( model_config=model_config, node_nfs=node_nfs, edge_nf=edge_nf, condition_nf=condition_nf, fragment_names=fragment_names, pos_dim=pos_dim, update_pocket_coords=update_pocket_coords, condition_time=condition_time, edge_cutoff=edge_cutoff, model=model, enforce_same_encoding=enforce_same_encoding, source=source, ) normalizer = Normalizer( norm_values=norm_values, norm_biases=norm_biases, pos_dim=pos_dim, )
gamma_module = PredefinedNoiseSchedule(
7
2023-10-30 02:53:38+00:00
24k
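PROCESS_FUNC, FILE_TYPE, and LR_SCHEDULER in the row above are plain string-to-class registries. A sketch of how such a registry is typically consumed when building an optimizer and scheduler; the config keys here are hypothetical, since the layout of DDPMModule's optimizer_config is not shown in the snippet:

import torch
from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts, StepLR

LR_SCHEDULER = {"cos": CosineAnnealingWarmRestarts, "step": StepLR}

# hypothetical config keys, for illustration only
opt_cfg = {"lr": 1e-4, "scheduler": "cos", "scheduler_kwargs": {"T_0": 10}}

params = [torch.nn.Parameter(torch.zeros(1))]
optimizer = torch.optim.Adam(params, lr=opt_cfg["lr"])
# look the scheduler class up by its config string and instantiate it
scheduler = LR_SCHEDULER[opt_cfg["scheduler"]](optimizer, **opt_cfg["scheduler_kwargs"])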
lewandofskee/DiAD
sgn/.ipynb_checkpoints/sgn-checkpoint.py
[ { "identifier": "conv_nd", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def conv_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D convolution module.\n \"\"\"\n if dims == 1:\n return nn.Conv1d(*args, **kwargs)\n elif dims == 2:\n return nn.Conv2d(...
import einops import torch import torch as th import torch.nn as nn import torchvision from ldm.modules.diffusionmodules.util import ( conv_nd, linear, zero_module, timestep_embedding, ) from einops import rearrange, repeat from torchvision.utils import make_grid from ldm.modules.attention import SpatialTransformer from ldm.modules.diffusionmodules.openaimodel import UNetModel, TimestepEmbedSequential, ResBlock, Downsample, AttentionBlock, Upsample from ldm.models.diffusion.ddpm import LatentDiffusion from ldm.util import log_txt_as_img, exists, instantiate_from_config from ldm.models.diffusion.ddim import DDIMSampler from omegaconf.listconfig import ListConfig
21,262
def custom_sigmoid(x): return 1 / (1 + torch.exp(-(x - 600) / 10)) class ControlledUnetModel(UNetModel): def forward(self, x, timesteps=None, context=None, control=None, only_mid_control=False, **kwargs): hs = [] with torch.no_grad():
def custom_sigmoid(x): return 1 / (1 + torch.exp(-(x - 600) / 10)) class ControlledUnetModel(UNetModel): def forward(self, x, timesteps=None, context=None, control=None, only_mid_control=False, **kwargs): hs = [] with torch.no_grad():
t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)
3
2023-10-30 14:21:09+00:00
24k
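custom_sigmoid in the row above is a logistic gate centered at t = 600 with temperature 10. How it is applied inside the UNet forward is not shown in the snippet, but its near-binary shape over diffusion timesteps is easy to verify:

import torch

def custom_sigmoid(x):
    # logistic gate: ~0 well below t = 600, 0.5 at t = 600, ~1 well above
    return 1 / (1 + torch.exp(-(x - 600) / 10))

t = torch.tensor([0.0, 500.0, 600.0, 700.0, 1000.0])
print(custom_sigmoid(t))
# approximately [0.0, 0.0, 0.5, 1.0, 1.0] -- a smooth on-switch
# for timesteps above ~600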
nv-tlabs/pacer
uhc/smpllib/smpl_local_robot.py
[ { "identifier": "Skeleton", "path": "uhc/khrylib/mocap/skeleton_local.py", "snippet": "class Skeleton:\n def __init__(self):\n self.bones = []\n self.name2bone = {}\n self.mass_scale = 1.0\n self.len_scale = 1.0\n self.dof_name = [\"x\", \"y\", \"z\"]\n self....
import os import sys import time import argparse import torch import pdb import os.path as osp import numpy as np import math import uuid import atexit import shutil import joblib import cv2 import mujoco import mujoco.viewer from copy import deepcopy from collections import defaultdict from lxml.etree import XMLParser, parse, ElementTree, Element, SubElement from lxml import etree from io import BytesIO from uhc.khrylib.mocap.skeleton_local import Skeleton from uhc.khrylib.mocap.skeleton_mesh_local import Skeleton as SkeletonMesh from uhc.smpllib.smpl_parser import ( SMPL_Parser, SMPLH_Parser, SMPLX_Parser, ) from scipy.spatial import ConvexHull from stl import mesh from uhc.utils.geom import quadric_mesh_decimation, center_scale_mesh from uhc.utils.flags import flags
16,076
print("!!!! Using modified SMPL starting pose !!!!") self.remove_toe = cfg.get("remove_toe", False) self.big_ankle = cfg.get("big_ankle", False) self.real_weight = cfg.get("real_weight", False) self.real_weight_porpotion = cfg.get("real_weight_porpotion", False) self.rel_joint_lm = cfg.get("rel_joint_lm", True) # Rolling this out worldwide!! os.makedirs("/tmp/smpl/", exist_ok=True) self.masterfoot = cfg.get("masterfoot", False) self.param_specs = self.cfg.get("body_params", {}) self.hull_dict = {} self.beta = (torch.zeros( (1, 10)).float() if self.smpl_model == "smpl" else torch.zeros( (1, 16)).float()) if self.smpl_model == "smpl": self.smpl_parser_n = SMPL_Parser(model_path=data_dir, gender="neutral") self.smpl_parser_m = SMPL_Parser(model_path=data_dir, gender="male") self.smpl_parser_f = SMPL_Parser(model_path=data_dir, gender="female") elif self.smpl_model == "smplh": self.smpl_parser_n = SMPLH_Parser( model_path=data_dir, gender="neutral", use_pca=False, create_transl=False, ) self.smpl_parser_m = SMPLH_Parser(model_path=data_dir, gender="male", use_pca=False, create_transl=False) self.smpl_parser_f = SMPLH_Parser(model_path=data_dir, gender="female", use_pca=False, create_transl=False) elif self.smpl_model == "smplx": self.smpl_parser_n = SMPLX_Parser( model_path=data_dir, gender="neutral", use_pca=False, create_transl=False, ) self.smpl_parser_m = SMPLX_Parser(model_path=data_dir, gender="male", use_pca=False, create_transl=False) self.smpl_parser_f = SMPLX_Parser(model_path=data_dir, gender="female", use_pca=False, create_transl=False) self.load_from_skeleton() atexit.register(self.remove_geoms) def remove_geoms(self): while len(self.model_dirs) > 0: geom_dir = self.model_dirs.pop(0) if osp.isdir(geom_dir): shutil.rmtree(geom_dir, ignore_errors=True) def get_joint_vertices(self, pose_aa, th_betas=None, th_trans=None, gender=[0]): if gender[0] == 0: smpl_parser = self.smpl_parser_n elif gender[0] == 1: smpl_parser = self.smpl_parser_m elif gender[0] == 2: smpl_parser = self.smpl_parser_f else: print(gender) raise Exception("Gender Not Supported!!") vertices, joints = smpl_parser.get_joints_verts(pose=pose_aa, th_betas=th_betas, th_trans=th_trans) return vertices, joints def load_from_skeleton( self, betas=None, v_template=None, gender=[0], objs_info=None, obj_pose=None, params=None, ): self.tree = None # xml tree if gender[0] == 0: self.smpl_parser = smpl_parser = self.smpl_parser_n elif gender[0] == 1: self.smpl_parser = smpl_parser = self.smpl_parser_m elif gender[0] == 2: self.smpl_parser = smpl_parser = self.smpl_parser_f else: print(gender) raise Exception("Gender Not Supported!!") if betas is None and self.beta is None: betas = (torch.zeros( (1, 10)).float() if self.smpl_model == "smpl" else torch.zeros( (1, 16)).float()) else: if params is None: self.beta = betas if not betas is None else self.beta else: # If params is not none, we need to set the beta first betas = self.map_params(betas) self.beta = torch.from_numpy( denormalize_range( betas.numpy().squeeze(), self.param_specs["beta"]["lb"], self.param_specs["beta"]["ub"], )[None, ])
sys.path.append(os.getcwd()) # from scipy.spatial.qhull import _Qhull def parse_vec(string): return np.fromstring(string, sep=" ") def parse_fromto(string): fromto = np.fromstring(string, sep=" ") return fromto[:3], fromto[3:] def normalize_range(value, lb, ub): return (value - lb) / (ub - lb) * 2 - 1 def denormalize_range(value, lb, ub): return (value + 1) * 0.5 * (ub - lb) + lb def vec_to_polar(v): phi = math.atan2(v[1], v[0]) theta = math.acos(v[2]) return np.array([theta, phi]) def polar_to_vec(p): v = np.zeros(3) v[0] = math.sin(p[0]) * math.cos(p[1]) v[1] = math.sin(p[0]) * math.sin(p[1]) v[2] = math.cos(p[0]) return v def in_hull(hull, queries): tolerance = 1e-3 if len(queries.shape) == 1: queries = queries[None, ] return np.all( np.add(np.dot(queries, hull.equations[:, :-1].T), hull.equations[:, -1]) <= tolerance, axis=1, ) def get_joint_geometries( smpl_verts, smpl_jts, skin_weights, joint_names, geom_dir, scale_dict={}, suffix=None, verbose=False, min_num_vert=50, ): vert_to_joint = skin_weights.argmax(axis=1) hull_dict = {} # create joint geometries os.makedirs(geom_dir, exist_ok=True) for jind, jname in enumerate(joint_names): vind = np.where(vert_to_joint == jind)[0] if len(vind) == 0: print(f"{jname} has no vertices!") continue norm_verts = (smpl_verts[vind] - smpl_jts[jind]) * scale_dict.get(jname, 1) hull = ConvexHull(smpl_verts[vind]) norm_hull = ConvexHull(norm_verts) hull_dict[jname] = { "norm_hull": norm_hull, "norm_verts": norm_verts, "verts": smpl_verts[vind], "hull": hull, "volume": hull.volume } center = norm_verts[hull.vertices].mean(axis=0) jgeom = mesh.Mesh( np.zeros(hull.simplices.shape[0], dtype=mesh.Mesh.dtype)) for i, f in enumerate(hull.simplices): for j in range(3): jgeom.vectors[i][j] = norm_verts[f[j], :] # check if the face's normal is facing outward normal = np.cross( jgeom.vectors[i][1] - jgeom.vectors[i][0], jgeom.vectors[i][2] - jgeom.vectors[i][0], ) out_vec = jgeom.vectors[i].mean(axis=0) - center if np.dot(normal, out_vec) < 0: jgeom.vectors[i] = jgeom.vectors[i][[0, 2, 1]] # flip the face if suffix is None: fname = f"{geom_dir}/{jname}.stl" else: fname = f"{geom_dir}/{jname}_{suffix}.stl" jgeom.save(fname) # mesh simplification with vtk # min_num_vert = 50 min_num_vert = 50 cur_num_vert = len(hull.vertices) reduction_rate = min(0.9, 1.0 - min_num_vert / cur_num_vert) quadric_mesh_decimation(fname, reduction_rate, verbose=verbose) return hull_dict def get_geom_dict( smpl_verts, smpl_jts, skin_weights, joint_names, scale_dict={}, ): vert_to_joint = skin_weights.argmax(axis=1) hull_dict = {} # create joint geometries for jind, jname in enumerate(joint_names): vind = np.where(vert_to_joint == jind)[0] if len(vind) == 0: print(f"{jname} has no vertices!") continue norm_verts = (smpl_verts[vind] - smpl_jts[jind]) * scale_dict.get(jname, 1) hull = ConvexHull(norm_verts) hull_dict[jname] = { "norm_hull": hull, "norm_verts": norm_verts, "verts": smpl_verts[vind], "volume": hull.volume } return hull_dict def update_joint_limits(joint_range): joint_range["Head"][0] = np.array([-np.pi / 2, np.pi / 2]) joint_range["Head"][1] = np.array([-np.pi / 2, np.pi / 2]) joint_range["Head"][2] = np.array([-np.pi / 2, np.pi / 2]) joint_range["Chest"][0] = np.array([-np.pi / 3, np.pi / 3]) joint_range["Chest"][1] = np.array([-np.pi / 3, np.pi / 3]) joint_range["Chest"][2] = np.array([-np.pi / 3, np.pi / 3]) joint_range["Spine"][0] = np.array([-np.pi / 3, np.pi / 3]) joint_range["Spine"][1] = np.array([-np.pi / 3, np.pi / 3]) joint_range["Spine"][2] = np.array([-np.pi / 3, 
np.pi / 3]) joint_range["Torso"][0] = np.array([-np.pi / 3, np.pi / 3]) joint_range["Torso"][1] = np.array([-np.pi / 3, np.pi / 3]) joint_range["Torso"][2] = np.array([-np.pi / 3, np.pi / 3]) ############################## joint_range["L_Thorax"][0] = np.array([-np.pi, np.pi]) joint_range["L_Thorax"][1] = np.array([-np.pi, np.pi]) joint_range["L_Thorax"][2] = np.array([-np.pi, np.pi]) joint_range["R_Thorax"][0] = np.array([-np.pi, np.pi]) joint_range["R_Thorax"][1] = np.array([-np.pi, np.pi]) joint_range["R_Thorax"][2] = np.array([-np.pi, np.pi]) joint_range["L_Shoulder"][0] = np.array([-np.pi, np.pi]) joint_range["L_Shoulder"][1] = np.array([-np.pi, np.pi]) joint_range["L_Shoulder"][2] = np.array([-np.pi, np.pi]) joint_range["R_Shoulder"][0] = np.array([-np.pi, np.pi]) joint_range["R_Shoulder"][1] = np.array([-np.pi, np.pi]) joint_range["R_Shoulder"][2] = np.array([-np.pi, np.pi]) ############################## joint_range["L_Hip"][0] = np.array([-np.pi / 2, np.pi / 2]) joint_range["L_Hip"][1] = np.array([-np.pi / 2, np.pi / 2]) joint_range["L_Hip"][2] = np.array([-np.pi / 2, np.pi / 2]) joint_range["R_Hip"][0] = np.array([-np.pi / 2, np.pi / 2]) joint_range["R_Hip"][1] = np.array([-np.pi / 3, np.pi / 3]) joint_range["R_Hip"][2] = np.array([-np.pi / 3, np.pi / 3]) joint_range["L_Knee"][0] = np.array([-np.pi, np.pi]) joint_range["L_Knee"][1] = np.array([-np.pi / 32, np.pi / 32]) joint_range["L_Knee"][2] = np.array([-np.pi / 32, np.pi / 32]) joint_range["R_Knee"][0] = np.array([-np.pi, np.pi]) joint_range["R_Knee"][1] = np.array([-np.pi / 32, np.pi / 32]) joint_range["R_Knee"][2] = np.array([-np.pi / 32, np.pi / 32]) joint_range["L_Ankle"][0] = np.array([-np.pi / 2, np.pi / 2]) joint_range["L_Ankle"][1] = np.array([-np.pi / 2, np.pi / 2]) joint_range["L_Ankle"][2] = np.array([-np.pi / 2, np.pi / 2]) joint_range["R_Ankle"][0] = np.array([-np.pi / 2, np.pi / 2]) joint_range["R_Ankle"][1] = np.array([-np.pi / 2, np.pi / 2]) joint_range["R_Ankle"][2] = np.array([-np.pi / 2, np.pi / 2]) joint_range["L_Toe"][0] = np.array([-np.pi / 2, np.pi / 2]) joint_range["L_Toe"][1] = np.array([-np.pi / 4, np.pi / 4]) joint_range["L_Toe"][2] = np.array([-np.pi / 4, np.pi / 4]) joint_range["R_Toe"][0] = np.array([-np.pi / 2, np.pi / 2]) joint_range["R_Toe"][1] = np.array([-np.pi / 4, np.pi / 4]) joint_range["R_Toe"][2] = np.array([-np.pi / 4, np.pi / 4]) return joint_range def update_joint_limits_upright(joint_range): joint_range["L_Knee"][0] = np.array([-np.pi / 32, np.pi / 32]) joint_range["L_Knee"][1] = np.array([-np.pi, np.pi]) joint_range["L_Knee"][2] = np.array([-np.pi / 32, np.pi / 32]) joint_range["R_Knee"][0] = np.array([-np.pi / 32, np.pi / 32]) joint_range["R_Knee"][1] = np.array([-np.pi, np.pi]) joint_range["R_Knee"][2] = np.array([-np.pi / 32, np.pi / 32]) joint_range["L_Ankle"][0] = np.array([-np.pi / 4, np.pi / 4]) joint_range["L_Ankle"][1] = np.array([-np.pi / 2, np.pi / 2]) joint_range["L_Ankle"][2] = np.array([-np.pi / 4, np.pi / 4]) joint_range["R_Ankle"][0] = np.array([-np.pi / 4, np.pi / 4]) joint_range["R_Ankle"][1] = np.array([-np.pi / 2, np.pi / 2]) joint_range["R_Ankle"][2] = np.array([-np.pi / 4, np.pi / 4]) joint_range["L_Toe"][0] = np.array([-np.pi / 32, np.pi / 32]) joint_range["L_Toe"][1] = np.array([-np.pi / 2, np.pi / 2]) joint_range["L_Toe"][2] = np.array([-np.pi / 32, np.pi / 32]) joint_range["R_Toe"][0] = np.array([-np.pi / 32, np.pi / 32]) joint_range["R_Toe"][1] = np.array([-np.pi / 2, np.pi / 2]) joint_range["R_Toe"][2] = np.array([-np.pi / 32, np.pi / 32]) return 
joint_range class Joint: def __init__(self, node, body): self.node = node self.body = body self.cfg = body.cfg self.local_coord = body.local_coord self.name = node.attrib["name"] self.type = node.attrib["type"] if "type" in node.attrib else "free" if self.type == "hinge": self.range = np.deg2rad( parse_vec(node.attrib.get("range", "-360 360"))) actu_node = (body.tree.getroot().find("actuator").find( f'motor[@joint="{self.name}"]')) if actu_node is not None: self.actuator = Actuator(actu_node, self) else: self.actuator = None self.parse_param_specs() self.param_inited = False # tunable parameters self.pos = parse_vec("0 0 0") if self.type == "hinge": self.axis = vec_to_polar(parse_vec(node.attrib["axis"])) if self.local_coord: self.pos += body.pos self.damping = (parse_vec(node.attrib["damping"]) if "damping" in node.attrib else np.array([0])) self.stiffness = (parse_vec(node.attrib["stiffness"]) if "stiffness" in node.attrib else np.array([0])) self.armature = (parse_vec(node.attrib["armature"]) if "armature" in node.attrib else np.array([0.01])) self.frictionloss = (parse_vec(node.attrib["frictionloss"]) if "frictionloss" in node.attrib else np.array([0])) # import ipdb; ipdb.set_trace() # assert np.all(self.pos == body.pos) def __repr__(self): return "joint_" + self.name def parse_param_specs(self): self.param_specs = deepcopy(self.cfg.get("joint_params", {})) for name, specs in self.param_specs.items(): if "lb" in specs and isinstance(specs["lb"], list): specs["lb"] = np.array(specs["lb"]) if "ub" in specs and isinstance(specs["ub"], list): specs["ub"] = np.array(specs["ub"]) def sync_node(self, rename=False, index=0): pos = self.pos - self.body.pos if self.local_coord else self.pos if rename: self.name = self.body.name + "_joint_" + str(index) self.node.attrib["name"] = self.name if self.type == "hinge": axis_vec = polar_to_vec(self.axis) self.node.attrib["axis"] = " ".join( [f"{x:.6f}".rstrip("0").rstrip(".") for x in axis_vec]) self.node.attrib["pos"] = " ".join( [f"{x:.6f}".rstrip("0").rstrip(".") for x in pos]) self.node.attrib["damping"] = " ".join( [f"{x:.6f}".rstrip("0").rstrip(".") for x in self.damping]) self.node.attrib["stiffness"] = " ".join( [f"{x:.6f}".rstrip("0").rstrip(".") for x in self.stiffness]) self.node.attrib["armature"] = " ".join( [f"{x:.6f}".rstrip("0").rstrip(".") for x in self.armature]) elif self.type == "free": pass if self.actuator is not None: self.actuator.sync_node() def get_params(self, param_list, get_name=False, pad_zeros=False): if "axis" in self.param_specs: if self.type == "hinge": if get_name: param_list += ["axis_theta", "axis_phi"] else: axis = normalize_range( self.axis, np.array([0, -2 * np.pi]), np.array([np.pi, 2 * np.pi]), ) param_list.append(axis) elif pad_zeros: param_list.append(np.zeros(2)) if self.actuator is not None: self.actuator.get_params(param_list, get_name) elif pad_zeros: param_list.append( np.zeros(3 if self.type == "free" else 1) ) # ZL currently a workaround for supporting 3D joints if "damping" in self.param_specs: if get_name: param_list.append("damping") else: if not self.param_inited and self.param_specs["damping"].get( "rel", False): self.param_specs["damping"]["lb"] += self.damping self.param_specs["damping"]["ub"] += self.damping self.param_specs["damping"]["lb"] = max( self.param_specs["damping"]["lb"], self.param_specs["damping"].get("min", -np.inf), ) self.param_specs["damping"]["ub"] = min( self.param_specs["damping"]["ub"], self.param_specs["damping"].get("max", np.inf), ) damping = normalize_range( 
self.damping, self.param_specs["damping"]["lb"], self.param_specs["damping"]["ub"], ) param_list.append(damping.flatten()) if "armature" in self.param_specs: if get_name: param_list.append("armature") else: if not self.param_inited and self.param_specs["armature"].get( "rel", False): self.param_specs["armature"]["lb"] += self.armature self.param_specs["armature"]["ub"] += self.armature self.param_specs["armature"]["lb"] = max( self.param_specs["armature"]["lb"], self.param_specs["armature"].get("min", -np.inf), ) self.param_specs["armature"]["ub"] = min( self.param_specs["armature"]["ub"], self.param_specs["armature"].get("max", np.inf), ) armature = normalize_range( self.armature, self.param_specs["armature"]["lb"], self.param_specs["armature"]["ub"], ) param_list.append(armature.flatten()) if "stiffness" in self.param_specs: if get_name: param_list.append("stiffness") else: if not self.param_inited and self.param_specs["stiffness"].get( "rel", False): self.param_specs["stiffness"]["lb"] += self.stiffness self.param_specs["stiffness"]["ub"] += self.stiffness self.param_specs["stiffness"]["lb"] = max( self.param_specs["stiffness"]["lb"], self.param_specs["stiffness"].get("min", -np.inf), ) self.param_specs["stiffness"]["ub"] = min( self.param_specs["stiffness"]["ub"], self.param_specs["stiffness"].get("max", np.inf), ) stiffness = normalize_range( self.stiffness, self.param_specs["stiffness"]["lb"], self.param_specs["stiffness"]["ub"], ) param_list.append(stiffness.flatten()) if "frictionloss" in self.param_specs: if get_name: param_list.append("frictionloss") else: if not self.param_inited and self.param_specs[ "frictionloss"].get("rel", False): self.param_specs["frictionloss"]["lb"] += self.frictionloss self.param_specs["frictionloss"]["ub"] += self.frictionloss self.param_specs["frictionloss"]["lb"] = max( self.param_specs["frictionloss"]["lb"], self.param_specs["frictionloss"].get("min", -np.inf), ) self.param_specs["frictionloss"]["ub"] = min( self.param_specs["frictionloss"]["ub"], self.param_specs["frictionloss"].get("max", np.inf), ) frictionloss = normalize_range( self.frictionloss, self.param_specs["frictionloss"]["lb"], self.param_specs["frictionloss"]["ub"], ) param_list.append(frictionloss.flatten()) if not get_name: self.param_inited = True # import ipdb; ipdb.set_trace() def set_params(self, params, pad_zeros=False): if "axis" in self.param_specs: if self.type == "hinge": self.axis = denormalize_range(params[:2], np.array([0, -2 * np.pi]), np.array([np.pi, 2 * np.pi])) params = params[2:] elif pad_zeros: params = params[2:] if self.actuator is not None: params = self.actuator.set_params(params) elif pad_zeros: params = params[1:] # Order of this matters!!! 
Should always be damping, aramature, stiffness (the order they are read) if "damping" in self.param_specs: self.damping = denormalize_range( params[[0]], self.param_specs["damping"]["lb"], self.param_specs["damping"]["ub"], ) params = params[1:] if "armature" in self.param_specs: self.armature = denormalize_range( params[[0]], self.param_specs["armature"]["lb"], self.param_specs["armature"]["ub"], ) params = params[1:] if "stiffness" in self.param_specs: self.stiffness = denormalize_range( params[[0]], self.param_specs["stiffness"]["lb"], self.param_specs["stiffness"]["ub"], ) params = params[1:] if "frictionloss" in self.param_specs: self.frictionloss = denormalize_range( params[[0]], self.param_specs["frictionloss"]["lb"], self.param_specs["frictionloss"]["ub"], ) params = params[1:] return params class Geom: def __init__(self, node, body): self.node = node self.body = body self.cfg = body.cfg self.local_coord = body.local_coord self.name = node.attrib.get("name", "") self.type = node.attrib["type"] self.density = (parse_vec(node.attrib["density"]) / 1000 if "density" in node.attrib else np.array([1])) self.parse_param_specs() self.param_inited = False # tunable parameters # self.size = ( # parse_vec(node.attrib["size"]) if "size" in node.attrib else np.array([0]) # ) self.size = (parse_vec(node.attrib["size"]) if "size" in node.attrib else np.array([1, 1, 1])) if self.type == "box": self.start = self.end = self.pos = parse_vec(node.attrib["pos"]) self.pos_delta = np.array([0, 0, 0]) self.rot = parse_vec(node.attrib["quat"]) elif self.type == "sphere": self.pos_delta = np.array([0, 0, 0]) self.start = self.end = self.pos = parse_vec(node.attrib["pos"]) elif self.type == "capsule": self.start, self.end = parse_fromto(node.attrib["fromto"]) elif self.type == "mesh": self.start, self.end = body.pos.copy(), body.pos.copy() if self.local_coord: self.start += body.pos self.end += body.pos if body.bone_start is None: self.bone_start = self.start.copy() body.bone_start = self.bone_start.copy() else: self.bone_start = body.bone_start.copy() self.ext_start = np.linalg.norm( self.bone_start - self.start) ## Geom extension from bone start def __repr__(self): return "geom_" + self.name def parse_param_specs(self): self.param_specs = deepcopy(self.cfg.get("geom_params", {})) for name, specs in self.param_specs.items(): if "lb" in specs and isinstance(specs["lb"], list): if self.type == "box": specs["lb"] = np.array([specs["lb"]] * 3) elif self.type == "capsule": specs["lb"] = np.array(specs["lb"]) if "ub" in specs and isinstance(specs["ub"], list): if self.type == "box": specs["lb"] = np.array([specs["lb"]] * 3) elif self.type == "capsule": specs["lb"] = np.array(specs["lb"]) def update_start(self): if self.type == "capsule": vec = self.bone_start - self.end self.start = self.bone_start + vec * (self.ext_start / np.linalg.norm(vec)) def sync_node(self): # self.node.attrib['name'] = self.name self.node.attrib.pop("name", None) if not self.size is None: self.node.attrib["size"] = " ".join( [f"{x:.6f}".rstrip("0").rstrip(".") for x in self.size]) self.node.attrib["density"] = " ".join( [f"{x * 1000:.6f}".rstrip("0").rstrip(".") for x in self.density]) # if self.type == "capsule": # start = self.start - self.body.pos if self.local_coord else self.start # end = self.end - self.body.pos if self.local_coord else self.end # self.node.attrib["fromto"] = " ".join( # [ # f"{x:.6f}".rstrip("0").rstrip(".") # for x in np.concatenate([start, end]) # ] # ) # elif self.type == "box" or self.type == "sphere": # # 
self.node.attrib["pos"] = " ".join( # # [f"{x:.6f}".rstrip("0").rstrip(".") for x in self.pos + self.pos_delta] # # ) # import ipdb; ipdb.set_trace() # pass def get_params(self, param_list, get_name=False, pad_zeros=False): if "size" in self.param_specs: if get_name: param_list.append("size") else: if (self.type == "capsule" or self.type == "box" or self.type == "sphere" or self.type == "mesh"): if not self.param_inited and self.param_specs["size"].get( "rel", False): self.param_specs["size"]["lb"] += self.size self.param_specs["size"]["ub"] += self.size self.param_specs["size"]["lb"] = max( self.param_specs["size"]["lb"], self.param_specs["size"].get("min", -np.inf), ) self.param_specs["size"]["ub"] = min( self.param_specs["size"]["ub"], self.param_specs["size"].get("max", np.inf), ) size = normalize_range( self.size, self.param_specs["size"]["lb"], self.param_specs["size"]["ub"], ) param_list.append(size.flatten()) if pad_zeros and self.type == "capsule": param_list.append( np.zeros(2)) # capsule has needs to be 3 for GNN elif pad_zeros: param_list.append(np.zeros(self.size.shape)) if "ext_start" in self.param_specs: if get_name: param_list.append("ext_start") else: if (self.type == "capsule" or self.type == "box" or self.type == "sphere"): if not self.param_inited and self.param_specs[ "ext_start"].get("rel", False): self.param_specs["ext_start"]["lb"] += self.ext_start self.param_specs["ext_start"]["ub"] += self.ext_start self.param_specs["ext_start"]["lb"] = max( self.param_specs["ext_start"]["lb"], self.param_specs["ext_start"].get("min", -np.inf), ) self.param_specs["ext_start"]["ub"] = min( self.param_specs["ext_start"]["ub"], self.param_specs["ext_start"].get("max", np.inf), ) ext_start = normalize_range( self.ext_start, self.param_specs["ext_start"]["lb"], self.param_specs["ext_start"]["ub"], ) param_list.append(ext_start.flatten()) elif pad_zeros: param_list.append(np.zeros(self.size.shape)) if "density" in self.param_specs: if get_name: param_list.append("density") else: if not self.param_inited and self.param_specs["density"].get( "rel", False): self.param_specs["density"]["lb"] += self.density self.param_specs["density"]["ub"] += self.density self.param_specs["density"]["lb"] = max( self.param_specs["density"]["lb"], self.param_specs["density"].get("min", -np.inf), ) self.param_specs["density"]["ub"] = min( self.param_specs["density"]["ub"], self.param_specs["density"].get("max", np.inf), ) density = normalize_range( self.density, self.param_specs["density"]["lb"], self.param_specs["density"]["ub"], ) param_list.append(density.flatten()) # if pad_zeros: # param_list.append(np.zeros(self.density.shape)) if "pos_delta" in self.param_specs: if get_name: param_list.append("pos_delta") else: if self.type == "box" or self.type == "sphere": if not self.param_inited and self.param_specs[ "pos_delta"].get("rel", False): self.param_specs["pos_delta"]["lb"] += self.density self.param_specs["pos_delta"]["ub"] += self.density self.param_specs["pos_delta"]["lb"] = max( self.param_specs["pos_delta"]["lb"], self.param_specs["pos_delta"].get("min", -np.inf), ) self.param_specs["pos_delta"]["ub"] = min( self.param_specs["pos_delta"]["ub"], self.param_specs["pos_delta"].get("max", np.inf), ) pos_delta = normalize_range( self.pos_delta, self.param_specs["pos_delta"]["lb"], self.param_specs["pos_delta"]["ub"], ) param_list.append(pos_delta.flatten()) elif pad_zeros: param_list.append(np.zeros(3)) if not get_name: self.param_inited = True def set_params(self, params, pad_zeros=False): if "size" in 
self.param_specs: if (self.type == "capsule" or self.type == "box" or self.type == "sphere" or self.type == "mesh"): if len(self.size) == 1: self.size = denormalize_range( params[[0]], self.param_specs["size"]["lb"], self.param_specs["size"]["ub"], ) params = params[1:] elif len(self.size) == 3: self.size = denormalize_range( np.array(params[:3]), self.param_specs["size"]["lb"], self.param_specs["size"]["ub"], ) params = params[3:] elif pad_zeros: params = params[1:] if "ext_start" in self.param_specs: if self.type == "capsule" or self.type == "box" or self.type == "sphere": self.ext_start = denormalize_range( params[[0]], self.param_specs["ext_start"]["lb"], self.param_specs["ext_start"]["ub"], ) params = params[1:] elif pad_zeros: params = params[1:] if "density" in self.param_specs: if (self.type == "capsule" or self.type == "box" or self.type == "sphere" or self.type == "mesh"): self.density = denormalize_range( params[[0]], self.param_specs["density"]["lb"], self.param_specs["density"]["ub"], ) params = params[1:] elif pad_zeros: params = params[1:] if "pos_delta" in self.param_specs: if self.type == "box" or self.type == "sphere": self.pos_delta = denormalize_range( np.array(params[:3]), self.param_specs["pos_delta"]["lb"], self.param_specs["pos_delta"]["ub"], ) params = params[3:] elif pad_zeros: params = params[3:] return params class Actuator: def __init__(self, node, joint): self.node = node self.joint = joint self.cfg = joint.cfg self.joint_name = node.attrib["joint"] self.name = self.joint_name self.parse_param_specs() self.param_inited = False # tunable parameters self.gear = float(node.attrib["gear"]) def parse_param_specs(self): self.param_specs = deepcopy(self.cfg.get("actuator_params", {})) for name, specs in self.param_specs.items(): if "lb" in specs and isinstance(specs["lb"], list): specs["lb"] = np.array(specs["lb"]) if "ub" in specs and isinstance(specs["ub"], list): specs["ub"] = np.array(specs["ub"]) def sync_node(self): self.node.attrib["gear"] = f"{self.gear:.6f}".rstrip("0").rstrip(".") self.name = self.joint.name self.node.attrib["name"] = self.name self.node.attrib["joint"] = self.joint.name def get_params(self, param_list, get_name=False): if "gear" in self.param_specs: if get_name: param_list.append("gear") else: if not self.param_inited and self.param_specs["gear"].get( "rel", False): self.param_specs["gear"]["lb"] += self.gear self.param_specs["gear"]["ub"] += self.gear self.param_specs["gear"]["lb"] = max( self.param_specs["gear"]["lb"], self.param_specs["gear"].get("min", -np.inf), ) self.param_specs["gear"]["ub"] = min( self.param_specs["gear"]["ub"], self.param_specs["gear"].get("max", np.inf), ) gear = normalize_range( self.gear, self.param_specs["gear"]["lb"], self.param_specs["gear"]["ub"], ) param_list.append(np.array([gear])) if not get_name: self.param_inited = True def set_params(self, params): if "gear" in self.param_specs: self.gear = denormalize_range( params[0].item(), self.param_specs["gear"]["lb"], self.param_specs["gear"]["ub"], ) params = params[1:] return params class Body: def __init__(self, node, parent_body, robot, cfg, new_body=False): self.node = node self.parent = parent_body self.new_body = new_body if parent_body is not None: parent_body.child.append(self) parent_body.cind += 1 self.depth = parent_body.depth + 1 else: self.depth = 0 self.robot = robot self.cfg = cfg self.tree = robot.tree self.local_coord = robot.local_coord self.name = (node.attrib["name"] if "name" in node.attrib else self.parent.name + 
f"_child{len(self.parent.child)}") self.child = [] self.cind = 0 self.pos = parse_vec(node.attrib["pos"]) if self.local_coord and parent_body is not None: self.pos += parent_body.pos if cfg.get("init_root_from_geom", False): self.bone_start = None if parent_body is None else self.pos.copy() else: self.bone_start = self.pos.copy() self.joints = [Joint(x, self) for x in node.findall('joint[@type="hinge"]')] + \ [Joint(x, self) for x in node.findall('joint[@type="free"]')] + \ [Joint(x, self) for x in node.findall('freejoint')] # self.geoms = [Geom(x, self) for x in node.findall('geom[@type="capsule"]')] supported_geoms = self.cfg.get("supported_geoms", ["capsule", "box"]) self.geoms = [ Geom(x, self) for geom_type in supported_geoms for x in node.findall(f'geom[@type="{geom_type}"]') ] # self.geoms = [Geom(x, self) for x in node.findall('geom[@type="capsule"]')] + [Geom(x, self) for x in node.findall('geom[@type="sphere"]')] + [Geom(x, self) for x in node.findall('geom[@type="box"]')] self.parse_param_specs() self.param_inited = False # parameters self.bone_end = None self.bone_offset = None def __repr__(self): return "body_" + self.name def parse_param_specs(self): self.param_specs = deepcopy(self.cfg.get("body_params", {})) for name, specs in self.param_specs.items(): if "lb" in specs and isinstance(specs["lb"], list): specs["lb"] = np.array(specs["lb"]) if "ub" in specs and isinstance(specs["ub"], list): specs["ub"] = np.array(specs["ub"]) if name == "bone_ang": specs["lb"] = np.deg2rad(specs["lb"]) specs["ub"] = np.deg2rad(specs["ub"]) def reindex(self): if self.parent is None: self.index = "0" else: ind = self.parent.child.index(self) + 1 pname = "" if self.parent.index == "0" else self.parent.index self.index = str(ind) + pname if self.new_body: self.name = self.index def init(self): if len(self.child) > 0: bone_ends = [x.bone_start for x in self.child] else: bone_ends = [x.end for x in self.geoms] if len(bone_ends) > 0: self.bone_end = np.mean(np.stack(bone_ends), axis=0) self.bone_offset = self.bone_end - self.bone_start def get_actuator_name(self): for joint in self.joints: if joint.actuator is not None: return joint.actuator.name def get_joint_range(self): assert len(self.joints) == 1 return self.joints[0].range def sync_node(self): pos = (self.pos - self.parent.pos if self.local_coord and self.parent is not None else self.pos) self.node.attrib["name"] = self.name self.node.attrib["pos"] = " ".join( [f"{x:.6f}".rstrip("0").rstrip(".") for x in pos]) for idx, joint in enumerate(self.joints): joint.sync_node(rename=self.new_body, index=idx) for geom in self.geoms: geom.sync_node() def sync_geom(self): for geom in self.geoms: geom.bone_start = self.bone_start.copy() # geom.end = self.bone_end.copy() # geom.update_start() def sync_joint(self): if self.parent is not None: for joint in self.joints: joint.pos = self.pos.copy() def rebuild(self): if self.parent is not None: # self.bone_start = self.parent.bone_end.copy() self.pos = self.bone_start.copy() if self.bone_offset is not None: self.bone_end = self.bone_start + self.bone_offset if self.parent is None and self.cfg.get("no_root_offset", False): self.bone_end = self.bone_start self.sync_geom() self.sync_joint() def get_params(self, param_list, get_name=False, pad_zeros=False, demap_params=False): if self.bone_offset is not None and "offset" in self.param_specs: if get_name: if self.param_specs["offset"]["type"] == "xz": param_list += ["offset_x", "offset_z"] elif self.param_specs["offset"]["type"] == "xy": param_list += ["offset_x", 
"offset_y"] else: param_list += ["offset_x", "offset_y", "offset_z"] else: if self.param_specs["offset"]["type"] == "xz": offset = self.bone_offset[[0, 2]] elif self.param_specs["offset"]["type"] == "xy": offset = self.bone_offset[[0, 1]] else: offset = self.bone_offset if not self.param_inited and self.param_specs["offset"].get( "rel", False): self.param_specs["offset"]["lb"] += offset self.param_specs["offset"]["ub"] += offset self.param_specs["offset"]["lb"] = np.maximum( self.param_specs["offset"]["lb"], self.param_specs["offset"].get( "min", np.full_like(offset, -np.inf)), ) self.param_specs["offset"]["ub"] = np.minimum( self.param_specs["offset"]["ub"], self.param_specs["offset"].get( "max", np.full_like(offset, np.inf)), ) offset = normalize_range( offset, self.param_specs["offset"]["lb"], self.param_specs["offset"]["ub"], ) param_list.append(offset.flatten()) if self.bone_offset is not None and "bone_len" in self.param_specs: if get_name: param_list += ["bone_len"] else: bone_len = np.linalg.norm(self.bone_offset) if not self.param_inited and self.param_specs["bone_len"].get( "rel", False): self.param_specs["bone_len"]["lb"] += bone_len self.param_specs["bone_len"]["ub"] += bone_len self.param_specs["bone_len"]["lb"] = max( self.param_specs["bone_len"]["lb"], self.param_specs["bone_len"].get("min", -np.inf), ) self.param_specs["bone_len"]["ub"] = min( self.param_specs["bone_len"]["ub"], self.param_specs["bone_len"].get("max", np.inf), ) bone_len = normalize_range( bone_len, self.param_specs["bone_len"]["lb"], self.param_specs["bone_len"]["ub"], ) param_list.append(np.array([bone_len])) if self.bone_offset is not None and "bone_ang" in self.param_specs: if get_name: param_list += ["bone_ang"] else: bone_ang = math.atan2(self.bone_offset[2], self.bone_offset[0]) if not self.param_inited and self.param_specs["bone_ang"].get( "rel", False): self.param_specs["bone_ang"]["lb"] += bone_ang self.param_specs["bone_ang"]["ub"] += bone_ang self.param_specs["bone_ang"]["lb"] = max( self.param_specs["bone_ang"]["lb"], self.param_specs["bone_ang"].get("min", -np.inf), ) self.param_specs["bone_ang"]["ub"] = min( self.param_specs["bone_ang"]["ub"], self.param_specs["bone_ang"].get("max", np.inf), ) bone_ang = normalize_range( bone_ang, self.param_specs["bone_ang"]["lb"], self.param_specs["bone_ang"]["ub"], ) param_list.append(np.array([bone_ang])) for joint in self.joints: joint.get_params(param_list, get_name, pad_zeros) for geom in self.geoms: geom.get_params(param_list, get_name, pad_zeros) if not get_name: self.param_inited = True if demap_params and not get_name and len(param_list) > 0: params = self.robot.demap_params(np.concatenate(param_list)) return params def set_params(self, params, pad_zeros=False, map_params=False): if map_params: params = self.robot.map_params(params) if self.bone_offset is not None and "offset" in self.param_specs: if self.param_specs["offset"]["type"] in {"xz", "xy"}: offset = denormalize_range( params[:2], self.param_specs["offset"]["lb"], self.param_specs["offset"]["ub"], ) if np.all(offset == 0.0): offset[0] += 1e-8 if self.param_specs["offset"]["type"] == "xz": self.bone_offset[[0, 2]] = offset elif self.param_specs["offset"]["type"] == "xy": self.bone_offset[[0, 1]] = offset params = params[2:] else: offset = denormalize_range( params[:3], self.param_specs["offset"]["lb"], self.param_specs["offset"]["ub"], ) if np.all(offset == 0.0): offset[0] += 1e-8 self.bone_offset[:] = offset params = params[3:] if self.bone_offset is not None and "bone_len" in 
self.param_specs: bone_len = denormalize_range( params[0].item(), self.param_specs["bone_len"]["lb"], self.param_specs["bone_len"]["ub"], ) bone_len = max(bone_len, 1e-4) params = params[1:] elif self.bone_offset is not None: bone_len = np.linalg.norm(self.bone_offset) if self.bone_offset is not None and "bone_ang" in self.param_specs: bone_ang = denormalize_range( params[0].item(), self.param_specs["bone_ang"]["lb"], self.param_specs["bone_ang"]["ub"], ) params = params[1:] elif self.bone_offset is not None: bone_ang = math.atan2(self.bone_offset[2], self.bone_offset[0]) if "bone_len" in self.param_specs or "bone_ang" in self.param_specs: self.bone_offset = np.array([ bone_len * math.cos(bone_ang), 0, bone_len * math.sin(bone_ang) ]) for joint in self.joints: params = joint.set_params(params, pad_zeros) for geom in self.geoms: params = geom.set_params(params, pad_zeros) # rebuild bone, geom, joint self.rebuild() return params class Robot: def __init__(self, cfg, data_dir="data/smpl"): self.bodies = [] self.weight = 0 self.height = 0 self.cfg = cfg self.model_dirs = [] self.param_mapping = cfg.get("param_mapping", "clip") self.smpl_model = cfg.get("model", "smpl") self.mesh = cfg.get("mesh", False) self.replace_feet = cfg.get("replace_feet", True) self.gender = cfg.get("gender", "neutral") self.flatfoot = cfg.get("flatfoot", True) self.upright_start = cfg.get("upright_start", True) if self.upright_start: print("!!!! Using modified SMPL starting pose !!!!") self.remove_toe = cfg.get("remove_toe", False) self.big_ankle = cfg.get("big_ankle", False) self.real_weight = cfg.get("real_weight", False) self.real_weight_porpotion = cfg.get("real_weight_porpotion", False) self.rel_joint_lm = cfg.get("rel_joint_lm", True) # Rolling this out worldwide!! os.makedirs("/tmp/smpl/", exist_ok=True) self.masterfoot = cfg.get("masterfoot", False) self.param_specs = self.cfg.get("body_params", {}) self.hull_dict = {} self.beta = (torch.zeros( (1, 10)).float() if self.smpl_model == "smpl" else torch.zeros( (1, 16)).float()) if self.smpl_model == "smpl": self.smpl_parser_n = SMPL_Parser(model_path=data_dir, gender="neutral") self.smpl_parser_m = SMPL_Parser(model_path=data_dir, gender="male") self.smpl_parser_f = SMPL_Parser(model_path=data_dir, gender="female") elif self.smpl_model == "smplh": self.smpl_parser_n = SMPLH_Parser( model_path=data_dir, gender="neutral", use_pca=False, create_transl=False, ) self.smpl_parser_m = SMPLH_Parser(model_path=data_dir, gender="male", use_pca=False, create_transl=False) self.smpl_parser_f = SMPLH_Parser(model_path=data_dir, gender="female", use_pca=False, create_transl=False) elif self.smpl_model == "smplx": self.smpl_parser_n = SMPLX_Parser( model_path=data_dir, gender="neutral", use_pca=False, create_transl=False, ) self.smpl_parser_m = SMPLX_Parser(model_path=data_dir, gender="male", use_pca=False, create_transl=False) self.smpl_parser_f = SMPLX_Parser(model_path=data_dir, gender="female", use_pca=False, create_transl=False) self.load_from_skeleton() atexit.register(self.remove_geoms) def remove_geoms(self): while len(self.model_dirs) > 0: geom_dir = self.model_dirs.pop(0) if osp.isdir(geom_dir): shutil.rmtree(geom_dir, ignore_errors=True) def get_joint_vertices(self, pose_aa, th_betas=None, th_trans=None, gender=[0]): if gender[0] == 0: smpl_parser = self.smpl_parser_n elif gender[0] == 1: smpl_parser = self.smpl_parser_m elif gender[0] == 2: smpl_parser = self.smpl_parser_f else: print(gender) raise Exception("Gender Not Supported!!") vertices, joints = 
smpl_parser.get_joints_verts(pose=pose_aa, th_betas=th_betas, th_trans=th_trans) return vertices, joints def load_from_skeleton( self, betas=None, v_template=None, gender=[0], objs_info=None, obj_pose=None, params=None, ): self.tree = None # xml tree if gender[0] == 0: self.smpl_parser = smpl_parser = self.smpl_parser_n elif gender[0] == 1: self.smpl_parser = smpl_parser = self.smpl_parser_m elif gender[0] == 2: self.smpl_parser = smpl_parser = self.smpl_parser_f else: print(gender) raise Exception("Gender Not Supported!!") if betas is None and self.beta is None: betas = (torch.zeros( (1, 10)).float() if self.smpl_model == "smpl" else torch.zeros( (1, 16)).float()) else: if params is None: self.beta = betas if not betas is None else self.beta else: # If params is not none, we need to set the beta first betas = self.map_params(betas) self.beta = torch.from_numpy( denormalize_range( betas.numpy().squeeze(), self.param_specs["beta"]["lb"], self.param_specs["beta"]["ub"], )[None, ])
if flags.debug:
7
2023-10-31 20:47:12+00:00
24k
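Throughout the Geom, Actuator, Body, and Robot classes above, every tunable quantity passes through `normalize_range` / `denormalize_range` helpers that are imported elsewhere in this repository and not shown in the snippet. A minimal sketch of what they plausibly do, assuming a linear map between the physical range [lb, ub] and a normalized design space [-1, 1] (the choice of [-1, 1] rather than [0, 1] is an assumption):

import numpy as np

def normalize_range(value, lb, ub):
    # Map a physical value in [lb, ub] linearly onto [-1, 1].
    # value, lb, and ub may be scalars or numpy arrays of matching shape.
    return (value - lb) / (ub - lb) * 2.0 - 1.0

def denormalize_range(value, lb, ub):
    # Inverse map: recover the physical value from its normalized form.
    return (value + 1.0) / 2.0 * (ub - lb) + lb

Under a convention like this, `get_params` exports a flat vector of normalized values and `set_params` consumes one, which is what lets an outer optimizer treat body offsets, joint ranges, geom sizes, and actuator gears uniformly.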
Improbable-AI/dexenv
dexenv/envs/dclaw_multiobjs.py
[ { "identifier": "DClawBase", "path": "dexenv/envs/dclaw_base.py", "snippet": "class DClawBase(VecTask):\n\n def __init__(self, cfg, sim_device, rl_device, graphics_device_id):\n\n self.cfg = cfg\n headless = self.cfg.headless\n self.randomize = self.cfg[\"task\"][\"randomize\"]\n...
import numpy as np import torch import dexenv from gym.utils import seeding from isaacgym import gymapi from loguru import logger from tqdm import tqdm from dexenv.envs.dclaw_base import DClawBase from dexenv.utils.common import chunker_list from dexenv.utils.common import get_all_files_with_name from dexenv.utils.common import load_from_pickle from dexenv.utils.isaac_utils import load_a_goal_object_asset from dexenv.utils.isaac_utils import load_an_object_asset from dexenv.utils.isaac_utils import load_obj_texture
15,505
object_start_pose, "object", i, 0, 1) self.object_handles.append(object_handle) self.object_init_state.append([object_start_pose.p.x, object_start_pose.p.y, object_start_pose.p.z, object_start_pose.r.x, object_start_pose.r.y, object_start_pose.r.z, object_start_pose.r.w, 0, 0, 0, 0, 0, 0]) object_idx = self.gym.get_actor_index(env_ptr, object_handle, gymapi.DOMAIN_SIM) self.object_indices.append(object_idx) self.object_cat_indices.append(object_cat_ids[obj_asset_id]) # add goal object goal_handle = self.gym.create_actor(env_ptr, goal_assets[obj_asset_id], goal_start_pose, "goal_object", i + self.num_envs, 0, 2) goal_object_idx = self.gym.get_actor_index(env_ptr, goal_handle, gymapi.DOMAIN_SIM) self.goal_object_indices.append(goal_object_idx) if self.cfg.obj.load_texture: self.gym.set_rigid_body_texture(env_ptr, object_handle, 0, gymapi.MESH_VISUAL_AND_COLLISION, object_textures[obj_asset_id] ) self.gym.set_rigid_body_texture(env_ptr, goal_handle, 0, gymapi.MESH_VISUAL_AND_COLLISION, object_textures[obj_asset_id] ) else: color = np.array([179, 193, 134]) / 255.0 self.gym.set_rigid_body_color( env_ptr, object_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(*color)) self.gym.set_rigid_body_color( env_ptr, goal_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(*color)) table_handle = self.gym.create_actor(env_ptr, table_asset, table_pose, "table", i, 0) self.gym.set_rigid_body_color(env_ptr, table_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(180 / 255., 180 / 255., 180 / 255.)) if self.cfg.rgb_render: render_camera_handle = self.create_camera(render_cam_pose, env_ptr, render_cam_params) self.render_camera_handles.append(render_camera_handle[0]) if self.aggregate_mode > 0: self.gym.end_aggregate(env_ptr) self.envs.append(env_ptr) object_rb_props = self.gym.get_actor_rigid_body_properties(env_ptr, object_handle) self.object_rb_masses = [prop.mass for prop in object_rb_props] self.setup_torch_states() self.env_obj_ids = torch.LongTensor(env_obj_ids).to(self.device).view(-1, 1) self.object_cat_indices = torch.LongTensor(self.object_cat_indices).to(self.device).view(-1, 1) def parse_obj_dataset(self, dataset): asset_root = dexenv.LIB_PATH.joinpath('assets') split_dataset_name = dataset.split(':') if len(split_dataset_name) == 1: dataset_path = asset_root.joinpath(dataset, 'train') else: target_object = split_dataset_name[1] dataset_path = asset_root.joinpath(split_dataset_name[0], 'train', target_object) logger.warning(f'Dataset path:{dataset_path}') urdf_files = get_all_files_with_name(dataset_path, name='model.urdf') permute_ids = self.np_random.permutation(np.arange(len(urdf_files))) permuted_urdfs = [urdf_files[i] for i in permute_ids] object_categories = sorted(list(set([self.get_object_category(urdf) for urdf in permuted_urdfs]))) obj_name_to_id = {name: idx for idx, name in enumerate(object_categories)} return permuted_urdfs, dataset_path, obj_name_to_id def get_object_category(self, urdf_path): cat = urdf_path.parents[0].name if 'var_' in cat: cat = urdf_path.parents[1].name return cat def load_object_asset(self): asset_root = dexenv.LIB_PATH.joinpath('assets') object_urdfs = self.object_urdfs object_assets, goal_assets, object_ids, object_tex_handles, object_ptds = [], [], [], [], [] object_cat_ids = [] if self.cfg.obj.object_id is not None: urdf_to_load = self.object_urdfs[self.cfg.obj.object_id] logger.info(f'Loading a single object: {urdf_to_load}') obj_asset, goal_asset, texture_handle, ptd = self.load_an_object(asset_root, urdf_to_load) object_assets.append(obj_asset) goal_assets.append(goal_asset) 
object_ids.append(self.object_urdfs.index(urdf_to_load)) object_tex_handles.append(texture_handle) object_ptds.append(ptd) object_cat_ids.append(self.obj_name_to_cat_id[self.get_object_category(urdf_to_load)]) else: if self.cfg.obj.start_id is None: start = 0 end = min(len(object_urdfs), self.cfg.obj.num_objs) else: start = self.cfg.obj.start_id end = min(start + self.cfg.obj.num_objs, len(object_urdfs)) iters = range(start, end) logger.info(f'Loading object IDs from {start} to {end}.') for idx in tqdm(iters, desc='Loading Asset'): urdf_to_load = object_urdfs[idx] obj_asset, goal_asset, texture_handle, ptd = self.load_an_object(asset_root, urdf_to_load) object_assets.append(obj_asset) goal_assets.append(goal_asset) object_ids.append(self.object_urdfs.index(urdf_to_load)) object_tex_handles.append(texture_handle) object_ptds.append(ptd) object_cat_ids.append(self.obj_name_to_cat_id[self.get_object_category(urdf_to_load)]) return object_assets, goal_assets, object_ids, object_tex_handles, object_ptds, object_cat_ids def load_an_object(self, asset_root, object_urdf): out = [] obj_asset = load_an_object_asset(self.gym, self.sim, asset_root, object_urdf, vhacd=self.cfg.env.vhacd) obj_asset = self.change_obj_asset_dyn(obj_asset)
class DclawMultiObjs(DClawBase): def __init__(self, cfg, sim_device, rl_device, graphics_device_id): self.set_random_gen() self.object_urdfs, self.dataset_path, self.obj_name_to_cat_id = self.parse_obj_dataset(cfg.obj.dataset) self.num_objects = len(self.object_urdfs) logger.info(f'Object urdf root path:{self.dataset_path}.') logger.info(f'Number of available objects:{self.num_objects}.') super().__init__(cfg=cfg, sim_device=sim_device, rl_device=rl_device, graphics_device_id=graphics_device_id) def set_random_gen(self, seed=12345): self.np_random, seed = seeding.np_random(seed) def _create_envs(self, num_envs, spacing, num_per_row): lower = gymapi.Vec3(-spacing, -spacing, 0.0) upper = gymapi.Vec3(spacing, spacing, spacing) asset_root = dexenv.LIB_PATH.joinpath('assets', 'dclaw').as_posix() dclaw_asset, dclaw_dof_props = self.get_dclaw_asset(asset_root=asset_root) # load manipulated object and goal assets table_asset = self.get_table_asset() table_pose = self.get_table_pose() object_assets, goal_assets, object_ids, object_textures, object_ptds, object_cat_ids = self.load_object_asset() # create fingertip force sensors, if needed if self.obs_type == "full_state": sensor_pose = gymapi.Transform() for ft_handle in self.fingertip_handles: self.gym.create_asset_force_sensor(dclaw_asset, ft_handle, sensor_pose) dclaw_start_pose = self.get_dclaw_start_pose() object_start_pose = self.get_object_start_pose(dclaw_start_pose) goal_start_pose = self.get_goal_object_start_pose(object_start_pose=object_start_pose) self.dclaws = [] self.envs = [] self.object_init_state = [] self.hand_start_states = [] self.hand_indices = [] self.fingertip_indices = [] self.object_indices = [] self.object_cat_indices = [] self.goal_object_indices = [] self.render_camera_handles = [] if self.cfg.rgb_render: render_cam_pose, render_cam_params = self.get_visual_render_camera_setup() self.fingertip_handles = [self.gym.find_asset_rigid_body_index(dclaw_asset, name) for name in self.fingertips] dclaw_rb_count = self.gym.get_asset_rigid_body_count(dclaw_asset) object_rb_count = self.gym.get_asset_rigid_body_count(object_assets[0]) self.object_rb_handles = list(range(dclaw_rb_count, dclaw_rb_count + object_rb_count)) self.object_handles = [] num_object_assets = len(object_assets) env_obj_ids = [] for i in range(self.num_envs): # create env instance obj_asset_id = i % num_object_assets env_obj_ids.append(object_ids[obj_asset_id]) env_ptr = self.gym.create_env( self.sim, lower, upper, num_per_row ) if self.aggregate_mode >= 1: # compute aggregate size obj_num_bodies = self.gym.get_asset_rigid_body_count(object_assets[obj_asset_id]) obj_num_shapes = self.gym.get_asset_rigid_shape_count(object_assets[obj_asset_id]) max_agg_bodies = self.num_dclaw_bodies + obj_num_bodies * 2 + 1 max_agg_shapes = self.num_dclaw_shapes + obj_num_shapes * 2 + 1 self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True) self.create_hand_actor(env_ptr=env_ptr, dclaw_asset=dclaw_asset, dclaw_start_pose=dclaw_start_pose, dclaw_dof_props=dclaw_dof_props, env_id=i) # add object object_handle = self.gym.create_actor(env_ptr, object_assets[obj_asset_id], object_start_pose, "object", i, 0, 1) self.object_handles.append(object_handle) self.object_init_state.append([object_start_pose.p.x, object_start_pose.p.y, object_start_pose.p.z, object_start_pose.r.x, object_start_pose.r.y, object_start_pose.r.z, object_start_pose.r.w, 0, 0, 0, 0, 0, 0]) object_idx = self.gym.get_actor_index(env_ptr, object_handle, gymapi.DOMAIN_SIM) 
self.object_indices.append(object_idx) self.object_cat_indices.append(object_cat_ids[obj_asset_id]) # add goal object goal_handle = self.gym.create_actor(env_ptr, goal_assets[obj_asset_id], goal_start_pose, "goal_object", i + self.num_envs, 0, 2) goal_object_idx = self.gym.get_actor_index(env_ptr, goal_handle, gymapi.DOMAIN_SIM) self.goal_object_indices.append(goal_object_idx) if self.cfg.obj.load_texture: self.gym.set_rigid_body_texture(env_ptr, object_handle, 0, gymapi.MESH_VISUAL_AND_COLLISION, object_textures[obj_asset_id] ) self.gym.set_rigid_body_texture(env_ptr, goal_handle, 0, gymapi.MESH_VISUAL_AND_COLLISION, object_textures[obj_asset_id] ) else: color = np.array([179, 193, 134]) / 255.0 self.gym.set_rigid_body_color( env_ptr, object_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(*color)) self.gym.set_rigid_body_color( env_ptr, goal_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(*color)) table_handle = self.gym.create_actor(env_ptr, table_asset, table_pose, "table", i, 0) self.gym.set_rigid_body_color(env_ptr, table_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(180 / 255., 180 / 255., 180 / 255.)) if self.cfg.rgb_render: render_camera_handle = self.create_camera(render_cam_pose, env_ptr, render_cam_params) self.render_camera_handles.append(render_camera_handle[0]) if self.aggregate_mode > 0: self.gym.end_aggregate(env_ptr) self.envs.append(env_ptr) object_rb_props = self.gym.get_actor_rigid_body_properties(env_ptr, object_handle) self.object_rb_masses = [prop.mass for prop in object_rb_props] self.setup_torch_states() self.env_obj_ids = torch.LongTensor(env_obj_ids).to(self.device).view(-1, 1) self.object_cat_indices = torch.LongTensor(self.object_cat_indices).to(self.device).view(-1, 1) def parse_obj_dataset(self, dataset): asset_root = dexenv.LIB_PATH.joinpath('assets') split_dataset_name = dataset.split(':') if len(split_dataset_name) == 1: dataset_path = asset_root.joinpath(dataset, 'train') else: target_object = split_dataset_name[1] dataset_path = asset_root.joinpath(split_dataset_name[0], 'train', target_object) logger.warning(f'Dataset path:{dataset_path}') urdf_files = get_all_files_with_name(dataset_path, name='model.urdf') permute_ids = self.np_random.permutation(np.arange(len(urdf_files))) permuted_urdfs = [urdf_files[i] for i in permute_ids] object_categories = sorted(list(set([self.get_object_category(urdf) for urdf in permuted_urdfs]))) obj_name_to_id = {name: idx for idx, name in enumerate(object_categories)} return permuted_urdfs, dataset_path, obj_name_to_id def get_object_category(self, urdf_path): cat = urdf_path.parents[0].name if 'var_' in cat: cat = urdf_path.parents[1].name return cat def load_object_asset(self): asset_root = dexenv.LIB_PATH.joinpath('assets') object_urdfs = self.object_urdfs object_assets, goal_assets, object_ids, object_tex_handles, object_ptds = [], [], [], [], [] object_cat_ids = [] if self.cfg.obj.object_id is not None: urdf_to_load = self.object_urdfs[self.cfg.obj.object_id] logger.info(f'Loading a single object: {urdf_to_load}') obj_asset, goal_asset, texture_handle, ptd = self.load_an_object(asset_root, urdf_to_load) object_assets.append(obj_asset) goal_assets.append(goal_asset) object_ids.append(self.object_urdfs.index(urdf_to_load)) object_tex_handles.append(texture_handle) object_ptds.append(ptd) object_cat_ids.append(self.obj_name_to_cat_id[self.get_object_category(urdf_to_load)]) else: if self.cfg.obj.start_id is None: start = 0 end = min(len(object_urdfs), self.cfg.obj.num_objs) else: start = self.cfg.obj.start_id end = min(start + 
self.cfg.obj.num_objs, len(object_urdfs)) iters = range(start, end) logger.info(f'Loading object IDs from {start} to {end}.') for idx in tqdm(iters, desc='Loading Asset'): urdf_to_load = object_urdfs[idx] obj_asset, goal_asset, texture_handle, ptd = self.load_an_object(asset_root, urdf_to_load) object_assets.append(obj_asset) goal_assets.append(goal_asset) object_ids.append(self.object_urdfs.index(urdf_to_load)) object_tex_handles.append(texture_handle) object_ptds.append(ptd) object_cat_ids.append(self.obj_name_to_cat_id[self.get_object_category(urdf_to_load)]) return object_assets, goal_assets, object_ids, object_tex_handles, object_ptds, object_cat_ids def load_an_object(self, asset_root, object_urdf): out = [] obj_asset = load_an_object_asset(self.gym, self.sim, asset_root, object_urdf, vhacd=self.cfg.env.vhacd) obj_asset = self.change_obj_asset_dyn(obj_asset)
goal_obj_asset = load_a_goal_object_asset(self.gym, self.sim, asset_root, object_urdf, vhacd=False)
4
2023-10-25 17:22:41+00:00
24k
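`parse_obj_dataset` above relies on `get_all_files_with_name` from `dexenv.utils.common` to discover every `model.urdf` under the dataset directory; its body is not part of this snippet. A plausible minimal implementation with pathlib (returning sorted paths is an assumption, chosen so the subsequent seeded permutation is reproducible across machines):

from pathlib import Path

def get_all_files_with_name(root, name='model.urdf'):
    # Recursively collect every file called `name` below `root`.
    # Sorting makes the order deterministic, which matters because the
    # caller shuffles the resulting list with a seeded numpy RNG.
    return sorted(Path(root).rglob(name))

The returned `pathlib.Path` objects are what `get_object_category` later inspects via `urdf_path.parents[0].name`.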
ai-safety-foundation/sparse_autoencoder
sparse_autoencoder/train/pipeline.py
[ { "identifier": "ActivationResampler", "path": "sparse_autoencoder/activation_resampler/activation_resampler.py", "snippet": "class ActivationResampler:\n \"\"\"Activation resampler.\n\n Collates the number of times each neuron fires over a set number of learned activation vectors,\n and then p...
from collections.abc import Iterator from functools import partial from pathlib import Path from tempfile import gettempdir from typing import TYPE_CHECKING, final from deepspeed import DeepSpeedEngine from jaxtyping import Float, Int, Int64 from pydantic import NonNegativeInt, PositiveInt, validate_call from torch import Tensor from torch.nn.parallel import DataParallel from torch.optim.lr_scheduler import LRScheduler from torch.utils.data import DataLoader from tqdm.auto import tqdm from transformer_lens import HookedTransformer from sparse_autoencoder.activation_resampler.activation_resampler import ( ActivationResampler, ParameterUpdateResults, ) from sparse_autoencoder.activation_store.tensor_store import TensorActivationStore from sparse_autoencoder.autoencoder.model import SparseAutoencoder from sparse_autoencoder.loss.abstract_loss import AbstractLoss, LossReductionType from sparse_autoencoder.metrics.metrics_container import MetricsContainer, default_metrics from sparse_autoencoder.metrics.train.abstract_train_metric import TrainMetricData from sparse_autoencoder.metrics.validate.abstract_validate_metric import ValidationMetricData from sparse_autoencoder.optimizer.abstract_optimizer import AbstractOptimizerWithReset from sparse_autoencoder.source_data.abstract_dataset import SourceDataset, TorchTokenizedPrompts from sparse_autoencoder.source_model.replace_activations_hook import replace_activations_hook from sparse_autoencoder.source_model.store_activations_hook import store_activations_hook from sparse_autoencoder.source_model.zero_ablate_hook import zero_ablate_hook from sparse_autoencoder.tensor_types import Axis from sparse_autoencoder.train.utils.get_model_device import get_model_device from sparse_autoencoder.metrics.abstract_metric import MetricResult import torch import wandb
18,523
"""Default pipeline.""" if TYPE_CHECKING: DEFAULT_CHECKPOINT_DIRECTORY: Path = Path(gettempdir()) / "sparse_autoencoder" class Pipeline: """Pipeline for training a Sparse Autoencoder on TransformerLens activations. Includes all the key functionality to train a sparse autoencoder, with a specific set of hyperparameters. """ activation_resampler: ActivationResampler | None """Activation resampler to use.""" autoencoder: SparseAutoencoder | DataParallel[SparseAutoencoder] | DeepSpeedEngine """Sparse autoencoder to train.""" n_input_features: int """Number of input features in the sparse autoencoder.""" n_learned_features: int """Number of learned features in the sparse autoencoder.""" cache_names: list[str] """Names of the cache hook points to use in the source model.""" layer: int """Layer to stope the source model at (if we don't need activations after this layer).""" log_frequency: int """Frequency at which to log metrics (in steps).""" loss: AbstractLoss """Loss function to use."""
"""Default pipeline.""" if TYPE_CHECKING: DEFAULT_CHECKPOINT_DIRECTORY: Path = Path(gettempdir()) / "sparse_autoencoder" class Pipeline: """Pipeline for training a Sparse Autoencoder on TransformerLens activations. Includes all the key functionality to train a sparse autoencoder, with a specific set of hyperparameters. """ activation_resampler: ActivationResampler | None """Activation resampler to use.""" autoencoder: SparseAutoencoder | DataParallel[SparseAutoencoder] | DeepSpeedEngine """Sparse autoencoder to train.""" n_input_features: int """Number of input features in the sparse autoencoder.""" n_learned_features: int """Number of learned features in the sparse autoencoder.""" cache_names: list[str] """Names of the cache hook points to use in the source model.""" layer: int """Layer to stope the source model at (if we don't need activations after this layer).""" log_frequency: int """Frequency at which to log metrics (in steps).""" loss: AbstractLoss """Loss function to use."""
metrics: MetricsContainer
6
2023-10-27 07:37:15+00:00
24k
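In the flattened snippet above, `if TYPE_CHECKING:` appears to wrap `DEFAULT_CHECKPOINT_DIRECTORY`, but the guard conventionally wraps imports needed only for annotations; here the `MetricResult` import sits in the import block instead. A sketch of how the module top plausibly reads in its original layout (an assumption based on the imports shown):

from pathlib import Path
from tempfile import gettempdir
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Imported only while type checking, so no runtime dependency is created.
    from sparse_autoencoder.metrics.abstract_metric import MetricResult

DEFAULT_CHECKPOINT_DIRECTORY: Path = Path(gettempdir()) / "sparse_autoencoder"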
OATML-Markslab/ProteinNPT
scripts/train.py
[ { "identifier": "ProteinNPTModel", "path": "proteinnpt/model.py", "snippet": "class ProteinNPTModel(nn.Module):\n def __init__(self, args, alphabet):\n super().__init__()\n self.args = args\n self.alphabet = alphabet\n self.alphabet_size = len(alphabet)\n self.paddi...
import os,gc import json import argparse import random import numpy as np import pandas as pd import wandb import torch import proteinnpt,baselines,utils from collections import defaultdict from proteinnpt.model import ProteinNPTModel from baselines.model import AugmentedPropertyPredictor from utils.esm.data import Alphabet from utils.tranception.model_pytorch import get_tranception_tokenizer from utils.data_utils import get_train_val_test_data, standardize, pnpt_count_non_nan, pnpt_spearmanr from utils.msa_utils import process_MSA from utils.model_utils import Trainer
18,339
def setup_config_and_paths(args): # All parameters that are not defined by end user are fetched from the config file if args.model_config_location is not None: args.main_config=json.load(open(args.model_config_location)) for key in args.main_config: if args.__dict__[key] is None: args.__dict__[key] = args.main_config[key] # File paths config for local_path in ['embedding_model_location','MSA_data_folder','MSA_weight_data_folder','path_to_hhfilter']: if getattr(args, local_path): setattr(args, local_path, args.data_location + os.sep + getattr(args, local_path)) if not os.path.exists(args.data_location + os.sep + 'model_predictions'): os.mkdir(args.data_location + os.sep + 'model_predictions') if not os.path.exists(args.data_location + os.sep + 'checkpoint'): os.mkdir(args.data_location + os.sep + 'checkpoint') args.output_scores_location = args.data_location + os.sep + 'model_predictions' + os.sep + args.model_name_suffix if not os.path.exists(args.output_scores_location): os.mkdir(args.output_scores_location) args.model_location = args.data_location + os.sep + 'checkpoint' + os.sep + args.model_name_suffix if not os.path.exists(args.model_location): os.mkdir(args.model_location) # Target config args.target_config=json.load(open(args.target_config_location)) zero_shot_predictions_mapping={ "MSA_Transformer_pred": "MSA_Transformer_ensemble", "ESM1v_pred": "ESM1v_ensemble", "TranceptEVE_pred": "TranceptEVE_L", "Tranception_pred": "Tranception_L", "DeepSequence_pred": "DeepSequence_ensemble" } if args.model_type=="ProteinNPT": zero_shot_predictions_mapping["ProteinNPT"]=zero_shot_predictions_mapping[args.aa_embeddings+"_pred"] if args.augmentation=="zero_shot_fitness_predictions_auxiliary_labels": # Add auxiliary label to target_config assert args.zero_shot_fitness_predictions_location is not None, "Location of zero-shot fitness predictions to use as auxiliary labels not properly referenced" print("Using zero-shot fitness predictions as auxiliary labels") args.target_config["zero_shot_fitness_predictions"] = { "type": "continuous", "dim": 1, "var_name": zero_shot_predictions_mapping[args.model_type], #Select the relevant model for zero-shot fitness predictions "location": args.zero_shot_fitness_predictions_location, "in_NPT_loss": False, "main_target": False } args.augmentation_short="auxiliary" elif args.augmentation=="zero_shot_fitness_predictions_covariate": # Will use zero-shot fitness predictions as an additional model covariate assert args.zero_shot_fitness_predictions_location is not None, "Location of zero-shot fitness predictions to use as model covariate not properly referenced" print("Using zero-shot fitness predictions as covariate") args.augmentation_short="covariate" args.zero_shot_fitness_predictions_var_name = zero_shot_predictions_mapping[args.model_type] else: args.augmentation_short="none" for target_index,target in enumerate(args.target_config): if "location" not in args.target_config[target].keys(): # Note: the case of zero-shot fitness predictions is already handled above if present if args.assay_location is not None: # We passed at least one path for the assay location num_targets = [x for x in args.target_config.keys() if args.target_config[x]["in_NPT_loss"]] if len(args.assay_location) > 1: assert len(args.assay_location)==num_targets, "Trying to predict {} targets, but only referencing {} distinct paths for them.".format(num_targets,len(args.assay_location)) args.target_config[target]["location"] = args.assay_location[target_index] print("Location used for target {} 
if {}".format(target,args.assay_location[target_index])) else: args.target_config[target]["location"] = args.assay_location[0] print("Location used for target {} if {}".format(target,args.assay_location[0])) else: print("Assay location not provided. Defaulting to location for single substitutions fitness assays: {}".format(args.data_location + os.sep + 'data/fitness/substitutions_singles')) args.target_config[target]["location"] = args.data_location + os.sep + 'data/fitness/substitutions_singles' return args def log_performance_fold(args,target_names,test_eval_results,trainer_final_status,perf_list,logs_folder=None): test_logs = {'total_training_steps': trainer_final_status['total_training_steps'], 'total_training_epochs': trainer_final_status['total_training_epochs'], 'total_train_time': trainer_final_status['total_train_time']} if logs_folder is None: dir_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) logs_folder = dir_path+os.sep+'output' if not os.path.exists(logs_folder): os.mkdir(logs_folder) if args.model_type=="ProteinNPT": normalization = 0 for target_name in target_names: normalization += test_eval_results['eval_num_masked_targets'][target_name] else: normalization = test_eval_results['eval_num_predicted_targets'] test_logs['Test total loss per seq.'] = test_eval_results['eval_total_loss'] / normalization spearmans = {target_name: pnpt_spearmanr(test_eval_results['output_scores']['predictions_'+target_name], test_eval_results['output_scores']['labels_'+target_name]) for target_name in target_names}
def setup_config_and_paths(args): # All parameters that are not defined by end user are fetched from the config file if args.model_config_location is not None: args.main_config=json.load(open(args.model_config_location)) for key in args.main_config: if args.__dict__[key] is None: args.__dict__[key] = args.main_config[key] # File paths config for local_path in ['embedding_model_location','MSA_data_folder','MSA_weight_data_folder','path_to_hhfilter']: if getattr(args, local_path): setattr(args, local_path, args.data_location + os.sep + getattr(args, local_path)) if not os.path.exists(args.data_location + os.sep + 'model_predictions'): os.mkdir(args.data_location + os.sep + 'model_predictions') if not os.path.exists(args.data_location + os.sep + 'checkpoint'): os.mkdir(args.data_location + os.sep + 'checkpoint') args.output_scores_location = args.data_location + os.sep + 'model_predictions' + os.sep + args.model_name_suffix if not os.path.exists(args.output_scores_location): os.mkdir(args.output_scores_location) args.model_location = args.data_location + os.sep + 'checkpoint' + os.sep + args.model_name_suffix if not os.path.exists(args.model_location): os.mkdir(args.model_location) # Target config args.target_config=json.load(open(args.target_config_location)) zero_shot_predictions_mapping={ "MSA_Transformer_pred": "MSA_Transformer_ensemble", "ESM1v_pred": "ESM1v_ensemble", "TranceptEVE_pred": "TranceptEVE_L", "Tranception_pred": "Tranception_L", "DeepSequence_pred": "DeepSequence_ensemble" } if args.model_type=="ProteinNPT": zero_shot_predictions_mapping["ProteinNPT"]=zero_shot_predictions_mapping[args.aa_embeddings+"_pred"] if args.augmentation=="zero_shot_fitness_predictions_auxiliary_labels": # Add auxiliary label to target_config assert args.zero_shot_fitness_predictions_location is not None, "Location of zero-shot fitness predictions to use as auxiliary labels not properly referenced" print("Using zero-shot fitness predictions as auxiliary labels") args.target_config["zero_shot_fitness_predictions"] = { "type": "continuous", "dim": 1, "var_name": zero_shot_predictions_mapping[args.model_type], #Select the relevant model for zero-shot fitness predictions "location": args.zero_shot_fitness_predictions_location, "in_NPT_loss": False, "main_target": False } args.augmentation_short="auxiliary" elif args.augmentation=="zero_shot_fitness_predictions_covariate": # Will use zero-shot fitness predictions as an additional model covariate assert args.zero_shot_fitness_predictions_location is not None, "Location of zero-shot fitness predictions to use as model covariate not properly referenced" print("Using zero-shot fitness predictions as covariate") args.augmentation_short="covariate" args.zero_shot_fitness_predictions_var_name = zero_shot_predictions_mapping[args.model_type] else: args.augmentation_short="none" for target_index,target in enumerate(args.target_config): if "location" not in args.target_config[target].keys(): # Note: the case of zero-shot fitness predictions is already handled above if present if args.assay_location is not None: # We passed at least one path for the assay location num_targets = [x for x in args.target_config.keys() if args.target_config[x]["in_NPT_loss"]] if len(args.assay_location) > 1: assert len(args.assay_location)==num_targets, "Trying to predict {} targets, but only referencing {} distinct paths for them.".format(num_targets,len(args.assay_location)) args.target_config[target]["location"] = args.assay_location[target_index] print("Location used for target {} 
if {}".format(target,args.assay_location[target_index])) else: args.target_config[target]["location"] = args.assay_location[0] print("Location used for target {} if {}".format(target,args.assay_location[0])) else: print("Assay location not provided. Defaulting to location for single substitutions fitness assays: {}".format(args.data_location + os.sep + 'data/fitness/substitutions_singles')) args.target_config[target]["location"] = args.data_location + os.sep + 'data/fitness/substitutions_singles' return args def log_performance_fold(args,target_names,test_eval_results,trainer_final_status,perf_list,logs_folder=None): test_logs = {'total_training_steps': trainer_final_status['total_training_steps'], 'total_training_epochs': trainer_final_status['total_training_epochs'], 'total_train_time': trainer_final_status['total_train_time']} if logs_folder is None: dir_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) logs_folder = dir_path+os.sep+'output' if not os.path.exists(logs_folder): os.mkdir(logs_folder) if args.model_type=="ProteinNPT": normalization = 0 for target_name in target_names: normalization += test_eval_results['eval_num_masked_targets'][target_name] else: normalization = test_eval_results['eval_num_predicted_targets'] test_logs['Test total loss per seq.'] = test_eval_results['eval_total_loss'] / normalization spearmans = {target_name: pnpt_spearmanr(test_eval_results['output_scores']['predictions_'+target_name], test_eval_results['output_scores']['labels_'+target_name]) for target_name in target_names}
num_obs_spearmans = {target_name: pnpt_count_non_nan(test_eval_results['output_scores']['labels_'+target_name]) for target_name in target_names}
6
2023-10-28 11:41:05+00:00
24k
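`log_performance_fold` scores each target with `pnpt_spearmanr` and counts usable observations with `pnpt_count_non_nan`, both imported from `utils.data_utils` and not defined in this snippet. A minimal NaN-aware sketch of what they likely compute (the exact masking convention is an assumption):

import numpy as np
from scipy.stats import spearmanr

def pnpt_count_non_nan(x):
    # Number of finite (non-NaN) entries, i.e. the usable sample size.
    x = np.asarray(x, dtype=float)
    return int(np.count_nonzero(~np.isnan(x)))

def pnpt_spearmanr(predictions, labels):
    # Spearman correlation restricted to positions where both arrays are
    # finite, so missing assay labels do not poison the rank statistic.
    predictions = np.asarray(predictions, dtype=float)
    labels = np.asarray(labels, dtype=float)
    mask = ~np.isnan(predictions) & ~np.isnan(labels)
    rho, _pvalue = spearmanr(predictions[mask], labels[mask])
    return rho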
CVHub520/yolov5_obb
detect.py
[ { "identifier": "DetectMultiBackend", "path": "models/common.py", "snippet": "class DetectMultiBackend(nn.Module):\n # YOLOv5 MultiBackend class for python inference on various backends\n def __init__(self, weights='yolov5s.pt', device=None, dnn=False):\n # Usage:\n # PyTorch: ...
import argparse import os import sys import cv2 import torch import torch.backends.cudnn as cudnn from pathlib import Path from models.common import DetectMultiBackend from utils.datasets import IMG_FORMATS, VID_FORMATS, LoadImages, LoadStreams from utils.general import (LOGGER, check_file, check_img_size, check_imshow, check_requirements, colorstr, increment_path, non_max_suppression, non_max_suppression_obb, print_args, scale_coords, scale_polys, strip_optimizer, xyxy2xywh) from utils.plots import Annotator, colors, save_one_box from utils.torch_utils import select_device, time_sync from utils.rboxs_utils import poly2rbox, rbox2poly
14,927
# Dataloader if webcam: view_img = check_imshow() cudnn.benchmark = True # set True to speed up constant image size inference dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt) bs = len(dataset) # batch_size else: dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt) bs = 1 # batch_size vid_path, vid_writer = [None] * bs, [None] * bs # Run inference model.warmup(imgsz=(1, 3, *imgsz), half=half) # warmup dt, seen = [0.0, 0.0, 0.0], 0 for path, im, im0s, vid_cap, s in dataset: t1 = time_sync() im = torch.from_numpy(im).to(device) im = im.half() if half else im.float() # uint8 to fp16/32 im /= 255 # 0 - 255 to 0.0 - 1.0 if len(im.shape) == 3: im = im[None] # expand for batch dim t2 = time_sync() dt[0] += t2 - t1 # Inference visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False pred = model(im, augment=augment, visualize=visualize) t3 = time_sync() dt[1] += t3 - t2 # NMS # pred: list*(n, [xylsθ, conf, cls]) θ ∈ [-pi/2, pi/2) pred = non_max_suppression_obb(pred, conf_thres, iou_thres, classes, agnostic_nms, multi_label=True, max_det=max_det) dt[2] += time_sync() - t3 # Second-stage classifier (optional) # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s) # Process predictions for i, det in enumerate(pred): # per image pred_poly = rbox2poly(det[:, :5]) # (n, [x1 y1 x2 y2 x3 y3 x4 y4]) seen += 1 if webcam: # batch_size >= 1 p, im0, frame = path[i], im0s[i].copy(), dataset.count s += f'{i}: ' else: p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0) p = Path(p) # to Path save_path = str(save_dir / p.name) # im.jpg txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt s += '%gx%g ' % im.shape[2:] # print string gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh imc = im0.copy() if save_crop else im0 # for save_crop annotator = Annotator(im0, line_width=line_thickness, example=str(names)) if len(det): # Rescale polys from img_size to im0 size # det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round() pred_poly = scale_polys(im.shape[2:], pred_poly, im0.shape) det = torch.cat((pred_poly, det[:, -2:]), dim=1) # (n, [poly conf cls]) # Print results for c in det[:, -1].unique(): n = (det[:, -1] == c).sum() # detections per class s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string # Write results for *poly, conf, cls in reversed(det): if save_txt: # Write to file # xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh poly = poly.tolist() line = (cls, *poly, conf) if save_conf else (cls, *poly) # label format with open(txt_path + '.txt', 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') if save_img or save_crop or view_img: # Add poly to image c = int(cls) # integer class label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') # annotator.box_label(xyxy, label, color=colors(c, True)) annotator.poly_label(poly, label, color=colors(c, True)) if save_crop: # Yolov5-obb doesn't support it yet # save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) pass # Print time (inference-only) LOGGER.info(f'{s}Done. 
({t3 - t2:.3f}s)') # Stream results im0 = annotator.result() if view_img: cv2.imshow(str(p), im0) cv2.waitKey(1) # 1 millisecond # Save results (image with detections) if save_img: if dataset.mode == 'image': cv2.imwrite(save_path, im0) else: # 'video' or 'stream' if vid_path[i] != save_path: # new video vid_path[i] = save_path if isinstance(vid_writer[i], cv2.VideoWriter): vid_writer[i].release() # release previous video writer if vid_cap: # video fps = vid_cap.get(cv2.CAP_PROP_FPS) w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) else: # stream fps, w, h = 30, im0.shape[1], im0.shape[0] save_path += '.mp4' vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) vid_writer[i].write(im0) # Print results t = tuple(x / seen * 1E3 for x in dt) # speeds per image LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) if save_txt or save_img: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") if update:
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ Run inference on images, videos, directories, streams, etc. Usage: $ python path/to/detect.py --weights yolov5s.pt --source 0 # webcam img.jpg # image vid.mp4 # video path/ # directory path/*.jpg # glob 'https://youtu.be/Zgi9g1ksQHc' # YouTube 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream """ FILE = Path(__file__).resolve() ROOT = FILE.parents[0] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative @torch.no_grad() def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s) source=ROOT / 'data/images', # file/dir/URL/glob, 0 for webcam imgsz=(640, 640), # inference size (height, width) conf_thres=0.25, # confidence threshold iou_thres=0.45, # NMS IOU threshold max_det=1000, # maximum detections per image device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu view_img=False, # show results save_txt=False, # save results to *.txt save_conf=False, # save confidences in --save-txt labels save_crop=False, # save cropped prediction boxes nosave=False, # do not save images/videos classes=None, # filter by class: --class 0, or --class 0 2 3 agnostic_nms=False, # class-agnostic NMS augment=False, # augmented inference visualize=False, # visualize features update=False, # update all models project=ROOT / 'runs/detect', # save results to project/name name='exp', # save results to project/name exist_ok=False, # existing project/name ok, do not increment line_thickness=3, # bounding box thickness (pixels) hide_labels=False, # hide labels hide_conf=False, # hide confidences half=False, # use FP16 half-precision inference dnn=False, # use OpenCV DNN for ONNX inference ): source = str(source) save_img = not nosave and not source.endswith('.txt') # save inference images is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file) if is_url and is_file: source = check_file(source) # download # Directories save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model device = select_device(device) model = DetectMultiBackend(weights, device=device, dnn=dnn) stride, names, pt, jit, onnx, engine = model.stride, model.names, model.pt, model.jit, model.onnx, model.engine imgsz = check_img_size(imgsz, s=stride) # check image size # Half half &= (pt or jit or engine) and device.type != 'cpu' # half precision only supported by PyTorch on CUDA if pt or jit: model.model.half() if half else model.model.float() # Dataloader if webcam: view_img = check_imshow() cudnn.benchmark = True # set True to speed up constant image size inference dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt) bs = len(dataset) # batch_size else: dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt) bs = 1 # batch_size vid_path, vid_writer = [None] * bs, [None] * bs # Run inference model.warmup(imgsz=(1, 3, *imgsz), half=half) # warmup dt, seen = [0.0, 0.0, 0.0], 0 for path, im, im0s, vid_cap, s in dataset: t1 = time_sync() im = torch.from_numpy(im).to(device) im = im.half() if half else im.float() # uint8 to fp16/32 im /= 255 # 0 - 255 to 0.0 - 1.0 if len(im.shape) == 3: im = im[None] # expand for batch dim t2 = time_sync() dt[0] += t2 - t1 # Inference visualize = 
increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False pred = model(im, augment=augment, visualize=visualize) t3 = time_sync() dt[1] += t3 - t2 # NMS # pred: list*(n, [xylsθ, conf, cls]) θ ∈ [-pi/2, pi/2) pred = non_max_suppression_obb(pred, conf_thres, iou_thres, classes, agnostic_nms, multi_label=True, max_det=max_det) dt[2] += time_sync() - t3 # Second-stage classifier (optional) # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s) # Process predictions for i, det in enumerate(pred): # per image pred_poly = rbox2poly(det[:, :5]) # (n, [x1 y1 x2 y2 x3 y3 x4 y4]) seen += 1 if webcam: # batch_size >= 1 p, im0, frame = path[i], im0s[i].copy(), dataset.count s += f'{i}: ' else: p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0) p = Path(p) # to Path save_path = str(save_dir / p.name) # im.jpg txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt s += '%gx%g ' % im.shape[2:] # print string gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh imc = im0.copy() if save_crop else im0 # for save_crop annotator = Annotator(im0, line_width=line_thickness, example=str(names)) if len(det): # Rescale polys from img_size to im0 size # det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round() pred_poly = scale_polys(im.shape[2:], pred_poly, im0.shape) det = torch.cat((pred_poly, det[:, -2:]), dim=1) # (n, [poly conf cls]) # Print results for c in det[:, -1].unique(): n = (det[:, -1] == c).sum() # detections per class s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string # Write results for *poly, conf, cls in reversed(det): if save_txt: # Write to file # xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh poly = poly.tolist() line = (cls, *poly, conf) if save_conf else (cls, *poly) # label format with open(txt_path + '.txt', 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') if save_img or save_crop or view_img: # Add poly to image c = int(cls) # integer class label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') # annotator.box_label(xyxy, label, color=colors(c, True)) annotator.poly_label(poly, label, color=colors(c, True)) if save_crop: # Yolov5-obb doesn't support it yet # save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) pass # Print time (inference-only) LOGGER.info(f'{s}Done. 
({t3 - t2:.3f}s)') # Stream results im0 = annotator.result() if view_img: cv2.imshow(str(p), im0) cv2.waitKey(1) # 1 millisecond # Save results (image with detections) if save_img: if dataset.mode == 'image': cv2.imwrite(save_path, im0) else: # 'video' or 'stream' if vid_path[i] != save_path: # new video vid_path[i] = save_path if isinstance(vid_writer[i], cv2.VideoWriter): vid_writer[i].release() # release previous video writer if vid_cap: # video fps = vid_cap.get(cv2.CAP_PROP_FPS) w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) else: # stream fps, w, h = 30, im0.shape[1], im0.shape[0] save_path += '.mp4' vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) vid_writer[i].write(im0) # Print results t = tuple(x / seen * 1E3 for x in dt) # speeds per image LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) if save_txt or save_img: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") if update:
strip_optimizer(weights) # update model (to fix SourceChangeWarning)
17
2023-10-31 06:06:41+00:00
24k
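The detection loop above turns each rotated box `det[:, :5] = [x_ctr, y_ctr, long_side, short_side, θ]`, with θ ∈ [-π/2, π/2), into four polygon corners via `rbox2poly` before rescaling and drawing. A numpy sketch of that conversion (the corner ordering and the sign of θ under image coordinates, y pointing down, are assumptions):

import numpy as np

def rbox2poly_np(rboxes):
    # rboxes: (n, 5) array of [x_ctr, y_ctr, long_side, short_side, theta].
    # Returns an (n, 8) array of corners [x1 y1 x2 y2 x3 y3 x4 y4].
    x, y, l, s, theta = (rboxes[:, i] for i in range(5))
    cos_t, sin_t = np.cos(theta), np.sin(theta)
    # Half-extent vectors along the box's long and short axes.
    vl = np.stack([l / 2 * cos_t, l / 2 * sin_t], axis=-1)
    vs = np.stack([-s / 2 * sin_t, s / 2 * cos_t], axis=-1)
    ctr = np.stack([x, y], axis=-1)
    return np.concatenate(
        [ctr + vl + vs, ctr + vl - vs, ctr - vl - vs, ctr - vl + vs], axis=-1)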
serengil/LightPHE
lightphe/models/Ciphertext.py
[ { "identifier": "Homomorphic", "path": "lightphe/models/Homomorphic.py", "snippet": "class Homomorphic(ABC):\n keys: dict\n plaintext_modulo: int\n ciphertext_modulo: int\n\n @abstractmethod\n def generate_keys(self, key_size: int, s: Optional[int] = None) -> dict:\n pass\n\n @a...
from typing import Union from lightphe.models.Homomorphic import Homomorphic from lightphe.models.Algorithm import Algorithm from lightphe.cryptosystems.RSA import RSA from lightphe.cryptosystems.ElGamal import ElGamal from lightphe.cryptosystems.Paillier import Paillier from lightphe.cryptosystems.DamgardJurik import DamgardJurik from lightphe.cryptosystems.OkamotoUchiyama import OkamotoUchiyama from lightphe.cryptosystems.Benaloh import Benaloh from lightphe.cryptosystems.NaccacheStern import NaccacheStern from lightphe.cryptosystems.GoldwasserMicali import GoldwasserMicali from lightphe.cryptosystems.EllipticCurveElGamal import EllipticCurveElGamal from lightphe.commons import phe_utils from lightphe.commons.logger import Logger
17,615
logger = Logger(module="lightphe/models/Ciphertext.py") # pylint: disable=too-few-public-methods, no-else-return class Ciphertext: def __init__(self, algorithm_name: str, keys: dict, value: Union[int, tuple, list]): self.algorithm_name = algorithm_name self.keys = keys self.value = value if algorithm_name == Algorithm.RSA: cs = RSA(keys=keys) elif algorithm_name == Algorithm.ElGamal: cs = ElGamal(keys=keys) elif algorithm_name == Algorithm.ExponentialElGamal: cs = ElGamal(keys=keys, exponential=True) elif algorithm_name == Algorithm.EllipticCurveElGamal: cs = EllipticCurveElGamal(keys=keys) elif algorithm_name == Algorithm.Paillier: cs = Paillier(keys=keys) elif algorithm_name == Algorithm.DamgardJurik: cs = DamgardJurik(keys=keys) elif algorithm_name == Algorithm.OkamotoUchiyama: cs = OkamotoUchiyama(keys=keys) elif algorithm_name == Algorithm.Benaloh: cs = Benaloh(keys=keys) elif algorithm_name == Algorithm.NaccacheStern: cs = NaccacheStern(keys=keys)
logger = Logger(module="lightphe/models/Ciphertext.py") # pylint: disable=too-few-public-methods, no-else-return class Ciphertext: def __init__(self, algorithm_name: str, keys: dict, value: Union[int, tuple, list]): self.algorithm_name = algorithm_name self.keys = keys self.value = value if algorithm_name == Algorithm.RSA: cs = RSA(keys=keys) elif algorithm_name == Algorithm.ElGamal: cs = ElGamal(keys=keys) elif algorithm_name == Algorithm.ExponentialElGamal: cs = ElGamal(keys=keys, exponential=True) elif algorithm_name == Algorithm.EllipticCurveElGamal: cs = EllipticCurveElGamal(keys=keys) elif algorithm_name == Algorithm.Paillier: cs = Paillier(keys=keys) elif algorithm_name == Algorithm.DamgardJurik: cs = DamgardJurik(keys=keys) elif algorithm_name == Algorithm.OkamotoUchiyama: cs = OkamotoUchiyama(keys=keys) elif algorithm_name == Algorithm.Benaloh: cs = Benaloh(keys=keys) elif algorithm_name == Algorithm.NaccacheStern: cs = NaccacheStern(keys=keys)
elif algorithm_name == Algorithm.GoldwasserMicali:
9
2023-10-28 14:57:59+00:00
24k
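`Ciphertext.__init__` above selects the cryptosystem with an if/elif ladder over `algorithm_name`. An equivalent table-driven sketch, assuming every class shown takes `keys=` (with `functools.partial` covering the `exponential=True` special case of ExponentialElGamal):

from functools import partial

CRYPTOSYSTEMS = {
    Algorithm.RSA: RSA,
    Algorithm.ElGamal: ElGamal,
    Algorithm.ExponentialElGamal: partial(ElGamal, exponential=True),
    Algorithm.EllipticCurveElGamal: EllipticCurveElGamal,
    Algorithm.Paillier: Paillier,
    Algorithm.DamgardJurik: DamgardJurik,
    Algorithm.OkamotoUchiyama: OkamotoUchiyama,
    Algorithm.Benaloh: Benaloh,
    Algorithm.NaccacheStern: NaccacheStern,
    Algorithm.GoldwasserMicali: GoldwasserMicali,
}

def build_cryptosystem(algorithm_name, keys):
    # Instantiate the homomorphic scheme registered for this algorithm name.
    try:
        return CRYPTOSYSTEMS[algorithm_name](keys=keys)
    except KeyError:
        raise ValueError(f"unimplemented algorithm: {algorithm_name}") from None

Adding a scheme then costs one dictionary entry instead of a new branch, and an unknown name surfaces as a single, explicit error.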
DataCanvasIO/LMS
lms/runtime/prune/llm_pruner/LLMPruner/peft/mapping.py
[ { "identifier": "PeftModel", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/peft_model.py", "snippet": "class PeftModel(PushToHubMixin, torch.nn.Module):\n \"\"\"\n Base model encompassing various Peft methods.\n\n Args:\n model ([`~transformers.PreTrainedModel`]): The base transfo...
from .peft_model import ( PeftModel, PeftModelForCausalLM, PeftModelForSeq2SeqLM, PeftModelForSequenceClassification, PeftModelForTokenClassification, ) from .tuners import AdaLoraConfig, LoraConfig, PrefixTuningConfig, PromptEncoderConfig, PromptTuningConfig from .utils import PromptLearningConfig
14,919
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. MODEL_TYPE_TO_PEFT_MODEL_MAPPING = { "SEQ_CLS": PeftModelForSequenceClassification, "SEQ_2_SEQ_LM": PeftModelForSeq2SeqLM, "CAUSAL_LM": PeftModelForCausalLM, "TOKEN_CLS": PeftModelForTokenClassification, } PEFT_TYPE_TO_CONFIG_MAPPING = { "PROMPT_TUNING": PromptTuningConfig, "PREFIX_TUNING": PrefixTuningConfig, "P_TUNING": PromptEncoderConfig, "LORA": LoraConfig, "ADALORA": AdaLoraConfig, } def get_peft_config(config_dict): """ Returns a Peft config object from a dictionary. Args: config_dict (`Dict[str, Any]`): Dictionary containing the configuration parameters. """ return PEFT_TYPE_TO_CONFIG_MAPPING[config_dict["peft_type"]](**config_dict) def _prepare_prompt_learning_config(peft_config, model_config): if peft_config.num_layers is None: if "num_hidden_layers" in model_config: num_layers = model_config["num_hidden_layers"] elif "num_layers" in model_config: num_layers = model_config["num_layers"] elif "n_layer" in model_config: num_layers = model_config["n_layer"] else: raise ValueError("Please specify `num_layers` in `peft_config`") peft_config.num_layers = num_layers if peft_config.token_dim is None: if "hidden_size" in model_config: token_dim = model_config["hidden_size"] elif "n_embd" in model_config: token_dim = model_config["n_embd"] elif "d_model" in model_config: token_dim = model_config["d_model"] else: raise ValueError("Please specify `token_dim` in `peft_config`") peft_config.token_dim = token_dim if peft_config.num_attention_heads is None: if "num_attention_heads" in model_config: num_attention_heads = model_config["num_attention_heads"] elif "n_head" in model_config: num_attention_heads = model_config["n_head"] elif "num_heads" in model_config: num_attention_heads = model_config["num_heads"] elif "encoder_attention_heads" in model_config: num_attention_heads = model_config["encoder_attention_heads"] else: raise ValueError("Please specify `num_attention_heads` in `peft_config`") peft_config.num_attention_heads = num_attention_heads if getattr(peft_config, "encoder_hidden_size", None) is None: setattr(peft_config, "encoder_hidden_size", token_dim) return peft_config def get_peft_model(model, peft_config): """ Returns a Peft model object from a model and a config. Args: model ([`transformers.PreTrainedModel`]): Model to be wrapped. peft_config ([`PeftConfig`]): Configuration object containing the parameters of the Peft model. """ model_config = model.config.to_dict() if hasattr(model.config, "to_dict") else model.config peft_config.base_model_name_or_path = model.__dict__.get("name_or_path", None) if peft_config.task_type not in MODEL_TYPE_TO_PEFT_MODEL_MAPPING.keys() and not isinstance( peft_config, PromptLearningConfig ):
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. MODEL_TYPE_TO_PEFT_MODEL_MAPPING = { "SEQ_CLS": PeftModelForSequenceClassification, "SEQ_2_SEQ_LM": PeftModelForSeq2SeqLM, "CAUSAL_LM": PeftModelForCausalLM, "TOKEN_CLS": PeftModelForTokenClassification, } PEFT_TYPE_TO_CONFIG_MAPPING = { "PROMPT_TUNING": PromptTuningConfig, "PREFIX_TUNING": PrefixTuningConfig, "P_TUNING": PromptEncoderConfig, "LORA": LoraConfig, "ADALORA": AdaLoraConfig, } def get_peft_config(config_dict): """ Returns a Peft config object from a dictionary. Args: config_dict (`Dict[str, Any]`): Dictionary containing the configuration parameters. """ return PEFT_TYPE_TO_CONFIG_MAPPING[config_dict["peft_type"]](**config_dict) def _prepare_prompt_learning_config(peft_config, model_config): if peft_config.num_layers is None: if "num_hidden_layers" in model_config: num_layers = model_config["num_hidden_layers"] elif "num_layers" in model_config: num_layers = model_config["num_layers"] elif "n_layer" in model_config: num_layers = model_config["n_layer"] else: raise ValueError("Please specify `num_layers` in `peft_config`") peft_config.num_layers = num_layers if peft_config.token_dim is None: if "hidden_size" in model_config: token_dim = model_config["hidden_size"] elif "n_embd" in model_config: token_dim = model_config["n_embd"] elif "d_model" in model_config: token_dim = model_config["d_model"] else: raise ValueError("Please specify `token_dim` in `peft_config`") peft_config.token_dim = token_dim if peft_config.num_attention_heads is None: if "num_attention_heads" in model_config: num_attention_heads = model_config["num_attention_heads"] elif "n_head" in model_config: num_attention_heads = model_config["n_head"] elif "num_heads" in model_config: num_attention_heads = model_config["num_heads"] elif "encoder_attention_heads" in model_config: num_attention_heads = model_config["encoder_attention_heads"] else: raise ValueError("Please specify `num_attention_heads` in `peft_config`") peft_config.num_attention_heads = num_attention_heads if getattr(peft_config, "encoder_hidden_size", None) is None: setattr(peft_config, "encoder_hidden_size", token_dim) return peft_config def get_peft_model(model, peft_config): """ Returns a Peft model object from a model and a config. Args: model ([`transformers.PreTrainedModel`]): Model to be wrapped. peft_config ([`PeftConfig`]): Configuration object containing the parameters of the Peft model. """ model_config = model.config.to_dict() if hasattr(model.config, "to_dict") else model.config peft_config.base_model_name_or_path = model.__dict__.get("name_or_path", None) if peft_config.task_type not in MODEL_TYPE_TO_PEFT_MODEL_MAPPING.keys() and not isinstance( peft_config, PromptLearningConfig ):
return PeftModel(model, peft_config)
0
2023-10-30 10:50:32+00:00
24k
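A minimal usage sketch for the mapping above, assuming the module's own `get_peft_model` and `LoraConfig` are in scope (the base model name and LoRA hyperparameters are illustrative assumptions):

from transformers import AutoModelForCausalLM

base = AutoModelForCausalLM.from_pretrained("gpt2")  # any causal LM works here
config = LoraConfig(task_type="CAUSAL_LM", r=8,
                    lora_alpha=16, lora_dropout=0.05)
peft_model = get_peft_model(base, config)  # wraps the base model with LoRA adapters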
chenran-li/RQL-release
stable_baselines3/dqn_ME/dqn_ME.py
[ { "identifier": "ReplayBuffer", "path": "stable_baselines3/common/buffers.py", "snippet": "class ReplayBuffer(BaseBuffer):\n \"\"\"\n Replay buffer used in off-policy algorithms like SAC/TD3.\n\n :param buffer_size: Max number of element in the buffer\n :param observation_space: Observation ...
import warnings import numpy as np import torch as th from typing import Any, Dict, List, Optional, Tuple, Type, TypeVar, Union from gym import spaces from torch.nn import functional as F from stable_baselines3.common.buffers import ReplayBuffer from stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm from stable_baselines3.common.policies import BasePolicy from stable_baselines3.common.preprocessing import maybe_transpose from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule from stable_baselines3.common.utils import get_linear_fn, get_parameters_by_name, is_vectorized_observation, polyak_update from stable_baselines3.dqn_ME.policies_ME import CnnPolicy, DQNPolicy, MlpPolicy, MultiInputPolicy from stable_baselines3.dqn.dqn import DQN
15,144
SelfDQN_ME = TypeVar("SelfDQN_ME", bound="DQN_ME") class DQN_ME(DQN): """ Soft Deep Q-Network (i.e. entropy-regularized DQN) Paper: https://arxiv.org/abs/1312.5602, https://www.nature.com/articles/nature14236, https://arxiv.org/abs/1702.08165 Default hyperparameters are taken from the Nature paper, except for the optimizer and learning rate that were taken from Stable Baselines defaults. :param policy: The policy model to use (MlpPolicy, CnnPolicy, ...) :param env: The environment to learn from (if registered in Gym, can be str) :param learning_rate: The learning rate, it can be a function of the current progress remaining (from 1 to 0) :param buffer_size: size of the replay buffer :param learning_starts: how many steps of the model to collect transitions for before learning starts :param batch_size: Minibatch size for each gradient update :param tau: the soft update coefficient ("Polyak update", between 0 and 1) default 1 for hard update :param gamma: the discount factor :param train_freq: Update the model every ``train_freq`` steps. Alternatively pass a tuple of frequency and unit like ``(5, "step")`` or ``(2, "episode")``. :param gradient_steps: How many gradient steps to do after each rollout (see ``train_freq``) Set to ``-1`` means to do as many gradient steps as steps done in the environment during the rollout. :param replay_buffer_class: Replay buffer class to use (for instance ``HerReplayBuffer``). If ``None``, it will be automatically selected. :param replay_buffer_kwargs: Keyword arguments to pass to the replay buffer on creation. :param optimize_memory_usage: Enable a memory efficient variant of the replay buffer at a cost of more complexity. See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195 :param target_update_interval: update the target network every ``target_update_interval`` environment steps. :param exploration_fraction: fraction of entire training period over which the exploration rate is reduced :param exploration_initial_eps: initial value of random action probability :param exploration_final_eps: final value of random action probability :param max_grad_norm: The maximum value for the gradient clipping :param tensorboard_log: the log location for tensorboard (if None, no logging) :param policy_kwargs: additional arguments to be passed to the policy on creation :param verbose: Verbosity level: 0 for no output, 1 for info messages (such as device or wrappers used), 2 for debug messages :param seed: Seed for the pseudo random generators :param device: Device (cpu, cuda, ...) on which the code should be run. Setting it to auto, the code will be run on the GPU if possible. :param _init_setup_model: Whether or not to build the network at the creation of the instance """ policy_aliases: Dict[str, Type[BasePolicy]] = { "MlpPolicy": MlpPolicy, "CnnPolicy": CnnPolicy,
SelfDQN_ME = TypeVar("SelfDQN_ME", bound="DQN_ME") class DQN_ME(DQN): """ Soft Deep Q-Network (i.e. entropy-regularized DQN) Paper: https://arxiv.org/abs/1312.5602, https://www.nature.com/articles/nature14236, https://arxiv.org/abs/1702.08165 Default hyperparameters are taken from the Nature paper, except for the optimizer and learning rate that were taken from Stable Baselines defaults. :param policy: The policy model to use (MlpPolicy, CnnPolicy, ...) :param env: The environment to learn from (if registered in Gym, can be str) :param learning_rate: The learning rate, it can be a function of the current progress remaining (from 1 to 0) :param buffer_size: size of the replay buffer :param learning_starts: how many steps of the model to collect transitions for before learning starts :param batch_size: Minibatch size for each gradient update :param tau: the soft update coefficient ("Polyak update", between 0 and 1) default 1 for hard update :param gamma: the discount factor :param train_freq: Update the model every ``train_freq`` steps. Alternatively pass a tuple of frequency and unit like ``(5, "step")`` or ``(2, "episode")``. :param gradient_steps: How many gradient steps to do after each rollout (see ``train_freq``) Set to ``-1`` means to do as many gradient steps as steps done in the environment during the rollout. :param replay_buffer_class: Replay buffer class to use (for instance ``HerReplayBuffer``). If ``None``, it will be automatically selected. :param replay_buffer_kwargs: Keyword arguments to pass to the replay buffer on creation. :param optimize_memory_usage: Enable a memory efficient variant of the replay buffer at a cost of more complexity. See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195 :param target_update_interval: update the target network every ``target_update_interval`` environment steps. :param exploration_fraction: fraction of entire training period over which the exploration rate is reduced :param exploration_initial_eps: initial value of random action probability :param exploration_final_eps: final value of random action probability :param max_grad_norm: The maximum value for the gradient clipping :param tensorboard_log: the log location for tensorboard (if None, no logging) :param policy_kwargs: additional arguments to be passed to the policy on creation :param verbose: Verbosity level: 0 for no output, 1 for info messages (such as device or wrappers used), 2 for debug messages :param seed: Seed for the pseudo random generators :param device: Device (cpu, cuda, ...) on which the code should be run. Setting it to auto, the code will be run on the GPU if possible. :param _init_setup_model: Whether or not to build the network at the creation of the instance """ policy_aliases: Dict[str, Type[BasePolicy]] = { "MlpPolicy": MlpPolicy, "CnnPolicy": CnnPolicy,
"MultiInputPolicy": MultiInputPolicy,
9
2023-10-28 01:09:21+00:00
24k
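The docstring above describes entropy-regularized DQN; its defining change is the soft Bellman backup. A sketch of that target computation (tensor shapes and the temperature name `tau` are assumptions, not the repo's exact train() code):

import torch

def soft_bellman_target(next_q: torch.Tensor, rewards: torch.Tensor,
                        dones: torch.Tensor, gamma: float = 0.99,
                        tau: float = 1.0) -> torch.Tensor:
    # Soft state value: V(s') = tau * logsumexp(Q(s', .) / tau) over actions.
    next_v = tau * torch.logsumexp(next_q / tau, dim=1, keepdim=True)
    # Mask out the bootstrap term at episode ends.
    return rewards + (1.0 - dones) * gamma * next_v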
pytabular-ai/auto-scikit-dl
utils/model.py
[ { "identifier": "MLP", "path": "models/mlp.py", "snippet": "class MLP(TabModel):\n def __init__(\n self,\n model_config: dict,\n n_num_features: int,\n categories: ty.Optional[ty.List[int]],\n n_labels: int,\n device: ty.Union[str, torch.device] = 'cuda',\n ...
import os import time import json import yaml import shutil import random import datetime import numpy as np import torch import optuna from pathlib import Path from typing import Dict, List, Tuple, Union, Optional, Literal from models import MLP, FTTransformer, AutoInt, DCNv2, NODE from models.abstract import TabModel, check_dir from data.utils import Dataset from data.processor import DataProcessor
15,895
MODEL_CARDS = { 'xgboost': None, 'catboost': None, 'lightgbm': None, 'mlp': MLP, 'autoint': AutoInt, 'dcnv2': DCNv2, 'node': NODE, 'ft-transformer': FTTransformer, 'saint': None, 't2g-former': None, 'excel-former': None, } HPOLib = Literal['optuna', 'hyperopt'] # TODO: add 'hyperopt' support def get_model_cards(): return { 'ready': sorted(list([key for key, value in MODEL_CARDS.items() if value])), 'comming soon': sorted(list([key for key, value in MODEL_CARDS.items() if not value])) } def seed_everything(seed=42): ''' Sets the seed of the entire notebook so results are the same every time we run. This is for REPRODUCIBILITY. ''' random.seed(seed) # Set a fixed value for the hash seed os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) torch.manual_seed(seed) if torch.cuda.is_available(): torch.cuda.manual_seed(seed) torch.cuda.manual_seed_all(seed) # When running on the CuDNN backend, two further options must be set torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False def load_config_from_file(file): file = str(file) if file.endswith('.yaml'): with open(file, 'r') as f: cfg = yaml.safe_load(f) elif file.endswith('.json'): with open(file, 'r') as f: cfg = json.load(f) else: raise AssertionError('Config files only support yaml or json format now.') return cfg def extract_config(model_config: dict, is_large_data: bool = False): """selection of different search spaces""" used_cfgs = {"model": {}, "training": {}, 'meta': model_config.get('meta', {})} for field in ['model', 'training']: for k in model_config[field]: cfgs = model_config[field][k] if 'type2' not in cfgs: used_cfg = cfgs else: if not is_large_data: used_cfg = {k: v for k, v in cfgs.items() if not k.endswith('2')} else: used_cfg = {k[:-1]: v for k, v in cfgs.items() if k.endswith('2')} used_cfgs[field][k] = used_cfg return used_cfgs def make_baseline( model_name, model_config: Union[dict, str], n_num: int, cat_card: Optional[List[int]], n_labels: int, sparsity_scheme: Optional[str] = None, device: Union[str, torch.device] = 'cuda', ) -> TabModel: """Process Model Configs and Call Specific Model APIs""" assert model_name in MODEL_CARDS, f"unrecognized `{model_name}` model name, choose one of valid models in {MODEL_CARDS}" if isinstance(model_config, str): model_config = load_config_from_file(model_config)['model'] if MODEL_CARDS[model_name] is None: raise NotImplementedError("Please add corresponding model implementation to `models` module") if sparsity_scheme is not None: assert 'mlp' in model_name return MODEL_CARDS[model_name]( model_config=model_config, n_num_features=n_num, categories=cat_card, n_labels=n_labels, sparsity_scheme=sparsity_scheme) return MODEL_CARDS[model_name]( model_config=model_config, n_num_features=n_num, categories=cat_card, n_labels=n_labels) def tune( model_name: str = None, search_config: Union[dict, str] = None, dataset: Dataset = None, batch_size: int = 64, patience: int = 8, # a small patience for quick tune n_iterations: int = 50, framework: HPOLib = 'optuna', device: Union[str, torch.device] = 'cuda', output_dir: Optional[str] = None, ) -> 'TabModel': # assert framework in HPOLib, f"hyper tune only support the following frameworks '{HPOLib}'" # device device = torch.device(device) # task params n_num_features = dataset.n_num_features categories = dataset.get_category_sizes('train') if len(categories) == 0: categories = None n_labels = dataset.n_classes or 1 y_std = dataset.y_info.get('std') # for regression # preprocess
MODEL_CARDS = { 'xgboost': None, 'catboost': None, 'lightgbm': None, 'mlp': MLP, 'autoint': AutoInt, 'dcnv2': DCNv2, 'node': NODE, 'ft-transformer': FTTransformer, 'saint': None, 't2g-former': None, 'excel-former': None, } HPOLib = Literal['optuna', 'hyperopt'] # TODO: add 'hyperopt' support def get_model_cards(): return { 'ready': sorted(list([key for key, value in MODEL_CARDS.items() if value])), 'comming soon': sorted(list([key for key, value in MODEL_CARDS.items() if not value])) } def seed_everything(seed=42): ''' Sets the seed of the entire notebook so results are the same every time we run. This is for REPRODUCIBILITY. ''' random.seed(seed) # Set a fixed value for the hash seed os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) torch.manual_seed(seed) if torch.cuda.is_available(): torch.cuda.manual_seed(seed) torch.cuda.manual_seed_all(seed) # When running on the CuDNN backend, two further options must be set torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False def load_config_from_file(file): file = str(file) if file.endswith('.yaml'): with open(file, 'r') as f: cfg = yaml.safe_load(f) elif file.endswith('.json'): with open(file, 'r') as f: cfg = json.load(f) else: raise AssertionError('Config files only support yaml or json format now.') return cfg def extract_config(model_config: dict, is_large_data: bool = False): """selection of different search spaces""" used_cfgs = {"model": {}, "training": {}, 'meta': model_config.get('meta', {})} for field in ['model', 'training']: for k in model_config[field]: cfgs = model_config[field][k] if 'type2' not in cfgs: used_cfg = cfgs else: if not is_large_data: used_cfg = {k: v for k, v in cfgs.items() if not k.endswith('2')} else: used_cfg = {k[:-1]: v for k, v in cfgs.items() if k.endswith('2')} used_cfgs[field][k] = used_cfg return used_cfgs def make_baseline( model_name, model_config: Union[dict, str], n_num: int, cat_card: Optional[List[int]], n_labels: int, sparsity_scheme: Optional[str] = None, device: Union[str, torch.device] = 'cuda', ) -> TabModel: """Process Model Configs and Call Specific Model APIs""" assert model_name in MODEL_CARDS, f"unrecognized `{model_name}` model name, choose one of valid models in {MODEL_CARDS}" if isinstance(model_config, str): model_config = load_config_from_file(model_config)['model'] if MODEL_CARDS[model_name] is None: raise NotImplementedError("Please add corresponding model implementation to `models` module") if sparsity_scheme is not None: assert 'mlp' in model_name return MODEL_CARDS[model_name]( model_config=model_config, n_num_features=n_num, categories=cat_card, n_labels=n_labels, sparsity_scheme=sparsity_scheme) return MODEL_CARDS[model_name]( model_config=model_config, n_num_features=n_num, categories=cat_card, n_labels=n_labels) def tune( model_name: str = None, search_config: Union[dict, str] = None, dataset: Dataset = None, batch_size: int = 64, patience: int = 8, # a small patience for quick tune n_iterations: int = 50, framework: HPOLib = 'optuna', device: Union[str, torch.device] = 'cuda', output_dir: Optional[str] = None, ) -> 'TabModel': # assert framework in HPOLib, f"hyper tune only support the following frameworks '{HPOLib}'" # device device = torch.device(device) # task params n_num_features = dataset.n_num_features categories = dataset.get_category_sizes('train') if len(categories) == 0: categories = None n_labels = dataset.n_classes or 1 y_std = dataset.y_info.get('std') # for regression # preprocess
datas = DataProcessor.prepare(dataset, device=device)
8
2023-10-30 14:55:44+00:00
24k
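A usage sketch for the helpers above (the config path and feature counts are placeholders; `make_baseline` accepts a config file path directly, as its own code shows):

seed_everything(42)
model = make_baseline(
    model_name="mlp",
    model_config="configs/mlp.yaml",  # hypothetical path; yaml or json both load
    n_num=10,                         # number of numerical features
    cat_card=None,                    # no categorical features in this example
    n_labels=2,                       # binary classification
)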
hyperspy/exspy
exspy/tests/models/test_eelsmodel.py
[ { "identifier": "elements_db", "path": "exspy/misc/elements.py", "snippet": "" }, { "identifier": "_GOSH_URL", "path": "exspy/misc/eels/gosh_gos.py", "snippet": "_GOSH_URL = f\"doi:{_GOSH_DOI}/Segger_Guzzinati_Kohl_1.5.0.gosh\"" }, { "identifier": "_GOSH_KNOWN_HASH", "path": ...
import contextlib import io import numpy as np import pooch import pytest import hyperspy.api as hs from unittest import mock from exspy.misc.elements import elements_db as elements from hyperspy.decorators import lazifyTestClass from exspy.misc.eels.gosh_gos import _GOSH_URL, _GOSH_KNOWN_HASH from exspy.signals import EELSSpectrum from exspy.models.eelsmodel import EELSModel from hyperspy.components1d import PowerLaw from hyperspy.components1d import PowerLaw
19,076
# -*- coding: utf-8 -*- # Copyright 2007-2023 The exSpy developers # # This file is part of exSpy. # # exSpy is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # exSpy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with exSpy. If not, see <https://www.gnu.org/licenses/#GPL>. # Dask does not always work nicely with np.errstate, # see: https://github.com/dask/dask/issues/3245, so # filter out divide-by-zero warnings that only appear # when the test is lazy. When the test is not lazy, # internal use of np.errstate means the warnings never # appear in the first place. @pytest.mark.filterwarnings( "ignore:invalid value encountered in subtract:RuntimeWarning" ) @pytest.mark.filterwarnings("ignore:divide by zero encountered in log:RuntimeWarning") @lazifyTestClass class TestCreateEELSModel: def setup_method(self, method): s = EELSSpectrum(np.zeros(200)) s.set_microscope_parameters(100, 10, 10) s.axes_manager[-1].offset = 150 s.add_elements(("B", "C")) self.s = s def test_create_eelsmodel(self): assert isinstance(self.s.create_model(), EELSModel) def test_create_eelsmodel_no_md(self): s = self.s del s.metadata.Acquisition_instrument with pytest.raises(ValueError): s.create_model() def test_auto_add_edges_true(self): m = self.s.create_model(auto_add_edges=True) cnames = [component.name for component in m] assert "B_K" in cnames and "C_K" in cnames def test_gos_hydrogenic(self): m = self.s.create_model(auto_add_edges=True, GOS="hydrogenic") assert m["B_K"].GOS._name == "hydrogenic" m.fit() def test_gos_gosh(self): m = self.s.create_model(auto_add_edges=True, GOS="gosh") assert m["B_K"].GOS._name == "gosh" m.fit() with pytest.raises(ValueError): self.s.create_model(auto_add_edges=True, GOS="not_a_GOS") def test_gos_file(self): gos_file_path = pooch.retrieve( url=_GOSH_URL,
# -*- coding: utf-8 -*- # Copyright 2007-2023 The exSpy developers # # This file is part of exSpy. # # exSpy is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # exSpy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with exSpy. If not, see <https://www.gnu.org/licenses/#GPL>. # Dask does not always work nicely with np.errstate, # see: https://github.com/dask/dask/issues/3245, so # filter out divide-by-zero warnings that only appear # when the test is lazy. When the test is not lazy, # internal use of np.errstate means the warnings never # appear in the first place. @pytest.mark.filterwarnings( "ignore:invalid value encountered in subtract:RuntimeWarning" ) @pytest.mark.filterwarnings("ignore:divide by zero encountered in log:RuntimeWarning") @lazifyTestClass class TestCreateEELSModel: def setup_method(self, method): s = EELSSpectrum(np.zeros(200)) s.set_microscope_parameters(100, 10, 10) s.axes_manager[-1].offset = 150 s.add_elements(("B", "C")) self.s = s def test_create_eelsmodel(self): assert isinstance(self.s.create_model(), EELSModel) def test_create_eelsmodel_no_md(self): s = self.s del s.metadata.Acquisition_instrument with pytest.raises(ValueError): s.create_model() def test_auto_add_edges_true(self): m = self.s.create_model(auto_add_edges=True) cnames = [component.name for component in m] assert "B_K" in cnames and "C_K" in cnames def test_gos_hydrogenic(self): m = self.s.create_model(auto_add_edges=True, GOS="hydrogenic") assert m["B_K"].GOS._name == "hydrogenic" m.fit() def test_gos_gosh(self): m = self.s.create_model(auto_add_edges=True, GOS="gosh") assert m["B_K"].GOS._name == "gosh" m.fit() with pytest.raises(ValueError): self.s.create_model(auto_add_edges=True, GOS="not_a_GOS") def test_gos_file(self): gos_file_path = pooch.retrieve( url=_GOSH_URL,
known_hash=_GOSH_KNOWN_HASH,
2
2023-10-28 20:04:10+00:00
24k
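Outside the test class, the same model-creation flow reads as below (calls taken directly from setup_method and the GOS tests above):

import numpy as np
from exspy.signals import EELSSpectrum

s = EELSSpectrum(np.zeros(200))
s.set_microscope_parameters(100, 10, 10)  # beam energy and angles, as in setup_method
s.axes_manager[-1].offset = 150           # shift the energy-loss axis, as in setup_method
s.add_elements(("B", "C"))
m = s.create_model(auto_add_edges=True, GOS="gosh")
m.fit()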
Elfenreigen/UniChest
train.py
[ { "identifier": "utils", "path": "factory/utils.py", "snippet": "class SmoothedValue(object):\nclass MetricLogger(object):\nclass AttrDict(dict):\n def __init__(self, window_size=20, fmt=None):\n def update(self, value, n=1):\n def synchronize_between_processes(self):\n def median(self):\n ...
import argparse import os import logging import yaml import numpy as np import random import time import datetime import json import math import torch import torch.nn as nn import torch.nn.functional as F import torch.backends.cudnn as cudnn import torch.distributed as dist import socket from pathlib import Path from functools import partial from sklearn.metrics import roc_auc_score from collections import OrderedDict from torch.utils.data import DataLoader from tensorboardX import SummaryWriter from transformers import AutoModel,BertConfig,AutoTokenizer from factory import utils from scheduler import create_scheduler from optim import create_optimizer from engine.train import train,valid_on_cheXpert,valid_on_chestxray14 from models.clip_tqn import CLP_clinical,ModelRes,TQN_Model,TQN_Model_Add,ModelDense,CLP_clinical2 from models.tokenization_bert import BertTokenizer from dataset.dataset_entity import MIMIC_Dataset,Mergetrain_Dataset, Chestxray14_Dataset,CheXpert_Dataset from io import BytesIO
17,491
if args.add_dataset == True: train_dataset = Mergetrain_Dataset(config['train_entity_file'], config['train_fg_query_file_v1'], config['mrsty_file'],config['image_res'], args) else: train_dataset = MIMIC_Dataset(config['train_entity_file'], config['train_fg_query_file_v1'], config['mrsty_file'],config['image_res'], args) train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset,num_replicas=num_tasks, rank=sampler_rank, shuffle=True) train_dataloader = DataLoader( train_dataset, batch_size=config['batch_size'], num_workers=8, pin_memory=True, sampler=train_sampler, collate_fn=None, worker_init_fn=utils.seed_worker, drop_last=True, ) train_dataloader.num_samples = len(train_dataset) train_dataloader.num_batches = len(train_dataloader) val_dataset = Chestxray14_Dataset(config['chestxray_valid_file'],config['image_res']) val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset,num_replicas=num_tasks, rank=sampler_rank, shuffle=True) val_dataloader =DataLoader( val_dataset, batch_size=config['batch_size'], num_workers=8, pin_memory=True, sampler=val_sampler, collate_fn=None, worker_init_fn=utils.seed_worker, drop_last=True, ) val_dataloader.num_samples = len(val_dataset) val_dataloader.num_batches = len(val_dataloader) test_dataset = Chestxray14_Dataset(config['chestxray_test_file'],config['image_res']) test_sampler = torch.utils.data.distributed.DistributedSampler(test_dataset,num_replicas=num_tasks, rank=sampler_rank, shuffle=True) test_dataloader =DataLoader( test_dataset, batch_size=config['batch_size'], num_workers=8, pin_memory=True, sampler=test_sampler, collate_fn=None, worker_init_fn=utils.seed_worker, drop_last=True, ) test_dataloader.num_samples = len(test_dataset) test_dataloader.num_batches = len(test_dataloader) test_dataset_chexpert = CheXpert_Dataset(config['chexpert_valid_file'],config['image_res']) test_sampler_chexpert = torch.utils.data.distributed.DistributedSampler(test_dataset_chexpert,num_replicas=num_tasks, rank=sampler_rank, shuffle=True) test_dataloader_chexpert =DataLoader( test_dataset_chexpert, batch_size=config['batch_size'], num_workers=4, pin_memory=True, sampler=test_sampler_chexpert, collate_fn=None, worker_init_fn=utils.seed_worker, drop_last=True, ) test_dataloader_chexpert.num_samples = len(test_dataset_chexpert) test_dataloader_chexpert.num_batches = len(test_dataloader_chexpert) if args.image_encoder_name == 'resnet': image_encoder = ModelRes(res_base_model='resnet50').cuda() elif args.image_encoder_name == 'dense': image_encoder = ModelDense(dense_base_model = 'densenet121').cuda() if args.bert_model_name == 'emilyalsentzer/Bio_ClinicalBERT': tokenizer = BertTokenizer.from_pretrained(args.bert_model_name) text_encoder = CLP_clinical2(bert_model_name=args.bert_model_name).cuda() else: tokenizer = AutoTokenizer.from_pretrained(args.bert_model_name,do_lower_case=True, local_files_only=True) text_encoder = CLP_clinical(bert_model_name=args.bert_model_name).cuda() if args.bert_pretrained: checkpoint = torch.load(args.bert_pretrained, map_location='cpu') state_dict = checkpoint["state_dict"] text_encoder.load_state_dict(state_dict) print('Load pretrained bert success from: ',args.bert_pretrained) if args.freeze_bert: for param in text_encoder.parameters(): param.requires_grad = False if args.add_dataset: if 'lam' in config: model = TQN_Model_Add(class_num = args.class_num, gate_num = args.gate_num, high_dim = args.high_dim, lam = config['lam']).cuda() else: model = TQN_Model_Add(class_num = args.class_num, gate_num = 
args.gate_num, high_dim = args.high_dim).cuda() else: if 'lam' in config: model = TQN_Model(class_num = args.class_num, lam = config['lam']).cuda() else: model = TQN_Model(class_num = args.class_num).cuda() model = torch.nn.parallel.DistributedDataParallel(model, device_ids = [args.gpu], find_unused_parameters=True, broadcast_buffers=False) model_without_ddp = model.module if args.finetune: image_encoder_without_ddp = image_encoder else: image_encoder = torch.nn.parallel.DistributedDataParallel(image_encoder, device_ids = [args.gpu], find_unused_parameters=True, broadcast_buffers=False) image_encoder_without_ddp = image_encoder.module text_encoder_without_ddp = text_encoder arg_opt = utils.AttrDict(config['optimizer']) optimizer = create_optimizer(arg_opt, model_without_ddp,image_encoder_without_ddp,text_encoder_without_ddp) arg_sche = utils.AttrDict(config['schedular'])
# import ruamel.yaml as yaml def main(args, config): torch.cuda.current_device() torch.cuda._initialized = True print("Total CUDA devices: ", torch.cuda.device_count()) torch.set_default_tensor_type('torch.FloatTensor') utils.init_distributed_mode(args) device = torch.device(args.device) # fix the seed for reproducibility seed = args.seed + utils.get_rank() torch.manual_seed(seed) np.random.seed(seed) random.seed(seed) cudnn.benchmark = True start_epoch = 0 max_epoch = config['schedular']['epochs'] warmup_steps = config['schedular']['warmup_epochs'] num_tasks = utils.get_world_size() global_rank = utils.get_rank() sampler_rank = global_rank print('sampler_rank',sampler_rank,'num_tasks',num_tasks) #### Dataset #### print("Creating dataset") if args.add_dataset == True: train_dataset = Mergetrain_Dataset(config['train_entity_file'], config['train_fg_query_file_v1'], config['mrsty_file'],config['image_res'], args) else: train_dataset = MIMIC_Dataset(config['train_entity_file'], config['train_fg_query_file_v1'], config['mrsty_file'],config['image_res'], args) train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset,num_replicas=num_tasks, rank=sampler_rank, shuffle=True) train_dataloader = DataLoader( train_dataset, batch_size=config['batch_size'], num_workers=8, pin_memory=True, sampler=train_sampler, collate_fn=None, worker_init_fn=utils.seed_worker, drop_last=True, ) train_dataloader.num_samples = len(train_dataset) train_dataloader.num_batches = len(train_dataloader) val_dataset = Chestxray14_Dataset(config['chestxray_valid_file'],config['image_res']) val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset,num_replicas=num_tasks, rank=sampler_rank, shuffle=True) val_dataloader =DataLoader( val_dataset, batch_size=config['batch_size'], num_workers=8, pin_memory=True, sampler=val_sampler, collate_fn=None, worker_init_fn=utils.seed_worker, drop_last=True, ) val_dataloader.num_samples = len(val_dataset) val_dataloader.num_batches = len(val_dataloader) test_dataset = Chestxray14_Dataset(config['chestxray_test_file'],config['image_res']) test_sampler = torch.utils.data.distributed.DistributedSampler(test_dataset,num_replicas=num_tasks, rank=sampler_rank, shuffle=True) test_dataloader =DataLoader( test_dataset, batch_size=config['batch_size'], num_workers=8, pin_memory=True, sampler=test_sampler, collate_fn=None, worker_init_fn=utils.seed_worker, drop_last=True, ) test_dataloader.num_samples = len(test_dataset) test_dataloader.num_batches = len(test_dataloader) test_dataset_chexpert = CheXpert_Dataset(config['chexpert_valid_file'],config['image_res']) test_sampler_chexpert = torch.utils.data.distributed.DistributedSampler(test_dataset_chexpert,num_replicas=num_tasks, rank=sampler_rank, shuffle=True) test_dataloader_chexpert =DataLoader( test_dataset_chexpert, batch_size=config['batch_size'], num_workers=4, pin_memory=True, sampler=test_sampler_chexpert, collate_fn=None, worker_init_fn=utils.seed_worker, drop_last=True, ) test_dataloader_chexpert.num_samples = len(test_dataset_chexpert) test_dataloader_chexpert.num_batches = len(test_dataloader_chexpert) if args.image_encoder_name == 'resnet': image_encoder = ModelRes(res_base_model='resnet50').cuda() elif args.image_encoder_name == 'dense': image_encoder = ModelDense(dense_base_model = 'densenet121').cuda() if args.bert_model_name == 'emilyalsentzer/Bio_ClinicalBERT': tokenizer = BertTokenizer.from_pretrained(args.bert_model_name) text_encoder = CLP_clinical2(bert_model_name=args.bert_model_name).cuda() 
else: tokenizer = AutoTokenizer.from_pretrained(args.bert_model_name,do_lower_case=True, local_files_only=True) text_encoder = CLP_clinical(bert_model_name=args.bert_model_name).cuda() if args.bert_pretrained: checkpoint = torch.load(args.bert_pretrained, map_location='cpu') state_dict = checkpoint["state_dict"] text_encoder.load_state_dict(state_dict) print('Load pretrained bert success from: ',args.bert_pretrained) if args.freeze_bert: for param in text_encoder.parameters(): param.requires_grad = False if args.add_dataset: if 'lam' in config: model = TQN_Model_Add(class_num = args.class_num, gate_num = args.gate_num, high_dim = args.high_dim, lam = config['lam']).cuda() else: model = TQN_Model_Add(class_num = args.class_num, gate_num = args.gate_num, high_dim = args.high_dim).cuda() else: if 'lam' in config: model = TQN_Model(class_num = args.class_num, lam = config['lam']).cuda() else: model = TQN_Model(class_num = args.class_num).cuda() model = torch.nn.parallel.DistributedDataParallel(model, device_ids = [args.gpu], find_unused_parameters=True, broadcast_buffers=False) model_without_ddp = model.module if args.finetune: image_encoder_without_ddp = image_encoder else: image_encoder = torch.nn.parallel.DistributedDataParallel(image_encoder, device_ids = [args.gpu], find_unused_parameters=True, broadcast_buffers=False) image_encoder_without_ddp = image_encoder.module text_encoder_without_ddp = text_encoder arg_opt = utils.AttrDict(config['optimizer']) optimizer = create_optimizer(arg_opt, model_without_ddp,image_encoder_without_ddp,text_encoder_without_ddp) arg_sche = utils.AttrDict(config['schedular'])
lr_scheduler, _ = create_scheduler(arg_sche, optimizer)
1
2023-10-30 00:24:16+00:00
24k
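The four near-identical sampler/loader stanzas above reduce to one template; a factored sketch (function name is illustrative):

import torch
from torch.utils.data import DataLoader

def make_dist_loader(dataset, batch_size, num_replicas, rank,
                     num_workers=8, worker_init_fn=None):
    # One DistributedSampler per process, shuffling within each replica's shard.
    sampler = torch.utils.data.distributed.DistributedSampler(
        dataset, num_replicas=num_replicas, rank=rank, shuffle=True)
    return DataLoader(dataset, batch_size=batch_size, num_workers=num_workers,
                      pin_memory=True, sampler=sampler,
                      worker_init_fn=worker_init_fn, drop_last=True)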
ifrit98/storage-subnet
neurons/miner.py
[ { "identifier": "hash_data", "path": "storage/shared/ecc.py", "snippet": "def hash_data(data):\n \"\"\"\n Compute a SHA3-256 hash of the input data and return its integer representation.\n\n The function handles both byte-like and non-byte-like inputs by converting non-byte inputs to\n strin...
import os import sys import copy import json import time import torch import typing import base64 import asyncio import aioredis import argparse import threading import traceback import bittensor as bt import storage from collections import defaultdict from Crypto.Random import get_random_bytes from typing import Dict from pprint import pprint, pformat from storage.shared.ecc import ( hash_data, setup_CRS, ECCommitment, ecc_point_to_hex, hex_to_ecc_point, ) from storage.shared.merkle import ( MerkleTree, ) from storage.shared.utils import b64_encode, b64_decode, chunk_data, safe_key_search from storage.miner import ( run, set_weights, ) from storage.miner.utils import ( compute_subsequent_commitment, save_data_to_filesystem, load_from_filesystem, commit_data_with_seed, init_wandb, get_directory_size, get_free_disk_space, update_storage_stats, ) from storage.miner.config import ( config, check_config, add_args, ) from storage.miner.database import ( store_chunk_metadata, update_seed_info, get_chunk_metadata, )
14,993
""" caller_uid = self.metagraph.hotkeys.index( synapse.dendrite.hotkey ) # Get the caller index. prirority = float( self.metagraph.S[caller_uid] ) # Return the stake as the priority. bt.logging.trace( f"Prioritizing {synapse.dendrite.hotkey} with value: ", prirority ) return prirority def retrieve_blacklist_fn( self, synapse: storage.protocol.Retrieve ) -> typing.Tuple[bool, str]: """ Determines whether a given synapse should be blacklisted based on the recognition of the hotkey in the metagraph. This function is used to filter out requests from entities that are not part of the network's current state. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey. Returns: - (bool, str): A tuple where the first element is a boolean indicating whether the synapse's hotkey is blacklisted, and the second element is a string message explaining the reason. If the hotkey is not recognized in the metagraph, the synapse is blacklisted, and the function returns (True, "Unrecognized hotkey"). Otherwise, it returns (False, "Hotkey recognized!"), allowing the synapse to interact with the network. Usage: This method is internally used by the network to ensure that only recognized entities can participate in communication or transactions. """ if synapse.dendrite.hotkey not in self.metagraph.hotkeys: # Ignore requests from unrecognized entities. bt.logging.trace( f"Blacklisting unrecognized hotkey {synapse.dendrite.hotkey}" ) return True, "Unrecognized hotkey" bt.logging.trace( f"Not Blacklisting recognized hotkey {synapse.dendrite.hotkey}" ) return False, "Hotkey recognized!" def retrieve_priority_fn(self, synapse: storage.protocol.Retrieve) -> float: """ Assigns a priority to a given synapse based on the stake of the calling entity in the metagraph. This function is crucial for prioritizing network requests and ensuring that higher-stake entities are given precedence in processing. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey of the caller. Returns: - float: The priority value assigned to the synapse, derived from the stake of the calling hotkey in the metagraph. The priority is determined by the stake associated with the caller's UID in the metagraph. A higher stake results in a higher priority. Usage: This method is used within the network's request handling mechanism to allocate resources and processing time based on the stake-based priority of each request. """ caller_uid = self.metagraph.hotkeys.index( synapse.dendrite.hotkey ) # Get the caller index. prirority = float( self.metagraph.S[caller_uid] ) # Return the stake as the priority. bt.logging.trace( f"Prioritizing {synapse.dendrite.hotkey} with value: ", prirority ) return prirority async def store(self, synapse: storage.protocol.Store) -> storage.protocol.Store: """ Processes the storage request from a synapse by securely storing the provided data and returning a proof of storage. The data is committed using elliptic curve cryptography, stored on the filesystem, and the metadata is recorded in a Redis database. A cryptographic proof of the commitment, along with a digital signature from the server's hotkey, is returned in the synapse for verification by the requester. Args: synapse (storage.protocol.Store): An object containing the data to be stored, encoded in base64 format, along with associated metadata like the cryptographic curve parameters, a seed for the commitment, and the expected commitment group elements. 
Returns: storage.protocol.Store: The synapse is returned with additional fields populated, including the randomness used in the commitment, the commitment point itself, a signature from this storage server's hotkey, and a commitment hash that can be used for chained proofs. The method performs the following operations: 1. Decodes the base64-encoded data into raw bytes. 2. Commits to the data using the provided elliptic curve parameters and the seed to generate a commitment point. 3. Stores the raw byte data in the filesystem using a hash of the data as the filename. 4. Records metadata about the stored data in the Redis database, including the file path, previous seed, and data size. 5. Updates the synapse object with the commitment details and a digital signature. This process ensures the integrity and non-repudiation of the data storage, allowing clients to verify that their data has been stored correctly without the need to retrieve the full data set. Example usage: Assuming an initialized 'committer' object and 'synapse' with necessary data: >>> updated_synapse = self.store(synapse) """ bt.logging.info(f"received store request: {synapse.encrypted_data[:24]}") self.request_count += 1 # Decode the data from base64 to raw bytes encrypted_byte_data = base64.b64decode(synapse.encrypted_data) bt.logging.trace(f"store b64decrypted data: {encrypted_byte_data[:24]}") # Store the data with the hash as the key in the filesystem bt.logging.trace(f"entering hash_data()")
# The MIT License (MIT) # Copyright © 2023 Yuma Rao # Copyright © 2023 philanthrope # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated # documentation files (the “Software”), to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all copies or substantial portions of # the Software. # THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO # THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # import this repo class miner: @classmethod def check_config(cls, config: "bt.Config"): """ Adds neuron-specific arguments to the argument parser. Args: parser (argparse.ArgumentParser): Parser to add arguments to. This class method enriches the argument parser with options specific to the neuron's configuration. """ check_config(cls, config) @classmethod def add_args(cls, parser): """ Adds neuron-specific arguments to the argument parser. Args: parser (argparse.ArgumentParser): Parser to add arguments to. This class method enriches the argument parser with options specific to the neuron's configuration. """ add_args(cls, parser) @classmethod def config(cls): """ Retrieves the configuration for the neuron. Returns: bt.Config: The configuration object for the neuron. This class method returns the neuron's configuration, which is used throughout the neuron's lifecycle for various functionalities and operations. """ return config(cls) subtensor: "bt.subtensor" wallet: "bt.wallet" metagraph: "bt.metagraph" def __init__(self): self.config = miner.config() self.check_config(self.config) bt.logging(config=self.config, logging_dir=self.config.miner.full_path) bt.logging.info(f"{self.config}") bt.logging.info("miner.__init__()") # Init device. bt.logging.debug("loading device") self.device = torch.device(self.config.miner.device) bt.logging.debug(str(self.device)) # Init subtensor bt.logging.debug("loading subtensor") self.subtensor = bt.subtensor(config=self.config) bt.logging.debug(str(self.subtensor)) self.current_block = self.subtensor.get_current_block() # Init wallet. bt.logging.debug("loading wallet") self.wallet = bt.wallet(config=self.config) self.wallet.create_if_non_existent() if not self.config.wallet._mock: if not self.subtensor.is_hotkey_registered_on_subnet( hotkey_ss58=self.wallet.hotkey.ss58_address, netuid=self.config.netuid ): raise Exception( f"Wallet not currently registered on netuid {self.config.netuid}, please first register wallet before running" ) bt.logging.debug(f"wallet: {str(self.wallet)}") # Init metagraph. bt.logging.debug("loading metagraph") self.metagraph = bt.metagraph( netuid=self.config.netuid, network=self.subtensor.network, sync=False ) # Make sure not to sync without passing subtensor self.metagraph.sync(subtensor=self.subtensor) # Sync metagraph with subtensor. 
bt.logging.debug(str(self.metagraph)) # Setup database self.database = aioredis.StrictRedis( host=self.config.database.host, port=self.config.database.port, db=self.config.database.index, socket_keepalive=True, socket_connect_timeout=300, ) self.my_subnet_uid = self.metagraph.hotkeys.index( self.wallet.hotkey.ss58_address ) bt.logging.info(f"Running miner on uid: {self.my_subnet_uid}") # Init wandb. if not self.config.wandb.off: bt.logging.debug("loading wandb") init_wandb(self) # The axon handles request processing, allowing validators to send this process requests. self.axon = bt.axon(wallet=self.wallet, config=self.config) bt.logging.info(f"Axon {self.axon}") # Attach determiners which functions are called when servicing a request. bt.logging.info(f"Attaching forward functions to axon.") self.axon.attach( forward_fn=self.store, blacklist_fn=self.store_blacklist_fn, priority_fn=self.store_priority_fn, ).attach( forward_fn=self.challenge, blacklist_fn=self.challenge_blacklist_fn, priority_fn=self.challenge_priority_fn, ).attach( forward_fn=self.retrieve, blacklist_fn=self.retrieve_blacklist_fn, priority_fn=self.retrieve_priority_fn, ) # Serve passes the axon information to the network + netuid we are hosting on. # This will auto-update if the axon port of external ip have changed. bt.logging.info( f"Serving axon {self.axon} on network: {self.subtensor.chain_endpoint} with netuid: {self.config.netuid}" ) self.axon.serve(netuid=self.config.netuid, subtensor=self.subtensor) # Start starts the miner's axon, making it active on the network. bt.logging.info(f"Starting axon server on port: {self.config.axon.port}") self.axon.start() # Init the event loop. self.loop = asyncio.get_event_loop() # Instantiate runners self.should_exit: bool = False self.is_running: bool = False self.thread: threading.Thread = None self.lock = asyncio.Lock() self.request_timestamps: Dict = {} self.step = 0 # Init the miner's storage request tracker self.request_count = 0 self.start_request_count_timer() self.requests_per_hour = [] self.average_requests_per_hour = 0 # Init the miner's storage usage tracker update_storage_stats(self) def start_request_count_timer(self): """ Initializes and starts a timer for tracking the number of requests received by the miner in an hour. This method sets up a one-hour timer that, upon expiration, calls the `reset_request_count` method to log the number of requests received and reset the count for the next hour. The timer is set to run in a separate thread to avoid blocking the main execution. Usage: Should be called during the initialization of the miner to start tracking requests per hour. """ self.request_count_timer = threading.Timer(3600, self.reset_request_count) self.request_count_timer.start() def reset_request_count(self): """ Logs the number of requests received in the last hour and resets the count. This method is automatically called when the one-hour timer set by `start_request_count_timer` expires. It logs the count of requests received in the last hour and then resets the count. Additionally, it restarts the timer for the next hour. Usage: This method is intended to be called automatically by a timer and typically should not be called directly. 
""" bt.logging.info( f"Number of requests received in the last hour: {self.request_count}" ) self.requests_per_hour.append(self.request_count) bt.logging.info(f"Requests per hour: {self.requests_per_hour}") self.average_requests_per_hour = sum(self.requests_per_hour) / len( self.requests_per_hour ) bt.logging.info(f"Average requests per hour: {self.average_requests_per_hour}") self.request_count = 0 self.start_request_count_timer() @property async def total_storage(self): """ Calculates the total size of data stored by the miner. This method fetches all data keys from the Redis database and sums up the size of each data object. It provides an estimate of the total amount of data currently held by the miner. Returns: int: Total size of data (in bytes) stored by the miner. Example: >>> miner.total_storage() 102400 # Example output indicating 102,400 bytes of data stored """ # Fetch all keys from Redis all_keys = await safe_key_search(self.database, "*") # Filter out keys that contain a period (temporary, remove later) filtered_keys = [key for key in all_keys if b"." not in key] # Get the size of each data object and sum them up total_size = sum( [ await get_chunk_metadata(self.database, key).get(b"size", 0) for key in filtered_keys ] ) return total_size def store_blacklist_fn( self, synapse: storage.protocol.Store ) -> typing.Tuple[bool, str]: """ Determines whether a given synapse should be blacklisted based on the recognition of the hotkey in the metagraph. This function is used to filter out requests from entities that are not part of the network's current state. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey. Returns: - (bool, str): A tuple where the first element is a boolean indicating whether the synapse's hotkey is blacklisted, and the second element is a string message explaining the reason. If the hotkey is not recognized in the metagraph, the synapse is blacklisted, and the function returns (True, "Unrecognized hotkey"). Otherwise, it returns (False, "Hotkey recognized!"), allowing the synapse to interact with the network. Usage: This method is internally used by the network to ensure that only recognized entities can participate in communication or transactions. """ if synapse.dendrite.hotkey not in self.metagraph.hotkeys: # Ignore requests from unrecognized entities. bt.logging.trace( f"Blacklisting unrecognized hotkey {synapse.dendrite.hotkey}" ) return True, "Unrecognized hotkey" bt.logging.trace( f"Not Blacklisting recognized hotkey {synapse.dendrite.hotkey}" ) return False, "Hotkey recognized!" def store_priority_fn(self, synapse: storage.protocol.Store) -> float: """ Assigns a priority to a given synapse based on the stake of the calling entity in the metagraph. This function is crucial for prioritizing network requests and ensuring that higher-stake entities are given precedence in processing. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey of the caller. Returns: - float: The priority value assigned to the synapse, derived from the stake of the calling hotkey in the metagraph. The priority is determined by the stake associated with the caller's UID in the metagraph. A higher stake results in a higher priority. Usage: This method is used within the network's request handling mechanism to allocate resources and processing time based on the stake-based priority of each request. 
""" caller_uid = self.metagraph.hotkeys.index( synapse.dendrite.hotkey ) # Get the caller index. prirority = float( self.metagraph.S[caller_uid] ) # Return the stake as the priority. bt.logging.trace( f"Prioritizing {synapse.dendrite.hotkey} with value: ", prirority ) return prirority def challenge_blacklist_fn( self, synapse: storage.protocol.Challenge ) -> typing.Tuple[bool, str]: """ Determines whether a given synapse should be blacklisted based on the recognition of the hotkey in the metagraph. This function is used to filter out requests from entities that are not part of the network's current state. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey. Returns: - (bool, str): A tuple where the first element is a boolean indicating whether the synapse's hotkey is blacklisted, and the second element is a string message explaining the reason. If the hotkey is not recognized in the metagraph, the synapse is blacklisted, and the function returns (True, "Unrecognized hotkey"). Otherwise, it returns (False, "Hotkey recognized!"), allowing the synapse to interact with the network. Usage: This method is internally used by the network to ensure that only recognized entities can participate in communication or transactions. """ if synapse.dendrite.hotkey not in self.metagraph.hotkeys: # Ignore requests from unrecognized entities. bt.logging.trace( f"Blacklisting unrecognized hotkey {synapse.dendrite.hotkey}" ) return True, "Unrecognized hotkey" bt.logging.trace( f"Not Blacklisting recognized hotkey {synapse.dendrite.hotkey}" ) return False, "Hotkey recognized!" def challenge_priority_fn(self, synapse: storage.protocol.Challenge) -> float: """ Assigns a priority to a given synapse based on the stake of the calling entity in the metagraph. This function is crucial for prioritizing network requests and ensuring that higher-stake entities are given precedence in processing. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey of the caller. Returns: - float: The priority value assigned to the synapse, derived from the stake of the calling hotkey in the metagraph. The priority is determined by the stake associated with the caller's UID in the metagraph. A higher stake results in a higher priority. Usage: This method is used within the network's request handling mechanism to allocate resources and processing time based on the stake-based priority of each request. """ caller_uid = self.metagraph.hotkeys.index( synapse.dendrite.hotkey ) # Get the caller index. prirority = float( self.metagraph.S[caller_uid] ) # Return the stake as the priority. bt.logging.trace( f"Prioritizing {synapse.dendrite.hotkey} with value: ", prirority ) return prirority def retrieve_blacklist_fn( self, synapse: storage.protocol.Retrieve ) -> typing.Tuple[bool, str]: """ Determines whether a given synapse should be blacklisted based on the recognition of the hotkey in the metagraph. This function is used to filter out requests from entities that are not part of the network's current state. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey. Returns: - (bool, str): A tuple where the first element is a boolean indicating whether the synapse's hotkey is blacklisted, and the second element is a string message explaining the reason. 
If the hotkey is not recognized in the metagraph, the synapse is blacklisted, and the function returns (True, "Unrecognized hotkey"). Otherwise, it returns (False, "Hotkey recognized!"), allowing the synapse to interact with the network. Usage: This method is internally used by the network to ensure that only recognized entities can participate in communication or transactions. """ if synapse.dendrite.hotkey not in self.metagraph.hotkeys: # Ignore requests from unrecognized entities. bt.logging.trace( f"Blacklisting unrecognized hotkey {synapse.dendrite.hotkey}" ) return True, "Unrecognized hotkey" bt.logging.trace( f"Not Blacklisting recognized hotkey {synapse.dendrite.hotkey}" ) return False, "Hotkey recognized!" def retrieve_priority_fn(self, synapse: storage.protocol.Retrieve) -> float: """ Assigns a priority to a given synapse based on the stake of the calling entity in the metagraph. This function is crucial for prioritizing network requests and ensuring that higher-stake entities are given precedence in processing. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey of the caller. Returns: - float: The priority value assigned to the synapse, derived from the stake of the calling hotkey in the metagraph. The priority is determined by the stake associated with the caller's UID in the metagraph. A higher stake results in a higher priority. Usage: This method is used within the network's request handling mechanism to allocate resources and processing time based on the stake-based priority of each request. """ caller_uid = self.metagraph.hotkeys.index( synapse.dendrite.hotkey ) # Get the caller index. priority = float( self.metagraph.S[caller_uid] ) # Return the stake as the priority. bt.logging.trace( f"Prioritizing {synapse.dendrite.hotkey} with value: ", priority ) return priority async def store(self, synapse: storage.protocol.Store) -> storage.protocol.Store: """ Processes the storage request from a synapse by securely storing the provided data and returning a proof of storage. The data is committed using elliptic curve cryptography, stored on the filesystem, and the metadata is recorded in a Redis database. A cryptographic proof of the commitment, along with a digital signature from the server's hotkey, is returned in the synapse for verification by the requester. Args: synapse (storage.protocol.Store): An object containing the data to be stored, encoded in base64 format, along with associated metadata like the cryptographic curve parameters, a seed for the commitment, and the expected commitment group elements. Returns: storage.protocol.Store: The synapse is returned with additional fields populated, including the randomness used in the commitment, the commitment point itself, a signature from this storage server's hotkey, and a commitment hash that can be used for chained proofs. The method performs the following operations: 1. Decodes the base64-encoded data into raw bytes. 2. Commits to the data using the provided elliptic curve parameters and the seed to generate a commitment point. 3. Stores the raw byte data in the filesystem using a hash of the data as the filename. 4. Records metadata about the stored data in the Redis database, including the file path, previous seed, and data size. 5. Updates the synapse object with the commitment details and a digital signature.
This process ensures the integrity and non-repudiation of the data storage, allowing clients to verify that their data has been stored correctly without the need to retrieve the full data set. Example usage: Assuming an initialized 'committer' object and 'synapse' with necessary data: >>> updated_synapse = self.store(synapse) """ bt.logging.info(f"received store request: {synapse.encrypted_data[:24]}") self.request_count += 1 # Decode the data from base64 to raw bytes encrypted_byte_data = base64.b64decode(synapse.encrypted_data) bt.logging.trace(f"store b64-decoded data: {encrypted_byte_data[:24]}") # Store the data with the hash as the key in the filesystem bt.logging.trace(f"entering hash_data()")
data_hash = hash_data(encrypted_byte_data)
0
2023-10-26 18:54:47+00:00
24k
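The three blacklist/priority handler pairs in the miner above are identical apart from the synapse type, so the shared logic is easy to factor out. Below is a minimal, self-contained sketch of that pattern, assuming stand-in Metagraph/Synapse types in place of the real bittensor objects; only metagraph.hotkeys, metagraph.S, and the caller hotkey (synapse.dendrite.hotkey in the source) are taken from the code above.

from dataclasses import dataclass, field
from typing import List, Tuple

@dataclass
class Metagraph:  # stand-in for the bittensor metagraph used above
    hotkeys: List[str] = field(default_factory=list)
    S: List[float] = field(default_factory=list)  # stake per UID, mirrors metagraph.S

@dataclass
class Synapse:  # stand-in; `hotkey` mirrors synapse.dendrite.hotkey
    hotkey: str

def blacklist_fn(metagraph: Metagraph, synapse: Synapse) -> Tuple[bool, str]:
    """Reject callers whose hotkey is not registered in the metagraph."""
    if synapse.hotkey not in metagraph.hotkeys:
        return True, "Unrecognized hotkey"
    return False, "Hotkey recognized!"

def priority_fn(metagraph: Metagraph, synapse: Synapse) -> float:
    """Serve higher-stake callers first: priority is the caller's stake."""
    caller_uid = metagraph.hotkeys.index(synapse.hotkey)
    return float(metagraph.S[caller_uid])

mg = Metagraph(hotkeys=["hk-a", "hk-b"], S=[10.0, 250.0])
assert blacklist_fn(mg, Synapse("hk-x")) == (True, "Unrecognized hotkey")
assert priority_fn(mg, Synapse("hk-b")) == 250.0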
cpacker/MemGPT
memgpt/agent.py
[ { "identifier": "AgentState", "path": "memgpt/data_types.py", "snippet": "class AgentState:\n def __init__(\n self,\n name: str,\n user_id: uuid.UUID,\n persona: str, # the filename where the persona was originally sourced from\n human: str, # the filename where t...
import datetime import uuid import glob import inspect import os import json import traceback from pathlib import Path from typing import List, Tuple from box import Box from memgpt.data_types import AgentState, Message from memgpt.models import chat_completion_response from memgpt.interface import AgentInterface from memgpt.persistence_manager import PersistenceManager, LocalStateManager from memgpt.config import MemGPTConfig from memgpt.system import get_login_event, package_function_response, package_summarize_message, get_initial_boot_messages from memgpt.memory import CoreMemory as InContextMemory, summarize_messages from memgpt.llm_api_tools import create, is_context_overflow_error from memgpt.utils import ( get_tool_call_id, get_local_time, parse_json, united_diff, printd, count_tokens, get_schema_diff, validate_function_response, verify_first_message_correctness, ) from memgpt.constants import ( FIRST_MESSAGE_ATTEMPTS, MESSAGE_SUMMARY_WARNING_FRAC, MESSAGE_SUMMARY_TRUNC_TOKEN_FRAC, MESSAGE_SUMMARY_TRUNC_KEEP_N_LAST, CORE_MEMORY_HUMAN_CHAR_LIMIT, CORE_MEMORY_PERSONA_CHAR_LIMIT, LLM_MAX_TOKENS, CLI_WARNING_PREFIX, JSON_ENSURE_ASCII, ) from .errors import LLMError from .functions.functions import USER_FUNCTIONS_DIR, load_all_function_sets
16,133
Message.dict_to_message( agent_id=self.agent_state.id, user_id=self.agent_state.user_id, model=self.model, openai_message_dict={ "role": "tool", "name": function_name, "content": function_response, "tool_call_id": tool_call_id, }, ) ) # extend conversation with function response else: # Standard non-function reply self.interface.internal_monologue(response_message.content) messages.append( Message.dict_to_message( agent_id=self.agent_state.id, user_id=self.agent_state.user_id, model=self.model, openai_message_dict=response_message.model_dump(), ) ) # extend conversation with assistant's reply heartbeat_request = None function_failed = None return messages, heartbeat_request, function_failed def step(self, user_message, first_message=False, first_message_retry_limit=FIRST_MESSAGE_ATTEMPTS, skip_verify=False): """Top-level event message handler for the MemGPT agent""" try: # Step 0: add user message if user_message is not None: self.interface.user_message(user_message) packed_user_message = {"role": "user", "content": user_message} # Special handling for AutoGen messages with 'name' field try: user_message_json = json.loads(user_message) # Treat 'name' as a special field # If it exists in the input message, elevate it to the 'message' level if "name" in user_message_json: packed_user_message["name"] = user_message_json["name"] user_message_json.pop("name", None) packed_user_message["content"] = json.dumps(user_message_json, ensure_ascii=JSON_ENSURE_ASCII) except Exception as e: print(f"{CLI_WARNING_PREFIX}handling of 'name' field failed with: {e}") input_message_sequence = self.messages + [packed_user_message] else: input_message_sequence = self.messages if len(input_message_sequence) > 1 and input_message_sequence[-1]["role"] != "user": printd(f"{CLI_WARNING_PREFIX}Attempting to run ChatCompletion without user as the last message in the queue") # Step 1: send the conversation and available functions to GPT if not skip_verify and (first_message or self.messages_total == self.messages_total_init): printd(f"This is the first message. Running extra verifier on AI response.") counter = 0 while True: response = self._get_ai_reply( message_sequence=input_message_sequence, first_message=True, # passed through to the prompt formatter ) if verify_first_message_correctness(response, require_monologue=self.first_message_verify_mono): break counter += 1 if counter > first_message_retry_limit: raise Exception(f"Hit first message retry limit ({first_message_retry_limit})") else: response = self._get_ai_reply( message_sequence=input_message_sequence, ) # Step 2: check if LLM wanted to call a function # (if yes) Step 3: call the function # (if yes) Step 4: send the info on the function call and function response to LLM response_message = response.choices[0].message response_message_copy = response_message.copy() all_response_messages, heartbeat_request, function_failed = self._handle_ai_response(response_message) # Add the extra metadata to the assistant response # (e.g. 
enough metadata to enable recreating the API call) # assert "api_response" not in all_response_messages[0] # all_response_messages[0]["api_response"] = response_message_copy # assert "api_args" not in all_response_messages[0] # all_response_messages[0]["api_args"] = { # "model": self.model, # "messages": input_message_sequence, # "functions": self.functions, # } # Step 4: extend the message history if user_message is not None: all_new_messages = [ Message.dict_to_message( agent_id=self.agent_state.id, user_id=self.agent_state.user_id, model=self.model, openai_message_dict=packed_user_message, ) ] + all_response_messages else: all_new_messages = all_response_messages # Check the memory pressure and potentially issue a memory pressure warning current_total_tokens = response.usage.total_tokens active_memory_warning = False # We can't do summarize logic properly if context_window is undefined if self.agent_state.llm_config.context_window is None: # Fallback if for some reason context_window is missing, just set to the default print(f"{CLI_WARNING_PREFIX}could not find context_window in config, setting to default {LLM_MAX_TOKENS['DEFAULT']}") print(f"{self.agent_state}") self.agent_state.llm_config.context_window = ( str(LLM_MAX_TOKENS[self.model]) if (self.model is not None and self.model in LLM_MAX_TOKENS) else str(LLM_MAX_TOKENS["DEFAULT"]) )
def link_functions(function_schemas): """Link function definitions to list of function schemas""" # need to dynamically link the functions # the saved agent.functions will just have the schemas, but we need to # go through the functions library and pull the respective python functions # Available functions is a mapping from: # function_name -> { # json_schema: schema # python_function: function # } # agent.functions is a list of schemas (OpenAI kwarg functions style, see: https://platform.openai.com/docs/api-reference/chat/create) # [{'name': ..., 'description': ...}, {...}] available_functions = load_all_function_sets() linked_function_set = {} for f_schema in function_schemas: # Attempt to find the function in the existing function library f_name = f_schema.get("name") if f_name is None: raise ValueError(f"While loading agent.state.functions encountered a bad function schema object with no name:\n{f_schema}") linked_function = available_functions.get(f_name) if linked_function is None: raise ValueError( f"Function '{f_name}' was specified in agent.state.functions, but is not in function library:\n{available_functions.keys()}" ) # Once we find a matching function, make sure the schema is identical if json.dumps(f_schema, ensure_ascii=JSON_ENSURE_ASCII) != json.dumps( linked_function["json_schema"], ensure_ascii=JSON_ENSURE_ASCII ): # error_message = ( # f"Found matching function '{f_name}' from agent.state.functions inside function library, but schemas are different." # + f"\n>>>agent.state.functions\n{json.dumps(f_schema, indent=2, ensure_ascii=JSON_ENSURE_ASCII)}" # + f"\n>>>function library\n{json.dumps(linked_function['json_schema'], indent=2, ensure_ascii=JSON_ENSURE_ASCII)}" # ) schema_diff = get_schema_diff(f_schema, linked_function["json_schema"]) error_message = ( f"Found matching function '{f_name}' from agent.state.functions inside function library, but schemas are different.\n" + "".join(schema_diff) ) # NOTE to handle old configs, instead of erroring here let's just warn # raise ValueError(error_message) printd(error_message) linked_function_set[f_name] = linked_function return linked_function_set def initialize_memory(ai_notes, human_notes): if ai_notes is None: raise ValueError(ai_notes) if human_notes is None: raise ValueError(human_notes) memory = InContextMemory(human_char_limit=CORE_MEMORY_HUMAN_CHAR_LIMIT, persona_char_limit=CORE_MEMORY_PERSONA_CHAR_LIMIT) memory.edit_persona(ai_notes) memory.edit_human(human_notes) return memory def construct_system_with_memory(system, memory, memory_edit_timestamp, archival_memory=None, recall_memory=None, include_char_count=True): full_system_message = "\n".join( [ system, "\n", f"### Memory [last modified: {memory_edit_timestamp.strip()}]", f"{len(recall_memory) if recall_memory else 0} previous messages between you and the user are stored in recall memory (use functions to access them)", f"{len(archival_memory) if archival_memory else 0} total memories you created are stored in archival memory (use functions to access them)", "\nCore memory shown below (limited in size, additional information stored in archival / recall memory):", f'<persona characters="{len(memory.persona)}/{memory.persona_char_limit}">' if include_char_count else "<persona>", memory.persona, "</persona>", f'<human characters="{len(memory.human)}/{memory.human_char_limit}">' if include_char_count else "<human>", memory.human, "</human>", ] ) return full_system_message def initialize_message_sequence( model, system, memory, archival_memory=None, 
recall_memory=None, memory_edit_timestamp=None, include_initial_boot_message=True, ): if memory_edit_timestamp is None: memory_edit_timestamp = get_local_time() full_system_message = construct_system_with_memory( system, memory, memory_edit_timestamp, archival_memory=archival_memory, recall_memory=recall_memory ) first_user_message = get_login_event() # event letting MemGPT know the user just logged in if include_initial_boot_message: if model is not None and "gpt-3.5" in model: initial_boot_messages = get_initial_boot_messages("startup_with_send_message_gpt35") else: initial_boot_messages = get_initial_boot_messages("startup_with_send_message") messages = ( [ {"role": "system", "content": full_system_message}, ] + initial_boot_messages + [ {"role": "user", "content": first_user_message}, ] ) else: messages = [ {"role": "system", "content": full_system_message}, {"role": "user", "content": first_user_message}, ] return messages class Agent(object): def __init__( self, agent_state: AgentState, interface: AgentInterface, # extras messages_total=None, # TODO remove? first_message_verify_mono=True, # TODO move to config? memgpt_config: MemGPTConfig = None, ): # Hold a copy of the state that was used to init the agent self.agent_state = agent_state # gpt-4, gpt-3.5-turbo, ... self.model = agent_state.llm_config.model # Store the system instructions (used to rebuild memory) if "system" not in agent_state.state: raise ValueError(f"'system' not found in provided AgentState") self.system = agent_state.state["system"] if "functions" not in agent_state.state: raise ValueError(f"'functions' not found in provided AgentState") # Store the functions schemas (this is passed as an argument to ChatCompletion) self.functions = agent_state.state["functions"] # these are the schema # Link the actual python functions corresponding to the schemas self.functions_python = {k: v["python_function"] for k, v in link_functions(function_schemas=self.functions).items()} assert all([callable(f) for k, f in self.functions_python.items()]), self.functions_python # Initialize the memory object if "persona" not in agent_state.state: raise ValueError(f"'persona' not found in provided AgentState") if "human" not in agent_state.state: raise ValueError(f"'human' not found in provided AgentState") self.memory = initialize_memory(ai_notes=agent_state.state["persona"], human_notes=agent_state.state["human"]) # Interface must implement: # - internal_monologue # - assistant_message # - function_message # ... 
# Different interfaces can handle events differently # e.g., print in CLI vs send a discord message with a discord bot self.interface = interface # Create the persistence manager object based on the AgentState info # TODO self.persistence_manager = LocalStateManager(agent_state=agent_state) # State needed for heartbeat pausing self.pause_heartbeats_start = None self.pause_heartbeats_minutes = 0 self.first_message_verify_mono = first_message_verify_mono # Controls if the convo memory pressure warning is triggered # When an alert is sent in the message queue, set this to True (to avoid repeat alerts) # When the summarizer is run, set this back to False (to reset) self.agent_alerted_about_memory_pressure = False # Read local config if not provided if not memgpt_config: self.memgpt_config = MemGPTConfig() else: self.memgpt_config = memgpt_config # Initialize connection to metadata store # self.ms = MetadataStore(self.memgpt_config) # Once the memory object is initialized, use it to "bake" the system message if "messages" in agent_state.state and agent_state.state["messages"] is not None: # print(f"Agent.__init__ :: loading, state={agent_state.state['messages']}") if not isinstance(agent_state.state["messages"], list): raise ValueError(f"'messages' in AgentState was bad type: {type(agent_state.state['messages'])}") assert all([isinstance(msg, str) for msg in agent_state.state["messages"]]) # Convert to IDs, and pull from the database self._messages = [ self.persistence_manager.recall_memory.storage.get(uuid.UUID(msg_id)) for msg_id in agent_state.state["messages"] ] assert all([isinstance(msg, Message) for msg in self._messages]), (self._messages, agent_state.state["messages"]) else: # print(f"Agent.__init__ :: creating, state={agent_state.state['messages']}") init_messages = initialize_message_sequence( self.model, self.system, self.memory, ) init_messages_objs = [] for msg in init_messages: init_messages_objs.append( Message.dict_to_message( agent_id=self.agent_state.id, user_id=self.agent_state.user_id, model=self.model, openai_message_dict=msg ) ) self._messages = [] self.messages_total = 0 self._append_to_messages(added_messages=init_messages_objs) assert all([isinstance(msg, Message) for msg in self._messages]), (self._messages, init_messages) # Keep track of the total number of messages throughout all time self.messages_total = messages_total if messages_total is not None else (len(self._messages) - 1) # (-system) # self.messages_total_init = self.messages_total self.messages_total_init = len(self._messages) - 1 printd(f"Agent initialized, self.messages_total={self.messages_total}") # Create the agent in the DB # self.save() self.update_state() @property def messages(self) -> List[dict]: """Getter method that converts the internal Message list into OpenAI-style dicts""" return [msg.to_openai_dict() for msg in self._messages] @messages.setter def messages(self, value): raise Exception("Modifying message list directly not allowed") def _trim_messages(self, num): """Trim messages from the front, not including the system message""" self.persistence_manager.trim_messages(num) new_messages = [self.messages[0]] + self.messages[num:] self._messages = new_messages def _prepend_to_messages(self, added_messages: List[Message]): """Wrapper around self.messages.prepend to allow additional calls to a state/persistence manager""" assert all([isinstance(msg, Message) for msg in added_messages]) self.persistence_manager.prepend_to_messages(added_messages) new_messages = [self.messages[0]] +
added_messages + self.messages[1:] # prepend (no system) self._messages = new_messages self.messages_total += len(added_messages) # still should increment the message counter (summaries are additions too) def _append_to_messages(self, added_messages: List[Message]): """Wrapper around self.messages.append to allow additional calls to a state/persistence manager""" assert all([isinstance(msg, Message) for msg in added_messages]) self.persistence_manager.append_to_messages(added_messages) # strip extra metadata if it exists # for msg in added_messages: # msg.pop("api_response", None) # msg.pop("api_args", None) new_messages = self._messages + added_messages # append self._messages = new_messages self.messages_total += len(added_messages) def _swap_system_message(self, new_system_message: Message): assert isinstance(new_system_message, Message) assert new_system_message.role == "system", new_system_message assert self._messages[0].role == "system", self._messages self.persistence_manager.swap_system_message(new_system_message) new_messages = [new_system_message] + self._messages[1:] # swap index 0 (system) self._messages = new_messages def _get_ai_reply( self, message_sequence: List[dict], function_call: str = "auto", first_message: bool = False, # hint ) -> chat_completion_response.ChatCompletionResponse: """Get response from LLM API""" try: response = create( agent_state=self.agent_state, messages=message_sequence, functions=self.functions, functions_python=self.functions_python, function_call=function_call, # hint first_message=first_message, ) # special case for 'length' if response.choices[0].finish_reason == "length": raise Exception("Finish reason was length (maximum context length)") # catches for soft errors if response.choices[0].finish_reason not in ["stop", "function_call", "tool_calls"]: raise Exception(f"API call finished with bad finish reason: {response}") # unpack with response.choices[0].message.content return response except Exception as e: raise e def _handle_ai_response( self, response_message: chat_completion_response.Message, override_tool_call_id: bool = True ) -> Tuple[List[Message], bool, bool]: """Handles parsing and function execution""" messages = [] # append these to the history when done # Step 2: check if LLM wanted to call a function if response_message.function_call or (response_message.tool_calls is not None and len(response_message.tool_calls) > 0): if response_message.function_call: raise DeprecationWarning(response_message) if response_message.tool_calls is not None and len(response_message.tool_calls) > 1: raise NotImplementedError(f">1 tool call not supported") # The content is then the internal monologue, not chat self.interface.internal_monologue(response_message.content) # generate UUID for tool call if override_tool_call_id or response_message.function_call: tool_call_id = get_tool_call_id() # needs to be a string for JSON response_message.tool_calls[0].id = tool_call_id else: tool_call_id = response_message.tool_calls[0].id assert tool_call_id is not None # should be defined # only necessary to add the tool_call_id to a function call (antipattern) # response_message_dict = response_message.model_dump() # response_message_dict["tool_call_id"] = tool_call_id # role: assistant (requesting tool call, set tool call ID) messages.append( Message.dict_to_message( agent_id=self.agent_state.id, user_id=self.agent_state.user_id, model=self.model, openai_message_dict=response_message.model_dump(), ) ) # extend conversation with assistant's reply printd(f"Function call
message: {messages[-1]}") # Step 3: call the function # Note: the JSON response may not always be valid; be sure to handle errors # Failure case 1: function name is wrong function_call = ( response_message.function_call if response_message.function_call is not None else response_message.tool_calls[0].function ) function_name = function_call.name printd(f"Request to call function {function_name} with tool_call_id: {tool_call_id}") try: function_to_call = self.functions_python[function_name] except KeyError as e: error_msg = f"No function named {function_name}" function_response = package_function_response(False, error_msg) messages.append( Message.dict_to_message( agent_id=self.agent_state.id, user_id=self.agent_state.user_id, model=self.model, openai_message_dict={ "role": "tool", "name": function_name, "content": function_response, "tool_call_id": tool_call_id, }, ) ) # extend conversation with function response self.interface.function_message(f"Error: {error_msg}") return messages, None, True # force a heartbeat to allow agent to handle error # Failure case 2: function name is OK, but function args are bad JSON try: raw_function_args = function_call.arguments function_args = parse_json(raw_function_args) except Exception as e: error_msg = f"Error parsing JSON for function '{function_name}' arguments: {raw_function_args}" function_response = package_function_response(False, error_msg) messages.append( Message.dict_to_message( agent_id=self.agent_state.id, user_id=self.agent_state.user_id, model=self.model, openai_message_dict={ "role": "tool", "name": function_name, "content": function_response, "tool_call_id": tool_call_id, }, ) ) # extend conversation with function response self.interface.function_message(f"Error: {error_msg}") return messages, None, True # force a heartbeat to allow agent to handle error # (Still parsing function args) # Handle requests for immediate heartbeat heartbeat_request = function_args.pop("request_heartbeat", None) if not (isinstance(heartbeat_request, bool) or heartbeat_request is None): printd( f"{CLI_WARNING_PREFIX}'request_heartbeat' arg parsed was not a bool or None, type={type(heartbeat_request)}, value={heartbeat_request}" ) heartbeat_request = None # Failure case 3: function failed during execution self.interface.function_message(f"Running {function_name}({function_args})") try: spec = inspect.getfullargspec(function_to_call).annotations for name, arg in function_args.items(): if isinstance(function_args[name], dict): function_args[name] = spec[name](**function_args[name]) function_args["self"] = self # need to attach self to arg since it's dynamically linked function_response = function_to_call(**function_args) if function_name in ["conversation_search", "conversation_search_date", "archival_memory_search"]: # with certain functions we rely on the paging mechanism to handle overflow truncate = False else: # but by default, we add a truncation safeguard to prevent bad functions from # overflow the agent context window truncate = True function_response_string = validate_function_response(function_response, truncate=truncate) function_args.pop("self", None) function_response = package_function_response(True, function_response_string) function_failed = False except Exception as e: function_args.pop("self", None) # error_msg = f"Error calling function {function_name} with args {function_args}: {str(e)}" # Less detailed - don't provide full args, idea is that it should be in recent context so no need (just adds noise) error_msg = f"Error calling function 
{function_name}: {str(e)}" error_msg_user = f"{error_msg}\n{traceback.format_exc()}" printd(error_msg_user) function_response = package_function_response(False, error_msg) messages.append( Message.dict_to_message( agent_id=self.agent_state.id, user_id=self.agent_state.user_id, model=self.model, openai_message_dict={ "role": "tool", "name": function_name, "content": function_response, "tool_call_id": tool_call_id, }, ) ) # extend conversation with function response self.interface.function_message(f"Error: {error_msg}") return messages, None, True # force a heartbeat to allow agent to handle error # If no failures happened along the way: ... # Step 4: send the info on the function call and function response to GPT self.interface.function_message(f"Success: {function_response_string}") messages.append( Message.dict_to_message( agent_id=self.agent_state.id, user_id=self.agent_state.user_id, model=self.model, openai_message_dict={ "role": "tool", "name": function_name, "content": function_response, "tool_call_id": tool_call_id, }, ) ) # extend conversation with function response else: # Standard non-function reply self.interface.internal_monologue(response_message.content) messages.append( Message.dict_to_message( agent_id=self.agent_state.id, user_id=self.agent_state.user_id, model=self.model, openai_message_dict=response_message.model_dump(), ) ) # extend conversation with assistant's reply heartbeat_request = None function_failed = None return messages, heartbeat_request, function_failed def step(self, user_message, first_message=False, first_message_retry_limit=FIRST_MESSAGE_ATTEMPTS, skip_verify=False): """Top-level event message handler for the MemGPT agent""" try: # Step 0: add user message if user_message is not None: self.interface.user_message(user_message) packed_user_message = {"role": "user", "content": user_message} # Special handling for AutoGen messages with 'name' field try: user_message_json = json.loads(user_message) # Treat 'name' as a special field # If it exists in the input message, elevate it to the 'message' level if "name" in user_message_json: packed_user_message["name"] = user_message_json["name"] user_message_json.pop("name", None) packed_user_message["content"] = json.dumps(user_message_json, ensure_ascii=JSON_ENSURE_ASCII) except Exception as e: print(f"{CLI_WARNING_PREFIX}handling of 'name' field failed with: {e}") input_message_sequence = self.messages + [packed_user_message] else: input_message_sequence = self.messages if len(input_message_sequence) > 1 and input_message_sequence[-1]["role"] != "user": printd(f"{CLI_WARNING_PREFIX}Attempting to run ChatCompletion without user as the last message in the queue") # Step 1: send the conversation and available functions to GPT if not skip_verify and (first_message or self.messages_total == self.messages_total_init): printd(f"This is the first message. 
Running extra verifier on AI response.") counter = 0 while True: response = self._get_ai_reply( message_sequence=input_message_sequence, first_message=True, # passed through to the prompt formatter ) if verify_first_message_correctness(response, require_monologue=self.first_message_verify_mono): break counter += 1 if counter > first_message_retry_limit: raise Exception(f"Hit first message retry limit ({first_message_retry_limit})") else: response = self._get_ai_reply( message_sequence=input_message_sequence, ) # Step 2: check if LLM wanted to call a function # (if yes) Step 3: call the function # (if yes) Step 4: send the info on the function call and function response to LLM response_message = response.choices[0].message response_message_copy = response_message.copy() all_response_messages, heartbeat_request, function_failed = self._handle_ai_response(response_message) # Add the extra metadata to the assistant response # (e.g. enough metadata to enable recreating the API call) # assert "api_response" not in all_response_messages[0] # all_response_messages[0]["api_response"] = response_message_copy # assert "api_args" not in all_response_messages[0] # all_response_messages[0]["api_args"] = { # "model": self.model, # "messages": input_message_sequence, # "functions": self.functions, # } # Step 4: extend the message history if user_message is not None: all_new_messages = [ Message.dict_to_message( agent_id=self.agent_state.id, user_id=self.agent_state.user_id, model=self.model, openai_message_dict=packed_user_message, ) ] + all_response_messages else: all_new_messages = all_response_messages # Check the memory pressure and potentially issue a memory pressure warning current_total_tokens = response.usage.total_tokens active_memory_warning = False # We can't do summarize logic properly if context_window is undefined if self.agent_state.llm_config.context_window is None: # Fallback if for some reason context_window is missing, just set to the default print(f"{CLI_WARNING_PREFIX}could not find context_window in config, setting to default {LLM_MAX_TOKENS['DEFAULT']}") print(f"{self.agent_state}") self.agent_state.llm_config.context_window = ( str(LLM_MAX_TOKENS[self.model]) if (self.model is not None and self.model in LLM_MAX_TOKENS) else str(LLM_MAX_TOKENS["DEFAULT"]) )
if current_total_tokens > MESSAGE_SUMMARY_WARNING_FRAC * int(self.agent_state.llm_config.context_window):
25
2023-10-11 07:38:37+00:00
24k
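The step() method above guards against context overflow by comparing the completion's reported token usage to a warning fraction of the model's context window, falling back to a default entry when the config omits the window. A minimal sketch of that check follows; the constant values here are illustrative stand-ins for the real ones in memgpt.constants.

MESSAGE_SUMMARY_WARNING_FRAC = 0.75  # assumed value, for illustration only
LLM_MAX_TOKENS = {"DEFAULT": 8192, "gpt-4": 8192}  # trimmed stand-in table

def needs_summary_warning(total_tokens: int, model: str, context_window=None) -> bool:
    """Mirror the fallback in step(): missing/unknown window -> DEFAULT entry."""
    if context_window is None:
        context_window = LLM_MAX_TOKENS.get(model, LLM_MAX_TOKENS["DEFAULT"])
    return total_tokens > MESSAGE_SUMMARY_WARNING_FRAC * int(context_window)

assert needs_summary_warning(7000, "gpt-4") is True       # 7000 > 0.75 * 8192
assert needs_summary_warning(2000, "unknown-model") is False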
PixArt-alpha/PixArt-alpha
train_scripts/train_controlnet.py
[ { "identifier": "IDDPM", "path": "diffusion/iddpm.py", "snippet": "def IDDPM(\n timestep_respacing,\n noise_schedule=\"linear\",\n use_kl=False,\n sigma_small=False,\n predict_xstart=False,\n learn_sigma=True,\n pred_sigma=True,\n rescale_learned_s...
import argparse import datetime import os import sys import time import types import warnings import torch from pathlib import Path from accelerate import Accelerator, InitProcessGroupKwargs from accelerate.utils import DistributedType from mmcv.runner import LogBuffer from torch.utils.data import RandomSampler from diffusion import IDDPM from diffusion.data.builder import build_dataset, build_dataloader, set_data_root from diffusion.model.builder import build_model from diffusion.model.nets import PixArtMS, ControlPixArtHalf, ControlPixArtMSHalf from diffusion.utils.checkpoint import save_checkpoint, load_checkpoint from diffusion.utils.data_sampler import AspectRatioBatchSampler, BalancedAspectRatioBatchSampler from diffusion.utils.dist_utils import synchronize, get_world_size, clip_grad_norm_ from diffusion.utils.logger import get_root_logger from diffusion.utils.lr_scheduler import build_lr_scheduler from diffusion.utils.misc import set_random_seed, read_config, init_random_seed, DebugUnderflowOverflow from diffusion.utils.optimizer import build_optimizer, auto_scale_lr from accelerate import FullyShardedDataParallelPlugin from torch.distributed.fsdp.fully_sharded_data_parallel import FullStateDictConfig
16,472
config = read_config(args.config) if args.work_dir is not None: # update configs according to CLI args if args.work_dir is not None config.work_dir = args.work_dir if args.cloud: config.data_root = '/data/data' if args.data_root: config.data_root = args.data_root if args.resume_from is not None: config.load_from = None config.resume_from = dict( checkpoint=args.resume_from, load_ema=False, resume_optimizer=args.resume_optimizer, resume_lr_scheduler=args.resume_lr_scheduler) if args.debug: config.log_interval = 1 config.train_batch_size = 6 config.optimizer.update({'lr': args.lr}) os.umask(0o000) # file permission: 666; dir permission: 777 os.makedirs(config.work_dir, exist_ok=True) init_handler = InitProcessGroupKwargs() init_handler.timeout = datetime.timedelta(seconds=9600) # change timeout to avoid a strange NCCL bug # Initialize accelerator and tensorboard logging if config.use_fsdp: init_train = 'FSDP' set_fsdp_env() fsdp_plugin = FullyShardedDataParallelPlugin(state_dict_config=FullStateDictConfig(offload_to_cpu=False, rank0_only=False),) else: init_train = 'DDP' fsdp_plugin = None even_batches = True if config.multi_scale: even_batches = False accelerator = Accelerator( mixed_precision=config.mixed_precision, gradient_accumulation_steps=config.gradient_accumulation_steps, log_with=args.report_to, project_dir=os.path.join(config.work_dir, "logs"), fsdp_plugin=fsdp_plugin, even_batches=even_batches, kwargs_handlers=[init_handler] ) logger = get_root_logger(os.path.join(config.work_dir, 'train_log.log')) config.seed = init_random_seed(config.get('seed', None)) set_random_seed(config.seed) if accelerator.is_main_process: config.dump(os.path.join(config.work_dir, 'config.py')) logger.info(f"Config: \n{config.pretty_text}") logger.info(f"World_size: {get_world_size()}, seed: {config.seed}") logger.info(f"Initializing: {init_train} for training") image_size = config.image_size # @param [512, 1024] latent_size = int(image_size) // 8 pred_sigma = getattr(config, 'pred_sigma', True) learn_sigma = getattr(config, 'learn_sigma', True) and pred_sigma model_kwargs={"window_block_indexes": config.window_block_indexes, "window_size": config.window_size, "use_rel_pos": config.use_rel_pos, "lewei_scale": config.lewei_scale, 'config':config, 'model_max_length': config.model_max_length} # build models train_diffusion = IDDPM(str(config.train_sampling_steps)) model: PixArtMS = build_model(config.model, config.grad_checkpointing, config.get('fp32_attention', False), input_size=latent_size, learn_sigma=learn_sigma, pred_sigma=pred_sigma, **model_kwargs) if config.load_from is not None and args.resume_from is None: # load from PixArt model missing, unexpected = load_checkpoint(config.load_from, model) logger.warning(f'Missing keys: {missing}') logger.warning(f'Unexpected keys: {unexpected}') if image_size == 1024: model: ControlPixArtMSHalf = ControlPixArtMSHalf(model, copy_blocks_num=config.copy_blocks_num).train() else: model: ControlPixArtHalf = ControlPixArtHalf(model, copy_blocks_num=config.copy_blocks_num).train() logger.info(f"{model.__class__.__name__} Model Parameters: {sum(p.numel() for p in model.parameters()):,}") logger.info(f"T5 max token length: {config.model_max_length}") # if args.local_rank == 0: # for name, params in model.named_parameters(): # if params.requires_grad == False: logger.info(f"freeze param: {name}") # # for name, params in model.named_parameters(): # if params.requires_grad == True: logger.info(f"trainable param: {name}") # prepare for FSDP clip grad norm calculation if
accelerator.distributed_type == DistributedType.FSDP: for m in accelerator._models: m.clip_grad_norm_ = types.MethodType(clip_grad_norm_, m) # build dataloader set_data_root(config.data_root) dataset = build_dataset(config.data, resolution=image_size, aspect_ratio_type=config.aspect_ratio_type, train_ratio=config.train_ratio) if config.multi_scale: batch_sampler = AspectRatioBatchSampler(sampler=RandomSampler(dataset), dataset=dataset, batch_size=config.train_batch_size, aspect_ratios=dataset.aspect_ratio, drop_last=True, ratio_nums=dataset.ratio_nums, config=config, valid_num=1) # batch_sampler = BalancedAspectRatioBatchSampler(sampler=RandomSampler(dataset), dataset=dataset, # batch_size=config.train_batch_size, aspect_ratios=dataset.aspect_ratio, # ratio_nums=dataset.ratio_nums) train_dataloader = build_dataloader(dataset, batch_sampler=batch_sampler, num_workers=config.num_workers) else: train_dataloader = build_dataloader(dataset, num_workers=config.num_workers, batch_size=config.train_batch_size, shuffle=True) # build optimizer and lr scheduler lr_scale_ratio = 1 if config.get('auto_lr', None):
current_file_path = Path(__file__).resolve() sys.path.insert(0, str(current_file_path.parent.parent)) warnings.filterwarnings("ignore") # ignore warning def set_fsdp_env(): os.environ["ACCELERATE_USE_FSDP"] = 'true' os.environ["FSDP_AUTO_WRAP_POLICY"] = 'TRANSFORMER_BASED_WRAP' os.environ["FSDP_BACKWARD_PREFETCH"] = 'BACKWARD_PRE' os.environ["FSDP_TRANSFORMER_CLS_TO_WRAP"] = 'PixArtBlock' def train(): if config.get('debug_nan', False): DebugUnderflowOverflow(model) logger.info('NaN debugger registered. Start to detect overflow during training.') time_start, last_tic = time.time(), time.time() log_buffer = LogBuffer() start_step = start_epoch * len(train_dataloader) global_step = 0 total_steps = len(train_dataloader) * config.num_epochs load_vae_feat = getattr(train_dataloader.dataset, 'load_vae_feat', False) if not load_vae_feat: raise ValueError("Only support load vae features for now.") # Now you train the model for epoch in range(start_epoch + 1, config.num_epochs + 1): data_time_start = time.time() data_time_all = 0 for step, batch in enumerate(train_dataloader): data_time_all += time.time() - data_time_start z = batch[0] # 4 x 4 x 128 x 128 z:vae output, 3x1024x1024->vae->4x128x128 clean_images = z * config.scale_factor # vae needed scale factor y = batch[1] # 4 x 1 x 120 x 4096 # T5 extracted feature of caption, 120 token, 4096 y_mask = batch[2] # 4 x 1 x 1 x 120 # caption indicate whether valid data_info = batch[3] # Sample a random timestep for each image bs = clean_images.shape[0] timesteps = torch.randint(0, config.train_sampling_steps, (bs,), device=clean_images.device).long() grad_norm = None with accelerator.accumulate(model): # Predict the noise residual optimizer.zero_grad() loss_term = train_diffusion.training_losses(model, clean_images, timesteps, model_kwargs=dict(y=y, mask=y_mask, data_info=data_info, c=data_info['condition'] * config.scale_factor)) loss = loss_term['loss'].mean() accelerator.backward(loss) if accelerator.sync_gradients: grad_norm = accelerator.clip_grad_norm_(model.parameters(), config.gradient_clip) optimizer.step() lr_scheduler.step() lr = lr_scheduler.get_last_lr()[0] logs = {"loss": accelerator.gather(loss).mean().item()} if grad_norm is not None: logs.update(grad_norm=accelerator.gather(grad_norm).mean().item()) log_buffer.update(logs) if (step + 1) % config.log_interval == 0 or (step + 1) == 1: t = (time.time() - last_tic) / config.log_interval t_d = data_time_all / config.log_interval avg_time = (time.time() - time_start) / (global_step + 1) eta = str(datetime.timedelta(seconds=int(avg_time * (total_steps - start_step - global_step - 1)))) eta_epoch = str(datetime.timedelta(seconds=int(avg_time * (len(train_dataloader) - step - 1)))) # avg_loss = sum(loss_buffer) / len(loss_buffer) log_buffer.average() info = f"Step/Epoch [{(epoch - 1) * len(train_dataloader) + step + 1}/{epoch}][{step + 1}/{len(train_dataloader)}]:total_eta: {eta}, " \ f"epoch_eta:{eta_epoch}, time_all:{t:.3f}, time_data:{t_d:.3f}, lr:{lr:.3e}, s:({data_info['img_hw'][0][0].item()}, {data_info['img_hw'][0][1].item()}), " info += ', '.join([f"{k}:{v:.4f}" for k, v in log_buffer.output.items()]) logger.info(info) last_tic = time.time() log_buffer.clear() data_time_all = 0 logs.update(lr=lr) accelerator.log(logs, step=global_step + start_step) if (global_step + 1) % 1000 == 0 and config.s3_work_dir is not None: logger.info(f"s3_work_dir: {config.s3_work_dir}") global_step += 1 data_time_start = time.time() synchronize() if accelerator.is_main_process: if ((epoch - 1) * 
len(train_dataloader) + step + 1) % config.save_model_steps == 0: os.umask(0o000) # file permission: 666; dir permission: 777 save_checkpoint(os.path.join(config.work_dir, 'checkpoints'), epoch=epoch, step=(epoch - 1) * len(train_dataloader) + step + 1, model=accelerator.unwrap_model(model), optimizer=optimizer, lr_scheduler=lr_scheduler ) synchronize() synchronize() # After each epoch you optionally sample some demo images with evaluate() and save the model if accelerator.is_main_process: if epoch % config.save_model_epochs == 0 or epoch == config.num_epochs: os.umask(0o000) # file permission: 666; dir permission: 777 save_checkpoint(os.path.join(config.work_dir, 'checkpoints'), epoch=epoch, step=(epoch - 1) * len(train_dataloader) + step + 1, model=accelerator.unwrap_model(model), optimizer=optimizer, lr_scheduler=lr_scheduler ) synchronize() def parse_args(): parser = argparse.ArgumentParser(description="Train a PixArt ControlNet model.") parser.add_argument("config", type=str, help="config") parser.add_argument("--cloud", action='store_true', default=False, help="cloud or local machine") parser.add_argument('--work-dir', help='the dir to save logs and models') parser.add_argument('--resume_from', help='the checkpoint to resume training from') parser.add_argument('--local-rank', type=int, default=-1) parser.add_argument('--local_rank', type=int, default=-1) parser.add_argument('--debug', action='store_true') parser.add_argument( "--report_to", type=str, default="tensorboard", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' ), ) parser.add_argument( "--tracker_project_name", type=str, default="text2image-fine-tune", help=( "The `project_name` argument passed to Accelerator.init_trackers for" " more information see https://huggingface.co/docs/accelerate/v0.17.0/en/package_reference/accelerator#accelerate.Accelerator" ), ) parser.add_argument('--lr', type=float, default=2e-4) parser.add_argument('--data_root', type=str, default=None) parser.add_argument('--resume_optimizer', action='store_true') parser.add_argument('--resume_lr_scheduler', action='store_true') args = parser.parse_args() return args if __name__ == '__main__': args = parse_args() config = read_config(args.config) if args.work_dir is not None: # update configs according to CLI args if args.work_dir is not None config.work_dir = args.work_dir if args.cloud: config.data_root = '/data/data' if args.data_root: config.data_root = args.data_root if args.resume_from is not None: config.load_from = None config.resume_from = dict( checkpoint=args.resume_from, load_ema=False, resume_optimizer=args.resume_optimizer, resume_lr_scheduler=args.resume_lr_scheduler) if args.debug: config.log_interval = 1 config.train_batch_size = 6 config.optimizer.update({'lr': args.lr}) os.umask(0o000) # file permission: 666; dir permission: 777 os.makedirs(config.work_dir, exist_ok=True) init_handler = InitProcessGroupKwargs() init_handler.timeout = datetime.timedelta(seconds=9600) # change timeout to avoid a strange NCCL bug # Initialize accelerator and tensorboard logging if config.use_fsdp: init_train = 'FSDP' set_fsdp_env() fsdp_plugin = FullyShardedDataParallelPlugin(state_dict_config=FullStateDictConfig(offload_to_cpu=False, rank0_only=False),) else: init_train = 'DDP' fsdp_plugin = None even_batches = True if config.multi_scale: even_batches = False accelerator = Accelerator( mixed_precision=config.mixed_precision,
gradient_accumulation_steps=config.gradient_accumulation_steps, log_with=args.report_to, project_dir=os.path.join(config.work_dir, "logs"), fsdp_plugin=fsdp_plugin, even_batches=even_batches, kwargs_handlers=[init_handler] ) logger = get_root_logger(os.path.join(config.work_dir, 'train_log.log')) config.seed = init_random_seed(config.get('seed', None)) set_random_seed(config.seed) if accelerator.is_main_process: config.dump(os.path.join(config.work_dir, 'config.py')) logger.info(f"Config: \n{config.pretty_text}") logger.info(f"World_size: {get_world_size()}, seed: {config.seed}") logger.info(f"Initializing: {init_train} for training") image_size = config.image_size # @param [512, 1024] latent_size = int(image_size) // 8 pred_sigma = getattr(config, 'pred_sigma', True) learn_sigma = getattr(config, 'learn_sigma', True) and pred_sigma model_kwargs={"window_block_indexes": config.window_block_indexes, "window_size": config.window_size, "use_rel_pos": config.use_rel_pos, "lewei_scale": config.lewei_scale, 'config':config, 'model_max_length': config.model_max_length} # build models train_diffusion = IDDPM(str(config.train_sampling_steps)) model: PixArtMS = build_model(config.model, config.grad_checkpointing, config.get('fp32_attention', False), input_size=latent_size, learn_sigma=learn_sigma, pred_sigma=pred_sigma, **model_kwargs) if config.load_from is not None and args.resume_from is None: # load from PixArt model missing, unexpected = load_checkpoint(config.load_from, model) logger.warning(f'Missing keys: {missing}') logger.warning(f'Unexpected keys: {unexpected}') if image_size == 1024: model: ControlPixArtMSHalf = ControlPixArtMSHalf(model, copy_blocks_num=config.copy_blocks_num).train() else: model: ControlPixArtHalf = ControlPixArtHalf(model, copy_blocks_num=config.copy_blocks_num).train() logger.info(f"{model.__class__.__name__} Model Parameters: {sum(p.numel() for p in model.parameters()):,}") logger.info(f"T5 max token length: {config.model_max_length}") # if args.local_rank == 0: # for name, params in model.named_parameters(): # if params.requires_grad == False: logger.info(f"freeze param: {name}") # # for name, params in model.named_parameters(): # if params.requires_grad == True: logger.info(f"trainable param: {name}") # prepare for FSDP clip grad norm calculation if accelerator.distributed_type == DistributedType.FSDP: for m in accelerator._models: m.clip_grad_norm_ = types.MethodType(clip_grad_norm_, m) # build dataloader set_data_root(config.data_root) dataset = build_dataset(config.data, resolution=image_size, aspect_ratio_type=config.aspect_ratio_type, train_ratio=config.train_ratio) if config.multi_scale: batch_sampler = AspectRatioBatchSampler(sampler=RandomSampler(dataset), dataset=dataset, batch_size=config.train_batch_size, aspect_ratios=dataset.aspect_ratio, drop_last=True, ratio_nums=dataset.ratio_nums, config=config, valid_num=1) # batch_sampler = BalancedAspectRatioBatchSampler(sampler=RandomSampler(dataset), dataset=dataset, # batch_size=config.train_batch_size, aspect_ratios=dataset.aspect_ratio, # ratio_nums=dataset.ratio_nums) train_dataloader = build_dataloader(dataset, batch_sampler=batch_sampler, num_workers=config.num_workers) else: train_dataloader = build_dataloader(dataset, num_workers=config.num_workers, batch_size=config.train_batch_size, shuffle=True) # build optimizer and lr scheduler lr_scale_ratio = 1 if config.get('auto_lr', None):
lr_scale_ratio = auto_scale_lr(config.train_batch_size * get_world_size() * config.gradient_accumulation_steps,
22
2023-10-12 14:16:33+00:00
24k
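When config.auto_lr is set, the script rescales the learning rate against the effective batch size (per-GPU batch x world size x gradient-accumulation steps), as the completion line above shows. The real auto_scale_lr lives in diffusion.utils.optimizer and its exact signature is not shown here; the sketch below only illustrates the usual linear scaling rule, with a base batch size of 256 assumed for illustration.

def linear_lr_scale(effective_batch_size: int, base_batch_size: int = 256) -> float:
    """Linear scaling rule: lr_scaled = lr_base * effective_batch / base_batch."""
    return effective_batch_size / base_batch_size

# effective batch = per-GPU batch * world size * gradient accumulation steps
train_batch_size, world_size, grad_accum = 32, 8, 2
lr_scale_ratio = linear_lr_scale(train_batch_size * world_size * grad_accum)
assert lr_scale_ratio == 2.0  # 512 / 256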
NVlabs/EmerNeRF
train_emernerf.py
[ { "identifier": "metrics", "path": "datasets/metrics.py", "snippet": "def compute_valid_depth_rmse(prediction: Tensor, target: Tensor) -> float:\ndef compute_psnr(prediction: Tensor, target: Tensor) -> float:\ndef compute_ssim(\n prediction: Union[Tensor, np.ndarray], target: Union[Tensor, np.ndarray...
import argparse import json import logging import os import time import imageio import numpy as np import torch import torch.utils.data import builders import loss import utils.misc as misc import wandb from typing import List, Optional from omegaconf import OmegaConf from tqdm import tqdm from datasets import metrics from datasets.base import SceneDataset from radiance_fields import DensityField, RadianceField from radiance_fields.render_utils import render_rays from radiance_fields.video_utils import render_pixels, save_videos from third_party.nerfacc_prop_net import PropNetEstimator, get_proposal_requires_grad_fn from utils.logging import MetricLogger, setup_logging from utils.visualization_tools import visualize_voxels, visualize_scene_flow from datasets.waymo import WaymoDataset from datasets.nuscenes import NuScenesDataset
21,002
proposal_networks=proposal_networks, proposal_estimator=proposal_estimator, optimizer=optimizer, scheduler=scheduler, ) else: start_step = 0 logger.info( f"Will start training for {cfg.optim.num_iters} iterations from scratch" ) if args.visualize_voxel or args.eval_only: if cfg.nerf.model.head.enable_flow_branch: logger.info("Visualizing scene flow...") visualize_scene_flow( cfg=cfg, model=model, dataset=dataset, device=device, ) if cfg.nerf.model.head.enable_feature_head: logger.info("Visualizing voxel features...") visualize_voxels( cfg, model, proposal_estimator, proposal_networks, dataset, device=device, save_html=True, is_dynamic=cfg.nerf.model.head.enable_dynamic_branch, ) logger.info("Visualization done!") if args.eval_only: do_evaluation( step=start_step, cfg=cfg, model=model, proposal_networks=proposal_networks, proposal_estimator=proposal_estimator, dataset=dataset, args=args, ) exit() # ------ build losses -------- # # rgb loss if cfg.data.pixel_source.load_rgb: rgb_loss_fn = loss.RealValueLoss( loss_type=cfg.supervision.rgb.loss_type, coef=cfg.supervision.rgb.loss_coef, name="rgb", check_nan=cfg.optim.check_nan, ) # lidar related losses if cfg.data.lidar_source.load_lidar and cfg.supervision.depth.enable: depth_loss_fn = loss.DepthLoss( loss_type=cfg.supervision.depth.loss_type, coef=cfg.supervision.depth.loss_coef, depth_error_percentile=cfg.supervision.depth.depth_error_percentile, check_nan=cfg.optim.check_nan, ) if cfg.supervision.depth.line_of_sight.enable: line_of_sight_loss_fn = loss.LineOfSightLoss( loss_type=cfg.supervision.depth.line_of_sight.loss_type, name="line_of_sight", depth_error_percentile=cfg.supervision.depth.depth_error_percentile, coef=cfg.supervision.depth.line_of_sight.loss_coef, check_nan=cfg.optim.check_nan, ) else: line_of_sight_loss_fn = None else: depth_loss_fn = None line_of_sight_loss_fn = None if cfg.data.pixel_source.load_sky_mask and cfg.nerf.model.head.enable_sky_head: sky_loss_fn = loss.SkyLoss( loss_type=cfg.supervision.sky.loss_type, coef=cfg.supervision.sky.loss_coef, check_nan=cfg.optim.check_nan, ) else: sky_loss_fn = None if cfg.data.pixel_source.load_features and cfg.nerf.model.head.enable_feature_head: feature_loss_fn = loss.RealValueLoss( loss_type=cfg.supervision.feature.loss_type, coef=cfg.supervision.feature.loss_coef, name="feature", check_nan=cfg.optim.check_nan, ) else: feature_loss_fn = None ## ------ dynamic related losses -------- # if cfg.nerf.model.head.enable_dynamic_branch: dynamic_reg_loss_fn = loss.DynamicRegularizationLoss( loss_type=cfg.supervision.dynamic.loss_type, coef=cfg.supervision.dynamic.loss_coef, entropy_skewness=cfg.supervision.dynamic.entropy_loss_skewness, check_nan=cfg.optim.check_nan, ) else: dynamic_reg_loss_fn = None if cfg.nerf.model.head.enable_shadow_head: shadow_loss_fn = loss.DynamicRegularizationLoss( name="shadow", loss_type=cfg.supervision.shadow.loss_type, coef=cfg.supervision.shadow.loss_coef, check_nan=cfg.optim.check_nan, ) else: shadow_loss_fn = None metrics_file = os.path.join(cfg.log_dir, "metrics.json") metric_logger = MetricLogger(delimiter=" ", output_file=metrics_file)
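The loss-building section above follows one consistent pattern: each loss term is constructed only when its config flag enables it and is otherwise left as None, so the training loop can skip disabled heads. A minimal, self-contained sketch of that pattern; cfg here is a hypothetical stand-in for the OmegaConf config in the source, and the lambda losses are placeholders, not the real loss classes.

from types import SimpleNamespace

def build_losses(cfg):
    """Construct only the losses whose flags are enabled; drop the rest."""
    losses = {
        "rgb": (lambda pred, gt: abs(pred - gt)) if cfg.load_rgb else None,
        "depth": (lambda pred, gt: (pred - gt) ** 2) if cfg.load_lidar else None,
    }
    return {name: fn for name, fn in losses.items() if fn is not None}

cfg = SimpleNamespace(load_rgb=True, load_lidar=False)
active = build_losses(cfg)
assert set(active) == {"rgb"} and active["rgb"](3.0, 1.0) == 2.0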
logger = logging.getLogger() current_time = time.strftime("%Y-%m-%d_%H-%M-%S", time.localtime()) # a global list of keys to render, # comment out the keys you don't want to render or uncomment the keys you want to render render_keys = [ "gt_rgbs", "rgbs", "depths", # "median_depths", "gt_dino_feats", "dino_feats", "dynamic_rgbs", "dynamic_depths", "static_rgbs", "static_depths", "forward_flows", "backward_flows", "dynamic_rgb_on_static_dinos", "dino_pe", "dino_feats_pe_free", # "dynamic_dino_on_static_rgbs", # "shadow_reduced_static_rgbs", # "shadow_only_static_rgbs", # "shadows", # "gt_sky_masks", # "sky_masks", ] def get_args_parser(): parser = argparse.ArgumentParser("Train EmerNeRF for a single scene") parser.add_argument("--config_file", help="path to config file", type=str) parser.add_argument( "--eval_only", action="store_true", help="perform evaluation only" ) parser.add_argument( "--visualize_voxel", action="store_true", help="visualize voxel features" ) parser.add_argument( "--render_data_video", action="store_true", help="Render a data video", ) parser.add_argument( "--render_data_video_only", action="store_true", help="Quit after rendering a data video", ) parser.add_argument( "--render_video_postfix", type=str, default=None, help="an optional postfix for video", ) parser.add_argument( "--output_root", default="./work_dirs/", help="path to save checkpoints and logs", type=str, ) # wandb logging part parser.add_argument( "--enable_wandb", action="store_true", help="enable wandb logging" ) parser.add_argument( "--entity", default="YOUR ENTITY NAME", type=str, help="wandb entity name", required=False, ) parser.add_argument( "--project", default="emernerf", type=str, help="wandb project name, also used to enhance log_dir", required=True, ) parser.add_argument( "--run_name", default="debug", type=str, help="wandb run name, also used to enhance log_dir", required=True, ) parser.add_argument( "opts", help="Modify config options using the command-line", default=None, nargs=argparse.REMAINDER, ) return parser def setup(args): # ------ get config from args -------- # default_config = OmegaConf.create(OmegaConf.load("configs/default_config.yaml")) cfg = OmegaConf.load(args.config_file) cfg = OmegaConf.merge(default_config, cfg, OmegaConf.from_cli(args.opts)) log_dir = os.path.join(args.output_root, args.project, args.run_name) cfg.log_dir = log_dir cfg.nerf.model.num_cams = cfg.data.pixel_source.num_cams cfg.nerf.model.unbounded = cfg.nerf.unbounded cfg.nerf.propnet.unbounded = cfg.nerf.unbounded cfg.nerf.model.resume_from = cfg.resume_from os.makedirs(log_dir, exist_ok=True) for folder in [ "images", "full_videos", "test_videos", "lowres_videos", "metrics", "configs_bk", "buffer_maps", ]: os.makedirs(os.path.join(log_dir, folder), exist_ok=True) # ------ setup logging -------- # if args.enable_wandb: # sometimes wandb fails to init in cloud machines, so we give it several (many) tries while ( wandb.init( project=args.project, entity=args.entity, sync_tensorboard=True, settings=wandb.Settings(start_method="fork"), ) is not wandb.run ): continue wandb.run.name = args.run_name wandb.run.save() wandb.config.update(OmegaConf.to_container(cfg, resolve=True)) wandb.config.update(args) misc.fix_random_seeds(cfg.optim.seed) global logger setup_logging(output=log_dir, level=logging.INFO, time_string=current_time) logger.info( "\n".join("%s: %s" % (k, str(v)) for k, v in sorted(dict(vars(args)).items())) ) # -------- write config -------- # logger.info(f"Config:\n{OmegaConf.to_yaml(cfg)}")
saved_cfg_path = os.path.join(log_dir, "config.yaml") with open(saved_cfg_path, "w") as f: OmegaConf.save(config=cfg, f=f) # also save a backup copy saved_cfg_path_bk = os.path.join( log_dir, "configs_bk", f"config_{current_time}.yaml" ) with open(saved_cfg_path_bk, "w") as f: OmegaConf.save(config=cfg, f=f) logger.info(f"Full config saved to {saved_cfg_path}, and {saved_cfg_path_bk}") return cfg @torch.no_grad() def do_evaluation( step: int = 0, cfg: OmegaConf = None, model: RadianceField = None, proposal_networks: Optional[List[DensityField]] = None, proposal_estimator: PropNetEstimator = None, dataset: SceneDataset = None, args: argparse.Namespace = None, ): logger.info("Evaluating on the full set...") model.eval() proposal_estimator.eval() for p in proposal_networks: p.eval() if cfg.eval.eval_occ: assert cfg.data.dataset == "waymo", "only support waymo dataset for now" device = model.device # use every cfg.eval.occ_annotation_stride frames for training train_indices = np.arange( 0, dataset.num_lidar_timesteps, cfg.eval.occ_annotation_stride ) test_indices = [ x for x in range(dataset.num_lidar_timesteps) if x not in train_indices ] # collect centroids and labels centroids_bank, label_bank = metrics.collect_centroids( train_indices, dataset, model, device ) logger.info("Evaluating Few-shot Occ...") occ_metrics = metrics.eval_few_shot_occ( test_indices, dataset, model, device, centroids_bank, label_bank ) occ_metrics_file = f"{cfg.log_dir}/metrics/occ_eval_{current_time}.json" with open(occ_metrics_file, "w") as f: json.dump(occ_metrics, f) if args.enable_wandb: wandb.log(occ_metrics) logger.info( f"Few-shot Occupancy evaluation metrics saved to {occ_metrics_file}" ) logger.info("Few-shot Occ Results:") logger.info(json.dumps(occ_metrics, indent=4)) logger.info( "===> Note: zero accuracy means no valid points for that class in the scene" ) torch.cuda.empty_cache() if cfg.eval.eval_lidar_flow and cfg.nerf.model.head.enable_flow_branch: assert cfg.data.dataset == "waymo", "only support waymo dataset for now" logger.info("Evaluating Lidar Flow...") # use metrics from NSFP all_flow_metrics = { "EPE3D": [], "acc3d_strict": [], "acc3d_relax": [], "angle_error": [], "outlier": [], } for data_dict in tqdm( dataset.full_lidar_set, "Evaluating Lidar Flow", dynamic_ncols=True ): lidar_flow_class = data_dict["lidar_flow_class"] for k, v in data_dict.items(): # remove invalid flow (the information is from GT) data_dict[k] = v[lidar_flow_class != -1] if data_dict[k].shape[0] == 0: logger.info(f"no valid points, skipping...") continue if cfg.eval.remove_ground_when_eval_lidar_flow: # following the setting in scene flow estimation works for k, v in data_dict.items(): data_dict[k] = v[~data_dict["lidar_ground"]] lidar_points = ( data_dict["lidar_origins"] + data_dict["lidar_ranges"] * data_dict["lidar_viewdirs"] ) normalized_timestamps = data_dict["lidar_normed_timestamps"] pred_results = model.query_flow( positions=lidar_points, normed_timestamps=normalized_timestamps, ) pred_flow = pred_results["forward_flow"] # flow is only valid when the point is not static pred_flow[pred_results["dynamic_density"] < 0.2] *= 0 # metrics in NSFP flow_metrics = metrics.compute_scene_flow_metrics( pred_flow[None, ...], data_dict["lidar_flow"][None, ...] 
) for k, v in flow_metrics.items(): all_flow_metrics[k].append(v) logger.info("Lidar Flow Results:") avg_flow_metrics = {k: np.mean(v) for k, v in all_flow_metrics.items()} logger.info(json.dumps(avg_flow_metrics, indent=4)) flow_metrics_file = f"{cfg.log_dir}/metrics/flow_eval_{current_time}.json" with open(flow_metrics_file, "w") as f: json.dump(avg_flow_metrics, f) logger.info(f"Flow estimation evaluation metrics saved to {flow_metrics_file}") if args.enable_wandb: wandb.log(avg_flow_metrics) torch.cuda.empty_cache() if cfg.data.pixel_source.load_rgb and cfg.render.render_low_res: logger.info("Rendering full set but in a low_resolution...") dataset.pixel_source.update_downscale_factor(1 / cfg.render.low_res_downscale) render_results = render_pixels( cfg=cfg, model=model, proposal_networks=proposal_networks, proposal_estimator=proposal_estimator, dataset=dataset.full_pixel_set, compute_metrics=True, return_decomposition=True, ) dataset.pixel_source.reset_downscale_factor() if args.render_video_postfix is None: video_output_pth = os.path.join(cfg.log_dir, "lowres_videos", f"{step}.mp4") else: video_output_pth = os.path.join( cfg.log_dir, "lowres_videos", f"{step}_{args.render_video_postfix}.mp4", ) vis_frame_dict = save_videos( render_results, video_output_pth, num_timestamps=dataset.num_img_timesteps, keys=render_keys, save_seperate_video=cfg.logging.save_seperate_video, num_cams=dataset.pixel_source.num_cams, fps=cfg.render.fps, verbose=True, ) if args.enable_wandb: for k, v in vis_frame_dict.items(): wandb.log({f"pixel_rendering/lowres_full/{k}": wandb.Image(v)}) del render_results, vis_frame_dict torch.cuda.empty_cache() if cfg.data.pixel_source.load_rgb: logger.info("Evaluating Pixels...") if dataset.test_pixel_set is not None and cfg.render.render_test: logger.info("Evaluating Test Set Pixels...") render_results = render_pixels( cfg=cfg, model=model, proposal_estimator=proposal_estimator, dataset=dataset.test_pixel_set, proposal_networks=proposal_networks, compute_metrics=True, return_decomposition=True, ) eval_dict = {} for k, v in render_results.items(): if k in [ "psnr", "ssim", "feat_psnr", "masked_psnr", "masked_ssim", "masked_feat_psnr", ]: eval_dict[f"pixel_metrics/test/{k}"] = v if args.enable_wandb: wandb.log(eval_dict) test_metrics_file = f"{cfg.log_dir}/metrics/images_test_{current_time}.json" with open(test_metrics_file, "w") as f: json.dump(eval_dict, f) logger.info(f"Image evaluation metrics saved to {test_metrics_file}") if args.render_video_postfix is None: video_output_pth = f"{cfg.log_dir}/test_videos/{step}.mp4" else: video_output_pth = ( f"{cfg.log_dir}/test_videos/{step}_{args.render_video_postfix}.mp4" ) vis_frame_dict = save_videos( render_results, video_output_pth, num_timestamps=dataset.num_test_timesteps, keys=render_keys, num_cams=dataset.pixel_source.num_cams, save_seperate_video=cfg.logging.save_seperate_video, fps=cfg.render.fps, verbose=True, # save_images=True, ) if args.enable_wandb: for k, v in vis_frame_dict.items(): wandb.log({"pixel_rendering/test/" + k: wandb.Image(v)}) del render_results, vis_frame_dict torch.cuda.empty_cache() if cfg.render.render_full: logger.info("Evaluating Full Set...") render_results = render_pixels( cfg=cfg, model=model, proposal_estimator=proposal_estimator, dataset=dataset.full_pixel_set, proposal_networks=proposal_networks, compute_metrics=True, return_decomposition=True, ) eval_dict = {} for k, v in render_results.items(): if k in [ "psnr", "ssim", "feat_psnr", "masked_psnr", "masked_ssim", "masked_feat_psnr", ]: 
eval_dict[f"pixel_metrics/full/{k}"] = v if args.enable_wandb: wandb.log(eval_dict) test_metrics_file = f"{cfg.log_dir}/metrics/images_full_{current_time}.json" with open(test_metrics_file, "w") as f: json.dump(eval_dict, f) logger.info(f"Image evaluation metrics saved to {test_metrics_file}") if args.render_video_postfix is None: video_output_pth = f"{cfg.log_dir}/full_videos/{step}.mp4" else: video_output_pth = ( f"{cfg.log_dir}/full_videos/{step}_{args.render_video_postfix}.mp4" ) vis_frame_dict = save_videos( render_results, video_output_pth, num_timestamps=dataset.num_img_timesteps, keys=render_keys, num_cams=dataset.pixel_source.num_cams, save_seperate_video=cfg.logging.save_seperate_video, fps=cfg.render.fps, verbose=True, ) if args.enable_wandb: for k, v in vis_frame_dict.items(): wandb.log({"pixel_rendering/full/" + k: wandb.Image(v)}) del render_results, vis_frame_dict torch.cuda.empty_cache() # TODO: add a novel trajectory rendering part def main(args): cfg = setup(args) device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # ------ build dataset -------- # # we need to set some hyper-parameters for the model based on the dataset, # e.g., aabb, number of training timestamps, number of cameras, etc, so # we build the dataset at first. if cfg.data.dataset == "waymo": dataset = WaymoDataset(data_cfg=cfg.data) else: dataset = NuScenesDataset(data_cfg=cfg.data) # To give us a quick preview of the scene, we render a data video if args.render_data_video or args.render_data_video_only: save_pth = os.path.join(cfg.log_dir, "data.mp4") # define a `render_data_videos` per dataset. dataset.render_data_videos(save_pth=save_pth, split="full") if args.render_data_video_only: logger.info("Render data video only, exiting...") exit() # ------ build proposal networks and models -------- # # we input the dataset to the model builder to set some hyper-parameters ( proposal_estimator, proposal_networks, ) = builders.build_estimator_and_propnet_from_cfg( nerf_cfg=cfg.nerf, optim_cfg=cfg.optim, dataset=dataset, device=device ) model = builders.build_model_from_cfg( cfg=cfg.nerf.model, dataset=dataset, device=device ) logger.info(f"PropNetEstimator: {proposal_networks}") logger.info(f"Model: {model}") # ------ build optimizer and grad scaler -------- # optimizer = builders.build_optimizer_from_cfg(cfg=cfg.optim, model=model) pixel_grad_scaler = torch.cuda.amp.GradScaler(2**10) lidar_grad_scaler = torch.cuda.amp.GradScaler(2**10) # ------ build scheduler -------- # scheduler = builders.build_scheduler_from_cfg(cfg=cfg.optim, optimizer=optimizer) if cfg.resume_from is not None: start_step = misc.resume_from_checkpoint( ckpt_path=cfg.resume_from, model=model, proposal_networks=proposal_networks, proposal_estimator=proposal_estimator, optimizer=optimizer, scheduler=scheduler, ) else: start_step = 0 logger.info( f"Will start training for {cfg.optim.num_iters} iterations from scratch" ) if args.visualize_voxel or args.eval_only: if cfg.nerf.model.head.enable_flow_branch: logger.info("Visualizing scene flow...") visualize_scene_flow( cfg=cfg, model=model, dataset=dataset, device=device, ) if cfg.nerf.model.head.enable_feature_head: logger.info("Visualizing voxel features...") visualize_voxels( cfg, model, proposal_estimator, proposal_networks, dataset, device=device, save_html=True, is_dynamic=cfg.nerf.model.head.enable_dynamic_branch, ) logger.info("Visualization done!") if args.eval_only: do_evaluation( step=start_step, cfg=cfg, model=model, proposal_networks=proposal_networks, 
proposal_estimator=proposal_estimator, dataset=dataset, args=args, ) exit() # ------ build losses -------- # # rgb loss if cfg.data.pixel_source.load_rgb: rgb_loss_fn = loss.RealValueLoss( loss_type=cfg.supervision.rgb.loss_type, coef=cfg.supervision.rgb.loss_coef, name="rgb", check_nan=cfg.optim.check_nan, ) # lidar related losses if cfg.data.lidar_source.load_lidar and cfg.supervision.depth.enable: depth_loss_fn = loss.DepthLoss( loss_type=cfg.supervision.depth.loss_type, coef=cfg.supervision.depth.loss_coef, depth_error_percentile=cfg.supervision.depth.depth_error_percentile, check_nan=cfg.optim.check_nan, ) if cfg.supervision.depth.line_of_sight.enable: line_of_sight_loss_fn = loss.LineOfSightLoss( loss_type=cfg.supervision.depth.line_of_sight.loss_type, name="line_of_sight", depth_error_percentile=cfg.supervision.depth.depth_error_percentile, coef=cfg.supervision.depth.line_of_sight.loss_coef, check_nan=cfg.optim.check_nan, ) else: line_of_sight_loss_fn = None else: depth_loss_fn = None line_of_sight_loss_fn = None if cfg.data.pixel_source.load_sky_mask and cfg.nerf.model.head.enable_sky_head: sky_loss_fn = loss.SkyLoss( loss_type=cfg.supervision.sky.loss_type, coef=cfg.supervision.sky.loss_coef, check_nan=cfg.optim.check_nan, ) else: sky_loss_fn = None if cfg.data.pixel_source.load_features and cfg.nerf.model.head.enable_feature_head: feature_loss_fn = loss.RealValueLoss( loss_type=cfg.supervision.feature.loss_type, coef=cfg.supervision.feature.loss_coef, name="feature", check_nan=cfg.optim.check_nan, ) else: feature_loss_fn = None ## ------ dynamic related losses -------- # if cfg.nerf.model.head.enable_dynamic_branch: dynamic_reg_loss_fn = loss.DynamicRegularizationLoss( loss_type=cfg.supervision.dynamic.loss_type, coef=cfg.supervision.dynamic.loss_coef, entropy_skewness=cfg.supervision.dynamic.entropy_loss_skewness, check_nan=cfg.optim.check_nan, ) else: dynamic_reg_loss_fn = None if cfg.nerf.model.head.enable_shadow_head: shadow_loss_fn = loss.DynamicRegularizationLoss( name="shadow", loss_type=cfg.supervision.shadow.loss_type, coef=cfg.supervision.shadow.loss_coef, check_nan=cfg.optim.check_nan, ) else: shadow_loss_fn = None metrics_file = os.path.join(cfg.log_dir, "metrics.json") metric_logger = MetricLogger(delimiter=" ", output_file=metrics_file)
proposal_requires_grad_fn = get_proposal_requires_grad_fn()
8
2023-10-11 20:56:27+00:00
24k
alibaba-damo-academy/FunCodec
funcodec/models/encoder/sanm_encoder.py
[ { "identifier": "overlap_chunk", "path": "funcodec/modules/streaming_utils/chunk_utilis.py", "snippet": "class overlap_chunk():\n\t\"\"\"\n\tauthor: Speech Lab, Alibaba Group, China\n\tSan-m: Memory equipped self-attention for end-to-end speech recognition\n\thttps://arxiv.org/abs/2006.01713\n\n\t\"\"\"...
from typing import List
from typing import Optional
from typing import Sequence
from typing import Tuple
from typing import Union
from funcodec.modules.streaming_utils.chunk_utilis import overlap_chunk
from funcodec.modules.nets_utils import make_pad_mask
from funcodec.modules.attention import MultiHeadedAttention, MultiHeadedAttentionSANM
from funcodec.modules.embedding import SinusoidalPositionEncoder
from funcodec.modules.layer_norm import LayerNorm
from funcodec.modules.multi_layer_conv import Conv1dLinear
from funcodec.modules.multi_layer_conv import MultiLayeredConv1d
from funcodec.modules.positionwise_feed_forward import (
    PositionwiseFeedForward,  # noqa: H301
)
from funcodec.modules.repeat import repeat
from funcodec.modules.subsampling import Conv2dSubsampling
from funcodec.modules.subsampling import Conv2dSubsampling2
from funcodec.modules.subsampling import Conv2dSubsampling6
from funcodec.modules.subsampling import Conv2dSubsampling8
from funcodec.modules.subsampling import TooShortUttError
from funcodec.modules.subsampling import check_short_utt
from funcodec.models.encoder.abs_encoder import AbsEncoder
import logging
import torch
import torch.nn as nn
import numpy as np
15,077
        self.normalize_before = normalize_before

        if positionwise_layer_type == "linear":
            positionwise_layer = PositionwiseFeedForward
            positionwise_layer_args = (output_size, linear_units, dropout_rate,)
        elif positionwise_layer_type == "conv1d":
            positionwise_layer = MultiLayeredConv1d
            positionwise_layer_args = (output_size, linear_units, positionwise_conv_kernel_size, dropout_rate,)
        elif positionwise_layer_type == "conv1d-linear":
            positionwise_layer = Conv1dLinear
            positionwise_layer_args = (output_size, linear_units, positionwise_conv_kernel_size, dropout_rate,)
        else:
            raise NotImplementedError("Support only linear or conv1d.")

        if selfattention_layer_type == "selfattn":
            encoder_selfattn_layer = MultiHeadedAttention
            encoder_selfattn_layer_args = (attention_heads, output_size, attention_dropout_rate,)
        elif selfattention_layer_type == "sanm":
            encoder_selfattn_layer = MultiHeadedAttentionSANM
            encoder_selfattn_layer_args0 = (
                attention_heads, input_size, output_size, attention_dropout_rate, kernel_size, sanm_shfit,
            )
            encoder_selfattn_layer_args = (
                attention_heads, output_size, output_size, attention_dropout_rate, kernel_size, sanm_shfit,
            )

        self.encoders0 = repeat(
            1,
            lambda lnum: EncoderLayerSANM(
                input_size,
                output_size,
                encoder_selfattn_layer(*encoder_selfattn_layer_args0),
                positionwise_layer(*positionwise_layer_args),
                dropout_rate,
                normalize_before,
                concat_after,
            ),
        )
        self.encoders = repeat(
            num_blocks - 1,
            lambda lnum: EncoderLayerSANM(
                output_size,
                output_size,
                encoder_selfattn_layer(*encoder_selfattn_layer_args),
                positionwise_layer(*positionwise_layer_args),
                dropout_rate,
                normalize_before,
                concat_after,
            ),
        )
        if self.normalize_before:
            self.after_norm = LayerNorm(output_size)

        self.interctc_layer_idx = interctc_layer_idx
        if len(interctc_layer_idx) > 0:
            assert 0 < min(interctc_layer_idx) and max(interctc_layer_idx) < num_blocks
        self.interctc_use_conditioning = interctc_use_conditioning
        self.conditioning_layer = None
        self.dropout = nn.Dropout(dropout_rate)
        self.tf2torch_tensor_name_prefix_torch = tf2torch_tensor_name_prefix_torch
        self.tf2torch_tensor_name_prefix_tf = tf2torch_tensor_name_prefix_tf

    def output_size(self) -> int:
        return self._output_size

    def forward(
        self,
        xs_pad: torch.Tensor,
        ilens: torch.Tensor,
        prev_states: torch.Tensor = None,
        ctc=None,
    ) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:
        """Embed positions in tensor.

        Args:
            xs_pad: input tensor (B, L, D)
            ilens: input length (B)
            prev_states: Not to be used now.
        Returns:
            position embedded tensor and mask
        """
        masks = (~make_pad_mask(ilens)[:, None, :]).to(xs_pad.device)
        xs_pad = xs_pad * self.output_size()**0.5

        if self.embed is None:
            xs_pad = xs_pad
        elif (
            isinstance(self.embed, Conv2dSubsampling)
            or isinstance(self.embed, Conv2dSubsampling2)
            or isinstance(self.embed, Conv2dSubsampling6)
            or isinstance(self.embed, Conv2dSubsampling8)
        ):
class EncoderLayerSANM(nn.Module):
    def __init__(
        self,
        in_size,
        size,
        self_attn,
        feed_forward,
        dropout_rate,
        normalize_before=True,
        concat_after=False,
        stochastic_depth_rate=0.0,
    ):
        """Construct an EncoderLayer object."""
        super(EncoderLayerSANM, self).__init__()
        self.self_attn = self_attn
        self.feed_forward = feed_forward
        self.norm1 = LayerNorm(in_size)
        self.norm2 = LayerNorm(size)
        self.dropout = nn.Dropout(dropout_rate)
        self.in_size = in_size
        self.size = size
        self.normalize_before = normalize_before
        self.concat_after = concat_after
        if self.concat_after:
            self.concat_linear = nn.Linear(size + size, size)
        self.stochastic_depth_rate = stochastic_depth_rate
        self.dropout_rate = dropout_rate

    def forward(self, x, mask, cache=None, mask_shfit_chunk=None, mask_att_chunk_encoder=None):
        """Compute encoded features.

        Args:
            x_input (torch.Tensor): Input tensor (#batch, time, size).
            mask (torch.Tensor): Mask tensor for the input (#batch, time).
            cache (torch.Tensor): Cache tensor of the input (#batch, time - 1, size).

        Returns:
            torch.Tensor: Output tensor (#batch, time, size).
            torch.Tensor: Mask tensor (#batch, time).
        """
        skip_layer = False
        # with stochastic depth, residual connection `x + f(x)` becomes
        # `x <- x + 1 / (1 - p) * f(x)` at training time.
        stoch_layer_coeff = 1.0
        if self.training and self.stochastic_depth_rate > 0:
            skip_layer = torch.rand(1).item() < self.stochastic_depth_rate
            stoch_layer_coeff = 1.0 / (1 - self.stochastic_depth_rate)

        if skip_layer:
            if cache is not None:
                x = torch.cat([cache, x], dim=1)
            return x, mask

        residual = x
        if self.normalize_before:
            x = self.norm1(x)

        if self.concat_after:
            x_concat = torch.cat((x, self.self_attn(x, mask, mask_shfit_chunk=mask_shfit_chunk, mask_att_chunk_encoder=mask_att_chunk_encoder)), dim=-1)
            if self.in_size == self.size:
                x = residual + stoch_layer_coeff * self.concat_linear(x_concat)
            else:
                x = stoch_layer_coeff * self.concat_linear(x_concat)
        else:
            if self.in_size == self.size:
                x = residual + stoch_layer_coeff * self.dropout(
                    self.self_attn(x, mask, mask_shfit_chunk=mask_shfit_chunk, mask_att_chunk_encoder=mask_att_chunk_encoder)
                )
            else:
                x = stoch_layer_coeff * self.dropout(
                    self.self_attn(x, mask, mask_shfit_chunk=mask_shfit_chunk, mask_att_chunk_encoder=mask_att_chunk_encoder)
                )
        if not self.normalize_before:
            x = self.norm1(x)

        residual = x
        if self.normalize_before:
            x = self.norm2(x)
        x = residual + stoch_layer_coeff * self.dropout(self.feed_forward(x))
        if not self.normalize_before:
            x = self.norm2(x)

        return x, mask, cache, mask_shfit_chunk, mask_att_chunk_encoder


class SANMEncoder(AbsEncoder):
    """
    author: Speech Lab, Alibaba Group, China
    San-m: Memory equipped self-attention for end-to-end speech recognition
    https://arxiv.org/abs/2006.01713
    """

    def __init__(
        self,
        input_size: int,
        output_size: int = 256,
        attention_heads: int = 4,
        linear_units: int = 2048,
        num_blocks: int = 6,
        dropout_rate: float = 0.1,
        positional_dropout_rate: float = 0.1,
        attention_dropout_rate: float = 0.0,
        input_layer: Optional[str] = "conv2d",
        pos_enc_class=SinusoidalPositionEncoder,
        normalize_before: bool = True,
        concat_after: bool = False,
        positionwise_layer_type: str = "linear",
        positionwise_conv_kernel_size: int = 1,
        padding_idx: int = -1,
        interctc_layer_idx: List[int] = [],
        interctc_use_conditioning: bool = False,
        kernel_size: int = 11,
        sanm_shfit: int = 0,
        selfattention_layer_type: str = "sanm",
        tf2torch_tensor_name_prefix_torch: str = "encoder",
        tf2torch_tensor_name_prefix_tf: str = "seq2seq/encoder",
    ):
        super().__init__()
        self._output_size = output_size

        if input_layer == "linear":
            self.embed = torch.nn.Sequential(
                torch.nn.Linear(input_size, output_size),
                torch.nn.LayerNorm(output_size),
                torch.nn.Dropout(dropout_rate),
                torch.nn.ReLU(),
                pos_enc_class(output_size, positional_dropout_rate),
            )
        elif input_layer == "conv2d":
            self.embed = Conv2dSubsampling(input_size, output_size, dropout_rate)
        elif input_layer == "conv2d2":
            self.embed = Conv2dSubsampling2(input_size, output_size, dropout_rate)
        elif input_layer == "conv2d6":
            self.embed = Conv2dSubsampling6(input_size, output_size, dropout_rate)
        elif input_layer == "conv2d8":
            self.embed = Conv2dSubsampling8(input_size, output_size, dropout_rate)
        elif input_layer == "embed":
            self.embed = torch.nn.Sequential(
                torch.nn.Embedding(input_size, output_size, padding_idx=padding_idx),
                SinusoidalPositionEncoder(),
            )
        elif input_layer is None:
            if input_size == output_size:
                self.embed = None
            else:
                self.embed = torch.nn.Linear(input_size, output_size)
        elif input_layer == "pe":
            self.embed = SinusoidalPositionEncoder()
        else:
            raise ValueError("unknown input_layer: " + input_layer)

        self.normalize_before = normalize_before

        if positionwise_layer_type == "linear":
            positionwise_layer = PositionwiseFeedForward
            positionwise_layer_args = (output_size, linear_units, dropout_rate,)
        elif positionwise_layer_type == "conv1d":
            positionwise_layer = MultiLayeredConv1d
            positionwise_layer_args = (output_size, linear_units, positionwise_conv_kernel_size, dropout_rate,)
        elif positionwise_layer_type == "conv1d-linear":
            positionwise_layer = Conv1dLinear
            positionwise_layer_args = (output_size, linear_units, positionwise_conv_kernel_size, dropout_rate,)
        else:
            raise NotImplementedError("Support only linear or conv1d.")

        if selfattention_layer_type == "selfattn":
            encoder_selfattn_layer = MultiHeadedAttention
            encoder_selfattn_layer_args = (attention_heads, output_size, attention_dropout_rate,)
        elif selfattention_layer_type == "sanm":
            encoder_selfattn_layer = MultiHeadedAttentionSANM
            encoder_selfattn_layer_args0 = (
                attention_heads, input_size, output_size, attention_dropout_rate, kernel_size, sanm_shfit,
            )
            encoder_selfattn_layer_args = (
                attention_heads, output_size, output_size, attention_dropout_rate, kernel_size, sanm_shfit,
            )

        self.encoders0 = repeat(
            1,
            lambda lnum: EncoderLayerSANM(
                input_size,
                output_size,
                encoder_selfattn_layer(*encoder_selfattn_layer_args0),
                positionwise_layer(*positionwise_layer_args),
                dropout_rate,
                normalize_before,
                concat_after,
            ),
        )
        self.encoders = repeat(
            num_blocks - 1,
            lambda lnum: EncoderLayerSANM(
                output_size,
                output_size,
                encoder_selfattn_layer(*encoder_selfattn_layer_args),
                positionwise_layer(*positionwise_layer_args),
                dropout_rate,
                normalize_before,
                concat_after,
            ),
        )
        if self.normalize_before:
            self.after_norm = LayerNorm(output_size)

        self.interctc_layer_idx = interctc_layer_idx
        if len(interctc_layer_idx) > 0:
            assert 0 < min(interctc_layer_idx) and max(interctc_layer_idx) < num_blocks
        self.interctc_use_conditioning = interctc_use_conditioning
        self.conditioning_layer = None
        self.dropout = nn.Dropout(dropout_rate)
        self.tf2torch_tensor_name_prefix_torch = tf2torch_tensor_name_prefix_torch
        self.tf2torch_tensor_name_prefix_tf = tf2torch_tensor_name_prefix_tf

    def output_size(self) -> int:
        return self._output_size

    def forward(
        self,
        xs_pad: torch.Tensor,
        ilens: torch.Tensor,
        prev_states: torch.Tensor = None,
        ctc=None,
    ) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:
        """Embed positions in tensor.

        Args:
            xs_pad: input tensor (B, L, D)
            ilens: input length (B)
            prev_states: Not to be used now.
        Returns:
            position embedded tensor and mask
        """
        masks = (~make_pad_mask(ilens)[:, None, :]).to(xs_pad.device)
        xs_pad = xs_pad * self.output_size()**0.5

        if self.embed is None:
            xs_pad = xs_pad
        elif (
            isinstance(self.embed, Conv2dSubsampling)
            or isinstance(self.embed, Conv2dSubsampling2)
            or isinstance(self.embed, Conv2dSubsampling6)
            or isinstance(self.embed, Conv2dSubsampling8)
        ):
short_status, limit_size = check_short_utt(self.embed, xs_pad.size(1))
15
2023-10-07 02:00:40+00:00
24k
Beckschen/3D-TransUNet
nn_transunet/trainer/nnUNetTrainerV2.py
[ { "identifier": "get_moreDA_augmentation", "path": "nn_transunet/data/data_augmentation_moreDA.py", "snippet": "def get_moreDA_augmentation(dataloader_train, dataloader_val, patch_size, params=default_3D_augmentation_params,\n border_val_seg=-1,\n se...
from collections import OrderedDict
from typing import Tuple
from ..data.data_augmentation_moreDA import get_moreDA_augmentation
from ..trainer.loss_functions import MultipleOutputLoss2
from ..trainer.network_trainer import maybe_to_torch, to_cuda
from ..trainer.nnUNetTrainer import nnUNetTrainer
from ..networks.nnunet_model import Generic_UNet
from ..data.default_data_augmentation import default_2D_augmentation_params, \
    get_patch_size, default_3D_augmentation_params
from ..data.dataset_loading import unpack_dataset
from sklearn.model_selection import KFold
from torch.cuda.amp import autocast
from batchgenerators.utilities.file_and_folder_operations import *
from torch import nn
from loss_functions import DC_and_CE_loss
from ..networks.transunet3d_model import Generic_TransUNet_max_ppbp
import numpy as np
import torch
import torch.nn.functional as F
16,882
# See the License for the specific language governing permissions and
# limitations under the License.


softmax_helper = lambda x: F.softmax(x, 1)


def poly_lr(epoch, max_epochs, initial_lr, exponent=0.9):
    return initial_lr * (1 - epoch / max_epochs)**exponent


class InitWeights_He(object):
    def __init__(self, neg_slope=1e-2):
        self.neg_slope = neg_slope

    def __call__(self, module):
        if isinstance(module, nn.Conv3d) or isinstance(module, nn.Conv2d) or isinstance(module, nn.ConvTranspose2d) or isinstance(module, nn.ConvTranspose3d):
            module.weight = nn.init.kaiming_normal_(module.weight, a=self.neg_slope)
            if module.bias is not None:
                module.bias = nn.init.constant_(module.bias, 0)


class InitWeights_XavierUniform(object):
    def __init__(self, gain=1):
        self.gain = gain

    def __call__(self, module):
        if isinstance(module, nn.Conv3d) or isinstance(module, nn.Conv2d) or isinstance(module, nn.ConvTranspose2d) or isinstance(module, nn.ConvTranspose3d):
            module.weight = nn.init.xavier_uniform_(module.weight, self.gain)
            if module.bias is not None:
                module.bias = nn.init.constant_(module.bias, 0)


class nnUNetTrainerV2(nnUNetTrainer):
    """
    Info for Fabian: same as internal nnUNetTrainerV2_2
    """

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False, input_size=(64, 160, 160), args=None):
        super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
                         deterministic, fp16)
        if args is not None:
            self.input_size = input_size
            self.model = args.model
            self.resume = args.resume
            self.disable_ds = args.disable_ds
            self.max_num_epochs = args.max_num_epochs  # set 1 gpu training
            self.initial_lr = args.initial_lr  # 0.01
            self.args = args
            if self.disable_ds:
                print("disable_ds")
                # print("not runnable for this feature! current nnunetV2 (w/o DDP) only support deep supervision version")
                # raise NotImplementedError
            else:
                print("running DDP, inheriting nnUNetTrainerV2")
        self.save_every = 1  # prev 50
        # self.max_num_epochs = 1000
        # self.initial_lr = 1e-2
        self.deep_supervision_scales = None
        self.ds_loss_weights = None
        self.pin_memory = True

    def initialize(self, training=True, force_load_plans=False):
        """
        - replaced get_default_augmentation with get_moreDA_augmentation
        - enforce to only run this code once
        - loss function wrapper for deep supervision

        :param training:
        :param force_load_plans:
        :return:
        """
        if not self.was_initialized:
            maybe_mkdir_p(self.output_folder)

            if force_load_plans or (self.plans is None):
                self.load_plans_file()

            self.process_plans(self.plans)

            self.setup_DA_params()

            ################# Here we wrap the loss for deep supervision ############
            # we need to know the number of outputs of the network
            net_numpool = len(self.net_num_pool_op_kernel_sizes)

            # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases
            # this gives higher resolution outputs more weight in the loss
            weights = np.array([1 / (2 ** i) for i in range(net_numpool)])

            # we don't use the lowest 2 outputs. Normalize weights so that they sum to 1
            mask = np.array([True] + [True if i < net_numpool - 1 else False for i in range(1, net_numpool)])
            weights[~mask] = 0
            weights = weights / weights.sum()
            self.ds_loss_weights = weights

            if self.disable_ds:
                self.ds_loss_weights[0] = 1
                self.ds_loss_weights[1:] = 0
                self.loss = DC_and_CE_loss({'batch_dice': self.batch_dice, 'smooth': 1e-5, 'do_bg': False}, {})
            else:
                # now wrap the loss
                self.loss = MultipleOutputLoss2(self.loss, self.ds_loss_weights)
            ################# END ###################

            self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] +
                                                      "_stage%d" % self.stage)
            if training:
                self.dl_tr, self.dl_val = self.get_basic_generators()
                if self.unpack_data:
                    print("unpacking dataset")
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


softmax_helper = lambda x: F.softmax(x, 1)


def poly_lr(epoch, max_epochs, initial_lr, exponent=0.9):
    return initial_lr * (1 - epoch / max_epochs)**exponent


class InitWeights_He(object):
    def __init__(self, neg_slope=1e-2):
        self.neg_slope = neg_slope

    def __call__(self, module):
        if isinstance(module, nn.Conv3d) or isinstance(module, nn.Conv2d) or isinstance(module, nn.ConvTranspose2d) or isinstance(module, nn.ConvTranspose3d):
            module.weight = nn.init.kaiming_normal_(module.weight, a=self.neg_slope)
            if module.bias is not None:
                module.bias = nn.init.constant_(module.bias, 0)


class InitWeights_XavierUniform(object):
    def __init__(self, gain=1):
        self.gain = gain

    def __call__(self, module):
        if isinstance(module, nn.Conv3d) or isinstance(module, nn.Conv2d) or isinstance(module, nn.ConvTranspose2d) or isinstance(module, nn.ConvTranspose3d):
            module.weight = nn.init.xavier_uniform_(module.weight, self.gain)
            if module.bias is not None:
                module.bias = nn.init.constant_(module.bias, 0)


class nnUNetTrainerV2(nnUNetTrainer):
    """
    Info for Fabian: same as internal nnUNetTrainerV2_2
    """

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False, input_size=(64, 160, 160), args=None):
        super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
                         deterministic, fp16)
        if args is not None:
            self.input_size = input_size
            self.model = args.model
            self.resume = args.resume
            self.disable_ds = args.disable_ds
            self.max_num_epochs = args.max_num_epochs  # set 1 gpu training
            self.initial_lr = args.initial_lr  # 0.01
            self.args = args
            if self.disable_ds:
                print("disable_ds")
                # print("not runnable for this feature! current nnunetV2 (w/o DDP) only support deep supervision version")
                # raise NotImplementedError
            else:
                print("running DDP, inheriting nnUNetTrainerV2")
        self.save_every = 1  # prev 50
        # self.max_num_epochs = 1000
        # self.initial_lr = 1e-2
        self.deep_supervision_scales = None
        self.ds_loss_weights = None
        self.pin_memory = True

    def initialize(self, training=True, force_load_plans=False):
        """
        - replaced get_default_augmentation with get_moreDA_augmentation
        - enforce to only run this code once
        - loss function wrapper for deep supervision

        :param training:
        :param force_load_plans:
        :return:
        """
        if not self.was_initialized:
            maybe_mkdir_p(self.output_folder)

            if force_load_plans or (self.plans is None):
                self.load_plans_file()

            self.process_plans(self.plans)

            self.setup_DA_params()

            ################# Here we wrap the loss for deep supervision ############
            # we need to know the number of outputs of the network
            net_numpool = len(self.net_num_pool_op_kernel_sizes)

            # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases
            # this gives higher resolution outputs more weight in the loss
            weights = np.array([1 / (2 ** i) for i in range(net_numpool)])

            # we don't use the lowest 2 outputs. Normalize weights so that they sum to 1
            mask = np.array([True] + [True if i < net_numpool - 1 else False for i in range(1, net_numpool)])
            weights[~mask] = 0
            weights = weights / weights.sum()
            self.ds_loss_weights = weights

            if self.disable_ds:
                self.ds_loss_weights[0] = 1
                self.ds_loss_weights[1:] = 0
                self.loss = DC_and_CE_loss({'batch_dice': self.batch_dice, 'smooth': 1e-5, 'do_bg': False}, {})
            else:
                # now wrap the loss
                self.loss = MultipleOutputLoss2(self.loss, self.ds_loss_weights)
            ################# END ###################

            self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] +
                                                      "_stage%d" % self.stage)
            if training:
                self.dl_tr, self.dl_val = self.get_basic_generators()
                if self.unpack_data:
                    print("unpacking dataset")
unpack_dataset(self.folder_with_preprocessed_data)
7
2023-10-11 05:19:25+00:00
24k
AMAAI-Lab/Video2Music
train.py
[ { "identifier": "compute_vevo_accuracy", "path": "dataset/vevo_dataset.py", "snippet": "def compute_vevo_accuracy(out, tgt):\n softmax = nn.Softmax(dim=-1)\n out = torch.argmax(softmax(out), dim=-1)\n\n out = out.flatten()\n tgt = tgt.flatten()\n\n mask = (tgt != CHORD_PAD)\n\n out = o...
import os
import csv
import shutil
import torch
import torch.nn as nn
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from torch.optim import Adam
from dataset.vevo_dataset import compute_vevo_accuracy, create_vevo_datasets
from model.music_transformer import MusicTransformer
from model.video_music_transformer import VideoMusicTransformer
from model.loss import SmoothCrossEntropyLoss
from utilities.constants import *
from utilities.device import get_device, use_cuda
from utilities.lr_scheduling import LrStepTracker, get_lr
from utilities.argument_funcs import parse_train_args, print_train_args, write_model_params
from utilities.run_model_vevo import train_epoch, eval_model
from torch.utils.tensorboard import SummaryWriter
14,509
    if args.is_video:
        for vf in train_dataset[0]["semanticList"]:
            total_vf_dim += vf.shape[1]
        total_vf_dim += 1  # Scene_offset
        total_vf_dim += 1  # Motion

        # Emotion
        if args.emo_model.startswith("6c"):
            total_vf_dim += 6
        else:
            total_vf_dim += 5

    train_loader = DataLoader(train_dataset, batch_size=args.batch_size, num_workers=args.n_workers, shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=args.batch_size, num_workers=args.n_workers)

    if args.is_video:
        model = VideoMusicTransformer(n_layers=args.n_layers, num_heads=args.num_heads,
                                      d_model=args.d_model, dim_feedforward=args.dim_feedforward, dropout=args.dropout,
                                      max_sequence_midi=args.max_sequence_midi, max_sequence_video=args.max_sequence_video,
                                      max_sequence_chord=args.max_sequence_chord, total_vf_dim=total_vf_dim,
                                      rpr=args.rpr).to(get_device())
    else:
        model = MusicTransformer(n_layers=args.n_layers, num_heads=args.num_heads,
                                 d_model=args.d_model, dim_feedforward=args.dim_feedforward, dropout=args.dropout,
                                 max_sequence_midi=args.max_sequence_midi, max_sequence_chord=args.max_sequence_chord,
                                 rpr=args.rpr).to(get_device())

    start_epoch = BASELINE_EPOCH
    if(args.continue_weights is not None):
        if(args.continue_epoch is None):
            print("ERROR: Need epoch number to continue from (-continue_epoch) when using continue_weights")
            assert(False)
        else:
            model.load_state_dict(torch.load(args.continue_weights))
            start_epoch = args.continue_epoch
    elif(args.continue_epoch is not None):
        print("ERROR: Need continue weights (-continue_weights) when using continue_epoch")
        assert(False)

    ##### Lr Scheduler vs static lr #####
    if(args.lr is None):
        if(args.continue_epoch is None):
            init_step = 0
        else:
            init_step = args.continue_epoch * len(train_loader)
        lr = LR_DEFAULT_START
        lr_stepper = LrStepTracker(args.d_model, SCHEDULER_WARMUP_STEPS, init_step)
    else:
        lr = args.lr

    ##### Not smoothing evaluation loss #####
    eval_loss_func = nn.CrossEntropyLoss(ignore_index=CHORD_PAD)

    ##### SmoothCrossEntropyLoss or CrossEntropyLoss for training #####
    if(args.ce_smoothing is None):
        train_loss_func = eval_loss_func
    else:
        train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, CHORD_SIZE, ignore_index=CHORD_PAD)

    eval_loss_emotion_func = nn.BCEWithLogitsLoss()
    train_loss_emotion_func = eval_loss_emotion_func

    ##### Optimizer #####
    opt = Adam(model.parameters(), lr=lr, betas=(ADAM_BETA_1, ADAM_BETA_2), eps=ADAM_EPSILON)

    if(args.lr is None):
        lr_scheduler = LambdaLR(opt, lr_stepper.step)
    else:
        lr_scheduler = None

    ##### Tracking best evaluation loss #####
    best_eval_loss = float("inf")
    best_eval_loss_epoch = -1

    ##### Results reporting #####
    if(not os.path.isfile(results_file)):
        with open(results_file, "w", newline="") as o_stream:
            writer = csv.writer(o_stream)
            writer.writerow(CSV_HEADER)

    ##### TRAIN LOOP #####
    for epoch in range(start_epoch, args.epochs):
        if(epoch > BASELINE_EPOCH):
            print(SEPERATOR)
            print("NEW EPOCH:", epoch+1)
            print(SEPERATOR)
            print("")

            # Train
            train_epoch(epoch+1, model, train_loader, train_loss_func, train_loss_emotion_func,
                        opt, lr_scheduler, args.print_modulus, isVideo=args.is_video)

            print(SEPERATOR)
            print("Evaluating:")
        else:
            print(SEPERATOR)
            print("Baseline model evaluation (Epoch 0):")

        train_metric_dict = eval_model(model, train_loader, train_loss_func, train_loss_emotion_func, isVideo=args.is_video)
        train_total_loss = train_metric_dict["avg_total_loss"]
        train_loss_chord = train_metric_dict["avg_loss_chord"]
        train_loss_emotion = train_metric_dict["avg_loss_emotion"]
        train_h1 = train_metric_dict["avg_h1"]
        train_h3 = train_metric_dict["avg_h3"]
        train_h5 = train_metric_dict["avg_h5"]

        eval_metric_dict = eval_model(model, val_loader, eval_loss_func, eval_loss_emotion_func, isVideo=args.is_video)
        eval_total_loss = eval_metric_dict["avg_total_loss"]
        eval_loss_chord = eval_metric_dict["avg_loss_chord"]
        eval_loss_emotion = eval_metric_dict["avg_loss_emotion"]
        eval_h1 = eval_metric_dict["avg_h1"]
        eval_h3 = eval_metric_dict["avg_h3"]
        eval_h5 = eval_metric_dict["avg_h5"]
CSV_HEADER = ["Epoch", "Learn rate", "Avg Train loss (total)", "Avg Train loss (chord)", "Avg Train loss (emotion)",
              "Avg Eval loss (total)", "Avg Eval loss (chord)", "Avg Eval loss (emotion)"]

BASELINE_EPOCH = -1

version = VERSION
split_ver = SPLIT_VER
split_path = "split_" + split_ver

VIS_MODELS_ARR = [
    "2d/clip_l14p"
]

# main
def main( vm = "" , isPrintArgs = True ):
    args = parse_train_args()

    if isPrintArgs:
        print_train_args(args)

    if vm != "":
        args.vis_models = vm

    if args.is_video:
        vis_arr = args.vis_models.split(" ")
        vis_arr.sort()
        vis_abbr_path = ""
        for v in vis_arr:
            vis_abbr_path = vis_abbr_path + "_" + VIS_ABBR_DIC[v]
        vis_abbr_path = vis_abbr_path[1:]
    else:
        vis_abbr_path = "no_video"

    if(args.force_cpu):
        use_cuda(False)
        print("WARNING: Forced CPU usage, expect model to perform slower")
        print("")

    os.makedirs( args.output_dir, exist_ok=True)
    os.makedirs( os.path.join( args.output_dir, version), exist_ok=True)

    ##### Output prep #####
    params_file = os.path.join(args.output_dir, version, "model_params.txt")
    write_model_params(args, params_file)

    weights_folder = os.path.join(args.output_dir, version, "weights")
    os.makedirs(weights_folder, exist_ok=True)

    results_folder = os.path.join(args.output_dir, version)
    os.makedirs(results_folder, exist_ok=True)

    results_file = os.path.join(results_folder, "results.csv")
    best_loss_file = os.path.join(results_folder, "best_loss_weights.pickle")
    best_text = os.path.join(results_folder, "best_epochs.txt")

    ##### Tensorboard #####
    if(args.no_tensorboard):
        tensorboard_summary = None
    else:
        tensorboad_dir = os.path.join(args.output_dir, version, "tensorboard")
        tensorboard_summary = SummaryWriter(log_dir=tensorboad_dir)

    train_dataset, val_dataset, _ = create_vevo_datasets(
        dataset_root = "./dataset/",
        max_seq_chord = args.max_sequence_chord,
        max_seq_video = args.max_sequence_video,
        vis_models = args.vis_models,
        emo_model = args.emo_model,
        split_ver = SPLIT_VER,
        random_seq = True,
        is_video = args.is_video)

    total_vf_dim = 0
    if args.is_video:
        for vf in train_dataset[0]["semanticList"]:
            total_vf_dim += vf.shape[1]
        total_vf_dim += 1  # Scene_offset
        total_vf_dim += 1  # Motion

        # Emotion
        if args.emo_model.startswith("6c"):
            total_vf_dim += 6
        else:
            total_vf_dim += 5

    train_loader = DataLoader(train_dataset, batch_size=args.batch_size, num_workers=args.n_workers, shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=args.batch_size, num_workers=args.n_workers)

    if args.is_video:
        model = VideoMusicTransformer(n_layers=args.n_layers, num_heads=args.num_heads,
                                      d_model=args.d_model, dim_feedforward=args.dim_feedforward, dropout=args.dropout,
                                      max_sequence_midi=args.max_sequence_midi, max_sequence_video=args.max_sequence_video,
                                      max_sequence_chord=args.max_sequence_chord, total_vf_dim=total_vf_dim,
                                      rpr=args.rpr).to(get_device())
    else:
        model = MusicTransformer(n_layers=args.n_layers, num_heads=args.num_heads,
                                 d_model=args.d_model, dim_feedforward=args.dim_feedforward, dropout=args.dropout,
                                 max_sequence_midi=args.max_sequence_midi, max_sequence_chord=args.max_sequence_chord,
                                 rpr=args.rpr).to(get_device())

    start_epoch = BASELINE_EPOCH
    if(args.continue_weights is not None):
        if(args.continue_epoch is None):
            print("ERROR: Need epoch number to continue from (-continue_epoch) when using continue_weights")
            assert(False)
        else:
            model.load_state_dict(torch.load(args.continue_weights))
            start_epoch = args.continue_epoch
    elif(args.continue_epoch is not None):
        print("ERROR: Need continue weights (-continue_weights) when using continue_epoch")
        assert(False)

    ##### Lr Scheduler vs static lr #####
    if(args.lr is None):
        if(args.continue_epoch is None):
            init_step = 0
        else:
            init_step = args.continue_epoch * len(train_loader)
        lr = LR_DEFAULT_START
        lr_stepper = LrStepTracker(args.d_model, SCHEDULER_WARMUP_STEPS, init_step)
    else:
        lr = args.lr

    ##### Not smoothing evaluation loss #####
    eval_loss_func = nn.CrossEntropyLoss(ignore_index=CHORD_PAD)

    ##### SmoothCrossEntropyLoss or CrossEntropyLoss for training #####
    if(args.ce_smoothing is None):
        train_loss_func = eval_loss_func
    else:
        train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, CHORD_SIZE, ignore_index=CHORD_PAD)

    eval_loss_emotion_func = nn.BCEWithLogitsLoss()
    train_loss_emotion_func = eval_loss_emotion_func

    ##### Optimizer #####
    opt = Adam(model.parameters(), lr=lr, betas=(ADAM_BETA_1, ADAM_BETA_2), eps=ADAM_EPSILON)

    if(args.lr is None):
        lr_scheduler = LambdaLR(opt, lr_stepper.step)
    else:
        lr_scheduler = None

    ##### Tracking best evaluation loss #####
    best_eval_loss = float("inf")
    best_eval_loss_epoch = -1

    ##### Results reporting #####
    if(not os.path.isfile(results_file)):
        with open(results_file, "w", newline="") as o_stream:
            writer = csv.writer(o_stream)
            writer.writerow(CSV_HEADER)

    ##### TRAIN LOOP #####
    for epoch in range(start_epoch, args.epochs):
        if(epoch > BASELINE_EPOCH):
            print(SEPERATOR)
            print("NEW EPOCH:", epoch+1)
            print(SEPERATOR)
            print("")

            # Train
            train_epoch(epoch+1, model, train_loader, train_loss_func, train_loss_emotion_func,
                        opt, lr_scheduler, args.print_modulus, isVideo=args.is_video)

            print(SEPERATOR)
            print("Evaluating:")
        else:
            print(SEPERATOR)
            print("Baseline model evaluation (Epoch 0):")

        train_metric_dict = eval_model(model, train_loader, train_loss_func, train_loss_emotion_func, isVideo=args.is_video)
        train_total_loss = train_metric_dict["avg_total_loss"]
        train_loss_chord = train_metric_dict["avg_loss_chord"]
        train_loss_emotion = train_metric_dict["avg_loss_emotion"]
        train_h1 = train_metric_dict["avg_h1"]
        train_h3 = train_metric_dict["avg_h3"]
        train_h5 = train_metric_dict["avg_h5"]

        eval_metric_dict = eval_model(model, val_loader, eval_loss_func, eval_loss_emotion_func, isVideo=args.is_video)
        eval_total_loss = eval_metric_dict["avg_total_loss"]
        eval_loss_chord = eval_metric_dict["avg_loss_chord"]
        eval_loss_emotion = eval_metric_dict["avg_loss_emotion"]
        eval_h1 = eval_metric_dict["avg_h1"]
        eval_h3 = eval_metric_dict["avg_h3"]
        eval_h5 = eval_metric_dict["avg_h5"]
lr = get_lr(opt)
8
2023-10-13 09:06:24+00:00
24k
eai-lab/On-NAS
cifar_search.py
[ { "identifier": "genotypes", "path": "utils/genotypes.py", "snippet": "PRIMITIVES = [\n \"max_pool_3x3\",\n \"avg_pool_3x3\",\n \"skip_connect\", # identity\n \"sep_conv_3x3\",\n \"sep_conv_5x5\",\n \"dil_conv_3x3\",\n \"dil_conv_5x5\",\n \"none\",\n]\nPRIMITIVES_FEWSHOT = [\n ...
import os
import torch
import torch.nn as nn
import numpy as np
import utils.utils as utils
import random
import time
import pandas as pd
import copy
import argparse
from utils import genotypes as gt
from models.search_cnn import SearchCNNController
from models.search_cnn_PC import SearchCNNControllerPC
from task_optimizer.darts import Darts, Architect
from task_optimizer.darts import train as d_train
from tqdm import tqdm
from tqdm import tqdm
16,121
def _init_alpha_normalizer(name, task_train_steps, t_max, t_min, temp_anneal_mode):
    normalizer = dict()
    normalizer["name"] = name
    normalizer["params"] = dict()
    normalizer["params"]["curr_step"] = 0.0  # current step for scheduling normalizer
    normalizer["params"]["max_steps"] = float(
        task_train_steps
    )  # for scheduling normalizer
    normalizer["params"]["t_max"] = t_max
    normalizer["params"]["t_min"] = t_min
    normalizer["params"]["temp_anneal_mode"] = temp_anneal_mode  # temperature annealing
    return normalizer


def main(config):
    # set default gpu device id
    torch.cuda.set_device(config.gpus[0])

    # set seed
    np.random.seed(config.seed)
    torch.manual_seed(config.seed)
    torch.cuda.manual_seed_all(config.seed)
    random.seed(config.seed)

    torch.backends.cudnn.benchmark = True

    # get data with meta info
    input_size, input_channels, n_classes, train_data = utils.get_data(
        config.dataset, config.data_path, cutout_length=0, validation=False)
    _, _, _, _, test_data = utils.get_data(config.dataset, config.data_path, cutout_length=0, validation=True)

    # input my model architecture here
    normalizer = _init_alpha_normalizer(
        config.normalizer,
        config.task_train_steps,
        config.normalizer_t_max,
        config.normalizer_t_min,
        config.normalizer_temp_anneal_mode,
    )

    net_crit = nn.CrossEntropyLoss().to(device)
    model = SearchCNNController(
        3,
        config.init_channels,
        config.k,
        config.layers,
        config,
        n_nodes=config.nodes,
        reduction_layers=config.reduction_layers,
        device_ids=config.gpus,
        normalizer=normalizer,
        PRIMITIVES=gt.PRIMITIVES,
        feature_scale_rate=1,
        use_hierarchical_alphas=config.use_hierarchical_alphas,
        use_pairwise_input_alphas=config.use_pairwise_input_alphas,
        alpha_prune_threshold=config.alpha_prune_threshold,
    )
    if config.meta_model == 'pc_adaptation':
        print("model created as PC adaptation")
        model = SearchCNNControllerPC(
            3,
            config.init_channels,
            config.k,
            config.layers,
            n_nodes=config.nodes,
            reduction_layers=config.reduction_layers,
            device_ids=config.gpus,
            normalizer=normalizer,
            PRIMITIVES=gt.PRIMITIVES,
            feature_scale_rate=1,
            use_hierarchical_alphas=config.use_hierarchical_alphas,
            use_pairwise_input_alphas=config.use_pairwise_input_alphas,
            use_pc_adaptation=True,
            alpha_prune_threshold=config.alpha_prune_threshold
        )
    ############################################################
    model = model.to(device)

    # weights optimizer
    w_optim = torch.optim.Adam(model.weights(), config.w_lr, betas=(0.0, 0.999),
                               weight_decay=config.w_weight_decay)
    # alphas optimizer
    alpha_optim = torch.optim.Adam(model.alphas(), config.alpha_lr, betas=(0.0, 0.999),
                                   weight_decay=config.alpha_weight_decay)

    # split data to train/validation
    n_train = len(train_data)
    split = n_train // 2  # changed here
    indices = list(range(n_train))
    train_sampler = torch.utils.data.sampler.SubsetRandomSampler(indices[split:])  # and order of these
    valid_sampler = torch.utils.data.sampler.SubsetRandomSampler(indices[:split])
    train_loader = torch.utils.data.DataLoader(train_data,
                                               batch_size=config.batch_size,
                                               sampler=train_sampler,
                                               num_workers=config.workers,
                                               pin_memory=True)
    valid_loader = torch.utils.data.DataLoader(train_data,
                                               batch_size=config.batch_size,
                                               sampler=valid_sampler,
                                               num_workers=config.workers,
                                               pin_memory=True)
    test_loader = torch.utils.data.DataLoader(test_data,
                                              batch_size=config.batch_size,
                                              shuffle=True,
                                              num_workers=config.workers,
                                              pin_memory=True)
    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(w_optim, config.epochs, eta_min=0.0)
""" Search cell """ ''' Based on https://github.com/boschresearch/metanas which is licensed under GNU Affero General Public License, ''' device = torch.device("cuda") # tensorboard def _init_alpha_normalizer(name, task_train_steps, t_max, t_min, temp_anneal_mode): normalizer = dict() normalizer["name"] = name normalizer["params"] = dict() normalizer["params"]["curr_step"] = 0.0 # current step for scheduling normalizer normalizer["params"]["max_steps"] = float( task_train_steps ) # for scheduling normalizer normalizer["params"]["t_max"] = t_max normalizer["params"]["t_min"] = t_min normalizer["params"]["temp_anneal_mode"] = temp_anneal_mode # temperature annealing return normalizer def main(config): # set default gpu device id torch.cuda.set_device(config.gpus[0]) # set seed np.random.seed(config.seed) torch.manual_seed(config.seed) torch.cuda.manual_seed_all(config.seed) random.seed(config.seed) torch.backends.cudnn.benchmark = True # get data with meta info input_size, input_channels, n_classes, train_data = utils.get_data( config.dataset, config.data_path, cutout_length=0, validation=False) _,_,_,_,test_data = utils.get_data(config.dataset, config.data_path, cutout_length=0, validation=True) # input my model architecture here normalizer = _init_alpha_normalizer( config.normalizer, config.task_train_steps, config.normalizer_t_max, config.normalizer_t_min, config.normalizer_temp_anneal_mode, ) net_crit = nn.CrossEntropyLoss().to(device) model = SearchCNNController( 3, config.init_channels, config.k, config.layers, config, n_nodes=config.nodes, reduction_layers=config.reduction_layers, device_ids=config.gpus, normalizer=normalizer, PRIMITIVES=gt.PRIMITIVES, feature_scale_rate=1, use_hierarchical_alphas=config.use_hierarchical_alphas, use_pairwise_input_alphas=config.use_pairwise_input_alphas, alpha_prune_threshold=config.alpha_prune_threshold, ) if config.meta_model == 'pc_adaptation': print("model created as PC adaptation") model = SearchCNNControllerPC( 3, config.init_channels, config.k, config.layers, n_nodes=config.nodes, reduction_layers=config.reduction_layers, device_ids=config.gpus, normalizer=normalizer, PRIMITIVES=gt.PRIMITIVES, feature_scale_rate=1, use_hierarchical_alphas=config.use_hierarchical_alphas, use_pairwise_input_alphas=config.use_pairwise_input_alphas, use_pc_adaptation=True, alpha_prune_threshold=config.alpha_prune_threshold ) ############################################################ model = model.to(device) # weights optimizer w_optim = torch.optim.Adam(model.weights(), config.w_lr, betas=(0.0, 0.999), weight_decay=config.w_weight_decay) # alphas optimizer alpha_optim = torch.optim.Adam(model.alphas(), config.alpha_lr, betas=(0.0, 0.999), weight_decay=config.alpha_weight_decay) # split data to train/validation n_train = len(train_data) split = n_train // 2 # changed here indices = list(range(n_train)) train_sampler = torch.utils.data.sampler.SubsetRandomSampler(indices[split:]) #and order of these valid_sampler = torch.utils.data.sampler.SubsetRandomSampler(indices[:split]) train_loader = torch.utils.data.DataLoader(train_data, batch_size=config.batch_size, sampler=train_sampler, num_workers=config.workers, pin_memory=True) valid_loader = torch.utils.data.DataLoader(train_data, batch_size=config.batch_size, sampler=valid_sampler, num_workers=config.workers, pin_memory=True) test_loader = torch.utils.data.DataLoader(test_data,batch_size=config.batch_size, shuffle=True, num_workers=config.workers, pin_memory=True) lr_scheduler = 
torch.optim.lr_scheduler.CosineAnnealingLR(w_optim, config.epochs, eta_min=0.0)
architect = Architect(model, config.w_momentum, config.w_weight_decay, use_first_order_darts=True)
4
2023-10-08 02:42:27+00:00
24k
LukeForeverYoung/UReader
serve/model_worker.py
[ { "identifier": "IO", "path": "serve/io_utils.py", "snippet": "class IO:\n @staticmethod\n def register(options):\n pass\n\n def open(self, path: str, mode: str):\n raise NotImplementedError\n\n def exists(self, path: str) -> bool:\n raise NotImplementedError\n\n def ...
from PIL import Image
from io import BytesIO
from .io_utils import IO, DefaultIO, OSS
from mplug_owl.processing_mplug_owl import MplugOwlProcessor, MplugOwlImageProcessor
from mplug_owl.modeling_mplug_owl import MplugOwlForConditionalGeneration
from mplug_owl.configuration_mplug_owl import MplugOwlConfig
from mplug_owl.tokenization_mplug_owl import MplugOwlTokenizer
from transformers import GenerationConfig
from .model_utils import post_process_output, Stream, Iteratorize
from pathlib import Path
from mplug_owl.processing_mplug_owl import MplugOwlProcessor
from mplug_owl.modeling_mplug_owl import MplugOwlForConditionalGeneration
from pipeline.data_utils.processors.builder import build_processors
from pipeline.data_utils.processors import *
from transformers.models.llama.tokenization_llama import LlamaTokenizer
from icecream import ic
import torch
import gradio as gr
import logging
import sys
import os
import json
import requests
import datetime
import uuid
import base64
import time
import sys
import transformers
15,300
#             text += text_list[ri+1]
#         images = torch.cat(images, dim=0)
#         patch_position = torch.cat(patch_position, dim=0)
#         print(text)
#         ic(images.shape)
#         ic(patch_position.shape)
#         from mplug_owl.processing_mplug_owl import tokenize_prompts
#         input_ids = tokenize_prompts(text, tokenizer=self.tokenizer, return_tensors='pt')
#         return {
#             "pixel_values": images,
#             'patch_position': patch_position,
#             "input_ids": input_ids
#         }


class mPLUG_Owl_Server:
    def __init__(
        self,
        base_model='MAGAer13/mplug-owl-llama-7b',
        log_dir='./',
        load_in_8bit=False,
        bf16=True,
        device="cuda",
        io=None,
        config=None,
    ):
        self.log_dir = log_dir
        self.config = config
        self.image_processor = build_processors(config['valid_processors'])['sft']
        self.tokenizer = LlamaTokenizer.from_pretrained(base_model)
        self.processor = MplugOwlProcessor(self.image_processor, self.tokenizer)
        self.model = MplugOwlForConditionalGeneration.from_pretrained(
            base_model,
            torch_dtype=torch.float,
        )
        ckpt = {}
        for cf in Path(base_model).iterdir():
            if 'pytorch_model' in cf.name and cf.name.endswith('.bin'):
                ckpt.update(torch.load(cf, map_location='cpu'))
        msg = self.model.load_state_dict(ckpt, strict=False)
        print(msg)
        del ckpt
        self.bf16 = bf16
        self.load_in_8bit = load_in_8bit

        if not load_in_8bit:
            if bf16:
                self.model.bfloat16()
            else:
                self.model.half()
        self.model.cuda()
        self.model.eval()

        self.io = io

    def evaluate(
        self,
        pixel_values=None,
        patch_positions=None,
        input_ids=None,
        temperature=1.0,
        top_p=0.9,
        top_k=5,
        num_beams=3,
        max_new_tokens=256,
        stream_output=True,
        length_penalty=1.0,
        no_repeat_ngram_size=2,
        do_sample=False,
        early_stopping=True,
        **kwargs
    ):
        generation_config = dict(
            temperature=temperature,
            top_p=top_p,
            top_k=top_k,
            num_beams=num_beams,
            no_repeat_ngram_size=no_repeat_ngram_size,
            do_sample=do_sample,
            early_stopping=early_stopping,
            length_penalty=length_penalty,
        )

        generate_params = {
            "pixel_values": pixel_values,
            "patch_positions": patch_positions,
            "input_ids": input_ids,
            "return_dict_in_generate": True,
            "output_scores": True,
            "max_new_tokens": max_new_tokens,
        }
        generate_params.update(generation_config)

        if stream_output:
            # Stream the reply 1 token at a time.
            # This is based on the trick of using 'stopping_criteria' to create an iterator,
            # from https://github.com/oobabooga/text-generation-webui/blob/ad37f396fc8bcbab90e11ecf17c56c97bfbd4a9c/modules/text_generation.py#L216-L243.

            def generate_with_callback(callback=None, **kwargs):
                kwargs.setdefault(
                    "stopping_criteria", transformers.StoppingCriteriaList()
                )
                kwargs["stopping_criteria"].append(Stream(callback_func=callback))
                with torch.no_grad():
                    self.model.generate(**kwargs)

            def generate_with_streaming(**kwargs):
                return Iteratorize(generate_with_callback, kwargs, callback=None)

            with generate_with_streaming(**generate_params) as generator:
                for output in generator:
                    # new_tokens = len(output) - len(input_ids[0])
                    decoded_output = self.tokenizer.decode(output)
                    if output[-1] in [self.tokenizer.eos_token_id]:
                        break
sys.path.append("..") server_error_msg = "**NETWORK ERROR DUE TO HIGH TRAFFIC. PLEASE REGENERATE OR REFRESH THIS PAGE.**" # from pipeline.data_utils.xgpt3_dataset import ImageIO # class ImageProcessor(object): # def __init__(self, resolution=224, tokenizer=None): # normalize = transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)) # # self.transform = transforms.Compose([ # # transforms.Resize((resolution, resolution),interpolation=Image.BICUBIC), # # transforms.ToTensor(), # # normalize, # # ]) # from megatron.data.processors import doc_processor # processor_class = os.environ.get('DocProcessor','DocSFTProcessor') # self.transform = getattr(doc_processor,processor_class)() # self.image_io = ImageIO() # self.tokenizer=tokenizer # def __call__(self, image_paths, prompts): # if isinstance(image_paths, str): # image_paths = [image_paths] # images = [] # images = self.image_io._load_img(image_paths) # images = [self.transform(image, None) for image in images] # image_input, text_input, patch_position # patch_position = [_[2] for _ in images] # images = [_[0] for _ in images] # text_list = prompts[0].split('<image>') # text = text_list[0] # for ri, image in enumerate(images): # if args.patch_pos_embed_type == 'pre': # # 对于pre处理 v2t最终输出的是一张图的token # text += '<image>' # else: # # 对于post处理 v2t最终输出的是多图 # text += '<image>'*image.shape[0] # text += text_list[ri+1] # images = torch.cat(images, dim=0) # patch_position = torch.cat(patch_position, dim=0) # print(text) # ic(images.shape) # ic(patch_position.shape) # from mplug_owl.processing_mplug_owl import tokenize_prompts # input_ids = tokenize_prompts(text, tokenizer=self.tokenizer, return_tensors='pt') # return { # "pixel_values": images, # 'patch_position': patch_position, # "input_ids": input_ids # } class mPLUG_Owl_Server: def __init__( self, base_model='MAGAer13/mplug-owl-llama-7b', log_dir='./', load_in_8bit=False, bf16=True, device="cuda", io=None, config=None, ): self.log_dir = log_dir self.config = config self.image_processor = build_processors(config['valid_processors'])['sft'] self.tokenizer = LlamaTokenizer.from_pretrained(base_model) self.processor = MplugOwlProcessor(self.image_processor, self.tokenizer) self.model = MplugOwlForConditionalGeneration.from_pretrained( base_model, torch_dtype=torch.float, ) ckpt = {} for cf in Path(base_model).iterdir(): if 'pytorch_model' in cf.name and cf.name.endswith('.bin'): ckpt.update(torch.load(cf, map_location='cpu')) msg = self.model.load_state_dict(ckpt, strict=False) print(msg) del ckpt self.bf16 = bf16 self.load_in_8bit = load_in_8bit if not load_in_8bit: if bf16: self.model.bfloat16() else: self.model.half() self.model.cuda() self.model.eval() self.io = io def evaluate( self, pixel_values=None, patch_positions=None, input_ids=None, temperature=1.0, top_p=0.9, top_k=5, num_beams=3, max_new_tokens=256, stream_output=True, length_penalty=1.0, no_repeat_ngram_size=2, do_sample=False, early_stopping=True, **kwargs ): generation_config = dict( temperature=temperature, top_p=top_p, top_k=top_k, num_beams=num_beams, no_repeat_ngram_size=no_repeat_ngram_size, do_sample=do_sample, early_stopping=early_stopping, length_penalty=length_penalty, ) generate_params = { "pixel_values": pixel_values, "patch_positions": patch_positions, "input_ids": input_ids, "return_dict_in_generate": True, "output_scores": True, "max_new_tokens": max_new_tokens, } generate_params.update(generation_config) if stream_output: # Stream the reply 1 token at a time. 
# This is based on the trick of using 'stopping_criteria' to create an iterator, # from https://github.com/oobabooga/text-generation-webui/blob/ad37f396fc8bcbab90e11ecf17c56c97bfbd4a9c/modules/text_generation.py#L216-L243. def generate_with_callback(callback=None, **kwargs): kwargs.setdefault( "stopping_criteria", transformers.StoppingCriteriaList() ) kwargs["stopping_criteria"].append(Stream(callback_func=callback)) with torch.no_grad(): self.model.generate(**kwargs) def generate_with_streaming(**kwargs): return Iteratorize(generate_with_callback, kwargs, callback=None) with generate_with_streaming(**generate_params) as generator: for output in generator: # new_tokens = len(output) - len(input_ids[0]) decoded_output = self.tokenizer.decode(output) if output[-1] in [self.tokenizer.eos_token_id]: break
yield post_process_output(decoded_output)
8
2023-10-08 06:29:02+00:00
24k
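The Stream/Iteratorize pair imported in the record above converts callback-driven token generation into an ordinary Python iterator, which is what lets evaluate() yield partial decodes one token at a time. Below is a minimal, self-contained sketch of that pattern using only the standard library; Iteratorize here is an illustrative reimplementation, and fake_generate merely stands in for model.generate plus Stream's per-token callback, not the repo's actual code.

import queue
import threading

class Iteratorize:
    """Run a callback-style producer on a background thread and expose its
    intermediate values as an iterator (the trick evaluate() relies on)."""
    def __init__(self, func, kwargs=None):
        self._queue = queue.Queue()
        self._sentinel = object()
        def _callback(value):
            self._queue.put(value)
        def _run():
            try:
                func(callback=_callback, **(kwargs or {}))
            finally:
                self._queue.put(self._sentinel)  # always signal completion
        threading.Thread(target=_run, daemon=True).start()
    def __iter__(self):
        return self
    def __next__(self):
        item = self._queue.get()
        if item is self._sentinel:
            raise StopIteration
        return item
    def __enter__(self):
        return self
    def __exit__(self, *exc):
        return False

def fake_generate(callback=None, n_tokens=3):
    # Stands in for model.generate(); Stream's stopping criterion would
    # invoke callback once per newly generated token.
    for token_id in range(n_tokens):
        callback(token_id)

with Iteratorize(fake_generate, {"n_tokens": 3}) as stream:
    for token_id in stream:
        print(token_id)  # prints 0, 1, 2 as they are "generated"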
LeapLabTHU/Rank-DETR
projects/rank_detr/configs/models/rank_detr_r50.py
[ { "identifier": "HungarianMatcher", "path": "detrex/modeling/matcher/matcher.py", "snippet": "class HungarianMatcher(nn.Module):\n \"\"\"HungarianMatcher which computes an assignment between targets and predictions.\n\n For efficiency reasons, the targets don't include the no_object. Because of th...
import torch.nn as nn from detectron2.modeling.backbone import ResNet, BasicStem from detectron2.layers import ShapeSpec from detectron2.config import LazyCall as L from detrex.modeling.matcher import HungarianMatcher from detrex.modeling.neck import ChannelMapper from detrex.layers import PositionEmbeddingSine from projects.rank_detr.modeling import ( RankDETR, RankDetrTransformerEncoder, RankDetrTransformerDecoder, RankDetrTransformer, RankDetrCriterion, HighOrderMatcher, )
18,287
model = L(RankDETR)( backbone=L(ResNet)( stem=L(BasicStem)(in_channels=3, out_channels=64, norm="FrozenBN"), stages=L(ResNet.make_default_stages)( depth=50, stride_in_1x1=False, norm="FrozenBN", ), out_features=["res3", "res4", "res5"], freeze_at=1, ), position_embedding=L(PositionEmbeddingSine)( num_pos_feats=128, temperature=10000, normalize=True, offset=-0.5, ), neck=L(ChannelMapper)( input_shapes={ "res3": ShapeSpec(channels=512), "res4": ShapeSpec(channels=1024), "res5": ShapeSpec(channels=2048), }, in_features=["res3", "res4", "res5"], out_channels=256, num_outs=4, kernel_size=1, norm_layer=L(nn.GroupNorm)(num_groups=32, num_channels=256), ), transformer=L(RankDetrTransformer)(
model = L(RankDETR)( backbone=L(ResNet)( stem=L(BasicStem)(in_channels=3, out_channels=64, norm="FrozenBN"), stages=L(ResNet.make_default_stages)( depth=50, stride_in_1x1=False, norm="FrozenBN", ), out_features=["res3", "res4", "res5"], freeze_at=1, ), position_embedding=L(PositionEmbeddingSine)( num_pos_feats=128, temperature=10000, normalize=True, offset=-0.5, ), neck=L(ChannelMapper)( input_shapes={ "res3": ShapeSpec(channels=512), "res4": ShapeSpec(channels=1024), "res5": ShapeSpec(channels=2048), }, in_features=["res3", "res4", "res5"], out_channels=256, num_outs=4, kernel_size=1, norm_layer=L(nn.GroupNorm)(num_groups=32, num_channels=256), ), transformer=L(RankDetrTransformer)(
encoder=L(RankDetrTransformerEncoder)(
3
2023-10-12 03:02:25+00:00
24k
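The Rank-DETR config above is built entirely from detectron2's LazyCall objects, which record a constructor call instead of executing it, so the whole model graph stays an editable config until instantiation. A small sketch of that deferral, assuming detectron2 is installed; nn.Linear is just a stand-in for the model classes used in the record.

import torch.nn as nn
from detectron2.config import LazyCall as L, instantiate

linear_cfg = L(nn.Linear)(in_features=8, out_features=2)  # nothing is built yet
linear_cfg.out_features = 4       # configs stay mutable until instantiation
layer = instantiate(linear_cfg)   # now an actual nn.Linear(8, 4)
print(layer)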
sakemin/cog-musicgen-remixer
predict.py
[ { "identifier": "MultiBandDiffusion", "path": "audiocraft/models/multibanddiffusion.py", "snippet": "class MultiBandDiffusion:\n \"\"\"Sample from multiple diffusion models.\n\n Args:\n DPs (list of DiffusionProcess): Diffusion processes.\n codec_model (CompressionModel): Underlying ...
import os import random import torchaudio import typing as tp import numpy as np import torch import librosa import subprocess import math import allin1 import pytsmod as tsm import shutil from typing import Optional from cog import BasePredictor, Input, Path from audiocraft.models import MusicGen, MultiBandDiffusion from audiocraft.solvers.compression import CompressionSolver from audiocraft.models.loaders import ( load_compression_model, load_lm_model, ) from audiocraft.data.audio import audio_write from audiocraft.models.builders import get_lm_model from omegaconf import OmegaConf from audiocraft.modules.btc.btc_model import BTC_model from audiocraft.modules.btc.utils.mir_eval_modules import idx2chord from demucs.audio import convert_audio from demucs.apply import apply_model
14,708
# Prediction interface for Cog ⚙️ # https://github.com/replicate/cog/blob/main/docs/python.md # We need to set `TRANSFORMERS_CACHE` before any imports, which is why this is up here. MODEL_PATH = "/src/models/" os.environ["TRANSFORMERS_CACHE"] = MODEL_PATH os.environ["TORCH_HOME"] = MODEL_PATH # Model specific imports def _delete_param(cfg, full_name: str): parts = full_name.split('.') for part in parts[:-1]: if part in cfg: cfg = cfg[part] else: return OmegaConf.set_struct(cfg, False) if parts[-1] in cfg: del cfg[parts[-1]] OmegaConf.set_struct(cfg, True) def load_ckpt(path, device, url=False): if url: loaded = torch.hub.load_state_dict_from_url(str(path)) else: loaded = torch.load(str(path)) cfg = OmegaConf.create(loaded['xp.cfg']) cfg.device = str(device) if cfg.device == 'cpu': cfg.dtype = 'float32' else: cfg.dtype = 'float16' _delete_param(cfg, 'conditioners.self_wav.chroma_chord.cache_path') _delete_param(cfg, 'conditioners.self_wav.chroma_stem.cache_path') _delete_param(cfg, 'conditioners.args.merge_text_conditions_p') _delete_param(cfg, 'conditioners.args.drop_desc_p') lm = get_lm_model(loaded['xp.cfg']) lm.load_state_dict(loaded['model']) lm.eval() lm.cfg = cfg compression_model = CompressionSolver.model_from_checkpoint(cfg.compression_model_checkpoint, device=device) return MusicGen(f"{os.getenv('COG_USERNAME')}/musicgen-chord", compression_model, lm) class Predictor(BasePredictor): def setup(self, weights: Optional[Path] = None): """Load the model into memory to make running multiple predictions efficient""" self.device = "cuda" if torch.cuda.is_available() else "cpu"
# Prediction interface for Cog ⚙️ # https://github.com/replicate/cog/blob/main/docs/python.md # We need to set `TRANSFORMERS_CACHE` before any imports, which is why this is up here. MODEL_PATH = "/src/models/" os.environ["TRANSFORMERS_CACHE"] = MODEL_PATH os.environ["TORCH_HOME"] = MODEL_PATH # Model specific imports def _delete_param(cfg, full_name: str): parts = full_name.split('.') for part in parts[:-1]: if part in cfg: cfg = cfg[part] else: return OmegaConf.set_struct(cfg, False) if parts[-1] in cfg: del cfg[parts[-1]] OmegaConf.set_struct(cfg, True) def load_ckpt(path, device, url=False): if url: loaded = torch.hub.load_state_dict_from_url(str(path)) else: loaded = torch.load(str(path)) cfg = OmegaConf.create(loaded['xp.cfg']) cfg.device = str(device) if cfg.device == 'cpu': cfg.dtype = 'float32' else: cfg.dtype = 'float16' _delete_param(cfg, 'conditioners.self_wav.chroma_chord.cache_path') _delete_param(cfg, 'conditioners.self_wav.chroma_stem.cache_path') _delete_param(cfg, 'conditioners.args.merge_text_conditions_p') _delete_param(cfg, 'conditioners.args.drop_desc_p') lm = get_lm_model(loaded['xp.cfg']) lm.load_state_dict(loaded['model']) lm.eval() lm.cfg = cfg compression_model = CompressionSolver.model_from_checkpoint(cfg.compression_model_checkpoint, device=device) return MusicGen(f"{os.getenv('COG_USERNAME')}/musicgen-chord", compression_model, lm) class Predictor(BasePredictor): def setup(self, weights: Optional[Path] = None): """Load the model into memory to make running multiple predictions efficient""" self.device = "cuda" if torch.cuda.is_available() else "cpu"
self.mbd = MultiBandDiffusion.get_mbd_musicgen()
0
2023-10-09 09:55:24+00:00
24k
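The _delete_param helper in the record above exists because OmegaConf configs in struct mode reject structural changes; the helper flips struct mode off, deletes the stale key, and flips it back. A toy reproduction of that dance follows; the config contents are invented for illustration.

from omegaconf import OmegaConf

cfg = OmegaConf.create({"conditioners": {"args": {"drop_desc_p": 0.5, "kept": 1}}})
OmegaConf.set_struct(cfg, True)    # struct mode: no additions or deletions
node = cfg.conditioners.args
OmegaConf.set_struct(cfg, False)   # temporarily relax the restriction
del node["drop_desc_p"]
OmegaConf.set_struct(cfg, True)    # restore strictness
print(OmegaConf.to_yaml(cfg))      # 'kept' survives, 'drop_desc_p' is gone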
oracle/guardian-ai
tests/unitary/test_fairness_bias_mitigation.py
[ { "identifier": "ModelBiasMitigator", "path": "guardian_ai/fairness/bias_mitigation/sklearn.py", "snippet": "class ModelBiasMitigator:\n r\"\"\"\n Class to mitigate the bias of an already fitted machine learning model.\n\n The mitigation procedure works by multiplying the majority class label\n...
import math import os import pickle import tempfile import numpy as np import pandas as pd import pytest from sklearn.metrics import balanced_accuracy_score, log_loss, roc_auc_score from sklearn.pipeline import Pipeline from sklearn.preprocessing import OneHotEncoder from sklearn.linear_model import LogisticRegression from guardian_ai.fairness.bias_mitigation import ModelBiasMitigator from guardian_ai.fairness.metrics import model_statistical_parity from guardian_ai.utils.exception import GuardianAITypeError, GuardianAIValueError from tests.utils import get_dummy_dataset
16,715
@pytest.fixture(scope="module", params=METRIC_COMBOS.values(), ids=METRIC_COMBOS.keys()) def responsible_model_and_metrics(sensitive_dataset_and_model, request): X, y, model, sensitive_attr_names = sensitive_dataset_and_model fairness_metric, accuracy_metric = request.param ( fairness_name, fairness_callable, fairness_hib, fairness_uses_probas, ) = fairness_metric ( accuracy_name, accuracy_callable, accuracy_hib, accuracy_uses_probas, ) = accuracy_metric resp_model = ModelBiasMitigator( model, sensitive_attr_names, fairness_metric=fairness_name, accuracy_metric=accuracy_name, n_trials_per_group=5, random_seed=RANDOM_SEED, ) # limit number of trials for faster tests resp_model.fit(X, y) return X, y, sensitive_attr_names, resp_model, fairness_metric, accuracy_metric def test_sanity_checks(responsible_model_and_metrics): ( X, y, sensitive_attr_names, resp_model, fairness_metric, accuracy_metric, ) = responsible_model_and_metrics assert len(resp_model.predict(X)) == len(X) assert len(resp_model.predict_proba(X)) == len(X) assert resp_model._best_trials_detailed is not None def test_display(responsible_model_and_metrics): ( X, y, sensitive_attr_names, resp_model, fairness_metric, accuracy_metric, ) = responsible_model_and_metrics resp_model.show_tradeoff() # Assert that displays worked correctly (best we can do automatically currently) assert True def test_group_ranges(sensitive_dataset_and_model): X, y, model, sensitive_attr_names = sensitive_dataset_and_model group_small_range = np.array([[0.4, 0.6], [0.6, 0.4]]) group_big_range = np.array([[0.05, 0.95], [0.95, 0.05]]) probas = np.vstack((group_small_range, group_big_range)) groups = ["small"] * len(group_small_range) + ["big"] * len(group_big_range) groups = pd.DataFrame(groups, columns=["group_val"]) unique_groups = groups["group_val"].unique() unique_group_names = groups["group_val"].unique().tolist() resp_model = ModelBiasMitigator( model, sensitive_attr_names, fairness_metric=A_FAIRNESS_METRIC, accuracy_metric=AN_ACCURACY_METRIC, random_seed=RANDOM_SEED, ) group_ranges = resp_model._get_group_ranges( probas, groups, unique_groups, unique_group_names ) small_ratio = 0.6 / (0.4 + 1e-6) expected_small = (1 / small_ratio, small_ratio) expected_big = (0.1, 10.0) for received, expected in zip(group_ranges["small"], expected_small): assert is_close(received, expected) for received, expected in zip(group_ranges["big"], expected_big): assert is_close(received, expected) def test_accepted_inputs(sensitive_dataset_and_model): X, y, model, sensitive_attr_names = sensitive_dataset_and_model ### Bool or 'auto' attributes # Sanity checks ModelBiasMitigator( model, sensitive_attr_names, fairness_metric=A_FAIRNESS_METRIC, accuracy_metric=AN_ACCURACY_METRIC, higher_accuracy_is_better="auto", higher_fairness_is_better="auto", fairness_metric_uses_probas="auto", accuracy_metric_uses_probas="auto", ) def test_bool_auto_attr(attr_name): # Only 'auto' supported str
#!/usr/bin/env python # -*- coding: utf-8 -*-- # Copyright (c) 2023 Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ # Constants used when any metric is needed A_FAIRNESS_METRIC = "equalized_odds" AN_ACCURACY_METRIC = "accuracy" RANDOM_SEED = 12345 @pytest.fixture(scope="module", autouse=True) def init(): np.random.seed(RANDOM_SEED) def is_close(a, b): return math.isclose(a, b, rel_tol=1e-5) def approx_dict(d): return pytest.approx(d, rel=1e-5) class DummyBinaryStochasticModel: def predict(self, X): return np.random.randint(0, 2, size=X.shape[0]) def create_concat_sensitive_attrs(dataset, n_classes): if not isinstance(n_classes, list): n_classes = list(n_classes) sensitive_dataset = dataset.copy() sensitive_attrs_names = [] for i, n_classes_i in enumerate(n_classes): sensitive_vals = np.array( [f"sensitive_val_{idx}" for idx in range(n_classes_i)] ) attr_name = f"sensitive_attr_{i}" sensitive_dataset = concat_sensitive_attr_column( sensitive_vals, sensitive_dataset, attr_name ) sensitive_attrs_names.append(attr_name) return sensitive_dataset, sensitive_attrs_names def concat_sensitive_attr_column(vals, dataset, attr_name): sensitive_vals = np.random.choice(vals, size=len(dataset)) sensitive_feats = pd.DataFrame(np.transpose(sensitive_vals), columns=[attr_name]) return pd.concat([dataset, sensitive_feats], axis=1) @pytest.fixture(scope="module") def model_type(): return "LogisticRegression" @pytest.fixture(scope="module") def base_dataset(): return get_dummy_dataset(n_samples=500, n_features=5, n_classes=2) # By default, all tests are ran with (1 protected attr with 2 groups), # (1 protected attr with more than 2 groups), and (more than 2 protected attr) SENSITIVE_FEATURES_VARIATIONS = { "one_attr_two_classes": {"n_classes": (2,)}, "one_attr_n_classes": {"n_classes": (4,)}, "n_attrs": {"n_classes": (3, 4)}, } @pytest.fixture( scope="module", params=SENSITIVE_FEATURES_VARIATIONS.values(), ids=SENSITIVE_FEATURES_VARIATIONS.keys(), ) def sensitive_dataset_and_model(model_type, base_dataset, request): dataset, target = base_dataset dataset, sensitive_attr_names = create_concat_sensitive_attrs( dataset, **request.param ) model = Pipeline( steps=[ ("preprocessor", OneHotEncoder(handle_unknown="ignore")), ("classifier", LogisticRegression(random_state=0)), ] ) model.fit(dataset, target) return dataset, target, model, sensitive_attr_names # (metric_name, callable, higher_is_better, requires_proba) dict FAIRNESS_METRICS = { "statistical_parity": ( "statistical_parity", model_statistical_parity, False, False, ), } def neg_log_loss_score(y_true, y_pred, **kwargs): return -log_loss(y_true, y_pred, **kwargs) # (metric_name, callable, higher_is_better, requires_proba) dict ACCURACY_METRICS = { "roc_auc": ("roc_auc", roc_auc_score, True, True), "balanced_accuracy": ("balanced_accuracy", balanced_accuracy_score, True, False), "neg_log_loss": ("neg_log_loss", neg_log_loss_score, False, True), } METRIC_COMBOS = { f"{fair_name}--{acc_name}": (fair_metric, acc_metric) for fair_name, fair_metric in FAIRNESS_METRICS.items() for acc_name, acc_metric in ACCURACY_METRICS.items() } @pytest.fixture(scope="module", params=METRIC_COMBOS.values(), ids=METRIC_COMBOS.keys()) def responsible_model_and_metrics(sensitive_dataset_and_model, request): X, y, model, sensitive_attr_names = sensitive_dataset_and_model fairness_metric, accuracy_metric = request.param ( fairness_name, fairness_callable, fairness_hib, fairness_uses_probas, ) = 
fairness_metric ( accuracy_name, accuracy_callable, accuracy_hib, accuracy_uses_probas, ) = accuracy_metric resp_model = ModelBiasMitigator( model, sensitive_attr_names, fairness_metric=fairness_name, accuracy_metric=accuracy_name, n_trials_per_group=5, random_seed=RANDOM_SEED, ) # limit number of trials for faster tests resp_model.fit(X, y) return X, y, sensitive_attr_names, resp_model, fairness_metric, accuracy_metric def test_sanity_checks(responsible_model_and_metrics): ( X, y, sensitive_attr_names, resp_model, fairness_metric, accuracy_metric, ) = responsible_model_and_metrics assert len(resp_model.predict(X)) == len(X) assert len(resp_model.predict_proba(X)) == len(X) assert resp_model._best_trials_detailed is not None def test_display(responsible_model_and_metrics): ( X, y, sensitive_attr_names, resp_model, fairness_metric, accuracy_metric, ) = responsible_model_and_metrics resp_model.show_tradeoff() # Assert that displays worked correctly (best we can do automatically currently) assert True def test_group_ranges(sensitive_dataset_and_model): X, y, model, sensitive_attr_names = sensitive_dataset_and_model group_small_range = np.array([[0.4, 0.6], [0.6, 0.4]]) group_big_range = np.array([[0.05, 0.95], [0.95, 0.05]]) probas = np.vstack((group_small_range, group_big_range)) groups = ["small"] * len(group_small_range) + ["big"] * len(group_big_range) groups = pd.DataFrame(groups, columns=["group_val"]) unique_groups = groups["group_val"].unique() unique_group_names = groups["group_val"].unique().tolist() resp_model = ModelBiasMitigator( model, sensitive_attr_names, fairness_metric=A_FAIRNESS_METRIC, accuracy_metric=AN_ACCURACY_METRIC, random_seed=RANDOM_SEED, ) group_ranges = resp_model._get_group_ranges( probas, groups, unique_groups, unique_group_names ) small_ratio = 0.6 / (0.4 + 1e-6) expected_small = (1 / small_ratio, small_ratio) expected_big = (0.1, 10.0) for received, expected in zip(group_ranges["small"], expected_small): assert is_close(received, expected) for received, expected in zip(group_ranges["big"], expected_big): assert is_close(received, expected) def test_accepted_inputs(sensitive_dataset_and_model): X, y, model, sensitive_attr_names = sensitive_dataset_and_model ### Bool or 'auto' attributes # Sanity checks ModelBiasMitigator( model, sensitive_attr_names, fairness_metric=A_FAIRNESS_METRIC, accuracy_metric=AN_ACCURACY_METRIC, higher_accuracy_is_better="auto", higher_fairness_is_better="auto", fairness_metric_uses_probas="auto", accuracy_metric_uses_probas="auto", ) def test_bool_auto_attr(attr_name): # Only 'auto' supported str
with pytest.raises(GuardianAIValueError):
3
2023-10-09 09:48:50+00:00
24k
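The guardian-ai tests above lean on one pytest idiom throughout: a dict of named variations fed to a fixture via params/ids, so every dependent test runs once per variation under a readable id. A minimal standalone version of that idiom; the variation contents here are invented.

import pytest

VARIATIONS = {"two_classes": {"n": 2}, "four_classes": {"n": 4}}

@pytest.fixture(params=VARIATIONS.values(), ids=VARIATIONS.keys())
def n_classes(request):
    return request.param["n"]

def test_n_classes_positive(n_classes):
    # Runs twice: test_n_classes_positive[two_classes] and [four_classes]
    assert n_classes > 0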
jiangjiechen/auction-arena
app.py
[ { "identifier": "create_items", "path": "src/item_base.py", "snippet": "def create_items(item_info_jsl):\n '''\n item_info: a list of dict (name, price, desc, id)\n '''\n item_info_jsl = LoadJsonL(item_info_jsl)\n item_list = []\n for info in item_info_jsl:\n item_list.append(It...
import os import gradio as gr from app_modules.presets import * from app_modules.overwrites import * from app_modules.utils import * from src.item_base import create_items from src.bidder_base import Bidder from src.human_bidder import HumanBidder from src.auctioneer_base import Auctioneer from auction_workflow import run_auction, make_auction_hash from utils import chunks, reset_state_list
15,646
BIDDER_NUM = 4 items = create_items('data/items_demo.jsonl') def auction_loop_app(*args): global items bidder_list = args[0] # gr.State() -> session state items_id = args[1] os.environ['OPENAI_API_KEY'] = args[2] if args[2] != '' else os.environ.get('OPENAI_API_KEY', '') os.environ['ANTHROPIC_API_KEY'] = args[3] if args[3] != '' else os.environ.get('ANTHROPIC_API_KEY', '') thread_num = args[4] item_shuffle = args[5] enable_discount = args[6] min_markup_pct = args[7] args = args[8:] auction_hash = make_auction_hash() items_to_bid = [items[i] for i in items_id] auctioneer = Auctioneer(enable_discount=enable_discount, min_markup_pct=min_markup_pct) auctioneer.init_items(items_to_bid) if item_shuffle: auctioneer.shuffle_items() # must correspond to the order in app's parameters input_keys = [ 'chatbot', 'model_name', 'desire', 'plan_strategy', 'budget', 'correct_belief', 'enable_learning', 'temperature', 'overestimate_percent', ] # convert flatten list into a json list input_jsl = []
BIDDER_NUM = 4 items = create_items('data/items_demo.jsonl') def auction_loop_app(*args): global items bidder_list = args[0] # gr.State() -> session state items_id = args[1] os.environ['OPENAI_API_KEY'] = args[2] if args[2] != '' else os.environ.get('OPENAI_API_KEY', '') os.environ['ANTHROPIC_API_KEY'] = args[3] if args[3] != '' else os.environ.get('ANTHROPIC_API_KEY', '') thread_num = args[4] item_shuffle = args[5] enable_discount = args[6] min_markup_pct = args[7] args = args[8:] auction_hash = make_auction_hash() items_to_bid = [items[i] for i in items_id] auctioneer = Auctioneer(enable_discount=enable_discount, min_markup_pct=min_markup_pct) auctioneer.init_items(items_to_bid) if item_shuffle: auctioneer.shuffle_items() # must correspond to the order in app's parameters input_keys = [ 'chatbot', 'model_name', 'desire', 'plan_strategy', 'budget', 'correct_belief', 'enable_learning', 'temperature', 'overestimate_percent', ] # convert flatten list into a json list input_jsl = []
for i, chunk in enumerate(chunks(args, len(input_keys))):
6
2023-10-08 09:30:57+00:00
24k
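auction_loop_app in the record above receives every bidder's widget values flattened into one argument tuple and regroups them with chunks(args, len(input_keys)). A sketch of that regrouping with a hypothetical chunks helper; the real one lives in the repo's utils, and the key list here is trimmed down.

def chunks(seq, size):
    # Yield consecutive fixed-size slices of a flat sequence.
    for i in range(0, len(seq), size):
        yield seq[i:i + size]

input_keys = ["model_name", "budget"]        # abbreviated key list
flat_args = ("gpt-4", 100, "claude-2", 80)   # two bidders' widget values

bidders = [dict(zip(input_keys, group))
           for group in chunks(flat_args, len(input_keys))]
print(bidders)
# [{'model_name': 'gpt-4', 'budget': 100}, {'model_name': 'claude-2', 'budget': 80}]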
sakemin/cog-musicgen-chord
predict.py
[ { "identifier": "CompressionSolver", "path": "audiocraft/solvers/compression.py", "snippet": "class CompressionSolver(base.StandardSolver):\n \"\"\"Solver for compression task.\n\n The compression task combines a set of perceptual and objective losses\n to train an EncodecModel (composed of an ...
import os import random import torchaudio import typing as tp import numpy as np import torch import subprocess from typing import Optional from cog import BasePredictor, Input, Path from audiocraft.solvers.compression import CompressionSolver from audiocraft.models import MusicGen, MultiBandDiffusion from audiocraft.models.loaders import ( load_compression_model, load_lm_model, ) from audiocraft.data.audio import audio_write from audiocraft.models.builders import get_lm_model from omegaconf import OmegaConf
17,926
# Prediction interface for Cog ⚙️ # https://github.com/replicate/cog/blob/main/docs/python.md # We need to set `TRANSFORMERS_CACHE` before any imports, which is why this is up here. MODEL_PATH = "/src/models/" os.environ["TRANSFORMERS_CACHE"] = MODEL_PATH os.environ["TORCH_HOME"] = MODEL_PATH # Model specific imports def _delete_param(cfg, full_name: str): parts = full_name.split('.') for part in parts[:-1]: if part in cfg: cfg = cfg[part] else: return OmegaConf.set_struct(cfg, False) if parts[-1] in cfg: del cfg[parts[-1]] OmegaConf.set_struct(cfg, True) def load_ckpt(path, device, url=False): if url: loaded = torch.hub.load_state_dict_from_url(str(path)) else: loaded = torch.load(str(path)) cfg = OmegaConf.create(loaded['xp.cfg']) cfg.device = str(device) if cfg.device == 'cpu': cfg.dtype = 'float32' else: cfg.dtype = 'float16' _delete_param(cfg, 'conditioners.self_wav.chroma_chord.cache_path') _delete_param(cfg, 'conditioners.self_wav.chroma_stem.cache_path') _delete_param(cfg, 'conditioners.args.merge_text_conditions_p') _delete_param(cfg, 'conditioners.args.drop_desc_p') lm = get_lm_model(loaded['xp.cfg']) lm.load_state_dict(loaded['model']) lm.eval() lm.cfg = cfg compression_model = CompressionSolver.model_from_checkpoint(cfg.compression_model_checkpoint, device=device) return MusicGen(f"{os.getenv('COG_USERNAME')}/musicgen-chord", compression_model, lm) class Predictor(BasePredictor): def setup(self, weights: Optional[Path] = None): """Load the model into memory to make running multiple predictions efficient""" self.device = "cuda" if torch.cuda.is_available() else "cpu" self.mbd = MultiBandDiffusion.get_mbd_musicgen() if str(weights) == "weights": weights = None if weights is not None: print("Fine-tuned model weights loaded!") self.model = load_ckpt(weights, self.device, url=True) def _load_model( self, model_path: str, cls: Optional[any] = None, load_args: Optional[dict] = {}, model_id: Optional[str] = None, device: Optional[str] = None, ) -> MusicGen: if device is None: device = self.device
# Prediction interface for Cog ⚙️ # https://github.com/replicate/cog/blob/main/docs/python.md # We need to set `TRANSFORMERS_CACHE` before any imports, which is why this is up here. MODEL_PATH = "/src/models/" os.environ["TRANSFORMERS_CACHE"] = MODEL_PATH os.environ["TORCH_HOME"] = MODEL_PATH # Model specific imports def _delete_param(cfg, full_name: str): parts = full_name.split('.') for part in parts[:-1]: if part in cfg: cfg = cfg[part] else: return OmegaConf.set_struct(cfg, False) if parts[-1] in cfg: del cfg[parts[-1]] OmegaConf.set_struct(cfg, True) def load_ckpt(path, device, url=False): if url: loaded = torch.hub.load_state_dict_from_url(str(path)) else: loaded = torch.load(str(path)) cfg = OmegaConf.create(loaded['xp.cfg']) cfg.device = str(device) if cfg.device == 'cpu': cfg.dtype = 'float32' else: cfg.dtype = 'float16' _delete_param(cfg, 'conditioners.self_wav.chroma_chord.cache_path') _delete_param(cfg, 'conditioners.self_wav.chroma_stem.cache_path') _delete_param(cfg, 'conditioners.args.merge_text_conditions_p') _delete_param(cfg, 'conditioners.args.drop_desc_p') lm = get_lm_model(loaded['xp.cfg']) lm.load_state_dict(loaded['model']) lm.eval() lm.cfg = cfg compression_model = CompressionSolver.model_from_checkpoint(cfg.compression_model_checkpoint, device=device) return MusicGen(f"{os.getenv('COG_USERNAME')}/musicgen-chord", compression_model, lm) class Predictor(BasePredictor): def setup(self, weights: Optional[Path] = None): """Load the model into memory to make running multiple predictions efficient""" self.device = "cuda" if torch.cuda.is_available() else "cpu" self.mbd = MultiBandDiffusion.get_mbd_musicgen() if str(weights) == "weights": weights = None if weights is not None: print("Fine-tuned model weights loaded!") self.model = load_ckpt(weights, self.device, url=True) def _load_model( self, model_path: str, cls: Optional[any] = None, load_args: Optional[dict] = {}, model_id: Optional[str] = None, device: Optional[str] = None, ) -> MusicGen: if device is None: device = self.device
compression_model = load_compression_model(
4
2023-10-09 09:52:24+00:00
24k
zhijie-group/LOVECon
test_lovecon.py
[ { "identifier": "UNetPseudo3DConditionModel", "path": "video_diffusion/models/unet_3d_condition.py", "snippet": "class UNetPseudo3DConditionModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Opti...
import os import copy import click import re import numpy as np import torch import torch.utils.data import torch.utils.checkpoint import decord import shutil from glob import glob from typing import Optional,Dict from tqdm.auto import tqdm from omegaconf import OmegaConf from PIL import Image from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from diffusers import ( AutoencoderKL, DDIMScheduler, ) from diffusers.utils.import_utils import is_xformers_available from transformers import AutoTokenizer, CLIPTextModel from einops import rearrange from video_diffusion.models.unet_3d_condition import UNetPseudo3DConditionModel from video_diffusion.models.controlnet_3d_condition import ControlNetPseudo3DModel from video_diffusion.data.dataset import ImageSequenceDataset from video_diffusion.common.util import get_time_string, get_function_args from video_diffusion.common.logger import get_logger_config_path from video_diffusion.common.image_util import log_train_samples from video_diffusion.common.instantiate_from_config import instantiate_from_config from video_diffusion.pipelines.p2p_validation_loop_controlnet import P2pSampleLogger from annotator.util import get_control from video_diffusion.pipelines.DDIMInterpolationScheduler import DDIMInterpolationScheduler from RIFEModel.RIFE_HDv3 import Model
20,217
shutil.rmtree(video_frame_folder) os.makedirs(video_frame_folder,exist_ok=True) for i in range(video.shape[0]): frame = video[i] frame_path = os.path.join(video_frame_folder,f'frame-{i:04}.jpg') frame = Image.fromarray(frame.numpy().astype(np.uint8)) frame.save(frame_path) dataset_config.update({'path': video_frame_folder} ) time_string = get_time_string() if logdir is None: logdir = config.replace('config', 'result').replace('.yml', '').replace('.yaml', '') logdir += f"_{time_string}" accelerator = Accelerator( gradient_accumulation_steps=gradient_accumulation_steps, mixed_precision=mixed_precision, ) if accelerator.is_main_process: os.makedirs(logdir, exist_ok=True) OmegaConf.save(args, os.path.join(logdir, "config.yml")) logger = get_logger_config_path(logdir) if seed is not None: set_seed(seed) # Load the tokenizer tokenizer = AutoTokenizer.from_pretrained( pretrained_model_path, subfolder="tokenizer", use_fast=False, ) # Load models and create wrapper for stable diffusion text_encoder = CLIPTextModel.from_pretrained( pretrained_model_path, subfolder="text_encoder", ) vae = AutoencoderKL.from_pretrained( pretrained_model_path, subfolder="vae", ) #加载unet报错 unet = UNetPseudo3DConditionModel.from_2d_model( os.path.join(pretrained_model_path, "unet"), model_config=model_config ) controlnet = ControlNetPseudo3DModel.from_2d_model( pretrained_controlnet_model_path, model_config=model_config ) if 'target' not in test_pipeline_config: test_pipeline_config['target'] = 'video_diffusion.pipelines.stable_diffusion.SpatioTemporalStableDiffusionControlPipeline' scheduler = DDIMScheduler.from_pretrained( pretrained_model_path, subfolder="scheduler", ) pipeline = instantiate_from_config( test_pipeline_config, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, controlnet=controlnet, scheduler=scheduler, control_type = control_type, editing_type = editing_config.editing_type, dilation_kernel = editing_config.dilation_kernel, disk_store=kwargs.get('disk_store', False) ) pipeline.scheduler.set_timesteps(editing_config['num_inference_steps']) if editing_config.use_interpolater: new_scheduler = DDIMInterpolationScheduler.from_pretrained( pretrained_model_path, subfolder="scheduler", ) interpolater = Model() interpolater.load_model('RIFEModel', -1) new_scheduler.set_model(vae,interpolater) print('using interpolater') pipeline.add_new_scheduler(new_scheduler) pipeline.new_scheduler.set_timesteps(editing_config['num_inference_steps']) pipeline.set_progress_bar_config(disable=True) # pipeline.print_pipeline(logger) if is_xformers_available(): try: pipeline.enable_xformers_memory_efficient_attention() except Exception as e: logger.warning( "Could not enable memory efficient attention. Make sure xformers is installed" f" correctly and a GPU is available: {e}" ) vae.requires_grad_(False) unet.requires_grad_(False) text_encoder.requires_grad_(False) prompt_ids = tokenizer( dataset_config["prompt"], truncation=True, padding="max_length", max_length=tokenizer.model_max_length, return_tensors="pt", ).input_ids video_dataset = ImageSequenceDataset(**dataset_config, prompt_ids=prompt_ids) train_dataloader = torch.utils.data.DataLoader( video_dataset, batch_size=batch_size, shuffle=True, num_workers=4, collate_fn=collate_fn, ) train_sample_save_path = os.path.join(logdir, "train_samples.gif")
decord.bridge.set_bridge('torch') # from video_diffusion.pipelines.p2p_validation_loop_controlnet_ablation import P2pSampleLogger # logger = get_logger(__name__) def collate_fn(examples): """Concat a batch of sampled image in dataloader """ batch = { "prompt_ids": torch.cat([example["prompt_ids"] for example in examples], dim=0), "images": torch.stack([example["images"] for example in examples]), } return batch def test( config: str, pretrained_model_path: str, control_type:str, pretrained_controlnet_model_path :str, dataset_config: Dict, logdir: str = None, editing_config: Optional[Dict] = None, test_pipeline_config: Optional[Dict] = None, gradient_accumulation_steps: int = 1, seed: Optional[int] = None, mixed_precision: Optional[str] = "fp16", batch_size: int = 1, model_config: dict={}, verbose: bool=True, **kwargs ): args = get_function_args() vr = decord.VideoReader(dataset_config.video_path) fps = vr.get_avg_fps() duration = len(vr) / fps print("There are {} frames in the video but we take {} frames".format(len(vr), dataset_config.n_sample_frame)) if dataset_config.n_sample_frame <= 50: duration = 100 fps = 10 sample_index = list(range(0,len(vr), 1))[:dataset_config.n_sample_frame] video = vr.get_batch(sample_index) video_name_match = re.search(r"(.*)/(.*).mp4", dataset_config.video_path) video_name = video_name_match.group(2) video_frame_folder = os.path.join('data',video_name) if os.path.exists(video_frame_folder): shutil.rmtree(video_frame_folder) os.makedirs(video_frame_folder,exist_ok=True) for i in range(video.shape[0]): frame = video[i] frame_path = os.path.join(video_frame_folder,f'frame-{i:04}.jpg') frame = Image.fromarray(frame.numpy().astype(np.uint8)) frame.save(frame_path) dataset_config.update({'path': video_frame_folder} ) time_string = get_time_string() if logdir is None: logdir = config.replace('config', 'result').replace('.yml', '').replace('.yaml', '') logdir += f"_{time_string}" accelerator = Accelerator( gradient_accumulation_steps=gradient_accumulation_steps, mixed_precision=mixed_precision, ) if accelerator.is_main_process: os.makedirs(logdir, exist_ok=True) OmegaConf.save(args, os.path.join(logdir, "config.yml")) logger = get_logger_config_path(logdir) if seed is not None: set_seed(seed) # Load the tokenizer tokenizer = AutoTokenizer.from_pretrained( pretrained_model_path, subfolder="tokenizer", use_fast=False, ) # Load models and create wrapper for stable diffusion text_encoder = CLIPTextModel.from_pretrained( pretrained_model_path, subfolder="text_encoder", ) vae = AutoencoderKL.from_pretrained( pretrained_model_path, subfolder="vae", ) #加载unet报错 unet = UNetPseudo3DConditionModel.from_2d_model( os.path.join(pretrained_model_path, "unet"), model_config=model_config ) controlnet = ControlNetPseudo3DModel.from_2d_model( pretrained_controlnet_model_path, model_config=model_config ) if 'target' not in test_pipeline_config: test_pipeline_config['target'] = 'video_diffusion.pipelines.stable_diffusion.SpatioTemporalStableDiffusionControlPipeline' scheduler = DDIMScheduler.from_pretrained( pretrained_model_path, subfolder="scheduler", ) pipeline = instantiate_from_config( test_pipeline_config, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, controlnet=controlnet, scheduler=scheduler, control_type = control_type, editing_type = editing_config.editing_type, dilation_kernel = editing_config.dilation_kernel, disk_store=kwargs.get('disk_store', False) ) pipeline.scheduler.set_timesteps(editing_config['num_inference_steps']) if 
editing_config.use_interpolater: new_scheduler = DDIMInterpolationScheduler.from_pretrained( pretrained_model_path, subfolder="scheduler", ) interpolater = Model() interpolater.load_model('RIFEModel', -1) new_scheduler.set_model(vae,interpolater) print('using interpolater') pipeline.add_new_scheduler(new_scheduler) pipeline.new_scheduler.set_timesteps(editing_config['num_inference_steps']) pipeline.set_progress_bar_config(disable=True) # pipeline.print_pipeline(logger) if is_xformers_available(): try: pipeline.enable_xformers_memory_efficient_attention() except Exception as e: logger.warning( "Could not enable memory efficient attention. Make sure xformers is installed" f" correctly and a GPU is available: {e}" ) vae.requires_grad_(False) unet.requires_grad_(False) text_encoder.requires_grad_(False) prompt_ids = tokenizer( dataset_config["prompt"], truncation=True, padding="max_length", max_length=tokenizer.model_max_length, return_tensors="pt", ).input_ids video_dataset = ImageSequenceDataset(**dataset_config, prompt_ids=prompt_ids) train_dataloader = torch.utils.data.DataLoader( video_dataset, batch_size=batch_size, shuffle=True, num_workers=4, collate_fn=collate_fn, ) train_sample_save_path = os.path.join(logdir, "train_samples.gif")
log_train_samples(save_path=train_sample_save_path, train_dataloader=train_dataloader)
6
2023-10-09 14:38:28+00:00
24k
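The LOVECon test() above first dumps sampled video frames to JPEGs through decord before building the image-sequence dataset. The same read-and-save loop in isolation, assuming a local example.mp4; the path and frame count are illustrative.

import decord
import numpy as np
from PIL import Image

decord.bridge.set_bridge("torch")        # get_batch then returns torch tensors
vr = decord.VideoReader("example.mp4")
indices = list(range(min(8, len(vr))))   # take the first few frames
frames = vr.get_batch(indices)           # (T, H, W, C) uint8 tensor
for i in range(frames.shape[0]):
    Image.fromarray(frames[i].numpy().astype(np.uint8)).save(f"frame-{i:04}.jpg")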
ielab/llm-rankers
run.py
[ { "identifier": "SearchResult", "path": "rankers/rankers.py", "snippet": "class SearchResult:\n docid: str\n score: float\n text: str" }, { "identifier": "PointwiseLlmRanker", "path": "rankers/pointwise.py", "snippet": "class PointwiseLlmRanker(LlmRanker):\n\n def __init__(se...
import logging import ir_datasets import argparse import sys import json import time import random from pyserini.search.lucene import LuceneSearcher from pyserini.search._base import get_topics from rankers.rankers import SearchResult from rankers.pointwise import PointwiseLlmRanker, MonoT5LlmRanker from rankers.setwise import SetwiseLlmRanker, OpenAiSetwiseLlmRanker from rankers.pairwise import PairwiseLlmRanker, DuoT5LlmRanker, OpenAiPairwiseLlmRanker from rankers.listwise import OpenAiListwiseLlmRanker, ListwiseLlmRanker from tqdm import tqdm
14,507
if 'monot5' in args.run.model_name_or_path: ranker = MonoT5LlmRanker(model_name_or_path=args.run.model_name_or_path, tokenizer_name_or_path=args.run.tokenizer_name_or_path, device=args.run.device, cache_dir=args.run.cache_dir, method=args.pointwise.method, batch_size=args.pointwise.batch_size) else: ranker = PointwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, tokenizer_name_or_path=args.run.tokenizer_name_or_path, device=args.run.device, cache_dir=args.run.cache_dir, method=args.pointwise.method, batch_size=args.pointwise.batch_size) elif args.setwise: if args.run.openai_key: ranker = OpenAiSetwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, api_key=args.run.openai_key, num_child=args.setwise.num_child, method=args.setwise.method, k=args.setwise.k) else: ranker = SetwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, tokenizer_name_or_path=args.run.tokenizer_name_or_path, device=args.run.device, cache_dir=args.run.cache_dir, num_child=args.setwise.num_child, scoring=args.run.scoring, method=args.setwise.method, num_permutation=args.setwise.num_permutation, k=args.setwise.k) elif args.pairwise: if args.pairwise.method != 'allpair': args.pairwise.batch_size = 2 logger.info(f'Setting batch_size to 2.') if args.run.openai_key: ranker = OpenAiPairwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, api_key=args.run.openai_key, method=args.pairwise.method, k=args.pairwise.k) elif 'duot5' in args.run.model_name_or_path: ranker = DuoT5LlmRanker(model_name_or_path=args.run.model_name_or_path, tokenizer_name_or_path=args.run.tokenizer_name_or_path, device=args.run.device, cache_dir=args.run.cache_dir, method=args.pairwise.method, batch_size=args.pairwise.batch_size, k=args.pairwise.k) else: ranker = PairwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, tokenizer_name_or_path=args.run.tokenizer_name_or_path, device=args.run.device, cache_dir=args.run.cache_dir, method=args.pairwise.method, batch_size=args.pairwise.batch_size, k=args.pairwise.k) elif args.listwise: if args.run.openai_key: ranker = OpenAiListwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, api_key=args.run.openai_key, window_size=args.listwise.window_size, step_size=args.listwise.step_size, num_repeat=args.listwise.num_repeat) else: ranker = ListwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, tokenizer_name_or_path=args.run.tokenizer_name_or_path, device=args.run.device, cache_dir=args.run.cache_dir, window_size=args.listwise.window_size, step_size=args.listwise.step_size, scoring=args.run.scoring, num_repeat=args.listwise.num_repeat) else: raise ValueError('Must specify either --pointwise, --setwise, --pairwise or --listwise.') query_map = {} if args.run.ir_dataset_name is not None: dataset = ir_datasets.load(args.run.ir_dataset_name) for query in dataset.queries_iter(): qid = query.query_id text = query.text query_map[qid] = ranker.truncate(text, args.run.query_length) dataset = ir_datasets.load(args.run.ir_dataset_name) docstore = dataset.docs_store() else: topics = get_topics(args.run.pyserini_index+'-test') for topic_id in list(topics.keys()): text = topics[topic_id]['title'] query_map[str(topic_id)] = ranker.truncate(text, args.run.query_length) docstore = LuceneSearcher.from_prebuilt_index(args.run.pyserini_index+'.flat') logger.info(f'Loading first stage run from {args.run.run_path}.') first_stage_rankings = [] with open(args.run.run_path, 'r') as f: current_qid = None current_ranking = [] for line in tqdm(f): qid, _, docid, _, score, _ = 
line.strip().split() if qid != current_qid: if current_qid is not None: first_stage_rankings.append((current_qid, query_map[current_qid], current_ranking[:args.run.hits])) current_ranking = [] current_qid = qid if len(current_ranking) >= args.run.hits: continue if args.run.ir_dataset_name is not None: text = docstore.get(docid).text if 'title' in dir(docstore.get(docid)): text = f'{docstore.get(docid).title} {text}' else: data = json.loads(docstore.doc(docid).raw()) text = data['text'] if 'title' in data: text = f'{data["title"]} {text}' text = ranker.truncate(text, args.run.passage_length)
random.seed(929) logger = logging.getLogger(__name__) def parse_args(parser, commands): # Divide argv by commands split_argv = [[]] for c in sys.argv[1:]: if c in commands.choices: split_argv.append([c]) else: split_argv[-1].append(c) # Initialize namespace args = argparse.Namespace() for c in commands.choices: setattr(args, c, None) # Parse each command parser.parse_args(split_argv[0], namespace=args) # Without command for argv in split_argv[1:]: # Commands n = argparse.Namespace() setattr(args, argv[0], n) parser.parse_args(argv, namespace=n) return args def write_run_file(path, results, tag): with open(path, 'w') as f: for qid, _, ranking in results: rank = 1 for doc in ranking: docid = doc.docid score = doc.score f.write(f"{qid}\tQ0\t{docid}\t{rank}\t{score}\t{tag}\n") rank += 1 def main(args): if args.pointwise: if 'monot5' in args.run.model_name_or_path: ranker = MonoT5LlmRanker(model_name_or_path=args.run.model_name_or_path, tokenizer_name_or_path=args.run.tokenizer_name_or_path, device=args.run.device, cache_dir=args.run.cache_dir, method=args.pointwise.method, batch_size=args.pointwise.batch_size) else: ranker = PointwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, tokenizer_name_or_path=args.run.tokenizer_name_or_path, device=args.run.device, cache_dir=args.run.cache_dir, method=args.pointwise.method, batch_size=args.pointwise.batch_size) elif args.setwise: if args.run.openai_key: ranker = OpenAiSetwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, api_key=args.run.openai_key, num_child=args.setwise.num_child, method=args.setwise.method, k=args.setwise.k) else: ranker = SetwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, tokenizer_name_or_path=args.run.tokenizer_name_or_path, device=args.run.device, cache_dir=args.run.cache_dir, num_child=args.setwise.num_child, scoring=args.run.scoring, method=args.setwise.method, num_permutation=args.setwise.num_permutation, k=args.setwise.k) elif args.pairwise: if args.pairwise.method != 'allpair': args.pairwise.batch_size = 2 logger.info(f'Setting batch_size to 2.') if args.run.openai_key: ranker = OpenAiPairwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, api_key=args.run.openai_key, method=args.pairwise.method, k=args.pairwise.k) elif 'duot5' in args.run.model_name_or_path: ranker = DuoT5LlmRanker(model_name_or_path=args.run.model_name_or_path, tokenizer_name_or_path=args.run.tokenizer_name_or_path, device=args.run.device, cache_dir=args.run.cache_dir, method=args.pairwise.method, batch_size=args.pairwise.batch_size, k=args.pairwise.k) else: ranker = PairwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, tokenizer_name_or_path=args.run.tokenizer_name_or_path, device=args.run.device, cache_dir=args.run.cache_dir, method=args.pairwise.method, batch_size=args.pairwise.batch_size, k=args.pairwise.k) elif args.listwise: if args.run.openai_key: ranker = OpenAiListwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, api_key=args.run.openai_key, window_size=args.listwise.window_size, step_size=args.listwise.step_size, num_repeat=args.listwise.num_repeat) else: ranker = ListwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, tokenizer_name_or_path=args.run.tokenizer_name_or_path, device=args.run.device, cache_dir=args.run.cache_dir, window_size=args.listwise.window_size, step_size=args.listwise.step_size, scoring=args.run.scoring, num_repeat=args.listwise.num_repeat) else: raise ValueError('Must specify either --pointwise, --setwise, --pairwise or --listwise.') query_map 
= {} if args.run.ir_dataset_name is not None: dataset = ir_datasets.load(args.run.ir_dataset_name) for query in dataset.queries_iter(): qid = query.query_id text = query.text query_map[qid] = ranker.truncate(text, args.run.query_length) dataset = ir_datasets.load(args.run.ir_dataset_name) docstore = dataset.docs_store() else: topics = get_topics(args.run.pyserini_index+'-test') for topic_id in list(topics.keys()): text = topics[topic_id]['title'] query_map[str(topic_id)] = ranker.truncate(text, args.run.query_length) docstore = LuceneSearcher.from_prebuilt_index(args.run.pyserini_index+'.flat') logger.info(f'Loading first stage run from {args.run.run_path}.') first_stage_rankings = [] with open(args.run.run_path, 'r') as f: current_qid = None current_ranking = [] for line in tqdm(f): qid, _, docid, _, score, _ = line.strip().split() if qid != current_qid: if current_qid is not None: first_stage_rankings.append((current_qid, query_map[current_qid], current_ranking[:args.run.hits])) current_ranking = [] current_qid = qid if len(current_ranking) >= args.run.hits: continue if args.run.ir_dataset_name is not None: text = docstore.get(docid).text if 'title' in dir(docstore.get(docid)): text = f'{docstore.get(docid).title} {text}' else: data = json.loads(docstore.doc(docid).raw()) text = data['text'] if 'title' in data: text = f'{data["title"]} {text}' text = ranker.truncate(text, args.run.passage_length)
current_ranking.append(SearchResult(docid=docid, score=float(score), text=text))
0
2023-10-14 01:39:38+00:00
24k
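Both write_run_file and the first-stage loader in the record above speak the six-column TREC run format, "qid Q0 docid rank score tag". A tiny round-trip through that format, with an invented file name:

def read_run(path):
    # Parse a TREC run file into {qid: [(docid, score), ...]} in file order.
    run = {}
    with open(path) as f:
        for line in f:
            qid, _, docid, _, score, _ = line.strip().split()
            run.setdefault(qid, []).append((docid, float(score)))
    return run

with open("demo.run", "w") as f:
    f.write("q1\tQ0\tdocA\t1\t3.5\tbm25\n")
    f.write("q1\tQ0\tdocB\t2\t2.1\tbm25\n")
print(read_run("demo.run"))  # {'q1': [('docA', 3.5), ('docB', 2.1)]}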
LiYunfengLYF/LightFC
lib/train/data/base_functions.py
[ { "identifier": "sampler", "path": "lib/train/data/sampler.py", "snippet": "def no_processing(data):\r\n def __init__(self, datasets, p_datasets, samples_per_epoch, max_gap,\r\n num_search_frames, num_template_frames=1, processing=no_processing, frame_sample_mode='causal',\r\n ...
import torch import lib.train.data.transforms as tfm from torch.utils.data.distributed import DistributedSampler from lib.train.data import sampler, opencv_loader, processing, LTRLoader from lib.train.dataset import Lasot, Got10k, MSCOCOSeq, ImagenetVID, TrackingNet from lib.train.dataset import Lasot_lmdb, Got10k_lmdb, MSCOCOSeq_lmdb, ImagenetVID_lmdb, TrackingNet_lmdb from lib.train.optimizer.anan import Adan from lib.train.optimizer.lion import Lion from lib.utils.misc import is_main_process
21,064
# datasets related def update_settings(settings, cfg): settings.print_interval = cfg.TRAIN.PRINT_INTERVAL settings.search_area_factor = {'template': cfg.DATA.TEMPLATE.FACTOR, 'search': cfg.DATA.SEARCH.FACTOR} settings.output_sz = {'template': cfg.DATA.TEMPLATE.SIZE, 'search': cfg.DATA.SEARCH.SIZE} settings.center_jitter_factor = {'template': cfg.DATA.TEMPLATE.CENTER_JITTER, 'search': cfg.DATA.SEARCH.CENTER_JITTER} settings.scale_jitter_factor = {'template': cfg.DATA.TEMPLATE.SCALE_JITTER, 'search': cfg.DATA.SEARCH.SCALE_JITTER} settings.grad_clip_norm = cfg.TRAIN.GRAD_CLIP_NORM settings.print_stats = None settings.batchsize = cfg.TRAIN.BATCH_SIZE settings.scheduler_type = cfg.TRAIN.SCHEDULER.TYPE settings.save_interval = cfg.TRAIN.SAVE_INTERVAL def names2datasets(name_list: list, settings, image_loader): assert isinstance(name_list, list) datasets = [] for name in name_list: # assert name in ["LASOT", "GOT10K_vottrain", "GOT10K_votval", "GOT10K_train_full", "GOT10K_official_val", # "COCO17", "VID", "TRACKINGNET"] if name == "LASOT": if settings.use_lmdb: print("Building lasot dataset from lmdb") datasets.append(Lasot_lmdb(settings.env.lasot_lmdb_dir, split='train', image_loader=image_loader, env_num=settings.env_num)) else: datasets.append( Lasot(settings.env.lasot_dir, split='train', image_loader=image_loader, env_num=settings.env_num)) if name == "GOT10K_vottrain": if settings.use_lmdb: print("Building got10k from lmdb") datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='vottrain', image_loader=image_loader, env_num=settings.env_num)) else: datasets.append(Got10k(settings.env.got10k_dir, split='vottrain', image_loader=image_loader, env_num=settings.env_num)) if name == "GOT10K_train_full": if settings.use_lmdb: print("Building got10k_train_full from lmdb") datasets.append( Got10k_lmdb(settings.env.got10k_lmdb_dir, split='train_full', image_loader=image_loader, env_num=settings.env_num)) else: datasets.append(Got10k(settings.env.got10k_dir, split='train_full', image_loader=image_loader, env_num=settings.env_num)) if name == "GOT10K_votval": if settings.use_lmdb: print("Building got10k from lmdb") datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='votval', image_loader=image_loader, env_num=settings.env_num)) else: datasets.append(Got10k(settings.env.got10k_dir, split='votval', image_loader=image_loader, env_num=settings.env_num)) if name == "GOT10K_official_val": if settings.use_lmdb: raise ValueError("Not implement") else: datasets.append(Got10k(settings.env.got10k_val_dir, split=None, image_loader=image_loader, env_num=settings.env_num)) if name == "COCO17": if settings.use_lmdb: print("Building COCO2017 from lmdb") datasets.append(MSCOCOSeq_lmdb(settings.env.coco_lmdb_dir, version="2017", image_loader=image_loader, env_num=settings.env_num)) else: datasets.append(MSCOCOSeq(settings.env.coco_dir, version="2017", image_loader=image_loader, env_num=settings.env_num)) if name == "VID": if settings.use_lmdb: print("Building VID from lmdb") datasets.append(ImagenetVID_lmdb(settings.env.imagenet_lmdb_dir, image_loader=image_loader, env_num=settings.env_num)) else: datasets.append(
# datasets related def update_settings(settings, cfg): settings.print_interval = cfg.TRAIN.PRINT_INTERVAL settings.search_area_factor = {'template': cfg.DATA.TEMPLATE.FACTOR, 'search': cfg.DATA.SEARCH.FACTOR} settings.output_sz = {'template': cfg.DATA.TEMPLATE.SIZE, 'search': cfg.DATA.SEARCH.SIZE} settings.center_jitter_factor = {'template': cfg.DATA.TEMPLATE.CENTER_JITTER, 'search': cfg.DATA.SEARCH.CENTER_JITTER} settings.scale_jitter_factor = {'template': cfg.DATA.TEMPLATE.SCALE_JITTER, 'search': cfg.DATA.SEARCH.SCALE_JITTER} settings.grad_clip_norm = cfg.TRAIN.GRAD_CLIP_NORM settings.print_stats = None settings.batchsize = cfg.TRAIN.BATCH_SIZE settings.scheduler_type = cfg.TRAIN.SCHEDULER.TYPE settings.save_interval = cfg.TRAIN.SAVE_INTERVAL def names2datasets(name_list: list, settings, image_loader): assert isinstance(name_list, list) datasets = [] for name in name_list: # assert name in ["LASOT", "GOT10K_vottrain", "GOT10K_votval", "GOT10K_train_full", "GOT10K_official_val", # "COCO17", "VID", "TRACKINGNET"] if name == "LASOT": if settings.use_lmdb: print("Building lasot dataset from lmdb") datasets.append(Lasot_lmdb(settings.env.lasot_lmdb_dir, split='train', image_loader=image_loader, env_num=settings.env_num)) else: datasets.append( Lasot(settings.env.lasot_dir, split='train', image_loader=image_loader, env_num=settings.env_num)) if name == "GOT10K_vottrain": if settings.use_lmdb: print("Building got10k from lmdb") datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='vottrain', image_loader=image_loader, env_num=settings.env_num)) else: datasets.append(Got10k(settings.env.got10k_dir, split='vottrain', image_loader=image_loader, env_num=settings.env_num)) if name == "GOT10K_train_full": if settings.use_lmdb: print("Building got10k_train_full from lmdb") datasets.append( Got10k_lmdb(settings.env.got10k_lmdb_dir, split='train_full', image_loader=image_loader, env_num=settings.env_num)) else: datasets.append(Got10k(settings.env.got10k_dir, split='train_full', image_loader=image_loader, env_num=settings.env_num)) if name == "GOT10K_votval": if settings.use_lmdb: print("Building got10k from lmdb") datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='votval', image_loader=image_loader, env_num=settings.env_num)) else: datasets.append(Got10k(settings.env.got10k_dir, split='votval', image_loader=image_loader, env_num=settings.env_num)) if name == "GOT10K_official_val": if settings.use_lmdb: raise ValueError("Not implement") else: datasets.append(Got10k(settings.env.got10k_val_dir, split=None, image_loader=image_loader, env_num=settings.env_num)) if name == "COCO17": if settings.use_lmdb: print("Building COCO2017 from lmdb") datasets.append(MSCOCOSeq_lmdb(settings.env.coco_lmdb_dir, version="2017", image_loader=image_loader, env_num=settings.env_num)) else: datasets.append(MSCOCOSeq(settings.env.coco_dir, version="2017", image_loader=image_loader, env_num=settings.env_num)) if name == "VID": if settings.use_lmdb: print("Building VID from lmdb") datasets.append(ImagenetVID_lmdb(settings.env.imagenet_lmdb_dir, image_loader=image_loader, env_num=settings.env_num)) else: datasets.append(
ImagenetVID(settings.env.imagenet_dir, image_loader=image_loader, env_num=settings.env_num))
7
2023-10-08 11:44:32+00:00
24k
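names2datasets in the record above maps dataset names to constructors through a long if-chain. A table-driven sketch of the same dispatch, using throwaway lambdas in place of the real dataset classes (the real constructors take settings, loaders, and env arguments):

def build_datasets(names, builders):
    # Fail fast on unknown names, then construct each requested dataset.
    unknown = [n for n in names if n not in builders]
    if unknown:
        raise ValueError(f"unknown dataset names: {unknown}")
    return [builders[n]() for n in names]

builders = {
    "LASOT": lambda: "Lasot(split='train')",            # stand-in constructors
    "GOT10K_votval": lambda: "Got10k(split='votval')",
}
print(build_datasets(["LASOT", "GOT10K_votval"], builders))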
LiyaoTang/ERDA
utils/trainer.py
[ { "identifier": "log_config", "path": "config/utils.py", "snippet": "def log_config(config, title='', f_out=None, prefix='', base=None):\n if f_out is None:\n f_out = sys.stdout\n if base is None:\n root = os.path.join(os.getcwd(), os.path.dirname(__file__), '../')\n sys.path ...
import os, re, gc, sys, time, pickle, psutil, subprocess import numpy as np import tensorflow as tf from config import log_config from utils.logger import print_dict, print_table from utils.ply import read_ply, write_ply from utils.tester import ModelTester from utils.average_gradients import average_gradients from utils.AdamWOptimizer import AdamWeightDecayOptimizer from utils.logger import setup_logger from utils.scheduler import StepScheduler, LrScheduler from utils.metrics import AverageMeter from utils.tf_graph_builder import GraphBuilder
17,900
if tf.__version__.split('.')[0] == '2':
    tf = tf.compat.v1
    tf.disable_v2_behavior()

# PLY reader
FILE_DIR = os.path.abspath(__file__)
BASE_DIR = os.path.dirname(FILE_DIR)
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.insert(0, ROOT_DIR)
sys.path.insert(0, BASE_DIR)
sys.path.insert(0, os.path.join(ROOT_DIR, 'models'))
sys.path.insert(0, os.path.join(ROOT_DIR, 'utils'))

DEBUG = False


class ModelTrainer:
    """ get & train the model (potential multi-gpu training) """

    def __init__(self, config, verbose=True):
        self.config = config
        self.verbose = verbose
        self.tester = ModelTester(config, verbose=False)

    def add_summary(self, model, grads):
        # grads: list of (gradient, variable) pairs, as produced by GraphBuilder
        with tf.variable_scope('summary'):
            summary = model.summary
            log_content = self.config.log_content
            if 'var' in log_content:
                summary['per_log'] += [tf.summary.histogram(v.name, v) for g, v in grads]
            if 'gard' in log_content:
                summary['per_log'] += [tf.summary.histogram(f'{v.name}_grad', g) for g, v in grads]
            sum_levels = ['per_step', 'per_log', 'per_epoch']
            assert all([k in sum_levels for k in summary.keys()]), f'undesired keys in summary dict: {str(summary.keys())}'
            for lv in sum_levels:
                # merge each level into a single summary op; use a no-op when the level is empty
                summary[lv] = tf.summary.merge(summary[lv]) if summary[lv] else tf.no_op()
        self.summary = summary
        return

    # Training main method
    # ------------------------------------------------------------------------------------------------------------------

    def train(self):
        config = self.config
        with tf.Graph().as_default():  # use one graph

            # prepare compute graph
            g = GraphBuilder(config, verbose=self.verbose)
            ops, sess, grads, saver = g.ops, g.sess, g.grads, g.saver
            model, dataset = g.model, g.dataset
            self.model = model

            # printing model parameters
            if self.verbose:
                print('\n --------- printing grads {')
                re_list = ['.*bias:.*', '.*batch_normalization.*']  # skipping
                print_table([(v.name, g) for g, v in grads if not any([bool(re.fullmatch(expr, v.name)) for expr in re_list])], prefix='\t')
                print('} --------- printing grads')
                # all ops in graph
                print('\n --------- all ops {')
                re_list = ['optimizer.*', 'gpu_.*', 'gradients.*', 'save.*']  # '.*/batch_normalization/.*', '.*/bias:.*'  # skipping
                for n in tf.get_default_graph().as_graph_def().node:
                    if any([bool(re.fullmatch(expr, n.name)) for expr in re_list]):
                        continue
                    print('\t', n.name)
                print('} --------- all ops')
                # model params
                all_params_size = sum([np.prod(v.shape) for _, v in grads])
                # all_params_size = tf.reduce_sum([tf.reduce_prod(v.shape) for _, v in grads])
                # all_params_size = sess.run(all_params_size)
                print(f'==> Model has {all_params_size} total Params', flush=True)

            # init sess
            sess.run(tf.global_variables_initializer())
            if self.config.model_path:
                except_list = ([f'.*{n}.*' for n in self.config.exclude_vars] + ['optimizer.*']) if not self.config.continue_training else []
                g.restore(sess, self.config.model_path, except_list=except_list)
                print(f'Model restored -- {self.config.model_path}')

            # running voting - used throughout the training process (accumulated voting)
            validation_probs = self.tester.init_pointcloud_log(dataset, 'validation', config.num_classes)

            # train func
            if config.debug_nan:
                self.train_one_epoch = self.train_one_epoch_debug

            # train
            metric_best = None
            # save_snap = [i for i in range(1, config.max_epoch + 1) if i % config.save_freq == 0]
if tf.__version__.split('.')[0] == '2':
    tf = tf.compat.v1
    tf.disable_v2_behavior()

# PLY reader
FILE_DIR = os.path.abspath(__file__)
BASE_DIR = os.path.dirname(FILE_DIR)
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.insert(0, ROOT_DIR)
sys.path.insert(0, BASE_DIR)
sys.path.insert(0, os.path.join(ROOT_DIR, 'models'))
sys.path.insert(0, os.path.join(ROOT_DIR, 'utils'))

DEBUG = False


class ModelTrainer:
    """ get & train the model (potential multi-gpu training) """

    def __init__(self, config, verbose=True):
        self.config = config
        self.verbose = verbose
        self.tester = ModelTester(config, verbose=False)

    def add_summary(self, model, grads):
        # grads: list of (gradient, variable) pairs, as produced by GraphBuilder
        with tf.variable_scope('summary'):
            summary = model.summary
            log_content = self.config.log_content
            if 'var' in log_content:
                summary['per_log'] += [tf.summary.histogram(v.name, v) for g, v in grads]
            if 'gard' in log_content:
                summary['per_log'] += [tf.summary.histogram(f'{v.name}_grad', g) for g, v in grads]
            sum_levels = ['per_step', 'per_log', 'per_epoch']
            assert all([k in sum_levels for k in summary.keys()]), f'undesired keys in summary dict: {str(summary.keys())}'
            for lv in sum_levels:
                # merge each level into a single summary op; use a no-op when the level is empty
                summary[lv] = tf.summary.merge(summary[lv]) if summary[lv] else tf.no_op()
        self.summary = summary
        return

    # Training main method
    # ------------------------------------------------------------------------------------------------------------------

    def train(self):
        config = self.config
        with tf.Graph().as_default():  # use one graph

            # prepare compute graph
            g = GraphBuilder(config, verbose=self.verbose)
            ops, sess, grads, saver = g.ops, g.sess, g.grads, g.saver
            model, dataset = g.model, g.dataset
            self.model = model

            # printing model parameters
            if self.verbose:
                print('\n --------- printing grads {')
                re_list = ['.*bias:.*', '.*batch_normalization.*']  # skipping
                print_table([(v.name, g) for g, v in grads if not any([bool(re.fullmatch(expr, v.name)) for expr in re_list])], prefix='\t')
                print('} --------- printing grads')
                # all ops in graph
                print('\n --------- all ops {')
                re_list = ['optimizer.*', 'gpu_.*', 'gradients.*', 'save.*']  # '.*/batch_normalization/.*', '.*/bias:.*'  # skipping
                for n in tf.get_default_graph().as_graph_def().node:
                    if any([bool(re.fullmatch(expr, n.name)) for expr in re_list]):
                        continue
                    print('\t', n.name)
                print('} --------- all ops')
                # model params
                all_params_size = sum([np.prod(v.shape) for _, v in grads])
                # all_params_size = tf.reduce_sum([tf.reduce_prod(v.shape) for _, v in grads])
                # all_params_size = sess.run(all_params_size)
                print(f'==> Model has {all_params_size} total Params', flush=True)

            # init sess
            sess.run(tf.global_variables_initializer())
            if self.config.model_path:
                except_list = ([f'.*{n}.*' for n in self.config.exclude_vars] + ['optimizer.*']) if not self.config.continue_training else []
                g.restore(sess, self.config.model_path, except_list=except_list)
                print(f'Model restored -- {self.config.model_path}')

            # running voting - used throughout the training process (accumulated voting)
            validation_probs = self.tester.init_pointcloud_log(dataset, 'validation', config.num_classes)

            # train func
            if config.debug_nan:
                self.train_one_epoch = self.train_one_epoch_debug

            # train
            metric_best = None
            # save_snap = [i for i in range(1, config.max_epoch + 1) if i % config.save_freq == 0]
lr_scheduler = LrScheduler(config)
10
2023-10-13 08:03:07+00:00
24k
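g.restore in the trainer above takes an except_list of regex patterns so that optimizer state and explicitly excluded variables are not loaded when warm-starting from a checkpoint. A minimal sketch of that filtering step, assuming TF1-style global variables and that GraphBuilder.restore builds its Saver from such a list (the helper name is hypothetical):

import re
import tensorflow as tf
if tf.__version__.split('.')[0] == '2':
    tf = tf.compat.v1

def restorable_variables(except_list):
    # keep only variables whose full name matches none of the exclusion patterns
    return [v for v in tf.global_variables()
            if not any(re.fullmatch(expr, v.name) for expr in except_list)]

# usage sketch: saver = tf.train.Saver(restorable_variables(['optimizer.*']))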
bilibini/Lovely_Image_Downloader
py/Python38/site-packages/urllib3/poolmanager.py
[ { "identifier": "HTTPHeaderDict", "path": "py/Python38/site-packages/urllib3/_collections.py", "snippet": "class HTTPHeaderDict(typing.MutableMapping[str, str]):\n \"\"\"\n :param headers:\n An iterable of field-value pairs. Must not contain multiple field names\n when compared case-...
import functools import logging import typing import warnings import ssl from types import TracebackType from urllib.parse import urljoin from ._collections import HTTPHeaderDict, RecentlyUsedContainer from ._request_methods import RequestMethods from .connection import ProxyConfig from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, port_by_scheme from .exceptions import ( LocationValueError, MaxRetryError, ProxySchemeUnknown, URLSchemeUnknown, ) from .response import BaseHTTPResponse from .util.connection import _TYPE_SOCKET_OPTIONS from .util.proxy import connection_requires_http_tunnel from .util.retry import Retry from .util.timeout import Timeout from .util.url import Url, parse_url from typing_extensions import Literal
21,111
key__socks_options: frozenset[tuple[str, str]] | None key_assert_hostname: bool | str | None key_assert_fingerprint: str | None key_server_hostname: str | None key_blocksize: int | None def _default_key_normalizer( key_class: type[PoolKey], request_context: dict[str, typing.Any] ) -> PoolKey: """ Create a pool key out of a request context dictionary. According to RFC 3986, both the scheme and host are case-insensitive. Therefore, this function normalizes both before constructing the pool key for an HTTPS request. If you wish to change this behaviour, provide alternate callables to ``key_fn_by_scheme``. :param key_class: The class to use when constructing the key. This should be a namedtuple with the ``scheme`` and ``host`` keys at a minimum. :type key_class: namedtuple :param request_context: A dictionary-like object that contain the context for a request. :type request_context: dict :return: A namedtuple that can be used as a connection pool key. :rtype: PoolKey """ # Since we mutate the dictionary, make a copy first context = request_context.copy() context["scheme"] = context["scheme"].lower() context["host"] = context["host"].lower() # These are both dictionaries and need to be transformed into frozensets for key in ("headers", "_proxy_headers", "_socks_options"): if key in context and context[key] is not None: context[key] = frozenset(context[key].items()) # The socket_options key may be a list and needs to be transformed into a # tuple. socket_opts = context.get("socket_options") if socket_opts is not None: context["socket_options"] = tuple(socket_opts) # Map the kwargs to the names in the namedtuple - this is necessary since # namedtuples can't have fields starting with '_'. for key in list(context.keys()): context["key_" + key] = context.pop(key) # Default to ``None`` for keys missing from the context for field in key_class._fields: if field not in context: context[field] = None # Default key_blocksize to _DEFAULT_BLOCKSIZE if missing from the context if context.get("key_blocksize") is None: context["key_blocksize"] = _DEFAULT_BLOCKSIZE return key_class(**context) #: A dictionary that maps a scheme to a callable that creates a pool key. #: This can be used to alter the way pool keys are constructed, if desired. #: Each PoolManager makes a copy of this dictionary so they can be configured #: globally here, or individually on the instance. key_fn_by_scheme = { "http": functools.partial(_default_key_normalizer, PoolKey), "https": functools.partial(_default_key_normalizer, PoolKey), } pool_classes_by_scheme = {"http": HTTPConnectionPool, "https": HTTPSConnectionPool} class PoolManager(RequestMethods): """ Allows for arbitrary requests while transparently keeping track of necessary connection pools for you. :param num_pools: Number of connection pools to cache before discarding the least recently used pool. :param headers: Headers to include with all requests, unless other headers are given explicitly. :param \\**connection_pool_kw: Additional parameters are used to create fresh :class:`urllib3.connectionpool.ConnectionPool` instances. Example: .. 
code-block:: python import urllib3 http = urllib3.PoolManager(num_pools=2) resp1 = http.request("GET", "https://google.com/") resp2 = http.request("GET", "https://google.com/mail") resp3 = http.request("GET", "https://yahoo.com/") print(len(http.pools)) # 2 """ proxy: Url | None = None proxy_config: ProxyConfig | None = None def __init__( self, num_pools: int = 10, headers: typing.Mapping[str, str] | None = None, **connection_pool_kw: typing.Any, ) -> None: super().__init__(headers) self.connection_pool_kw = connection_pool_kw
from __future__ import annotations if typing.TYPE_CHECKING: __all__ = ["PoolManager", "ProxyManager", "proxy_from_url"] log = logging.getLogger(__name__) SSL_KEYWORDS = ( "key_file", "cert_file", "cert_reqs", "ca_certs", "ssl_version", "ssl_minimum_version", "ssl_maximum_version", "ca_cert_dir", "ssl_context", "key_password", "server_hostname", ) # Default value for `blocksize` - a new parameter introduced to # http.client.HTTPConnection & http.client.HTTPSConnection in Python 3.7 _DEFAULT_BLOCKSIZE = 16384 _SelfT = typing.TypeVar("_SelfT") class PoolKey(typing.NamedTuple): """ All known keyword arguments that could be provided to the pool manager, its pools, or the underlying connections. All custom key schemes should include the fields in this key at a minimum. """ key_scheme: str key_host: str key_port: int | None key_timeout: Timeout | float | int | None key_retries: Retry | bool | int | None key_block: bool | None key_source_address: tuple[str, int] | None key_key_file: str | None key_key_password: str | None key_cert_file: str | None key_cert_reqs: str | None key_ca_certs: str | None key_ssl_version: int | str | None key_ssl_minimum_version: ssl.TLSVersion | None key_ssl_maximum_version: ssl.TLSVersion | None key_ca_cert_dir: str | None key_ssl_context: ssl.SSLContext | None key_maxsize: int | None key_headers: frozenset[tuple[str, str]] | None key__proxy: Url | None key__proxy_headers: frozenset[tuple[str, str]] | None key__proxy_config: ProxyConfig | None key_socket_options: _TYPE_SOCKET_OPTIONS | None key__socks_options: frozenset[tuple[str, str]] | None key_assert_hostname: bool | str | None key_assert_fingerprint: str | None key_server_hostname: str | None key_blocksize: int | None def _default_key_normalizer( key_class: type[PoolKey], request_context: dict[str, typing.Any] ) -> PoolKey: """ Create a pool key out of a request context dictionary. According to RFC 3986, both the scheme and host are case-insensitive. Therefore, this function normalizes both before constructing the pool key for an HTTPS request. If you wish to change this behaviour, provide alternate callables to ``key_fn_by_scheme``. :param key_class: The class to use when constructing the key. This should be a namedtuple with the ``scheme`` and ``host`` keys at a minimum. :type key_class: namedtuple :param request_context: A dictionary-like object that contain the context for a request. :type request_context: dict :return: A namedtuple that can be used as a connection pool key. :rtype: PoolKey """ # Since we mutate the dictionary, make a copy first context = request_context.copy() context["scheme"] = context["scheme"].lower() context["host"] = context["host"].lower() # These are both dictionaries and need to be transformed into frozensets for key in ("headers", "_proxy_headers", "_socks_options"): if key in context and context[key] is not None: context[key] = frozenset(context[key].items()) # The socket_options key may be a list and needs to be transformed into a # tuple. socket_opts = context.get("socket_options") if socket_opts is not None: context["socket_options"] = tuple(socket_opts) # Map the kwargs to the names in the namedtuple - this is necessary since # namedtuples can't have fields starting with '_'. 
for key in list(context.keys()): context["key_" + key] = context.pop(key) # Default to ``None`` for keys missing from the context for field in key_class._fields: if field not in context: context[field] = None # Default key_blocksize to _DEFAULT_BLOCKSIZE if missing from the context if context.get("key_blocksize") is None: context["key_blocksize"] = _DEFAULT_BLOCKSIZE return key_class(**context) #: A dictionary that maps a scheme to a callable that creates a pool key. #: This can be used to alter the way pool keys are constructed, if desired. #: Each PoolManager makes a copy of this dictionary so they can be configured #: globally here, or individually on the instance. key_fn_by_scheme = { "http": functools.partial(_default_key_normalizer, PoolKey), "https": functools.partial(_default_key_normalizer, PoolKey), } pool_classes_by_scheme = {"http": HTTPConnectionPool, "https": HTTPSConnectionPool} class PoolManager(RequestMethods): """ Allows for arbitrary requests while transparently keeping track of necessary connection pools for you. :param num_pools: Number of connection pools to cache before discarding the least recently used pool. :param headers: Headers to include with all requests, unless other headers are given explicitly. :param \\**connection_pool_kw: Additional parameters are used to create fresh :class:`urllib3.connectionpool.ConnectionPool` instances. Example: .. code-block:: python import urllib3 http = urllib3.PoolManager(num_pools=2) resp1 = http.request("GET", "https://google.com/") resp2 = http.request("GET", "https://google.com/mail") resp3 = http.request("GET", "https://yahoo.com/") print(len(http.pools)) # 2 """ proxy: Url | None = None proxy_config: ProxyConfig | None = None def __init__( self, num_pools: int = 10, headers: typing.Mapping[str, str] | None = None, **connection_pool_kw: typing.Any, ) -> None: super().__init__(headers) self.connection_pool_kw = connection_pool_kw
self.pools: RecentlyUsedContainer[PoolKey, HTTPConnectionPool]
1
2023-10-11 09:08:57+00:00
24k
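To make _default_key_normalizer concrete: a small usage sketch with an invented request context. The resulting field values follow directly from the function body shown above (lower-cased scheme and host, frozenset headers, key_-prefixed names, None for missing fields, blocksize defaulted):

context = {
    "scheme": "HTTPS",
    "host": "Example.COM",
    "port": 443,
    "headers": {"User-Agent": "demo"},
}
key = _default_key_normalizer(PoolKey, context)
# key.key_scheme == "https"
# key.key_host == "example.com"
# key.key_headers == frozenset({("User-Agent", "demo")})
# key.key_timeout is None (unspecified fields default to None)
# key.key_blocksize == _DEFAULT_BLOCKSIZE (16384)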
MTgeophysics/mtpy-v2
mtpy/modeling/modem/data.py
[ { "identifier": "MTDataFrame", "path": "mtpy/core/mt_dataframe.py", "snippet": "class MTDataFrame:\n \"\"\"\n Dataframe for a single station\n\n Tried subclassing pandas.DataFrame, but that turned out to not be straight\n forward, so when with compilation instead.\n\n Think about having p...
import numpy as np import pandas as pd from pathlib import Path from loguru import logger from mtpy.core.mt_dataframe import MTDataFrame from mtpy.core.mt_location import MTLocation from mtpy.modeling.errors import ModelErrors
15,848
error_value_z percentage to multiply Z by to set error *default* is 5 for 5% of Z as error A 2x2 numpy array of values can be specified to explicitly set the error_value_z for each component. error_value_tipper absolute error between 0 and 1. fn_basename basename of data file. *default* is 'ModEM_Data.dat' formatting ['1' | '2'], format of the output data file, *default* is '1' header_strings strings for header of data file following the format outlined in the ModEM documentation inv_comp_dict dictionary of inversion components inv_mode inversion mode, options are: *default* is '1' * '1' --> for 'Full_Impedance' and 'Full_Vertical_Components' * '2' --> 'Full_Impedance' * '3' --> 'Off_Diagonal_Impedance' and 'Full_Vertical_Components' * '4' --> 'Off_Diagonal_Impedance' * '5' --> 'Full_Vertical_Components' * '6' --> 'Full_Interstation_TF' * '7' --> 'Off_Diagonal_Rho_Phase' inv_mode_dict dictionary for inversion modes max_num_periods maximum number of periods model_epsg epsg code for model projection, provide this to project model to non-utm coordinates. Find the epsg code for your projection on http://spatialreference.org/ref/ or google search epsg "your projection" model_utm_zone alternative to model_epsg, choose a utm zone to project all sites to (e.g. '55S') mt_dict dictionary of mtpy.core.mt.MT objects with keys being station names period_buffer float or int if specified, apply a buffer so that interpolation doesn't stretch too far over periods period_dict dictionary of period index for period_list period_list list of periods to invert for period_max maximum value of period to invert for period_min minimum value of period to invert for period_buffer buffer so that interpolation doesn't stretch too far over periods. Provide a float or integer factor, greater than which interpolation will not stretch. e.g. 1.5 means only interpolate to a maximum of 1.5 times each side of each frequency value rotate_angle Angle to rotate data to assuming 0 is N and E is 90 save_path path to save data file to units [ [V/m]/[T] | [mV/km]/[nT] | Ohm ] units of Z *default* is [mV/km]/[nT] wave_sign_impedance [ + | - ] sign of time dependent wave. *default* is '+' as positive downwards. wave_sign_tipper [ + | - ] sign of time dependent wave. *default* is '+' as positive downwards. ====================== ==================================================== :Example 1 --> create inversion period list: :: >>> from pathlib import Path >>> import mtpy.modeling.modem as modem >>> edi_path = Path(r"/home/mt/edi_files") >>> edi_list = list(edi_path.glob("*.edi")) >>> md = modem.Data(edi_list, period_min=.1, period_max=300,\ >>> ... 
max_num_periods=12) >>> md.write_data_file(save_path=r"/home/modem/inv1") >>> md :Example 2 --> set inverions period list from data: :: >>> md = modem.Data(edi_list) >>> #get period list from an .edi file >>> inv_period_list = 1./md.mt_dict["mt01"].Z.freq >>> #invert for every third period in inv_period_list >>> inv_period_list = inv_period_list[np.arange(0, len(inv_period_list, 3))] >>> md.period_list = inv_period_list >>> md.write_data_file(save_path=r"/home/modem/inv1") :Example 3 --> change error values: :: >>> mdr.error_type = 'floor' >>> mdr.error_floor = 10 >>> mdr.error_tipper = .03 >>> mdr.write_data_file(save_path=r"/home/modem/inv2") :Example 4 --> change inversion type: :: >>> mdr.inv_mode = '3' >>> mdr.write_data_file(save_path=r"/home/modem/inv2") :Example 5 --> rotate data: :: >>> md.rotation_angle = 60 >>> md.write_data_file(save_path=r"/home/modem/Inv1") >>> # or >>> md.write_data_file(save_path=r"/home/modem/Inv1", \ rotation_angle=60) """ def __init__(self, dataframe=None, center_point=None, **kwargs): self.logger = logger self.dataframe = dataframe if center_point is None: self.center_point = MTLocation() else: self.center_point = center_point self.wave_sign_impedance = "+" self.wave_sign_tipper = "+" self.z_units = "[mV/km]/[nT]" self.t_units = "" self.inv_mode = "1" self.formatting = "1" self.rotation_angle = 0
""" ================== ModEM ================== # Generate files for ModEM # revised by JP 2017 # revised by AK 2017 to bring across functionality from ak branch # revised by JP 2021 adding functionality and updating. # revised by JP 2022 to work with new structure of a central object """ # ============================================================================= # Imports # ============================================================================= # ============================================================================= class Data: """ Data will read and write .dat files for ModEM and convert a WS data file to ModEM format. ..note: :: the data is interpolated onto the given periods such that all stations invert for the same periods. The interpolation is a linear interpolation of each of the real and imaginary parts of the impedance tensor and induction tensor. See mtpy.core.mt.MT.interpolate for more details :param edi_list: list of edi files to read ====================== ==================================================== Attributes Description ====================== ==================================================== _dtype internal variable defining the data type of data_array _logger python logging object that put messages in logging format defined in logging configure file, see MtPyLog for more information _t_shape internal variable defining shape of tipper array in _dtype _z_shape internal variable defining shape of Z array in _dtype center_position (east, north, evel) for center point of station array. All stations are relative to this location for plotting purposes. comp_index_dict dictionary for index values of component of Z and T station_locations Stations object data_array numpy.ndarray (num_stations) structured to store data. keys are: * station --> station name * lat --> latitude in decimal degrees * lon --> longitude in decimal degrees * elev --> elevation (m) * rel_east -- > relative east location to center_position (m) * rel_north --> relative north location to center_position (m) * east --> UTM east (m) * north --> UTM north (m) * zone --> UTM zone * z --> impedance tensor array with shape (num_freq, 2, 2) * z_err --> impedance tensor error array with shape (num_freq, 2, 2) * tip --> Tipper array with shape (num_freq, 1, 2) * tipperr --> Tipper array with shape (num_freq, 1, 2) data_fn full path to data file data_period_list period list from all the data edi_list list of full paths to edi files error_type_tipper [ 'abs' | 'floor' ] *default* is 'abs' error_type_z [ 'egbert' | 'mean_od' | 'eigen' | 'median'] *default* is 'egbert_floor' * add '_floor' to any of the above to set the error as an error floor, otherwise all components are give weighted the same * 'egbert' sets error to error_value_z * sqrt(abs(zxy*zyx)) * 'mean_od' sets error to error_value_z * mean([Zxy, Zyx]) (non zeros) * 'eigen' sets error to error_value_z * eigenvalues(Z[ii]) * 'median' sets error to error_value_z * median([Zxx, Zxy, Zyx, Zyy]) (non zeros) A 2x2 numpy array of error_type_z can be specified to explicitly set the error_type_z for each component. error_value_z percentage to multiply Z by to set error *default* is 5 for 5% of Z as error A 2x2 numpy array of values can be specified to explicitly set the error_value_z for each component. error_value_tipper absolute error between 0 and 1. fn_basename basename of data file. 
*default* is 'ModEM_Data.dat' formatting ['1' | '2'], format of the output data file, *default* is '1' header_strings strings for header of data file following the format outlined in the ModEM documentation inv_comp_dict dictionary of inversion components inv_mode inversion mode, options are: *default* is '1' * '1' --> for 'Full_Impedance' and 'Full_Vertical_Components' * '2' --> 'Full_Impedance' * '3' --> 'Off_Diagonal_Impedance' and 'Full_Vertical_Components' * '4' --> 'Off_Diagonal_Impedance' * '5' --> 'Full_Vertical_Components' * '6' --> 'Full_Interstation_TF' * '7' --> 'Off_Diagonal_Rho_Phase' inv_mode_dict dictionary for inversion modes max_num_periods maximum number of periods model_epsg epsg code for model projection, provide this to project model to non-utm coordinates. Find the epsg code for your projection on http://spatialreference.org/ref/ or google search epsg "your projection" model_utm_zone alternative to model_epsg, choose a utm zone to project all sites to (e.g. '55S') mt_dict dictionary of mtpy.core.mt.MT objects with keys being station names period_buffer float or int if specified, apply a buffer so that interpolation doesn't stretch too far over periods period_dict dictionary of period index for period_list period_list list of periods to invert for period_max maximum value of period to invert for period_min minimum value of period to invert for period_buffer buffer so that interpolation doesn't stretch too far over periods. Provide a float or integer factor, greater than which interpolation will not stretch. e.g. 1.5 means only interpolate to a maximum of 1.5 times each side of each frequency value rotate_angle Angle to rotate data to assuming 0 is N and E is 90 save_path path to save data file to units [ [V/m]/[T] | [mV/km]/[nT] | Ohm ] units of Z *default* is [mV/km]/[nT] wave_sign_impedance [ + | - ] sign of time dependent wave. *default* is '+' as positive downwards. wave_sign_tipper [ + | - ] sign of time dependent wave. *default* is '+' as positive downwards. ====================== ==================================================== :Example 1 --> create inversion period list: :: >>> from pathlib import Path >>> import mtpy.modeling.modem as modem >>> edi_path = Path(r"/home/mt/edi_files") >>> edi_list = list(edi_path.glob("*.edi")) >>> md = modem.Data(edi_list, period_min=.1, period_max=300,\ >>> ... 
max_num_periods=12) >>> md.write_data_file(save_path=r"/home/modem/inv1") >>> md :Example 2 --> set inverions period list from data: :: >>> md = modem.Data(edi_list) >>> #get period list from an .edi file >>> inv_period_list = 1./md.mt_dict["mt01"].Z.freq >>> #invert for every third period in inv_period_list >>> inv_period_list = inv_period_list[np.arange(0, len(inv_period_list, 3))] >>> md.period_list = inv_period_list >>> md.write_data_file(save_path=r"/home/modem/inv1") :Example 3 --> change error values: :: >>> mdr.error_type = 'floor' >>> mdr.error_floor = 10 >>> mdr.error_tipper = .03 >>> mdr.write_data_file(save_path=r"/home/modem/inv2") :Example 4 --> change inversion type: :: >>> mdr.inv_mode = '3' >>> mdr.write_data_file(save_path=r"/home/modem/inv2") :Example 5 --> rotate data: :: >>> md.rotation_angle = 60 >>> md.write_data_file(save_path=r"/home/modem/Inv1") >>> # or >>> md.write_data_file(save_path=r"/home/modem/Inv1", \ rotation_angle=60) """ def __init__(self, dataframe=None, center_point=None, **kwargs): self.logger = logger self.dataframe = dataframe if center_point is None: self.center_point = MTLocation() else: self.center_point = center_point self.wave_sign_impedance = "+" self.wave_sign_tipper = "+" self.z_units = "[mV/km]/[nT]" self.t_units = "" self.inv_mode = "1" self.formatting = "1" self.rotation_angle = 0
self.z_model_error = ModelErrors(
2
2023-10-11 22:24:50+00:00
24k
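A short numerical sketch of the 'egbert' rule from the docstring above, which sets the impedance error to error_value_z per cent of sqrt(abs(Zxy * Zyx)); the tensor values here are invented:

import numpy as np

z = np.array([[0.10 + 0.20j, 1.00 - 0.50j],
              [-0.90 + 0.40j, 0.05 - 0.10j]])  # 2x2 impedance tensor at one period
error_value_z = 5.0  # per cent, the documented default
z_err = (error_value_z / 100.0) * np.sqrt(np.abs(z[0, 1] * z[1, 0]))
# a single error floor derived from the off-diagonal components,
# applied to all four components at this period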
weavel-ai/promptmodel-python
promptmodel/llms/llm_proxy.py
[ { "identifier": "LLM", "path": "promptmodel/llms/llm.py", "snippet": "class LLM:\n def __init__(self):\n pass\n\n @classmethod\n def __parse_output_pattern__(\n cls,\n raw_output: Optional[str] = None,\n parsing_type: Optional[ParsingType] = None,\n ) -> ParseResu...
from typing import ( Any, AsyncGenerator, Callable, Dict, Generator, List, Optional, Tuple, Union, ) from uuid import UUID from threading import Thread from rich import print from uuid import uuid4 from litellm.utils import ModelResponse, get_max_tokens from promptmodel.llms.llm import LLM from promptmodel.database.models import ( DeployedPrompt, DeployedFunctionModel, DeployedFunctionModelVersion, ) from promptmodel.database.crud import ( get_deployed_prompts, ) from promptmodel.promptmodel_init import CacheManager from promptmodel.utils.config_utils import read_config, upsert_config from promptmodel.utils.random_utils import select_version_by_ratio from promptmodel.utils import logger from promptmodel.utils.async_utils import run_async_in_sync from promptmodel.utils.token_counting import ( num_tokens_for_messages_for_each, num_tokens_from_functions_input, ) from promptmodel.utils.output_utils import update_dict from promptmodel.apis.base import AsyncAPIClient from promptmodel.types.response import ( LLMResponse, LLMStreamResponse, FunctionModelConfig, ChatModelConfig, UnitConfig, PMDetail, ) from promptmodel.types.request import ChatLogRequest
18,010
class LLMProxy(LLM): def __init__( self, name: str, version: Optional[Union[str, int]] = "deploy", unit_config: Optional[UnitConfig] = None ): super().__init__() self._name = name self.version = version self.unit_config = unit_config def _wrap_gen(self, gen: Callable[..., Any]) -> Callable[..., Any]: def wrapper(inputs: Dict[str, Any], **kwargs): prompts, version_details = run_async_in_sync( LLMProxy.fetch_prompts(self._name, self.version) ) call_args = self._prepare_call_args( prompts, version_details, inputs, kwargs ) log_uuid = str(uuid4()) # Call the generator with the arguments stream_response: Generator[LLMStreamResponse, None, None] = gen(**call_args) api_response = None dict_cache = {} # to store aggregated dictionary values string_cache = "" # to store aggregated string values error_occurs = False error_log = None for item in stream_response: if ( item.api_response and "delta" not in item.api_response.choices[0] ): # only get the last api_response, not delta response api_response = item.api_response if item.parsed_outputs: dict_cache = update_dict(dict_cache, item.parsed_outputs) if item.raw_output: string_cache += item.raw_output if item.error and not error_occurs: error_occurs = True error_log = item.error_log if error_occurs: # delete all promptmodel data in item item.raw_output = None item.parsed_outputs = None item.function_call = None
class LLMProxy(LLM): def __init__( self, name: str, version: Optional[Union[str, int]] = "deploy", unit_config: Optional[UnitConfig] = None ): super().__init__() self._name = name self.version = version self.unit_config = unit_config def _wrap_gen(self, gen: Callable[..., Any]) -> Callable[..., Any]: def wrapper(inputs: Dict[str, Any], **kwargs): prompts, version_details = run_async_in_sync( LLMProxy.fetch_prompts(self._name, self.version) ) call_args = self._prepare_call_args( prompts, version_details, inputs, kwargs ) log_uuid = str(uuid4()) # Call the generator with the arguments stream_response: Generator[LLMStreamResponse, None, None] = gen(**call_args) api_response = None dict_cache = {} # to store aggregated dictionary values string_cache = "" # to store aggregated string values error_occurs = False error_log = None for item in stream_response: if ( item.api_response and "delta" not in item.api_response.choices[0] ): # only get the last api_response, not delta response api_response = item.api_response if item.parsed_outputs: dict_cache = update_dict(dict_cache, item.parsed_outputs) if item.raw_output: string_cache += item.raw_output if item.error and not error_occurs: error_occurs = True error_log = item.error_log if error_occurs: # delete all promptmodel data in item item.raw_output = None item.parsed_outputs = None item.function_call = None
item.pm_detail = PMDetail(
20
2023-10-09 03:35:44+00:00
24k
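_wrap_gen above folds a stream of LLMStreamResponse chunks into one final answer: raw text fragments are concatenated and parsed outputs are merged via update_dict. A standalone sketch of that accumulation pattern (the per-key string concatenation mirrors what update_dict is used for here, but is an assumption about its exact behaviour):

def aggregate_stream(stream):
    # stream: iterable of chunks with .raw_output and .parsed_outputs attributes
    string_cache = ""   # aggregated raw text
    dict_cache = {}     # aggregated parsed fields
    for item in stream:
        if item.raw_output:
            string_cache += item.raw_output
        if item.parsed_outputs:
            for k, v in item.parsed_outputs.items():
                dict_cache[k] = dict_cache.get(k, "") + v
    return string_cache, dict_cache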
cambridgeltl/ClaPS
run_prune_search.py
[ { "identifier": "PromptedClassificationReward", "path": "rewards/text_classification_reward.py", "snippet": "class PromptedClassificationReward:\n def __init__(\n self,\n args,\n task_lm: str,\n is_mask_lm: Optional[bool],\n num_classes: int,\n verbalizers: L...
import random import numpy as np import json import argparse import os import torch import logging from tqdm import tqdm from transformers import AutoTokenizer, set_seed from rewards.text_classification_reward import PromptedClassificationReward from utils.fsc_datasets import PromptedClassificationDataset from algs.genetics import GeneticAlgorithmTrainer, Genetics from algs.particle_swarm import ParticleSwarmOptimizer from algs.greedy import GreedyTrainer
14,471
vocab_id.append(v) logger.info(len(vocab_key)) else: # random select 10% of the vocab vocab, vocab_key, vocab_id = random_pruning(args, vocab, args["percentile"]) logger.info(len(vocab_key)) return vocab, vocab_key, vocab_id def random_pruning(args, vocab: dict, percent: int = 99): vocab_key = [] vocab_id = [] for k, v in vocab.items(): vocab_key.append(k) vocab_id.append(v) length = int(len(vocab_key) * (100 - percent) / 100) pruned_index = random.sample(list(np.arange(len(vocab_key))), length) vocab_key = [vocab_key[i] for i in pruned_index] vocab_id = [vocab_id[i] for i in pruned_index] vocab = {vocab_key[i]: vocab_id[i] for i in range(len(vocab_key))} logger.info(len(vocab_key)) return vocab, vocab_key, vocab_id def main(args): print(args) set_seed(args["seed"]) revocab_flag = args["reprune_vocab"] shots = args["num_shots"] batch_size = args["train_batch_size"] args["is_mask_lm"] = False special_space = "▁" if "bert" in args["model_name"]: args["is_mask_lm"] = True special_space = "Ġ" logging.info("......Loading dataset......") prompt_dataset = PromptedClassificationDataset(args) verbalizer_predefined = prompt_dataset.get_verbalizer() args["verbalizers"] = verbalizer_predefined logging.info("verbalizers: %s", verbalizer_predefined) args["num_labels"] = len(verbalizer_predefined) train_dataset, val_dataset, test_dataset = prompt_dataset.get_few_shot_dataset( shots ) logging.info("......truncating vocab......") crossover_tokenizer = AutoTokenizer.from_pretrained(args["model_name"]) vocab = crossover_tokenizer.get_vocab() # preprocess the vocab special_tokens = [ crossover_tokenizer.unk_token, crossover_tokenizer.pad_token, crossover_tokenizer.sep_token, crossover_tokenizer.cls_token, ] vocab = { word: index for word, index in vocab.items() if word not in special_tokens and special_space in word } for v in verbalizer_predefined: if v not in vocab: print("verbalizer not in vocab: ", v) assert v in vocab logging.info("the vocab length before action set pruning: %s", len(vocab)) dataset = train_dataset print(dataset) batch_size = min(batch_size, len(dataset)) idx = np.random.choice(len(dataset), batch_size, replace=False) data = [dataset[i] for i in idx] logging.info(f"Length of dataset = {len(data)}") obj_func = PromptedClassificationReward( args=args, reward_type=args["reward_type"], task_lm=args["model_name"], is_mask_lm=args["is_mask_lm"], num_classes=args["num_labels"], verbalizers=args["verbalizers"], use_bn_calibration=args["bn_calibrate"], ) if revocab_flag: # pruning efficiency section # random select 10% of the vocab if args["vocab_path"] != "none": # this is to do kmeans clustering and pruning vocab, _, vocab_id = load_vocab(args) kl_dict, collect_kl_np = find_kl_dict( args, data, vocab, obj_func, prompt_dataset ) else: if not args["run_manual"]: kl_dict, collect_kl_np = load_kl_dict(args) else: kl_dict = {} collect_kl_np = [] if not args["run_manual"]: vocab, _, vocab_id = action_set_pruning(args, kl_dict, collect_kl_np, vocab) else: vocab_id = [v for k, v in vocab.items()] if args["method"] == "genetic": genetics = Genetics(crossover_tokenizer, vocab_id) trainer = GeneticAlgorithmTrainer( pop_size=128, mutate_size=64, crossover_size=64, mutate_frac=0.1, str_len=5, epochs=30, stages=1, n_classes=args["num_labels"], genetics=genetics, eval_batch_size=args["eval_batch_size"], obj_func=obj_func, prompt_dataset=prompt_dataset, use_bn_calibrator=args["bn_calibrate"], logger=logger, ) elif args["method"] == "particle_swarm":
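action_set_pruning above keeps only vocabulary entries whose divergence score exceeds the given percentile of all scores. A compact numpy sketch of that filter with made-up scores:

import numpy as np

scores = {"hello": 0.12, "good": 0.55, "bad": 0.48, "xx": 0.03}
threshold = np.percentile(np.array(list(scores.values())), 75)
pruned = {tok: s for tok, s in scores.items() if s > threshold}
# pruned == {"good": 0.55}: roughly the top quarter of tokens survive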
logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) def remove_special_token(text: str, special_token: str) -> str: return text.replace(special_token, "") def find_kl_dict(args, data, vocab, obj_func, prompted_dataset): premise_texts, hypothesis_texts, class_labels = prompted_dataset.get_data(data) if args["prune_type"] == "kl": default_probs = obj_func.compute_default_kl( premise_texts, hypothesis_texts, class_labels, "", True ) else: default_probs = obj_func.compute_default_reward( premise_texts, hypothesis_texts, class_labels, "", True ) collect_kl = [] kl_dict = {} for v, k in tqdm(vocab.items()): if args["prune_type"] == "kl": kl = obj_func.compute_kl( premise_texts, hypothesis_texts, class_labels, v, True, default_probs ) else: kl = obj_func.compute_reward_diff( premise_texts, hypothesis_texts, class_labels, v, True, default_probs ) collect_kl.append(kl) kl_dict[v] = kl for k, v in kl_dict.items(): kl_dict[k] = float(v) with open(args["dict_path"], "w") as fp: json.dump(kl_dict, fp, indent=4, ensure_ascii=False) collect_kl_np = [] for tensor in collect_kl: collect_kl_np.append(tensor.cpu().numpy()) return kl_dict, collect_kl_np def load_kl_dict(args): # load the KL dict from json file with open(args["dict_path"], "r") as fp: kl_dict = json.load(fp) collect_kl_np = [] for k, v in kl_dict.items(): collect_kl_np.append(v) return kl_dict, collect_kl_np def load_vocab(args): with open(args["vocab_path"], "r") as fp: vocab = json.load(fp) vocab_key = [] vocab_id = [] for k, v in vocab.items(): vocab_key.append(k) vocab_id.append(v) return vocab, vocab_key, vocab_id def action_set_pruning(args, kl_dict, collect_kl_np, vocab): if not args["random_prune"]: collect_kl_np = np.array(collect_kl_np) top_10_percent = np.percentile(collect_kl_np, args["percentile"]) # filter the vocab based on the top_10_percent_idx new_vocab = { word: vocab[word] for word, value in kl_dict.items() if value > top_10_percent } vocab = new_vocab vocab_key = [] vocab_id = [] for k, v in vocab.items(): vocab_key.append(k) vocab_id.append(v) logger.info(len(vocab_key)) else: # random select 10% of the vocab vocab, vocab_key, vocab_id = random_pruning(args, vocab, args["percentile"]) logger.info(len(vocab_key)) return vocab, vocab_key, vocab_id def random_pruning(args, vocab: dict, percent: int = 99): vocab_key = [] vocab_id = [] for k, v in vocab.items(): vocab_key.append(k) vocab_id.append(v) length = int(len(vocab_key) * (100 - percent) / 100) pruned_index = random.sample(list(np.arange(len(vocab_key))), length) vocab_key = [vocab_key[i] for i in pruned_index] vocab_id = [vocab_id[i] for i in pruned_index] vocab = {vocab_key[i]: vocab_id[i] for i in range(len(vocab_key))} logger.info(len(vocab_key)) return vocab, vocab_key, vocab_id def main(args): print(args) set_seed(args["seed"]) revocab_flag = args["reprune_vocab"] shots = args["num_shots"] batch_size = args["train_batch_size"] args["is_mask_lm"] = False special_space = "▁" if "bert" in args["model_name"]: args["is_mask_lm"] = True special_space = "Ġ" logging.info("......Loading dataset......") prompt_dataset = PromptedClassificationDataset(args) verbalizer_predefined = prompt_dataset.get_verbalizer() args["verbalizers"] = verbalizer_predefined logging.info("verbalizers: %s", verbalizer_predefined) args["num_labels"] = len(verbalizer_predefined) train_dataset, val_dataset, test_dataset = prompt_dataset.get_few_shot_dataset( shots ) logging.info("......truncating vocab......") crossover_tokenizer = 
AutoTokenizer.from_pretrained(args["model_name"]) vocab = crossover_tokenizer.get_vocab() # preprocess the vocab special_tokens = [ crossover_tokenizer.unk_token, crossover_tokenizer.pad_token, crossover_tokenizer.sep_token, crossover_tokenizer.cls_token, ] vocab = { word: index for word, index in vocab.items() if word not in special_tokens and special_space in word } for v in verbalizer_predefined: if v not in vocab: print("verbalizer not in vocab: ", v) assert v in vocab logging.info("the vocab length before action set pruning: %s", len(vocab)) dataset = train_dataset print(dataset) batch_size = min(batch_size, len(dataset)) idx = np.random.choice(len(dataset), batch_size, replace=False) data = [dataset[i] for i in idx] logging.info(f"Length of dataset = {len(data)}") obj_func = PromptedClassificationReward( args=args, reward_type=args["reward_type"], task_lm=args["model_name"], is_mask_lm=args["is_mask_lm"], num_classes=args["num_labels"], verbalizers=args["verbalizers"], use_bn_calibration=args["bn_calibrate"], ) if revocab_flag: # pruning efficiency section # random select 10% of the vocab if args["vocab_path"] != "none": # this is to do kmeans clustering and pruning vocab, _, vocab_id = load_vocab(args) kl_dict, collect_kl_np = find_kl_dict( args, data, vocab, obj_func, prompt_dataset ) else: if not args["run_manual"]: kl_dict, collect_kl_np = load_kl_dict(args) else: kl_dict = {} collect_kl_np = [] if not args["run_manual"]: vocab, _, vocab_id = action_set_pruning(args, kl_dict, collect_kl_np, vocab) else: vocab_id = [v for k, v in vocab.items()] if args["method"] == "genetic": genetics = Genetics(crossover_tokenizer, vocab_id) trainer = GeneticAlgorithmTrainer( pop_size=128, mutate_size=64, crossover_size=64, mutate_frac=0.1, str_len=5, epochs=30, stages=1, n_classes=args["num_labels"], genetics=genetics, eval_batch_size=args["eval_batch_size"], obj_func=obj_func, prompt_dataset=prompt_dataset, use_bn_calibrator=args["bn_calibrate"], logger=logger, ) elif args["method"] == "particle_swarm":
trainer = ParticleSwarmOptimizer(
4
2023-10-08 12:39:44+00:00
24k
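find_kl_dict scores each candidate token by how far appending it shifts the model's class distribution away from the default prediction. A minimal sketch of such a KL-divergence score between two categorical distributions (the probabilities are invented; in the repo the distributions come from PromptedClassificationReward):

import numpy as np

default_probs = np.array([0.50, 0.30, 0.20])  # class probabilities with no token added
token_probs = np.array([0.70, 0.20, 0.10])    # class probabilities with the candidate token
kl = float(np.sum(token_probs * np.log(token_probs / default_probs)))
# larger kl => the token perturbs the prediction more, so it survives pruning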
clessig/atmorep
atmorep/core/trainer.py
[ { "identifier": "AtmoRep", "path": "atmorep/core/atmorep_model.py", "snippet": "class AtmoRep( torch.nn.Module) :\n\n def __init__(self, cf) :\n '''Constructor'''\n \n super( AtmoRep, self).__init__()\n\n self.cf = cf\n\n ###################################################\n def create( self...
import torch import torchinfo import numpy as np import code import os import datetime import functools import pandas as pd import wandb import torch.distributed as dist import torch.utils.data.distributed import atmorep.config.config as config import atmorep.utils.token_infos_transformations as token_infos_transformations import atmorep.utils.utils as utils from pathlib import Path from typing import TypeVar from torch.distributed.optim import ZeroRedundancyOptimizer from atmorep.core.atmorep_model import AtmoRep from atmorep.core.atmorep_model import AtmoRepData from atmorep.training.bert import prepare_batch_BERT_multifield from atmorep.transformer.transformer_base import positional_encoding_harmonic from atmorep.utils.utils import shape_to_str from atmorep.utils.utils import relMSELoss from atmorep.utils.utils import Gaussian from atmorep.utils.utils import CRPS from atmorep.utils.utils import NetMode from atmorep.utils.utils import sgn_exp from atmorep.datasets.data_writer import write_forecast, write_BERT, write_attention
14,945
num_tokens = field_info[3] token_size = field_info[4] lat_d_h, lon_d_h = int(np.floor(token_size[1]/2.)), int(np.floor(token_size[2]/2.)) tinfos = token_infos[fidx].reshape( [-1, num_levels, *num_tokens, cf.size_token_info]) res = tinfos[0,0,0,0,0][-1].item() batch_size = tinfos.shape[0] sources_b = detok( sources[fidx].numpy()) if is_predicted : # split according to levels lens_levels = [t.shape[0] for t in tokens_masked_idx[fidx]] targets_b = torch.split( targets[fidx], lens_levels) preds_mu_b = torch.split( log_preds[fidx][0], lens_levels) preds_ens_b = torch.split( log_preds[fidx][2], lens_levels) # split according to batch lens_batches = [ [bv.shape[0] for bv in b] for b in tokens_masked_idx_list[fidx] ] targets_b = [torch.split( targets_b[vidx], lens) for vidx,lens in enumerate(lens_batches)] preds_mu_b = [torch.split(preds_mu_b[vidx], lens) for vidx,lens in enumerate(lens_batches)] preds_ens_b =[torch.split(preds_ens_b[vidx],lens) for vidx,lens in enumerate(lens_batches)] # recover token shape targets_b = [[targets_b[vidx][bidx].reshape([-1, *token_size]) for bidx in range(batch_size)] for vidx in range(num_levels)] preds_mu_b = [[preds_mu_b[vidx][bidx].reshape([-1, *token_size]) for bidx in range(batch_size)] for vidx in range(num_levels)] preds_ens_b = [[preds_ens_b[vidx][bidx].reshape( [-1, cf.net_tail_num_nets, *token_size]) for bidx in range(batch_size)] for vidx in range(num_levels)] # for all batch items coords_b = [] for bidx, tinfo in enumerate(tinfos) : # use first vertical levels since a column is considered lats = np.arange(tinfo[0,0,0,0,4]-lat_d_h*res, tinfo[0,0,-1,0,4]+lat_d_h*res+0.001,res) if tinfo[0,0,0,-1,5] < tinfo[0,0,0,0,5] : lons = np.remainder( np.arange( tinfo[0,0,0,0,5] - lon_d_h*res, 360. + tinfo[0,0,0,-1,5] + lon_d_h*res + 0.001, res), 360.) else : lons = np.arange(tinfo[0,0,0,0,5]-lon_d_h*res, tinfo[0,0,0,-1,5]+lon_d_h*res+0.001,res) lons = np.remainder( lons, 360.) # time stamp in token_infos is at start time so needs to be advanced by token_size[0]-1 s = utils.token_info_to_time( tinfo[0,0,0,0,:3] ) - pd.Timedelta(hours=token_size[0]-1) e = utils.token_info_to_time( tinfo[0,-1,0,0,:3] ) dates = pd.date_range( start=s, end=e, freq='h') # target etc are aliasing targets_b which simplifies bookkeeping below if is_predicted : target = [targets_b[vidx][bidx] for vidx in range(num_levels)] pred_mu = [preds_mu_b[vidx][bidx] for vidx in range(num_levels)] pred_ens = [preds_ens_b[vidx][bidx] for vidx in range(num_levels)] dates_masked_l, lats_masked_l, lons_masked_l = [], [], [] for vidx, _ in enumerate(field_info[2]) : normalizer = self.model.normalizer( fidx, vidx) y, m = dates[0].year, dates[0].month sources_b[bidx,vidx] = normalizer.denormalize( y, m, sources_b[bidx,vidx], [lats, lons]) if is_predicted : # TODO: make sure normalizer_local / normalizer_global is used in data_loader idx = tokens_masked_idx_list[fidx][vidx][bidx] tinfo_masked = tinfos[bidx,vidx].flatten( 0,2) tinfo_masked = tinfo_masked[idx] lad, lod = lat_d_h*res, lon_d_h*res lats_masked, lons_masked, dates_masked = [], [], [] for t in tinfo_masked : lats_masked.append( np.expand_dims( np.arange(t[4]-lad, t[4]+lad+0.001,res), 0)) lons_masked.append( np.expand_dims( np.arange(t[5]-lod, t[5]+lod+0.001,res), 0)) r = pd.date_range( start=utils.token_info_to_time(t), periods=token_size[0], freq='h') dates_masked.append( np.expand_dims(r.to_pydatetime().astype( 'datetime64[s]'), 0) ) lats_masked = np.concatenate( lats_masked, 0) lons_masked = np.remainder( np.concatenate( lons_masked, 0), 360.) 
dates_masked = np.concatenate( dates_masked, 0) for ii,(t,p,e,la,lo) in enumerate(zip( target[vidx], pred_mu[vidx], pred_ens[vidx], lats_masked, lons_masked)) : targets_b[vidx][bidx][ii] = normalizer.denormalize( y, m, t, [la, lo]) preds_mu_b[vidx][bidx][ii] = normalizer.denormalize( y, m, p, [la, lo]) preds_ens_b[vidx][bidx][ii] = normalizer.denormalize( y, m, e, [la, lo]) dates_masked_l += [ dates_masked ] lats_masked_l += [ [90.-lat for lat in lats_masked] ] lons_masked_l += [ lons_masked ] dates = dates.to_pydatetime().astype( 'datetime64[s]') coords_b += [ [dates, 90.-lats, lons, dates_masked_l, lats_masked_l, lons_masked_l] ] fn = field_info[0] sources_out.append( [fn, sources_b]) if is_predicted : targets_out.append([fn, [[t.numpy(force=True) for t in t_v] for t_v in targets_b]]) preds_out.append( [fn, [[p.numpy(force=True) for p in p_v] for p_v in preds_mu_b]]) ensembles_out.append( [fn, [[p.numpy(force=True) for p in p_v] for p_v in preds_ens_b]]) else : targets_out.append( [fn, []]) preds_out.append( [fn, []]) ensembles_out.append( [fn, []]) sources_dates_out.append( [c[0] for c in coords_b]) sources_lats_out.append( [c[1] for c in coords_b]) sources_lons_out.append( [c[2] for c in coords_b]) if is_predicted : targets_dates_out.append( [c[3] for c in coords_b]) targets_lats_out.append( [c[4] for c in coords_b]) targets_lons_out.append( [c[5] for c in coords_b]) else : targets_dates_out.append( [ ]) targets_lats_out.append( [ ]) targets_lons_out.append( [ ]) levels = [[np.array(l) for l in field[2]] for field in cf.fields]
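The coordinate recovery above expands each token's centre (stored in tinfo[..., 4] and tinfo[..., 5]) by half the token extent at grid resolution res, wrapping longitudes modulo 360. A small numeric sketch with invented values:

import numpy as np

res = 0.25              # grid resolution in degrees
lat_d_h = 4             # half of a 9-point token extent
lat_c, lon_c = 48.0, 359.5

lats = np.arange(lat_c - lat_d_h * res, lat_c + lat_d_h * res + 0.001, res)
# 9 latitudes centred on 48.0: 47.0, 47.25, ..., 49.0

lons = np.remainder(np.arange(lon_c - lat_d_h * res, lon_c + lat_d_h * res + 0.001, res), 360.)
# wraps across 0 degrees: 358.5, ..., 359.75, 0.0, 0.25, 0.5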
#################################################################################################### # # Copyright (C) 2022 # #################################################################################################### # # project : atmorep # # author : atmorep collaboration # # description : # # license : # #################################################################################################### # code.interact(local=locals()) # import horovod.torch as hvd #################################################################################################### class Trainer_Base() : def __init__( self, cf, devices ) : self.cf = cf self.devices = devices self.device_in = devices[0] self.device_out = devices[-1] self.fields_prediction_idx = [] self.loss_weights = torch.zeros( len(cf.fields_prediction) ) for ifield, field in enumerate(cf.fields_prediction) : self.loss_weights[ifield] = self.cf.fields_prediction[ifield][1] for idx, field_info in enumerate(cf.fields) : if field_info[0] == field[0] : self.fields_prediction_idx.append( idx) break self.loss_weights = self.loss_weights.to( self.device_out) self.MSELoss = torch.nn.MSELoss() # transformation for token infos if hasattr( cf, 'token_infos_transformation') : self.tok_infos_trans = getattr( token_infos_transformations, cf.token_infos_transformation) else : self.tok_infos_trans = getattr( token_infos_transformations, 'identity') if 0 == cf.par_rank : directory = Path( config.path_results, 'id{}'.format( cf.wandb_id)) if not os.path.exists(directory): os.makedirs( directory) directory = Path( config.path_models, 'id{}'.format( cf.wandb_id)) if not os.path.exists(directory): os.makedirs( directory) ################################################### def create( self, load_embeds=True) : net = AtmoRep( self.cf) self.model = AtmoRepData( net) self.model.create( self.pre_batch, self.devices, load_embeds) # TODO: pass the properly to model / net self.model.net.encoder_to_decoder = self.encoder_to_decoder self.model.net.decoder_to_tail = self.decoder_to_tail return self ################################################### @classmethod def load( Typename, cf, model_id, epoch, devices) : trainer = Typename( cf, devices).create( load_embeds=False) trainer.model.net = trainer.model.net.load( model_id, devices, cf, epoch) # TODO: pass the properly to model / net trainer.model.net.encoder_to_decoder = trainer.encoder_to_decoder trainer.model.net.decoder_to_tail = trainer.decoder_to_tail str = 'Loaded model id = {}{}.'.format( model_id, f' at epoch = {epoch}' if epoch> -2 else '') print( str) return trainer ################################################### def save( self, epoch) : self.model.net.save( epoch) ################################################### def get_learn_rates( self) : cf = self.cf size_padding = 5 learn_rates = np.zeros( cf.num_epochs + size_padding) learn_rates[:cf.lr_start_epochs] = np.linspace( cf.lr_start, cf.lr_max, num = cf.lr_start_epochs) lr = learn_rates[cf.lr_start_epochs-1] ic = 0 for epoch in range( cf.lr_start_epochs, cf.num_epochs + size_padding) : lr = max( lr / cf.lr_decay_rate, cf.lr_min) learn_rates[epoch] = lr if ic > 9999 : # sanity check assert "Maximum number of epochs exceeded." 
return learn_rates ################################################### def run( self, epoch = -1) : cf = self.cf model = self.model learn_rates = self.get_learn_rates() if cf.with_ddp : self.model_ddp = torch.nn.parallel.DistributedDataParallel( model, static_graph=True) if not cf.optimizer_zero : self.optimizer = torch.optim.AdamW( self.model_ddp.parameters(), lr=cf.lr_start, weight_decay=cf.weight_decay) else : self.optimizer = ZeroRedundancyOptimizer(self.model_ddp.parameters(), optimizer_class=torch.optim.AdamW, lr=cf.lr_start ) else : self.optimizer = torch.optim.AdamW( self.model.parameters(), lr=cf.lr_start, weight_decay=cf.weight_decay) if 0 == cf.par_rank : # print( self.model.net) model_parameters = filter(lambda p: p.requires_grad, self.model_ddp.parameters()) num_params = sum([np.prod(p.size()) for p in model_parameters]) print( f'Number of trainable parameters: {num_params:,}') # test at the beginning as reference self.model.load_data( NetMode.test, batch_size=cf.batch_size_test) if cf.test_initial : cur_test_loss = self.validate( epoch, cf.BERT_strategy).cpu().numpy() test_loss = np.array( [cur_test_loss]) else : # generic value based on data normalization test_loss = np.array( [1.0]) epoch += 1 batch_size = cf.batch_size_start - cf.batch_size_delta if cf.profile : lr = learn_rates[epoch] for g in self.optimizer.param_groups: g['lr'] = lr self.model.load_data( NetMode.train, batch_size = cf.batch_size_max) self.profile() # training loop while True : if epoch >= cf.num_epochs : break lr = learn_rates[epoch] for g in self.optimizer.param_groups: g['lr'] = lr batch_size = min( cf.batch_size_max, batch_size + cf.batch_size_delta) tstr = datetime.datetime.now().strftime("%H:%M:%S") print( '{} : {} :: batch_size = {}, lr = {}'.format( epoch, tstr, batch_size, lr) ) self.model.load_data( NetMode.train, batch_size = batch_size) self.train( epoch) if cf.with_wandb and 0 == cf.par_rank : self.save( epoch) cur_test_loss = self.validate( epoch, cf.BERT_strategy).cpu().numpy() # self.validate( epoch, 'forecast') # save model if cur_test_loss < test_loss.min() : self.save( -2) test_loss = np.append( test_loss, [cur_test_loss]) epoch += 1 tstr = datetime.datetime.now().strftime("%H:%M:%S") print( 'Finished training at {} with test loss = {}.'.format( tstr, test_loss[-1]) ) # save final network if cf.with_wandb and 0 == cf.par_rank : self.save( -2) ################################################### def train( self, epoch): model = self.model cf = self.cf model.mode( NetMode.train) self.optimizer.zero_grad() loss_total = [[] for i in range(len(cf.losses)) ] std_dev_total = [[] for i in range(len(self.fields_prediction_idx)) ] mse_loss_total = [] grad_loss_total = [] ctr = 0 for batch_idx in range( model.len( NetMode.train)) : batch_data = self.model.next() batch_data = self.prepare_batch( batch_data) preds, _ = self.model_ddp( batch_data) loss, mse_loss, losses = self.loss( preds, batch_idx) self.optimizer.zero_grad() loss.backward() self.optimizer.step() [loss_total[idx].append( losses[key]) for idx, key in enumerate(losses)] mse_loss_total.append( mse_loss.detach().cpu() ) grad_loss_total.append( loss.detach().cpu() ) [std_dev_total[idx].append( pred[1].detach().cpu()) for idx, pred in enumerate(preds)] # logging if int((batch_idx * cf.batch_size_max) / 4) > ctr : # wandb logging if cf.with_wandb and (0 == cf.par_rank) : loss_dict = { "training loss": torch.mean( torch.tensor( mse_loss_total)), "gradient loss": torch.mean( torch.tensor( grad_loss_total)) } # log individual loss terms for 
individual fields for idx, cur_loss in enumerate(loss_total) : loss_name = self.cf.losses[idx] lt = torch.tensor(cur_loss) for i, field in enumerate(cf.fields_prediction) : idx_name = loss_name + ', ' + field[0] idx_std_name = 'stddev, ' + field[0] loss_dict[idx_name] = torch.mean( lt[:,i]).cpu().detach() loss_dict[idx_std_name] = torch.mean(torch.cat(std_dev_total[i],0)).cpu().detach() wandb.log( loss_dict ) # console output print('train epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:1.5f} : {:1.5f} :: {:1.5f}'.format( epoch, batch_idx, model.len( NetMode.train), 100. * batch_idx/model.len(NetMode.train), torch.mean( torch.tensor( grad_loss_total)), torch.mean(torch.tensor(mse_loss_total)), torch.mean( preds[0][1]) ), flush=True) # save model (use -2 as epoch to indicate latest, stored without epoch specification) # self.save( -2) # reset loss_total = [[] for i in range(len(cf.losses)) ] mse_loss_total = [] grad_loss_total = [] std_dev_total = [[] for i in range(len(self.fields_prediction_idx)) ] ctr += 1 # save gradients if cf.save_grads and cf.with_wandb and (0 == cf.par_rank) : dir_name = './grads/id{}'.format( cf.wandb_id) if not os.path.exists(dir_name): os.makedirs(dir_name) rmsprop_ws = [] for k in range( len(self.optimizer.state_dict()['state']) ) : rmsprop_ws.append(self.optimizer.state_dict()['state'][k]['exp_avg_sq'].mean().unsqueeze(0)) rmsprop_ws = torch.cat( rmsprop_ws) fname = '{}/{}_epoch{}_rmsprop.npy'.format( dir_name, cf.wandb_id, epoch) np.save( fname, rmsprop_ws.cpu().detach().numpy() ) idx = 0 for name, param in self.model.named_parameters(): if param.requires_grad : fname = '{}/{}_epoch{}_{:05d}_{}_grad.npy'.format( dir_name, cf.wandb_id, epoch, idx,name) np.save( fname, param.grad.cpu().detach().numpy() ) idx += 1 # clean memory self.optimizer.zero_grad() del batch_data, loss, loss_total, mse_loss_total, grad_loss_total, std_dev_total ################################################### def profile( self): model = self.model cf = self.cf model.mode( NetMode.train) self.optimizer.zero_grad() # See https://pytorch.org/tutorials/intermediate/tensorboard_profiler_tutorial.html # for details on how to load and analyse report # https://pytorch.org/blog/trace-analysis-for-masses/ # do for all par_ranks to avoid that they run out of sync print( '---------------------------------') print( 'Profiling:') pname = './logs/profile_par_rank' + str(cf.par_rank) + '_' + cf.wandb_id + '/profile' with torch.profiler.profile( activities=[torch.profiler.ProfilerActivity.CPU, torch.profiler.ProfilerActivity.CUDA], schedule=torch.profiler.schedule(wait=1, warmup=1, active=3, repeat=2), on_trace_ready=torch.profiler.tensorboard_trace_handler(pname), profile_memory=True, record_shapes=True, with_stack=True) as prof: for batch_idx in range( 2 * (1+1+3) ) : batch_data = self.model.next() batch_data = self.prepare_batch( batch_data) preds, _ = self.model_ddp( batch_data) loss, mse_loss, losses = self.loss( preds, batch_idx) self.optimizer.zero_grad() # loss.backward() # self.optimizer.step() prof.step() print( 'Profiling finished.') print( '---------------------------------') ################################################### def validate( self, epoch, BERT_test_strategy = 'BERT'): cf = self.cf BERT_strategy_train = cf.BERT_strategy cf.BERT_strategy = BERT_test_strategy self.model.mode( NetMode.test) total_loss = 0. 
total_losses = torch.zeros( len(self.fields_prediction_idx) ) test_len = 0 self.mode_test = True # run in training mode offset = 0 if -1 == epoch and 0 == cf.par_rank : if 1 == cf.num_accs_per_task : # bug in torchinfo; fixed in v1.8.0 offset += 1 print( 'Network size:') batch_data = self.model.next() batch_data = self.prepare_batch( batch_data) torchinfo.summary( self.model, input_data=[batch_data]) # run test set evaluation with torch.no_grad() : for it in range( self.model.len( NetMode.test) - offset) : batch_data = self.model.next() if cf.par_rank < cf.log_test_num_ranks : # keep on cpu since it will otherwise clog up GPU memory (sources, token_infos, targets, tmis, tmis_list) = batch_data[0] # targets if len(batch_data[1]) > 0 : if type(batch_data[1][0][0]) is list : targets = [batch_data[1][i][0][0] for i in range( len(batch_data[1]))] else : targets = batch_data[1][0] # store on cpu log_sources = ( [source.detach().clone().cpu() for source in sources ], [ti.detach().clone().cpu() for ti in token_infos], [target.detach().clone().cpu() for target in targets ], tmis, tmis_list ) batch_data = self.prepare_batch( batch_data) preds, atts = self.model( batch_data) loss = torch.tensor( 0.) ifield = 0 for pred, idx in zip( preds, self.fields_prediction_idx) : target = self.targets[idx] # hook for custom test loss self.test_loss( pred, target) # base line loss cur_loss = self.MSELoss( pred[0], target = target ).cpu().item() loss += cur_loss total_losses[ifield] += cur_loss ifield += 1 total_loss += loss test_len += 1 # store detailed results on current test set for book keeping if cf.par_rank < cf.log_test_num_ranks : log_preds = [[p.detach().clone().cpu() for p in pred] for pred in preds] self.log_validate( epoch, it, log_sources, log_preds) if cf.attention: self.log_attention( epoch, it, [atts, [ti.detach().clone().cpu() for ti in token_infos]]) # average over all nodes total_loss /= test_len * len(self.cf.fields_prediction) total_losses /= test_len if cf.with_ddp : total_loss_cuda = total_loss.cuda() total_losses_cuda = total_losses.cuda() dist.all_reduce( total_loss_cuda, op=torch.distributed.ReduceOp.AVG ) dist.all_reduce( total_losses_cuda, op=torch.distributed.ReduceOp.AVG ) total_loss = total_loss_cuda.cpu() total_losses = total_losses_cuda.cpu() if 0 == cf.par_rank : print( 'validation loss for strategy={} at epoch {} : {}'.format( BERT_test_strategy, epoch, total_loss), flush=True) if cf.with_wandb and (0 == cf.par_rank) : loss_dict = {"val. loss {}".format(BERT_test_strategy) : total_loss} total_losses = total_losses.cpu().detach() for i, field in enumerate(cf.fields_prediction) : idx_name = 'val., {}, '.format(BERT_test_strategy) + field[0] loss_dict[idx_name] = total_losses[i] print( 'validation loss for {} : {}'.format( field[0], total_losses[i] )) wandb.log( loss_dict) batch_data = [] torch.cuda.empty_cache() cf.BERT_strategy = BERT_strategy_train self.mode_test = False return total_loss ################################################### def evaluate( self, data_idx = 0, log = True): cf = self.cf self.model.mode( NetMode.test) log_sources = [] test_len = 0 # evaluate loss = torch.tensor( 0.) 
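    # Descriptive note (editor's addition, inferred from the code): evaluate()
    # mirrors validate() but runs a single pass without swapping cf.BERT_strategy;
    # logging is keyed by data_idx instead of an epoch, and only ranks below
    # cf.log_test_num_ranks write detailed outputs.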
    with torch.no_grad() :
      for it in range( self.model.len( NetMode.test)) :

        batch_data = self.model.next()

        if cf.par_rank < cf.log_test_num_ranks :
          # keep on cpu since it will otherwise clog up GPU memory
          (sources, token_infos, targets, tmis, tmis_list) = batch_data[0]
          # targets
          if len(batch_data[1]) > 0 :
            targets = []
            for target_field in batch_data[1] :
              targets.append(torch.cat([target_vl[0].unsqueeze(1) for target_vl in target_field],1))
          # store on cpu
          log_sources = ( [source.detach().clone().cpu() for source in sources ],
                          [ti.detach().clone().cpu() for ti in token_infos],
                          [target.detach().clone().cpu() for target in targets ],
                          tmis, tmis_list )

        batch_data = self.prepare_batch( batch_data)
        preds, atts = self.model( batch_data)

        ifield = 0
        for pred, idx in zip( preds, self.fields_prediction_idx) :
          target = self.targets[idx]
          cur_loss = self.MSELoss( pred[0], target = target ).cpu()
          loss += cur_loss
          ifield += 1

        test_len += 1

        # logging
        if cf.par_rank < cf.log_test_num_ranks :
          self.log_validate( data_idx, it, log_sources, preds)
          if cf.attention:
            self.log_attention( data_idx , it, [atts, [ti.detach().clone().cpu() for ti in token_infos]])

    # average over all nodes
    loss /= test_len * len(self.cf.fields_prediction)
    if cf.with_ddp :
      loss_cuda = loss.cuda()
      dist.all_reduce( loss_cuda, op=torch.distributed.ReduceOp.AVG )
      loss = loss_cuda.cpu()

    if 0 == cf.par_rank :
      print( 'Loss {}'.format( loss))

  ###################################################
  def test_loss( self, pred, target) :
    '''Hook for custom test loss'''
    pass

  ###################################################
  def loss( self, preds, batch_idx = 0) :

    # TODO: move implementations to individual files

    cf = self.cf
    mse_loss_total = torch.tensor( 0.,)
    losses = dict(zip(cf.losses,[[] for loss in cf.losses ]))

    for pred, idx in zip( preds, self.fields_prediction_idx) :
      target = self.targets[idx]

      mse_loss = self.MSELoss( pred[0], target = target)
      mse_loss_total += mse_loss.cpu().detach()

      # MSE loss
      if 'mse' in self.cf.losses :
        losses['mse'].append( mse_loss)

      # MSE loss on ensemble members
      if 'mse_ensemble' in self.cf.losses :
        loss_en = torch.tensor( 0., device=target.device)
        for en in torch.transpose( pred[2], 1, 0) :
          loss_en += self.MSELoss( en, target = target)
        # losses['mse_ensemble'].append( 50. * loss_en / pred[2].shape[1])
        losses['mse_ensemble'].append( loss_en / pred[2].shape[1])

      # Generalized cross entropy loss for continuous distributions
      if 'stats' in self.cf.losses :
        stats_loss = Gaussian( target, pred[0], pred[1])
        diff = (stats_loss-1.)
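        # Hedged reading (assumes Gaussian(t, mu, sigma) from the project's utils
        # returns the normal density evaluated at t): the active variant below is
        #   L_stats = mean((N(t; mu, sigma) - 1)^2) + mean(sqrt(|sigma|)),
        # pushing the per-token likelihood toward 1 while sqrt(|sigma|) penalizes
        # inflated predicted spreads. The CRPS branch further below likewise
        # assumes the closed-form CRPS of a Gaussian,
        #   CRPS(N(mu, sigma^2), y) = sigma * ( z*(2*Phi(z)-1) + 2*phi(z) - 1/sqrt(pi) ),
        # with z = (y - mu)/sigma and Phi/phi the standard normal CDF/PDF.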
        # stats_loss = 0.01 * torch.mean( diff * diff) + torch.mean( torch.sqrt(torch.abs( pred[1])) )
        stats_loss = torch.mean( diff * diff) + torch.mean( torch.sqrt( torch.abs( pred[1])) )
        losses['stats'].append( stats_loss)

      # Generalized cross entropy loss for continuous distributions
      if 'stats_area' in self.cf.losses :
        diff = torch.abs( torch.special.erf( (target - pred[0]) / (pred[1] * pred[1])) )
        stats_area = 0.2 * torch.mean( diff * diff) + torch.mean( torch.sqrt(torch.abs( pred[1])) )
        losses['stats_area'].append( stats_area)

      # CRPS score
      if 'crps' in self.cf.losses :
        crps_loss = torch.mean( CRPS( target, pred[0], pred[1]))
        losses['crps'].append( crps_loss)

    loss = torch.tensor( 0., device=self.device_out)
    for key in losses :
      # print( 'LOSS : {} :: {}'.format( key, losses[key]))
      for ifield, val in enumerate(losses[key]) :
        loss += self.loss_weights[ifield] * val.to( self.device_out)
    loss /= len(self.cf.fields_prediction) * len( self.cf.losses)
    mse_loss = mse_loss_total / len(self.cf.fields_prediction)

    return loss, mse_loss, losses

####################################################################################################
class Trainer_BERT( Trainer_Base) :

  ###################################################
  def __init__( self, cf, devices) :

    Trainer_Base.__init__( self, cf, devices)

    self.rng_seed = cf.rng_seed
    if not self.rng_seed :
      self.rng_seed = int(torch.randint( 100000000, (1,)))
    # TODO: generate only rngs that are needed
    ll = len(cf.fields) * 8 #len(cf.vertical_levels)
    if cf.BERT_fields_synced :
      self.rngs = [np.random.default_rng(self.rng_seed) for _ in range(ll)]
    else :
      self.rngs = [np.random.default_rng(self.rng_seed+i) for i in range(ll)]

    # batch preprocessing to be done in loader (mainly for performance reasons since it's
    # parallelized there)
    self.pre_batch = functools.partial( prepare_batch_BERT_multifield, self.cf, self.rngs,
                                        self.cf.fields, self.cf.BERT_strategy )

  ###################################################
  def prepare_batch( self, xin) :
    '''Move data to device and some additional final preprocessing before model eval'''

    cf = self.cf
    devs = self.devices

    # unpack loader output
    # xin[0] since BERT does not have targets
    (sources, token_infos, targets, fields_tokens_masked_idx,fields_tokens_masked_idx_list) = xin[0]

    # network input
    batch_data = [ ( sources[i].to( devs[ cf.fields[i][1][3] ], non_blocking=True),
                     self.tok_infos_trans(token_infos[i]).to( self.devices[0], non_blocking=True))
                   for i in range(len(sources)) ]

    # store token number since BERT selects sub-cube (optionally)
    self.num_tokens = []
    for field_idx in range(len(batch_data)) :
      self.num_tokens.append( list(batch_data[field_idx][0].shape[2:5]))

    # target
    self.targets = []
    for ifield in self.fields_prediction_idx :
      self.targets.append( targets[ifield].to( devs[cf.fields[ifield][1][3]], non_blocking=True ))

    # idxs of masked tokens
    tmi_out = [[] for _ in range(len(fields_tokens_masked_idx))]
    for i,tmi in enumerate(fields_tokens_masked_idx) :
      tmi_out[i] = [tmi_l.to( devs[cf.fields[i][1][3]], non_blocking=True) for tmi_l in tmi]
    self.tokens_masked_idx = tmi_out

    # idxs of masked tokens per batch entry
    self.fields_tokens_masked_idx_list = fields_tokens_masked_idx_list

    # learnable class token (cannot be done in the data loader since this is running in parallel)
    if cf.learnable_mask :
      for ifield, (source, _) in enumerate(batch_data) :
        source = torch.flatten( torch.flatten( torch.flatten( source, 1, 4), 2, 4), 0, 1)
        assert len(cf.fields[ifield][2]) == 1
        tmidx = self.tokens_masked_idx[ifield][0]
        source[ tmidx ] =
self.model.net.masks[ifield].to( source.device) return batch_data ################################################### def encoder_to_decoder( self, embeds_layers) : return ([embeds_layers[i][-1] for i in range(len(embeds_layers))] , embeds_layers ) ################################################### def decoder_to_tail( self, idx_pred, pred) : '''Positional encoding of masked tokens for tail network evaluation''' field_idx = self.fields_prediction_idx[idx_pred] dev = self.devices[ self.cf.fields[field_idx][1][3] ] target_idx = self.tokens_masked_idx[field_idx] assert len(target_idx) > 0, 'no masked tokens but target variable' # select "fixed" masked tokens for loss computation # recover vertical level dimension num_tokens = self.num_tokens[field_idx] num_vlevels = len(self.cf.fields[field_idx][2]) # flatten token dimensions: remove space-time separation pred = torch.flatten( pred, 2, 3).to( dev) # extract masked token level by level pred_masked = [] for lidx, level in enumerate(self.cf.fields[field_idx][2]) : # select masked tokens, flattened along batch dimension for easier indexing and processing pred_l = torch.flatten( pred[:,lidx], 0, 1) pred_masked_l = pred_l[ target_idx[lidx] ] target_idx_l = target_idx[lidx] # add positional encoding of masked tokens # # TODO: do we need the positional encoding? # compute space time indices of all tokens target_idxs_v = level * torch.ones( target_idx_l.shape[0], device=dev) num_tokens_space = num_tokens[1] * num_tokens[2] # remove offset introduced by linearization target_idx_l = torch.remainder( target_idx_l, np.prod(num_tokens)) target_idxs_t = (target_idx_l / num_tokens_space).int() temp = torch.remainder( target_idx_l, num_tokens_space) target_idxs_x = (temp / num_tokens[1]).int() target_idxs_y = torch.remainder( temp, num_tokens[2]) # apply harmonic positional encoding dim_embed = pred.shape[-1] pe = torch.zeros( pred_masked_l.shape[0], dim_embed, device=dev) xs = (2. * np.pi / dim_embed) * torch.arange( 0, dim_embed, 2, device=dev) pe[:, 0::2] = 0.5 * torch.sin( torch.outer( 8 * target_idxs_x, xs) ) \ + torch.sin( torch.outer( target_idxs_t, xs) ) pe[:, 1::2] = 0.5 * torch.cos( torch.outer( 8 * target_idxs_y, xs) ) \ + torch.cos( torch.outer( target_idxs_v, xs) ) # TODO: with or without final positional encoding? # pred_masked.append( pred_masked_l + pe) pred_masked.append( pred_masked_l) # flatten along level dimension, for loss evaluation we effectively have level, batch, ... 
# as ordering of dimensions pred_masked = torch.cat( pred_masked, 0) return pred_masked ################################################### def log_validate( self, epoch, bidx, log_sources, log_preds) : '''Hook for logging: output associated with concrete training strategy.''' if not hasattr( self.cf, 'wandb_id') : return if 'forecast' == self.cf.BERT_strategy : self.log_validate_forecast( epoch, bidx, log_sources, log_preds) elif 'BERT' == self.cf.BERT_strategy : self.log_validate_BERT( epoch, bidx, log_sources, log_preds) else : assert False ################################################### def log_validate_forecast( self, epoch, batch_idx, log_sources, log_preds) : '''Logging for BERT_strategy=forecast.''' cf = self.cf detok = utils.detokenize # TODO, TODO: for 6h forecast we need to iterate over predicted token slices # save source: remains identical so just save ones (sources, token_infos, targets, _, _) = log_sources sources_out, targets_out, preds_out, ensembles_out = [ ], [ ], [ ], [ ] # reconstruct geo-coords (identical for all fields) forecast_num_tokens = 1 if hasattr( cf, 'forecast_num_tokens') : forecast_num_tokens = cf.forecast_num_tokens num_tokens = cf.fields[0][3] token_size = cf.fields[0][4] lat_d_h, lon_d_h = int(np.floor(token_size[1]/2.)), int(np.floor(token_size[2]/2.)) lats, lons = [ ], [ ] for tinfo in token_infos[0] : lat_min, lat_max = tinfo[0][4], tinfo[ num_tokens[1]*num_tokens[2]-1 ][4] lon_min, lon_max = tinfo[0][5], tinfo[ num_tokens[1]*num_tokens[2]-1 ][5] res = tinfo[0][-1] lat = torch.arange( lat_min - lat_d_h*res, lat_max + lat_d_h*res + 0.001, res) if lon_max < lon_min : lon = torch.arange( lon_min - lon_d_h*res, 360. + lon_max + lon_d_h*res + 0.001, res) else : lon = torch.arange( lon_min - lon_d_h*res, lon_max + lon_d_h*res + 0.001, res) lats.append( lat.numpy()) lons.append( torch.remainder( lon, 360.).numpy()) # check that last token (bottom right corner) has the expected coords # assert np.allclose( ) # extract dates for each token entry, constant for each batch and field dates_t = [] for b_token_infos in token_infos[0] : dates_t.append(utils.token_info_to_time(b_token_infos[0])-pd.Timedelta(hours=token_size[0]-1)) # TODO: check that last token matches first one # process input fields for fidx, field_info in enumerate(cf.fields) : # reshape from tokens to contiguous physical field num_levels = len(field_info[2]) source = detok( sources[fidx].cpu().detach().numpy()) # recover tokenized shape target = detok( targets[fidx].cpu().detach().numpy().reshape( [ -1, num_levels, forecast_num_tokens, *field_info[3][1:], *field_info[4] ])) # TODO: check that geo-coords match to general ones that have been pre-determined for bidx in range(token_infos[fidx].shape[0]) : for vidx, _ in enumerate(field_info[2]) : denormalize = self.model.normalizer( fidx, vidx).denormalize date, coords = dates_t[bidx], [lats[bidx], lons[bidx]] source[bidx,vidx] = denormalize( date.year, date.month, source[bidx,vidx], coords) target[bidx,vidx] = denormalize( date.year, date.month, target[bidx,vidx], coords) # append sources_out.append( [field_info[0], source]) targets_out.append( [field_info[0], target]) # process predicted fields for fidx, fn in enumerate(cf.fields_prediction) : # field_info = cf.fields[ self.fields_prediction_idx[fidx] ] num_levels = len(field_info[2]) # predictions pred = log_preds[fidx][0].cpu().detach().numpy() pred = detok( pred.reshape( [ -1, num_levels, forecast_num_tokens, *field_info[3][1:], *field_info[4] ])) # ensemble ensemble = 
log_preds[fidx][2].cpu().detach().numpy() ensemble = detok( ensemble.reshape( [ -1, cf.net_tail_num_nets, num_levels, forecast_num_tokens, *field_info[3][1:], *field_info[4] ]) ) # denormalize for bidx in range(token_infos[fidx].shape[0]) : for vidx, vl in enumerate(field_info[2]) : denormalize = self.model.normalizer( self.fields_prediction_idx[fidx], vidx).denormalize date, coords = dates_t[bidx], [lats[bidx], lons[bidx]] pred[bidx,vidx] = denormalize( date.year, date.month, pred[bidx,vidx], coords) ensemble[bidx,:,vidx] = denormalize(date.year, date.month, ensemble[bidx,:,vidx], coords) # append preds_out.append( [fn[0], pred]) ensembles_out.append( [fn[0], ensemble]) # generate time range dates_sources, dates_targets = [ ], [ ] for bidx in range( source.shape[0]) : r = pd.date_range( start=dates_t[bidx], periods=source.shape[2], freq='h') dates_sources.append( r.to_pydatetime().astype( 'datetime64[s]') ) dates_targets.append( dates_sources[-1][ -forecast_num_tokens*token_size[0] : ] ) levels = np.array(cf.fields[0][2]) lats = [90.-lat for lat in lats] write_forecast( cf.wandb_id, epoch, batch_idx, levels, sources_out, [dates_sources, lats, lons], targets_out, [dates_targets, lats, lons], preds_out, ensembles_out ) ################################################### def log_validate_BERT( self, epoch, batch_idx, log_sources, log_preds) : '''Logging for BERT_strategy=BERT.''' cf = self.cf detok = utils.detokenize # save source: remains identical so just save ones (sources, token_infos, targets, tokens_masked_idx, tokens_masked_idx_list) = log_sources sources_out, targets_out, preds_out, ensembles_out = [ ], [ ], [ ], [ ] sources_dates_out, sources_lats_out, sources_lons_out = [ ], [ ], [ ] targets_dates_out, targets_lats_out, targets_lons_out = [ ], [ ], [ ] for fidx, field_info in enumerate(cf.fields) : # reconstruct coordinates is_predicted = fidx in self.fields_prediction_idx num_levels = len(field_info[2]) num_tokens = field_info[3] token_size = field_info[4] lat_d_h, lon_d_h = int(np.floor(token_size[1]/2.)), int(np.floor(token_size[2]/2.)) tinfos = token_infos[fidx].reshape( [-1, num_levels, *num_tokens, cf.size_token_info]) res = tinfos[0,0,0,0,0][-1].item() batch_size = tinfos.shape[0] sources_b = detok( sources[fidx].numpy()) if is_predicted : # split according to levels lens_levels = [t.shape[0] for t in tokens_masked_idx[fidx]] targets_b = torch.split( targets[fidx], lens_levels) preds_mu_b = torch.split( log_preds[fidx][0], lens_levels) preds_ens_b = torch.split( log_preds[fidx][2], lens_levels) # split according to batch lens_batches = [ [bv.shape[0] for bv in b] for b in tokens_masked_idx_list[fidx] ] targets_b = [torch.split( targets_b[vidx], lens) for vidx,lens in enumerate(lens_batches)] preds_mu_b = [torch.split(preds_mu_b[vidx], lens) for vidx,lens in enumerate(lens_batches)] preds_ens_b =[torch.split(preds_ens_b[vidx],lens) for vidx,lens in enumerate(lens_batches)] # recover token shape targets_b = [[targets_b[vidx][bidx].reshape([-1, *token_size]) for bidx in range(batch_size)] for vidx in range(num_levels)] preds_mu_b = [[preds_mu_b[vidx][bidx].reshape([-1, *token_size]) for bidx in range(batch_size)] for vidx in range(num_levels)] preds_ens_b = [[preds_ens_b[vidx][bidx].reshape( [-1, cf.net_tail_num_nets, *token_size]) for bidx in range(batch_size)] for vidx in range(num_levels)] # for all batch items coords_b = [] for bidx, tinfo in enumerate(tinfos) : # use first vertical levels since a column is considered lats = np.arange(tinfo[0,0,0,0,4]-lat_d_h*res, 
tinfo[0,0,-1,0,4]+lat_d_h*res+0.001,res) if tinfo[0,0,0,-1,5] < tinfo[0,0,0,0,5] : lons = np.remainder( np.arange( tinfo[0,0,0,0,5] - lon_d_h*res, 360. + tinfo[0,0,0,-1,5] + lon_d_h*res + 0.001, res), 360.) else : lons = np.arange(tinfo[0,0,0,0,5]-lon_d_h*res, tinfo[0,0,0,-1,5]+lon_d_h*res+0.001,res) lons = np.remainder( lons, 360.) # time stamp in token_infos is at start time so needs to be advanced by token_size[0]-1 s = utils.token_info_to_time( tinfo[0,0,0,0,:3] ) - pd.Timedelta(hours=token_size[0]-1) e = utils.token_info_to_time( tinfo[0,-1,0,0,:3] ) dates = pd.date_range( start=s, end=e, freq='h') # target etc are aliasing targets_b which simplifies bookkeeping below if is_predicted : target = [targets_b[vidx][bidx] for vidx in range(num_levels)] pred_mu = [preds_mu_b[vidx][bidx] for vidx in range(num_levels)] pred_ens = [preds_ens_b[vidx][bidx] for vidx in range(num_levels)] dates_masked_l, lats_masked_l, lons_masked_l = [], [], [] for vidx, _ in enumerate(field_info[2]) : normalizer = self.model.normalizer( fidx, vidx) y, m = dates[0].year, dates[0].month sources_b[bidx,vidx] = normalizer.denormalize( y, m, sources_b[bidx,vidx], [lats, lons]) if is_predicted : # TODO: make sure normalizer_local / normalizer_global is used in data_loader idx = tokens_masked_idx_list[fidx][vidx][bidx] tinfo_masked = tinfos[bidx,vidx].flatten( 0,2) tinfo_masked = tinfo_masked[idx] lad, lod = lat_d_h*res, lon_d_h*res lats_masked, lons_masked, dates_masked = [], [], [] for t in tinfo_masked : lats_masked.append( np.expand_dims( np.arange(t[4]-lad, t[4]+lad+0.001,res), 0)) lons_masked.append( np.expand_dims( np.arange(t[5]-lod, t[5]+lod+0.001,res), 0)) r = pd.date_range( start=utils.token_info_to_time(t), periods=token_size[0], freq='h') dates_masked.append( np.expand_dims(r.to_pydatetime().astype( 'datetime64[s]'), 0) ) lats_masked = np.concatenate( lats_masked, 0) lons_masked = np.remainder( np.concatenate( lons_masked, 0), 360.) dates_masked = np.concatenate( dates_masked, 0) for ii,(t,p,e,la,lo) in enumerate(zip( target[vidx], pred_mu[vidx], pred_ens[vidx], lats_masked, lons_masked)) : targets_b[vidx][bidx][ii] = normalizer.denormalize( y, m, t, [la, lo]) preds_mu_b[vidx][bidx][ii] = normalizer.denormalize( y, m, p, [la, lo]) preds_ens_b[vidx][bidx][ii] = normalizer.denormalize( y, m, e, [la, lo]) dates_masked_l += [ dates_masked ] lats_masked_l += [ [90.-lat for lat in lats_masked] ] lons_masked_l += [ lons_masked ] dates = dates.to_pydatetime().astype( 'datetime64[s]') coords_b += [ [dates, 90.-lats, lons, dates_masked_l, lats_masked_l, lons_masked_l] ] fn = field_info[0] sources_out.append( [fn, sources_b]) if is_predicted : targets_out.append([fn, [[t.numpy(force=True) for t in t_v] for t_v in targets_b]]) preds_out.append( [fn, [[p.numpy(force=True) for p in p_v] for p_v in preds_mu_b]]) ensembles_out.append( [fn, [[p.numpy(force=True) for p in p_v] for p_v in preds_ens_b]]) else : targets_out.append( [fn, []]) preds_out.append( [fn, []]) ensembles_out.append( [fn, []]) sources_dates_out.append( [c[0] for c in coords_b]) sources_lats_out.append( [c[1] for c in coords_b]) sources_lons_out.append( [c[2] for c in coords_b]) if is_predicted : targets_dates_out.append( [c[3] for c in coords_b]) targets_lats_out.append( [c[4] for c in coords_b]) targets_lons_out.append( [c[5] for c in coords_b]) else : targets_dates_out.append( [ ]) targets_lats_out.append( [ ]) targets_lons_out.append( [ ]) levels = [[np.array(l) for l in field[2]] for field in cf.fields]
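    # Hand-off (descriptive comment): the per-field sources/targets/predictions,
    # their ensembles, and the coordinate bookkeeping assembled above are written
    # out below; write_BERT is presumably the project's output/writer utility.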
write_BERT( cf.wandb_id, epoch, batch_idx,
11
2023-10-09 19:42:46+00:00
24k
MachinePerceptionLab/Attentive_DFPrior
src/DF_Prior.py
[ { "identifier": "config", "path": "src/config.py", "snippet": "def load_config(path, default_path=None):\ndef update_recursive(dict1, dict2):\ndef get_model(cfg):" }, { "identifier": "Mapper", "path": "src/Mapper.py", "snippet": "class Mapper(object):\n \"\"\"\n Mapper thread. \n\n...
import os import time import numpy as np import torch import torch.multiprocessing import torch.multiprocessing as mp from src import config from src.Mapper import Mapper from src.Tracker import Tracker from src.utils.datasets import get_dataset from src.utils.Logger import Logger from src.utils.Mesher import Mesher from src.utils.Renderer import Renderer
20,642
# import src.fusion as fusion
# import open3d as o3d

torch.multiprocessing.set_sharing_strategy('file_system')


class DF_Prior():
    """
    DF_Prior main class.
    Mainly allocate shared resources, and dispatch mapping and tracking process.
    """

    def __init__(self, cfg, args):

        self.cfg = cfg
        self.args = args

        self.occupancy = cfg['occupancy']
        self.low_gpu_mem = cfg['low_gpu_mem']
        self.verbose = cfg['verbose']
        self.dataset = cfg['dataset']

        if args.output is None:
            self.output = cfg['data']['output']
        else:
            self.output = args.output
        self.ckptsdir = os.path.join(self.output, 'ckpts')
        os.makedirs(self.output, exist_ok=True)
        os.makedirs(self.ckptsdir, exist_ok=True)
        os.makedirs(f'{self.output}/mesh', exist_ok=True)

        self.H, self.W, self.fx, self.fy, self.cx, self.cy = cfg['cam']['H'], cfg['cam'][
            'W'], cfg['cam']['fx'], cfg['cam']['fy'], cfg['cam']['cx'], cfg['cam']['cy']
        self.update_cam()

        model = config.get_model(cfg)
        self.shared_decoders = model

        self.scale = cfg['scale']

        self.load_bound(cfg)
        self.load_pretrain(cfg)
        self.grid_init(cfg)

        # need to use spawn
        try:
            mp.set_start_method('spawn', force=True)
        except RuntimeError:
            pass

        self.frame_reader = get_dataset(cfg, args, self.scale)
        self.n_img = len(self.frame_reader)
        self.estimate_c2w_list = torch.zeros((self.n_img, 4, 4))
        self.estimate_c2w_list.share_memory_()

        dataset = self.cfg['data']['dataset']
        scene_id = self.cfg['data']['id']
        self.scene_id = scene_id
        print(scene_id)

        # load tsdf grid
        if dataset == 'scannet':
            self.tsdf_volume_shared = torch.load(f'scannet_tsdf_volume/scene{scene_id}_tsdf_volume.pt')
        elif dataset == 'replica':
            self.tsdf_volume_shared = torch.load(f'replica_tsdf_volume/{scene_id}_tsdf_volume.pt')
        self.tsdf_volume_shared = self.tsdf_volume_shared.to(self.cfg['mapping']['device'])
        self.tsdf_volume_shared.share_memory_()

        # load tsdf grid bound
        if dataset == 'scannet':
            self.tsdf_bnds = torch.load(f'scannet_tsdf_volume/scene{scene_id}_bounds.pt')
        elif dataset == 'replica':
            self.tsdf_bnds = torch.load(f'replica_tsdf_volume/{scene_id}_bounds.pt')
        self.tsdf_bnds = torch.tensor(self.tsdf_bnds).to(self.cfg['mapping']['device'])
        self.tsdf_bnds.share_memory_()

        self.vol_bnds = self.tsdf_bnds
        self.vol_bnds.share_memory_()

        self.gt_c2w_list = torch.zeros((self.n_img, 4, 4))
        self.gt_c2w_list.share_memory_()
        self.idx = torch.zeros((1)).int()
        self.idx.share_memory_()
        self.mapping_first_frame = torch.zeros((1)).int()
        self.mapping_first_frame.share_memory_()
        # the id of the newest frame Mapper is processing
        self.mapping_idx = torch.zeros((1)).int()
        self.mapping_idx.share_memory_()
        self.mapping_cnt = torch.zeros((1)).int()  # counter for mapping
        self.mapping_cnt.share_memory_()

        for key, val in self.shared_c.items():
            val = val.to(self.cfg['mapping']['device'])
            val.share_memory_()
            self.shared_c[key] = val
        self.shared_decoders = self.shared_decoders.to(
            self.cfg['mapping']['device'])
        self.shared_decoders.share_memory()
        self.renderer = Renderer(cfg, args, self)
        self.mesher = Mesher(cfg, args, self)
        self.logger = Logger(cfg, args, self)
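        # Shared-state pattern (descriptive comment): every tensor that both the
        # mapping and tracking processes touch -- pose lists, frame counters, the
        # TSDF volume and its bounds, the feature grids in shared_c -- is moved to
        # the mapping device and marked with share_memory_() before the processes
        # are spawned, so both sides operate on a single copy.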
self.mapper = Mapper(cfg, args, self)
1
2023-10-13 00:49:57+00:00
24k
fury-05/BookRecomendApp
.pythonlibs/lib/python3.10/site-packages/sklearn/metrics/_ranking.py
[ { "identifier": "UndefinedMetricWarning", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/exceptions.py", "snippet": "class UndefinedMetricWarning(UserWarning):\n \"\"\"Warning used when the metric is invalid\n\n .. versionchanged:: 0.18\n Moved from sklearn.base.\n \"\"\"" },...
import warnings import numpy as np from functools import partial from numbers import Integral, Real from scipy.sparse import csr_matrix, issparse from scipy.stats import rankdata from ..exceptions import UndefinedMetricWarning from ..preprocessing import label_binarize from ..utils import ( assert_all_finite, check_array, check_consistent_length, column_or_1d, ) from ..utils._encode import _encode, _unique from ..utils._param_validation import Interval, StrOptions, validate_params from ..utils.extmath import stable_cumsum from ..utils.fixes import trapezoid from ..utils.multiclass import type_of_target from ..utils.sparsefuncs import count_nonzero from ..utils.validation import _check_pos_label_consistency, _check_sample_weight from ._base import _average_binary_score, _average_multiclass_ovo_score
17,987
"""Compute Receiver operating characteristic (ROC). Note: this implementation is restricted to the binary classification task. Read more in the :ref:`User Guide <roc_metrics>`. Parameters ---------- y_true : array-like of shape (n_samples,) True binary labels. If labels are not either {-1, 1} or {0, 1}, then pos_label should be explicitly given. y_score : array-like of shape (n_samples,) Target scores, can either be probability estimates of the positive class, confidence values, or non-thresholded measure of decisions (as returned by "decision_function" on some classifiers). pos_label : int, float, bool or str, default=None The label of the positive class. When ``pos_label=None``, if `y_true` is in {-1, 1} or {0, 1}, ``pos_label`` is set to 1, otherwise an error will be raised. sample_weight : array-like of shape (n_samples,), default=None Sample weights. drop_intermediate : bool, default=True Whether to drop some suboptimal thresholds which would not appear on a plotted ROC curve. This is useful in order to create lighter ROC curves. .. versionadded:: 0.17 parameter *drop_intermediate*. Returns ------- fpr : ndarray of shape (>2,) Increasing false positive rates such that element i is the false positive rate of predictions with score >= `thresholds[i]`. tpr : ndarray of shape (>2,) Increasing true positive rates such that element `i` is the true positive rate of predictions with score >= `thresholds[i]`. thresholds : ndarray of shape (n_thresholds,) Decreasing thresholds on the decision function used to compute fpr and tpr. `thresholds[0]` represents no instances being predicted and is arbitrarily set to `np.inf`. See Also -------- RocCurveDisplay.from_estimator : Plot Receiver Operating Characteristic (ROC) curve given an estimator and some data. RocCurveDisplay.from_predictions : Plot Receiver Operating Characteristic (ROC) curve given the true and predicted values. det_curve: Compute error rates for different probability thresholds. roc_auc_score : Compute the area under the ROC curve. Notes ----- Since the thresholds are sorted from low to high values, they are reversed upon returning them to ensure they correspond to both ``fpr`` and ``tpr``, which are sorted in reversed order during their calculation. An arbitrary threshold is added for the case `tpr=0` and `fpr=0` to ensure that the curve starts at `(0, 0)`. This threshold corresponds to the `np.inf`. References ---------- .. [1] `Wikipedia entry for the Receiver operating characteristic <https://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_ .. [2] Fawcett T. An introduction to ROC analysis[J]. Pattern Recognition Letters, 2006, 27(8):861-874. Examples -------- >>> import numpy as np >>> from sklearn import metrics >>> y = np.array([1, 1, 2, 2]) >>> scores = np.array([0.1, 0.4, 0.35, 0.8]) >>> fpr, tpr, thresholds = metrics.roc_curve(y, scores, pos_label=2) >>> fpr array([0. , 0. , 0.5, 0.5, 1. ]) >>> tpr array([0. , 0.5, 0.5, 1. , 1. ]) >>> thresholds array([ inf, 0.8 , 0.4 , 0.35, 0.1 ]) """ fps, tps, thresholds = _binary_clf_curve( y_true, y_score, pos_label=pos_label, sample_weight=sample_weight ) # Attempt to drop thresholds corresponding to points in between and # collinear with other points. These are always suboptimal and do not # appear on a plotted ROC curve (and thus do not affect the AUC). # Here np.diff(_, 2) is used as a "second derivative" to tell if there # is a corner at the point. 
Both fps and tps must be tested to handle # thresholds with multiple data points (which are combined in # _binary_clf_curve). This keeps all cases where the point should be kept, # but does not drop more complicated cases like fps = [1, 3, 7], # tps = [1, 2, 4]; there is no harm in keeping too many thresholds. if drop_intermediate and len(fps) > 2: optimal_idxs = np.where( np.r_[True, np.logical_or(np.diff(fps, 2), np.diff(tps, 2)), True] )[0] fps = fps[optimal_idxs] tps = tps[optimal_idxs] thresholds = thresholds[optimal_idxs] # Add an extra threshold position # to make sure that the curve starts at (0, 0) tps = np.r_[0, tps] fps = np.r_[0, fps] # get dtype of `y_score` even if it is an array-like thresholds = np.r_[np.inf, thresholds] if fps[-1] <= 0: warnings.warn( "No negative samples in y_true, false positive value should be meaningless",
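A minimal usage sketch (editor's addition, not part of the source file) showing the
`drop_intermediate` behavior documented above; it relies only on the public
`roc_curve`/`auc` API:

import numpy as np
from sklearn.metrics import auc, roc_curve

y_true = np.array([0, 0, 0, 0, 1, 1, 1, 1])
y_score = np.array([0.1, 0.2, 0.3, 0.4, 0.6, 0.7, 0.8, 0.9])

# Full staircase: one threshold per distinct score, plus the leading np.inf.
fpr_f, tpr_f, thr_f = roc_curve(y_true, y_score, drop_intermediate=False)
# Collinear interior points are dropped; the curve and its AUC are unchanged.
fpr_d, tpr_d, thr_d = roc_curve(y_true, y_score, drop_intermediate=True)

assert len(thr_d) <= len(thr_f)
assert np.isclose(auc(fpr_f, tpr_f), auc(fpr_d, tpr_d))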
"""Metrics to assess performance on classification task given scores. Functions named as ``*_score`` return a scalar value to maximize: the higher the better. Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize: the lower the better. """ # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr> # Mathieu Blondel <mathieu@mblondel.org> # Olivier Grisel <olivier.grisel@ensta.org> # Arnaud Joly <a.joly@ulg.ac.be> # Jochen Wersdorfer <jochen@wersdoerfer.de> # Lars Buitinck # Joel Nothman <joel.nothman@gmail.com> # Noel Dawe <noel@dawe.me> # Michal Karbownik <michakarbownik@gmail.com> # License: BSD 3 clause @validate_params( {"x": ["array-like"], "y": ["array-like"]}, prefer_skip_nested_validation=True, ) def auc(x, y): """Compute Area Under the Curve (AUC) using the trapezoidal rule. This is a general function, given points on a curve. For computing the area under the ROC-curve, see :func:`roc_auc_score`. For an alternative way to summarize a precision-recall curve, see :func:`average_precision_score`. Parameters ---------- x : array-like of shape (n,) X coordinates. These must be either monotonic increasing or monotonic decreasing. y : array-like of shape (n,) Y coordinates. Returns ------- auc : float Area Under the Curve. See Also -------- roc_auc_score : Compute the area under the ROC curve. average_precision_score : Compute average precision from prediction scores. precision_recall_curve : Compute precision-recall pairs for different probability thresholds. Examples -------- >>> import numpy as np >>> from sklearn import metrics >>> y = np.array([1, 1, 2, 2]) >>> pred = np.array([0.1, 0.4, 0.35, 0.8]) >>> fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2) >>> metrics.auc(fpr, tpr) 0.75 """ check_consistent_length(x, y) x = column_or_1d(x) y = column_or_1d(y) if x.shape[0] < 2: raise ValueError( "At least 2 points are needed to compute area under curve, but x.shape = %s" % x.shape ) direction = 1 dx = np.diff(x) if np.any(dx < 0): if np.all(dx <= 0): direction = -1 else: raise ValueError("x is neither increasing nor decreasing : {}.".format(x)) area = direction * trapezoid(y, x) if isinstance(area, np.memmap): # Reductions such as .sum used internally in trapezoid do not return a # scalar by default for numpy.memmap instances contrary to # regular numpy.ndarray instances. area = area.dtype.type(area) return area @validate_params( { "y_true": ["array-like"], "y_score": ["array-like"], "average": [StrOptions({"micro", "samples", "weighted", "macro"}), None], "pos_label": [Real, str, "boolean"], "sample_weight": ["array-like", None], }, prefer_skip_nested_validation=True, ) def average_precision_score( y_true, y_score, *, average="macro", pos_label=1, sample_weight=None ): """Compute average precision (AP) from prediction scores. AP summarizes a precision-recall curve as the weighted mean of precisions achieved at each threshold, with the increase in recall from the previous threshold used as the weight: .. math:: \\text{AP} = \\sum_n (R_n - R_{n-1}) P_n where :math:`P_n` and :math:`R_n` are the precision and recall at the nth threshold [1]_. This implementation is not interpolated and is different from computing the area under the precision-recall curve with the trapezoidal rule, which uses linear interpolation and can be too optimistic. Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`. Parameters ---------- y_true : array-like of shape (n_samples,) or (n_samples, n_classes) True binary labels or binary label indicators. 
y_score : array-like of shape (n_samples,) or (n_samples, n_classes) Target scores, can either be probability estimates of the positive class, confidence values, or non-thresholded measure of decisions (as returned by :term:`decision_function` on some classifiers). average : {'micro', 'samples', 'weighted', 'macro'} or None, \ default='macro' If ``None``, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data: ``'micro'``: Calculate metrics globally by considering each element of the label indicator matrix as a label. ``'macro'``: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. ``'weighted'``: Calculate metrics for each label, and find their average, weighted by support (the number of true instances for each label). ``'samples'``: Calculate metrics for each instance, and find their average. Will be ignored when ``y_true`` is binary. pos_label : int, float, bool or str, default=1 The label of the positive class. Only applied to binary ``y_true``. For multilabel-indicator ``y_true``, ``pos_label`` is fixed to 1. sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- average_precision : float Average precision score. See Also -------- roc_auc_score : Compute the area under the ROC curve. precision_recall_curve : Compute precision-recall pairs for different probability thresholds. Notes ----- .. versionchanged:: 0.19 Instead of linearly interpolating between operating points, precisions are weighted by the change in recall since the last operating point. References ---------- .. [1] `Wikipedia entry for the Average precision <https://en.wikipedia.org/w/index.php?title=Information_retrieval& oldid=793358396#Average_precision>`_ Examples -------- >>> import numpy as np >>> from sklearn.metrics import average_precision_score >>> y_true = np.array([0, 0, 1, 1]) >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8]) >>> average_precision_score(y_true, y_scores) 0.83... >>> y_true = np.array([0, 0, 1, 1, 2, 2]) >>> y_scores = np.array([ ... [0.7, 0.2, 0.1], ... [0.4, 0.3, 0.3], ... [0.1, 0.8, 0.1], ... [0.2, 0.3, 0.5], ... [0.4, 0.4, 0.2], ... [0.1, 0.2, 0.7], ... ]) >>> average_precision_score(y_true, y_scores) 0.77... """ def _binary_uninterpolated_average_precision( y_true, y_score, pos_label=1, sample_weight=None ): precision, recall, _ = precision_recall_curve( y_true, y_score, pos_label=pos_label, sample_weight=sample_weight ) # Return the step function integral # The following works because the last entry of precision is # guaranteed to be 1, as returned by precision_recall_curve return -np.sum(np.diff(recall) * np.array(precision)[:-1]) y_type = type_of_target(y_true, input_name="y_true") # Convert to Python primitive type to avoid NumPy type / Python str # comparison. See https://github.com/numpy/numpy/issues/6784 present_labels = np.unique(y_true).tolist() if y_type == "binary": if len(present_labels) == 2 and pos_label not in present_labels: raise ValueError( f"pos_label={pos_label} is not a valid label. It should be " f"one of {present_labels}" ) elif y_type == "multilabel-indicator" and pos_label != 1: raise ValueError( "Parameter pos_label is fixed to 1 for multilabel-indicator y_true. " "Do not set pos_label or set pos_label to 1." ) elif y_type == "multiclass": if pos_label != 1: raise ValueError( "Parameter pos_label is fixed to 1 for multiclass y_true. " "Do not set pos_label or set pos_label to 1." 
) y_true = label_binarize(y_true, classes=present_labels) average_precision = partial( _binary_uninterpolated_average_precision, pos_label=pos_label ) return _average_binary_score( average_precision, y_true, y_score, average, sample_weight=sample_weight ) @validate_params( { "y_true": ["array-like"], "y_score": ["array-like"], "pos_label": [Real, str, "boolean", None], "sample_weight": ["array-like", None], }, prefer_skip_nested_validation=True, ) def det_curve(y_true, y_score, pos_label=None, sample_weight=None): """Compute error rates for different probability thresholds. .. note:: This metric is used for evaluation of ranking and error tradeoffs of a binary classification task. Read more in the :ref:`User Guide <det_curve>`. .. versionadded:: 0.24 Parameters ---------- y_true : ndarray of shape (n_samples,) True binary labels. If labels are not either {-1, 1} or {0, 1}, then pos_label should be explicitly given. y_score : ndarray of shape of (n_samples,) Target scores, can either be probability estimates of the positive class, confidence values, or non-thresholded measure of decisions (as returned by "decision_function" on some classifiers). pos_label : int, float, bool or str, default=None The label of the positive class. When ``pos_label=None``, if `y_true` is in {-1, 1} or {0, 1}, ``pos_label`` is set to 1, otherwise an error will be raised. sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- fpr : ndarray of shape (n_thresholds,) False positive rate (FPR) such that element i is the false positive rate of predictions with score >= thresholds[i]. This is occasionally referred to as false acceptance probability or fall-out. fnr : ndarray of shape (n_thresholds,) False negative rate (FNR) such that element i is the false negative rate of predictions with score >= thresholds[i]. This is occasionally referred to as false rejection or miss rate. thresholds : ndarray of shape (n_thresholds,) Decreasing score values. See Also -------- DetCurveDisplay.from_estimator : Plot DET curve given an estimator and some data. DetCurveDisplay.from_predictions : Plot DET curve given the true and predicted labels. DetCurveDisplay : DET curve visualization. roc_curve : Compute Receiver operating characteristic (ROC) curve. precision_recall_curve : Compute precision-recall curve. Examples -------- >>> import numpy as np >>> from sklearn.metrics import det_curve >>> y_true = np.array([0, 0, 1, 1]) >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8]) >>> fpr, fnr, thresholds = det_curve(y_true, y_scores) >>> fpr array([0.5, 0.5, 0. ]) >>> fnr array([0. , 0.5, 0.5]) >>> thresholds array([0.35, 0.4 , 0.8 ]) """ fps, tps, thresholds = _binary_clf_curve( y_true, y_score, pos_label=pos_label, sample_weight=sample_weight ) if len(np.unique(y_true)) != 2: raise ValueError( "Only one class present in y_true. Detection error " "tradeoff curve is not defined in that case." 
) fns = tps[-1] - tps p_count = tps[-1] n_count = fps[-1] # start with false positives zero first_ind = ( fps.searchsorted(fps[0], side="right") - 1 if fps.searchsorted(fps[0], side="right") > 0 else None ) # stop with false negatives zero last_ind = tps.searchsorted(tps[-1]) + 1 sl = slice(first_ind, last_ind) # reverse the output such that list of false positives is decreasing return (fps[sl][::-1] / n_count, fns[sl][::-1] / p_count, thresholds[sl][::-1]) def _binary_roc_auc_score(y_true, y_score, sample_weight=None, max_fpr=None): """Binary roc auc score.""" if len(np.unique(y_true)) != 2: raise ValueError( "Only one class present in y_true. ROC AUC score " "is not defined in that case." ) fpr, tpr, _ = roc_curve(y_true, y_score, sample_weight=sample_weight) if max_fpr is None or max_fpr == 1: return auc(fpr, tpr) if max_fpr <= 0 or max_fpr > 1: raise ValueError("Expected max_fpr in range (0, 1], got: %r" % max_fpr) # Add a single point at max_fpr by linear interpolation stop = np.searchsorted(fpr, max_fpr, "right") x_interp = [fpr[stop - 1], fpr[stop]] y_interp = [tpr[stop - 1], tpr[stop]] tpr = np.append(tpr[:stop], np.interp(max_fpr, x_interp, y_interp)) fpr = np.append(fpr[:stop], max_fpr) partial_auc = auc(fpr, tpr) # McClish correction: standardize result to be 0.5 if non-discriminant # and 1 if maximal min_area = 0.5 * max_fpr**2 max_area = max_fpr return 0.5 * (1 + (partial_auc - min_area) / (max_area - min_area)) @validate_params( { "y_true": ["array-like"], "y_score": ["array-like"], "average": [StrOptions({"micro", "macro", "samples", "weighted"}), None], "sample_weight": ["array-like", None], "max_fpr": [Interval(Real, 0.0, 1, closed="right"), None], "multi_class": [StrOptions({"raise", "ovr", "ovo"})], "labels": ["array-like", None], }, prefer_skip_nested_validation=True, ) def roc_auc_score( y_true, y_score, *, average="macro", sample_weight=None, max_fpr=None, multi_class="raise", labels=None, ): """Compute Area Under the Receiver Operating Characteristic Curve (ROC AUC) \ from prediction scores. Note: this implementation can be used with binary, multiclass and multilabel classification, but some restrictions apply (see Parameters). Read more in the :ref:`User Guide <roc_metrics>`. Parameters ---------- y_true : array-like of shape (n_samples,) or (n_samples, n_classes) True labels or binary label indicators. The binary and multiclass cases expect labels with shape (n_samples,) while the multilabel case expects binary label indicators with shape (n_samples, n_classes). y_score : array-like of shape (n_samples,) or (n_samples, n_classes) Target scores. * In the binary case, it corresponds to an array of shape `(n_samples,)`. Both probability estimates and non-thresholded decision values can be provided. The probability estimates correspond to the **probability of the class with the greater label**, i.e. `estimator.classes_[1]` and thus `estimator.predict_proba(X, y)[:, 1]`. The decision values corresponds to the output of `estimator.decision_function(X, y)`. See more information in the :ref:`User guide <roc_auc_binary>`; * In the multiclass case, it corresponds to an array of shape `(n_samples, n_classes)` of probability estimates provided by the `predict_proba` method. The probability estimates **must** sum to 1 across the possible classes. In addition, the order of the class scores must correspond to the order of ``labels``, if provided, or else to the numerical or lexicographical order of the labels in ``y_true``. 
See more information in the :ref:`User guide <roc_auc_multiclass>`; * In the multilabel case, it corresponds to an array of shape `(n_samples, n_classes)`. Probability estimates are provided by the `predict_proba` method and the non-thresholded decision values by the `decision_function` method. The probability estimates correspond to the **probability of the class with the greater label for each output** of the classifier. See more information in the :ref:`User guide <roc_auc_multilabel>`. average : {'micro', 'macro', 'samples', 'weighted'} or None, \ default='macro' If ``None``, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Note: multiclass ROC AUC currently only handles the 'macro' and 'weighted' averages. For multiclass targets, `average=None` is only implemented for `multi_class='ovr'` and `average='micro'` is only implemented for `multi_class='ovr'`. ``'micro'``: Calculate metrics globally by considering each element of the label indicator matrix as a label. ``'macro'``: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. ``'weighted'``: Calculate metrics for each label, and find their average, weighted by support (the number of true instances for each label). ``'samples'``: Calculate metrics for each instance, and find their average. Will be ignored when ``y_true`` is binary. sample_weight : array-like of shape (n_samples,), default=None Sample weights. max_fpr : float > 0 and <= 1, default=None If not ``None``, the standardized partial AUC [2]_ over the range [0, max_fpr] is returned. For the multiclass case, ``max_fpr``, should be either equal to ``None`` or ``1.0`` as AUC ROC partial computation currently is not supported for multiclass. multi_class : {'raise', 'ovr', 'ovo'}, default='raise' Only used for multiclass targets. Determines the type of configuration to use. The default value raises an error, so either ``'ovr'`` or ``'ovo'`` must be passed explicitly. ``'ovr'``: Stands for One-vs-rest. Computes the AUC of each class against the rest [3]_ [4]_. This treats the multiclass case in the same way as the multilabel case. Sensitive to class imbalance even when ``average == 'macro'``, because class imbalance affects the composition of each of the 'rest' groupings. ``'ovo'``: Stands for One-vs-one. Computes the average AUC of all possible pairwise combinations of classes [5]_. Insensitive to class imbalance when ``average == 'macro'``. labels : array-like of shape (n_classes,), default=None Only used for multiclass targets. List of labels that index the classes in ``y_score``. If ``None``, the numerical or lexicographical order of the labels in ``y_true`` is used. Returns ------- auc : float Area Under the Curve score. See Also -------- average_precision_score : Area under the precision-recall curve. roc_curve : Compute Receiver operating characteristic (ROC) curve. RocCurveDisplay.from_estimator : Plot Receiver Operating Characteristic (ROC) curve given an estimator and some data. RocCurveDisplay.from_predictions : Plot Receiver Operating Characteristic (ROC) curve given the true and predicted values. References ---------- .. [1] `Wikipedia entry for the Receiver operating characteristic <https://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_ .. [2] `Analyzing a portion of the ROC curve. McClish, 1989 <https://www.ncbi.nlm.nih.gov/pubmed/2668680>`_ .. [3] Provost, F., Domingos, P. (2000). 
Well-trained PETs: Improving probability estimation trees (Section 6.2), CeDER Working Paper #IS-00-04, Stern School of Business, New York University. .. [4] `Fawcett, T. (2006). An introduction to ROC analysis. Pattern Recognition Letters, 27(8), 861-874. <https://www.sciencedirect.com/science/article/pii/S016786550500303X>`_ .. [5] `Hand, D.J., Till, R.J. (2001). A Simple Generalisation of the Area Under the ROC Curve for Multiple Class Classification Problems. Machine Learning, 45(2), 171-186. <http://link.springer.com/article/10.1023/A:1010920819831>`_ Examples -------- Binary case: >>> from sklearn.datasets import load_breast_cancer >>> from sklearn.linear_model import LogisticRegression >>> from sklearn.metrics import roc_auc_score >>> X, y = load_breast_cancer(return_X_y=True) >>> clf = LogisticRegression(solver="liblinear", random_state=0).fit(X, y) >>> roc_auc_score(y, clf.predict_proba(X)[:, 1]) 0.99... >>> roc_auc_score(y, clf.decision_function(X)) 0.99... Multiclass case: >>> from sklearn.datasets import load_iris >>> X, y = load_iris(return_X_y=True) >>> clf = LogisticRegression(solver="liblinear").fit(X, y) >>> roc_auc_score(y, clf.predict_proba(X), multi_class='ovr') 0.99... Multilabel case: >>> import numpy as np >>> from sklearn.datasets import make_multilabel_classification >>> from sklearn.multioutput import MultiOutputClassifier >>> X, y = make_multilabel_classification(random_state=0) >>> clf = MultiOutputClassifier(clf).fit(X, y) >>> # get a list of n_output containing probability arrays of shape >>> # (n_samples, n_classes) >>> y_pred = clf.predict_proba(X) >>> # extract the positive columns for each output >>> y_pred = np.transpose([pred[:, 1] for pred in y_pred]) >>> roc_auc_score(y, y_pred, average=None) array([0.82..., 0.86..., 0.94..., 0.85... , 0.94...]) >>> from sklearn.linear_model import RidgeClassifierCV >>> clf = RidgeClassifierCV().fit(X, y) >>> roc_auc_score(y, clf.decision_function(X), average=None) array([0.81..., 0.84... , 0.93..., 0.87..., 0.94...]) """ y_type = type_of_target(y_true, input_name="y_true") y_true = check_array(y_true, ensure_2d=False, dtype=None) y_score = check_array(y_score, ensure_2d=False) if y_type == "multiclass" or ( y_type == "binary" and y_score.ndim == 2 and y_score.shape[1] > 2 ): # do not support partial ROC computation for multiclass if max_fpr is not None and max_fpr != 1.0: raise ValueError( "Partial AUC computation not available in " "multiclass setting, 'max_fpr' must be" " set to `None`, received `max_fpr={0}` " "instead".format(max_fpr) ) if multi_class == "raise": raise ValueError("multi_class must be in ('ovo', 'ovr')") return _multiclass_roc_auc_score( y_true, y_score, labels, multi_class, average, sample_weight ) elif y_type == "binary": labels = np.unique(y_true) y_true = label_binarize(y_true, classes=labels)[:, 0] return _average_binary_score( partial(_binary_roc_auc_score, max_fpr=max_fpr), y_true, y_score, average, sample_weight=sample_weight, ) else: # multilabel-indicator return _average_binary_score( partial(_binary_roc_auc_score, max_fpr=max_fpr), y_true, y_score, average, sample_weight=sample_weight, ) def _multiclass_roc_auc_score( y_true, y_score, labels, multi_class, average, sample_weight ): """Multiclass roc auc score. Parameters ---------- y_true : array-like of shape (n_samples,) True multiclass labels. 
y_score : array-like of shape (n_samples, n_classes) Target scores corresponding to probability estimates of a sample belonging to a particular class labels : array-like of shape (n_classes,) or None List of labels to index ``y_score`` used for multiclass. If ``None``, the lexical order of ``y_true`` is used to index ``y_score``. multi_class : {'ovr', 'ovo'} Determines the type of multiclass configuration to use. ``'ovr'``: Calculate metrics for the multiclass case using the one-vs-rest approach. ``'ovo'``: Calculate metrics for the multiclass case using the one-vs-one approach. average : {'micro', 'macro', 'weighted'} Determines the type of averaging performed on the pairwise binary metric scores ``'micro'``: Calculate metrics for the binarized-raveled classes. Only supported for `multi_class='ovr'`. .. versionadded:: 1.2 ``'macro'``: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. Classes are assumed to be uniformly distributed. ``'weighted'``: Calculate metrics for each label, taking into account the prevalence of the classes. sample_weight : array-like of shape (n_samples,) or None Sample weights. """ # validation of the input y_score if not np.allclose(1, y_score.sum(axis=1)): raise ValueError( "Target scores need to be probabilities for multiclass " "roc_auc, i.e. they should sum up to 1.0 over classes" ) # validation for multiclass parameter specifications average_options = ("macro", "weighted", None) if multi_class == "ovr": average_options = ("micro",) + average_options if average not in average_options: raise ValueError( "average must be one of {0} for multiclass problems".format(average_options) ) multiclass_options = ("ovo", "ovr") if multi_class not in multiclass_options: raise ValueError( "multi_class='{0}' is not supported " "for multiclass ROC AUC, multi_class must be " "in {1}".format(multi_class, multiclass_options) ) if average is None and multi_class == "ovo": raise NotImplementedError( "average=None is not implemented for multi_class='ovo'." ) if labels is not None: labels = column_or_1d(labels) classes = _unique(labels) if len(classes) != len(labels): raise ValueError("Parameter 'labels' must be unique") if not np.array_equal(classes, labels): raise ValueError("Parameter 'labels' must be ordered") if len(classes) != y_score.shape[1]: raise ValueError( "Number of given labels, {0}, not equal to the number " "of columns in 'y_score', {1}".format(len(classes), y_score.shape[1]) ) if len(np.setdiff1d(y_true, classes)): raise ValueError("'y_true' contains labels not in parameter 'labels'") else: classes = _unique(y_true) if len(classes) != y_score.shape[1]: raise ValueError( "Number of classes in y_true not equal to the number of " "columns in 'y_score'" ) if multi_class == "ovo": if sample_weight is not None: raise ValueError( "sample_weight is not supported " "for multiclass one-vs-one ROC AUC, " "'sample_weight' must be None in this case." ) y_true_encoded = _encode(y_true, uniques=classes) # Hand & Till (2001) implementation (ovo) return _average_multiclass_ovo_score( _binary_roc_auc_score, y_true_encoded, y_score, average=average ) else: # ovr is same as multi-label y_true_multilabel = label_binarize(y_true, classes=classes) return _average_binary_score( _binary_roc_auc_score, y_true_multilabel, y_score, average, sample_weight=sample_weight, ) def _binary_clf_curve(y_true, y_score, pos_label=None, sample_weight=None): """Calculate true and false positives per binary classification threshold. 
Parameters ---------- y_true : ndarray of shape (n_samples,) True targets of binary classification. y_score : ndarray of shape (n_samples,) Estimated probabilities or output of a decision function. pos_label : int, float, bool or str, default=None The label of the positive class. sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- fps : ndarray of shape (n_thresholds,) A count of false positives, at index i being the number of negative samples assigned a score >= thresholds[i]. The total number of negative samples is equal to fps[-1] (thus true negatives are given by fps[-1] - fps). tps : ndarray of shape (n_thresholds,) An increasing count of true positives, at index i being the number of positive samples assigned a score >= thresholds[i]. The total number of positive samples is equal to tps[-1] (thus false negatives are given by tps[-1] - tps). thresholds : ndarray of shape (n_thresholds,) Decreasing score values. """ # Check to make sure y_true is valid y_type = type_of_target(y_true, input_name="y_true") if not (y_type == "binary" or (y_type == "multiclass" and pos_label is not None)): raise ValueError("{0} format is not supported".format(y_type)) check_consistent_length(y_true, y_score, sample_weight) y_true = column_or_1d(y_true) y_score = column_or_1d(y_score) assert_all_finite(y_true) assert_all_finite(y_score) # Filter out zero-weighted samples, as they should not impact the result if sample_weight is not None: sample_weight = column_or_1d(sample_weight) sample_weight = _check_sample_weight(sample_weight, y_true) nonzero_weight_mask = sample_weight != 0 y_true = y_true[nonzero_weight_mask] y_score = y_score[nonzero_weight_mask] sample_weight = sample_weight[nonzero_weight_mask] pos_label = _check_pos_label_consistency(pos_label, y_true) # make y_true a boolean vector y_true = y_true == pos_label # sort scores and corresponding truth values desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1] y_score = y_score[desc_score_indices] y_true = y_true[desc_score_indices] if sample_weight is not None: weight = sample_weight[desc_score_indices] else: weight = 1.0 # y_score typically has many tied values. Here we extract # the indices associated with the distinct values. We also # concatenate a value for the end of the curve. distinct_value_indices = np.where(np.diff(y_score))[0] threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1] # accumulate the true positives with decreasing threshold tps = stable_cumsum(y_true * weight)[threshold_idxs] if sample_weight is not None: # express fps as a cumsum to ensure fps is increasing even in # the presence of floating point errors fps = stable_cumsum((1 - y_true) * weight)[threshold_idxs] else: fps = 1 + threshold_idxs - tps return fps, tps, y_score[threshold_idxs] @validate_params( { "y_true": ["array-like"], "probas_pred": ["array-like"], "pos_label": [Real, str, "boolean", None], "sample_weight": ["array-like", None], "drop_intermediate": ["boolean"], }, prefer_skip_nested_validation=True, ) def precision_recall_curve( y_true, probas_pred, *, pos_label=None, sample_weight=None, drop_intermediate=False ): """Compute precision-recall pairs for different probability thresholds. Note: this implementation is restricted to the binary classification task. The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of true positives and ``fp`` the number of false positives. 
The precision is intuitively the ability of the classifier not to label as positive a sample that is negative. The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of true positives and ``fn`` the number of false negatives. The recall is intuitively the ability of the classifier to find all the positive samples. The last precision and recall values are 1. and 0. respectively and do not have a corresponding threshold. This ensures that the graph starts on the y axis. The first precision and recall values are precision=class balance and recall=1.0 which corresponds to a classifier that always predicts the positive class. Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`. Parameters ---------- y_true : array-like of shape (n_samples,) True binary labels. If labels are not either {-1, 1} or {0, 1}, then pos_label should be explicitly given. probas_pred : array-like of shape (n_samples,) Target scores, can either be probability estimates of the positive class, or non-thresholded measure of decisions (as returned by `decision_function` on some classifiers). pos_label : int, float, bool or str, default=None The label of the positive class. When ``pos_label=None``, if y_true is in {-1, 1} or {0, 1}, ``pos_label`` is set to 1, otherwise an error will be raised. sample_weight : array-like of shape (n_samples,), default=None Sample weights. drop_intermediate : bool, default=False Whether to drop some suboptimal thresholds which would not appear on a plotted precision-recall curve. This is useful in order to create lighter precision-recall curves. .. versionadded:: 1.3 Returns ------- precision : ndarray of shape (n_thresholds + 1,) Precision values such that element i is the precision of predictions with score >= thresholds[i] and the last element is 1. recall : ndarray of shape (n_thresholds + 1,) Decreasing recall values such that element i is the recall of predictions with score >= thresholds[i] and the last element is 0. thresholds : ndarray of shape (n_thresholds,) Increasing thresholds on the decision function used to compute precision and recall where `n_thresholds = len(np.unique(probas_pred))`. See Also -------- PrecisionRecallDisplay.from_estimator : Plot Precision Recall Curve given a binary classifier. PrecisionRecallDisplay.from_predictions : Plot Precision Recall Curve using predictions from a binary classifier. average_precision_score : Compute average precision from prediction scores. det_curve: Compute error rates for different probability thresholds. roc_curve : Compute Receiver operating characteristic (ROC) curve. Examples -------- >>> import numpy as np >>> from sklearn.metrics import precision_recall_curve >>> y_true = np.array([0, 0, 1, 1]) >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8]) >>> precision, recall, thresholds = precision_recall_curve( ... y_true, y_scores) >>> precision array([0.5 , 0.66666667, 0.5 , 1. , 1. ]) >>> recall array([1. , 1. , 0.5, 0.5, 0. ]) >>> thresholds array([0.1 , 0.35, 0.4 , 0.8 ]) """ fps, tps, thresholds = _binary_clf_curve( y_true, probas_pred, pos_label=pos_label, sample_weight=sample_weight ) if drop_intermediate and len(fps) > 2: # Drop thresholds corresponding to points where true positives (tps) # do not change from the previous or subsequent point. This will keep # only the first and last point for each tps value. All points # with the same tps value have the same recall and thus x coordinate. # They appear as a vertical line on the plot. 
optimal_idxs = np.where( np.concatenate( [[True], np.logical_or(np.diff(tps[:-1]), np.diff(tps[1:])), [True]] ) )[0] fps = fps[optimal_idxs] tps = tps[optimal_idxs] thresholds = thresholds[optimal_idxs] ps = tps + fps # Initialize the result array with zeros to make sure that precision[ps == 0] # does not contain uninitialized values. precision = np.zeros_like(tps) np.divide(tps, ps, out=precision, where=(ps != 0)) # When no positive label in y_true, recall is set to 1 for all thresholds # tps[-1] == 0 <=> y_true == all negative labels if tps[-1] == 0: warnings.warn( "No positive class found in y_true, " "recall is set to one for all thresholds." ) recall = np.ones_like(tps) else: recall = tps / tps[-1] # reverse the outputs so recall is decreasing sl = slice(None, None, -1) return np.hstack((precision[sl], 1)), np.hstack((recall[sl], 0)), thresholds[sl] @validate_params( { "y_true": ["array-like"], "y_score": ["array-like"], "pos_label": [Real, str, "boolean", None], "sample_weight": ["array-like", None], "drop_intermediate": ["boolean"], }, prefer_skip_nested_validation=True, ) def roc_curve( y_true, y_score, *, pos_label=None, sample_weight=None, drop_intermediate=True ): """Compute Receiver operating characteristic (ROC). Note: this implementation is restricted to the binary classification task. Read more in the :ref:`User Guide <roc_metrics>`. Parameters ---------- y_true : array-like of shape (n_samples,) True binary labels. If labels are not either {-1, 1} or {0, 1}, then pos_label should be explicitly given. y_score : array-like of shape (n_samples,) Target scores, can either be probability estimates of the positive class, confidence values, or non-thresholded measure of decisions (as returned by "decision_function" on some classifiers). pos_label : int, float, bool or str, default=None The label of the positive class. When ``pos_label=None``, if `y_true` is in {-1, 1} or {0, 1}, ``pos_label`` is set to 1, otherwise an error will be raised. sample_weight : array-like of shape (n_samples,), default=None Sample weights. drop_intermediate : bool, default=True Whether to drop some suboptimal thresholds which would not appear on a plotted ROC curve. This is useful in order to create lighter ROC curves. .. versionadded:: 0.17 parameter *drop_intermediate*. Returns ------- fpr : ndarray of shape (>2,) Increasing false positive rates such that element i is the false positive rate of predictions with score >= `thresholds[i]`. tpr : ndarray of shape (>2,) Increasing true positive rates such that element `i` is the true positive rate of predictions with score >= `thresholds[i]`. thresholds : ndarray of shape (n_thresholds,) Decreasing thresholds on the decision function used to compute fpr and tpr. `thresholds[0]` represents no instances being predicted and is arbitrarily set to `np.inf`. See Also -------- RocCurveDisplay.from_estimator : Plot Receiver Operating Characteristic (ROC) curve given an estimator and some data. RocCurveDisplay.from_predictions : Plot Receiver Operating Characteristic (ROC) curve given the true and predicted values. det_curve: Compute error rates for different probability thresholds. roc_auc_score : Compute the area under the ROC curve. Notes ----- Since the thresholds are sorted from low to high values, they are reversed upon returning them to ensure they correspond to both ``fpr`` and ``tpr``, which are sorted in reversed order during their calculation. An arbitrary threshold is added for the case `tpr=0` and `fpr=0` to ensure that the curve starts at `(0, 0)`. 
This threshold corresponds to the `np.inf`. References ---------- .. [1] `Wikipedia entry for the Receiver operating characteristic <https://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_ .. [2] Fawcett T. An introduction to ROC analysis[J]. Pattern Recognition Letters, 2006, 27(8):861-874. Examples -------- >>> import numpy as np >>> from sklearn import metrics >>> y = np.array([1, 1, 2, 2]) >>> scores = np.array([0.1, 0.4, 0.35, 0.8]) >>> fpr, tpr, thresholds = metrics.roc_curve(y, scores, pos_label=2) >>> fpr array([0. , 0. , 0.5, 0.5, 1. ]) >>> tpr array([0. , 0.5, 0.5, 1. , 1. ]) >>> thresholds array([ inf, 0.8 , 0.4 , 0.35, 0.1 ]) """ fps, tps, thresholds = _binary_clf_curve( y_true, y_score, pos_label=pos_label, sample_weight=sample_weight ) # Attempt to drop thresholds corresponding to points in between and # collinear with other points. These are always suboptimal and do not # appear on a plotted ROC curve (and thus do not affect the AUC). # Here np.diff(_, 2) is used as a "second derivative" to tell if there # is a corner at the point. Both fps and tps must be tested to handle # thresholds with multiple data points (which are combined in # _binary_clf_curve). This keeps all cases where the point should be kept, # but does not drop more complicated cases like fps = [1, 3, 7], # tps = [1, 2, 4]; there is no harm in keeping too many thresholds. if drop_intermediate and len(fps) > 2: optimal_idxs = np.where( np.r_[True, np.logical_or(np.diff(fps, 2), np.diff(tps, 2)), True] )[0] fps = fps[optimal_idxs] tps = tps[optimal_idxs] thresholds = thresholds[optimal_idxs] # Add an extra threshold position # to make sure that the curve starts at (0, 0) tps = np.r_[0, tps] fps = np.r_[0, fps] # get dtype of `y_score` even if it is an array-like thresholds = np.r_[np.inf, thresholds] if fps[-1] <= 0: warnings.warn( "No negative samples in y_true, false positive value should be meaningless",
UndefinedMetricWarning,
0
2023-10-07 13:19:48+00:00
24k
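The `roc_curve` and `precision_recall_curve` functions in the record above both rest on the fps/tps/thresholds counting done by `_binary_clf_curve`. The sketch below re-derives that counting with plain NumPy and cross-checks it against `roc_curve`; it is an illustration of the idea, not the library's implementation, and the toy labels and scores are invented for the example.

```python
import numpy as np
from sklearn.metrics import roc_curve

y_true = np.array([0, 0, 1, 1, 1, 0])
y_score = np.array([0.1, 0.4, 0.35, 0.8, 0.4, 0.2])

# Stable descending sort, as in _binary_clf_curve.
order = np.argsort(y_score, kind="mergesort")[::-1]
y_sorted = y_true[order]
s_sorted = y_score[order]

# Keep only indices where the score value changes, plus the last sample,
# so tied scores collapse into a single threshold.
distinct = np.where(np.diff(s_sorted))[0]
idxs = np.r_[distinct, y_sorted.size - 1]

tps = np.cumsum(y_sorted)[idxs]   # positives with score >= threshold
fps = 1 + idxs - tps              # negatives with score >= threshold
thresholds = s_sorted[idxs]

# roc_curve prepends a (0, 0) point and an np.inf threshold, then
# normalizes these counts into rates.
fpr, tpr, thr = roc_curve(y_true, y_score, drop_intermediate=False)
assert np.allclose(fpr[1:], fps / fps[-1])
assert np.allclose(tpr[1:], tps / tps[-1])
```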
zbzhu99/madiff
diffuser/models/diffusion.py
[ { "identifier": "DPM_Solver", "path": "diffuser/utils/dpm_solver.py", "snippet": "class DPM_Solver:\n def __init__(\n self,\n model_fn,\n noise_schedule,\n algorithm_type=\"dpmsolver++\",\n correcting_x0_fn=None,\n correcting_xt_fn=None,\n thresholding...
import functools
import numpy as np
import torch
import torch.nn.functional as F
import diffuser.utils as utils
from torch import nn
from diffuser.utils.dpm_solver import DPM_Solver, NoiseScheduleVP, model_wrapper
from .helpers import Losses, apply_conditioning, cosine_beta_schedule, extract
21,225
clip_denoised=False, predict_epsilon=True, action_weight=1.0, loss_discount=1.0, loss_weights=None, returns_condition=False, condition_guidance_w=0.1, agent_share_noise=False, data_encoder=utils.IdentityEncoder(), **kwargs, ): super().__init__() self.n_agents = n_agents self.horizon = horizon self.history_horizon = history_horizon self.observation_dim = observation_dim self.action_dim = action_dim self.transition_dim = observation_dim + action_dim self.model = model self.returns_condition = returns_condition self.condition_guidance_w = condition_guidance_w self.agent_share_noise = agent_share_noise self.data_encoder = data_encoder betas = cosine_beta_schedule(n_timesteps) alphas = 1.0 - betas alphas_cumprod = torch.cumprod(alphas, axis=0) alphas_cumprod_prev = torch.cat([torch.ones(1), alphas_cumprod[:-1]]) self.n_timesteps = int(n_timesteps) self.clip_denoised = clip_denoised self.predict_epsilon = predict_epsilon self.register_buffer("betas", betas) self.register_buffer("alphas_cumprod", alphas_cumprod) self.register_buffer("alphas_cumprod_prev", alphas_cumprod_prev) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer("sqrt_alphas_cumprod", torch.sqrt(alphas_cumprod)) self.register_buffer( "sqrt_one_minus_alphas_cumprod", torch.sqrt(1.0 - alphas_cumprod) ) self.register_buffer( "log_one_minus_alphas_cumprod", torch.log(1.0 - alphas_cumprod) ) self.register_buffer( "sqrt_recip_alphas_cumprod", torch.sqrt(1.0 / alphas_cumprod) ) self.register_buffer( "sqrt_recipm1_alphas_cumprod", torch.sqrt(1.0 / alphas_cumprod - 1) ) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = ( betas * (1.0 - alphas_cumprod_prev) / (1.0 - alphas_cumprod) ) self.register_buffer("posterior_variance", posterior_variance) # log calculation clipped because the posterior variance # is 0 at the beginning of the diffusion chain self.register_buffer( "posterior_log_variance_clipped", torch.log(torch.clamp(posterior_variance, min=1e-20)), ) self.register_buffer( "posterior_mean_coef1", betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod), ) self.register_buffer( "posterior_mean_coef2", (1.0 - alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - alphas_cumprod), ) # get loss coefficients and initialize objective self.loss_type = loss_type loss_weights = self.get_loss_weights(action_weight, loss_discount, loss_weights) self.loss_fn = Losses[loss_type](loss_weights, self.action_dim) def get_loss_weights(self, action_weight, discount, weights_dict): """ sets loss coefficients for trajectory action_weight : float coefficient on first action loss discount : float multiplies t^th timestep of trajectory loss by discount**t weights_dict : dict { i: c } multiplies dimension i of observation loss by c """ self.action_weight = action_weight dim_weights = torch.ones(self.transition_dim, dtype=torch.float32) # set loss coefficients for dimensions of observation if weights_dict is None: weights_dict = {} for ind, w in weights_dict.items(): dim_weights[self.action_dim + ind] *= w # decay loss with trajectory timestep: discount**t discounts = discount ** torch.arange( self.horizon + self.history_horizon, dtype=torch.float ) discounts = discounts / discounts.mean() loss_weights = torch.einsum("h,t->ht", discounts, dim_weights) loss_weights = loss_weights.unsqueeze(1).expand(-1, self.n_agents, -1).clone() # manually set a0 weight loss_weights[self.history_horizon, :, : self.action_dim] = action_weight return loss_weights # ------------------------------------------ sampling 
------------------------------------------# def predict_start_from_noise(self, x_t, t, noise): """ if self.predict_epsilon, model output is (scaled) noise; otherwise, model predicts x0 directly """ if self.predict_epsilon: return (
class GaussianDiffusion(nn.Module): def __init__( self, model, n_agents, horizon, history_horizon, observation_dim, action_dim, n_timesteps=1000, loss_type="l1", clip_denoised=False, predict_epsilon=True, action_weight=1.0, loss_discount=1.0, loss_weights=None, returns_condition=False, condition_guidance_w=0.1, agent_share_noise=False, data_encoder=utils.IdentityEncoder(), **kwargs, ): super().__init__() self.n_agents = n_agents self.horizon = horizon self.history_horizon = history_horizon self.observation_dim = observation_dim self.action_dim = action_dim self.transition_dim = observation_dim + action_dim self.model = model self.returns_condition = returns_condition self.condition_guidance_w = condition_guidance_w self.agent_share_noise = agent_share_noise self.data_encoder = data_encoder betas = cosine_beta_schedule(n_timesteps) alphas = 1.0 - betas alphas_cumprod = torch.cumprod(alphas, axis=0) alphas_cumprod_prev = torch.cat([torch.ones(1), alphas_cumprod[:-1]]) self.n_timesteps = int(n_timesteps) self.clip_denoised = clip_denoised self.predict_epsilon = predict_epsilon self.register_buffer("betas", betas) self.register_buffer("alphas_cumprod", alphas_cumprod) self.register_buffer("alphas_cumprod_prev", alphas_cumprod_prev) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer("sqrt_alphas_cumprod", torch.sqrt(alphas_cumprod)) self.register_buffer( "sqrt_one_minus_alphas_cumprod", torch.sqrt(1.0 - alphas_cumprod) ) self.register_buffer( "log_one_minus_alphas_cumprod", torch.log(1.0 - alphas_cumprod) ) self.register_buffer( "sqrt_recip_alphas_cumprod", torch.sqrt(1.0 / alphas_cumprod) ) self.register_buffer( "sqrt_recipm1_alphas_cumprod", torch.sqrt(1.0 / alphas_cumprod - 1) ) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = ( betas * (1.0 - alphas_cumprod_prev) / (1.0 - alphas_cumprod) ) self.register_buffer("posterior_variance", posterior_variance) # log calculation clipped because the posterior variance # is 0 at the beginning of the diffusion chain self.register_buffer( "posterior_log_variance_clipped", torch.log(torch.clamp(posterior_variance, min=1e-20)), ) self.register_buffer( "posterior_mean_coef1", betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod), ) self.register_buffer( "posterior_mean_coef2", (1.0 - alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - alphas_cumprod), ) # get loss coefficients and initialize objective self.loss_type = loss_type loss_weights = self.get_loss_weights(action_weight, loss_discount, loss_weights) self.loss_fn = Losses[loss_type](loss_weights, self.action_dim) def get_loss_weights(self, action_weight, discount, weights_dict): """ sets loss coefficients for trajectory action_weight : float coefficient on first action loss discount : float multiplies t^th timestep of trajectory loss by discount**t weights_dict : dict { i: c } multiplies dimension i of observation loss by c """ self.action_weight = action_weight dim_weights = torch.ones(self.transition_dim, dtype=torch.float32) # set loss coefficients for dimensions of observation if weights_dict is None: weights_dict = {} for ind, w in weights_dict.items(): dim_weights[self.action_dim + ind] *= w # decay loss with trajectory timestep: discount**t discounts = discount ** torch.arange( self.horizon + self.history_horizon, dtype=torch.float ) discounts = discounts / discounts.mean() loss_weights = torch.einsum("h,t->ht", discounts, dim_weights) loss_weights = loss_weights.unsqueeze(1).expand(-1, self.n_agents, -1).clone() # manually set a0 weight 
loss_weights[self.history_horizon, :, : self.action_dim] = action_weight return loss_weights # ------------------------------------------ sampling ------------------------------------------# def predict_start_from_noise(self, x_t, t, noise): """ if self.predict_epsilon, model output is (scaled) noise; otherwise, model predicts x0 directly """ if self.predict_epsilon: return (
extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t
3
2023-10-13 13:03:53+00:00
24k
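`GaussianDiffusion.__init__` in the record above registers its schedule tensors as buffers. The sketch below shows how the posterior coefficients follow from the betas. Since the record only imports `cosine_beta_schedule` from `.helpers`, the schedule here is a stand-in that assumes the standard Nichol and Dhariwal (2021) cosine formulation.

```python
import math
import torch

def cosine_beta_schedule(timesteps: int, s: float = 0.008) -> torch.Tensor:
    # Assumed implementation of the imported helper: squared-cosine
    # cumulative alphas converted to per-step betas.
    x = torch.linspace(0, timesteps, timesteps + 1)
    alphas_cumprod = torch.cos(((x / timesteps) + s) / (1 + s) * math.pi * 0.5) ** 2
    alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
    betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])
    return torch.clamp(betas, 0, 0.999)

betas = cosine_beta_schedule(1000)
alphas = 1.0 - betas
alphas_cumprod = torch.cumprod(alphas, dim=0)
alphas_cumprod_prev = torch.cat([torch.ones(1), alphas_cumprod[:-1]])

# The same posterior q(x_{t-1} | x_t, x_0) quantities the record registers.
posterior_variance = betas * (1.0 - alphas_cumprod_prev) / (1.0 - alphas_cumprod)
posterior_mean_coef1 = betas * torch.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod)
posterior_mean_coef2 = (1.0 - alphas_cumprod_prev) * torch.sqrt(alphas) / (1.0 - alphas_cumprod)

print(posterior_variance[:3], posterior_mean_coef1[:3], posterior_mean_coef2[:3])
```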
hellloxiaotian/KDNet
train_KDNet.py
[ { "identifier": "attempt_load", "path": "models/experimental.py", "snippet": "def attempt_load(weights, map_location=None):\n # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a\n model = Ensemble()\n # print('weights', weights) # /runs/train/yolov7_distill...
import argparse
import logging
import math
import os
import random
import time
import numpy as np
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import torch.utils.data
import yaml
import test  # import test.py to get mAP after each epoch
from copy import deepcopy
from pathlib import Path
from threading import Thread
from torch.cuda import amp
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from models.experimental import attempt_load
from models.experimental import attempt_loadv5
from models.experimental import attempt_load_zxy
from models.yolo import Model
from utils.autoanchor import check_anchors
from utils.datasets import create_dataloader
from utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \
    fitness, strip_optimizer, get_latest_run, check_dataset, check_file, check_git_status, check_img_size, \
    check_requirements, print_mutation, set_logging, one_cycle, colorstr
from utils.google_utils import attempt_download
from utils.loss import ComputeLoss, ComputeLossOTA
from utils.plots import plot_images, plot_labels, plot_results, plot_evolution
from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, is_parallel
from utils.wandb_logging.wandb_utils import WandbLogger, check_wandb_resume
from utils.distill_utils import getMask, compute_mask_loss
20,866
pg0.append(v.im.implicit) else: for iv in v.im: pg0.append(iv.implicit) if hasattr(v, 'imc'): if hasattr(v.imc, 'implicit'): pg0.append(v.imc.implicit) else: for iv in v.imc: pg0.append(iv.implicit) if hasattr(v, 'imb'): if hasattr(v.imb, 'implicit'): pg0.append(v.imb.implicit) else: for iv in v.imb: pg0.append(iv.implicit) if hasattr(v, 'imo'): if hasattr(v.imo, 'implicit'): pg0.append(v.imo.implicit) else: for iv in v.imo: pg0.append(iv.implicit) if hasattr(v, 'ia'): if hasattr(v.ia, 'implicit'): pg0.append(v.ia.implicit) else: for iv in v.ia: pg0.append(iv.implicit) if hasattr(v, 'attn'): if hasattr(v.attn, 'logit_scale'): pg0.append(v.attn.logit_scale) if hasattr(v.attn, 'q_bias'): pg0.append(v.attn.q_bias) if hasattr(v.attn, 'v_bias'): pg0.append(v.attn.v_bias) if hasattr(v.attn, 'relative_position_bias_table'): pg0.append(v.attn.relative_position_bias_table) if hasattr(v, 'rbr_dense'): if hasattr(v.rbr_dense, 'weight_rbr_origin'): pg0.append(v.rbr_dense.weight_rbr_origin) if hasattr(v.rbr_dense, 'weight_rbr_avg_conv'): pg0.append(v.rbr_dense.weight_rbr_avg_conv) if hasattr(v.rbr_dense, 'weight_rbr_pfir_conv'): pg0.append(v.rbr_dense.weight_rbr_pfir_conv) if hasattr(v.rbr_dense, 'weight_rbr_1x1_kxk_idconv1'): pg0.append(v.rbr_dense.weight_rbr_1x1_kxk_idconv1) if hasattr(v.rbr_dense, 'weight_rbr_1x1_kxk_conv2'): pg0.append(v.rbr_dense.weight_rbr_1x1_kxk_conv2) if hasattr(v.rbr_dense, 'weight_rbr_gconv_dw'): pg0.append(v.rbr_dense.weight_rbr_gconv_dw) if hasattr(v.rbr_dense, 'weight_rbr_gconv_pw'): pg0.append(v.rbr_dense.weight_rbr_gconv_pw) if hasattr(v.rbr_dense, 'vector'): pg0.append(v.rbr_dense.vector) if opt.adam: optimizer = optim.Adam(pg0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999)) # adjust beta1 to momentum else: optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True) optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']}) # add pg1 with weight_decay optimizer.add_param_group({'params': pg2}) # add pg2 (biases) logger.info('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0))) del pg0, pg1, pg2 # Scheduler https://arxiv.org/pdf/1812.01187.pdf # https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR if opt.linear_lr: lf = lambda x: (1 - x / (epochs - 1)) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear else: lf = one_cycle(1, hyp['lrf'], epochs) # cosine 1->hyp['lrf'] scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) # plot_lr_scheduler(optimizer, scheduler, epochs) # EMA ema = ModelEMA(model) if rank in [-1, 0] else None # Resume start_epoch, best_fitness = 0, 0.0 if pretrained: # Optimizer if ckpt['optimizer'] is not None: optimizer.load_state_dict(ckpt['optimizer']) best_fitness = ckpt['best_fitness'] # EMA if ema and ckpt.get('ema'): ema.ema.load_state_dict(ckpt['ema'].float().state_dict()) ema.updates = ckpt['updates'] # Results if ckpt.get('training_results') is not None: results_file.write_text(ckpt['training_results']) # write results.txt # Epochs start_epoch = ckpt['epoch'] + 1 if opt.resume: assert start_epoch > 0, '%s training to %g epochs is finished, nothing to resume.' % (weights, epochs) if epochs < start_epoch: logger.info('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' 
% (weights, ckpt['epoch'], epochs)) epochs += ckpt['epoch'] # finetune additional epochs del ckpt, state_dict # Image sizes gs = max(int(model.stride.max()), 32) # grid size (max stride) nl = model.model[-1].nl # number of detection layers (used for scaling hyp['obj']) imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size] # verify imgsz are gs-multiples # DP mode if cuda and rank == -1 and torch.cuda.device_count() > 1: model = torch.nn.DataParallel(model) # SyncBatchNorm if opt.sync_bn and cuda and rank != -1: model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device) logger.info('Using SyncBatchNorm()') # Trainloader
logger = logging.getLogger(__name__) def train(hyp, opt, device, tb_writer=None): logger.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items())) save_dir, epochs, batch_size, total_batch_size, weights, rank, freeze = \ Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank, opt.freeze # Directories wdir = save_dir / 'weights' wdir.mkdir(parents=True, exist_ok=True) # make dir last = wdir / 'last.pt' best = wdir / 'best.pt' results_file = save_dir / 'results.txt' # Save run settings with open(save_dir / 'hyp.yaml', 'w') as f: yaml.dump(hyp, f, sort_keys=False) with open(save_dir / 'opt.yaml', 'w') as f: yaml.dump(vars(opt), f, sort_keys=False) # Configure plots = not opt.evolve # create plots cuda = device.type != 'cpu' init_seeds(2 + rank) with open(opt.data) as f: data_dict = yaml.load(f, Loader=yaml.SafeLoader) # data dict is_coco = opt.data.endswith('coco.yaml') # Logging- Doing this before checking the dataset. Might update data_dict loggers = {'wandb': None} # loggers dict if rank in [-1, 0]: opt.hyp = hyp # add hyperparameters run_id = torch.load(weights, map_location=device).get('wandb_id') if weights.endswith('.pt') and os.path.isfile( weights) else None wandb_logger = WandbLogger(opt, Path(opt.save_dir).stem, run_id, data_dict) loggers['wandb'] = wandb_logger.wandb data_dict = wandb_logger.data_dict if wandb_logger.wandb: weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp # WandbLogger might update weights, epochs if resuming nc = 1 if opt.single_cls else int(data_dict['nc']) # number of classes names = ['item'] if opt.single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data) # check # Model pretrained = weights.endswith('.pt') # load teacher model teacher = attempt_load_zxy(opt.teacher_weights, device=device) if pretrained: with torch_distributed_zero_first(rank): attempt_download(weights) # download if not found locally ckpt = torch.load(weights, map_location=device) # load checkpoint model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create exclude = ['anchor'] if (opt.cfg or hyp.get('anchors')) and not opt.resume else [] # exclude keys state_dict = ckpt['model'].float().state_dict() # to FP32 state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=exclude) # intersect model.load_state_dict(state_dict, strict=False) # load logger.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights)) # report else: model = Model(opt.cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create with torch_distributed_zero_first(rank): check_dataset(data_dict) # check train_path = data_dict['train'] test_path = data_dict['val'] # Freeze freeze = [f'model.{x}.' 
for x in (freeze if len(freeze) > 1 else range(freeze[0]))] # parameter names to freeze (full or partial) for k, v in model.named_parameters(): v.requires_grad = True # train all layers if any(x in k for x in freeze): print('freezing %s' % k) v.requires_grad = False # Optimizer nbs = 64 # nominal batch size accumulate = max(round(nbs / total_batch_size), 1) # accumulate loss before optimizing hyp['weight_decay'] *= total_batch_size * accumulate / nbs # scale weight_decay logger.info(f"Scaled weight_decay = {hyp['weight_decay']}") pg0, pg1, pg2 = [], [], [] # optimizer parameter groups for k, v in model.named_modules(): if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter): pg2.append(v.bias) # biases if isinstance(v, nn.BatchNorm2d): pg0.append(v.weight) # no decay elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter): pg1.append(v.weight) # apply decay if hasattr(v, 'im'): if hasattr(v.im, 'implicit'): pg0.append(v.im.implicit) else: for iv in v.im: pg0.append(iv.implicit) if hasattr(v, 'imc'): if hasattr(v.imc, 'implicit'): pg0.append(v.imc.implicit) else: for iv in v.imc: pg0.append(iv.implicit) if hasattr(v, 'imb'): if hasattr(v.imb, 'implicit'): pg0.append(v.imb.implicit) else: for iv in v.imb: pg0.append(iv.implicit) if hasattr(v, 'imo'): if hasattr(v.imo, 'implicit'): pg0.append(v.imo.implicit) else: for iv in v.imo: pg0.append(iv.implicit) if hasattr(v, 'ia'): if hasattr(v.ia, 'implicit'): pg0.append(v.ia.implicit) else: for iv in v.ia: pg0.append(iv.implicit) if hasattr(v, 'attn'): if hasattr(v.attn, 'logit_scale'): pg0.append(v.attn.logit_scale) if hasattr(v.attn, 'q_bias'): pg0.append(v.attn.q_bias) if hasattr(v.attn, 'v_bias'): pg0.append(v.attn.v_bias) if hasattr(v.attn, 'relative_position_bias_table'): pg0.append(v.attn.relative_position_bias_table) if hasattr(v, 'rbr_dense'): if hasattr(v.rbr_dense, 'weight_rbr_origin'): pg0.append(v.rbr_dense.weight_rbr_origin) if hasattr(v.rbr_dense, 'weight_rbr_avg_conv'): pg0.append(v.rbr_dense.weight_rbr_avg_conv) if hasattr(v.rbr_dense, 'weight_rbr_pfir_conv'): pg0.append(v.rbr_dense.weight_rbr_pfir_conv) if hasattr(v.rbr_dense, 'weight_rbr_1x1_kxk_idconv1'): pg0.append(v.rbr_dense.weight_rbr_1x1_kxk_idconv1) if hasattr(v.rbr_dense, 'weight_rbr_1x1_kxk_conv2'): pg0.append(v.rbr_dense.weight_rbr_1x1_kxk_conv2) if hasattr(v.rbr_dense, 'weight_rbr_gconv_dw'): pg0.append(v.rbr_dense.weight_rbr_gconv_dw) if hasattr(v.rbr_dense, 'weight_rbr_gconv_pw'): pg0.append(v.rbr_dense.weight_rbr_gconv_pw) if hasattr(v.rbr_dense, 'vector'): pg0.append(v.rbr_dense.vector) if opt.adam: optimizer = optim.Adam(pg0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999)) # adjust beta1 to momentum else: optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True) optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']}) # add pg1 with weight_decay optimizer.add_param_group({'params': pg2}) # add pg2 (biases) logger.info('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0))) del pg0, pg1, pg2 # Scheduler https://arxiv.org/pdf/1812.01187.pdf # https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR if opt.linear_lr: lf = lambda x: (1 - x / (epochs - 1)) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear else: lf = one_cycle(1, hyp['lrf'], epochs) # cosine 1->hyp['lrf'] scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) # plot_lr_scheduler(optimizer, scheduler, epochs) # EMA ema = ModelEMA(model) if rank in [-1, 0] else None # Resume start_epoch, 
best_fitness = 0, 0.0 if pretrained: # Optimizer if ckpt['optimizer'] is not None: optimizer.load_state_dict(ckpt['optimizer']) best_fitness = ckpt['best_fitness'] # EMA if ema and ckpt.get('ema'): ema.ema.load_state_dict(ckpt['ema'].float().state_dict()) ema.updates = ckpt['updates'] # Results if ckpt.get('training_results') is not None: results_file.write_text(ckpt['training_results']) # write results.txt # Epochs start_epoch = ckpt['epoch'] + 1 if opt.resume: assert start_epoch > 0, '%s training to %g epochs is finished, nothing to resume.' % (weights, epochs) if epochs < start_epoch: logger.info('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' % (weights, ckpt['epoch'], epochs)) epochs += ckpt['epoch'] # finetune additional epochs del ckpt, state_dict # Image sizes gs = max(int(model.stride.max()), 32) # grid size (max stride) nl = model.model[-1].nl # number of detection layers (used for scaling hyp['obj']) imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size] # verify imgsz are gs-multiples # DP mode if cuda and rank == -1 and torch.cuda.device_count() > 1: model = torch.nn.DataParallel(model) # SyncBatchNorm if opt.sync_bn and cuda and rank != -1: model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device) logger.info('Using SyncBatchNorm()') # Trainloader
dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt,
5
2023-10-08 13:05:58+00:00
24k
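The optimizer setup in this record sorts parameters into three groups: `pg0` (BatchNorm weights plus implicit/attention parameters, no weight decay), `pg1` (conv and linear weights, decayed), and `pg2` (biases, no decay). Below is a minimal sketch of that grouping on a toy model; the hyperparameter values are placeholders, not the script's.

```python
import torch.nn as nn
import torch.optim as optim

model = nn.Sequential(
    nn.Conv2d(3, 16, 3, padding=1),
    nn.BatchNorm2d(16),
    nn.ReLU(),
    nn.Conv2d(16, 8, 1),
)

pg0, pg1, pg2 = [], [], []  # no-decay weights, decayed weights, biases
for v in model.modules():
    if hasattr(v, "bias") and isinstance(v.bias, nn.Parameter):
        pg2.append(v.bias)
    if isinstance(v, nn.BatchNorm2d):
        pg0.append(v.weight)   # BN scale: never decayed
    elif hasattr(v, "weight") and isinstance(v.weight, nn.Parameter):
        pg1.append(v.weight)   # conv/linear weights: decayed

optimizer = optim.SGD(pg0, lr=0.01, momentum=0.937, nesterov=True)
optimizer.add_param_group({"params": pg1, "weight_decay": 5e-4})
optimizer.add_param_group({"params": pg2})  # biases: no decay

print("%g .bias, %g conv.weight, %g other" % (len(pg2), len(pg1), len(pg0)))
```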
falesiani/torch_ga
tests/test_keras.py
[ { "identifier": "GeometricProductDense", "path": "torch_ga/layers.py", "snippet": "class GeometricProductDense(GeometricAlgebraLayer):\n \"\"\"Analagous to Keras' Dense layer but using multivector-valued matrices\n instead of scalar ones and geometric multiplication instead of standard\n multip...
import unittest as ut
import h5py
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch
from io import BytesIO
from torch_ga.layers import (
    GeometricProductDense,
    GeometricSandwichProductDense,
    GeometricProductElementwise,
    GeometricSandwichProductElementwise,
    GeometricProductConv1D,
    GeometricAlgebraExp,
    GeometricToTensor,
    GeometricToTensorWithKind,
    TensorToGeometric,
    TensorWithKindToGeometric,
)
from torch_ga.blades import BladeKind
from torch_ga import GeometricAlgebra
18,943
self.assertEqual(result.shape[-1], len(scalar_bivector_blade_indices)) def test_geometric_sandwich_product_dense_v_v(self): sta = GeometricAlgebra([1, -1, -1, -1]) geom_tensor = torch.concat( [torch.zeros([32, 6, 1]), torch.ones([32, 6, 4]), torch.zeros([32, 6, 11])], axis=-1 ) vector_blade_indices = [1, 2, 3, 4] result_indices = torch.concat([ sta.get_kind_blade_indices(BladeKind.VECTOR), sta.get_kind_blade_indices(BladeKind.TRIVECTOR) ], axis=0) geom_prod_layer = GeometricSandwichProductDense( sta, 8, blade_indices_kernel=vector_blade_indices, blade_indices_bias=result_indices, # bias_initializer=tf.keras.initializers.RandomNormal() ) result = geom_prod_layer(geom_tensor) print(f"test_geometric_sandwich_product_dense_v_v:") print(f"geom_tensor={geom_tensor}") print(f"result={result}") # vector * vector * ~vector + vector -> vector + trivector self.assertTrue(torch.all(sta.is_pure(result, result_indices))) class TestKerasLayersSerializable(ut.TestCase): def assertTensorsEqual(self, a, b): # self.assertTrue(tf.reduce_all(a == b), "%s not equal to %s" % (a, b)) assert torch.all(a == b), "%s not equal to %s" % (a, b) def _test_layer_serializable(self, layer, inputs): # Create algebra algebra = layer.algebra # Create model model = nn.Sequential(*[layer]) # Predict on inputs to compare later layer.build(inputs.shape) model_output = model(inputs) # Serialize model to virtual file # model_file = h5py.File(BytesIO(), mode="w") # model.save(model_file) model_file = "./test_model.ph" torch.save(model.state_dict(), model_file) # Load model from stream # loaded_model = tf.keras.models.load_model(model_file) device = torch.device('cpu') loaded_model = nn.Sequential(*[layer]) loaded_model.load_state_dict(torch.load(model_file, map_location=device)) # Predict on same inputs as before loaded_output = loaded_model(inputs) # Check same output for original and loaded model self.assertTensorsEqual(model_output, loaded_output) # Check same recreated algebra self.assertTensorsEqual( # algebra.metric, loaded_model.layers[0].algebra.metric algebra.metric, loaded_model[0].algebra.metric ) self.assertTensorsEqual( # algebra.cayley, loaded_model.layers[0].algebra.cayley algebra.cayley, loaded_model[0].algebra.cayley ) def test_geom_dense_serializable(self): # Create algebra sta = GeometricAlgebra([1, -1, -1, -1]) vector_blade_indices = [1, 2, 3, 4] mv_blade_indices = list(range(16)) torch.manual_seed(0) # Create model self._test_layer_serializable(GeometricProductDense( sta, units=8, blade_indices_kernel=mv_blade_indices, blade_indices_bias=vector_blade_indices ), torch.randn(*[3, 6, sta.num_blades])) def test_sandwich_dense_serializable(self): # Create algebra sta = GeometricAlgebra([1, -1, -1, -1]) vector_blade_indices = [1, 2, 3, 4] mv_blade_indices = list(range(16)) # Create model torch.manual_seed(0) self._test_layer_serializable(GeometricSandwichProductDense( sta, units=8, blade_indices_kernel=mv_blade_indices, blade_indices_bias=vector_blade_indices ), torch.randn([3, 6, sta.num_blades])) def test_geom_elementwise_serializable(self): # Create algebra sta = GeometricAlgebra([1, -1, -1, -1]) vector_blade_indices = [1, 2, 3, 4] mv_blade_indices = list(range(16)) # Create model torch.manual_seed(0)
torch.manual_seed(0) class TestKerasLayers(ut.TestCase): def assertTensorsEqual(self, a, b): # self.assertTrue(tf.reduce_all(a == b), "%s not equal to %s" % (a, b)) print(f"assertTensorsEqual(a={a},b={b})") assert torch.all(a.squeeze() == b.squeeze()), "%s not equal to %s" % (a, b) def test_tensor_to_geometric(self): sta = GeometricAlgebra([1, -1, -1, -1]) tensor = torch.ones([32, 4]) gt_geom_tensor = torch.concat( [torch.zeros([32, 1]), torch.ones([32, 4]), torch.zeros([32, 11])], axis=-1 ) vector_blade_indices = [1, 2, 3, 4] tensor_to_geom_layer = TensorToGeometric(sta, vector_blade_indices) self.assertTensorsEqual(tensor_to_geom_layer(tensor), gt_geom_tensor) def test_tensor_with_kind_to_geometric(self): sta = GeometricAlgebra([1, -1, -1, -1]) tensor = torch.ones([32, 4]) gt_geom_tensor = torch.concat( [torch.zeros([32, 1]), torch.ones([32, 4]), torch.zeros([32, 11])], axis=-1 ) vector_blade_indices = [1, 2, 3, 4] tensor_kind_to_geom_layer = TensorWithKindToGeometric( sta, BladeKind.VECTOR) self.assertTensorsEqual( tensor_kind_to_geom_layer(tensor), gt_geom_tensor) def test_geometric_to_tensor(self): sta = GeometricAlgebra([1, -1, -1, -1]) gt_tensor = torch.ones([32, 4]) geom_tensor = torch.concat( [torch.zeros([32, 1]), torch.ones([32, 4]), torch.zeros([32, 11])], axis=-1 ) vector_blade_indices = [1, 2, 3, 4] geom_to_tensor_layer = GeometricToTensor(sta, vector_blade_indices) self.assertTensorsEqual(geom_to_tensor_layer(geom_tensor), gt_tensor) def test_geometric_to_tensor_with_kind(self): sta = GeometricAlgebra([1, -1, -1, -1]) gt_tensor = torch.ones([32, 4]) geom_tensor = torch.concat( [torch.zeros([32, 1]), torch.ones([32, 4]), torch.zeros([32, 11])], axis=-1 ) vector_blade_indices = [1, 2, 3, 4] geom_to_tensor_kind_layer = GeometricToTensorWithKind( sta, BladeKind.VECTOR) self.assertTensorsEqual( geom_to_tensor_kind_layer(geom_tensor), gt_tensor) def test_geometric_product_dense_v_v(self): sta = GeometricAlgebra([1, -1, -1, -1]) geom_tensor = torch.concat( [torch.zeros([32, 6, 1]), torch.ones([32, 6, 4]), torch.zeros([32, 6, 11])], axis=-1 ) vector_blade_indices = [1, 2, 3, 4] geom_prod_layer = GeometricProductDense( sta, 8, blade_indices_kernel=vector_blade_indices, blade_indices_bias=vector_blade_indices, # bias_initializer=tf.keras.initializers.RandomNormal() ) result = geom_prod_layer(geom_tensor) print(f"test_geometric_product_dense_v_v:") print(f"geom_tensor={geom_tensor}") print(f"result={result}") # vector * vector + vector -> scalar + bivector + vector expected_result_indices = torch.tensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) self.assertTrue(torch.all(sta.is_pure(result, expected_result_indices))) def test_geometric_product_dense_s_mv(self): sta = GeometricAlgebra([1, -1, -1, -1]) geom_tensor = torch.concat( [torch.ones([20, 6, 1]), torch.zeros([20, 6, 15])], axis=-1 ) mv_blade_indices = list(range(16)) geom_prod_layer = GeometricProductDense( sta, 8, blade_indices_kernel=mv_blade_indices, blade_indices_bias=mv_blade_indices ) geom_prod_layer.build(geom_tensor.shape) result = geom_prod_layer(geom_tensor) print(f"test_geometric_product_dense_s_mv:") print(f"geom_tensor={geom_tensor}") print(f"result={result}") # scalar * multivector + multivector -> multivector # Check that nothing is zero (it would be extremely unlikely # but not impossible to randomly get a zero here). 
assert torch.all(result != 0.0) # self.assertTrue(tf.reduce_all(result != 0.0)) def test_geometric_product_dense_sequence(self): sta = GeometricAlgebra([1, -1, -1, -1]) tensor = torch.ones([20, 6, 4]) vector_blade_indices = [1, 2, 3, 4] mv_blade_indices = list(range(16)) # vector * vector + vector -> scalar + bivector + vector scalar_bivector_blade_indices = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] sequence = nn.Sequential(*[ TensorToGeometric(sta, blade_indices=vector_blade_indices), GeometricProductDense( sta, 8, blade_indices_kernel=vector_blade_indices, blade_indices_bias=vector_blade_indices, # bias_initializer=tf.keras.initializers.RandomNormal() ), GeometricToTensor(sta, blade_indices=scalar_bivector_blade_indices) ]) for e in sequence: e.build(tensor.shape) result = sequence(tensor) print(f"test_geometric_product_dense_sequence:") print(f"tensor={tensor}") print(f"result={result}") self.assertEqual(result.shape[-1], len(scalar_bivector_blade_indices)) def test_geometric_sandwich_product_dense_v_v(self): sta = GeometricAlgebra([1, -1, -1, -1]) geom_tensor = torch.concat( [torch.zeros([32, 6, 1]), torch.ones([32, 6, 4]), torch.zeros([32, 6, 11])], axis=-1 ) vector_blade_indices = [1, 2, 3, 4] result_indices = torch.concat([ sta.get_kind_blade_indices(BladeKind.VECTOR), sta.get_kind_blade_indices(BladeKind.TRIVECTOR) ], axis=0) geom_prod_layer = GeometricSandwichProductDense( sta, 8, blade_indices_kernel=vector_blade_indices, blade_indices_bias=result_indices, # bias_initializer=tf.keras.initializers.RandomNormal() ) result = geom_prod_layer(geom_tensor) print(f"test_geometric_sandwich_product_dense_v_v:") print(f"geom_tensor={geom_tensor}") print(f"result={result}") # vector * vector * ~vector + vector -> vector + trivector self.assertTrue(torch.all(sta.is_pure(result, result_indices))) class TestKerasLayersSerializable(ut.TestCase): def assertTensorsEqual(self, a, b): # self.assertTrue(tf.reduce_all(a == b), "%s not equal to %s" % (a, b)) assert torch.all(a == b), "%s not equal to %s" % (a, b) def _test_layer_serializable(self, layer, inputs): # Create algebra algebra = layer.algebra # Create model model = nn.Sequential(*[layer]) # Predict on inputs to compare later layer.build(inputs.shape) model_output = model(inputs) # Serialize model to virtual file # model_file = h5py.File(BytesIO(), mode="w") # model.save(model_file) model_file = "./test_model.ph" torch.save(model.state_dict(), model_file) # Load model from stream # loaded_model = tf.keras.models.load_model(model_file) device = torch.device('cpu') loaded_model = nn.Sequential(*[layer]) loaded_model.load_state_dict(torch.load(model_file, map_location=device)) # Predict on same inputs as before loaded_output = loaded_model(inputs) # Check same output for original and loaded model self.assertTensorsEqual(model_output, loaded_output) # Check same recreated algebra self.assertTensorsEqual( # algebra.metric, loaded_model.layers[0].algebra.metric algebra.metric, loaded_model[0].algebra.metric ) self.assertTensorsEqual( # algebra.cayley, loaded_model.layers[0].algebra.cayley algebra.cayley, loaded_model[0].algebra.cayley ) def test_geom_dense_serializable(self): # Create algebra sta = GeometricAlgebra([1, -1, -1, -1]) vector_blade_indices = [1, 2, 3, 4] mv_blade_indices = list(range(16)) torch.manual_seed(0) # Create model self._test_layer_serializable(GeometricProductDense( sta, units=8, blade_indices_kernel=mv_blade_indices, blade_indices_bias=vector_blade_indices ), torch.randn(*[3, 6, sta.num_blades])) def 
test_sandwich_dense_serializable(self): # Create algebra sta = GeometricAlgebra([1, -1, -1, -1]) vector_blade_indices = [1, 2, 3, 4] mv_blade_indices = list(range(16)) # Create model torch.manual_seed(0) self._test_layer_serializable(GeometricSandwichProductDense( sta, units=8, blade_indices_kernel=mv_blade_indices, blade_indices_bias=vector_blade_indices ), torch.randn([3, 6, sta.num_blades])) def test_geom_elementwise_serializable(self): # Create algebra sta = GeometricAlgebra([1, -1, -1, -1]) vector_blade_indices = [1, 2, 3, 4] mv_blade_indices = list(range(16)) # Create model torch.manual_seed(0)
self._test_layer_serializable(GeometricProductElementwise(
2
2023-10-07 13:34:07+00:00
24k
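Every serialization test in this record follows the same round trip: run the layer, save the model's `state_dict`, load it into a freshly built copy, and assert the outputs match. Here is a minimal sketch of that pattern with a plain `nn.Linear` standing in for the geometric-algebra layers.

```python
import torch
import torch.nn as nn

torch.manual_seed(0)
model = nn.Sequential(nn.Linear(16, 8))

inputs = torch.randn(3, 6, 16)
expected = model(inputs)

model_file = "./test_model.ph"
torch.save(model.state_dict(), model_file)

# A fresh instance starts with different random weights ...
reloaded = nn.Sequential(nn.Linear(16, 8))
# ... until the saved state_dict is loaded back in.
reloaded.load_state_dict(torch.load(model_file, map_location=torch.device("cpu")))

assert torch.all(reloaded(inputs) == expected)
```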
Significant-Gravitas/autostandup
bot.py
[ { "identifier": "StreaksDB", "path": "streaks/streaks_db.py", "snippet": "class StreaksDB(BaseDB):\n \"\"\"\n StreaksDB class handles all operations related to the 'streaks' table.\n Inherits from the BaseDB class.\n \"\"\"\n\n def __init__(self, host, user, password, database, port):\n ...
import os
import pytz
import asyncio
import openai
import requests
from typing import List
from dotenv import load_dotenv
from datetime import datetime, timedelta
from multiprocessing import Process
from streaks.streaks_db import StreaksDB
from team_members.team_member_db import TeamMemberDB
from updates.updates_db import UpdatesDB
from weekly_posts.weekly_posts_db import WeeklyPostsDB
from streaks.streaks_manager import StreaksManager
from team_members.team_member_manager import TeamMemberManager
from updates.updates_manager import UpdatesManager
from weekly_posts.weekly_post_manager import WeeklyPostManager
from scheduler import Scheduler
from team_members.team_member import TeamMember
from discord.ext import commands, tasks
from discord import Intents, DMChannel
from flask import Flask
from asyncio import Task, ensure_future, CancelledError
15,324
@bot.command(name='forcepostrebuild') async def force_post_rebuild(ctx): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to force a post rebuild.") return # Rebuild the post await weekly_post_manager.rebuild_post(team_member_manager.team_members) await ctx.send("Post rebuilt successfully.") @bot.command(name='deletelateststatus') async def delete_latest_status(ctx, discord_id: int): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to delete status updates.") return # Find the member object using the Discord ID member = team_member_manager.find_member(discord_id) if not member: await ctx.send(f"No user with Discord ID {discord_id} found.") return # Delete the newest status using the UpdatesManager's method updates_manager.delete_newest_status(discord_id) await ctx.send(f"Latest status update for user with Discord ID {discord_id} deleted successfully.") @bot.command(name='viewuser') async def view_user(ctx, discord_id: int): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to view user data.") return # Get the member's statuses using the UpdatesManager's method statuses = updates_manager.get_all_statuses_for_user(discord_id) if not statuses: await ctx.send(f"No status updates found for user with Discord ID {discord_id}.") return # Loop through the statuses and send individual messages for status in statuses: await ctx.send(f"### **Timestamp:** {status['timestamp']}") await ctx.send(f"### **Raw Status:** {status['status']}") await ctx.send(f"### **Summarized Status:** \n{status['summarized_status']}") @bot.command(name='setvacationstatus') async def set_vacation_status(ctx, discord_id: int): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to set vacation status.") return member = team_member_manager.find_member(discord_id) if member: new_status = not member.on_vacation team_member_manager.set_member_vacation_status(discord_id, new_status) await ctx.send(f"Vacation status for user with Discord ID {discord_id} set to {'on vacation' if new_status else 'not on vacation'}.") else: await ctx.send(f"No user with Discord ID {discord_id} found.") @bot.command(name='weeklysummary') async def weekly_summary(ctx, discord_id: int, start_date: str, end_date: str): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to generate weekly summaries.") return # Find the member object using the Discord ID member = team_member_manager.find_member(discord_id) if not member: await ctx.send(f"No user with Discord ID {discord_id} found.") return # Convert the start_date and end_date strings to datetime objects # Adjusting the date format to MM-DD-YYYY and setting the time try: start_date = datetime.strptime(start_date, '%m-%d-%Y') end_date = datetime.strptime(end_date, '%m-%d-%Y') # Setting the time to ensure the whole week is captured start_date = start_date.replace(hour=0, minute=0, second=0, microsecond=0) end_date = end_date.replace(hour=23, minute=59, second=59, microsecond=999999) except ValueError: await ctx.send("Invalid date format. 
Please use MM-DD-YYYY.") return # Generate the weekly summary weekly_summary = await updates_manager.generate_weekly_summary(discord_id, start_date, end_date) # Send the weekly summary to the admin user admin_user = bot.get_user(ADMIN_DISCORD_ID) if admin_user: await admin_user.send(f"**{member.name}'s Weekly Summary for {start_date.strftime('%m-%d-%Y')} to {end_date.strftime('%m-%d-%Y')}:**\n{weekly_summary}") else: await ctx.send("Unable to find the admin user.") @bot.event async def on_ready(): print("Bot is online!") # Log that the bot is online streaks_db = StreaksDB(MYSQL_HOST, MYSQL_USER, MYSQL_PASSWORD, MYSQL_DB, MYSQL_PORT) team_member_db = TeamMemberDB(MYSQL_HOST, MYSQL_USER, MYSQL_PASSWORD, MYSQL_DB, MYSQL_PORT) weekly_posts_db = WeeklyPostsDB(MYSQL_HOST, MYSQL_USER, MYSQL_PASSWORD, MYSQL_DB, MYSQL_PORT) updates_db = UpdatesDB(MYSQL_HOST, MYSQL_USER, MYSQL_PASSWORD, MYSQL_DB, MYSQL_PORT) guild = bot.get_guild(GUILD_TOKEN) channel = guild.get_channel(CHANNEL_TOKEN) global updates_manager updates_manager = UpdatesManager(updates_db) global streaks_manager streaks_manager = StreaksManager(streaks_db) global team_member_manager
# Import required modules app = Flask(__name__) # Load environment variables from the .env file load_dotenv() # Retrieve bot, guild, and channel tokens from environment variables BOT_TOKEN = os.getenv('DISCORD_BOT_TOKEN') GUILD_TOKEN = int(os.getenv('DISCORD_GUILD_TOKEN')) CHANNEL_TOKEN = int(os.getenv('DISCORD_CHANNEL_TOKEN')) ADMIN_DISCORD_ID = int(os.getenv('ADMIN_DISCORD_ID')) # Retrieve database credentials from environment variables MYSQL_HOST = os.getenv('MYSQL_HOST') MYSQL_USER = os.getenv('MYSQL_USER') MYSQL_PASSWORD = os.getenv('MYSQL_PASSWORD') MYSQL_DB = os.getenv('MYSQL_DB') MYSQL_PORT = os.getenv('MYSQL_PORT') ORG_NAME = os.getenv('GITHUB_ORG_NAME') ORG_TOKEN = os.getenv('GITHUB_ORG_TOKEN') OPENAI_API_KEY = os.getenv('OPENAI_API_KEY') # Initialize bot with default intents intents = Intents.default() intents.members = True intents.message_content = True bot = commands.Bot(command_prefix='!', intents=intents) openai.api_key = OPENAI_API_KEY # TODO: Remove these globals streaks_manager = None weekly_post_manager = None team_member_manager = None updates_manager = None scheduler = None ongoing_status_requests = {} THUMBS_UP_EMOJI = "👍" PENCIL_EMOJI = "✏️" REPORT_SUBMISSION_EMOJI = '📝' async def weekly_state_reset(weekly_post_manager: WeeklyPostManager, streaks_manager: StreaksManager, team_members: List[TeamMember]): # Reset streaks for the previous week for member in team_members: if not member.on_vacation and member.weekly_checkins < 5: streaks_manager.reset_streak(member.discord_id) member.reset_streak() member.reset_weekly_checkins() # Initialize new weekly post await weekly_post_manager.initialize_post(team_members) def get_all_commit_messages_for_user(org_name: str, token: str, member: TeamMember) -> list: """Retrieve all commit messages for a user across all repos in an organization from the last 24 hours.""" headers = { "Authorization": f"token {token}", "Accept": "application/vnd.github.v3+json" } last_update_timestamp, user_time_zone = updates_manager.get_last_update_timestamp(member.discord_id) if last_update_timestamp: # Convert the timestamp to UTC local_tz = pytz.timezone(user_time_zone) localized_timestamp = local_tz.localize(last_update_timestamp) utc_timestamp = localized_timestamp.astimezone(pytz.utc) # Format the timestamp for the GitHub API and append 'Z' since_date = utc_timestamp.isoformat() if not since_date.endswith('Z'): since_date = utc_timestamp.isoformat().replace('+00:00', '') + 'Z' else: # If no updates found, default to last 24 hours since_date = (datetime.utcnow() - timedelta(days=1)).isoformat() + 'Z' all_commit_messages = [] # Paginate through all repositories in the organization repos_url = f"https://api.github.com/orgs/{org_name}/repos?type=all&per_page=100" while repos_url: response = requests.get(repos_url, headers=headers) if response.status_code != 200: # Log error and break loop print(f"Failed to fetch repos: {response.status_code} {response.text}") break repos = response.json() # Iterate over each repository for repo in repos: repo_name = repo["name"] commits_url = f"https://api.github.com/repos/{org_name}/{repo_name}/commits?author={member.github_username}&since={since_date}&per_page=100" # Paginate through commits for the repository while commits_url: response = requests.get(commits_url, headers=headers) if response.status_code != 200: # Log error and continue to the next repository print(f"Failed to fetch commits for {repo_name}: {response.status_code} {response.text}") break commits = response.json() repo_commit_messages = 
[commit["commit"]["message"] for commit in commits] all_commit_messages.extend(repo_commit_messages) # Check for the 'next' link for commits pagination commits_url = get_pagination_link(response.headers, 'next') # Check for the 'next' link for repositories pagination repos_url = get_pagination_link(response.headers, 'next') return all_commit_messages def get_pagination_link(headers, rel): """Extract pagination link for the 'rel' type from the Link header.""" link = headers.get('Link', None) if link: links = link.split(', ') for link in links: if 'rel="{}"'.format(rel) in link: return link.split('; ')[0].strip('<>') return None async def send_status_request(member: TeamMember, weekly_post_manager: WeeklyPostManager, streaks_manager: StreaksManager, updates_manager: UpdatesManager): if member.weekly_checkins == 5: return # If already completed 5 check-ins, do nothing user = bot.get_user(member.discord_id) if user: # Notify the admin that a status request is being sent admin_user = bot.get_user(ADMIN_DISCORD_ID) if admin_user: await admin_user.send(f"Status request sent to {member.name}.") # Cancel the previous task if it exists ongoing_task: Task = ongoing_status_requests.get(member.discord_id) if ongoing_task: ongoing_task.cancel() # Retrieve all commit messages for the member commit_messages = get_all_commit_messages_for_user(ORG_NAME, ORG_TOKEN, member) if not commit_messages: summarized_report = "You have no commits for the previous working day." msg = f"{summarized_report}\nReact with {THUMBS_UP_EMOJI} to confirm, {PENCIL_EMOJI} to iterate with AI, or {REPORT_SUBMISSION_EMOJI} to submit your own report." else: summarized_report = await updates_manager.summarize_technical_updates(commit_messages) msg = f"Here's your summarized report based on your commits:\n{summarized_report}\nReact with {THUMBS_UP_EMOJI} to confirm, {PENCIL_EMOJI} to iterate with AI, or {REPORT_SUBMISSION_EMOJI} to submit your own report." raw_updates = summarized_report # Send initial message and wait for reaction await user.send( f"# Good morning {member.name}, time for your daily status update!\n" f"### I'm first going to check your commit messages and try to build a technical report for you.\n" f"### Next I will ask you for any non-technical updates from your previous work day.\n" f"### Finally I will ask you what you plan to work on today." 
) sent_message = await user.send(msg) await sent_message.add_reaction(THUMBS_UP_EMOJI) await sent_message.add_reaction(PENCIL_EMOJI) await sent_message.add_reaction(REPORT_SUBMISSION_EMOJI) def check(m) -> bool: return m.author == user and isinstance(m.channel, DMChannel) # Store the new wait_for reaction task in the global dictionary ongoing_task = ensure_future(bot.wait_for('reaction_add', check=lambda r, u: u == user and r.message.id == sent_message.id and isinstance(r.message.channel, DMChannel) and str(r.emoji) in [THUMBS_UP_EMOJI, PENCIL_EMOJI, REPORT_SUBMISSION_EMOJI])) ongoing_status_requests[member.discord_id] = ongoing_task reaction, reactor = await ongoing_task ongoing_status_requests.pop(member.discord_id, None) # Remove the task once we get the reaction for emoji in [THUMBS_UP_EMOJI, PENCIL_EMOJI, REPORT_SUBMISSION_EMOJI]: await sent_message.remove_reaction(emoji, bot.user) while str(reaction.emoji) in [PENCIL_EMOJI, REPORT_SUBMISSION_EMOJI]: if str(reaction.emoji) == PENCIL_EMOJI: await user.send("What would you like me to change?") # Store the new wait_for message (feedback) task in the global dictionary ongoing_task = ensure_future(bot.wait_for('message', check=check)) ongoing_status_requests[member.discord_id] = ongoing_task feedback = await ongoing_task ongoing_status_requests.pop(member.discord_id, None) # Remove the task once we get the feedback # Send original + feedback to LLM for reformatting summarized_report = await updates_manager.summarize_feedback_and_revisions(summarized_report, feedback.content) elif str(reaction.emoji) == REPORT_SUBMISSION_EMOJI: await user.send("Please submit your technical report directly.") # Store the new wait_for message (report submission) task in the global dictionary ongoing_task = ensure_future(bot.wait_for('message', check=check)) ongoing_status_requests[member.discord_id] = ongoing_task direct_report = await ongoing_task ongoing_status_requests.pop(member.discord_id, None) # Remove the task once we get the report summarized_report = direct_report.content break # Exit the while loop as the user has submitted their report directly msg = f"Here's the revised report:\n{summarized_report}\nReact with {THUMBS_UP_EMOJI} to confirm, {PENCIL_EMOJI} to iterate with AI, or {REPORT_SUBMISSION_EMOJI} to submit your own report." last_sent_message = await send_long_message(user, msg) if last_sent_message: await last_sent_message.add_reaction(THUMBS_UP_EMOJI) await last_sent_message.add_reaction(PENCIL_EMOJI) await last_sent_message.add_reaction(REPORT_SUBMISSION_EMOJI) # Store the new wait_for reaction task in the global dictionary ongoing_task = ensure_future(bot.wait_for('reaction_add', check=lambda r, u: u == user and r.message.id == last_sent_message.id and isinstance(r.message.channel, DMChannel) and str(r.emoji) in [THUMBS_UP_EMOJI, PENCIL_EMOJI, REPORT_SUBMISSION_EMOJI])) ongoing_status_requests[member.discord_id] = ongoing_task reaction, user = await ongoing_task ongoing_status_requests.pop(member.discord_id, None) # Remove the task once we get the reaction for emoji in [THUMBS_UP_EMOJI, PENCIL_EMOJI, REPORT_SUBMISSION_EMOJI]: await last_sent_message.remove_reaction(emoji, bot.user) # Prompt user for non-technical updates from the previous day non_technical_msg_prompt = "Please provide any non-technical updates from your previous working day, e.g., important meetings, interviews, etc." 
await user.send(non_technical_msg_prompt) # Store the new wait_for message (non-technical update) task in the global dictionary ongoing_task = ensure_future(bot.wait_for('message', check=check)) ongoing_status_requests[member.discord_id] = ongoing_task non_technical_update_raw = await ongoing_task ongoing_status_requests.pop(member.discord_id, None) # Remove the task once we get the non-technical update raw_updates += f"\n\n{non_technical_update_raw.content}" # Summarize non-technical update with LLM non_technical_update = await updates_manager.summarize_non_technical_updates(non_technical_update_raw.content) # Prompt user for their goals for the day goals_msg_prompt = "What do you plan to work on or accomplish today?" await user.send(goals_msg_prompt) # Store the new wait_for message (goals for the day) task in the global dictionary ongoing_task = ensure_future(bot.wait_for('message', check=check)) ongoing_status_requests[member.discord_id] = ongoing_task goals_for_today_raw = await ongoing_task ongoing_status_requests.pop(member.discord_id, None) # Remove the task once we get the goals # Summarize goals for the day with LLM goals_for_today = await updates_manager.summarize_goals_for_the_day(goals_for_today_raw.content) # Update the streak for this member streak = streaks_manager.get_streak(member.discord_id) streaks_manager.update_streak(member.discord_id, streak + 1) member.update_streak(streaks_manager.get_streak(member.discord_id)) member.increment_weekly_checkins() raw_updates += f"\n\n{goals_for_today_raw.content}" final_updates = f"{summarized_report}\n\n{non_technical_update}\n\n{goals_for_today}" updates_manager.insert_status(member.discord_id, raw_updates, member.time_zone) updates_manager.update_summarized_status(member.discord_id, final_updates) # Update the Discord post using WeeklyPostManager await weekly_post_manager.rebuild_post(team_member_manager.team_members) # Member name update as a header member_update_header = f"## {member.name}'s Update:" # Compile the final report with Markdown formatting final_report = ( f"\n### Technical Update:\n" f"{summarized_report}\n" f"### Non-Technical Update:\n" f"{non_technical_update}\n" f"### Goals for Today:\n" f"{goals_for_today}" ) stand_up_feedback = await updates_manager.evaluate_performance(final_report) # Concatenate the member name update with the final report and send to the designated Discord channel complete_message = f"{member_update_header}{final_report}" guild = bot.get_guild(GUILD_TOKEN) channel_to_post_in = guild.get_channel(CHANNEL_TOKEN) await user.send(stand_up_feedback) await send_long_message(channel_to_post_in, complete_message) async def send_long_message(destination, msg): max_length = 2000 # Discord's max character limit for a message sent_messages = [] # Keep track of all messages sent while len(msg) > 0: # If the message is shorter than the max length, send it as is if len(msg) <= max_length: sent_message = await destination.send(msg) sent_messages.append(sent_message) break # The message is sent, so break out of the loop # Find the nearest newline character before the max_length split_index = msg.rfind('\n', 0, max_length) # If no newline is found, just split at max_length if split_index == -1: split_index = max_length # Split the message at the found index and send the first part part_to_send = msg[:split_index].strip() sent_message = await destination.send(part_to_send) sent_messages.append(sent_message) # Wait a bit to respect Discord's rate limits await asyncio.sleep(1) # Remove the part that was sent 
from the message msg = msg[split_index:].strip() # Return the last message sent for reaction addition return sent_messages[-1] if sent_messages else None @bot.command(name='viewscheduledjobs') async def view_scheduled_jobs(ctx): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to view scheduled jobs.") return # Get all scheduled jobs using the Scheduler's method scheduled_jobs = scheduler.get_all_scheduled_jobs(team_member_manager) # Send the scheduled jobs to the admin user for job in scheduled_jobs: await ctx.send(job) @bot.command(name='statusrequest') async def status_request(ctx, discord_id: int): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to request status.") return # Find the member object using the Discord ID member_to_request = team_member_manager.find_member(discord_id) if member_to_request: for member in team_member_manager.team_members: scheduler.remove_job(member.discord_id) scheduler.unschedule_weekly_post() # Send the status request to the member await ctx.send(f"Status request sent to user with Discord ID {discord_id}.") for member in team_member_manager.team_members: scheduler.add_job(send_status_request, member, weekly_post_manager, streaks_manager, updates_manager) scheduler.schedule_weekly_post(weekly_state_reset, weekly_post_manager, streaks_manager, team_member_manager.team_members) await send_status_request(member_to_request, weekly_post_manager, streaks_manager, updates_manager) await ctx.send(f"Status request received from user with Discord ID {discord_id}.") else: await ctx.send(f"No user with Discord ID {discord_id} found.") @bot.command(name='adduser') async def add_user(ctx, discord_id: int, time_zone: str, name: str, github_username: str): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to add users.") return # Add the new member using team_member_manager team_member_manager.add_member(discord_id, name, time_zone, github_username) # Update the weekly post to include the new member new_member = team_member_manager.find_member(discord_id) if new_member: await weekly_post_manager.rebuild_post(team_member_manager.team_members) scheduler.add_job(send_status_request, new_member, weekly_post_manager, streaks_manager, updates_manager) scheduler.unschedule_weekly_post() scheduler.schedule_weekly_post(weekly_state_reset, weekly_post_manager, streaks_manager, team_member_manager.team_members) await ctx.send(f"User {name} added successfully.") @bot.command(name='removeuser') async def remove_user(ctx, discord_id: int): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to remove users.") return # Find the member object member_to_remove = team_member_manager.find_member(discord_id) if member_to_remove: # Remove the member from the database team_member_manager.remove_member(discord_id) # Update the weekly post to remove the member await weekly_post_manager.rebuild_post(team_member_manager.team_members) scheduler.remove_job(discord_id) scheduler.unschedule_weekly_post() scheduler.schedule_weekly_post(weekly_state_reset, weekly_post_manager, streaks_manager, team_member_manager.team_members) await ctx.send(f"User with Discord ID {discord_id} removed successfully.") else: await ctx.send(f"No user with Discord ID {discord_id} found.") @bot.command(name='listusers') async 
def list_users(ctx): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to list users.") return # List users using team_member_manager users = [(member.discord_id, member.name, member.time_zone, member.github_username, member.current_streak) for member in team_member_manager.team_members] user_list = '\n'.join([f"Name: {user[1]}, Discord ID: {user[0]}, Time Zone: {user[2]}, GitHub Username: {user[3]}, Current Streak: {user[4]}" for user in users]) await ctx.send(f"List of users:\n{user_list}") @bot.command(name='updatetimezone') async def update_timezone(ctx, discord_id: int, new_time_zone: str): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to update timezones.") return # Find the member object using the Discord ID member_to_update = team_member_manager.find_member(discord_id) if member_to_update: # Update the timezone in the database team_member_manager.update_member_timezone(discord_id, new_time_zone) scheduler.remove_job(discord_id) scheduler.add_job(send_status_request, member_to_update, weekly_post_manager, streaks_manager, updates_manager) scheduler.unschedule_weekly_post() scheduler.schedule_weekly_post(weekly_state_reset, weekly_post_manager, streaks_manager, team_member_manager.team_members) await ctx.send(f"Timezone for user with Discord ID {discord_id} updated to {new_time_zone}.") else: await ctx.send(f"No user with Discord ID {discord_id} found.") @bot.command(name='updatestreak') async def update_streak(ctx, discord_id: int, new_streak: int): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to update streaks.") return # Find the member object using the Discord ID member_to_update = team_member_manager.find_member(discord_id) if member_to_update: # Update the streak in the database streaks_manager.update_streak(discord_id, new_streak) member_to_update.update_streak(new_streak) # Update the Discord post using WeeklyPostManager await weekly_post_manager.rebuild_post(team_member_manager.team_members) await ctx.send(f"Streak for user with Discord ID {discord_id} updated to {new_streak}.") else: await ctx.send(f"No user with Discord ID {discord_id} found.") @bot.command(name='forcepostrebuild') async def force_post_rebuild(ctx): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to force a post rebuild.") return # Rebuild the post await weekly_post_manager.rebuild_post(team_member_manager.team_members) await ctx.send("Post rebuilt successfully.") @bot.command(name='deletelateststatus') async def delete_latest_status(ctx, discord_id: int): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to delete status updates.") return # Find the member object using the Discord ID member = team_member_manager.find_member(discord_id) if not member: await ctx.send(f"No user with Discord ID {discord_id} found.") return # Delete the newest status using the UpdatesManager's method updates_manager.delete_newest_status(discord_id) await ctx.send(f"Latest status update for user with Discord ID {discord_id} deleted successfully.") @bot.command(name='viewuser') async def view_user(ctx, discord_id: int): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized 
to view user data.") return # Get the member's statuses using the UpdatesManager's method statuses = updates_manager.get_all_statuses_for_user(discord_id) if not statuses: await ctx.send(f"No status updates found for user with Discord ID {discord_id}.") return # Loop through the statuses and send individual messages for status in statuses: await ctx.send(f"### **Timestamp:** {status['timestamp']}") await ctx.send(f"### **Raw Status:** {status['status']}") await ctx.send(f"### **Summarized Status:** \n{status['summarized_status']}") @bot.command(name='setvacationstatus') async def set_vacation_status(ctx, discord_id: int): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to set vacation status.") return member = team_member_manager.find_member(discord_id) if member: new_status = not member.on_vacation team_member_manager.set_member_vacation_status(discord_id, new_status) await ctx.send(f"Vacation status for user with Discord ID {discord_id} set to {'on vacation' if new_status else 'not on vacation'}.") else: await ctx.send(f"No user with Discord ID {discord_id} found.") @bot.command(name='weeklysummary') async def weekly_summary(ctx, discord_id: int, start_date: str, end_date: str): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to generate weekly summaries.") return # Find the member object using the Discord ID member = team_member_manager.find_member(discord_id) if not member: await ctx.send(f"No user with Discord ID {discord_id} found.") return # Convert the start_date and end_date strings to datetime objects # Adjusting the date format to MM-DD-YYYY and setting the time try: start_date = datetime.strptime(start_date, '%m-%d-%Y') end_date = datetime.strptime(end_date, '%m-%d-%Y') # Setting the time to ensure the whole week is captured start_date = start_date.replace(hour=0, minute=0, second=0, microsecond=0) end_date = end_date.replace(hour=23, minute=59, second=59, microsecond=999999) except ValueError: await ctx.send("Invalid date format. Please use MM-DD-YYYY.") return # Generate the weekly summary weekly_summary = await updates_manager.generate_weekly_summary(discord_id, start_date, end_date) # Send the weekly summary to the admin user admin_user = bot.get_user(ADMIN_DISCORD_ID) if admin_user: await admin_user.send(f"**{member.name}'s Weekly Summary for {start_date.strftime('%m-%d-%Y')} to {end_date.strftime('%m-%d-%Y')}:**\n{weekly_summary}") else: await ctx.send("Unable to find the admin user.") @bot.event async def on_ready(): print("Bot is online!") # Log that the bot is online streaks_db = StreaksDB(MYSQL_HOST, MYSQL_USER, MYSQL_PASSWORD, MYSQL_DB, MYSQL_PORT) team_member_db = TeamMemberDB(MYSQL_HOST, MYSQL_USER, MYSQL_PASSWORD, MYSQL_DB, MYSQL_PORT) weekly_posts_db = WeeklyPostsDB(MYSQL_HOST, MYSQL_USER, MYSQL_PASSWORD, MYSQL_DB, MYSQL_PORT) updates_db = UpdatesDB(MYSQL_HOST, MYSQL_USER, MYSQL_PASSWORD, MYSQL_DB, MYSQL_PORT) guild = bot.get_guild(GUILD_TOKEN) channel = guild.get_channel(CHANNEL_TOKEN) global updates_manager updates_manager = UpdatesManager(updates_db) global streaks_manager streaks_manager = StreaksManager(streaks_db) global team_member_manager
team_member_manager = TeamMemberManager(team_member_db)
5
2023-10-12 02:01:46+00:00
24k
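The bot code in the record above pages through GitHub's REST API by following the `Link` response header. Below is a minimal standalone sketch of that pattern; `fetch_all_pages` and `next_page_link` are illustrative names, not part of the record, and the only dependency assumed is the standard `requests` library.

import requests

def next_page_link(headers) -> str | None:
    # Link header format: <https://...&page=2>; rel="next", <https://...&page=9>; rel="last"
    link = headers.get('Link')
    if not link:
        return None
    for part in link.split(', '):
        if 'rel="next"' in part:
            return part.split('; ')[0].strip('<>')
    return None

def fetch_all_pages(url: str, token: str) -> list:
    # Accumulate JSON items across pages until no rel="next" link remains.
    headers = {"Authorization": f"token {token}", "Accept": "application/vnd.github.v3+json"}
    items = []
    while url:
        resp = requests.get(url, headers=headers)
        resp.raise_for_status()
        items.extend(resp.json())
        # Always read the link from THIS response; reading it from a nested
        # request's response is exactly the pagination bug noted in the record above.
        url = next_page_link(resp.headers)
    return items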
azuline/rose
rose/cache_test.py
[ { "identifier": "TEST_COLLAGE_1", "path": "conftest.py", "snippet": "TEST_COLLAGE_1 = TESTDATA / \"Collage 1\"" }, { "identifier": "TEST_PLAYLIST_1", "path": "conftest.py", "snippet": "TEST_PLAYLIST_1 = TESTDATA / \"Playlist 1\"" }, { "identifier": "TEST_RELEASE_1", "path": "...
import dataclasses import hashlib import shutil import time import pytest import tomllib from pathlib import Path from conftest import TEST_COLLAGE_1, TEST_PLAYLIST_1, TEST_RELEASE_1, TEST_RELEASE_2, TEST_RELEASE_3 from rose.audiotags import AudioTags from rose.cache import ( CACHE_SCHEMA_PATH, STORED_DATA_FILE_REGEX, CachedCollage, CachedPlaylist, CachedRelease, CachedTrack, _unpack, artist_exists, connect, genre_exists, get_collage, get_playlist, get_release, get_release_logtext, get_track, get_track_logtext, get_tracks_associated_with_release, get_tracks_associated_with_releases, label_exists, list_artists, list_collages, list_genres, list_labels, list_playlists, list_releases, list_tracks, lock, maybe_invalidate_cache_database, update_cache, update_cache_evict_nonexistent_releases, update_cache_for_releases, ) from rose.common import VERSION, Artist, ArtistMapping from rose.config import Config
18,311
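The test file below (see test_locks) exercises a lease-style lock: acquiring an already-held name blocks until the current holder's lease expires, so a second nested acquisition succeeds after roughly the timeout. The following is a rough sketch of one way to get those semantics on SQLite; it illustrates the behavior the test checks and is not rose's actual implementation — the table name and helper are assumptions.

import sqlite3
import time
import uuid
from contextlib import contextmanager

@contextmanager
def lease_lock(conn: sqlite3.Connection, name: str, timeout: float = 1.0):
    # Lease-style advisory lock: a claim expires after `timeout` seconds, so a
    # crashed or slow holder cannot block other acquirers forever.
    conn.execute(
        "CREATE TABLE IF NOT EXISTS locks (name TEXT PRIMARY KEY, owner TEXT, valid_until REAL)"
    )
    owner = uuid.uuid4().hex
    while True:
        now = time.time()
        conn.execute(
            """
            INSERT INTO locks (name, owner, valid_until) VALUES (?, ?, ?)
            ON CONFLICT (name) DO UPDATE
                SET owner = excluded.owner, valid_until = excluded.valid_until
                WHERE locks.valid_until < ?
            """,
            (name, owner, now + timeout, now),
        )
        conn.commit()
        row = conn.execute("SELECT owner FROM locks WHERE name = ?", (name,)).fetchone()
        if row and row[0] == owner:
            break  # we hold the lease now
        time.sleep(0.05)  # someone else holds an unexpired lease; poll again
    try:
        yield
    finally:
        conn.execute("DELETE FROM locks WHERE name = ? AND owner = ?", (name, owner))
        conn.commit()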
] # Assert that source file was not updated to remove the track. with (config.music_source_dir / "!playlists" / "Lala Lisa.toml").open("rb") as fp: data = tomllib.load(fp) assert not [t for t in data["tracks"] if "missing" in t] assert len(data["tracks"]) == 2 @pytest.mark.usefixtures("seeded_cache") def test_list_releases(config: Config) -> None: expected = [ CachedRelease( datafile_mtime="999", id="r1", source_path=Path(config.music_source_dir / "r1"), cover_image_path=None, added_at="0000-01-01T00:00:00+00:00", albumtitle="Release 1", releasetype="album", year=2023, disctotal=1, new=False, genres=["Techno", "Deep House"], labels=["Silk Music"], albumartists=ArtistMapping(main=[Artist("Techno Man"), Artist("Bass Man")]), metahash="1", ), CachedRelease( datafile_mtime="999", id="r2", source_path=Path(config.music_source_dir / "r2"), cover_image_path=Path(config.music_source_dir / "r2" / "cover.jpg"), added_at="0000-01-01T00:00:00+00:00", albumtitle="Release 2", releasetype="album", year=2021, disctotal=1, new=False, genres=["Classical"], labels=["Native State"], albumartists=ArtistMapping( main=[Artist("Violin Woman")], guest=[Artist("Conductor Woman")] ), metahash="2", ), CachedRelease( datafile_mtime="999", id="r3", source_path=Path(config.music_source_dir / "r3"), cover_image_path=None, added_at="0000-01-01T00:00:00+00:00", albumtitle="Release 3", releasetype="album", year=2021, disctotal=1, new=True, genres=[], labels=[], albumartists=ArtistMapping(), metahash="3", ), ] assert list_releases(config) == expected assert list_releases(config, ["r1"]) == expected[:1] @pytest.mark.usefixtures("seeded_cache") def test_get_release_and_associated_tracks(config: Config) -> None: release = get_release(config, "r1") assert release is not None assert release == CachedRelease( datafile_mtime="999", id="r1", source_path=Path(config.music_source_dir / "r1"), cover_image_path=None, added_at="0000-01-01T00:00:00+00:00", albumtitle="Release 1", releasetype="album", year=2023, disctotal=1, new=False, genres=["Techno", "Deep House"], labels=["Silk Music"], albumartists=ArtistMapping(main=[Artist("Techno Man"), Artist("Bass Man")]), metahash="1", ) expected_tracks = [ CachedTrack( id="t1", source_path=config.music_source_dir / "r1" / "01.m4a", source_mtime="999", tracktitle="Track 1", tracknumber="01", tracktotal=2, discnumber="01", disctotal=1, duration_seconds=120, trackartists=ArtistMapping(main=[Artist("Techno Man"), Artist("Bass Man")]), metahash="1", release=release, ), CachedTrack( id="t2", source_path=config.music_source_dir / "r1" / "02.m4a", source_mtime="999", tracktitle="Track 2", tracknumber="02", tracktotal=2, discnumber="01", disctotal=1, duration_seconds=240, trackartists=ArtistMapping(main=[Artist("Techno Man"), Artist("Bass Man")]), metahash="2", release=release, ), ]
def test_schema(config: Config) -> None: """Test that the schema successfully bootstraps.""" with CACHE_SCHEMA_PATH.open("rb") as fp: schema_hash = hashlib.sha256(fp.read()).hexdigest() maybe_invalidate_cache_database(config) with connect(config) as conn: cursor = conn.execute("SELECT schema_hash, config_hash, version FROM _schema_hash") row = cursor.fetchone() assert row["schema_hash"] == schema_hash assert row["config_hash"] is not None assert row["version"] == VERSION def test_migration(config: Config) -> None: """Test that "migrating" the database correctly migrates it.""" config.cache_database_path.unlink() with connect(config) as conn: conn.execute( """ CREATE TABLE _schema_hash ( schema_hash TEXT , config_hash TEXT , version TEXT , PRIMARY KEY (schema_hash, config_hash, version) ) """ ) conn.execute( """ INSERT INTO _schema_hash (schema_hash, config_hash, version) VALUES ('haha', 'lala', 'blabla') """, ) with CACHE_SCHEMA_PATH.open("rb") as fp: latest_schema_hash = hashlib.sha256(fp.read()).hexdigest() maybe_invalidate_cache_database(config) with connect(config) as conn: cursor = conn.execute("SELECT schema_hash, config_hash, version FROM _schema_hash") row = cursor.fetchone() assert row["schema_hash"] == latest_schema_hash assert row["config_hash"] is not None assert row["version"] == VERSION cursor = conn.execute("SELECT COUNT(*) FROM _schema_hash") assert cursor.fetchone()[0] == 1 def test_locks(config: Config) -> None: """Test that taking locks works. The times are a bit loose b/c GH Actions is slow.""" lock_name = "lol" # Test that the locking and timeout work. start = time.time() with lock(config, lock_name, timeout=0.2): lock1_acq = time.time() with lock(config, lock_name, timeout=0.2): lock2_acq = time.time() # Assert that we had to wait ~0.1sec to get the second lock. assert lock1_acq - start < 0.08 assert lock2_acq - lock1_acq > 0.17 # Test that releasing a lock actually works. start = time.time() with lock(config, lock_name, timeout=0.2): lock1_acq = time.time() with lock(config, lock_name, timeout=0.2): lock2_acq = time.time() # Assert that we had to wait negligible time to get the second lock. assert lock1_acq - start < 0.08 assert lock2_acq - lock1_acq < 0.08 def test_update_cache_all(config: Config) -> None: """Test that the update all function works.""" shutil.copytree(TEST_RELEASE_1, config.music_source_dir / TEST_RELEASE_1.name) shutil.copytree(TEST_RELEASE_2, config.music_source_dir / TEST_RELEASE_2.name) # Test that we prune deleted releases too. 
with connect(config) as conn: conn.execute( """ INSERT INTO releases (id, source_path, added_at, datafile_mtime, title, releasetype, disctotal, metahash) VALUES ('aaaaaa', '0000-01-01T00:00:00+00:00', '999', 'nonexistent', 'aa', 'unknown', false, '0') """ ) update_cache(config) with connect(config) as conn: cursor = conn.execute("SELECT COUNT(*) FROM releases") assert cursor.fetchone()[0] == 2 cursor = conn.execute("SELECT COUNT(*) FROM tracks") assert cursor.fetchone()[0] == 4 def test_update_cache_multiprocessing(config: Config) -> None: """Test that the update all function works.""" shutil.copytree(TEST_RELEASE_1, config.music_source_dir / TEST_RELEASE_1.name) shutil.copytree(TEST_RELEASE_2, config.music_source_dir / TEST_RELEASE_2.name) update_cache_for_releases(config, force_multiprocessing=True) with connect(config) as conn: cursor = conn.execute("SELECT COUNT(*) FROM releases") assert cursor.fetchone()[0] == 2 cursor = conn.execute("SELECT COUNT(*) FROM tracks") assert cursor.fetchone()[0] == 4 def test_update_cache_releases(config: Config) -> None: release_dir = config.music_source_dir / TEST_RELEASE_1.name shutil.copytree(TEST_RELEASE_1, release_dir) update_cache_for_releases(config, [release_dir]) # Check that the release directory was given a UUID. release_id: str | None = None for f in release_dir.iterdir(): if m := STORED_DATA_FILE_REGEX.match(f.name): release_id = m[1] assert release_id is not None # Assert that the release metadata was read correctly. with connect(config) as conn: cursor = conn.execute( """ SELECT id, source_path, title, releasetype, year, new FROM releases WHERE id = ? """, (release_id,), ) row = cursor.fetchone() assert row["source_path"] == str(release_dir) assert row["title"] == "I Love Blackpink" assert row["releasetype"] == "album" assert row["year"] == 1990 assert row["new"] cursor = conn.execute( "SELECT genre FROM releases_genres WHERE release_id = ?", (release_id,), ) genres = {r["genre"] for r in cursor.fetchall()} assert genres == {"K-Pop", "Pop"} cursor = conn.execute( "SELECT label FROM releases_labels WHERE release_id = ?", (release_id,), ) labels = {r["label"] for r in cursor.fetchall()} assert labels == {"A Cool Label"} cursor = conn.execute( "SELECT artist, role FROM releases_artists WHERE release_id = ?", (release_id,), ) artists = {(r["artist"], r["role"]) for r in cursor.fetchall()} assert artists == { ("BLACKPINK", "main"), } for f in release_dir.iterdir(): if f.suffix != ".m4a": continue # Assert that the track metadata was read correctly. cursor = conn.execute( """ SELECT id, source_path, title, release_id, tracknumber, discnumber, duration_seconds FROM tracks WHERE source_path = ? """, (str(f),), ) row = cursor.fetchone() track_id = row["id"] assert row["title"].startswith("Track") assert row["release_id"] == release_id assert row["tracknumber"] != "" assert row["discnumber"] == "1" assert row["duration_seconds"] == 2 cursor = conn.execute( "SELECT artist, role FROM tracks_artists WHERE track_id = ?", (track_id,), ) artists = {(r["artist"], r["role"]) for r in cursor.fetchall()} assert artists == { ("BLACKPINK", "main"), } def test_update_cache_releases_uncached_with_existing_id(config: Config) -> None: """Test that IDs in filenames are read and preserved.""" release_dir = config.music_source_dir / TEST_RELEASE_2.name shutil.copytree(TEST_RELEASE_2, release_dir) update_cache_for_releases(config, [release_dir]) # Check that the release directory was given a UUID. 
release_id: str | None = None for f in release_dir.iterdir(): if m := STORED_DATA_FILE_REGEX.match(f.name): release_id = m[1] assert release_id == "ilovecarly" # Hardcoded ID for testing. def test_update_cache_releases_preserves_track_ids_across_rebuilds(config: Config) -> None: """Test that track IDs are preserved across cache rebuilds.""" release_dir = config.music_source_dir / TEST_RELEASE_3.name shutil.copytree(TEST_RELEASE_3, release_dir) update_cache_for_releases(config, [release_dir]) with connect(config) as conn: cursor = conn.execute("SELECT id FROM tracks") first_track_ids = {r["id"] for r in cursor} # Nuke the database. config.cache_database_path.unlink() maybe_invalidate_cache_database(config) # Repeat cache population. update_cache_for_releases(config, [release_dir]) with connect(config) as conn: cursor = conn.execute("SELECT id FROM tracks") second_track_ids = {r["id"] for r in cursor} # Assert IDs are equivalent. assert first_track_ids == second_track_ids def test_update_cache_releases_writes_ids_to_tags(config: Config) -> None: """Test that track IDs and release IDs are written to files.""" release_dir = config.music_source_dir / TEST_RELEASE_3.name shutil.copytree(TEST_RELEASE_3, release_dir) af = AudioTags.from_file(release_dir / "01.m4a") assert af.id is None assert af.release_id is None af = AudioTags.from_file(release_dir / "02.m4a") assert af.id is None assert af.release_id is None update_cache_for_releases(config, [release_dir]) af = AudioTags.from_file(release_dir / "01.m4a") assert af.id is not None assert af.release_id is not None af = AudioTags.from_file(release_dir / "02.m4a") assert af.id is not None assert af.release_id is not None def test_update_cache_releases_already_fully_cached(config: Config) -> None: """Test that a fully cached release No Ops when updated again.""" release_dir = config.music_source_dir / TEST_RELEASE_1.name shutil.copytree(TEST_RELEASE_1, release_dir) update_cache_for_releases(config, [release_dir]) update_cache_for_releases(config, [release_dir]) # Assert that the release metadata was read correctly. with connect(config) as conn: cursor = conn.execute( "SELECT id, source_path, title, releasetype, year, new FROM releases", ) row = cursor.fetchone() assert row["source_path"] == str(release_dir) assert row["title"] == "I Love Blackpink" assert row["releasetype"] == "album" assert row["year"] == 1990 assert row["new"] def test_update_cache_releases_disk_update_to_previously_cached(config: Config) -> None: """Test that a cached release is updated after a track updates.""" release_dir = config.music_source_dir / TEST_RELEASE_1.name shutil.copytree(TEST_RELEASE_1, release_dir) update_cache_for_releases(config, [release_dir]) # I'm too lazy to mutagen update the files, so instead we're going to update the database. And # then touch a file to signify that "we modified it." with connect(config) as conn: conn.execute("UPDATE releases SET title = 'An Uncool Album'") (release_dir / "01.m4a").touch() update_cache_for_releases(config, [release_dir]) # Assert that the release metadata was re-read and updated correctly. 
with connect(config) as conn: cursor = conn.execute( "SELECT id, source_path, title, releasetype, year, new FROM releases", ) row = cursor.fetchone() assert row["source_path"] == str(release_dir) assert row["title"] == "I Love Blackpink" assert row["releasetype"] == "album" assert row["year"] == 1990 assert row["new"] def test_update_cache_releases_disk_update_to_datafile(config: Config) -> None: """Test that a cached release is updated after a datafile updates.""" release_dir = config.music_source_dir / TEST_RELEASE_1.name shutil.copytree(TEST_RELEASE_1, release_dir) update_cache_for_releases(config, [release_dir]) with connect(config) as conn: conn.execute("UPDATE releases SET datafile_mtime = '0' AND new = false") update_cache_for_releases(config, [release_dir]) # Assert that the release metadata was re-read and updated correctly. with connect(config) as conn: cursor = conn.execute("SELECT new, added_at FROM releases") row = cursor.fetchone() assert row["new"] assert row["added_at"] def test_update_cache_releases_disk_upgrade_old_datafile(config: Config) -> None: """Test that a legacy invalid datafile is upgraded on index.""" release_dir = config.music_source_dir / TEST_RELEASE_1.name shutil.copytree(TEST_RELEASE_1, release_dir) datafile = release_dir / ".rose.lalala.toml" datafile.touch() update_cache_for_releases(config, [release_dir]) # Assert that the release metadata was re-read and updated correctly. with connect(config) as conn: cursor = conn.execute("SELECT id, new, added_at FROM releases") row = cursor.fetchone() assert row["id"] == "lalala" assert row["new"] assert row["added_at"] with datafile.open("r") as fp: data = fp.read() assert "new = true" in data assert "added_at = " in data def test_update_cache_releases_source_path_renamed(config: Config) -> None: """Test that a cached release is updated after a directory rename.""" release_dir = config.music_source_dir / TEST_RELEASE_1.name shutil.copytree(TEST_RELEASE_1, release_dir) update_cache_for_releases(config, [release_dir]) moved_release_dir = config.music_source_dir / "moved lol" release_dir.rename(moved_release_dir) update_cache_for_releases(config, [moved_release_dir]) # Assert that the release metadata was re-read and updated correctly. 
    with connect(config) as conn:
        cursor = conn.execute(
            "SELECT id, source_path, title, releasetype, year, new FROM releases",
        )
        row = cursor.fetchone()
        assert row["source_path"] == str(moved_release_dir)
        assert row["title"] == "I Love Blackpink"
        assert row["releasetype"] == "album"
        assert row["year"] == 1990
        assert row["new"]

def test_update_cache_releases_delete_nonexistent(config: Config) -> None:
    """Test that deleted releases that are no longer on disk are cleared from cache."""
    with connect(config) as conn:
        conn.execute(
            """
            INSERT INTO releases (id, source_path, added_at, datafile_mtime, title, releasetype, disctotal, metahash)
            VALUES ('aaaaaa', '0000-01-01T00:00:00+00:00', '999', 'nonexistent', 'aa', 'unknown', false, '0')
            """
        )
    update_cache_evict_nonexistent_releases(config)
    with connect(config) as conn:
        cursor = conn.execute("SELECT COUNT(*) FROM releases")
        assert cursor.fetchone()[0] == 0

def test_update_cache_releases_skips_empty_directory(config: Config) -> None:
    """Test that a directory with no audio files is skipped."""
    rd = config.music_source_dir / "lalala"
    rd.mkdir()
    (rd / "ignoreme.file").touch()
    update_cache_for_releases(config, [rd])
    with connect(config) as conn:
        cursor = conn.execute("SELECT COUNT(*) FROM releases")
        assert cursor.fetchone()[0] == 0

def test_update_cache_releases_uncaches_empty_directory(config: Config) -> None:
    """Test that a previously-cached directory with no audio files is now cleared from cache."""
    release_dir = config.music_source_dir / TEST_RELEASE_1.name
    shutil.copytree(TEST_RELEASE_1, release_dir)
    update_cache_for_releases(config, [release_dir])
    shutil.rmtree(release_dir)
    release_dir.mkdir()
    update_cache_for_releases(config, [release_dir])
    with connect(config) as conn:
        cursor = conn.execute("SELECT COUNT(*) FROM releases")
        assert cursor.fetchone()[0] == 0

def test_update_cache_releases_evicts_relations(config: Config) -> None:
    """
    Test that related entities (artist, genre, label) that have been removed from the tags are
    properly evicted from the cache on update.
    """
    release_dir = config.music_source_dir / TEST_RELEASE_2.name
    shutil.copytree(TEST_RELEASE_2, release_dir)
    # Initial cache population.
    update_cache_for_releases(config, [release_dir])
    # Pretend that we have more artists in the cache.
    with connect(config) as conn:
        conn.execute(
            """
            INSERT INTO releases_genres (release_id, genre, genre_sanitized, position)
            VALUES ('ilovecarly', 'lalala', 'lalala', 2)
            """,
        )
        conn.execute(
            """
            INSERT INTO releases_labels (release_id, label, label_sanitized, position)
            VALUES ('ilovecarly', 'lalala', 'lalala', 1)
            """,
        )
        conn.execute(
            """
            INSERT INTO releases_artists (release_id, artist, artist_sanitized, role, position)
            VALUES ('ilovecarly', 'lalala', 'lalala', 'main', 1)
            """,
        )
        conn.execute(
            """
            INSERT INTO tracks_artists (track_id, artist, artist_sanitized, role, position)
            SELECT id, 'lalala', 'lalala', 'main', 1 FROM tracks
            """,
        )
    # Second cache refresh.
    update_cache_for_releases(config, [release_dir], force=True)
    # Assert that all of the above were evicted.
    with connect(config) as conn:
        cursor = conn.execute(
            "SELECT EXISTS (SELECT * FROM releases_genres WHERE genre = 'lalala')"
        )
        assert not cursor.fetchone()[0]
        cursor = conn.execute(
            "SELECT EXISTS (SELECT * FROM releases_labels WHERE label = 'lalala')"
        )
        assert not cursor.fetchone()[0]
        cursor = conn.execute(
            "SELECT EXISTS (SELECT * FROM releases_artists WHERE artist = 'lalala')"
        )
        assert not cursor.fetchone()[0]
        cursor = conn.execute(
            "SELECT EXISTS (SELECT * FROM tracks_artists WHERE artist = 'lalala')"
        )
        assert not cursor.fetchone()[0]

def test_update_cache_releases_ignores_directories(config: Config) -> None:
    """Test that the ignore_release_directories configuration value works."""
    config = dataclasses.replace(config, ignore_release_directories=["lalala"])
    release_dir = config.music_source_dir / "lalala"
    shutil.copytree(TEST_RELEASE_1, release_dir)

    # Test that both arg+no-arg ignore the directory.
    update_cache_for_releases(config, [release_dir])
    with connect(config) as conn:
        cursor = conn.execute("SELECT COUNT(*) FROM releases")
        assert cursor.fetchone()[0] == 0
    update_cache_for_releases(config)
    with connect(config) as conn:
        cursor = conn.execute("SELECT COUNT(*) FROM releases")
        assert cursor.fetchone()[0] == 0

def test_update_cache_releases_notices_deleted_track(config: Config) -> None:
    """Test that we notice when a track is deleted."""
    release_dir = config.music_source_dir / TEST_RELEASE_1.name
    shutil.copytree(TEST_RELEASE_1, release_dir)
    update_cache(config)

    (release_dir / "02.m4a").unlink()
    update_cache(config)

    with connect(config) as conn:
        cursor = conn.execute("SELECT COUNT(*) FROM tracks")
        assert cursor.fetchone()[0] == 1

def test_update_cache_releases_ignores_partially_written_directory(config: Config) -> None:
    """Test that a partially-written cached release is ignored."""
    # 1. Write the directory and index it. This should give it IDs and shit.
    release_dir = config.music_source_dir / TEST_RELEASE_1.name
    shutil.copytree(TEST_RELEASE_1, release_dir)
    update_cache(config)

    # 2. Move the directory and "remove" the ID file.
    renamed_release_dir = config.music_source_dir / "lalala"
    release_dir.rename(renamed_release_dir)
    datafile = next(f for f in renamed_release_dir.iterdir() if f.stem.startswith(".rose"))
    tmpfile = datafile.with_name("tmp")
    datafile.rename(tmpfile)

    # 3. Re-update cache. We should see an empty cache now.
    update_cache(config)
    with connect(config) as conn:
        cursor = conn.execute("SELECT COUNT(*) FROM releases")
        assert cursor.fetchone()[0] == 0

    # 4. Put the datafile back. We should now see the release cache again properly.
    datafile.with_name("tmp").rename(datafile)
    update_cache(config)
    with connect(config) as conn:
        cursor = conn.execute("SELECT COUNT(*) FROM releases")
        assert cursor.fetchone()[0] == 1

    # 5. Rename and remove the ID file again. We should see an empty cache again.
    release_dir = renamed_release_dir
    renamed_release_dir = config.music_source_dir / "bahaha"
    release_dir.rename(renamed_release_dir)
    next(f for f in renamed_release_dir.iterdir() if f.stem.startswith(".rose")).unlink()
    update_cache(config)
    with connect(config) as conn:
        cursor = conn.execute("SELECT COUNT(*) FROM releases")
        assert cursor.fetchone()[0] == 0

    # 6. Run with force=True. This should index the directory and make a new .rose.toml file.
    update_cache(config, force=True)
    assert (renamed_release_dir / datafile.name).is_file()
    with connect(config) as conn:
        cursor = conn.execute("SELECT COUNT(*) FROM releases")
        assert cursor.fetchone()[0] == 1

def test_update_cache_rename_source_files(config: Config) -> None:
    """Test that we properly rename the source directory on cache update."""
    config = dataclasses.replace(config, rename_source_files=True)
    shutil.copytree(TEST_RELEASE_1, config.music_source_dir / TEST_RELEASE_1.name)
    (config.music_source_dir / TEST_RELEASE_1.name / "cover.jpg").touch()
    update_cache(config)

    expected_dir = config.music_source_dir / "BLACKPINK - 1990. I Love Blackpink [NEW]"
    assert expected_dir in list(config.music_source_dir.iterdir())

    files_in_dir = list(expected_dir.iterdir())
    assert expected_dir / "01. Track 1.m4a" in files_in_dir
    assert expected_dir / "02. Track 2.m4a" in files_in_dir

    with connect(config) as conn:
        cursor = conn.execute("SELECT source_path, cover_image_path FROM releases")
        row = cursor.fetchone()
        assert Path(row["source_path"]) == expected_dir
        assert Path(row["cover_image_path"]) == expected_dir / "cover.jpg"
        cursor = conn.execute("SELECT source_path FROM tracks")
        assert {Path(r[0]) for r in cursor} == {
            expected_dir / "01. Track 1.m4a",
            expected_dir / "02. Track 2.m4a",
        }

def test_update_cache_rename_source_files_nested_file_directories(config: Config) -> None:
    """Test that we properly rename arbitrarily nested files and clean up the empty dirs."""
    config = dataclasses.replace(config, rename_source_files=True)
    shutil.copytree(TEST_RELEASE_1, config.music_source_dir / TEST_RELEASE_1.name)
    (config.music_source_dir / TEST_RELEASE_1.name / "lala").mkdir()
    (config.music_source_dir / TEST_RELEASE_1.name / "01.m4a").rename(
        config.music_source_dir / TEST_RELEASE_1.name / "lala" / "1.m4a"
    )
    update_cache(config)

    expected_dir = config.music_source_dir / "BLACKPINK - 1990. I Love Blackpink [NEW]"
    assert expected_dir in list(config.music_source_dir.iterdir())

    files_in_dir = list(expected_dir.iterdir())
    assert expected_dir / "01. Track 1.m4a" in files_in_dir
    assert expected_dir / "02. Track 2.m4a" in files_in_dir
    assert expected_dir / "lala" not in files_in_dir

    with connect(config) as conn:
        cursor = conn.execute("SELECT source_path FROM releases")
        assert Path(cursor.fetchone()[0]) == expected_dir
        cursor = conn.execute("SELECT source_path FROM tracks")
        assert {Path(r[0]) for r in cursor} == {
            expected_dir / "01. Track 1.m4a",
            expected_dir / "02. Track 2.m4a",
        }

def test_update_cache_rename_source_files_collisions(config: Config) -> None:
    """Test that colliding directory and file names are deduplicated when renaming."""
    config = dataclasses.replace(config, rename_source_files=True)
    # Three copies of the same directory, and two instances of Track 1.
    shutil.copytree(TEST_RELEASE_1, config.music_source_dir / TEST_RELEASE_1.name)
    shutil.copyfile(
        config.music_source_dir / TEST_RELEASE_1.name / "01.m4a",
        config.music_source_dir / TEST_RELEASE_1.name / "haha.m4a",
    )
    shutil.copytree(
        config.music_source_dir / TEST_RELEASE_1.name, config.music_source_dir / "Number 2"
    )
    shutil.copytree(
        config.music_source_dir / TEST_RELEASE_1.name, config.music_source_dir / "Number 3"
    )
    update_cache(config)

    release_dirs = list(config.music_source_dir.iterdir())
    for expected_dir in [
        config.music_source_dir / "BLACKPINK - 1990. I Love Blackpink [NEW]",
        config.music_source_dir / "BLACKPINK - 1990. I Love Blackpink [NEW] [2]",
        config.music_source_dir / "BLACKPINK - 1990. 
I Love Blackpink [NEW] [3]", ]: assert expected_dir in release_dirs files_in_dir = list(expected_dir.iterdir()) assert expected_dir / "01. Track 1.m4a" in files_in_dir assert expected_dir / "01. Track 1 [2].m4a" in files_in_dir assert expected_dir / "02. Track 2.m4a" in files_in_dir with connect(config) as conn: cursor = conn.execute( "SELECT id FROM releases WHERE source_path = ?", (str(expected_dir),) ) release_id = cursor.fetchone()[0] assert release_id cursor = conn.execute( "SELECT source_path FROM tracks WHERE release_id = ?", (release_id,) ) assert {Path(r[0]) for r in cursor} == { expected_dir / "01. Track 1.m4a", expected_dir / "01. Track 1 [2].m4a", expected_dir / "02. Track 2.m4a", } def test_update_cache_releases_updates_full_text_search(config: Config) -> None: release_dir = config.music_source_dir / TEST_RELEASE_1.name shutil.copytree(TEST_RELEASE_1, release_dir) update_cache_for_releases(config, [release_dir]) with connect(config) as conn: cursor = conn.execute( """ SELECT rowid, * FROM rules_engine_fts """ ) print([dict(x) for x in cursor]) cursor = conn.execute( """ SELECT rowid, * FROM tracks """ ) print([dict(x) for x in cursor]) with connect(config) as conn: cursor = conn.execute( """ SELECT t.source_path FROM rules_engine_fts s JOIN tracks t ON t.rowid = s.rowid WHERE s.tracktitle MATCH 'r a c k' """ ) fnames = {Path(r["source_path"]) for r in cursor} assert fnames == { release_dir / "01.m4a", release_dir / "02.m4a", } # And then test the DELETE+INSERT behavior. And that the query still works. update_cache_for_releases(config, [release_dir], force=True) with connect(config) as conn: cursor = conn.execute( """ SELECT t.source_path FROM rules_engine_fts s JOIN tracks t ON t.rowid = s.rowid WHERE s.tracktitle MATCH 'r a c k' """ ) fnames = {Path(r["source_path"]) for r in cursor} assert fnames == { release_dir / "01.m4a", release_dir / "02.m4a", } def test_update_cache_collages(config: Config) -> None: shutil.copytree(TEST_RELEASE_2, config.music_source_dir / TEST_RELEASE_2.name) shutil.copytree(TEST_COLLAGE_1, config.music_source_dir / "!collages") update_cache(config) # Assert that the collage metadata was read correctly. with connect(config) as conn: cursor = conn.execute("SELECT name, source_mtime FROM collages") rows = cursor.fetchall() assert len(rows) == 1 row = rows[0] assert row["name"] == "Rose Gold" assert row["source_mtime"] cursor = conn.execute( "SELECT collage_name, release_id, position FROM collages_releases WHERE NOT missing" ) rows = cursor.fetchall() assert len(rows) == 1 row = rows[0] assert row["collage_name"] == "Rose Gold" assert row["release_id"] == "ilovecarly" assert row["position"] == 1 def test_update_cache_collages_missing_release_id(config: Config) -> None: shutil.copytree(TEST_COLLAGE_1, config.music_source_dir / "!collages") update_cache(config) # Assert that the releases in the collage were read as missing. with connect(config) as conn: cursor = conn.execute("SELECT COUNT(*) FROM collages_releases WHERE missing") assert cursor.fetchone()[0] == 2 # Assert that source file was updated to set the releases missing. 
with (config.music_source_dir / "!collages" / "Rose Gold.toml").open("rb") as fp: data = tomllib.load(fp) assert len(data["releases"]) == 2 assert len([r for r in data["releases"] if r["missing"]]) == 2 shutil.copytree(TEST_RELEASE_2, config.music_source_dir / TEST_RELEASE_2.name) shutil.copytree(TEST_RELEASE_3, config.music_source_dir / TEST_RELEASE_3.name) update_cache(config) # Assert that the releases in the collage were unflagged as missing. with connect(config) as conn: cursor = conn.execute("SELECT COUNT(*) FROM collages_releases WHERE NOT missing") assert cursor.fetchone()[0] == 2 # Assert that source file was updated to remove the missing flag. with (config.music_source_dir / "!collages" / "Rose Gold.toml").open("rb") as fp: data = tomllib.load(fp) assert len([r for r in data["releases"] if "missing" not in r]) == 2 def test_update_cache_collages_missing_release_id_multiprocessing(config: Config) -> None: shutil.copytree(TEST_COLLAGE_1, config.music_source_dir / "!collages") update_cache(config) # Assert that the releases in the collage were read as missing. with connect(config) as conn: cursor = conn.execute("SELECT COUNT(*) FROM collages_releases WHERE missing") assert cursor.fetchone()[0] == 2 # Assert that source file was updated to set the releases missing. with (config.music_source_dir / "!collages" / "Rose Gold.toml").open("rb") as fp: data = tomllib.load(fp) assert len(data["releases"]) == 2 assert len([r for r in data["releases"] if r["missing"]]) == 2 shutil.copytree(TEST_RELEASE_2, config.music_source_dir / TEST_RELEASE_2.name) shutil.copytree(TEST_RELEASE_3, config.music_source_dir / TEST_RELEASE_3.name) update_cache(config, force_multiprocessing=True) # Assert that the releases in the collage were unflagged as missing. with connect(config) as conn: cursor = conn.execute("SELECT COUNT(*) FROM collages_releases WHERE NOT missing") assert cursor.fetchone()[0] == 2 # Assert that source file was updated to remove the missing flag. with (config.music_source_dir / "!collages" / "Rose Gold.toml").open("rb") as fp: data = tomllib.load(fp) assert len([r for r in data["releases"] if "missing" not in r]) == 2 def test_update_cache_collages_on_release_rename(config: Config) -> None: """ Test that a renamed release source directory does not remove the release from any collages. This can occur because the rename operation is executed in SQL as release deletion followed by release creation. """ shutil.copytree(TEST_COLLAGE_1, config.music_source_dir / "!collages") shutil.copytree(TEST_RELEASE_2, config.music_source_dir / TEST_RELEASE_2.name) shutil.copytree(TEST_RELEASE_3, config.music_source_dir / TEST_RELEASE_3.name) update_cache(config) (config.music_source_dir / TEST_RELEASE_2.name).rename(config.music_source_dir / "lalala") update_cache(config) with connect(config) as conn: cursor = conn.execute("SELECT collage_name, release_id, position FROM collages_releases") rows = [dict(r) for r in cursor] assert rows == [ {"collage_name": "Rose Gold", "release_id": "ilovecarly", "position": 1}, {"collage_name": "Rose Gold", "release_id": "ilovenewjeans", "position": 2}, ] # Assert that source file was not updated to remove the release. 
with (config.music_source_dir / "!collages" / "Rose Gold.toml").open("rb") as fp: data = tomllib.load(fp) assert not [r for r in data["releases"] if "missing" in r] assert len(data["releases"]) == 2 def test_update_cache_playlists(config: Config) -> None: shutil.copytree(TEST_RELEASE_2, config.music_source_dir / TEST_RELEASE_2.name) shutil.copytree(TEST_PLAYLIST_1, config.music_source_dir / "!playlists") update_cache(config) # Assert that the playlist metadata was read correctly. with connect(config) as conn: cursor = conn.execute("SELECT name, source_mtime, cover_path FROM playlists") rows = cursor.fetchall() assert len(rows) == 1 row = rows[0] assert row["name"] == "Lala Lisa" assert row["source_mtime"] is not None assert row["cover_path"] == str(config.music_source_dir / "!playlists" / "Lala Lisa.jpg") cursor = conn.execute( "SELECT playlist_name, track_id, position FROM playlists_tracks ORDER BY position" ) assert [dict(r) for r in cursor] == [ {"playlist_name": "Lala Lisa", "track_id": "iloveloona", "position": 1}, {"playlist_name": "Lala Lisa", "track_id": "ilovetwice", "position": 2}, ] def test_update_cache_playlists_missing_track_id(config: Config) -> None: shutil.copytree(TEST_PLAYLIST_1, config.music_source_dir / "!playlists") update_cache(config) # Assert that the tracks in the playlist were read as missing. with connect(config) as conn: cursor = conn.execute("SELECT COUNT(*) FROM playlists_tracks WHERE missing") assert cursor.fetchone()[0] == 2 # Assert that source file was updated to set the tracks missing. with (config.music_source_dir / "!playlists" / "Lala Lisa.toml").open("rb") as fp: data = tomllib.load(fp) assert len(data["tracks"]) == 2 assert len([r for r in data["tracks"] if r["missing"]]) == 2 shutil.copytree(TEST_RELEASE_2, config.music_source_dir / TEST_RELEASE_2.name) update_cache(config) # Assert that the tracks in the playlist were unflagged as missing. with connect(config) as conn: cursor = conn.execute("SELECT COUNT(*) FROM playlists_tracks WHERE NOT missing") assert cursor.fetchone()[0] == 2 # Assert that source file was updated to remove the missing flag. with (config.music_source_dir / "!playlists" / "Lala Lisa.toml").open("rb") as fp: data = tomllib.load(fp) assert len([r for r in data["tracks"] if "missing" not in r]) == 2 @pytest.mark.parametrize("multiprocessing", [True, False]) def test_update_releases_updates_collages_description_meta( config: Config, multiprocessing: bool ) -> None: shutil.copytree(TEST_RELEASE_1, config.music_source_dir / TEST_RELEASE_1.name) shutil.copytree(TEST_RELEASE_2, config.music_source_dir / TEST_RELEASE_2.name) shutil.copytree(TEST_RELEASE_3, config.music_source_dir / TEST_RELEASE_3.name) shutil.copytree(TEST_COLLAGE_1, config.music_source_dir / "!collages") cpath = config.music_source_dir / "!collages" / "Rose Gold.toml" # First cache update: releases are inserted, collage is new. This should update the collage # TOML. update_cache(config) with cpath.open("r") as fp: assert ( fp.read() == """\ releases = [ { uuid = "ilovecarly", description_meta = "Carly Rae Jepsen - 1990. I Love Carly" }, { uuid = "ilovenewjeans", description_meta = "NewJeans - 1990. I Love NewJeans" }, ] """ ) # Now prep for the second update. Reset the TOML to have garbage again, and update the database # such that the virtual dirnames are also incorrect. 
with cpath.open("w") as fp: fp.write( """\ [[releases]] uuid = "ilovecarly" description_meta = "lalala" [[releases]] uuid = "ilovenewjeans" description_meta = "hahaha" """ ) # Second cache update: releases exist, collages exist, release is "updated." This should also # trigger a metadata update. update_cache_for_releases(config, force=True, force_multiprocessing=multiprocessing) with cpath.open("r") as fp: assert ( fp.read() == """\ releases = [ { uuid = "ilovecarly", description_meta = "Carly Rae Jepsen - 1990. I Love Carly" }, { uuid = "ilovenewjeans", description_meta = "NewJeans - 1990. I Love NewJeans" }, ] """ ) @pytest.mark.parametrize("multiprocessing", [True, False]) def test_update_tracks_updates_playlists_description_meta( config: Config, multiprocessing: bool ) -> None: shutil.copytree(TEST_RELEASE_2, config.music_source_dir / TEST_RELEASE_2.name) shutil.copytree(TEST_PLAYLIST_1, config.music_source_dir / "!playlists") ppath = config.music_source_dir / "!playlists" / "Lala Lisa.toml" # First cache update: tracks are inserted, playlist is new. This should update the playlist # TOML. update_cache(config) with ppath.open("r") as fp: assert ( fp.read() == """\ tracks = [ { uuid = "iloveloona", description_meta = "Carly Rae Jepsen - Track 1.m4a" }, { uuid = "ilovetwice", description_meta = "Carly Rae Jepsen - Track 2.m4a" }, ] """ ) # Now prep for the second update. Reset the TOML to have garbage again, and update the database # such that the virtual filenames are also incorrect. with ppath.open("w") as fp: fp.write( """\ [[tracks]] uuid = "iloveloona" description_meta = "lalala" [[tracks]] uuid = "ilovetwice" description_meta = "hahaha" """ ) # Second cache update: tracks exist, playlists exist, track is "updated." This should also # trigger a metadata update. update_cache_for_releases(config, force=True, force_multiprocessing=multiprocessing) with ppath.open("r") as fp: assert ( fp.read() == """\ tracks = [ { uuid = "iloveloona", description_meta = "Carly Rae Jepsen - Track 1.m4a" }, { uuid = "ilovetwice", description_meta = "Carly Rae Jepsen - Track 2.m4a" }, ] """ ) def test_update_cache_playlists_on_release_rename(config: Config) -> None: """ Test that a renamed release source directory does not remove any of its tracks any playlists. This can occur because when a release is renamed, we remove all tracks from the database and then reinsert them. """ shutil.copytree(TEST_PLAYLIST_1, config.music_source_dir / "!playlists") shutil.copytree(TEST_RELEASE_2, config.music_source_dir / TEST_RELEASE_2.name) update_cache(config) (config.music_source_dir / TEST_RELEASE_2.name).rename(config.music_source_dir / "lalala") update_cache(config) with connect(config) as conn: cursor = conn.execute("SELECT playlist_name, track_id, position FROM playlists_tracks") rows = [dict(r) for r in cursor] assert rows == [ {"playlist_name": "Lala Lisa", "track_id": "iloveloona", "position": 1}, {"playlist_name": "Lala Lisa", "track_id": "ilovetwice", "position": 2}, ] # Assert that source file was not updated to remove the track. 
with (config.music_source_dir / "!playlists" / "Lala Lisa.toml").open("rb") as fp: data = tomllib.load(fp) assert not [t for t in data["tracks"] if "missing" in t] assert len(data["tracks"]) == 2 @pytest.mark.usefixtures("seeded_cache") def test_list_releases(config: Config) -> None: expected = [ CachedRelease( datafile_mtime="999", id="r1", source_path=Path(config.music_source_dir / "r1"), cover_image_path=None, added_at="0000-01-01T00:00:00+00:00", albumtitle="Release 1", releasetype="album", year=2023, disctotal=1, new=False, genres=["Techno", "Deep House"], labels=["Silk Music"], albumartists=ArtistMapping(main=[Artist("Techno Man"), Artist("Bass Man")]), metahash="1", ), CachedRelease( datafile_mtime="999", id="r2", source_path=Path(config.music_source_dir / "r2"), cover_image_path=Path(config.music_source_dir / "r2" / "cover.jpg"), added_at="0000-01-01T00:00:00+00:00", albumtitle="Release 2", releasetype="album", year=2021, disctotal=1, new=False, genres=["Classical"], labels=["Native State"], albumartists=ArtistMapping( main=[Artist("Violin Woman")], guest=[Artist("Conductor Woman")] ), metahash="2", ), CachedRelease( datafile_mtime="999", id="r3", source_path=Path(config.music_source_dir / "r3"), cover_image_path=None, added_at="0000-01-01T00:00:00+00:00", albumtitle="Release 3", releasetype="album", year=2021, disctotal=1, new=True, genres=[], labels=[], albumartists=ArtistMapping(), metahash="3", ), ] assert list_releases(config) == expected assert list_releases(config, ["r1"]) == expected[:1] @pytest.mark.usefixtures("seeded_cache") def test_get_release_and_associated_tracks(config: Config) -> None: release = get_release(config, "r1") assert release is not None assert release == CachedRelease( datafile_mtime="999", id="r1", source_path=Path(config.music_source_dir / "r1"), cover_image_path=None, added_at="0000-01-01T00:00:00+00:00", albumtitle="Release 1", releasetype="album", year=2023, disctotal=1, new=False, genres=["Techno", "Deep House"], labels=["Silk Music"], albumartists=ArtistMapping(main=[Artist("Techno Man"), Artist("Bass Man")]), metahash="1", ) expected_tracks = [ CachedTrack( id="t1", source_path=config.music_source_dir / "r1" / "01.m4a", source_mtime="999", tracktitle="Track 1", tracknumber="01", tracktotal=2, discnumber="01", disctotal=1, duration_seconds=120, trackartists=ArtistMapping(main=[Artist("Techno Man"), Artist("Bass Man")]), metahash="1", release=release, ), CachedTrack( id="t2", source_path=config.music_source_dir / "r1" / "02.m4a", source_mtime="999", tracktitle="Track 2", tracknumber="02", tracktotal=2, discnumber="01", disctotal=1, duration_seconds=240, trackartists=ArtistMapping(main=[Artist("Techno Man"), Artist("Bass Man")]), metahash="2", release=release, ), ]
assert get_tracks_associated_with_release(config, release) == expected_tracks
22
2023-10-09 14:42:23+00:00
24k
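The two parametrized tests in the record above share one pattern: write stale description_meta values into a collage/playlist TOML, force a cache update, and assert that the file was rewritten from the database. The sketch below illustrates that read-compare-rewrite cycle under stated assumptions: sync_description_meta and the inline TOML writer are hypothetical, not rose's actual serializer, and since stdlib tomllib is read-only the writing is done by hand.

import tomllib

def sync_description_meta(path, fresh_meta):
    # fresh_meta: dict mapping uuid -> up-to-date description_meta from the cache.
    with open(path, "rb") as fp:
        data = tomllib.load(fp)
    changed = False
    for entry in data.get("releases", []):
        want = fresh_meta.get(entry["uuid"])
        if want is not None and entry.get("description_meta") != want:
            entry["description_meta"] = want
            changed = True
    if changed:
        # Hand-rolled writer for this one shape of document (tomllib cannot dump).
        lines = ["releases = ["]
        for e in data["releases"]:
            lines.append(f'    {{ uuid = "{e["uuid"]}", description_meta = "{e["description_meta"]}" }},')
        lines.append("]")
        with open(path, "w") as fp:
            fp.write("\n".join(lines) + "\n")
    return changed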
zhaoyizhou1123/mbrcsl
examples/roboverse/run_mbrcsl_mlpdyn_roboverse.py
[ { "identifier": "EnsembleDynamicsModel", "path": "offlinerlkit/modules/dynamics_module.py", "snippet": "class EnsembleDynamicsModel(nn.Module):\n def __init__(\n self,\n obs_dim: int,\n action_dim: int,\n hidden_dims: Union[List[int], Tuple[int]],\n num_ensemble: in...
import numpy as np
import torch
import roboverse
import argparse
import os
import random
import pickle
import datetime
from copy import deepcopy
from typing import Dict, Tuple
from collections import defaultdict
from offlinerlkit.modules import EnsembleDynamicsModel
from offlinerlkit.dynamics import EnsembleDynamics
from offlinerlkit.utils.termination_fns import get_termination_fn
from offlinerlkit.utils.scaler import StandardScaler
from offlinerlkit.utils.roboverse_utils import PickPlaceObsWrapper, DoubleDrawerObsWrapper, get_pickplace_dataset, get_doubledrawer_dataset
from offlinerlkit.utils.logger import Logger, make_log_dirs
from offlinerlkit.policy_trainer import RcslPolicyTrainer, DiffusionPolicyTrainer
from offlinerlkit.utils.none_or_str import none_or_str
from offlinerlkit.policy import SimpleDiffusionPolicy, AutoregressivePolicy
16,163
def get_rollout_trajs(logger: Logger, threshold = 0.9) -> Tuple[Dict[str, np.ndarray], float]: ''' Rollout trajectories or load existing trajectories. If rollout, call `get_rollout_policy()` and `get_dynamics()` first to get rollout policy and dynamics Return: rollout trajectories ''' ''' diffusion behavior policy rollout - threshold: only keep trajs with ret > [threshold] (valid). Usually the max return in dataset - args.num_need_traj: number of valid trajectories needed. End rollout when get enough trajs - args.rollout_epoch: maximum rollout epoch. Should be large ''' device = args.device num_need_traj = args.num_need_traj rollout_data_all = None # Initialize rollout_dataset as nothing num_traj_all = 0 # Initialize total number of rollout trajs start_epoch = 0 # Default starting epoch returns_all = [] if args.rollout_ckpt_path is not None: print(f"Will save rollout trajectories to dir {args.rollout_ckpt_path}") os.makedirs(args.rollout_ckpt_path, exist_ok=True) data_path = os.path.join(args.rollout_ckpt_path, "rollout.dat") if os.path.exists(data_path): # Load ckpt_data ckpt_dict = pickle.load(open(data_path,"rb")) # checkpoint in dict type rollout_data_all = ckpt_dict['data'] # should be dict num_traj_all = ckpt_dict['num_traj'] returns_all = ckpt_dict['return'] start_epoch = ckpt_dict['epoch'] + 1 # trajs = ckpt_dict print(f"Loaded checkpoint. Already have {num_traj_all} valid trajectories, start from epoch {start_epoch}.") if num_traj_all >= num_need_traj: print(f"Checkpoint trajectories are enough. Skip rollout procedure.") return rollout_data_all, max(returns_all) # Still need training, get dynamics and rollout policy get_dynamics() get_rollout_policy() with torch.no_grad(): for epoch in range(start_epoch, args.rollout_epoch): batch_indexs = np.random.randint(0, init_obss_dataset.shape[0], size=args.rollout_batch) init_obss = init_obss_dataset[batch_indexs] rollout_data, rollout_info = rollout_simple(init_obss, dynamics, diffusion_policy, args.horizon) # print(pred_state) # Only keep trajs with returns > threshold returns = rollout_info['returns'] rewards_full = rollout_info['rewards_full'] min_last_rewards = np.min(rewards_full[:, -3:], axis = -1) # (B,), final steps must be large max_last_rewards = np.max(rewards_full[:, -3:], axis = -1) max_cond = np.logical_and(max_last_rewards > 0.9, max_last_rewards < 2) min_cond = min_last_rewards > 0.7 valid_cond = np.logical_and(max_cond, min_cond) valid_trajs = np.arange(args.rollout_batch)[valid_cond] # np.array, indexs of all valid trajs valid_data_idxs = [rollout_data['traj_idxs'][i] in valid_trajs for i in range(rollout_data['traj_idxs'].shape[0])] for k in rollout_data: rollout_data[k] = rollout_data[k][valid_data_idxs] # Add rollout_data to rollout_data_all if rollout_data_all is None: # No trajs collected rollout_data_all = deepcopy(rollout_data) else: for k in rollout_data: rollout_data_all[k] = np.concatenate([rollout_data_all[k], rollout_data[k]], axis=0) num_traj_all += len(valid_trajs) returns_all += list(returns[valid_trajs]) print(f"-----------\nEpoch {epoch}, get {len(valid_trajs)} new trajs") logger.logkv("Epoch", epoch) logger.logkv("num_new_trajs", len(valid_trajs)) logger.logkv("num_total_trajs", num_traj_all) logger.dumpkvs() save_path = os.path.join(logger.checkpoint_dir, "rollout.dat") pickle.dump({'epoch': epoch, 'data': rollout_data_all, 'num_traj': num_traj_all, 'return': returns_all}, open(save_path, "wb")) if num_traj_all >= num_need_traj: # Get enough trajs, quit rollout print(f"End rollout. 
Total epochs used: {epoch+1}") break return rollout_data_all, max(returns_all) rollout_save_dir = make_log_dirs(args.task, args.algo_name, exp_name, vars(args), part="rollout") print(f"Logging diffusion rollout to {rollout_save_dir}") rollout_logger = Logger(rollout_save_dir, {"consoleout_backup": "stdout"}) rollout_logger.log_hyperparameters(vars(args)) rollout_dataset, max_offline_return = get_rollout_trajs(rollout_logger) # train rcsl_policy = AutoregressivePolicy( obs_dim=obs_dim, act_dim = action_dim, hidden_dims=args.rcsl_hidden_dims, lr = args.rcsl_lr, device = args.device ) lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(rcsl_policy.rcsl_optim, args.rcsl_epoch) task_name = args.task rcsl_log_dirs = make_log_dirs(task_name, args.algo_name, exp_name, vars(args), part='rcsl') # key: output file name, value: output handler type print(f"Logging autoregressive gaussian rcsl to {rcsl_log_dirs}") rcsl_output_config = { "consoleout_backup": "stdout", "policy_training_progress": "csv", "tb": "tensorboard" } rcsl_logger = Logger(rcsl_log_dirs, rcsl_output_config) rcsl_logger.log_hyperparameters(vars(args))
''' Recommended hyperparameters: pickplace, horizon=40, behavior_epoch=30 doubledraweropen, horizon=50, behavior_epoch=40 doubledrawercloseopen, horizon=80, behavior_epoch=40 ''' def get_args(): parser = argparse.ArgumentParser() # general parser.add_argument("--algo-name", type=str, default="mbrcsl_mlpdyn") parser.add_argument("--task", type=str, default="pickplace", help="task name") parser.add_argument("--seed", type=int, default=0) parser.add_argument("--num_workers", type=int, default=1, help="Dataloader workers, align with cpu number") parser.add_argument("--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu") parser.add_argument("--last_eval", action="store_false") # env config parser.add_argument('--data_dir', type=str, required=True) parser.add_argument('--horizon', type=int, default=40, help="max path length for pickplace") # transformer_autoregressive dynamics parser.add_argument("--dynamics_lr", type=float, default=1e-3) parser.add_argument("--dynamics_hidden_dims", type=int, nargs='*', default=[200, 200, 200, 200]) parser.add_argument("--dynamics_weight_decay", type=float, nargs='*', default=[2.5e-5, 5e-5, 7.5e-5, 7.5e-5, 1e-4]) parser.add_argument("--n_ensemble", type=int, default=7) parser.add_argument("--n_elites", type=int, default=5) parser.add_argument("--load_dynamics_path", type=none_or_str, default=None) # Behavior policy (diffusion) parser.add_argument("--behavior_epoch", type=int, default=30) parser.add_argument("--num_diffusion_iters", type=int, default=5, help="Number of diffusion steps") parser.add_argument('--behavior_batch', type=int, default=256) parser.add_argument('--load_diffusion_path', type=none_or_str, default=None) parser.add_argument('--task_weight', type=float, default=1.4, help="Weight on task data when training diffusion policy") parser.add_argument('--sample_ratio', type=float, default=0.8, help="Use (sample_ratio * num_total_data) data to train diffusion policy") # Rollout parser.add_argument('--rollout_ckpt_path', type=none_or_str, default=None, help="file dir, used to load/store rollout trajs" ) parser.add_argument('--rollout_epoch', type=int, default=200, help="Max number of epochs to rollout the policy") parser.add_argument('--num_need_traj', type=int, default=5000, help="Needed valid trajs in rollout") parser.add_argument("--rollout-batch", type=int, default=200, help="Number of trajs to be sampled at one time") # RCSL policy (mlp) parser.add_argument("--rcsl_hidden_dims", type=int, nargs='*', default=[200, 200, 200, 200]) parser.add_argument("--rcsl_lr", type=float, default=1e-3) parser.add_argument("--rcsl_batch", type=int, default=256) parser.add_argument("--rcsl_epoch", type=int, default=100) parser.add_argument("--eval_episodes", type=int, default=100) parser.add_argument("--holdout_ratio", type=float, default=0.2) return parser.parse_args() def rollout_simple( init_obss: np.ndarray, dynamics, rollout_policy: SimpleDiffusionPolicy, rollout_length: int ) -> Tuple[Dict[str, np.ndarray], Dict]: ''' Only serves for non-terminal cases Sample a batch of trajectories at the same time. 
Output rollout_transitions contain keys: obss, next_obss, actions rewards, (N,1) rtgs, (N,1) traj_idxs, (N) ''' num_transitions = 0 rewards_arr = np.array([]) rollout_transitions = defaultdict(list) batch_size = init_obss.shape[0] valid_idxs = np.arange(init_obss.shape[0]) # maintain current valid trajectory indexes returns = np.zeros(init_obss.shape[0]) # maintain return of each trajectory acc_returns = np.zeros(init_obss.shape[0]) # maintain accumulated return of each valid trajectory max_rewards = np.zeros(init_obss.shape[0]) # maintain max reward seen in trajectory rewards_full = np.zeros((init_obss.shape[0], rollout_length)) # full rewards (batch, H) # rollout observations = init_obss goal = np.zeros((init_obss.shape[0],1), dtype = np.float32) for t in range(rollout_length): actions = rollout_policy.select_action(observations, goal) next_observations, rewards, terminals, info = dynamics.step(observations, actions) rollout_transitions["observations"].append(observations) rollout_transitions["next_observations"].append(next_observations) rollout_transitions["actions"].append(actions) rollout_transitions["rewards"].append(rewards) rollout_transitions["terminals"].append(terminals) rollout_transitions["traj_idxs"].append(valid_idxs) rollout_transitions["acc_rets"].append(acc_returns) rewards = rewards.reshape(batch_size) # (B) rewards_full[:, t] = rewards num_transitions += len(observations) rewards_arr = np.append(rewards_arr, rewards.flatten()) returns = returns + rewards.flatten() # Update return (for valid idxs only) max_rewards = np.maximum(max_rewards, rewards.flatten()) # Update max reward acc_returns = acc_returns + rewards.flatten() observations = deepcopy(next_observations) for k, v in rollout_transitions.items(): rollout_transitions[k] = np.concatenate(v, axis=0) traj_idxs = rollout_transitions["traj_idxs"] rtgs = returns[traj_idxs] - rollout_transitions["acc_rets"] # rtgs = returns[traj_idxs] rollout_transitions["rtgs"] = rtgs[..., None] # (N,1) return rollout_transitions, \ {"num_transitions": num_transitions, "reward_mean": rewards_arr.mean(), "returns": returns, "max_rewards": max_rewards, "rewards_full": rewards_full} def train(args=get_args()): # seed random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) torch.cuda.manual_seed_all(args.seed) torch.backends.cudnn.deterministic = True # create env and dataset if args.task == 'pickplace': env = roboverse.make('Widow250PickTray-v0') env = PickPlaceObsWrapper(env) obs_space = env.observation_space args.obs_shape = obs_space.shape obs_dim = np.prod(args.obs_shape) args.action_shape = env.action_space.shape action_dim = np.prod(args.action_shape) prior_data_path = os.path.join(args.data_dir, "pickplace_prior.npy") task_data_path = os.path.join(args.data_dir, "pickplace_task.npy") diff_dataset, _ = get_pickplace_dataset( prior_data_path=prior_data_path, task_data_path=task_data_path, sample_ratio =args.sample_ratio, task_weight=args.task_weight) dyn_dataset, init_obss_dataset = get_pickplace_dataset( prior_data_path=prior_data_path, task_data_path=task_data_path) elif args.task == 'doubledraweropen': env = roboverse.make('Widow250DoubleDrawerOpenGraspNeutral-v0') env = DoubleDrawerObsWrapper(env) obs_space = env.observation_space args.obs_shape = obs_space.shape obs_dim = np.prod(args.obs_shape) args.action_shape = env.action_space.shape action_dim = np.prod(args.action_shape) prior_data_path = os.path.join(args.data_dir, "closed_drawer_prior.npy") task_data_path = os.path.join(args.data_dir, 
"drawer_task.npy") diff_dataset, _ = get_doubledrawer_dataset( prior_data_path=prior_data_path, task_data_path=task_data_path, sample_ratio =args.sample_ratio, task_weight=args.task_weight) dyn_dataset, init_obss_dataset = get_doubledrawer_dataset( prior_data_path=prior_data_path, task_data_path=task_data_path) elif args.task == 'doubledrawercloseopen': env = roboverse.make('Widow250DoubleDrawerCloseOpenGraspNeutral-v0') env = DoubleDrawerObsWrapper(env) obs_space = env.observation_space args.obs_shape = obs_space.shape obs_dim = np.prod(args.obs_shape) args.action_shape = env.action_space.shape action_dim = np.prod(args.action_shape) prior_data_path = os.path.join(args.data_dir, "blocked_drawer_1_prior.npy") task_data_path = os.path.join(args.data_dir, "drawer_task.npy") diff_dataset, _ = get_doubledrawer_dataset( prior_data_path=prior_data_path, task_data_path=task_data_path, sample_ratio =args.sample_ratio, task_weight=args.task_weight) dyn_dataset, init_obss_dataset = get_doubledrawer_dataset( prior_data_path=prior_data_path, task_data_path=task_data_path) elif args.task == 'doubledrawerpickplaceopen': env = roboverse.make('Widow250DoubleDrawerPickPlaceOpenGraspNeutral-v0') env = DoubleDrawerObsWrapper(env) obs_space = env.observation_space args.obs_shape = obs_space.shape obs_dim = np.prod(args.obs_shape) args.action_shape = env.action_space.shape action_dim = np.prod(args.action_shape) prior_data_path = os.path.join(args.data_dir, "blocked_drawer_2_prior.npy") task_data_path = os.path.join(args.data_dir, "drawer_task.npy") diff_dataset, _ = get_doubledrawer_dataset( prior_data_path=prior_data_path, task_data_path=task_data_path, sample_ratio =args.sample_ratio, task_weight=args.task_weight) dyn_dataset, init_obss_dataset = get_doubledrawer_dataset( prior_data_path=prior_data_path, task_data_path=task_data_path) else: raise NotImplementedError env.reset(seed=args.seed) timestamp = datetime.datetime.now().strftime("%y-%m%d-%H%M%S") exp_name = f"timestamp_{timestamp}&{args.seed}" log_dirs = make_log_dirs(args.task, args.algo_name, exp_name, vars(args), part = "dynamics") # key: output file name, value: output handler type print(f"Logging dynamics to {log_dirs}") output_config = { "consoleout_backup": "stdout", "policy_training_progress": "csv", "dynamics_training_progress": "csv", "tb": "tensorboard" } logger = Logger(log_dirs, output_config) logger.log_hyperparameters(vars(args)) dynamics_model = EnsembleDynamicsModel( obs_dim=obs_dim, action_dim=action_dim, hidden_dims=args.dynamics_hidden_dims, num_ensemble=args.n_ensemble, num_elites=args.n_elites, weight_decays=args.dynamics_weight_decay, device=args.device ) dynamics_optim = torch.optim.Adam( dynamics_model.parameters(), lr=args.dynamics_lr ) scaler = StandardScaler() termination_fn = get_termination_fn(task=args.task) dynamics = EnsembleDynamics( dynamics_model, dynamics_optim, scaler, termination_fn ) # create rollout policy diffusion_policy = SimpleDiffusionPolicy( obs_shape = args.obs_shape, act_shape= args.action_shape, feature_dim = 1, num_training_steps = args.behavior_epoch, num_diffusion_steps = args.num_diffusion_iters, device = args.device ) diff_lr_scheduler = diffusion_policy.get_lr_scheduler() diff_log_dirs = make_log_dirs(args.task, args.algo_name, exp_name, vars(args), part="diffusion") print(f"Logging diffusion to {diff_log_dirs}") # key: output file name, value: output handler type diff_output_config = { "consoleout_backup": "stdout", "policy_training_progress": "csv", "dynamics_training_progress": "csv", "tb": 
"tensorboard" } diff_logger = Logger(diff_log_dirs, diff_output_config) diff_logger.log_hyperparameters(vars(args)) diff_policy_trainer = DiffusionPolicyTrainer( policy = diffusion_policy, offline_dataset = diff_dataset, logger = diff_logger, seed = args.seed, epoch = args.behavior_epoch, batch_size = args.behavior_batch, lr_scheduler = diff_lr_scheduler, horizon = args.horizon, num_workers = args.num_workers, has_terminal = False, ) # Training helper functions def get_dynamics(): ''' Load or train dynamics model ''' if args.load_dynamics_path: print(f"Load dynamics from {args.load_dynamics_path}") dynamics.load(args.load_dynamics_path) else: print(f"Train dynamics") dynamics.train(dyn_dataset, logger) def get_rollout_policy(): ''' Load or train rollout policy Return: rollout policy ''' if args.load_diffusion_path is not None: print(f"Load behavior policy from {args.load_diffusion_path}") with open(args.load_diffusion_path, 'rb') as f: state_dict = torch.load(f, map_location= args.device) diffusion_policy.load_state_dict(state_dict) else: print(f"Train diffusion behavior policy") diff_policy_trainer.train() # save checkpoint periodically def get_rollout_trajs(logger: Logger, threshold = 0.9) -> Tuple[Dict[str, np.ndarray], float]: ''' Rollout trajectories or load existing trajectories. If rollout, call `get_rollout_policy()` and `get_dynamics()` first to get rollout policy and dynamics Return: rollout trajectories ''' ''' diffusion behavior policy rollout - threshold: only keep trajs with ret > [threshold] (valid). Usually the max return in dataset - args.num_need_traj: number of valid trajectories needed. End rollout when get enough trajs - args.rollout_epoch: maximum rollout epoch. Should be large ''' device = args.device num_need_traj = args.num_need_traj rollout_data_all = None # Initialize rollout_dataset as nothing num_traj_all = 0 # Initialize total number of rollout trajs start_epoch = 0 # Default starting epoch returns_all = [] if args.rollout_ckpt_path is not None: print(f"Will save rollout trajectories to dir {args.rollout_ckpt_path}") os.makedirs(args.rollout_ckpt_path, exist_ok=True) data_path = os.path.join(args.rollout_ckpt_path, "rollout.dat") if os.path.exists(data_path): # Load ckpt_data ckpt_dict = pickle.load(open(data_path,"rb")) # checkpoint in dict type rollout_data_all = ckpt_dict['data'] # should be dict num_traj_all = ckpt_dict['num_traj'] returns_all = ckpt_dict['return'] start_epoch = ckpt_dict['epoch'] + 1 # trajs = ckpt_dict print(f"Loaded checkpoint. Already have {num_traj_all} valid trajectories, start from epoch {start_epoch}.") if num_traj_all >= num_need_traj: print(f"Checkpoint trajectories are enough. 
Skip rollout procedure.") return rollout_data_all, max(returns_all) # Still need training, get dynamics and rollout policy get_dynamics() get_rollout_policy() with torch.no_grad(): for epoch in range(start_epoch, args.rollout_epoch): batch_indexs = np.random.randint(0, init_obss_dataset.shape[0], size=args.rollout_batch) init_obss = init_obss_dataset[batch_indexs] rollout_data, rollout_info = rollout_simple(init_obss, dynamics, diffusion_policy, args.horizon) # print(pred_state) # Only keep trajs with returns > threshold returns = rollout_info['returns'] rewards_full = rollout_info['rewards_full'] min_last_rewards = np.min(rewards_full[:, -3:], axis = -1) # (B,), final steps must be large max_last_rewards = np.max(rewards_full[:, -3:], axis = -1) max_cond = np.logical_and(max_last_rewards > 0.9, max_last_rewards < 2) min_cond = min_last_rewards > 0.7 valid_cond = np.logical_and(max_cond, min_cond) valid_trajs = np.arange(args.rollout_batch)[valid_cond] # np.array, indexs of all valid trajs valid_data_idxs = [rollout_data['traj_idxs'][i] in valid_trajs for i in range(rollout_data['traj_idxs'].shape[0])] for k in rollout_data: rollout_data[k] = rollout_data[k][valid_data_idxs] # Add rollout_data to rollout_data_all if rollout_data_all is None: # No trajs collected rollout_data_all = deepcopy(rollout_data) else: for k in rollout_data: rollout_data_all[k] = np.concatenate([rollout_data_all[k], rollout_data[k]], axis=0) num_traj_all += len(valid_trajs) returns_all += list(returns[valid_trajs]) print(f"-----------\nEpoch {epoch}, get {len(valid_trajs)} new trajs") logger.logkv("Epoch", epoch) logger.logkv("num_new_trajs", len(valid_trajs)) logger.logkv("num_total_trajs", num_traj_all) logger.dumpkvs() save_path = os.path.join(logger.checkpoint_dir, "rollout.dat") pickle.dump({'epoch': epoch, 'data': rollout_data_all, 'num_traj': num_traj_all, 'return': returns_all}, open(save_path, "wb")) if num_traj_all >= num_need_traj: # Get enough trajs, quit rollout print(f"End rollout. Total epochs used: {epoch+1}") break return rollout_data_all, max(returns_all) rollout_save_dir = make_log_dirs(args.task, args.algo_name, exp_name, vars(args), part="rollout") print(f"Logging diffusion rollout to {rollout_save_dir}") rollout_logger = Logger(rollout_save_dir, {"consoleout_backup": "stdout"}) rollout_logger.log_hyperparameters(vars(args)) rollout_dataset, max_offline_return = get_rollout_trajs(rollout_logger) # train rcsl_policy = AutoregressivePolicy( obs_dim=obs_dim, act_dim = action_dim, hidden_dims=args.rcsl_hidden_dims, lr = args.rcsl_lr, device = args.device ) lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(rcsl_policy.rcsl_optim, args.rcsl_epoch) task_name = args.task rcsl_log_dirs = make_log_dirs(task_name, args.algo_name, exp_name, vars(args), part='rcsl') # key: output file name, value: output handler type print(f"Logging autoregressive gaussian rcsl to {rcsl_log_dirs}") rcsl_output_config = { "consoleout_backup": "stdout", "policy_training_progress": "csv", "tb": "tensorboard" } rcsl_logger = Logger(rcsl_log_dirs, rcsl_output_config) rcsl_logger.log_hyperparameters(vars(args))
policy_trainer = RcslPolicyTrainer(
10
2023-10-11 08:36:06+00:00
24k
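The rollout loop in the record above accepts a sampled trajectory only when its last three rewards pass both a max-reward and a min-reward test. A standalone sketch of that filter, using the script's own thresholds (the random rewards_full array is a stand-in for dynamics-model outputs, not real data):

import numpy as np

rng = np.random.default_rng(0)
rewards_full = rng.random((200, 40))  # (rollout_batch, horizon) rewards from the learned dynamics
max_last = np.max(rewards_full[:, -3:], axis=-1)
min_last = np.min(rewards_full[:, -3:], axis=-1)
# Thresholds copied from get_rollout_trajs: final steps must be large but bounded.
valid = (max_last > 0.9) & (max_last < 2) & (min_last > 0.7)
valid_trajs = np.arange(rewards_full.shape[0])[valid]
print(f"kept {len(valid_trajs)} of {rewards_full.shape[0]} trajectories")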
lmb-freiburg/ldce
ldm/models/diffusion/dpm_solver/sampler.py
[ { "identifier": "NoiseScheduleVP", "path": "ldm/models/diffusion/dpm_solver/dpm_solver.py", "snippet": "class NoiseScheduleVP:\n def __init__(\n self,\n schedule='discrete',\n betas=None,\n alphas_cumprod=None,\n continuous_beta_0=0.1,\n ...
import torch

from .dpm_solver import NoiseScheduleVP, model_wrapper, DPM_Solver
17,743
"""SAMPLING ONLY.""" class DPMSolverSampler(object): def __init__(self, model, **kwargs): super().__init__() self.model = model to_torch = lambda x: x.clone().detach().to(torch.float32).to(model.device) self.register_buffer('alphas_cumprod', to_torch(model.alphas_cumprod)) def register_buffer(self, name, attr): if type(attr) == torch.Tensor: if attr.device != torch.device("cuda"): attr = attr.to(torch.device("cuda")) setattr(self, name, attr) @torch.no_grad() def sample(self, S, batch_size, shape, conditioning=None, callback=None, normals_sequence=None, img_callback=None, quantize_x0=False, eta=0., mask=None, x0=None, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, verbose=True, x_T=None, log_every_t=100, unconditional_guidance_scale=1., unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... **kwargs ): if conditioning is not None: if isinstance(conditioning, dict): cbs = conditioning[list(conditioning.keys())[0]].shape[0] if cbs != batch_size: print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") else: if conditioning.shape[0] != batch_size: print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") # sampling C, H, W = shape size = (batch_size, C, H, W) # print(f'Data shape for DPM-Solver sampling is {size}, sampling steps {S}') device = self.model.betas.device if x_T is None: img = torch.randn(size, device=device) else: img = x_T ns = NoiseScheduleVP('discrete', alphas_cumprod=self.alphas_cumprod)
"""SAMPLING ONLY.""" class DPMSolverSampler(object): def __init__(self, model, **kwargs): super().__init__() self.model = model to_torch = lambda x: x.clone().detach().to(torch.float32).to(model.device) self.register_buffer('alphas_cumprod', to_torch(model.alphas_cumprod)) def register_buffer(self, name, attr): if type(attr) == torch.Tensor: if attr.device != torch.device("cuda"): attr = attr.to(torch.device("cuda")) setattr(self, name, attr) @torch.no_grad() def sample(self, S, batch_size, shape, conditioning=None, callback=None, normals_sequence=None, img_callback=None, quantize_x0=False, eta=0., mask=None, x0=None, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, verbose=True, x_T=None, log_every_t=100, unconditional_guidance_scale=1., unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... **kwargs ): if conditioning is not None: if isinstance(conditioning, dict): cbs = conditioning[list(conditioning.keys())[0]].shape[0] if cbs != batch_size: print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") else: if conditioning.shape[0] != batch_size: print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") # sampling C, H, W = shape size = (batch_size, C, H, W) # print(f'Data shape for DPM-Solver sampling is {size}, sampling steps {S}') device = self.model.betas.device if x_T is None: img = torch.randn(size, device=device) else: img = x_T ns = NoiseScheduleVP('discrete', alphas_cumprod=self.alphas_cumprod)
model_fn = model_wrapper(
1
2023-10-10 09:40:10+00:00
24k
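register_buffer in the sampler above is a plain-attribute shim, not nn.Module.register_buffer: it force-moves tensors to CUDA and attaches them with setattr. A device-agnostic variant of the same shim, written here as an assumption rather than the repo's code, avoids the hard-coded device:

import torch

def register_buffer(obj, name, attr, device=None):
    # Same behavior as the sampler's shim, but resolves the target device
    # from an argument (or CUDA availability) instead of assuming CUDA.
    if isinstance(attr, torch.Tensor):
        target = device or ("cuda" if torch.cuda.is_available() else "cpu")
        if attr.device != torch.device(target):
            attr = attr.to(target)
    setattr(obj, name, attr)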
spla-tam/SplaTAM
scripts/post_splatam_opt.py
[ { "identifier": "AzureKinectDataset", "path": "datasets/gradslam_datasets/azure.py", "snippet": "class AzureKinectDataset(GradSLAMDataset):\n def __init__(\n self,\n config_dict,\n basedir,\n sequence,\n stride: Optional[int] = None,\n start: Optional[int] = ...
import argparse
import os
import random
import sys
import shutil
import cv2
import numpy as np
import torch
import wandb
from importlib.machinery import SourceFileLoader
from tqdm import tqdm
from datasets.gradslam_datasets import (
    load_dataset_config,
    ICLDataset,
    ReplicaDataset,
    AzureKinectDataset,
    ScannetDataset,
    Ai2thorDataset,
    Record3DDataset,
    RealsenseDataset,
    TUMDataset,
    ScannetPPDataset,
    NeRFCaptureDataset
)
from utils.common_utils import seed_everything, save_seq_params, save_params, save_params_ckpt, save_seq_params_ckpt
from utils.recon_helpers import setup_camera
from utils.gs_helpers import (
    params2rendervar, params2depthplussilhouette,
    transformed_params2depthplussilhouette,
    transform_to_frame, report_progress, eval,
    l1_loss_v1, matrix_to_quaternion
)
from utils.gs_external import (
    calc_ssim, build_rotation,
    densify, get_expon_lr_func, update_learning_rate
)
from diff_gaussian_rasterization import GaussianRasterizer as Renderer
17,050
_BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, _BASE_DIR)

print("System Paths:")
for p in sys.path:
    print(p)

def get_dataset(config_dict, basedir, sequence, **kwargs):
    if config_dict["dataset_name"].lower() in ["icl"]:
        return ICLDataset(config_dict, basedir, sequence, **kwargs)
    elif config_dict["dataset_name"].lower() in ["replica"]:
        return ReplicaDataset(config_dict, basedir, sequence, **kwargs)
    elif config_dict["dataset_name"].lower() in ["azure", "azurekinect"]:
        return AzureKinectDataset(config_dict, basedir, sequence, **kwargs)
    elif config_dict["dataset_name"].lower() in ["scannet"]:
        return ScannetDataset(config_dict, basedir, sequence, **kwargs)
    elif config_dict["dataset_name"].lower() in ["ai2thor"]:
        return Ai2thorDataset(config_dict, basedir, sequence, **kwargs)
    elif config_dict["dataset_name"].lower() in ["record3d"]:
return Record3DDataset(config_dict, basedir, sequence, **kwargs)
7
2023-11-30 20:26:47+00:00
24k
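get_dataset in the record above dispatches on config_dict["dataset_name"] through an if/elif ladder. An equivalent table-driven sketch keeps additions to one line each; the registry dict is an assumption of this edit, while the class names come from the record's own imports:

from datasets.gradslam_datasets import (
    ICLDataset, ReplicaDataset, AzureKinectDataset,
    ScannetDataset, Ai2thorDataset, Record3DDataset,
)

_DATASETS = {
    "icl": ICLDataset,
    "replica": ReplicaDataset,
    "azure": AzureKinectDataset,
    "azurekinect": AzureKinectDataset,
    "scannet": ScannetDataset,
    "ai2thor": Ai2thorDataset,
    "record3d": Record3DDataset,
}

def get_dataset(config_dict, basedir, sequence, **kwargs):
    name = config_dict["dataset_name"].lower()
    if name not in _DATASETS:
        raise ValueError(f"Unknown dataset_name: {name!r}")
    return _DATASETS[name](config_dict, basedir, sequence, **kwargs)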
zhyever/PatchFusion
zoedepth/trainers/zoedepth_custom_trainer.py
[ { "identifier": "SILogLoss", "path": "zoedepth/trainers/loss_sample.py", "snippet": "class SILogLoss(nn.Module):\n \"\"\"SILog loss (pixel-wise)\"\"\"\n def __init__(self, beta=0.15):\n super(SILogLoss, self).__init__()\n self.name = 'SILog'\n self.beta = beta\n\n def forwa...
import os
import torch
import torch.cuda.amp as amp
import torch.nn as nn
import numpy as np
import wandb
import uuid
import torch.distributed as dist
import copy
import torch.optim as optim
import matplotlib.pyplot as plt
from zoedepth.trainers.loss_sample import SILogLoss, DistributionLoss
from zoedepth.trainers.loss import SILogLoss as DenseSILogLoss
from zoedepth.trainers.loss import BudgetConstraint, HistogramMatchingLoss, SSIM, ConsistencyLoss
from zoedepth.utils.config import DATASETS_CONFIG
from zoedepth.utils.misc import compute_metrics
from zoedepth.data.preprocess import get_black_border
from .base_trainer import BaseTrainer, is_rank_zero, colors, flatten
from torchvision import transforms
from PIL import Image
from tqdm import tqdm
from datetime import datetime as dt
from zoedepth.utils.misc import generatemask
15,571
plt.savefig('debug.png') pass else: pred = nn.functional.interpolate( pred[0:1], depths_gt.shape[-2:], mode='bilinear', align_corners=True)[0] depths_gt[torch.logical_not(mask)] = DATASETS_CONFIG[dataset]['max_depth'] if self.consistency_training: split_images = torch.split(images, 3, dim=1) images = torch.cat(split_images, dim=0) self.log_images(rgb={"Input": images[0, ...]}, depth={"GT": depths_gt[0], "PredictedMono": pred}, prefix="Train", min_depth=DATASETS_CONFIG[dataset]['min_depth'], max_depth=DATASETS_CONFIG[dataset]['max_depth']) return losses @torch.no_grad() def eval_infer(self, x, image_raw, bboxs=None, crop_area=None, dataset='u4k', bbox_raw=None): m = self.model.module if self.config.multigpu else self.model if dataset == 'u4k': base_h = 540 base_w = 960 elif dataset == 'gta': base_h = 270 base_w = 480 elif dataset == 'nyu': base_h = 120 * 2 base_w = 160 * 2 else: raise NotImplementedError if dataset == 'nyu': if self.sec_stage: images_crops = torch.split(x, 3, dim=1) bboxs_list = torch.split(bboxs, 1, dim=1) crop_areas = torch.split(crop_area, 1, dim=1) pred_depth_crops = [] for i, (img, bbox, crop_area) in enumerate(zip(images_crops, bboxs_list, crop_areas)): with amp.autocast(enabled=self.config.use_amp): if i == 0: out_dict = m(img, mode='eval', image_raw=image_raw, bbox=bbox[0], crop_area=crop_area, bbox_raw=bbox_raw[:, i, :] if bbox_raw is not None else None) # whole_depth_pred = out_dict['coarse_depth_pred'] pred_depth_crop = out_dict['metric_depth'] else: pred_depth_crop = m(img, mode='eval', image_raw=image_raw, bbox=bbox[0], crop_area=crop_area, bbox_raw=bbox_raw[:, i, :] if bbox_raw is not None else None)['metric_depth'] pred_depth_crop = nn.functional.interpolate( pred_depth_crop, (base_h, base_w), mode='bilinear', align_corners=True) pred_depth_crops.append(pred_depth_crop) x_start, y_start = [0, base_h], [0, base_w] pred_depth = torch.zeros((base_h*2, base_w*2)).cuda() inner_idx = 0 for ii, x in enumerate(x_start): for jj, y in enumerate(y_start): if self.use_blur: pred_depth[x: x+base_h, y: y+base_w] = pred_depth_crops[inner_idx].squeeze() # do not care about boundry during validation else: pred_depth[x: x+base_h, y: y+base_w] = pred_depth_crops[inner_idx].squeeze() inner_idx += 1 pred_depth = pred_depth.squeeze(dim=0) else: with amp.autocast(enabled=self.config.use_amp): pred_depth = m(x, mode='eval', image_raw=image_raw)['metric_depth'] else: if self.sec_stage: images_crops = torch.split(x, 3, dim=1) bboxs_list = torch.split(bboxs, 1, dim=1) crop_areas = torch.split(crop_area, 1, dim=1) pred_depth_crops = [] for i, (img, bbox, crop_area) in enumerate(zip(images_crops, bboxs_list, crop_areas)): with amp.autocast(enabled=self.config.use_amp): if i == 0: out_dict = m(img, mode='eval', image_raw=image_raw, bbox=bbox[0], crop_area=crop_area, bbox_raw=bbox_raw[:, i, :] if bbox_raw is not None else None) # whole_depth_pred = out_dict['coarse_depth_pred'] pred_depth_crop = out_dict['metric_depth'] else: pred_depth_crop = m(img, mode='eval', image_raw=image_raw, bbox=bbox[0], crop_area=crop_area, bbox_raw=bbox_raw[:, i, :] if bbox_raw is not None else None)['metric_depth'] pred_depth_crop = nn.functional.interpolate( pred_depth_crop, (base_h, base_w), mode='bilinear', align_corners=True) pred_depth_crops.append(pred_depth_crop) x_start, y_start = [0, base_h], [0, base_w] pred_depth = torch.zeros((base_h*2, base_w*2)).cuda() inner_idx = 0 for ii, x in enumerate(x_start): for jj, y in enumerate(y_start): if self.use_blur: pred_depth[x: x+base_h, y: y+base_w] = 
pred_depth_crops[inner_idx].squeeze() # do not care about boundry during validation else: pred_depth[x: x+base_h, y: y+base_w] = pred_depth_crops[inner_idx].squeeze() inner_idx += 1 pred_depth = pred_depth.squeeze(dim=0) else: with amp.autocast(enabled=self.config.use_amp): pred_depth = m(x, mode='eval', image_raw=image_raw)['metric_depth'] return pred_depth @torch.no_grad() def crop_aware_infer(self, x, image_raw): # if we are not avoiding the black border, we can just use the normal inference if not self.config.get("avoid_boundary", False): return self.eval_infer(x) # otherwise, we need to crop the image to avoid the black border # For now, this may be a bit slow due to converting to numpy and back # We assume no normalization is done on the input image # get the black border assert x.shape[0] == 1, "Only batch size 1 is supported for now" x_pil = transforms.ToPILImage()(x[0].cpu()) x_np = np.array(x_pil, dtype=np.uint8)
# MIT License # Copyright (c) 2022 Intelligent Systems Lab Org # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # File author: Zhenyu Li # This file is partly inspired from ZoeDepth (https://github.com/isl-org/ZoeDepth/blob/main/zoedepth/trainers/zoedepth_trainer.py); author: Shariq Farooq Bhat class Trainer(BaseTrainer): def __init__(self, config, model, train_loader, test_loader=None, device=None): self.addf = config.get("addf", False) self.lazy_epoch = -1 self.boostingdepth = config.get("boostingdepth", False) super().__init__(config, model, train_loader, test_loader=test_loader, device=device) self.device = device self.silog_loss = SILogLoss(beta=config.get("beta", 0.15)) self.dense_silog_loss = DenseSILogLoss(beta=config.get("beta", 0.15)) print("sigloss's beta is set to {}".format(config.get("beta", 0.15))) self.scaler = amp.GradScaler(enabled=self.config.use_amp) self.distribution_loss = DistributionLoss(max_depth=self.config.max_depth) self.sampled_training = config.get("sampled_training", False) self.sec_stage = config.get("sec_stage", False) self.multi_consistency = config.get("multi_consistency", False) self.use_blur = config.get("use_blur", False) self.dynamic = config.get("dynamic", False) if self.dynamic: self.dynamic_unupdate_rate = config.get("dynamic_unupdate_rate", 0.0) self.budget_loss = BudgetConstraint(loss_mu=0.0, flops_all=21552.5684, warm_up=True) self.use_scale_loss = config.get("use_scale_loss", False) if self.use_scale_loss: if config.get("scale_type", "ssim"): self.scale_loss = SSIM(window_size=config.get("window_size", int(11))) else: self.scale_loss = HistogramMatchingLoss(min_depth=self.config.min_depth, max_depth=self.config.max_depth) self.scale_target = config.get("scale_target", None) self.consistency_training = config.get("consistency_training", False) if self.consistency_training: self.consistency_target = config.get("consistency_target", None) self.consistency_loss = ConsistencyLoss(self.consistency_target, config.get("focus_flatten", False), config.get("w_p", 1.0)) print("current weight for consistency loss is {}. focus_flatten is {}. 
w_p is {}".format(self.config.w_consistency, config.get("focus_flatten", False), config.get("w_p", 1.0))) def train_on_batch(self, batch, train_step, step_rate): """ Expects a batch of images and depth as input batch["image"].shape : batch_size, c, h, w batch["depth"].shape : batch_size, 1, h, w """ images, depths_gt = batch['image'].to(self.device), batch['depth'].to(self.device) image_raw = batch.get("image_raw", None) if image_raw is not None: image_raw = image_raw.to(self.device) sample_points = None if self.sampled_training: sample_points = batch['sample_points'].to(self.device) bbox = batch.get("bbox", None) if bbox is not None: bbox = bbox.to(self.device) bbox_raw = batch.get("bbox_raw", None) if bbox_raw is not None: bbox_raw = bbox_raw.to(self.device) depth_raw = batch.get("depth_raw", None) if depth_raw is not None: depth_raw = depth_raw.to(self.device) crop_area = batch.get("crop_area", None) if crop_area is not None: crop_area = crop_area.to(self.device) shift = batch.get("shift", None) if shift is not None: shift = shift.to(self.device) dataset = batch['dataset'][0] b, c, h, w = images.size() mask = batch["mask"].to(self.device).to(torch.bool) sample_mask = batch.get("sample_mask", None) if sample_mask is not None: sample_mask = sample_mask.to(self.device).to(torch.bool) mask_raw = batch.get("mask_raw", None) if mask_raw is not None: mask_raw = mask_raw.to(self.device).to(torch.bool) losses = {} with amp.autocast(enabled=self.config.use_amp): if self.sampled_training: output = self.model(images, sample_points, mode='train', image_raw=image_raw, bbox=bbox, depth_raw=depth_raw, crop_area=crop_area, shift=shift, bbox_raw=bbox_raw) else: output = self.model(images, None, mode='train', image_raw=image_raw, bbox=bbox, depth_raw=depth_raw, crop_area=crop_area, shift=shift, bbox_raw=bbox_raw) if self.boostingdepth: if self.lazy_epoch < self.epoch: output.update_learning_rate() self.lazy_epoch = self.epoch input_dict = dict() input_dict['data_gtfake'] = depths_gt output.set_input_train_gt(input_dict) output.optimize_parameters() pred_depths = output.fake_B pred = output.fake_B # print(torch.min(pred), torch.max(pred)) losses = output.get_current_losses() else: pred_depths = output['metric_depth'] if self.sampled_training: sampled_depth_gt = sample_points[:, :, -1].float().unsqueeze(dim=-1) sampled_depth_gt = sampled_depth_gt.permute(0, 2, 1) if self.config.get("representation", "") == 'biLaplacian': # only for sampled training for now l_dist, l_si = self.distribution_loss(output, sampled_depth_gt, mask=sample_mask) loss = self.config.w_dist * l_dist + self.config.w_si * l_si losses['distribution_loss'] = l_dist losses['sigloss'] = l_si if self.multi_consistency: coarse, fine = output['coarse_depth_pred'], output['fine_depth_pred'] l_si_f = self.dense_silog_loss( fine, depths_gt, mask=mask, interpolate=True, return_interpolated=False) l_si_c = self.dense_silog_loss( coarse, depth_raw, mask=mask_raw, interpolate=True, return_interpolated=False) losses['sigloss_f'] = l_si_f losses['l_si_c'] = l_si_c loss += self.config.w_si * (l_si_f + l_si_c) else: if self.sampled_training: l_si = self.silog_loss( pred_depths, sampled_depth_gt, mask=sample_mask) loss = self.config.w_si * l_si losses[self.silog_loss.name] = l_si if self.multi_consistency: coarse, fine = output['coarse_depth_pred'], output['fine_depth_pred'] l_si_f = self.dense_silog_loss( fine, depths_gt, mask=mask, interpolate=True, return_interpolated=False) l_si_c = self.dense_silog_loss( coarse, depth_raw, mask=mask_raw, 
interpolate=True, return_interpolated=False) losses['sigloss_f'] = l_si_f losses['l_si_c'] = l_si_c loss += self.config.w_si * (l_si_f + l_si_c) else: if self.multi_consistency: #### here here here pred_depths, coarse, fine = output['metric_depth'], output['coarse_depth_pred'], output['fine_depth_pred'] if self.consistency_training: depths_gt = torch.split(depths_gt, 1, dim=1) depths_gt = torch.cat(depths_gt, dim=0) mask = torch.split(mask, 1, dim=-1) mask = torch.cat(mask, dim=0).permute(0, 3, 1, 2) mask_raw = torch.cat([mask_raw, mask_raw], dim=0) depth_raw = torch.cat([depth_raw, depth_raw], dim=0) temp_features = output.get('temp_features', None) l_si_1, pred = self.dense_silog_loss( pred_depths, depths_gt, mask=mask, interpolate=True, return_interpolated=True) l_si_f, pred_f = self.dense_silog_loss( fine, depths_gt, mask=mask, interpolate=True, return_interpolated=True) l_si_c = self.dense_silog_loss( coarse, depth_raw, mask=mask_raw, interpolate=True, return_interpolated=False) losses[self.silog_loss.name] = l_si_1 losses['sigloss_f'] = l_si_f losses['l_si_c'] = l_si_c # loss = l_si_1 + l_si_f + l_si_c loss = l_si_1 if self.consistency_training: try: # depths_gt? pred_f? l_consistency = self.consistency_loss(pred, shift, mask, temp_features, pred_f=depths_gt) # use the resized pred except RuntimeError as e: print(e) print("some runtime error here! Hack with 0") l_consistency = torch.Tensor([0]).squeeze() losses[self.consistency_loss.name] = l_consistency loss += l_consistency * self.config.w_consistency else: l_si, pred = self.dense_silog_loss( pred_depths, depths_gt, mask=mask, interpolate=True, return_interpolated=True) loss = self.config.w_si * l_si losses[self.silog_loss.name] = l_si if self.dynamic: if step_rate > self.dynamic_unupdate_rate: warm_up_rate = min(1.0, (step_rate - self.dynamic_unupdate_rate) / 0.02) flop_cost = self.budget_loss(output['all_cell_flops'], warm_up_rate=warm_up_rate) loss += self.config.w_flop * flop_cost losses['flop_loss'] = flop_cost else: flop_cost = self.budget_loss(output['all_cell_flops'], warm_up_rate=1) loss += 0 * flop_cost losses['flop_loss'] = flop_cost if self.use_scale_loss: if self.scale_target == 'coarse': h_loss = self.scale_loss(pred_depths, output['coarse_depth_pred_roi'], mask, interpolate=True) else: h_loss = self.scale_loss(pred_depths, depths_gt, mask, interpolate=True) loss += self.config.w_scale * h_loss losses['scale_loss'] = h_loss # self.scaler.scale(loss).backward() # if self.config.clip_grad > 0: # self.scaler.unscale_(self.optimizer) # nn.utils.clip_grad_norm_( # self.model.parameters(), self.config.clip_grad) # self.scaler.step(self.optimizer) # self.scaler.update() # self.optimizer.zero_grad() self.scaler.scale(loss).backward() if self.config.clip_grad > 0: self.scaler.unscale_(self.optimizer) nn.utils.clip_grad_norm_( self.model.parameters(), self.config.clip_grad) self.scaler.step(self.optimizer) self.scaler.update() self.optimizer.zero_grad() if self.should_log and (self.step % int(self.config.log_images_every * self.iters_per_epoch)) == 0: if self.config.get("debug", False): pred = nn.functional.interpolate( pred[0:1], depths_gt.shape[-2:], mode='bilinear', align_corners=True)[0] plt.imshow(pred.squeeze().detach().cpu().numpy()) plt.savefig('debug.png') pass else: pred = nn.functional.interpolate( pred[0:1], depths_gt.shape[-2:], mode='bilinear', align_corners=True)[0] depths_gt[torch.logical_not(mask)] = DATASETS_CONFIG[dataset]['max_depth'] if self.consistency_training: split_images = torch.split(images, 3, dim=1) 
images = torch.cat(split_images, dim=0) self.log_images(rgb={"Input": images[0, ...]}, depth={"GT": depths_gt[0], "PredictedMono": pred}, prefix="Train", min_depth=DATASETS_CONFIG[dataset]['min_depth'], max_depth=DATASETS_CONFIG[dataset]['max_depth']) return losses @torch.no_grad() def eval_infer(self, x, image_raw, bboxs=None, crop_area=None, dataset='u4k', bbox_raw=None): m = self.model.module if self.config.multigpu else self.model if dataset == 'u4k': base_h = 540 base_w = 960 elif dataset == 'gta': base_h = 270 base_w = 480 elif dataset == 'nyu': base_h = 120 * 2 base_w = 160 * 2 else: raise NotImplementedError if dataset == 'nyu': if self.sec_stage: images_crops = torch.split(x, 3, dim=1) bboxs_list = torch.split(bboxs, 1, dim=1) crop_areas = torch.split(crop_area, 1, dim=1) pred_depth_crops = [] for i, (img, bbox, crop_area) in enumerate(zip(images_crops, bboxs_list, crop_areas)): with amp.autocast(enabled=self.config.use_amp): if i == 0: out_dict = m(img, mode='eval', image_raw=image_raw, bbox=bbox[0], crop_area=crop_area, bbox_raw=bbox_raw[:, i, :] if bbox_raw is not None else None) # whole_depth_pred = out_dict['coarse_depth_pred'] pred_depth_crop = out_dict['metric_depth'] else: pred_depth_crop = m(img, mode='eval', image_raw=image_raw, bbox=bbox[0], crop_area=crop_area, bbox_raw=bbox_raw[:, i, :] if bbox_raw is not None else None)['metric_depth'] pred_depth_crop = nn.functional.interpolate( pred_depth_crop, (base_h, base_w), mode='bilinear', align_corners=True) pred_depth_crops.append(pred_depth_crop) x_start, y_start = [0, base_h], [0, base_w] pred_depth = torch.zeros((base_h*2, base_w*2)).cuda() inner_idx = 0 for ii, x in enumerate(x_start): for jj, y in enumerate(y_start): if self.use_blur: pred_depth[x: x+base_h, y: y+base_w] = pred_depth_crops[inner_idx].squeeze() # do not care about boundry during validation else: pred_depth[x: x+base_h, y: y+base_w] = pred_depth_crops[inner_idx].squeeze() inner_idx += 1 pred_depth = pred_depth.squeeze(dim=0) else: with amp.autocast(enabled=self.config.use_amp): pred_depth = m(x, mode='eval', image_raw=image_raw)['metric_depth'] else: if self.sec_stage: images_crops = torch.split(x, 3, dim=1) bboxs_list = torch.split(bboxs, 1, dim=1) crop_areas = torch.split(crop_area, 1, dim=1) pred_depth_crops = [] for i, (img, bbox, crop_area) in enumerate(zip(images_crops, bboxs_list, crop_areas)): with amp.autocast(enabled=self.config.use_amp): if i == 0: out_dict = m(img, mode='eval', image_raw=image_raw, bbox=bbox[0], crop_area=crop_area, bbox_raw=bbox_raw[:, i, :] if bbox_raw is not None else None) # whole_depth_pred = out_dict['coarse_depth_pred'] pred_depth_crop = out_dict['metric_depth'] else: pred_depth_crop = m(img, mode='eval', image_raw=image_raw, bbox=bbox[0], crop_area=crop_area, bbox_raw=bbox_raw[:, i, :] if bbox_raw is not None else None)['metric_depth'] pred_depth_crop = nn.functional.interpolate( pred_depth_crop, (base_h, base_w), mode='bilinear', align_corners=True) pred_depth_crops.append(pred_depth_crop) x_start, y_start = [0, base_h], [0, base_w] pred_depth = torch.zeros((base_h*2, base_w*2)).cuda() inner_idx = 0 for ii, x in enumerate(x_start): for jj, y in enumerate(y_start): if self.use_blur: pred_depth[x: x+base_h, y: y+base_w] = pred_depth_crops[inner_idx].squeeze() # do not care about boundry during validation else: pred_depth[x: x+base_h, y: y+base_w] = pred_depth_crops[inner_idx].squeeze() inner_idx += 1 pred_depth = pred_depth.squeeze(dim=0) else: with amp.autocast(enabled=self.config.use_amp): pred_depth = m(x, 
mode='eval', image_raw=image_raw)['metric_depth'] return pred_depth @torch.no_grad() def crop_aware_infer(self, x, image_raw): # if we are not avoiding the black border, we can just use the normal inference if not self.config.get("avoid_boundary", False): return self.eval_infer(x) # otherwise, we need to crop the image to avoid the black border # For now, this may be a bit slow due to converting to numpy and back # We assume no normalization is done on the input image # get the black border assert x.shape[0] == 1, "Only batch size 1 is supported for now" x_pil = transforms.ToPILImage()(x[0].cpu()) x_np = np.array(x_pil, dtype=np.uint8)
black_border_params = get_black_border(x_np)
9
2023-12-04 08:43:15+00:00
24k
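In eval_infer above, four per-crop depth predictions are resized to (base_h, base_w) and written back into the quadrants of a double-resolution canvas. The stitching reduces to the sketch below (crop order top-left, top-right, bottom-left, bottom-right, matching the nested x_start/y_start loops; stitch_quadrants is a name introduced here for illustration):

import torch

def stitch_quadrants(crops, base_h=540, base_w=960):
    # crops: four (base_h, base_w) depth tensors, ordered TL, TR, BL, BR.
    out = torch.zeros(base_h * 2, base_w * 2)
    idx = 0
    for x in (0, base_h):
        for y in (0, base_w):
            out[x:x + base_h, y:y + base_w] = crops[idx]
            idx += 1
    return out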
baaivision/GeoDream
threestudio/models/geometry/tetrahedra_sdf_grid.py
[ { "identifier": "BaseExplicitGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseExplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Flo...
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import threestudio
import trimesh
from dataclasses import dataclass, field
from threestudio.models.geometry.base import (
    BaseExplicitGeometry,
    BaseGeometry,
    contract_to_unisphere,
)
from threestudio.models.geometry.implicit_sdf import ImplicitSDF
from threestudio.models.geometry.implicit_volume import ImplicitVolume
from threestudio.models.isosurface import MarchingTetrahedraHelper
from threestudio.models.mesh import Mesh
from threestudio.models.networks import get_encoding, get_mlp
from threestudio.utils.misc import broadcast
from threestudio.utils.ops import scale_tensor
from threestudio.models.geometry.geodream_geometry_volume import GeodreamGeometryVolume
from threestudio.utils.typing import *
from pysdf import SDF
16,631
@threestudio.register("tetrahedra-sdf-grid")
class TetrahedraSDFGrid(BaseExplicitGeometry):
    @dataclass
    class Config(BaseExplicitGeometry.Config):
        isosurface_resolution: int = 128
        isosurface_deformable_grid: bool = True
        isosurface_remove_outliers: bool = False
        isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01

        n_input_dims: int = 3
        n_feature_dims: int = 3
        pos_encoding_config: dict = field(
            default_factory=lambda: {
                "otype": "HashGrid",
                "n_levels": 16,
                "n_features_per_level": 2,
                "log2_hashmap_size": 19,
                "base_resolution": 16,
                "per_level_scale": 1.447269237440378,
            }
        )
        mlp_network_config: dict = field(
            default_factory=lambda: {
                "otype": "VanillaMLP",
                "activation": "ReLU",
                "output_activation": "none",
                "n_neurons": 64,
                "n_hidden_layers": 1,
            }
        )
        shape_init: Optional[str] = None
        shape_init_params: Optional[Any] = None
        shape_init_mesh_up: str = "+z"
        shape_init_mesh_front: str = "+x"
        force_shape_init: bool = False
        geometry_only: bool = False
        fix_geometry: bool = False

    cfg: Config

    def configure(self) -> None:
        super().configure()

        # this should be saved to state_dict, register as buffer
        self.isosurface_bbox: Float[Tensor, "2 3"]
        self.register_buffer("isosurface_bbox", self.bbox.clone())

        self.isosurface_helper = MarchingTetrahedraHelper(
            self.cfg.isosurface_resolution,
            f"load/tets/{self.cfg.isosurface_resolution}_tets.npz",
        )

        self.sdf: Float[Tensor, "Nv 1"]
        self.deformation: Optional[Float[Tensor, "Nv 3"]]

        if not self.cfg.fix_geometry:
            self.register_parameter(
                "sdf",
                nn.Parameter(
                    torch.zeros(
                        (self.isosurface_helper.grid_vertices.shape[0], 1),
                        dtype=torch.float32,
                    )
                ),
            )
            if self.cfg.isosurface_deformable_grid:
                self.register_parameter(
                    "deformation",
                    nn.Parameter(
                        torch.zeros_like(self.isosurface_helper.grid_vertices)
                    ),
                )
            else:
                self.deformation = None
        else:
            self.register_buffer(
                "sdf",
                torch.zeros(
                    (self.isosurface_helper.grid_vertices.shape[0], 1),
                    dtype=torch.float32,
                ),
            )
            if self.cfg.isosurface_deformable_grid:
                self.register_buffer(
                    "deformation",
                    torch.zeros_like(self.isosurface_helper.grid_vertices),
                )
            else:
                self.deformation = None

        if not self.cfg.geometry_only:
            self.encoding = get_encoding(
                self.cfg.n_input_dims, self.cfg.pos_encoding_config
            )
            self.feature_network = get_mlp(
                self.encoding.n_output_dims,
                self.cfg.n_feature_dims,
                self.cfg.mlp_network_config,
            )
self.mesh: Optional[Mesh] = None
6
2023-12-01 01:59:42+00:00
24k
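configure() above registers the SDF grid and its deformation either as nn.Parameter (when the geometry is trainable) or as buffers (when fix_geometry is set), so both variants serialize identically but only one receives gradients. A stripped-down sketch of that toggle (SDFGrid is a name introduced here, not the repo's class):

import torch
import torch.nn as nn

class SDFGrid(nn.Module):
    def __init__(self, n_vertices, trainable=True):
        super().__init__()
        sdf = torch.zeros(n_vertices, 1, dtype=torch.float32)
        if trainable:
            # Parameters appear in .parameters() and are updated by the optimizer.
            self.register_parameter("sdf", nn.Parameter(sdf))
        else:
            # Buffers move with .to(device) and save in state_dict, but stay frozen.
            self.register_buffer("sdf", sdf)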
horseee/DeepCache
DeepCache/sd/pipeline_text_to_video_zero.py
[ { "identifier": "UNet2DConditionModel", "path": "DeepCache/sd/unet_2d_condition.py", "snippet": "class UNet2DConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin):\n r\"\"\"\n A conditional 2D UNet model that takes a noisy sample, conditional state, and a timestep and returns a samp...
import copy
import numpy as np
import PIL.Image
import torch
import torch.nn.functional as F
from dataclasses import dataclass
from typing import Callable, List, Optional, Union
from torch.nn.functional import grid_sample
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL
from .unet_2d_condition import UNet2DConditionModel
from .pipeline_stable_diffusion import StableDiffusionPipeline, StableDiffusionSafetyChecker
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import BaseOutput
from diffusers.utils.torch_utils import randn_tensor
17,099
images: Union[List[PIL.Image.Image], np.ndarray] nsfw_content_detected: Optional[List[bool]] def coords_grid(batch, ht, wd, device): # Adapted from https://github.com/princeton-vl/RAFT/blob/master/core/utils/utils.py coords = torch.meshgrid(torch.arange(ht, device=device), torch.arange(wd, device=device)) coords = torch.stack(coords[::-1], dim=0).float() return coords[None].repeat(batch, 1, 1, 1) def warp_single_latent(latent, reference_flow): """ Warp latent of a single frame with given flow Args: latent: latent code of a single frame reference_flow: flow which to warp the latent with Returns: warped: warped latent """ _, _, H, W = reference_flow.size() _, _, h, w = latent.size() coords0 = coords_grid(1, H, W, device=latent.device).to(latent.dtype) coords_t0 = coords0 + reference_flow coords_t0[:, 0] /= W coords_t0[:, 1] /= H coords_t0 = coords_t0 * 2.0 - 1.0 coords_t0 = F.interpolate(coords_t0, size=(h, w), mode="bilinear") coords_t0 = torch.permute(coords_t0, (0, 2, 3, 1)) warped = grid_sample(latent, coords_t0, mode="nearest", padding_mode="reflection") return warped def create_motion_field(motion_field_strength_x, motion_field_strength_y, frame_ids, device, dtype): """ Create translation motion field Args: motion_field_strength_x: motion strength along x-axis motion_field_strength_y: motion strength along y-axis frame_ids: indexes of the frames the latents of which are being processed. This is needed when we perform chunk-by-chunk inference device: device dtype: dtype Returns: """ seq_length = len(frame_ids) reference_flow = torch.zeros((seq_length, 2, 512, 512), device=device, dtype=dtype) for fr_idx in range(seq_length): reference_flow[fr_idx, 0, :, :] = motion_field_strength_x * (frame_ids[fr_idx]) reference_flow[fr_idx, 1, :, :] = motion_field_strength_y * (frame_ids[fr_idx]) return reference_flow def create_motion_field_and_warp_latents(motion_field_strength_x, motion_field_strength_y, frame_ids, latents): """ Creates translation motion and warps the latents accordingly Args: motion_field_strength_x: motion strength along x-axis motion_field_strength_y: motion strength along y-axis frame_ids: indexes of the frames the latents of which are being processed. This is needed when we perform chunk-by-chunk inference latents: latent codes of frames Returns: warped_latents: warped latents """ motion_field = create_motion_field( motion_field_strength_x=motion_field_strength_x, motion_field_strength_y=motion_field_strength_y, frame_ids=frame_ids, device=latents.device, dtype=latents.dtype, ) warped_latents = latents.clone().detach() for i in range(len(warped_latents)): warped_latents[i] = warp_single_latent(latents[i][None], motion_field[i][None]) return warped_latents class TextToVideoZeroPipeline(StableDiffusionPipeline): r""" Pipeline for zero-shot text-to-video generation using Stable Diffusion. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). tokenizer (`CLIPTokenizer`): A [`~transformers.CLIPTokenizer`] to tokenize text. unet ([`UNet2DConditionModel`]): A [`UNet3DConditionModel`] to denoise the encoded video latents. 
scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. safety_checker ([`StableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details about a model's potential harms. feature_extractor ([`CLIPImageProcessor`]): A [`CLIPImageProcessor`] to extract features from generated images; used as inputs to the `safety_checker`. """ def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer,
def sample_gaussian_centered(n=1000, sample_size=100, std_dev=100): samples = [] while len(samples) < sample_size: # Sample from a Gaussian centered at n/2 sample = int(np.random.normal(loc=n/2, scale=std_dev)) # Check if the sample is in bounds if 1 <= sample < n and sample not in samples: samples.append(sample) return samples def sample_from_quad(total_numbers, n_samples, pow=1.2): while pow > 1: # Generate linearly spaced values between 0 and a max value x_values = np.linspace(0, total_numbers**(1/pow), n_samples+1) # Raise these values to the power of 1.5 to get a non-linear distribution indices = np.unique(np.int32(x_values**pow))[:-1] if len(indices) == n_samples: break pow -=0.02 if pow <= 1: raise ValueError("Cannot find suitable pow. Please adjust n_samples or decrease center.") return indices, pow def sample_from_quad_center(total_numbers, n_samples, center, pow=1.2): while pow > 1: # Generate linearly spaced values between 0 and a max value x_values = np.linspace((-center)**(1/pow), (total_numbers-center)**(1/pow), n_samples+1) indices = [0] + [x+center for x in np.unique(np.int32(x_values**pow))[1:-1]] if len(indices) == n_samples: break pow -=0.02 if pow <= 1: raise ValueError("Cannot find suitable pow. Please adjust n_samples or decrease center.") return indices, pow def rearrange_0(tensor, f): F, C, H, W = tensor.size() tensor = torch.permute(torch.reshape(tensor, (F // f, f, C, H, W)), (0, 2, 1, 3, 4)) return tensor def rearrange_1(tensor): B, C, F, H, W = tensor.size() return torch.reshape(torch.permute(tensor, (0, 2, 1, 3, 4)), (B * F, C, H, W)) def rearrange_3(tensor, f): F, D, C = tensor.size() return torch.reshape(tensor, (F // f, f, D, C)) def rearrange_4(tensor): B, F, D, C = tensor.size() return torch.reshape(tensor, (B * F, D, C)) class CrossFrameAttnProcessor: """ Cross frame attention processor. Each frame attends the first frame. Args: batch_size: The number that represents actual batch size, other than the frames. For example, calling unet with a single prompt and num_images_per_prompt=1, batch_size should be equal to 2, due to classifier-free guidance. 
""" def __init__(self, batch_size=2): self.batch_size = batch_size def __call__(self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None): batch_size, sequence_length, _ = hidden_states.shape attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) query = attn.to_q(hidden_states) is_cross_attention = encoder_hidden_states is not None if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) # Cross Frame Attention if not is_cross_attention: video_length = key.size()[0] // self.batch_size first_frame_index = [0] * video_length # rearrange keys to have batch and frames in the 1st and 2nd dims respectively key = rearrange_3(key, video_length) key = key[:, first_frame_index] # rearrange values to have batch and frames in the 1st and 2nd dims respectively value = rearrange_3(value, video_length) value = value[:, first_frame_index] # rearrange back to original shape key = rearrange_4(key) value = rearrange_4(value) query = attn.head_to_batch_dim(query) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) attention_probs = attn.get_attention_scores(query, key, attention_mask) hidden_states = torch.bmm(attention_probs, value) hidden_states = attn.batch_to_head_dim(hidden_states) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) return hidden_states class CrossFrameAttnProcessor2_0: """ Cross frame attention processor with scaled_dot_product attention of Pytorch 2.0. Args: batch_size: The number that represents actual batch size, other than the frames. For example, calling unet with a single prompt and num_images_per_prompt=1, batch_size should be equal to 2, due to classifier-free guidance. 
""" def __init__(self, batch_size=2): if not hasattr(F, "scaled_dot_product_attention"): raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.") self.batch_size = batch_size def __call__(self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None): batch_size, sequence_length, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) inner_dim = hidden_states.shape[-1] if attention_mask is not None: attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) # scaled_dot_product_attention expects attention_mask shape to be # (batch, heads, source_length, target_length) attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) query = attn.to_q(hidden_states) is_cross_attention = encoder_hidden_states is not None if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) # Cross Frame Attention if not is_cross_attention: video_length = max(1, key.size()[0] // self.batch_size) first_frame_index = [0] * video_length # rearrange keys to have batch and frames in the 1st and 2nd dims respectively key = rearrange_3(key, video_length) key = key[:, first_frame_index] # rearrange values to have batch and frames in the 1st and 2nd dims respectively value = rearrange_3(value, video_length) value = value[:, first_frame_index] # rearrange back to original shape key = rearrange_4(key) value = rearrange_4(value) head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) # the output of sdp = (batch, num_heads, seq_len, head_dim) # TODO: add support for attn.scale when we move to Torch 2.1 hidden_states = F.scaled_dot_product_attention( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False ) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states = hidden_states.to(query.dtype) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) return hidden_states @dataclass class TextToVideoPipelineOutput(BaseOutput): r""" Output class for zero-shot text-to-video pipeline. Args: images (`[List[PIL.Image.Image]`, `np.ndarray`]): List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width, num_channels)`. nsfw_content_detected (`[List[bool]]`): List indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content or `None` if safety checking could not be performed. 
""" images: Union[List[PIL.Image.Image], np.ndarray] nsfw_content_detected: Optional[List[bool]] def coords_grid(batch, ht, wd, device): # Adapted from https://github.com/princeton-vl/RAFT/blob/master/core/utils/utils.py coords = torch.meshgrid(torch.arange(ht, device=device), torch.arange(wd, device=device)) coords = torch.stack(coords[::-1], dim=0).float() return coords[None].repeat(batch, 1, 1, 1) def warp_single_latent(latent, reference_flow): """ Warp latent of a single frame with given flow Args: latent: latent code of a single frame reference_flow: flow which to warp the latent with Returns: warped: warped latent """ _, _, H, W = reference_flow.size() _, _, h, w = latent.size() coords0 = coords_grid(1, H, W, device=latent.device).to(latent.dtype) coords_t0 = coords0 + reference_flow coords_t0[:, 0] /= W coords_t0[:, 1] /= H coords_t0 = coords_t0 * 2.0 - 1.0 coords_t0 = F.interpolate(coords_t0, size=(h, w), mode="bilinear") coords_t0 = torch.permute(coords_t0, (0, 2, 3, 1)) warped = grid_sample(latent, coords_t0, mode="nearest", padding_mode="reflection") return warped def create_motion_field(motion_field_strength_x, motion_field_strength_y, frame_ids, device, dtype): """ Create translation motion field Args: motion_field_strength_x: motion strength along x-axis motion_field_strength_y: motion strength along y-axis frame_ids: indexes of the frames the latents of which are being processed. This is needed when we perform chunk-by-chunk inference device: device dtype: dtype Returns: """ seq_length = len(frame_ids) reference_flow = torch.zeros((seq_length, 2, 512, 512), device=device, dtype=dtype) for fr_idx in range(seq_length): reference_flow[fr_idx, 0, :, :] = motion_field_strength_x * (frame_ids[fr_idx]) reference_flow[fr_idx, 1, :, :] = motion_field_strength_y * (frame_ids[fr_idx]) return reference_flow def create_motion_field_and_warp_latents(motion_field_strength_x, motion_field_strength_y, frame_ids, latents): """ Creates translation motion and warps the latents accordingly Args: motion_field_strength_x: motion strength along x-axis motion_field_strength_y: motion strength along y-axis frame_ids: indexes of the frames the latents of which are being processed. This is needed when we perform chunk-by-chunk inference latents: latent codes of frames Returns: warped_latents: warped latents """ motion_field = create_motion_field( motion_field_strength_x=motion_field_strength_x, motion_field_strength_y=motion_field_strength_y, frame_ids=frame_ids, device=latents.device, dtype=latents.dtype, ) warped_latents = latents.clone().detach() for i in range(len(warped_latents)): warped_latents[i] = warp_single_latent(latents[i][None], motion_field[i][None]) return warped_latents class TextToVideoZeroPipeline(StableDiffusionPipeline): r""" Pipeline for zero-shot text-to-video generation using Stable Diffusion. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). tokenizer (`CLIPTokenizer`): A [`~transformers.CLIPTokenizer`] to tokenize text. unet ([`UNet2DConditionModel`]): A [`UNet3DConditionModel`] to denoise the encoded video latents. 
scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. safety_checker ([`StableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details about a model's potential harms. feature_extractor ([`CLIPImageProcessor`]): A [`CLIPImageProcessor`] to extract features from generated images; used as inputs to the `safety_checker`. """ def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer,
unet: UNet2DConditionModel,
0
2023-12-01 10:54:04+00:00
24k
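The pipeline in the record above keeps frames visually consistent by rewriting self-attention so that every frame attends to the keys and values of the first frame. A minimal sketch of that first-frame gather, assuming only PyTorch; the shapes (2 prompts, 8 frames, 77 tokens, 320 channels) are illustrative and not taken from the record:

```python
import torch

def rearrange_3(tensor, f):
    # (B*F, D, C) -> (B, F, D, C): split the frame axis out of the batch
    frames, seq, ch = tensor.size()
    return torch.reshape(tensor, (frames // f, f, seq, ch))

def rearrange_4(tensor):
    # (B, F, D, C) -> (B*F, D, C): collapse frames back into the batch
    b, f, seq, ch = tensor.size()
    return torch.reshape(tensor, (b * f, seq, ch))

batch_size, video_length, seq_len, dim = 2, 8, 77, 320
key = torch.randn(batch_size * video_length, seq_len, dim)

key = rearrange_3(key, video_length)   # (B, F, D, C)
key = key[:, [0] * video_length]       # every frame takes frame 0's keys
key = rearrange_4(key)                 # back to (B*F, D, C)

assert torch.equal(key[1], key[0])     # frames within a batch now share keys
```

This is the same reshape pair that `CrossFrameAttnProcessor` in the record applies to `key` and `value` (only when `is_cross_attention` is false) before attention scores are computed.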
alvinliu0/HumanGaussian
threestudio/models/geometry/tetrahedra_sdf_grid.py
[ { "identifier": "BaseExplicitGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseExplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Flo...
import os import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import threestudio import trimesh from dataclasses import dataclass, field from threestudio.models.geometry.base import ( BaseExplicitGeometry, BaseGeometry, contract_to_unisphere, ) from threestudio.models.geometry.implicit_sdf import ImplicitSDF from threestudio.models.geometry.implicit_volume import ImplicitVolume from threestudio.models.isosurface import MarchingTetrahedraHelper from threestudio.models.mesh import Mesh from threestudio.models.networks import get_encoding, get_mlp from threestudio.utils.misc import broadcast from threestudio.utils.ops import scale_tensor from threestudio.utils.typing import * from pysdf import SDF
15,020
).sqrt() - 1.0 # pseudo signed distance of an ellipsoid get_gt_sdf = func elif self.cfg.shape_init == "sphere": assert isinstance(self.cfg.shape_init_params, float) radius = self.cfg.shape_init_params def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius get_gt_sdf = func elif self.cfg.shape_init.startswith("mesh:"): assert isinstance(self.cfg.shape_init_params, float) mesh_path = self.cfg.shape_init[5:] if not os.path.exists(mesh_path): raise ValueError(f"Mesh file {mesh_path} does not exist.") mesh = trimesh.load(mesh_path) # move to center centroid = mesh.vertices.mean(0) mesh.vertices = mesh.vertices - centroid # align to up-z and front-x dirs = ["+x", "+y", "+z", "-x", "-y", "-z"] dir2vec = { "+x": np.array([1, 0, 0]), "+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]), "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]), } if ( self.cfg.shape_init_mesh_up not in dirs or self.cfg.shape_init_mesh_front not in dirs ): raise ValueError( f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}." ) if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]: raise ValueError( "shape_init_mesh_up and shape_init_mesh_front must be orthogonal." ) z_, x_ = ( dir2vec[self.cfg.shape_init_mesh_up], dir2vec[self.cfg.shape_init_mesh_front], ) y_ = np.cross(z_, x_) std2mesh = np.stack([x_, y_, z_], axis=0).T mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) sdf_gt = get_gt_sdf( scale_tensor( self.isosurface_helper.grid_vertices, self.isosurface_helper.points_range, self.isosurface_bbox, ) ) self.sdf.data = sdf_gt # explicit broadcast to ensure param consistency across ranks for param in self.parameters(): broadcast(param, src=0) def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1) enc = self.encoding(points.view(-1, self.cfg.n_input_dims)) features = self.feature_network(enc).view( *points.shape[:-1], self.cfg.n_feature_dims ) return {"features": features} @staticmethod @torch.no_grad() def create_from(
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None shape_init_mesh_up: str = "+z" shape_init_mesh_front: str = "+x" force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, ) self.mesh: Optional[Mesh] = None def initialize_shape(self) -> None: if self.cfg.shape_init is None and not self.cfg.force_shape_init: return # do not initialize shape if weights are provided if self.cfg.weights is not None and not self.cfg.force_shape_init: return get_gt_sdf: Callable[[Float[Tensor, "N 3"]], Float[Tensor, "N 1"]] assert isinstance(self.cfg.shape_init, str) if self.cfg.shape_init == "ellipsoid": assert ( isinstance(self.cfg.shape_init_params, Sized) and len(self.cfg.shape_init_params) == 3 ) size = torch.as_tensor(self.cfg.shape_init_params).to(self.device) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return ((points_rand / size) ** 2).sum( dim=-1, keepdim=True ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid get_gt_sdf = func elif self.cfg.shape_init == "sphere": assert isinstance(self.cfg.shape_init_params, float) radius = self.cfg.shape_init_params def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius get_gt_sdf = func elif self.cfg.shape_init.startswith("mesh:"): assert isinstance(self.cfg.shape_init_params, float) mesh_path = self.cfg.shape_init[5:] if not os.path.exists(mesh_path): raise 
ValueError(f"Mesh file {mesh_path} does not exist.") mesh = trimesh.load(mesh_path) # move to center centroid = mesh.vertices.mean(0) mesh.vertices = mesh.vertices - centroid # align to up-z and front-x dirs = ["+x", "+y", "+z", "-x", "-y", "-z"] dir2vec = { "+x": np.array([1, 0, 0]), "+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]), "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]), } if ( self.cfg.shape_init_mesh_up not in dirs or self.cfg.shape_init_mesh_front not in dirs ): raise ValueError( f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}." ) if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]: raise ValueError( "shape_init_mesh_up and shape_init_mesh_front must be orthogonal." ) z_, x_ = ( dir2vec[self.cfg.shape_init_mesh_up], dir2vec[self.cfg.shape_init_mesh_front], ) y_ = np.cross(z_, x_) std2mesh = np.stack([x_, y_, z_], axis=0).T mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) sdf_gt = get_gt_sdf( scale_tensor( self.isosurface_helper.grid_vertices, self.isosurface_helper.points_range, self.isosurface_bbox, ) ) self.sdf.data = sdf_gt # explicit broadcast to ensure param consistency across ranks for param in self.parameters(): broadcast(param, src=0) def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1) enc = self.encoding(points.view(-1, self.cfg.n_input_dims)) features = self.feature_network(enc).view( *points.shape[:-1], self.cfg.n_feature_dims ) return {"features": features} @staticmethod @torch.no_grad() def create_from(
other: BaseGeometry,
1
2023-11-27 02:39:39+00:00
24k
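The geometry in the record above is initialized by fitting the SDF grid values to an analytic target before optimization starts, and the `sphere` branch is the simplest case. A self-contained sketch of that pseudo signed distance (negative inside, positive outside); the radius of 0.5 is an assumed value standing in for `cfg.shape_init_params`:

```python
import torch

radius = 0.5  # assumed value; stands in for cfg.shape_init_params

def sphere_sdf(points: torch.Tensor) -> torch.Tensor:
    # points: (N, 3) -> (N, 1) signed distance, negative inside the sphere
    return (points ** 2).sum(dim=-1, keepdim=True).sqrt() - radius

pts = torch.tensor([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]])
print(sphere_sdf(pts))  # approximately [[-0.5], [0.5]]
```

The `ellipsoid` and `mesh:` branches in the record follow the same contract of one signed value per query point; the mesh branch negates pysdf's output because, as its comment notes, pysdf reports positive distances inside the shape.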
EricGuo5513/momask-codes
eval_t2m_trans_res.py
[ { "identifier": "MaskTransformer", "path": "models/mask_transformer/transformer.py", "snippet": "class MaskTransformer(nn.Module):\n def __init__(self, code_dim, cond_mode, latent_dim=256, ff_size=1024, num_layers=8,\n num_heads=4, dropout=0.1, clip_dim=512, cond_drop_prob=0.1,\n ...
import os import torch import utils.eval_t2m as eval_t2m import numpy as np from os.path import join as pjoin from models.mask_transformer.transformer import MaskTransformer, ResidualTransformer from models.vq.model import RVQVAE from options.eval_option import EvalT2MOptions from utils.get_opt import get_opt from motion_loaders.dataset_motion_loader import get_dataset_motion_loader from models.t2m_eval_wrapper import EvaluatorModelWrapper from utils.fixseed import fixseed
14,519
def load_vq_model(vq_opt): # opt_path = pjoin(opt.checkpoints_dir, opt.dataset_name, opt.vq_name, 'opt.txt') vq_model = RVQVAE(vq_opt, dim_pose, vq_opt.nb_code, vq_opt.code_dim, vq_opt.output_emb_width, vq_opt.down_t, vq_opt.stride_t, vq_opt.width, vq_opt.depth, vq_opt.dilation_growth_rate, vq_opt.vq_act, vq_opt.vq_norm) ckpt = torch.load(pjoin(vq_opt.checkpoints_dir, vq_opt.dataset_name, vq_opt.name, 'model', 'net_best_fid.tar'), map_location=opt.device) model_key = 'vq_model' if 'vq_model' in ckpt else 'net' vq_model.load_state_dict(ckpt[model_key]) print(f'Loading VQ Model {vq_opt.name} Completed!') return vq_model, vq_opt def load_trans_model(model_opt, which_model): t2m_transformer = MaskTransformer(code_dim=model_opt.code_dim, cond_mode='text', latent_dim=model_opt.latent_dim, ff_size=model_opt.ff_size, num_layers=model_opt.n_layers, num_heads=model_opt.n_heads, dropout=model_opt.dropout, clip_dim=512, cond_drop_prob=model_opt.cond_drop_prob, clip_version=clip_version, opt=model_opt) ckpt = torch.load(pjoin(model_opt.checkpoints_dir, model_opt.dataset_name, model_opt.name, 'model', which_model), map_location=opt.device) model_key = 't2m_transformer' if 't2m_transformer' in ckpt else 'trans' # print(ckpt.keys()) missing_keys, unexpected_keys = t2m_transformer.load_state_dict(ckpt[model_key], strict=False) assert len(unexpected_keys) == 0 assert all([k.startswith('clip_model.') for k in missing_keys]) print(f'Loading Mask Transformer {opt.name} from epoch {ckpt["ep"]}!') return t2m_transformer def load_res_model(res_opt): res_opt.num_quantizers = vq_opt.num_quantizers res_opt.num_tokens = vq_opt.nb_code res_transformer = ResidualTransformer(code_dim=vq_opt.code_dim, cond_mode='text', latent_dim=res_opt.latent_dim, ff_size=res_opt.ff_size, num_layers=res_opt.n_layers, num_heads=res_opt.n_heads, dropout=res_opt.dropout, clip_dim=512, shared_codebook=vq_opt.shared_codebook, cond_drop_prob=res_opt.cond_drop_prob, # codebook=vq_model.quantizer.codebooks[0] if opt.fix_token_emb else None, share_weight=res_opt.share_weight, clip_version=clip_version, opt=res_opt) ckpt = torch.load(pjoin(res_opt.checkpoints_dir, res_opt.dataset_name, res_opt.name, 'model', 'net_best_fid.tar'), map_location=opt.device) missing_keys, unexpected_keys = res_transformer.load_state_dict(ckpt['res_transformer'], strict=False) assert len(unexpected_keys) == 0 assert all([k.startswith('clip_model.') for k in missing_keys]) print(f'Loading Residual Transformer {res_opt.name} from epoch {ckpt["ep"]}!') return res_transformer if __name__ == '__main__': parser = EvalT2MOptions() opt = parser.parse() fixseed(opt.seed) opt.device = torch.device("cpu" if opt.gpu_id == -1 else "cuda:" + str(opt.gpu_id)) torch.autograd.set_detect_anomaly(True) dim_pose = 251 if opt.dataset_name == 'kit' else 263 # out_dir = pjoin(opt.check) root_dir = pjoin(opt.checkpoints_dir, opt.dataset_name, opt.name) model_dir = pjoin(root_dir, 'model') out_dir = pjoin(root_dir, 'eval') os.makedirs(out_dir, exist_ok=True) out_path = pjoin(out_dir, "%s.log"%opt.ext) f = open(pjoin(out_path), 'w') model_opt_path = pjoin(root_dir, 'opt.txt')
def load_vq_model(vq_opt): # opt_path = pjoin(opt.checkpoints_dir, opt.dataset_name, opt.vq_name, 'opt.txt') vq_model = RVQVAE(vq_opt, dim_pose, vq_opt.nb_code, vq_opt.code_dim, vq_opt.output_emb_width, vq_opt.down_t, vq_opt.stride_t, vq_opt.width, vq_opt.depth, vq_opt.dilation_growth_rate, vq_opt.vq_act, vq_opt.vq_norm) ckpt = torch.load(pjoin(vq_opt.checkpoints_dir, vq_opt.dataset_name, vq_opt.name, 'model', 'net_best_fid.tar'), map_location=opt.device) model_key = 'vq_model' if 'vq_model' in ckpt else 'net' vq_model.load_state_dict(ckpt[model_key]) print(f'Loading VQ Model {vq_opt.name} Completed!') return vq_model, vq_opt def load_trans_model(model_opt, which_model): t2m_transformer = MaskTransformer(code_dim=model_opt.code_dim, cond_mode='text', latent_dim=model_opt.latent_dim, ff_size=model_opt.ff_size, num_layers=model_opt.n_layers, num_heads=model_opt.n_heads, dropout=model_opt.dropout, clip_dim=512, cond_drop_prob=model_opt.cond_drop_prob, clip_version=clip_version, opt=model_opt) ckpt = torch.load(pjoin(model_opt.checkpoints_dir, model_opt.dataset_name, model_opt.name, 'model', which_model), map_location=opt.device) model_key = 't2m_transformer' if 't2m_transformer' in ckpt else 'trans' # print(ckpt.keys()) missing_keys, unexpected_keys = t2m_transformer.load_state_dict(ckpt[model_key], strict=False) assert len(unexpected_keys) == 0 assert all([k.startswith('clip_model.') for k in missing_keys]) print(f'Loading Mask Transformer {opt.name} from epoch {ckpt["ep"]}!') return t2m_transformer def load_res_model(res_opt): res_opt.num_quantizers = vq_opt.num_quantizers res_opt.num_tokens = vq_opt.nb_code res_transformer = ResidualTransformer(code_dim=vq_opt.code_dim, cond_mode='text', latent_dim=res_opt.latent_dim, ff_size=res_opt.ff_size, num_layers=res_opt.n_layers, num_heads=res_opt.n_heads, dropout=res_opt.dropout, clip_dim=512, shared_codebook=vq_opt.shared_codebook, cond_drop_prob=res_opt.cond_drop_prob, # codebook=vq_model.quantizer.codebooks[0] if opt.fix_token_emb else None, share_weight=res_opt.share_weight, clip_version=clip_version, opt=res_opt) ckpt = torch.load(pjoin(res_opt.checkpoints_dir, res_opt.dataset_name, res_opt.name, 'model', 'net_best_fid.tar'), map_location=opt.device) missing_keys, unexpected_keys = res_transformer.load_state_dict(ckpt['res_transformer'], strict=False) assert len(unexpected_keys) == 0 assert all([k.startswith('clip_model.') for k in missing_keys]) print(f'Loading Residual Transformer {res_opt.name} from epoch {ckpt["ep"]}!') return res_transformer if __name__ == '__main__': parser = EvalT2MOptions() opt = parser.parse() fixseed(opt.seed) opt.device = torch.device("cpu" if opt.gpu_id == -1 else "cuda:" + str(opt.gpu_id)) torch.autograd.set_detect_anomaly(True) dim_pose = 251 if opt.dataset_name == 'kit' else 263 # out_dir = pjoin(opt.check) root_dir = pjoin(opt.checkpoints_dir, opt.dataset_name, opt.name) model_dir = pjoin(root_dir, 'model') out_dir = pjoin(root_dir, 'eval') os.makedirs(out_dir, exist_ok=True) out_path = pjoin(out_dir, "%s.log"%opt.ext) f = open(pjoin(out_path), 'w') model_opt_path = pjoin(root_dir, 'opt.txt')
model_opt = get_opt(model_opt_path, device=opt.device)
4
2023-11-29 19:21:27+00:00
24k
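Both loaders in the record above use the same tolerant checkpoint contract: `clip_model.*` weights may be absent from the checkpoint (the transformers are constructed with a `clip_version` argument, so those weights come from elsewhere), while any unexpected key is treated as an error. A minimal sketch of that contract; `model`, `ckpt_path`, and `key` are placeholders, not names from the record:

```python
import torch

def load_checkpoint_without_clip(model: torch.nn.Module, ckpt_path: str, key: str):
    """Load weights but allow only 'clip_model.*' entries to be absent."""
    ckpt = torch.load(ckpt_path, map_location="cpu")
    missing, unexpected = model.load_state_dict(ckpt[key], strict=False)
    assert len(unexpected) == 0, f"unexpected keys: {unexpected}"
    assert all(k.startswith("clip_model.") for k in missing), f"missing: {missing}"
    return model
```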
dvlab-research/LLMGA
llmga/diffusers/src/diffusers/pipelines/ddpm/pipeline_ddpm.py
[ { "identifier": "randn_tensor", "path": "llmga/diffusers/src/diffusers/utils/torch_utils.py", "snippet": "def randn_tensor(\n shape: Union[Tuple, List],\n generator: Optional[Union[List[\"torch.Generator\"], \"torch.Generator\"]] = None,\n device: Optional[\"torch.device\"] = None,\n dtype: ...
from typing import List, Optional, Tuple, Union from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput import torch
18,721
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. class DDPMPipeline(DiffusionPipeline): r""" Pipeline for image generation. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Parameters: unet ([`UNet2DModel`]): A `UNet2DModel` to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of [`DDPMScheduler`], or [`DDIMScheduler`]. """ model_cpu_offload_seq = "unet" def __init__(self, unet, scheduler): super().__init__() self.register_modules(unet=unet, scheduler=scheduler) @torch.no_grad() def __call__( self, batch_size: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, num_inference_steps: int = 1000, output_type: Optional[str] = "pil", return_dict: bool = True,
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. class DDPMPipeline(DiffusionPipeline): r""" Pipeline for image generation. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Parameters: unet ([`UNet2DModel`]): A `UNet2DModel` to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of [`DDPMScheduler`], or [`DDIMScheduler`]. """ model_cpu_offload_seq = "unet" def __init__(self, unet, scheduler): super().__init__() self.register_modules(unet=unet, scheduler=scheduler) @torch.no_grad() def __call__( self, batch_size: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, num_inference_steps: int = 1000, output_type: Optional[str] = "pil", return_dict: bool = True,
) -> Union[ImagePipelineOutput, Tuple]:
2
2023-11-27 18:46:55+00:00
24k
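The truncated `__call__` above implements the standard DDPM sampling loop: start from Gaussian noise and repeatedly apply the UNet and `scheduler.step`. A minimal sketch of that loop, assuming a diffusers-style `UNet2DModel`/`DDPMScheduler` pair with a square `sample_size`, and omitting device placement, generator handling, and PIL conversion:

```python
import torch

@torch.no_grad()
def ddpm_sample(unet, scheduler, batch_size=1, num_inference_steps=1000):
    # start from pure Gaussian noise in image space
    shape = (batch_size, unet.config.in_channels,
             unet.config.sample_size, unet.config.sample_size)
    image = torch.randn(shape)
    scheduler.set_timesteps(num_inference_steps)
    for t in scheduler.timesteps:
        model_output = unet(image, t).sample                    # predict noise residual
        image = scheduler.step(model_output, t, image).prev_sample
    return (image / 2 + 0.5).clamp(0, 1)                        # map [-1, 1] -> [0, 1]
```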
JiahuiLei/GART
solver.py
[ { "identifier": "prepare_real_seq", "path": "lib_data/get_data.py", "snippet": "def prepare_real_seq(\n seq_name,\n dataset_mode,\n split=\"train\",\n image_zoom_ratio=0.5,\n balance=False,\n ins_avt_wild_start_end_skip=None,\n):\n logging.info(\"Prepare real seq: {}\".format(seq_na...
from matplotlib import pyplot as plt from pytorch3d.transforms import matrix_to_axis_angle from tqdm import tqdm from transforms3d.euler import euler2mat from omegaconf import OmegaConf from lib_data.get_data import prepare_real_seq from lib_data.data_provider import DatabasePoseProvider from lib_gart.templates import get_template from lib_gart.model import GaussianTemplateModel, AdditionalBones from lib_gart.optim_utils import * from lib_render.gauspl_renderer import render_cam_pcl from lib_gart.model_utils import transform_mu_frame from utils.misc import * from utils.viz import viz_render from pytorch3d.transforms import axis_angle_to_matrix, matrix_to_axis_angle from pytorch3d.ops import knn_points from lib_guidance.camera_sampling import sample_camera, fov2K, opencv2blender from viz_utils import viz_spinning, viz_human_all, viz_dog_all from utils.ssim import ssim from datetime import datetime from test_utils import test from lib_guidance.mvdream.mvdream_guidance import MVDream from utils.lpips import LPIPS import imageio import torch import numpy as np import os, os.path as osp, shutil, sys import time import logging import argparse
20,497
# from lib_marchingcubes.gaumesh_utils import MeshExtractor try: # from lib_guidance.sd_utils import StableDiffusion except: logging.warning("No guidance module") class TGFitter: def __init__( self, log_dir, profile_fn, mode, template_model_path="data/smpl_model/SMPL_NEUTRAL.pkl", device=torch.device("cuda:0"), **kwargs, ) -> None: self.log_dir = log_dir os.makedirs(self.log_dir, exist_ok=True) self.profile_fn = profile_fn try: shutil.copy(profile_fn, osp.join(self.log_dir, osp.basename(profile_fn))) except: pass self.mode = mode assert self.mode in ["human", "dog"], "Only support human and dog for now" self.template_model_path = template_model_path self.device = device # * auto set attr cfg = OmegaConf.load(profile_fn) # assign the cfg to self attribute for k, v in cfg.items(): setattr(self, k, v) for k, v in kwargs.items(): setattr(self, k, v) # * explicitly set flags self.FAST_TRAINING = getattr(self, "FAST_TRAINING", False) self.LAMBDA_SSIM = getattr(self, "LAMBDA_SSIM", 0.0) self.LAMBDA_LPIPS = getattr(self, "LAMBDA_LPIPS", 0.0) if self.LAMBDA_LPIPS > 0: self.lpips = LPIPS(net="vgg").to(self.device) for param in self.lpips.parameters(): param.requires_grad = False if isinstance(self.RESET_OPACITY_STEPS, int): self.RESET_OPACITY_STEPS = [ i for i in range(1, self.TOTAL_steps) if i % self.RESET_OPACITY_STEPS == 0 ] if isinstance(self.REGAUSSIAN_STEPS, int): self.REGAUSSIAN_STEPS = [ i for i in range(1, self.TOTAL_steps) if i % self.REGAUSSIAN_STEPS == 0 ] # prepare base R if self.mode == "human": viz_base_R_opencv = np.asarray(euler2mat(np.pi, 0, 0, "sxyz")) else: viz_base_R_opencv = np.asarray(euler2mat(np.pi / 2.0, 0, np.pi, "rxyz")) viz_base_R_opencv = torch.from_numpy(viz_base_R_opencv).float() self.viz_base_R = viz_base_R_opencv.to(self.device) if self.mode == "human": self.reg_base_R_global = ( matrix_to_axis_angle( torch.as_tensor(euler2mat(np.pi / 2.0, 0, np.pi / 2.0, "sxyz"))[ None ] )[0] .float() .to(self.device) ) else: # TODO, for generation of dog pass self.writer = create_log( self.log_dir, name=osp.basename(self.profile_fn).split(".")[0], debug=False ) return def prepare_fake_data(self, mode, *args, **kwargs): if mode == "amass": # todo: change to amass
# from lib_marchingcubes.gaumesh_utils import MeshExtractor try: # from lib_guidance.sd_utils import StableDiffusion except: logging.warning("No guidance module") class TGFitter: def __init__( self, log_dir, profile_fn, mode, template_model_path="data/smpl_model/SMPL_NEUTRAL.pkl", device=torch.device("cuda:0"), **kwargs, ) -> None: self.log_dir = log_dir os.makedirs(self.log_dir, exist_ok=True) self.profile_fn = profile_fn try: shutil.copy(profile_fn, osp.join(self.log_dir, osp.basename(profile_fn))) except: pass self.mode = mode assert self.mode in ["human", "dog"], "Only support human and dog for now" self.template_model_path = template_model_path self.device = device # * auto set attr cfg = OmegaConf.load(profile_fn) # assign the cfg to self attribute for k, v in cfg.items(): setattr(self, k, v) for k, v in kwargs.items(): setattr(self, k, v) # * explicitly set flags self.FAST_TRAINING = getattr(self, "FAST_TRAINING", False) self.LAMBDA_SSIM = getattr(self, "LAMBDA_SSIM", 0.0) self.LAMBDA_LPIPS = getattr(self, "LAMBDA_LPIPS", 0.0) if self.LAMBDA_LPIPS > 0: self.lpips = LPIPS(net="vgg").to(self.device) for param in self.lpips.parameters(): param.requires_grad = False if isinstance(self.RESET_OPACITY_STEPS, int): self.RESET_OPACITY_STEPS = [ i for i in range(1, self.TOTAL_steps) if i % self.RESET_OPACITY_STEPS == 0 ] if isinstance(self.REGAUSSIAN_STEPS, int): self.REGAUSSIAN_STEPS = [ i for i in range(1, self.TOTAL_steps) if i % self.REGAUSSIAN_STEPS == 0 ] # prepare base R if self.mode == "human": viz_base_R_opencv = np.asarray(euler2mat(np.pi, 0, 0, "sxyz")) else: viz_base_R_opencv = np.asarray(euler2mat(np.pi / 2.0, 0, np.pi, "rxyz")) viz_base_R_opencv = torch.from_numpy(viz_base_R_opencv).float() self.viz_base_R = viz_base_R_opencv.to(self.device) if self.mode == "human": self.reg_base_R_global = ( matrix_to_axis_angle( torch.as_tensor(euler2mat(np.pi / 2.0, 0, np.pi / 2.0, "sxyz"))[ None ] )[0] .float() .to(self.device) ) else: # TODO, for generation of dog pass self.writer = create_log( self.log_dir, name=osp.basename(self.profile_fn).split(".")[0], debug=False ) return def prepare_fake_data(self, mode, *args, **kwargs): if mode == "amass": # todo: change to amass
provider = DatabasePoseProvider(*args, **kwargs, device=torch.device("cpu"))
1
2023-11-27 17:30:04+00:00
24k
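A pattern worth noting in `TGFitter.__init__` above: the YAML profile loaded with OmegaConf is splatted onto the instance attribute by attribute, with explicit keyword arguments taking precedence. A minimal standalone sketch of that pattern; `ConfiguredRunner` and `profile.yaml` are hypothetical names for illustration:

```python
from omegaconf import OmegaConf

class ConfiguredRunner:
    def __init__(self, profile_fn: str, **kwargs) -> None:
        cfg = OmegaConf.load(profile_fn)   # YAML profile -> DictConfig
        for k, v in cfg.items():           # every config key becomes an attribute
            setattr(self, k, v)
        for k, v in kwargs.items():        # explicit kwargs override the profile
            setattr(self, k, v)

# runner = ConfiguredRunner("profile.yaml", FAST_TRAINING=True)  # hypothetical usage
```

This keeps every profile key addressable as `self.KEY`, which is why the class can later read optional flags such as `FAST_TRAINING` through `getattr` with defaults.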
skhu101/GauHuman
scene/dataset_readers.py
[ { "identifier": "read_extrinsics_text", "path": "scene/colmap_loader.py", "snippet": "def read_extrinsics_text(path):\n \"\"\"\n Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py\n \"\"\"\n images = {}\n with open(path, \"r\") as fid:\n while T...
import os import sys import numpy as np import torch import json import imageio import cv2 import random from PIL import Image from typing import NamedTuple from scene.colmap_loader import read_extrinsics_text, read_intrinsics_text, qvec2rotmat, \ read_extrinsics_binary, read_intrinsics_binary, read_points3D_binary, read_points3D_text from utils.graphics_utils import getWorld2View2, focal2fov, fov2focal from pathlib import Path from plyfile import PlyData, PlyElement from utils.sh_utils import SH2RGB from scene.gaussian_model import BasicPointCloud from smpl.smpl_numpy import SMPL from smplx.body_models import SMPLX from data.dna_rendering.dna_rendering_sample_code.SMCReader import SMCReader
17,242
# get the world-to-camera transform and set R, T R = np.transpose(w2c[:3,:3]) # R is stored transposed due to 'glm' in CUDA code T = w2c[:3, 3] # Reduce the image resolution by ratio, then remove the background ratio = image_scaling if ratio != 1.: H, W = int(image.shape[0] * ratio), int(image.shape[1] * ratio) image = cv2.resize(image, (W, H), interpolation=cv2.INTER_AREA) msk = cv2.resize(msk, (W, H), interpolation=cv2.INTER_NEAREST) K[:2] = K[:2] * ratio image = Image.fromarray(np.array(image*255.0, dtype=np.byte), "RGB") focalX = K[0,0] focalY = K[1,1] FovX = focal2fov(focalX, image.size[0]) FovY = focal2fov(focalY, image.size[1]) # load smpl data i = int(os.path.basename(image_path)[:-4]) vertices_path = os.path.join(path, 'smpl_vertices', '{}.npy'.format(i)) xyz = np.load(vertices_path).astype(np.float32) smpl_param_path = os.path.join(path, "smpl_params", '{}.npy'.format(i)) smpl_param = np.load(smpl_param_path, allow_pickle=True).item() Rh = smpl_param['Rh'] smpl_param['R'] = cv2.Rodrigues(Rh)[0].astype(np.float32) smpl_param['Th'] = smpl_param['Th'].astype(np.float32) smpl_param['shapes'] = smpl_param['shapes'].astype(np.float32) smpl_param['poses'] = smpl_param['poses'].astype(np.float32) # obtain the original bounds for point sampling min_xyz = np.min(xyz, axis=0) max_xyz = np.max(xyz, axis=0) min_xyz -= 0.05 max_xyz += 0.05 world_bound = np.stack([min_xyz, max_xyz], axis=0) # get bounding mask and background mask bound_mask = get_bound_2d_mask(world_bound, K, w2c[:3], image.size[1], image.size[0]) bound_mask = Image.fromarray(np.array(bound_mask*255.0, dtype=np.byte)) bkgd_mask = Image.fromarray(np.array(msk*255.0, dtype=np.byte)) cam_infos.append(CameraInfo(uid=idx, pose_id=pose_index, R=R, T=T, K=K, FovY=FovY, FovX=FovX, image=image, image_path=image_path, image_name=image_name, bkgd_mask=bkgd_mask, bound_mask=bound_mask, width=image.size[0], height=image.size[1], smpl_param=smpl_param, world_vertex=xyz, world_bound=world_bound, big_pose_smpl_param=big_pose_smpl_param, big_pose_world_vertex=big_pose_xyz, big_pose_world_bound=big_pose_world_bound)) idx += 1 return cam_infos def readZJUMoCapRefineInfo(path, white_background, output_path, eval): train_view = [4] test_view = [i for i in range(0, 23)] test_view.remove(train_view[0]) print("Reading Training Transforms") train_cam_infos = readCamerasZJUMoCapRefine(path, train_view, white_background, split='train') print("Reading Test Transforms") test_cam_infos = readCamerasZJUMoCapRefine(path, test_view, white_background, split='test', novel_view_vis=False) if not eval: train_cam_infos.extend(test_cam_infos) test_cam_infos = [] nerf_normalization = getNerfppNorm(train_cam_infos) if len(train_view) == 1: nerf_normalization['radius'] = 1 ply_path = os.path.join('output', output_path, "points3d.ply") if not os.path.exists(ply_path): # Since this data set has no colmap data, we start with random points num_pts = 6890 #100_000 print(f"Generating random point cloud ({num_pts})...") # We create random points inside the bounds of the synthetic Blender scenes xyz = train_cam_infos[0].big_pose_world_vertex shs = np.random.random((num_pts, 3)) / 255.0 pcd = BasicPointCloud(points=xyz, colors=SH2RGB(shs), normals=np.zeros((num_pts, 3))) storePly(ply_path, xyz, SH2RGB(shs) * 255) try: pcd = fetchPly(ply_path) except: pcd = None scene_info = SceneInfo(point_cloud=pcd, train_cameras=train_cam_infos, test_cameras=test_cam_infos, nerf_normalization=nerf_normalization, ply_path=ply_path) return scene_info ################################## 
DNARendering ################################## def readCamerasDNARendering(path, output_view, white_background, image_scaling=0.5, split='train'): cam_infos = [] if split == 'train': pose_start = 0 pose_interval = 1 pose_num = 100 else: pose_start = 0 pose_interval = 5 pose_num = 20 smc_reader = SMCReader(path) annots_file_path = path.replace('main', 'annotations').split('.')[0] + '_annots.smc' smc_annots_reader = SMCReader(annots_file_path) gender = smc_reader.actor_info['gender'] smpl_model = {}
# # Copyright (C) 2023, Inria # GRAPHDECO research group, https://team.inria.fr/graphdeco # All rights reserved. # # This software is free for non-commercial, research and evaluation use # under the terms of the LICENSE.md file. # # For inquiries contact george.drettakis@inria.fr # class CameraInfo(NamedTuple): uid: int pose_id: int R: np.array T: np.array K: np.array FovY: np.array FovX: np.array image: np.array image_path: str image_name: str bkgd_mask: np.array bound_mask: np.array width: int height: int smpl_param: dict world_vertex: np.array world_bound: np.array big_pose_smpl_param: dict big_pose_world_vertex: np.array big_pose_world_bound: np.array class SceneInfo(NamedTuple): point_cloud: BasicPointCloud train_cameras: list test_cameras: list nerf_normalization: dict ply_path: str def getNerfppNorm(cam_info): def get_center_and_diag(cam_centers): cam_centers = np.hstack(cam_centers) avg_cam_center = np.mean(cam_centers, axis=1, keepdims=True) center = avg_cam_center dist = np.linalg.norm(cam_centers - center, axis=0, keepdims=True) diagonal = np.max(dist) return center.flatten(), diagonal cam_centers = [] for cam in cam_info: W2C = getWorld2View2(cam.R, cam.T) C2W = np.linalg.inv(W2C) cam_centers.append(C2W[:3, 3:4]) center, diagonal = get_center_and_diag(cam_centers) radius = diagonal * 1.1 translate = -center return {"translate": translate, "radius": radius} def readColmapCameras(cam_extrinsics, cam_intrinsics, images_folder): cam_infos = [] for idx, key in enumerate(cam_extrinsics): sys.stdout.write('\r') # the exact output you're looking for: sys.stdout.write("Reading camera {}/{}".format(idx+1, len(cam_extrinsics))) sys.stdout.flush() extr = cam_extrinsics[key] intr = cam_intrinsics[extr.camera_id] height = intr.height width = intr.width uid = intr.id R = np.transpose(qvec2rotmat(extr.qvec)) T = np.array(extr.tvec) if intr.model=="SIMPLE_PINHOLE": focal_length_x = intr.params[0] FovY = focal2fov(focal_length_x, height) FovX = focal2fov(focal_length_x, width) elif intr.model=="PINHOLE": focal_length_x = intr.params[0] focal_length_y = intr.params[1] FovY = focal2fov(focal_length_y, height) FovX = focal2fov(focal_length_x, width) else: assert False, "Colmap camera model not handled: only undistorted datasets (PINHOLE or SIMPLE_PINHOLE cameras) supported!" 
image_path = os.path.join(images_folder, os.path.basename(extr.name)) image_name = os.path.basename(image_path).split(".")[0] image = Image.open(image_path) cam_info = CameraInfo(uid=uid, R=R, T=T, FovY=FovY, FovX=FovX, image=image, image_path=image_path, image_name=image_name, width=width, height=height) cam_infos.append(cam_info) sys.stdout.write('\n') return cam_infos def fetchPly(path): plydata = PlyData.read(path) vertices = plydata['vertex'] positions = np.vstack([vertices['x'], vertices['y'], vertices['z']]).T colors = np.vstack([vertices['red'], vertices['green'], vertices['blue']]).T / 255.0 normals = np.vstack([vertices['nx'], vertices['ny'], vertices['nz']]).T return BasicPointCloud(points=positions, colors=colors, normals=normals) def storePly(path, xyz, rgb): # Define the dtype for the structured array dtype = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('nx', 'f4'), ('ny', 'f4'), ('nz', 'f4'), ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')] normals = np.zeros_like(xyz) elements = np.empty(xyz.shape[0], dtype=dtype) attributes = np.concatenate((xyz, normals, rgb), axis=1) elements[:] = list(map(tuple, attributes)) # Create the PlyData object and write to file vertex_element = PlyElement.describe(elements, 'vertex') ply_data = PlyData([vertex_element]) ply_data.write(path) def readColmapSceneInfo(path, images, eval, llffhold=8): try: cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.bin") cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.bin") cam_extrinsics = read_extrinsics_binary(cameras_extrinsic_file) cam_intrinsics = read_intrinsics_binary(cameras_intrinsic_file) except: cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.txt") cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.txt") cam_extrinsics = read_extrinsics_text(cameras_extrinsic_file) cam_intrinsics = read_intrinsics_text(cameras_intrinsic_file) reading_dir = "images" if images == None else images cam_infos_unsorted = readColmapCameras(cam_extrinsics=cam_extrinsics, cam_intrinsics=cam_intrinsics, images_folder=os.path.join(path, reading_dir)) cam_infos = sorted(cam_infos_unsorted.copy(), key = lambda x : x.image_name) if eval: train_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold != 0] test_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold == 0] else: train_cam_infos = cam_infos test_cam_infos = [] nerf_normalization = getNerfppNorm(train_cam_infos) ply_path = os.path.join(path, "sparse/0/points3D.ply") bin_path = os.path.join(path, "sparse/0/points3D.bin") txt_path = os.path.join(path, "sparse/0/points3D.txt") if not os.path.exists(ply_path): print("Converting point3d.bin to .ply, will happen only the first time you open the scene.") try: xyz, rgb, _ = read_points3D_binary(bin_path) except: xyz, rgb, _ = read_points3D_text(txt_path) storePly(ply_path, xyz, rgb) try: pcd = fetchPly(ply_path) except: pcd = None scene_info = SceneInfo(point_cloud=pcd, train_cameras=train_cam_infos, test_cameras=test_cam_infos, nerf_normalization=nerf_normalization, ply_path=ply_path) return scene_info def readCamerasFromTransforms(path, transformsfile, white_background, extension=".png"): cam_infos = [] with open(os.path.join(path, transformsfile)) as json_file: contents = json.load(json_file) fovx = contents["camera_angle_x"] frames = contents["frames"] for idx, frame in enumerate(frames[:20]): cam_name = os.path.join(path, frame["file_path"] + extension) # NeRF 'transform_matrix' is a camera-to-world transform c2w = 
np.array(frame["transform_matrix"]) # change from OpenGL/Blender camera axes (Y up, Z back) to COLMAP (Y down, Z forward) c2w[:3, 1:3] *= -1 # get the world-to-camera transform and set R, T w2c = np.linalg.inv(c2w) R = np.transpose(w2c[:3,:3]) # R is stored transposed due to 'glm' in CUDA code T = w2c[:3, 3] image_path = os.path.join(path, cam_name) image_name = Path(cam_name).stem image = Image.open(image_path) im_data = np.array(image.convert("RGBA")) bg = np.array([1,1,1]) if white_background else np.array([0, 0, 0]) norm_data = im_data / 255.0 arr = norm_data[:,:,:3] * norm_data[:, :, 3:4] + bg * (1 - norm_data[:, :, 3:4]) image = Image.fromarray(np.array(arr*255.0, dtype=np.byte), "RGB") fovy = focal2fov(fov2focal(fovx, image.size[0]), image.size[1]) FovY = fovy FovX = fovx cam_infos.append(CameraInfo(uid=idx, R=R, T=T, FovY=FovY, FovX=FovX, image=image, image_path=image_path, image_name=image_name, bkgd_mask=None, bound_mask=None, width=image.size[0], height=image.size[1])) return cam_infos def readNerfSyntheticInfo(path, white_background, eval, extension=".png"): print("Reading Training Transforms") train_cam_infos = readCamerasFromTransforms(path, "transforms_train.json", white_background, extension) print("Reading Test Transforms") test_cam_infos = readCamerasFromTransforms(path, "transforms_test.json", white_background, extension) if not eval: train_cam_infos.extend(test_cam_infos) test_cam_infos = [] nerf_normalization = getNerfppNorm(train_cam_infos) ply_path = os.path.join(path, "points3d.ply") if not os.path.exists(ply_path): # Since this data set has no colmap data, we start with random points num_pts = 100_000 print(f"Generating random point cloud ({num_pts})...") # We create random points inside the bounds of the synthetic Blender scenes xyz = np.random.random((num_pts, 3)) * 2.6 - 1.3 shs = np.random.random((num_pts, 3)) / 255.0 pcd = BasicPointCloud(points=xyz, colors=SH2RGB(shs), normals=np.zeros((num_pts, 3))) storePly(ply_path, xyz, SH2RGB(shs) * 255) try: pcd = fetchPly(ply_path) except: pcd = None scene_info = SceneInfo(point_cloud=pcd, train_cameras=train_cam_infos, test_cameras=test_cam_infos, nerf_normalization=nerf_normalization, ply_path=ply_path) return scene_info ################################## MonoCap ################################## def get_camera_extrinsics_monocap(view_index, val=False, camera_view_num=36): def norm_np_arr(arr): return arr / np.linalg.norm(arr) def lookat(eye, at, up): zaxis = norm_np_arr(at - eye) xaxis = norm_np_arr(np.cross(zaxis, up)) yaxis = np.cross(xaxis, zaxis) _viewMatrix = np.array([ [xaxis[0], xaxis[1], xaxis[2], -np.dot(xaxis, eye)], [yaxis[0], yaxis[1], yaxis[2], -np.dot(yaxis, eye)], [-zaxis[0], -zaxis[1], -zaxis[2], np.dot(zaxis, eye)], [0 , 0 , 0 , 1 ] ]) return _viewMatrix def fix_eye(phi, theta): camera_distance = 3 return np.array([ camera_distance * np.sin(theta) * np.cos(phi), camera_distance * np.sin(theta) * np.sin(phi), camera_distance * np.cos(theta) ]) if val: at = np.array([0, 0.8, 0]).astype(np.float32) eye = fix_eye(np.pi + np.pi/12 + 1e-6, -np.pi/2 + 2 * np.pi * view_index / camera_view_num + 1e-6).astype(np.float32) + at extrinsics = lookat(eye, at, np.array([0, 1, 0])).astype(np.float32) return extrinsics def readCamerasMonoCapdata(path, output_view, white_background, image_scaling=1.0, split='train', novel_view_vis=False): cam_infos = [] if 'olek_images0812' in path or 'vlad_images1011' in path: pose_start = 1 else: pose_start = 0 if split == 'train': pose_interval = 5 pose_num = 100 elif split == 
'test': pose_interval = 30 pose_num = 17 annot_path = os.path.join(path, 'annots.npy') annots = np.load(annot_path, allow_pickle=True).item() cam = annots['cams'] # load SMPL model smpl_model = SMPL(sex='neutral', model_dir='assets/SMPL_NEUTRAL_renderpeople.pkl') # SMPL in canonical space big_pose_smpl_param = {} big_pose_smpl_param['R'] = np.eye(3).astype(np.float32) big_pose_smpl_param['Th'] = np.zeros((1,3)).astype(np.float32) big_pose_smpl_param['shapes'] = np.zeros((1,10)).astype(np.float32) big_pose_smpl_param['poses'] = np.zeros((1,72)).astype(np.float32) big_pose_smpl_param['poses'][0, 5] = 45/180*np.array(np.pi) big_pose_smpl_param['poses'][0, 8] = -45/180*np.array(np.pi) big_pose_smpl_param['poses'][0, 23] = -30/180*np.array(np.pi) big_pose_smpl_param['poses'][0, 26] = 30/180*np.array(np.pi) big_pose_xyz, _ = smpl_model(big_pose_smpl_param['poses'], big_pose_smpl_param['shapes'].reshape(-1)) big_pose_xyz = (np.matmul(big_pose_xyz, big_pose_smpl_param['R'].transpose()) + big_pose_smpl_param['Th']).astype(np.float32) # obtain the original bounds for point sampling big_pose_min_xyz = np.min(big_pose_xyz, axis=0) big_pose_max_xyz = np.max(big_pose_xyz, axis=0) big_pose_min_xyz -= 0.05 big_pose_max_xyz += 0.05 big_pose_world_bound = np.stack([big_pose_min_xyz, big_pose_max_xyz], axis=0) idx = 0 for pose_index in range(pose_start, pose_start+pose_num*pose_interval, pose_interval): for view_index in output_view: if novel_view_vis: view_index_look_at = view_index view_index = 0 # Load image, mask, K, D, R, T if 'olek_images0812' in path: image_path = os.path.join(path, 'images', str(view_index).zfill(2), str(pose_index).zfill(6)+'.jpg') msk_path = os.path.join(path, 'mask', str(view_index).zfill(2), str(pose_index).zfill(6)+'.png') elif 'vlad_images1011' in path: image_path = os.path.join(path, 'images', str(view_index).zfill(3), str(pose_index).zfill(6)+'.jpg') msk_path = os.path.join(path, 'mask', str(view_index).zfill(3), str(pose_index).zfill(6)+'.jpg') else: image_path = os.path.join(path, 'images', str(view_index).zfill(2), str(pose_index).zfill(4)+'.jpg') msk_path = os.path.join(path, 'mask', str(view_index).zfill(2), str(pose_index).zfill(4)+'.png') image_name = view_index image = np.array(imageio.imread(image_path).astype(np.float32) / 255.) 
msk = imageio.imread(msk_path).astype(np.float32) / 255 if msk.shape[-1] == 3: msk = msk[:,:,0] if not novel_view_vis: cam_id = view_index K = cam['K'][cam_id] D = cam['D'][cam_id] R = cam["R"][cam_id] T = cam["T"][cam_id][...,None].reshape(-1, 1) / 1000 # undistort image and mask image = cv2.undistort(image, K, D) msk = cv2.undistort(msk, K, D) else: pose = np.matmul(np.array([[1,0,0,0], [0,-1,0,0], [0,0,-1,0], [0,0,0,1]]), get_camera_extrinsics_monocap(view_index_look_at, val=True)) R = pose[:3,:3] T = pose[:3, 3].reshape(-1, 1) cam_id = view_index K = cam['K'][cam_id] # mask image if 'olek_images0812' in path or 'vlad_images1011' in path: image = image * msk[...,None].repeat(3, axis=2) else: image[msk == 0] = 1 if white_background else 0 # change from OpenGL/Blender camera axes (Y up, Z back) to COLMAP (Y down, Z forward) w2c = np.eye(4) w2c[:3,:3] = R w2c[:3,3:4] = T # get the world-to-camera transform and set R, T R = np.transpose(w2c[:3,:3]) # R is stored transposed due to 'glm' in CUDA code T = w2c[:3, 3] # Reduce the image resolution by ratio, then remove the background ratio = image_scaling if ratio != 1.0: H, W = int(image.shape[0] * ratio), int(image.shape[1] * ratio) image = cv2.resize(image, (W, H), interpolation=cv2.INTER_AREA) msk = cv2.resize(msk, (W, H), interpolation=cv2.INTER_NEAREST) K[:2] = K[:2] * ratio image = Image.fromarray(np.array(image*255.0, dtype=np.byte), "RGB") focalX = K[0,0] focalY = K[1,1] FovX = focal2fov(focalX, image.size[0]) FovY = focal2fov(focalY, image.size[1]) # load smpl data params_path = os.path.join(path, 'params', '{}.npy'.format(pose_index)) params = np.load(params_path, allow_pickle=True).item() Rh = params['Rh'].astype(np.float32) Th = params['Th'].astype(np.float32) smpl_param = {} smpl_param['shapes'] = np.array(params['shapes']).astype(np.float32) smpl_param['poses'] = np.array(params["poses"]).astype(np.float32).reshape(1,72) smpl_param['R'] = cv2.Rodrigues(Rh)[0].astype(np.float32) #np.eye(3).astype(np.float32) smpl_param['Th'] = Th #np.array(params["Th"]).astype(np.float32) xyz, _ = smpl_model(smpl_param['poses'], smpl_param['shapes'].reshape(-1)) xyz = (np.matmul(xyz, smpl_param['R'].transpose()) + smpl_param['Th']).astype(np.float32) # obtain the original bounds for point sampling min_xyz = np.min(xyz, axis=0) max_xyz = np.max(xyz, axis=0) min_xyz -= 0.1 max_xyz += 0.1 world_bound = np.stack([min_xyz, max_xyz], axis=0) # get bounding mask and background mask bound_mask = get_bound_2d_mask(world_bound, K, w2c[:3], image.size[1], image.size[0]) bound_mask = Image.fromarray(np.array(bound_mask*255.0, dtype=np.byte)) bkgd_mask = Image.fromarray(np.array(msk*255.0, dtype=np.byte)) cam_infos.append(CameraInfo(uid=idx, pose_id=pose_index, R=R, T=T, K=K, FovY=FovY, FovX=FovX, image=image, image_path=image_path, image_name=image_name, bkgd_mask=bkgd_mask, bound_mask=bound_mask, width=image.size[0], height=image.size[1], smpl_param=smpl_param, world_vertex=xyz, world_bound=world_bound, big_pose_smpl_param=big_pose_smpl_param, big_pose_world_vertex=big_pose_xyz, big_pose_world_bound=big_pose_world_bound)) idx += 1 return cam_infos def readMonoCapdataInfo(path, white_background, output_path, eval): if 'olek_images0812' in path: train_view = [44] test_view = [0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 49] test_view = [45] elif 'vlad_images1011' in path: train_view = [66] test_view = [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100] else: train_view = [0] test_view = range(1,11) print("Reading Training Transforms") train_cam_infos = 
readCamerasMonoCapdata(path, train_view, white_background, split='train') print("Reading Test Transforms") test_cam_infos = readCamerasMonoCapdata(path, test_view, white_background, split='test', novel_view_vis=False) if not eval: train_cam_infos.extend(test_cam_infos) test_cam_infos = [] nerf_normalization = getNerfppNorm(train_cam_infos) if len(train_view) == 1: nerf_normalization['radius'] = 1 # ply_path = os.path.join(path, "points3d.ply") ply_path = os.path.join('output', output_path, "points3d.ply") if not os.path.exists(ply_path): # Since this data set has no colmap data, we start with random points num_pts = 6890 #100_000 print(f"Generating random point cloud ({num_pts})...") # We create random points inside the bounds of the synthetic Blender scenes xyz = train_cam_infos[0].big_pose_world_vertex shs = np.random.random((num_pts, 3)) / 255.0 pcd = BasicPointCloud(points=xyz, colors=SH2RGB(shs), normals=np.zeros((num_pts, 3))) storePly(ply_path, xyz, SH2RGB(shs) * 255) try: pcd = fetchPly(ply_path) except: pcd = None scene_info = SceneInfo(point_cloud=pcd, train_cameras=train_cam_infos, test_cameras=test_cam_infos, nerf_normalization=nerf_normalization, ply_path=ply_path) return scene_info ################################## ZJUMoCapRefine ################################## def get_camera_extrinsics_zju_mocap_refine(view_index, val=False, camera_view_num=36): def norm_np_arr(arr): return arr / np.linalg.norm(arr) def lookat(eye, at, up): zaxis = norm_np_arr(at - eye) xaxis = norm_np_arr(np.cross(zaxis, up)) yaxis = np.cross(xaxis, zaxis) _viewMatrix = np.array([ [xaxis[0], xaxis[1], xaxis[2], -np.dot(xaxis, eye)], [yaxis[0], yaxis[1], yaxis[2], -np.dot(yaxis, eye)], [-zaxis[0], -zaxis[1], -zaxis[2], np.dot(zaxis, eye)], [0 , 0 , 0 , 1 ] ]) return _viewMatrix def fix_eye(phi, theta): camera_distance = 3 return np.array([ camera_distance * np.sin(theta) * np.cos(phi), camera_distance * np.sin(theta) * np.sin(phi), camera_distance * np.cos(theta) ]) if val: eye = fix_eye(np.pi + 2 * np.pi * view_index / camera_view_num + 1e-6, np.pi/2 + np.pi/12 + 1e-6).astype(np.float32) + np.array([0, 0, -0.8]).astype(np.float32) at = np.array([0, 0, -0.8]).astype(np.float32) extrinsics = lookat(eye, at, np.array([0, 0, -1])).astype(np.float32) return extrinsics def readCamerasZJUMoCapRefine(path, output_view, white_background, image_scaling=0.5, split='train', novel_view_vis=False): cam_infos = [] pose_start = 0 if split == 'train': pose_interval = 5 pose_num = 100 elif split == 'test': pose_start = 0 pose_interval = 30 pose_num = 17 ann_file = os.path.join(path, 'annots.npy') annots = np.load(ann_file, allow_pickle=True).item() cams = annots['cams'] ims = np.array([ np.array(ims_data['ims'])[output_view] for ims_data in annots['ims'][pose_start:pose_start + pose_num * pose_interval][::pose_interval] ]) cam_inds = np.array([ np.arange(len(ims_data['ims']))[output_view] for ims_data in annots['ims'][pose_start:pose_start + pose_num * pose_interval][::pose_interval] ]) if 'CoreView_313' in path or 'CoreView_315' in path: for i in range(ims.shape[0]): ims[i] = [x.split('/')[0] + '/' + x.split('/')[1].split('_')[4] + '.jpg' for x in ims[i]] smpl_model = SMPL(sex='neutral', model_dir='assets/SMPL_NEUTRAL_renderpeople.pkl') # SMPL in canonical space big_pose_smpl_param = {} big_pose_smpl_param['R'] = np.eye(3).astype(np.float32) big_pose_smpl_param['Th'] = np.zeros((1,3)).astype(np.float32) big_pose_smpl_param['shapes'] = np.zeros((1,10)).astype(np.float32) big_pose_smpl_param['poses'] = 
np.zeros((1,72)).astype(np.float32) big_pose_smpl_param['poses'][0, 5] = 45/180*np.array(np.pi) big_pose_smpl_param['poses'][0, 8] = -45/180*np.array(np.pi) big_pose_smpl_param['poses'][0, 23] = -30/180*np.array(np.pi) big_pose_smpl_param['poses'][0, 26] = 30/180*np.array(np.pi) big_pose_xyz, _ = smpl_model(big_pose_smpl_param['poses'], big_pose_smpl_param['shapes'].reshape(-1)) big_pose_xyz = (np.matmul(big_pose_xyz, big_pose_smpl_param['R'].transpose()) + big_pose_smpl_param['Th']).astype(np.float32) # obtain the original bounds for point sampling big_pose_min_xyz = np.min(big_pose_xyz, axis=0) big_pose_max_xyz = np.max(big_pose_xyz, axis=0) big_pose_min_xyz -= 0.05 big_pose_max_xyz += 0.05 big_pose_world_bound = np.stack([big_pose_min_xyz, big_pose_max_xyz], axis=0) idx = 0 for pose_index in range(pose_num): for view_index in range(len(output_view)): if novel_view_vis: view_index_look_at = view_index view_index = 0 # Load image, mask, K, D, R, T image_path = os.path.join(path, ims[pose_index][view_index].replace('\\', '/')) image_name = ims[pose_index][view_index].split('.')[0] image = np.array(imageio.imread(image_path).astype(np.float32)/255.) msk_path = image_path.replace('images', 'mask').replace('jpg', 'png') msk = imageio.imread(msk_path) msk = (msk != 0).astype(np.uint8) if not novel_view_vis: cam_ind = cam_inds[pose_index][view_index] K = np.array(cams['K'][cam_ind]) D = np.array(cams['D'][cam_ind]) R = np.array(cams['R'][cam_ind]) T = np.array(cams['T'][cam_ind]) / 1000. image = cv2.undistort(image, K, D) msk = cv2.undistort(msk, K, D) else: pose = np.matmul(np.array([[1,0,0,0], [0,-1,0,0], [0,0,-1,0], [0,0,0,1]]), get_camera_extrinsics_zju_mocap_refine(view_index_look_at, val=True)) R = pose[:3,:3] T = pose[:3, 3].reshape(-1, 1) cam_ind = cam_inds[pose_index][view_index] K = np.array(cams['K'][cam_ind]) image[msk == 0] = 1 if white_background else 0 # change from OpenGL/Blender camera axes (Y up, Z back) to COLMAP (Y down, Z forward) w2c = np.eye(4) w2c[:3,:3] = R w2c[:3,3:4] = T # get the world-to-camera transform and set R, T R = np.transpose(w2c[:3,:3]) # R is stored transposed due to 'glm' in CUDA code T = w2c[:3, 3] # Reduce the image resolution by ratio, then remove the background ratio = image_scaling if ratio != 1.: H, W = int(image.shape[0] * ratio), int(image.shape[1] * ratio) image = cv2.resize(image, (W, H), interpolation=cv2.INTER_AREA) msk = cv2.resize(msk, (W, H), interpolation=cv2.INTER_NEAREST) K[:2] = K[:2] * ratio image = Image.fromarray(np.array(image*255.0, dtype=np.byte), "RGB") focalX = K[0,0] focalY = K[1,1] FovX = focal2fov(focalX, image.size[0]) FovY = focal2fov(focalY, image.size[1]) # load smpl data i = int(os.path.basename(image_path)[:-4]) vertices_path = os.path.join(path, 'smpl_vertices', '{}.npy'.format(i)) xyz = np.load(vertices_path).astype(np.float32) smpl_param_path = os.path.join(path, "smpl_params", '{}.npy'.format(i)) smpl_param = np.load(smpl_param_path, allow_pickle=True).item() Rh = smpl_param['Rh'] smpl_param['R'] = cv2.Rodrigues(Rh)[0].astype(np.float32) smpl_param['Th'] = smpl_param['Th'].astype(np.float32) smpl_param['shapes'] = smpl_param['shapes'].astype(np.float32) smpl_param['poses'] = smpl_param['poses'].astype(np.float32) # obtain the original bounds for point sampling min_xyz = np.min(xyz, axis=0) max_xyz = np.max(xyz, axis=0) min_xyz -= 0.05 max_xyz += 0.05 world_bound = np.stack([min_xyz, max_xyz], axis=0) # get bounding mask and background mask bound_mask = get_bound_2d_mask(world_bound, K, w2c[:3], image.size[1],
image.size[0]) bound_mask = Image.fromarray(np.array(bound_mask*255.0, dtype=np.byte)) bkgd_mask = Image.fromarray(np.array(msk*255.0, dtype=np.byte)) cam_infos.append(CameraInfo(uid=idx, pose_id=pose_index, R=R, T=T, K=K, FovY=FovY, FovX=FovX, image=image, image_path=image_path, image_name=image_name, bkgd_mask=bkgd_mask, bound_mask=bound_mask, width=image.size[0], height=image.size[1], smpl_param=smpl_param, world_vertex=xyz, world_bound=world_bound, big_pose_smpl_param=big_pose_smpl_param, big_pose_world_vertex=big_pose_xyz, big_pose_world_bound=big_pose_world_bound)) idx += 1 return cam_infos def readZJUMoCapRefineInfo(path, white_background, output_path, eval): train_view = [4] test_view = [i for i in range(0, 23)] test_view.remove(train_view[0]) print("Reading Training Transforms") train_cam_infos = readCamerasZJUMoCapRefine(path, train_view, white_background, split='train') print("Reading Test Transforms") test_cam_infos = readCamerasZJUMoCapRefine(path, test_view, white_background, split='test', novel_view_vis=False) if not eval: train_cam_infos.extend(test_cam_infos) test_cam_infos = [] nerf_normalization = getNerfppNorm(train_cam_infos) if len(train_view) == 1: nerf_normalization['radius'] = 1 ply_path = os.path.join('output', output_path, "points3d.ply") if not os.path.exists(ply_path): # Since this data set has no colmap data, we start with random points num_pts = 6890 #100_000 print(f"Generating random point cloud ({num_pts})...") # We create random points inside the bounds of the synthetic Blender scenes xyz = train_cam_infos[0].big_pose_world_vertex shs = np.random.random((num_pts, 3)) / 255.0 pcd = BasicPointCloud(points=xyz, colors=SH2RGB(shs), normals=np.zeros((num_pts, 3))) storePly(ply_path, xyz, SH2RGB(shs) * 255) try: pcd = fetchPly(ply_path) except: pcd = None scene_info = SceneInfo(point_cloud=pcd, train_cameras=train_cam_infos, test_cameras=test_cam_infos, nerf_normalization=nerf_normalization, ply_path=ply_path) return scene_info ################################## DNARendering ################################## def readCamerasDNARendering(path, output_view, white_background, image_scaling=0.5, split='train'): cam_infos = [] if split == 'train': pose_start = 0 pose_interval = 1 pose_num = 100 else: pose_start = 0 pose_interval = 5 pose_num = 20 smc_reader = SMCReader(path) annots_file_path = path.replace('main', 'annotations').split('.')[0] + '_annots.smc' smc_annots_reader = SMCReader(annots_file_path) gender = smc_reader.actor_info['gender'] smpl_model = {}
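Both MonoCap and ZJU-MoCap loaders above repeat the same two camera conversions: packing R and T into a 4x4 world-to-camera matrix (with R handed over transposed for the glm-based CUDA rasterizer) and turning a pixel focal length into a field of view. Below is a minimal self-contained sketch of that math; the focal2fov body is an assumption based on the standard pinhole relation, since the repo's own helper is defined outside this excerpt.

    import math
    import numpy as np

    def focal2fov(focal: float, pixels: int) -> float:
        # Pinhole relation: fov = 2 * atan(extent / (2 * focal)), in radians.
        return 2.0 * math.atan(pixels / (2.0 * focal))

    R = np.eye(3)                     # rotation from the dataset annotations
    T = np.zeros((3, 1))              # translation in metres (annots store mm, hence the /1000 above)
    w2c = np.eye(4)
    w2c[:3, :3] = R
    w2c[:3, 3:4] = T
    R_stored = np.transpose(w2c[:3, :3])  # transposed copy, matching the loaders' glm convention
    T_stored = w2c[:3, 3]
    FovX = focal2fov(1111.0, 1024)        # hypothetical focal length and image width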
smpl_model[gender] = SMPLX('assets/models/smplx/', smpl_type='smplx',
13
2023-11-29 07:10:39+00:00
24k
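The record above initializes its point cloud from the canonical ("big pose") SMPL vertices, with a padded axis-aligned bound for point sampling and near-gray random colors expressed as zeroth-order spherical harmonics. A sketch of those two steps follows; the SH2RGB body shown here follows the usual 3D Gaussian Splatting convention (C0 * sh + 0.5) and is an assumption, since the helper is defined outside this excerpt.

    import numpy as np

    C0 = 0.28209479177387814           # zeroth-order SH basis constant

    def SH2RGB(sh):
        return sh * C0 + 0.5           # assumed 3DGS-style DC-term-to-RGB mapping

    xyz = np.random.rand(6890, 3)                       # stand-in for the SMPL vertices
    min_xyz = xyz.min(axis=0) - 0.05                    # same 5 cm padding as above
    max_xyz = xyz.max(axis=0) + 0.05
    world_bound = np.stack([min_xyz, max_xyz], axis=0)  # (2, 3) axis-aligned box
    shs = np.random.random((xyz.shape[0], 3)) / 255.0   # tiny random SH coefficients
    colors = SH2RGB(shs)                                # values clustered around 0.5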
UX-Decoder/LLaVA-Grounding
llava/model/language_model/llava_llama_gd.py
[ { "identifier": "LlavaMetaModel", "path": "llava/model/llava_arch.py", "snippet": "class LlavaMetaModel:\n\n def __init__(self, config):\n super(LlavaMetaModel, self).__init__(config)\n\n if hasattr(config, \"mm_vision_tower\"):\n self.vision_tower = build_vision_tower(config...
from typing import List, Optional, Tuple, Union from torch.nn import CrossEntropyLoss from transformers import AutoConfig, AutoModelForCausalLM, \ LlamaConfig, LlamaModel, LlamaForCausalLM from transformers.modeling_outputs import CausalLMOutputWithPast from ..llava_arch import LlavaMetaModel, LlavaMetaForCausalLM, LlavaMetaForCausalLM_gd,LlavaMetaForCausalLM_gd_interactive import torch import torch.nn as nn import transformers
15,136
llava_inputs['seg_inputs']=batched_inputs return self.forward_inner(**llava_inputs) def forward_inner( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, images: Optional[torch.FloatTensor] = None, seg_inputs: Optional[torch.FloatTensor] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, CausalLMOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict _, attention_mask, past_key_values, inputs_embeds, labels = self.prepare_inputs_labels_for_multimodal(input_ids, attention_mask, past_key_values, labels, images) # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model( input_ids=None, attention_mask=attention_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict ) flickr_len = len(seg_inputs['flickr']) ground_idx = [torch.argwhere(lb == 32002)[:, 0] for lb in labels] ##########flickr # if self.seg_model.model.coco_only: ground_idx_flickr = ground_idx[:flickr_len] padded_ground_idx_flickr = torch.nn.utils.rnn.pad_sequence(ground_idx_flickr, batch_first=True, padding_value=-1) padded_mask_flickr = padded_ground_idx_flickr != -1 padded_ground_idx_flickr[padded_ground_idx_flickr == -1] = 0 # ground_idx=[[-1] if len(idx)==0 else idx for idx in ground_idx] if self.seg_model.model.detach_seg: hidden_states = outputs[0].detach() else: hidden_states = outputs[0] hidden_states_flickr = hidden_states[:flickr_len] ground_hs_flickr = torch.gather(hidden_states_flickr, 1, padded_ground_idx_flickr[..., None].repeat(1, 1, hidden_states_flickr.shape[ -1])) seg_inputs['flickr_text_embeddings'] = (ground_hs_flickr, padded_mask_flickr) ##########coco coco_len = len(seg_inputs['coco']) ground_idx_coco = ground_idx[flickr_len:flickr_len+coco_len] if len(ground_idx_coco) > 0: for i, (idx, data) in enumerate(zip(ground_idx_coco, seg_inputs['coco'])): mask = data['grounding_mask'] ground_idx_coco[i] = idx[mask[:len(idx)]] padded_ground_idx_coco = torch.nn.utils.rnn.pad_sequence(ground_idx_coco, batch_first=True, padding_value=-1) padded_mask_coco = padded_ground_idx_coco != -1 padded_ground_idx_coco[padded_ground_idx_coco == -1] = 0 # hidden_states = outputs[0] hidden_states_coco = hidden_states[flickr_len:flickr_len+coco_len] ground_hs_coco = torch.gather(hidden_states_coco, 1, padded_ground_idx_coco[..., None].repeat(1, 1, hidden_states_coco.shape[ -1])) seg_inputs['coco_text_embeddings'] = (ground_hs_coco, padded_mask_coco) ground_loss = self.seg_model(seg_inputs) hidden_states_ = outputs[0] if self.seg_model.model.coco_only and len(ground_idx_coco) > 0: logits = self.lm_head(hidden_states_[flickr_len:]) else: logits = self.lm_head(hidden_states_) ############################################################ loss = None if labels is not None: # Shift so that tokens < n predict n shift_logits = logits[..., :-1, :].contiguous() if 
self.seg_model.model.coco_only and len(ground_idx_coco) > 0: shift_labels = labels[..., 1:][flickr_len:].contiguous() else: shift_labels = labels[..., 1:].contiguous() # Flatten the tokens loss_fct = CrossEntropyLoss() shift_logits = shift_logits.view(-1, self.config.vocab_size) shift_labels = shift_labels.view(-1) # Enable model/pipeline parallelism shift_labels = shift_labels.to(shift_logits.device) loss = loss_fct(shift_logits, shift_labels) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output ground_loss['llava']=loss ground_loss['loss_total']=sum(ground_loss.values()) ignore_list=[f'_{i}' for i in range(1,10)] ignore_list.append('interm') for key in list(ground_loss.keys()): if not key.endswith('_0') and key!='llava' and key !='loss_total': ground_loss.pop(key) return CausalLMOutputWithPast( loss=ground_loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
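The grounding path above reappears in every forward variant of this record: find the positions of the <seg> token (id 32002) in each label row, pad the ragged index lists, and gather the matching hidden states for the segmentation head. A runnable toy version of just that pattern (shapes are illustrative):

    import torch

    SEG_ID = 32002
    hidden = torch.randn(2, 7, 16)                    # (batch, seq_len, hidden_dim)
    labels = torch.tensor([[1, SEG_ID, 1, SEG_ID, 1, 1, 1],
                           [1, 1, SEG_ID, 1, 1, 1, 1]])
    idx = [torch.argwhere(lb == SEG_ID)[:, 0] for lb in labels]
    padded = torch.nn.utils.rnn.pad_sequence(idx, batch_first=True, padding_value=-1)
    valid = padded != -1                              # marks real <seg> slots vs padding
    padded[padded == -1] = 0                          # padding rows gather position 0, masked out downstream
    seg_states = torch.gather(hidden, 1, padded[..., None].repeat(1, 1, hidden.shape[-1]))
    # seg_states: (2, 2, 16); the pair (seg_states, valid) is what feeds the seg_model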
# Copyright 2023 Haotian Liu # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. IGNORE_INDEX=-100 # @dataclass class DataCollatorForSupervisedDataset(object): """Collate examples for supervised fine-tuning.""" # tokenizer: transformers.PreTrainedTokenizer def __call__(self, instances,tokenizer): input_ids, labels = tuple([instance[key] for instance in instances] for key in ("input_ids", "labels")) input_ids = torch.nn.utils.rnn.pad_sequence( input_ids, batch_first=True, padding_value=tokenizer.pad_token_id) labels = torch.nn.utils.rnn.pad_sequence(labels, batch_first=True, padding_value=IGNORE_INDEX) input_ids = input_ids[:, :tokenizer.model_max_length] labels = labels[:, :tokenizer.model_max_length] batch = dict( input_ids=input_ids, labels=labels, attention_mask=input_ids.ne(tokenizer.pad_token_id), ) if 'image_clip' in instances[0]: images = [instance['image_clip'] for instance in instances] if all(x is not None and x.shape == images[0].shape for x in images): batch['images'] = torch.stack(images) else: batch['images'] = images return batch class LlavaConfig(LlamaConfig): model_type = "llava" class LlavaLlamaModel(LlavaMetaModel, LlamaModel): config_class = LlavaConfig def __init__(self, config: LlamaConfig): super(LlavaLlamaModel, self).__init__(config) class LlavaLlamaForCausalLM(LlamaForCausalLM, LlavaMetaForCausalLM): config_class = LlavaConfig def __init__(self, config): super(LlamaForCausalLM, self).__init__(config) self.model = LlavaLlamaModel(config) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_model(self): return self.model def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, images: Optional[torch.FloatTensor] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, CausalLMOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict input_ids, attention_mask, past_key_values, inputs_embeds, labels = self.prepare_inputs_labels_for_multimodal(input_ids, attention_mask, past_key_values, labels, images) # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict ) hidden_states = outputs[0] logits = self.lm_head(hidden_states) loss = 
None if labels is not None: # Shift so that tokens < n predict n shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() # Flatten the tokens loss_fct = CrossEntropyLoss() shift_logits = shift_logits.view(-1, self.config.vocab_size) shift_labels = shift_labels.view(-1) # Enable model/pipeline parallelism shift_labels = shift_labels.to(shift_logits.device) loss = loss_fct(shift_logits, shift_labels) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs ): if past_key_values: input_ids = input_ids[:, -1:] # if `inputs_embeds` are passed, we only want to use them in the 1st generation step if inputs_embeds is not None and past_key_values is None: model_inputs = {"inputs_embeds": inputs_embeds} else: model_inputs = {"input_ids": input_ids} model_inputs.update( { "past_key_values": past_key_values, "use_cache": kwargs.get("use_cache"), "attention_mask": attention_mask, "images": kwargs.get("images", None), } ) return model_inputs class LlavaLlamaForCausalLM_gd(LlamaForCausalLM, LlavaMetaForCausalLM_gd): config_class = LlavaConfig def __init__(self, config): super(LlamaForCausalLM, self).__init__(config) self.model = LlavaLlamaModel(config) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_model(self): return self.model def forward(self,**batched_inputs): # print(kwargs.keys()) # images_for_llava=torch.stack([inp['image_clip'] for inp in batched_inputs['flickr']]) collator=DataCollatorForSupervisedDataset() if 'refcoco' in batched_inputs: if 'vg' in batched_inputs: llava_inputs = collator(batched_inputs['vg']+batched_inputs['refcoco'], tokenizer=batched_inputs['refcoco'][0]['tokenizer']) else: llava_inputs = collator( batched_inputs['refcoco'], tokenizer=batched_inputs['refcoco'][0]['tokenizer']) elif 'coco' in batched_inputs: llava_inputs=collator(batched_inputs['flickr']+batched_inputs['coco'],tokenizer=batched_inputs['flickr'][0]['tokenizer']) else: llava_inputs=collator(batched_inputs['flickr'],tokenizer=batched_inputs['flickr'][0]['tokenizer']) llava_inputs['seg_inputs']=batched_inputs return self.forward_inner(**llava_inputs) def forward_inner( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, images: Optional[torch.FloatTensor] = None, seg_inputs: Optional[torch.FloatTensor] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, CausalLMOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict _, attention_mask, past_key_values, inputs_embeds, labels = self.prepare_inputs_labels_for_multimodal(input_ids, attention_mask, 
past_key_values, labels, images) # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model( input_ids=None, attention_mask=attention_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict ) ground_idx_coco=[] ground_idx = [torch.argwhere(lb == 32002)[:, 0] for lb in labels] if 'refcoco' in seg_inputs: if 'vg' in seg_inputs: vg_len=len(seg_inputs['vg']) ground_idx_flickr = ground_idx[:vg_len] padded_ground_idx_flickr = torch.nn.utils.rnn.pad_sequence(ground_idx_flickr, batch_first=True, padding_value=-1) padded_mask_flickr = padded_ground_idx_flickr != -1 padded_ground_idx_flickr[padded_ground_idx_flickr == -1] = 0 # ground_idx=[[-1] if len(idx)==0 else idx for idx in ground_idx] hidden_states = outputs[0] hidden_states_flickr = hidden_states[:vg_len] ground_hs_flickr = torch.gather(hidden_states_flickr, 1, padded_ground_idx_flickr[..., None].repeat(1, 1, hidden_states_flickr.shape[ -1])) seg_inputs['vg_text_embeddings'] = (ground_hs_flickr, padded_mask_flickr) flickr_len = len(seg_inputs['refcoco']) ##########flickr # if self.seg_model.model.coco_only: ground_idx_flickr = ground_idx[vg_len:vg_len+flickr_len] if 'vg' in seg_inputs else ground_idx[:flickr_len] padded_ground_idx_flickr = torch.nn.utils.rnn.pad_sequence(ground_idx_flickr, batch_first=True, padding_value=-1) padded_mask_flickr = padded_ground_idx_flickr != -1 padded_ground_idx_flickr[padded_ground_idx_flickr == -1] = 0 # ground_idx=[[-1] if len(idx)==0 else idx for idx in ground_idx] hidden_states = outputs[0] hidden_states_flickr = hidden_states[vg_len:vg_len+flickr_len] if 'vg' in seg_inputs else hidden_states[:flickr_len] ground_hs_flickr = torch.gather(hidden_states_flickr, 1, padded_ground_idx_flickr[..., None].repeat(1, 1, hidden_states_flickr.shape[ -1])) seg_inputs['refcoco_text_embeddings'] = (ground_hs_flickr, padded_mask_flickr) # seg_inputs['flickr']=seg_inputs['refcoco'] else: flickr_len=len(seg_inputs['flickr']) ground_idx = [torch.argwhere(lb == 32002)[:, 0] for lb in labels] zero_mask = [0 if len(idx) == 0 else 1 for idx in ground_idx] ##########flickr # if self.seg_model.model.coco_only: ground_idx_flickr=ground_idx[:flickr_len] padded_ground_idx_flickr = torch.nn.utils.rnn.pad_sequence(ground_idx_flickr, batch_first=True, padding_value=-1) padded_mask_flickr=padded_ground_idx_flickr!=-1 padded_ground_idx_flickr[padded_ground_idx_flickr==-1]=0 # ground_idx=[[-1] if len(idx)==0 else idx for idx in ground_idx] hidden_states = outputs[0] hidden_states_flickr=hidden_states[:flickr_len] ground_hs_flickr=torch.gather(hidden_states_flickr,1,padded_ground_idx_flickr[...,None].repeat(1,1,hidden_states_flickr.shape[-1])) seg_inputs['flickr_text_embeddings']=(ground_hs_flickr,padded_mask_flickr) ##########coco ground_idx_coco = ground_idx[flickr_len:] if len(ground_idx_coco)>0: for i,(idx,data) in enumerate(zip(ground_idx_coco,seg_inputs['coco'])): mask=data['grounding_mask'] ground_idx_coco[i]=idx[mask[:len(idx)]] padded_ground_idx_coco = torch.nn.utils.rnn.pad_sequence(ground_idx_coco, batch_first=True, padding_value=-1) padded_mask_coco = padded_ground_idx_coco != -1 padded_ground_idx_coco[padded_ground_idx_coco == -1] = 0 hidden_states = outputs[0] hidden_states_coco = hidden_states[flickr_len:] ground_hs_coco = torch.gather(hidden_states_coco, 1, padded_ground_idx_coco[..., None].repeat(1, 1, hidden_states_coco.shape[ -1])) 
seg_inputs['coco_text_embeddings'] = (ground_hs_coco, padded_mask_coco) ground_loss=self.seg_model(seg_inputs) if self.seg_model.model.coco_only and len(ground_idx_coco)>0: logits = self.lm_head(hidden_states_coco) else: logits = self.lm_head(hidden_states) loss = None if labels is not None: # Shift so that tokens < n predict n shift_logits = logits[..., :-1, :].contiguous() if self.seg_model.model.coco_only and len(ground_idx_coco) > 0: shift_labels = labels[..., 1:][flickr_len:].contiguous() else: shift_labels = labels[..., 1:].contiguous() # Flatten the tokens loss_fct = CrossEntropyLoss() shift_logits = shift_logits.view(-1, self.config.vocab_size) shift_labels = shift_labels.view(-1) # Enable model/pipeline parallelism shift_labels = shift_labels.to(shift_logits.device) loss = loss_fct(shift_logits, shift_labels) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output ground_loss['llava']=loss ground_loss['loss_total']=sum(ground_loss.values()) return CausalLMOutputWithPast( loss=ground_loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs ): if past_key_values: input_ids = input_ids[:, -1:] # if `inputs_embeds` are passed, we only want to use them in the 1st generation step if inputs_embeds is not None and past_key_values is None: model_inputs = {"inputs_embeds": inputs_embeds} else: model_inputs = {"input_ids": input_ids} model_inputs.update( { "past_key_values": past_key_values, "use_cache": kwargs.get("use_cache"), "attention_mask": attention_mask, "images": kwargs.get("images", None), } ) return model_inputs def forward_eval(self, inputs): collator=DataCollatorForSupervisedDataset() llava_inputs=collator(inputs,tokenizer=inputs[0]['tokenizer']) llava_inputs['seg_inputs']=inputs return self.forward_inner_eval(**llava_inputs) def forward_inner_eval( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, images: Optional[torch.FloatTensor] = None, seg_inputs: Optional[torch.FloatTensor] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, CausalLMOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict _, attention_mask, past_key_values, inputs_embeds, labels = self.prepare_inputs_labels_for_multimodal(input_ids, attention_mask, past_key_values, labels, images) output_ids, seg_hidden_states = self.auto_regressive_generate(attention_mask, past_key_values, inputs_embeds, output_attentions, seg_inputs[0]["tokenizer"], return_dict) output_text = seg_inputs[0]["tokenizer"].batch_decode([output_ids], skip_special_tokens=True)[0] if len(seg_hidden_states)==0: return output_text, [], [] seg_tokens = torch.cat(seg_hidden_states, dim=1) padded_mask = seg_tokens.new_ones(seg_tokens.shape[:2]) > 0 predicted_boxes, 
predicted_masks=self.seg_model.model.forward_eval(seg_inputs, (seg_tokens,padded_mask)) return output_text, predicted_boxes, predicted_masks def auto_regressive_generate(self, attention_mask, past_key_values, inputs_embeds, output_attentions, tokenizer, return_dict, temperature=0.0 ): ######## # llm_inputs['obj_num'] = False seg_token = tokenizer.encode("<seg>")[1] seg_token_list = [] output_ids = [] output_logits = [] length = inputs_embeds.shape[1] for i in range(1000): # import pdb;pdb.set_trace() if i == 0: results = self.model( input_ids=None, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=True, output_attentions=output_attentions, output_hidden_states=True, return_dict=return_dict ) else: attention_mask = cur_hidden.new_ones( 1, past_key_values[0][0].shape[-2] + 1, device="cuda") # print("Attention mask shape: ", attention_mask.shape) results = self.model( input_ids=torch.as_tensor([[cur_id]], device=inputs_embeds.device), attention_mask=attention_mask, past_key_values=past_key_values, # inputs_embeds=cur_hidden, use_cache=True, output_attentions=output_attentions, output_hidden_states=True, return_dict=return_dict ) cur_hidden = results.hidden_states[-1][:, -1:] # last layer last token logits = self.lm_head(results[0]) cur_logits = logits[0][-1] if temperature < 1e-4: cur_id = int(torch.argmax(cur_logits)) else: probs = torch.softmax(cur_logits / temperature, dim=-1) cur_id = int(torch.multinomial(probs, num_samples=1)) past_key_values = results.past_key_values length += 1 if cur_id==seg_token: seg_token_list.append(cur_hidden) output_ids.append(cur_id) output_logits.append(cur_logits) if tokenizer.decode(output_ids).find("</s>")!=-1: break return output_ids,seg_token_list class LlavaLlamaForCausalLM_joint(LlavaLlamaForCausalLM_gd): def forward(self,**batched_inputs): # print(kwargs.keys()) # images_for_llava=torch.stack([inp['image_clip'] for inp in batched_inputs['flickr']]) collator=DataCollatorForSupervisedDataset() assert 'refcoco' in batched_inputs and 'flickr' in batched_inputs and 'llava' in batched_inputs for data in batched_inputs['llava']: data['image_clip']=data['image'] llava_inputs = collator( batched_inputs['flickr']+batched_inputs['refcoco']+batched_inputs['llava'], tokenizer=batched_inputs['refcoco'][0]['tokenizer']) # if 'refcoco' in batched_inputs: # llava_inputs = collator( batched_inputs['refcoco'], # tokenizer=batched_inputs['refcoco'][0]['tokenizer']) # elif 'coco' in batched_inputs: # llava_inputs=collator(batched_inputs['flickr']+batched_inputs['coco'],tokenizer=batched_inputs['flickr'][0]['tokenizer']) # else: # llava_inputs=collator(batched_inputs['flickr'],tokenizer=batched_inputs['flickr'][0]['tokenizer']) llava_inputs['seg_inputs']=batched_inputs return self.forward_inner(**llava_inputs) def forward_inner( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, images: Optional[torch.FloatTensor] = None, seg_inputs: Optional[torch.FloatTensor] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, CausalLMOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if
output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict _, attention_mask, past_key_values, inputs_embeds, labels = self.prepare_inputs_labels_for_multimodal(input_ids, attention_mask, past_key_values, labels, images) # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model( input_ids=None, attention_mask=attention_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict ) ground_idx_coco=[] # if 'refcoco' in seg_inputs: flickr_len = len(seg_inputs['flickr']) ground_idx = [torch.argwhere(lb == 32002)[:, 0] for lb in labels] ##########flickr # if self.seg_model.model.coco_only: ground_idx_flickr = ground_idx[:flickr_len] padded_ground_idx_flickr = torch.nn.utils.rnn.pad_sequence(ground_idx_flickr, batch_first=True, padding_value=-1) padded_mask_flickr = padded_ground_idx_flickr != -1 padded_ground_idx_flickr[padded_ground_idx_flickr == -1] = 0 # ground_idx=[[-1] if len(idx)==0 else idx for idx in ground_idx] hidden_states = outputs[0] hidden_states_flickr = hidden_states[:flickr_len] ground_hs_flickr = torch.gather(hidden_states_flickr, 1, padded_ground_idx_flickr[..., None].repeat(1, 1, hidden_states_flickr.shape[ -1])) seg_inputs['flickr_text_embeddings'] = (ground_hs_flickr, padded_mask_flickr) # seg_inputs['flickr']=seg_inputs['refcoco'] # else: ################################################# ################################################# refcoco_len=len(seg_inputs['refcoco']) ground_idx = [torch.argwhere(lb == 32002)[:, 0] for lb in labels] ##########flickr ground_idx_refcoco=ground_idx[flickr_len:flickr_len+refcoco_len] padded_ground_idx_refcoco = torch.nn.utils.rnn.pad_sequence(ground_idx_refcoco, batch_first=True, padding_value=-1) padded_mask_refcoco=padded_ground_idx_refcoco!=-1 padded_ground_idx_refcoco[padded_ground_idx_refcoco==-1]=0 # ground_idx=[[-1] if len(idx)==0 else idx for idx in ground_idx] # hidden_states = outputs[0] hidden_states_refcoco=hidden_states[flickr_len:flickr_len+refcoco_len] ground_hs_refcoco=torch.gather(hidden_states_refcoco,1,padded_ground_idx_refcoco[...,None].repeat(1,1,hidden_states_refcoco.shape[-1])) seg_inputs['refcoco_text_embeddings']=(ground_hs_refcoco,padded_mask_refcoco) ground_loss=self.seg_model(seg_inputs) # if self.seg_model.model.coco_only and len(ground_idx_coco)>0: # logits = self.lm_head(hidden_states_coco) # else: logits = self.lm_head(hidden_states) loss = None if labels is not None: # Shift so that tokens < n predict n shift_logits = logits[..., :-1, :].contiguous() if self.seg_model.model.coco_only and len(ground_idx_coco) > 0: shift_labels = labels[..., 1:][flickr_len:].contiguous() else: shift_labels = labels[..., 1:].contiguous() # Flatten the tokens loss_fct = CrossEntropyLoss() shift_logits = shift_logits.view(-1, self.config.vocab_size) shift_labels = shift_labels.view(-1) # Enable model/pipeline parallelism shift_labels = shift_labels.to(shift_logits.device) loss = loss_fct(shift_logits, shift_labels) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output ground_loss['llava']=loss ground_loss['loss_total']=sum(ground_loss.values()) return CausalLMOutputWithPast( loss=ground_loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, 
attentions=outputs.attentions, ) class LlavaLlamaForCausalLM_joint_2st(LlavaLlamaForCausalLM_gd): def forward(self,**batched_inputs): # print(kwargs.keys()) # images_for_llava=torch.stack([inp['image_clip'] for inp in batched_inputs['flickr']]) collator=DataCollatorForSupervisedDataset() assert 'coco' in batched_inputs and 'flickr' in batched_inputs and 'llava' in batched_inputs for data in batched_inputs['llava']: data['image_clip']=data['image'] llava_inputs = collator( batched_inputs['flickr']+batched_inputs['coco']+batched_inputs['llava'], tokenizer=batched_inputs['coco'][0]['tokenizer']) # if 'refcoco' in batched_inputs: # llava_inputs = collator( batched_inputs['refcoco'], # tokenizer=batched_inputs['refcoco'][0]['tokenizer']) # elif 'coco' in batched_inputs: # llava_inputs=collator(batched_inputs['flickr']+batched_inputs['coco'],tokenizer=batched_inputs['flickr'][0]['tokenizer']) # else: # llava_inputs=collator(batched_inputs['flickr'],tokenizer=batched_inputs['flickr'][0]['tokenizer']) llava_inputs['seg_inputs']=batched_inputs return self.forward_inner(**llava_inputs) def forward_inner( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, images: Optional[torch.FloatTensor] = None, seg_inputs: Optional[torch.FloatTensor] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, CausalLMOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict _, attention_mask, past_key_values, inputs_embeds, labels = self.prepare_inputs_labels_for_multimodal(input_ids, attention_mask, past_key_values, labels, images) # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model( input_ids=None, attention_mask=attention_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict ) flickr_len = len(seg_inputs['flickr']) ground_idx = [torch.argwhere(lb == 32002)[:, 0] for lb in labels] ##########flickr # if self.seg_model.model.coco_only: ground_idx_flickr = ground_idx[:flickr_len] padded_ground_idx_flickr = torch.nn.utils.rnn.pad_sequence(ground_idx_flickr, batch_first=True, padding_value=-1) padded_mask_flickr = padded_ground_idx_flickr != -1 padded_ground_idx_flickr[padded_ground_idx_flickr == -1] = 0 # ground_idx=[[-1] if len(idx)==0 else idx for idx in ground_idx] if self.seg_model.model.detach_seg: hidden_states = outputs[0].detach() else: hidden_states = outputs[0] hidden_states_flickr = hidden_states[:flickr_len] ground_hs_flickr = torch.gather(hidden_states_flickr, 1, padded_ground_idx_flickr[..., None].repeat(1, 1, hidden_states_flickr.shape[ -1])) seg_inputs['flickr_text_embeddings'] = (ground_hs_flickr, padded_mask_flickr) ##########coco coco_len = len(seg_inputs['coco']) ground_idx_coco = ground_idx[flickr_len:flickr_len+coco_len] if len(ground_idx_coco) > 0: for i, (idx, data) in enumerate(zip(ground_idx_coco, seg_inputs['coco'])): mask = 
data['grounding_mask'] ground_idx_coco[i] = idx[mask[:len(idx)]] padded_ground_idx_coco = torch.nn.utils.rnn.pad_sequence(ground_idx_coco, batch_first=True, padding_value=-1) padded_mask_coco = padded_ground_idx_coco != -1 padded_ground_idx_coco[padded_ground_idx_coco == -1] = 0 # hidden_states = outputs[0] hidden_states_coco = hidden_states[flickr_len:flickr_len+coco_len] ground_hs_coco = torch.gather(hidden_states_coco, 1, padded_ground_idx_coco[..., None].repeat(1, 1, hidden_states_coco.shape[ -1])) seg_inputs['coco_text_embeddings'] = (ground_hs_coco, padded_mask_coco) ground_loss = self.seg_model(seg_inputs) hidden_states_ = outputs[0] if self.seg_model.model.coco_only and len(ground_idx_coco) > 0: logits = self.lm_head(hidden_states_[flickr_len:]) else: logits = self.lm_head(hidden_states_) ############################################################ loss = None if labels is not None: # Shift so that tokens < n predict n shift_logits = logits[..., :-1, :].contiguous() if self.seg_model.model.coco_only and len(ground_idx_coco) > 0: shift_labels = labels[..., 1:][flickr_len:].contiguous() else: shift_labels = labels[..., 1:].contiguous() # Flatten the tokens loss_fct = CrossEntropyLoss() shift_logits = shift_logits.view(-1, self.config.vocab_size) shift_labels = shift_labels.view(-1) # Enable model/pipeline parallelism shift_labels = shift_labels.to(shift_logits.device) loss = loss_fct(shift_logits, shift_labels) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output ground_loss['llava']=loss ground_loss['loss_total']=sum(ground_loss.values()) ignore_list=[f'_{i}' for i in range(1,10)] ignore_list.append('interm') for key in list(ground_loss.keys()): if not key.endswith('_0') and key!='llava' and key !='loss_total': ground_loss.pop(key) return CausalLMOutputWithPast( loss=ground_loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
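Each forward variant in this record ends with the same next-token objective: logits are shifted left, labels shifted right, and both flattened into a single cross-entropy. Isolated as a runnable toy, the computation is just:

    import torch
    from torch.nn import CrossEntropyLoss

    vocab_size = 32                                   # toy vocabulary
    logits = torch.randn(2, 5, vocab_size)
    labels = torch.randint(0, vocab_size, (2, 5))
    shift_logits = logits[..., :-1, :].contiguous()   # position t predicts token t+1
    shift_labels = labels[..., 1:].contiguous()
    loss = CrossEntropyLoss()(shift_logits.view(-1, vocab_size),
                              shift_labels.view(-1))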
class LlavaLlamaForCausalLM_joint_2st_it_only_ref_instr(LlamaForCausalLM, LlavaMetaForCausalLM_gd_interactive):
3
2023-12-04 10:59:21+00:00
24k
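auto_regressive_generate in the record above implements token-by-token greedy decoding with a KV cache: only the newest token is fed forward, the cache carries the past, and hidden states at <seg> positions are collected along the way. Here is a stripped-down sketch of that control flow, with model_step as a hypothetical stand-in for one cached decoder pass:

    import torch

    def model_step(token_id, cache):
        # Hypothetical one-step decoder: returns next-token logits and the updated cache.
        return torch.randn(32), cache

    def greedy_decode(start_id, eos_id, max_steps=50):
        cache, cur_id, out = None, start_id, []
        for _ in range(max_steps):
            logits, cache = model_step(cur_id, cache)  # past context lives entirely in the cache
            cur_id = int(torch.argmax(logits))         # temperature ~0 means pure argmax, as above
            out.append(cur_id)
            if cur_id == eos_id:
                break
        return out

    tokens = greedy_decode(start_id=1, eos_id=2)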
Vchitect/VBench
vbench/third_party/umt/datasets/build.py
[ { "identifier": "TubeMaskingGenerator", "path": "vbench/third_party/umt/datasets/masking_generator.py", "snippet": "class TubeMaskingGenerator:\n def __init__(self, input_size, mask_ratio):\n self.frames, self.height, self.width = input_size\n self.num_patches_per_frame = self.height * ...
import os from torchvision import transforms from .transforms import * from .masking_generator import TubeMaskingGenerator, RandomMaskingGenerator from .mae import VideoMAE from .kinetics import VideoClsDataset from .kinetics_sparse import VideoClsDataset_sparse from .ssv2 import SSVideoClsDataset, SSRawFrameClsDataset
14,564
class DataAugmentationForVideoMAE(object): def __init__(self, args): self.input_mean = [0.485, 0.456, 0.406] # IMAGENET_DEFAULT_MEAN self.input_std = [0.229, 0.224, 0.225] # IMAGENET_DEFAULT_STD normalize = GroupNormalize(self.input_mean, self.input_std) self.train_augmentation = GroupMultiScaleCrop(args.input_size, [1, .875, .75, .66]) if args.color_jitter > 0: self.transform = transforms.Compose([ self.train_augmentation, GroupColorJitter(args.color_jitter), GroupRandomHorizontalFlip(flip=args.flip), Stack(roll=False), ToTorchFormatTensor(div=True), normalize, ]) else: self.transform = transforms.Compose([ self.train_augmentation, GroupRandomHorizontalFlip(flip=args.flip), Stack(roll=False), ToTorchFormatTensor(div=True), normalize, ]) if args.mask_type == 'tube': self.masked_position_generator = TubeMaskingGenerator( args.window_size, args.mask_ratio ) elif args.mask_type == 'random': self.masked_position_generator = RandomMaskingGenerator( args.window_size, args.mask_ratio ) elif args.mask_type in 'attention': self.masked_position_generator = None def __call__(self, images): process_data, _ = self.transform(images) if self.masked_position_generator is None: return process_data, -1 else: return process_data, self.masked_position_generator() def __repr__(self): repr = "(DataAugmentationForVideoMAE,\n" repr += " transform = %s,\n" % str(self.transform) repr += " Masked position generator = %s,\n" % str(self.masked_position_generator) repr += ")" return repr def build_pretraining_dataset(args): transform = DataAugmentationForVideoMAE(args)
class DataAugmentationForVideoMAE(object): def __init__(self, args): self.input_mean = [0.485, 0.456, 0.406] # IMAGENET_DEFAULT_MEAN self.input_std = [0.229, 0.224, 0.225] # IMAGENET_DEFAULT_STD normalize = GroupNormalize(self.input_mean, self.input_std) self.train_augmentation = GroupMultiScaleCrop(args.input_size, [1, .875, .75, .66]) if args.color_jitter > 0: self.transform = transforms.Compose([ self.train_augmentation, GroupColorJitter(args.color_jitter), GroupRandomHorizontalFlip(flip=args.flip), Stack(roll=False), ToTorchFormatTensor(div=True), normalize, ]) else: self.transform = transforms.Compose([ self.train_augmentation, GroupRandomHorizontalFlip(flip=args.flip), Stack(roll=False), ToTorchFormatTensor(div=True), normalize, ]) if args.mask_type == 'tube': self.masked_position_generator = TubeMaskingGenerator( args.window_size, args.mask_ratio ) elif args.mask_type == 'random': self.masked_position_generator = RandomMaskingGenerator( args.window_size, args.mask_ratio ) elif args.mask_type in 'attention': self.masked_position_generator = None def __call__(self, images): process_data, _ = self.transform(images) if self.masked_position_generator is None: return process_data, -1 else: return process_data, self.masked_position_generator() def __repr__(self): repr = "(DataAugmentationForVideoMAE,\n" repr += " transform = %s,\n" % str(self.transform) repr += " Masked position generator = %s,\n" % str(self.masked_position_generator) repr += ")" return repr def build_pretraining_dataset(args): transform = DataAugmentationForVideoMAE(args)
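The tube masking branch above is the key one for video pretraining: the same spatial patches are masked in every frame, so masked regions form tubes along the time axis and cannot be trivially reconstructed from neighboring frames. Below is a minimal re-creation consistent with the TubeMaskingGenerator interface used here; the repo's actual class may differ in detail.

    import numpy as np

    class TubeMasking:
        def __init__(self, input_size, mask_ratio):
            self.frames, self.height, self.width = input_size
            self.patches_per_frame = self.height * self.width
            self.masks_per_frame = int(mask_ratio * self.patches_per_frame)

        def __call__(self):
            per_frame = np.hstack([
                np.zeros(self.patches_per_frame - self.masks_per_frame),
                np.ones(self.masks_per_frame),
            ])
            np.random.shuffle(per_frame)                           # one random spatial pattern...
            return np.tile(per_frame, (self.frames, 1)).flatten()  # ...repeated across all frames

    mask = TubeMasking((8, 14, 14), 0.75)()   # 8 frames of 14x14 patches, 75% masked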
dataset = VideoMAE(
2
2023-11-27 12:41:46+00:00
24k
cswry/SeeSR
test_seesr.py
[ { "identifier": "StableDiffusionControlNetPipeline", "path": "pipelines/pipeline_seesr.py", "snippet": "class StableDiffusionControlNetPipeline(DiffusionPipeline, TextualInversionLoaderMixin):\n r\"\"\"\n Pipeline for text-to-image generation using Stable Diffusion with ControlNet guidance.\n\n ...
import os import sys import cv2 import glob import argparse import numpy as np import torch import torch.utils.checkpoint import torch.nn as nn import torch.nn.functional as F from PIL import Image from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from diffusers import AutoencoderKL, DDPMScheduler from diffusers.utils import check_min_version from diffusers.utils.import_utils import is_xformers_available from transformers import CLIPTextModel, CLIPTokenizer, CLIPImageProcessor from pipelines.pipeline_seesr import StableDiffusionControlNetPipeline from utils.misc import load_dreambooth_lora from utils.wavelet_color_fix import wavelet_color_fix, adain_color_fix from ram.models.ram_lora import ram from ram import inference_ram as inference from ram import get_transform from typing import Mapping, Any from torchvision import transforms from torchvision import transforms from models.controlnet import ControlNetModel from models.unet_2d_condition import UNet2DConditionModel
16,009
weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 # Move text_encode and vae to gpu and cast to weight_dtype text_encoder.to(accelerator.device, dtype=weight_dtype) vae.to(accelerator.device, dtype=weight_dtype) unet.to(accelerator.device, dtype=weight_dtype) controlnet.to(accelerator.device, dtype=weight_dtype) return validation_pipeline def load_tag_model(args, device='cuda'): model = ram(pretrained='preset/models/ram_swin_large_14m.pth', pretrained_condition=args.ram_ft_path, image_size=384, vit='swin_l') model.eval() model.to(device) return model def get_validation_prompt(args, image, model, device='cuda'): validation_prompt = "" lq = tensor_transforms(image).unsqueeze(0).to(device) lq = ram_transforms(lq) res = inference(lq, model) ram_encoder_hidden_states = model.generate_image_embeds(lq) validation_prompt = f"{res[0]}, {args.prompt}," return validation_prompt, ram_encoder_hidden_states def main(args, enable_xformers_memory_efficient_attention=True,): txt_path = os.path.join(args.output_dir, 'txt') os.makedirs(txt_path, exist_ok=True) accelerator = Accelerator( mixed_precision=args.mixed_precision, ) # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Handle the output folder creation if accelerator.is_main_process: os.makedirs(args.output_dir, exist_ok=True) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. if accelerator.is_main_process: accelerator.init_trackers("SeeSR") pipeline = load_seesr_pipeline(args, accelerator, enable_xformers_memory_efficient_attention) model = load_tag_model(args, accelerator.device) if accelerator.is_main_process: generator = torch.Generator(device=accelerator.device) if args.seed is not None: generator.manual_seed(args.seed) if os.path.isdir(args.image_path): image_names = sorted(glob.glob(f'{args.image_path}/*.*')) else: image_names = [args.image_path] for image_idx, image_name in enumerate(image_names[:]): print(f'================== process {image_idx} imgs... 
===================') validation_image = Image.open(image_name).convert("RGB") validation_prompt, ram_encoder_hidden_states = get_validation_prompt(args, validation_image, model) validation_prompt += args.added_prompt # clean, extremely detailed, best quality, sharp, clean negative_prompt = args.negative_prompt #dirty, messy, low quality, frames, deformed, if args.save_prompts: txt_save_path = f"{txt_path}/{os.path.basename(image_name).split('.')[0]}.txt" file = open(txt_save_path, "w") file.write(validation_prompt) file.close() print(f'{validation_prompt}') ori_width, ori_height = validation_image.size resize_flag = False rscale = args.upscale if ori_width < args.process_size//rscale or ori_height < args.process_size//rscale: scale = (args.process_size//rscale)/min(ori_width, ori_height) tmp_image = validation_image.resize((int(scale*ori_width), int(scale*ori_height))) validation_image = tmp_image resize_flag = True validation_image = validation_image.resize((validation_image.size[0]*rscale, validation_image.size[1]*rscale)) validation_image = validation_image.resize((validation_image.size[0]//8*8, validation_image.size[1]//8*8)) width, height = validation_image.size resize_flag = True # print(f'input size: {height}x{width}') for sample_idx in range(args.sample_times): os.makedirs(f'{args.output_dir}/sample{str(sample_idx).zfill(2)}/', exist_ok=True) for sample_idx in range(args.sample_times): with torch.autocast("cuda"): image = pipeline( validation_prompt, validation_image, num_inference_steps=args.num_inference_steps, generator=generator, height=height, width=width, guidance_scale=args.guidance_scale, negative_prompt=negative_prompt, conditioning_scale=args.conditioning_scale, start_point=args.start_point, ram_encoder_hidden_states=ram_encoder_hidden_states, latent_tiled_size=args.latent_tiled_size, latent_tiled_overlap=args.latent_tiled_overlap, args=args, ).images[0] if args.align_method == 'nofix': image = image else: if args.align_method == 'wavelet':
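The resizing block above encodes SeeSR's sizing contract: small inputs are first upsampled so the shorter side reaches process_size // upscale, the super-resolution scale is then applied, and both sides are snapped down to a multiple of 8 for the latent stride. The same arithmetic as a standalone helper (prepare_size is a name introduced here for illustration):

    def prepare_size(w, h, process_size=512, upscale=4):
        target_min = process_size // upscale
        if min(w, h) < target_min:                 # pre-upsample tiny inputs
            s = target_min / min(w, h)
            w, h = int(s * w), int(s * h)
        w, h = w * upscale, h * upscale            # apply the SR scale factor
        return w // 8 * 8, h // 8 * 8              # snap to the VAE/latent stride

    print(prepare_size(100, 150))                  # -> (512, 768) with the defaults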
''' * SeeSR: Towards Semantics-Aware Real-World Image Super-Resolution * Modified from diffusers by Rongyuan Wu * 24/12/2023 ''' sys.path.append(os.getcwd()) logger = get_logger(__name__, log_level="INFO") tensor_transforms = transforms.Compose([ transforms.ToTensor(), ]) ram_transforms = transforms.Compose([ transforms.Resize((384, 384)), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ]) def load_state_dict_diffbirSwinIR(model: nn.Module, state_dict: Mapping[str, Any], strict: bool=False) -> None: state_dict = state_dict.get("state_dict", state_dict) is_model_key_starts_with_module = list(model.state_dict().keys())[0].startswith("module.") is_state_dict_key_starts_with_module = list(state_dict.keys())[0].startswith("module.") if ( is_model_key_starts_with_module and (not is_state_dict_key_starts_with_module) ): state_dict = {f"module.{key}": value for key, value in state_dict.items()} if ( (not is_model_key_starts_with_module) and is_state_dict_key_starts_with_module ): state_dict = {key[len("module."):]: value for key, value in state_dict.items()} model.load_state_dict(state_dict, strict=strict) def load_seesr_pipeline(args, accelerator, enable_xformers_memory_efficient_attention): # Load scheduler, tokenizer and models. scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_path, subfolder="scheduler") text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_path, subfolder="text_encoder") tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_path, subfolder="tokenizer") vae = AutoencoderKL.from_pretrained(args.pretrained_model_path, subfolder="vae") feature_extractor = CLIPImageProcessor.from_pretrained(f"{args.pretrained_model_path}/feature_extractor") unet = UNet2DConditionModel.from_pretrained(args.seesr_model_path, subfolder="unet") controlnet = ControlNetModel.from_pretrained(args.seesr_model_path, subfolder="controlnet") # Freeze vae and text_encoder vae.requires_grad_(False) text_encoder.requires_grad_(False) unet.requires_grad_(False) controlnet.requires_grad_(False) if enable_xformers_memory_efficient_attention: if is_xformers_available(): unet.enable_xformers_memory_efficient_attention() controlnet.enable_xformers_memory_efficient_attention() else: raise ValueError("xformers is not available. Make sure it is installed correctly") # Get the validation pipeline validation_pipeline = StableDiffusionControlNetPipeline( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, feature_extractor=feature_extractor, unet=unet, controlnet=controlnet, scheduler=scheduler, safety_checker=None, requires_safety_checker=False, ) validation_pipeline._init_tiled_vae(encoder_tile_size=args.vae_encoder_tiled_size, decoder_tile_size=args.vae_decoder_tiled_size) # For mixed precision training we cast the text_encoder and vae weights to half-precision # as these models are only used for inference, keeping weights in full precision is not required. 
weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 # Move text_encode and vae to gpu and cast to weight_dtype text_encoder.to(accelerator.device, dtype=weight_dtype) vae.to(accelerator.device, dtype=weight_dtype) unet.to(accelerator.device, dtype=weight_dtype) controlnet.to(accelerator.device, dtype=weight_dtype) return validation_pipeline def load_tag_model(args, device='cuda'): model = ram(pretrained='preset/models/ram_swin_large_14m.pth', pretrained_condition=args.ram_ft_path, image_size=384, vit='swin_l') model.eval() model.to(device) return model def get_validation_prompt(args, image, model, device='cuda'): validation_prompt = "" lq = tensor_transforms(image).unsqueeze(0).to(device) lq = ram_transforms(lq) res = inference(lq, model) ram_encoder_hidden_states = model.generate_image_embeds(lq) validation_prompt = f"{res[0]}, {args.prompt}," return validation_prompt, ram_encoder_hidden_states def main(args, enable_xformers_memory_efficient_attention=True,): txt_path = os.path.join(args.output_dir, 'txt') os.makedirs(txt_path, exist_ok=True) accelerator = Accelerator( mixed_precision=args.mixed_precision, ) # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Handle the output folder creation if accelerator.is_main_process: os.makedirs(args.output_dir, exist_ok=True) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. if accelerator.is_main_process: accelerator.init_trackers("SeeSR") pipeline = load_seesr_pipeline(args, accelerator, enable_xformers_memory_efficient_attention) model = load_tag_model(args, accelerator.device) if accelerator.is_main_process: generator = torch.Generator(device=accelerator.device) if args.seed is not None: generator.manual_seed(args.seed) if os.path.isdir(args.image_path): image_names = sorted(glob.glob(f'{args.image_path}/*.*')) else: image_names = [args.image_path] for image_idx, image_name in enumerate(image_names[:]): print(f'================== process {image_idx} imgs... 
===================') validation_image = Image.open(image_name).convert("RGB") validation_prompt, ram_encoder_hidden_states = get_validation_prompt(args, validation_image, model) validation_prompt += args.added_prompt # clean, extremely detailed, best quality, sharp, clean negative_prompt = args.negative_prompt #dirty, messy, low quality, frames, deformed, if args.save_prompts: txt_save_path = f"{txt_path}/{os.path.basename(image_name).split('.')[0]}.txt" file = open(txt_save_path, "w") file.write(validation_prompt) file.close() print(f'{validation_prompt}') ori_width, ori_height = validation_image.size resize_flag = False rscale = args.upscale if ori_width < args.process_size//rscale or ori_height < args.process_size//rscale: scale = (args.process_size//rscale)/min(ori_width, ori_height) tmp_image = validation_image.resize((int(scale*ori_width), int(scale*ori_height))) validation_image = tmp_image resize_flag = True validation_image = validation_image.resize((validation_image.size[0]*rscale, validation_image.size[1]*rscale)) validation_image = validation_image.resize((validation_image.size[0]//8*8, validation_image.size[1]//8*8)) width, height = validation_image.size resize_flag = True # print(f'input size: {height}x{width}') for sample_idx in range(args.sample_times): os.makedirs(f'{args.output_dir}/sample{str(sample_idx).zfill(2)}/', exist_ok=True) for sample_idx in range(args.sample_times): with torch.autocast("cuda"): image = pipeline( validation_prompt, validation_image, num_inference_steps=args.num_inference_steps, generator=generator, height=height, width=width, guidance_scale=args.guidance_scale, negative_prompt=negative_prompt, conditioning_scale=args.conditioning_scale, start_point=args.start_point, ram_encoder_hidden_states=ram_encoder_hidden_states, latent_tiled_size=args.latent_tiled_size, latent_tiled_overlap=args.latent_tiled_overlap, args=args, ).images[0] if args.align_method == 'nofix': image = image else: if args.align_method == 'wavelet':
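load_state_dict_diffbirSwinIR above handles the classic checkpoint mismatch: weights saved under nn.DataParallel carry a "module." prefix that a bare model does not expect, and vice versa. The core of that normalization, extracted (normalize_module_prefix is a name introduced here; it assumes non-empty state dicts and compares only the first key, as the original does):

    import torch.nn as nn

    def normalize_module_prefix(model: nn.Module, state_dict: dict) -> dict:
        model_prefixed = next(iter(model.state_dict())).startswith("module.")
        sd_prefixed = next(iter(state_dict)).startswith("module.")
        if model_prefixed and not sd_prefixed:
            return {f"module.{k}": v for k, v in state_dict.items()}
        if (not model_prefixed) and sd_prefixed:
            return {k[len("module."):]: v for k, v in state_dict.items()}
        return state_dict

    m = nn.Linear(4, 4)
    assert normalize_module_prefix(m, m.state_dict()).keys() == m.state_dict().keys()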
image = wavelet_color_fix(image, validation_image)
2
2023-11-27 08:50:33+00:00
24k
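The SeeSR test script in the row above resizes its input in three steps before sampling: if the low-quality image's short side is below process_size // upscale it is enlarged first, the result is then upsampled by the requested scale factor, and finally both sides are snapped down to multiples of 8 so the VAE downsampling divides evenly. A minimal, self-contained sketch of that preprocessing, assuming PIL (the function name and defaults here are illustrative, not the repo's API):

from PIL import Image


def preprocess_lq(img: Image.Image, upscale: int = 4, process_size: int = 512) -> Image.Image:
    # Enlarge first if the short side is below the minimum working size.
    w, h = img.size
    min_side = process_size // upscale
    if min(w, h) < min_side:
        s = min_side / min(w, h)
        img = img.resize((int(s * w), int(s * h)))
        w, h = img.size
    # Upsample by the super-resolution factor.
    img = img.resize((w * upscale, h * upscale))
    # Snap both sides down to multiples of 8 for the VAE latent grid.
    w, h = img.size
    return img.resize((w // 8 * 8, h // 8 * 8))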
xmu-xiaoma666/X-Dreamer
train_x_dreamer.py
[ { "identifier": "DatasetMesh", "path": "dataset/dataset_mesh.py", "snippet": "class DatasetMesh(torch.utils.data.Dataset):\n\n\n def __init__(self, glctx, FLAGS, validate=False, gif=False):\n # Init \n self.glctx = glctx\n self.FLAGS = FLAGS\n sel...
import os import time import argparse import json import math import numpy as np import torch import nvdiffrast.torch as dr import itertools import xatlas import open3d as o3d import random import imageio import os.path as osp import pickle from dataset.dataset_mesh import DatasetMesh from dataset.dataset_mesh import get_camera_params from geometry.dmtet_x_dreamer import DMTetGeometry from geometry.dlmesh_x_dreamer import DLMesh from render import obj from render import material from render import util from render import mesh from render import texture from render import mlptexture from render import light from render import render from sd_cglora import StableDiffusion from tqdm import tqdm from render import util from render.video import Video
15,112
FLAGS.gpu_number = 1 FLAGS.sdf_init_shape_scale=[1.0, 1.0, 1.0] FLAGS.multi_gpu = "WORLD_SIZE" in os.environ and int(os.environ["WORLD_SIZE"]) > 1 if FLAGS.multi_gpu: FLAGS.gpu_number = int(os.environ["WORLD_SIZE"]) FLAGS.local_rank = int(os.environ["LOCAL_RANK"]) torch.distributed.init_process_group(backend="nccl", world_size = FLAGS.gpu_number, rank = FLAGS.local_rank) torch.cuda.set_device(FLAGS.local_rank) if FLAGS.config is not None: data = json.load(open(FLAGS.config, 'r')) for key in data: FLAGS.__dict__[key] = data[key] if FLAGS.display_res is None: FLAGS.display_res = FLAGS.train_res if FLAGS.local_rank == 0: print("Config / Flags:") print("---------") for key in FLAGS.__dict__.keys(): print(key, FLAGS.__dict__[key]) print("---------") seed_everything(FLAGS.seed, FLAGS.local_rank) os.makedirs(FLAGS.out_dir, exist_ok=True) glctx = dr.RasterizeCudaContext() # ============================================================================================== # Create data pipeline # ============================================================================================== dataset_train = DatasetMesh(glctx, FLAGS, validate=False) dataset_validate = DatasetMesh(glctx, FLAGS, validate=True) dataset_gif = DatasetMesh(glctx, FLAGS, gif=True) # ============================================================================================== # Create env light with trainable parameters # ============================================================================================== if FLAGS.mode == 'appearance_modeling' and FLAGS.base_mesh is not None: if FLAGS.learn_light: lgt = light.create_trainable_env_rnd(512, scale=0.0, bias=1) else: lgt = light.load_env(FLAGS.envmap, scale=FLAGS.env_scale) else: lgt = None if FLAGS.sdf_init_shape in ['ellipsoid', 'cylinder', 'custom_mesh'] and FLAGS.mode == 'geometry_modeling': if FLAGS.sdf_init_shape == 'ellipsoid': init_shape = o3d.geometry.TriangleMesh.create_sphere(1) elif FLAGS.sdf_init_shape == 'cylinder': init_shape = o3d.geometry.TriangleMesh.create_cylinder(radius=0.75, height=1.2, resolution=20, split=4, create_uv_map=False) elif FLAGS.sdf_init_shape == 'custom_mesh': if FLAGS.base_mesh: init_shape = get_normalize_mesh(FLAGS.base_mesh) else: assert False, "[Error] The path of custom mesh is invalid ! 
(geometry modeling)" else: assert False, "Invalid init type" vertices = np.asarray(init_shape.vertices) vertices[...,0]=vertices[...,0] * FLAGS.sdf_init_shape_scale[0] vertices[...,1]=vertices[...,1] * FLAGS.sdf_init_shape_scale[1] vertices[...,2]=vertices[...,2] * FLAGS.sdf_init_shape_scale[2] vertices = vertices @ util.rotate_x_2(np.deg2rad(FLAGS.sdf_init_shape_rotate_x)) vertices[...,1]=vertices[...,1] + FLAGS.translation_y init_shape.vertices = o3d.cuda.pybind.utility.Vector3dVector(vertices) points_surface = np.asarray(init_shape.sample_points_poisson_disk(5000).points) init_shape = o3d.t.geometry.TriangleMesh.from_legacy(init_shape) scene = o3d.t.geometry.RaycastingScene() scene.add_triangles(init_shape) scene_and_vertices = [scene, points_surface] guidance = StableDiffusion(device = 'cuda', mode = FLAGS.mode, text = FLAGS.text, add_directional_text = FLAGS.add_directional_text, batch = FLAGS.batch, guidance_weight = FLAGS.guidance_weight, sds_weight_strategy = FLAGS.sds_weight_strategy, early_time_step_range = FLAGS.early_time_step_range, late_time_step_range= FLAGS.late_time_step_range) if FLAGS.mode == 'geometry_modeling' : geometry = DMTetGeometry(FLAGS.dmtet_grid, FLAGS.mesh_scale, FLAGS) mat = initial_guness_material(geometry, True, FLAGS) # Run optimization geometry, mat = optimize_mesh(glctx, geometry, mat, lgt, dataset_train, dataset_validate, FLAGS, optimize_light=FLAGS.learn_light,optimize_geometry= not FLAGS.lock_pos, guidance= guidance, scene_and_vertices= scene_and_vertices) if FLAGS.local_rank == 0 and FLAGS.validate: validate(glctx, geometry, mat, lgt, dataset_gif, os.path.join(FLAGS.out_dir, "validate"), FLAGS) # Create textured mesh from result if FLAGS.local_rank == 0: base_mesh = xatlas_uvmap(glctx, geometry, mat, FLAGS) # # Free temporaries / cached memory torch.cuda.empty_cache() mat['kd_ks_normal'].cleanup() del mat['kd_ks_normal'] if FLAGS.local_rank == 0: # Dump mesh for debugging. os.makedirs(os.path.join(FLAGS.out_dir, "dmtet_mesh"), exist_ok=True) obj.write_obj(os.path.join(FLAGS.out_dir, "dmtet_mesh/"), base_mesh) elif FLAGS.mode == 'appearance_modeling': # ============================================================================================== # Train with fixed topology (mesh) # ============================================================================================== if FLAGS.base_mesh is None: assert False, "[Error] The path of custom mesh is invalid ! (appearance modeling)" base_mesh = mesh.load_mesh(FLAGS.base_mesh)
############################################################################### # Mix background into a dataset image ############################################################################### @torch.no_grad() def prepare_batch(target, background= 'black'): target['mv'] = target['mv'].cuda() target['mvp'] = target['mvp'].cuda() target['campos'] = target['campos'].cuda() target['fov'] = target['fov'].cuda() target['normal_rotate'] = target['normal_rotate'].cuda() batch_size = target['mv'].shape[0] resolution = target['resolution'] if background == 'white': target['background']= torch.ones(batch_size, resolution[0], resolution[1], 3, dtype=torch.float32, device='cuda') if background == 'black': target['background'] = torch.zeros(batch_size, resolution[0], resolution[1], 3, dtype=torch.float32, device='cuda') return target ############################################################################### # UV - map geometry & convert to a mesh ############################################################################### @torch.no_grad() def xatlas_uvmap(glctx, geometry, mat, FLAGS): eval_mesh = geometry.getMesh(mat) # Create uvs with xatlas v_pos = eval_mesh.v_pos.detach().cpu().numpy() t_pos_idx = eval_mesh.t_pos_idx.detach().cpu().numpy() vmapping, indices, uvs = xatlas.parametrize(v_pos, t_pos_idx) # Convert to tensors indices_int64 = indices.astype(np.uint64, casting='same_kind').view(np.int64) uvs = torch.tensor(uvs, dtype=torch.float32, device='cuda') faces = torch.tensor(indices_int64, dtype=torch.int64, device='cuda') new_mesh = mesh.Mesh(v_tex=uvs, t_tex_idx=faces, base=eval_mesh) mask, kd, ks, normal = render.render_uv(glctx, new_mesh, FLAGS.texture_res, eval_mesh.material['kd_ks_normal']) if FLAGS.layers > 1: kd = torch.cat((kd, torch.rand_like(kd[...,0:1])), dim=-1) kd_min, kd_max = torch.tensor(FLAGS.kd_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.kd_max, dtype=torch.float32, device='cuda') ks_min, ks_max = torch.tensor(FLAGS.ks_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.ks_max, dtype=torch.float32, device='cuda') nrm_min, nrm_max = torch.tensor(FLAGS.nrm_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.nrm_max, dtype=torch.float32, device='cuda') new_mesh.material = material.Material({ 'bsdf' : mat['bsdf'], 'kd' : texture.Texture2D(kd, min_max=[kd_min, kd_max]), 'ks' : texture.Texture2D(ks, min_max=[ks_min, ks_max]), 'normal' : texture.Texture2D(normal, min_max=[nrm_min, nrm_max]) }) return new_mesh @torch.no_grad() def xatlas_uvmap1(glctx, geometry, mat, FLAGS): eval_mesh = geometry.getMesh(mat) new_mesh = mesh.Mesh( base=eval_mesh) mask, kd, ks, normal = render.render_uv1(glctx, new_mesh, FLAGS.texture_res, eval_mesh.material['kd_ks_normal'], FLAGS.uv_padding_block) if FLAGS.layers > 1: kd = torch.cat((kd, torch.rand_like(kd[...,0:1])), dim=-1) kd_min, kd_max = torch.tensor(FLAGS.kd_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.kd_max, dtype=torch.float32, device='cuda') ks_min, ks_max = torch.tensor(FLAGS.ks_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.ks_max, dtype=torch.float32, device='cuda') nrm_min, nrm_max = torch.tensor(FLAGS.nrm_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.nrm_max, dtype=torch.float32, device='cuda') new_mesh.material = material.Material({ 'bsdf' : mat['bsdf'], 'kd' : texture.Texture2D(kd, min_max=[kd_min, kd_max]), 'ks' : texture.Texture2D(ks, min_max=[ks_min, ks_max]), 'normal' : texture.Texture2D(normal, min_max=[nrm_min, nrm_max]) }) return new_mesh 
############################################################################### # Utility functions for material ############################################################################### def get_normalize_mesh(pro_path): mesh = o3d.io.read_triangle_mesh(pro_path) vertices = np.asarray(mesh.vertices) shift = np.mean(vertices,axis=0) scale = np.max(np.linalg.norm(vertices-shift, ord=2, axis=1)) vertices = (vertices-shift) / scale mesh.vertices = o3d.cuda.pybind.utility.Vector3dVector(vertices) return mesh def initial_guness_material(geometry, mlp, FLAGS, init_mat=None): # ipdb.set_trace(()) kd_min, kd_max = torch.tensor(FLAGS.kd_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.kd_max, dtype=torch.float32, device='cuda') ks_min, ks_max = torch.tensor(FLAGS.ks_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.ks_max, dtype=torch.float32, device='cuda') nrm_min, nrm_max = torch.tensor(FLAGS.nrm_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.nrm_max, dtype=torch.float32, device='cuda') if mlp: mlp_min = torch.cat((kd_min[0:3], ks_min, nrm_min), dim=0) mlp_max = torch.cat((kd_max[0:3], ks_max, nrm_max), dim=0) mlp_map_opt = mlptexture.MLPTexture3D(geometry.getAABB(), channels=9, min_max=[mlp_min, mlp_max]) mat = material.Material({'kd_ks_normal' : mlp_map_opt}) else: # Setup Kd (albedo) and Ks (x, roughness, metalness) textures if FLAGS.random_textures or init_mat is None: num_channels = 4 if FLAGS.layers > 1 else 3 kd_init = torch.rand(size=FLAGS.texture_res + [num_channels], device='cuda') * (kd_max - kd_min)[None, None, 0:num_channels] + kd_min[None, None, 0:num_channels] kd_map_opt = texture.create_trainable(kd_init , FLAGS.texture_res, not FLAGS.custom_mip, [kd_min, kd_max]) ksR = np.random.uniform(size=FLAGS.texture_res + [1], low=0.0, high=0.01) ksG = np.random.uniform(size=FLAGS.texture_res + [1], low=ks_min[1].cpu(), high=ks_max[1].cpu()) ksB = np.random.uniform(size=FLAGS.texture_res + [1], low=ks_min[2].cpu(), high=ks_max[2].cpu()) ks_map_opt = texture.create_trainable(np.concatenate((ksR, ksG, ksB), axis=2), FLAGS.texture_res, not FLAGS.custom_mip, [ks_min, ks_max]) else: kd_map_opt = texture.create_trainable(init_mat['kd'], FLAGS.texture_res, not FLAGS.custom_mip, [kd_min, kd_max]) ks_map_opt = texture.create_trainable(init_mat['ks'], FLAGS.texture_res, not FLAGS.custom_mip, [ks_min, ks_max]) # Setup normal map if FLAGS.random_textures or init_mat is None or 'normal' not in init_mat: normal_map_opt = texture.create_trainable(np.array([0, 0, 1]), FLAGS.texture_res, not FLAGS.custom_mip, [nrm_min, nrm_max]) else: normal_map_opt = texture.create_trainable(init_mat['normal'], FLAGS.texture_res, not FLAGS.custom_mip, [nrm_min, nrm_max]) mat = material.Material({ 'kd' : kd_map_opt, 'ks' : ks_map_opt, 'normal' : normal_map_opt }) if init_mat is not None: mat['bsdf'] = init_mat['bsdf'] else: mat['bsdf'] = 'pbr' return mat ############################################################################### # Validation & testing ############################################################################### # @torch.no_grad() def validate_itr(glctx, target, geometry, opt_material, lgt, FLAGS, relight = None): result_dict = {} with torch.no_grad(): if FLAGS.mode == 'appearance_modeling': with torch.no_grad(): lgt.build_mips() if FLAGS.camera_space_light: lgt.xfm(target['mv']) if relight != None: relight.build_mips() buffers = geometry.render(glctx, target, lgt, opt_material, if_use_bump = FLAGS.if_use_bump) result_dict['shaded'] = buffers['shaded'][0, ..., 
0:3] result_dict['shaded'] = util.rgb_to_srgb(result_dict['shaded']) if relight != None: result_dict['relight'] = geometry.render(glctx, target, relight, opt_material, if_use_bump = FLAGS.if_use_bump)['shaded'][0, ..., 0:3] result_dict['relight'] = util.rgb_to_srgb(result_dict['relight']) result_dict['mask'] = (buffers['shaded'][0, ..., 3:4]) result_image = result_dict['shaded'] if FLAGS.display is not None : # white_bg = torch.ones_like(target['background']) for layer in FLAGS.display: if 'latlong' in layer and layer['latlong']: if isinstance(lgt, light.EnvironmentLight): result_dict['light_image'] = util.cubemap_to_latlong(lgt.base, FLAGS.display_res) result_image = torch.cat([result_image, result_dict['light_image']], axis=1) elif 'bsdf' in layer: buffers = geometry.render(glctx, target, lgt, opt_material, bsdf=layer['bsdf'], if_use_bump = FLAGS.if_use_bump) if layer['bsdf'] == 'kd': result_dict[layer['bsdf']] = util.rgb_to_srgb(buffers['shaded'][0, ..., 0:3]) elif layer['bsdf'] == 'normal': result_dict[layer['bsdf']] = (buffers['shaded'][0, ..., 0:3] + 1) * 0.5 else: result_dict[layer['bsdf']] = buffers['shaded'][0, ..., 0:3] result_image = torch.cat([result_image, result_dict[layer['bsdf']]], axis=1) return result_image, result_dict def save_gif(dir,fps): imgpath = dir frames = [] for idx in sorted(os.listdir(imgpath)): img = osp.join(imgpath,idx) frames.append(imageio.imread(img)) imageio.mimsave(os.path.join(dir, 'eval.gif'),frames,'GIF',duration=1/fps,loop=0) @torch.no_grad() def validate(glctx, geometry, opt_material, lgt, dataset_validate, out_dir, FLAGS, relight= None): # ============================================================================================== # Validation loop # ============================================================================================== mse_values = [] psnr_values = [] dataloader_validate = torch.utils.data.DataLoader(dataset_validate, batch_size=1, collate_fn=dataset_validate.collate) os.makedirs(out_dir, exist_ok=True) shaded_dir = os.path.join(out_dir, "shaded") relight_dir = os.path.join(out_dir, "relight") kd_dir = os.path.join(out_dir, "kd") ks_dir = os.path.join(out_dir, "ks") normal_dir = os.path.join(out_dir, "normal") mask_dir = os.path.join(out_dir, "mask") os.makedirs(shaded_dir, exist_ok=True) os.makedirs(relight_dir, exist_ok=True) os.makedirs(kd_dir, exist_ok=True) os.makedirs(ks_dir, exist_ok=True) os.makedirs(normal_dir, exist_ok=True) os.makedirs(mask_dir, exist_ok=True) print("Running validation") dataloader_validate = tqdm(dataloader_validate) for it, target in enumerate(dataloader_validate): # Mix validation background target = prepare_batch(target, 'white') result_image, result_dict = validate_itr(glctx, target, geometry, opt_material, lgt, FLAGS, relight) for k in result_dict.keys(): np_img = result_dict[k].detach().cpu().numpy() if k == 'shaded': util.save_image(shaded_dir + '/' + ('val_%06d_%s.png' % (it, k)), np_img) elif k == 'relight': util.save_image(relight_dir + '/' + ('val_%06d_%s.png' % (it, k)), np_img) elif k == 'kd': util.save_image(kd_dir + '/' + ('val_%06d_%s.png' % (it, k)), np_img) elif k == 'ks': util.save_image(ks_dir + '/' + ('val_%06d_%s.png' % (it, k)), np_img) elif k == 'normal': util.save_image(normal_dir + '/' + ('val_%06d_%s.png' % (it, k)), np_img) elif k == 'mask': util.save_image(mask_dir + '/' + ('val_%06d_%s.png' % (it, k)), np_img) if 'shaded' in result_dict.keys(): save_gif(shaded_dir,30) if 'relight' in result_dict.keys(): save_gif(relight_dir,30) if 'kd' in result_dict.keys(): 
save_gif(kd_dir,30) if 'ks' in result_dict.keys(): save_gif(ks_dir,30) if 'normal' in result_dict.keys(): save_gif(normal_dir,30) return 0 ############################################################################### # Main shape fitter function / optimization loop ############################################################################### class Trainer(torch.nn.Module): def __init__(self, glctx, geometry, lgt, mat, optimize_geometry, optimize_light, FLAGS, guidance): super(Trainer, self).__init__() self.glctx = glctx self.geometry = geometry self.light = lgt self.material = mat self.optimize_geometry = optimize_geometry self.optimize_light = optimize_light self.FLAGS = FLAGS self.guidance = guidance self.if_flip_the_normal = FLAGS.if_flip_the_normal self.if_use_bump = FLAGS.if_use_bump if self.FLAGS.mode == 'appearance_modeling': if not self.optimize_light: with torch.no_grad(): self.light.build_mips() self.params = list(self.material.parameters()) self.params += list(self.geometry.pos_encoder.parameters()) self.params += list(self.light.parameters()) if optimize_light else [] self.geo_params = list(self.geometry.parameters()) if optimize_geometry else [] def forward(self, target, it, if_normal, if_pretrain, scene_and_vertices ): if self.FLAGS.mode == 'appearance_modeling': if self.optimize_light: self.light.build_mips() if self.FLAGS.camera_space_light: self.light.xfm(target['mv']) if if_pretrain: return self.geometry.decoder.pre_train_ellipsoid(it, scene_and_vertices) else: return self.geometry.tick(glctx, target, self.light, self.material, it , if_normal, self.guidance, self.FLAGS.mode, self.if_flip_the_normal, self.if_use_bump) def optimize_mesh( glctx, geometry, opt_material, lgt, dataset_train, dataset_validate, FLAGS, log_interval=10, optimize_light=True, optimize_geometry=True, guidance = None, scene_and_vertices = None, ): dataloader_train = torch.utils.data.DataLoader(dataset_train, batch_size=FLAGS.batch, collate_fn=dataset_train.collate, shuffle=False) dataloader_validate = torch.utils.data.DataLoader(dataset_validate, batch_size=1, collate_fn=dataset_train.collate) model = Trainer(glctx, geometry, lgt, opt_material, optimize_geometry, optimize_light, FLAGS, guidance) if optimize_geometry: optimizer_mesh = torch.optim.AdamW(model.geo_params, lr=0.001, betas=(0.9, 0.99), eps=1e-15) optimizer = torch.optim.AdamW(model.params, lr=0.01, betas=(0.9, 0.99), eps=1e-15) optimizer_lora = torch.optim.SGD(itertools.chain(*guidance.unet_lora_params), lr=1e-5) if FLAGS.multi_gpu: model = model.cuda() model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[FLAGS.local_rank], find_unused_parameters= True ) img_cnt = 0 img_loss_vec = [] reg_loss_vec = [] iter_dur_vec = [] def cycle(iterable): iterator = iter(iterable) while True: try: yield next(iterator) except StopIteration: iterator = iter(iterable) v_it = cycle(dataloader_validate) scaler = torch.cuda.amp.GradScaler(enabled=True) rot_ang = 0 if FLAGS.local_rank == 0: video = Video(FLAGS.out_dir) if FLAGS.local_rank == 0: dataloader_train = tqdm(dataloader_train) for it, target in enumerate(dataloader_train): # Mix randomized background into dataset image target = prepare_batch(target, FLAGS.train_background) # Show/save image before training step (want to get correct rendering of input) if FLAGS.local_rank == 0: save_image = FLAGS.save_interval and (it % FLAGS.save_interval == 0) save_video = FLAGS.video_interval and (it % FLAGS.video_interval == 0) if save_image: result_image, result_dict = validate_itr(glctx, 
prepare_batch(next(v_it), FLAGS.train_background), geometry, opt_material, lgt, FLAGS) #prepare_batch(next(v_it), FLAGS.background) np_result_image = result_image.detach().cpu().numpy() util.save_image(FLAGS.out_dir + '/' + ('img_%s_%06d.png' % (FLAGS.mode, img_cnt)), np_result_image) util.save_image(FLAGS.out_dir + '/' + ('mask_%s_%06d.png' % (FLAGS.mode, img_cnt)), result_dict['mask'].detach().cpu().numpy()) img_cnt = img_cnt+1 if save_video: with torch.no_grad(): params = get_camera_params( resolution=512, fov=45, elev_angle=-20, azim_angle =rot_ang, ) rot_ang += 1 if FLAGS.mode =='geometry_modeling': buffers = geometry.render(glctx, params, lgt, opt_material, bsdf='normal', if_use_bump = FLAGS.if_use_bump) video_image = (buffers['shaded'][0, ..., 0:3]+1)/2 else: buffers = geometry.render(glctx, params, lgt, opt_material, bsdf='pbr', if_use_bump = FLAGS.if_use_bump) video_image = util.rgb_to_srgb(buffers['shaded'][0, ..., 0:3]) video_image = video.ready_image(video_image) iter_start_time = time.time() if FLAGS.mode =='geometry_modeling': if it<=400: if_pretrain = True else: if_pretrain = False if_normal =True else: if_pretrain = False if_normal = False with torch.cuda.amp.autocast(enabled= True): if if_pretrain== True: reg_loss = model(target, it, if_normal, if_pretrain= if_pretrain, scene_and_vertices = scene_and_vertices) img_loss = 0 sds_loss = 0 attention_loss = 0 if if_pretrain == False: sds_loss, img_loss, reg_loss, attention_loss = model(target, it, if_normal, if_pretrain= if_pretrain, scene_and_vertices =None) if FLAGS.mode =='geometry_modeling': if(it<1000): attention_loss = 0 else: if(it<500): attention_loss = 0 # ============================================================================================== # Final loss # ============================================================================================== total_loss = img_loss + reg_loss + sds_loss + attention_loss if if_pretrain == True: scaler.scale(total_loss).backward() if if_pretrain == False: scaler.scale(total_loss).backward() img_loss_vec.append(img_loss.item()) reg_loss_vec.append(reg_loss.item()) # ============================================================================================== # Backpropagate # ============================================================================================== if if_normal == False and if_pretrain == False: scaler.step(optimizer) optimizer.zero_grad() if if_normal == True or if_pretrain == True: if optimize_geometry: scaler.step(optimizer_mesh) optimizer_mesh.zero_grad() for param in guidance.parameters(): if param.grad is not None and torch.isnan(param.grad).any(): param.grad = torch.nan_to_num(param.grad, nan=0.0) max_norm = 5.0 torch.nn.utils.clip_grad_norm_(guidance.parameters(), max_norm) if if_pretrain == False: optimizer_lora.step() optimizer_lora.zero_grad() for param in guidance.parameters(): param.data = torch.nan_to_num(param.data, nan=0.0, posinf=None, neginf=None) scaler.update() # ============================================================================================== # Clamp trainables to reasonable range # ============================================================================================== with torch.no_grad(): if 'kd' in opt_material: opt_material['kd'].clamp_() if 'ks' in opt_material: opt_material['ks'].clamp_() if 'normal' in opt_material: opt_material['normal'].clamp_() opt_material['normal'].normalize_() if lgt is not None: lgt.clamp_(min=0.0) torch.cuda.current_stream().synchronize() iter_dur_vec.append(time.time() - 
iter_start_time) return geometry, opt_material def seed_everything(seed, local_rank): random.seed(seed + local_rank) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed + local_rank) torch.manual_seed(seed) torch.cuda.manual_seed(seed) if __name__ == "__main__": parser = argparse.ArgumentParser(description='nvdiffrec') parser.add_argument('--config', type=str, default='configs_clean3/icecream_geometry_debug.json', help='Config file') parser.add_argument('-i', '--iter', type=int, default=5000) parser.add_argument('-b', '--batch', type=int, default=1) parser.add_argument('-s', '--spp', type=int, default=1) parser.add_argument('-l', '--layers', type=int, default=1) parser.add_argument('-r', '--train-res', nargs=2, type=int, default=[512, 512]) parser.add_argument('-dr', '--display-res', type=int, default=None) parser.add_argument('-tr', '--texture-res', nargs=2, type=int, default=[1024, 1024]) parser.add_argument('-si', '--save-interval', type=int, default=1000, help="The interval of saving an image") parser.add_argument('-vi', '--video_interval', type=int, default=10, help="The interval of saving a frame of the video") parser.add_argument('-mr', '--min-roughness', type=float, default=0.08) parser.add_argument('-mip', '--custom-mip', action='store_true', default=False) parser.add_argument('-rt', '--random-textures', action='store_true', default=False) parser.add_argument('-bg', '--train_background', default='black', choices=['black', 'white', 'checker', 'reference']) parser.add_argument('-o', '--out-dir', type=str, default='results/result_debug/icecream_geometry') parser.add_argument('-rm', '--ref_mesh', type=str) parser.add_argument('-bm', '--base-mesh', type=str, default=None) parser.add_argument('--validate', type=bool, default=True) parser.add_argument("--local_rank", type=int, default=0, help="For distributed training: local_rank") parser.add_argument("--seed", type=int, default=42, help="A seed for reproducible training.") parser.add_argument("--add_directional_text", action='store_true', default=False) parser.add_argument('--mode', default='geometry_modeling', choices=['geometry_modeling', 'appearance_modeling']) parser.add_argument('--text', default=None, help="text prompt") parser.add_argument('--sdf_init_shape', default='ellipsoid', choices=['ellipsoid', 'cylinder', 'custom_mesh']) parser.add_argument('--camera_random_jitter', type= float, default=0.4, help="A large value is advantageous for the extension of objects such as ears or sharp corners to grow.") parser.add_argument('--fovy_range', nargs=2, type=float, default=[25.71, 45.00]) parser.add_argument('--elevation_range', nargs=2, type=int, default=[-10, 45], help="The elevatioin range must in [-90, 90].") parser.add_argument("--guidance_weight", type=int, default=100, help="The weight of classifier-free guidance") parser.add_argument("--sds_weight_strategy", type=int, nargs=1, default=0, choices=[0, 1, 2], help="The strategy of the sds loss's weight") parser.add_argument("--translation_y", type= float, nargs=1, default= 0 , help="translation of the initial shape on the y-axis") parser.add_argument("--coarse_iter", type= int, nargs=1, default= 1000 , help="The iteration number of the coarse stage.") parser.add_argument('--early_time_step_range', nargs=2, type=float, default=[0.02, 0.5], help="The time step range in early phase") parser.add_argument('--late_time_step_range', nargs=2, type=float, default=[0.02, 0.5], help="The time step range in late phase") parser.add_argument("--sdf_init_shape_rotate_x", type= int, 
nargs=1, default= 0 , help="rotation of the initial shape on the x-axis") parser.add_argument("--if_flip_the_normal", action='store_true', default=False , help="Flip the x-axis positive half-axis of Normal. We find this process helps to alleviate the Janus problem.") parser.add_argument("--front_threshold", type= int, nargs=1, default= 45 , help="the range of front view would be [-front_threshold, front_threshold") parser.add_argument("--if_use_bump", type=bool, default= True , help="whether to use perturbed normals during appearing modeling") parser.add_argument("--uv_padding_block", type= int, default= 4 , help="The block of uv padding.") FLAGS = parser.parse_args() FLAGS.mtl_override = None # Override material of model FLAGS.dmtet_grid = 64 # Resolution of initial tet grid. We provide 64, 128 and 256 resolution grids. Other resolutions can be generated with https://github.com/crawforddoran/quartet FLAGS.mesh_scale = 2.1 # Scale of tet grid box. Adjust to cover the model FLAGS.env_scale = 1.0 # Env map intensity multiplier FLAGS.envmap = None # HDR environment probe FLAGS.relight = None # HDR environment probe(relight) FLAGS.display = None # Conf validation window/display. E.g. [{"relight" : <path to envlight>}] FLAGS.camera_space_light = False # Fixed light in camera space. This is needed for setups like ethiopian head where the scanned object rotates on a stand. FLAGS.lock_light = False # Disable light optimization in the second pass FLAGS.lock_pos = False # Disable vertex position optimization in the second pass FLAGS.pre_load = True # Pre-load entire dataset into memory for faster training FLAGS.kd_min = [ 0.0, 0.0, 0.0, 0.0] # Limits for kd FLAGS.kd_max = [ 1.0, 1.0, 1.0, 1.0] FLAGS.ks_min = [ 0.0, 0.08, 0.0] # Limits for ks FLAGS.ks_max = [ 1.0, 1.0, 1.0] FLAGS.nrm_min = [-1.0, -1.0, 0.0] # Limits for normal map FLAGS.nrm_max = [ 1.0, 1.0, 1.0] FLAGS.cam_near_far = [1, 50] FLAGS.learn_light = False FLAGS.gpu_number = 1 FLAGS.sdf_init_shape_scale=[1.0, 1.0, 1.0] FLAGS.multi_gpu = "WORLD_SIZE" in os.environ and int(os.environ["WORLD_SIZE"]) > 1 if FLAGS.multi_gpu: FLAGS.gpu_number = int(os.environ["WORLD_SIZE"]) FLAGS.local_rank = int(os.environ["LOCAL_RANK"]) torch.distributed.init_process_group(backend="nccl", world_size = FLAGS.gpu_number, rank = FLAGS.local_rank) torch.cuda.set_device(FLAGS.local_rank) if FLAGS.config is not None: data = json.load(open(FLAGS.config, 'r')) for key in data: FLAGS.__dict__[key] = data[key] if FLAGS.display_res is None: FLAGS.display_res = FLAGS.train_res if FLAGS.local_rank == 0: print("Config / Flags:") print("---------") for key in FLAGS.__dict__.keys(): print(key, FLAGS.__dict__[key]) print("---------") seed_everything(FLAGS.seed, FLAGS.local_rank) os.makedirs(FLAGS.out_dir, exist_ok=True) glctx = dr.RasterizeCudaContext() # ============================================================================================== # Create data pipeline # ============================================================================================== dataset_train = DatasetMesh(glctx, FLAGS, validate=False) dataset_validate = DatasetMesh(glctx, FLAGS, validate=True) dataset_gif = DatasetMesh(glctx, FLAGS, gif=True) # ============================================================================================== # Create env light with trainable parameters # ============================================================================================== if FLAGS.mode == 'appearance_modeling' and FLAGS.base_mesh is not None: if FLAGS.learn_light: lgt = 
light.create_trainable_env_rnd(512, scale=0.0, bias=1) else: lgt = light.load_env(FLAGS.envmap, scale=FLAGS.env_scale) else: lgt = None if FLAGS.sdf_init_shape in ['ellipsoid', 'cylinder', 'custom_mesh'] and FLAGS.mode == 'geometry_modeling': if FLAGS.sdf_init_shape == 'ellipsoid': init_shape = o3d.geometry.TriangleMesh.create_sphere(1) elif FLAGS.sdf_init_shape == 'cylinder': init_shape = o3d.geometry.TriangleMesh.create_cylinder(radius=0.75, height=1.2, resolution=20, split=4, create_uv_map=False) elif FLAGS.sdf_init_shape == 'custom_mesh': if FLAGS.base_mesh: init_shape = get_normalize_mesh(FLAGS.base_mesh) else: assert False, "[Error] The path of custom mesh is invalid ! (geometry modeling)" else: assert False, "Invalid init type" vertices = np.asarray(init_shape.vertices) vertices[...,0]=vertices[...,0] * FLAGS.sdf_init_shape_scale[0] vertices[...,1]=vertices[...,1] * FLAGS.sdf_init_shape_scale[1] vertices[...,2]=vertices[...,2] * FLAGS.sdf_init_shape_scale[2] vertices = vertices @ util.rotate_x_2(np.deg2rad(FLAGS.sdf_init_shape_rotate_x)) vertices[...,1]=vertices[...,1] + FLAGS.translation_y init_shape.vertices = o3d.cuda.pybind.utility.Vector3dVector(vertices) points_surface = np.asarray(init_shape.sample_points_poisson_disk(5000).points) init_shape = o3d.t.geometry.TriangleMesh.from_legacy(init_shape) scene = o3d.t.geometry.RaycastingScene() scene.add_triangles(init_shape) scene_and_vertices = [scene, points_surface] guidance = StableDiffusion(device = 'cuda', mode = FLAGS.mode, text = FLAGS.text, add_directional_text = FLAGS.add_directional_text, batch = FLAGS.batch, guidance_weight = FLAGS.guidance_weight, sds_weight_strategy = FLAGS.sds_weight_strategy, early_time_step_range = FLAGS.early_time_step_range, late_time_step_range= FLAGS.late_time_step_range) if FLAGS.mode == 'geometry_modeling' : geometry = DMTetGeometry(FLAGS.dmtet_grid, FLAGS.mesh_scale, FLAGS) mat = initial_guness_material(geometry, True, FLAGS) # Run optimization geometry, mat = optimize_mesh(glctx, geometry, mat, lgt, dataset_train, dataset_validate, FLAGS, optimize_light=FLAGS.learn_light,optimize_geometry= not FLAGS.lock_pos, guidance= guidance, scene_and_vertices= scene_and_vertices) if FLAGS.local_rank == 0 and FLAGS.validate: validate(glctx, geometry, mat, lgt, dataset_gif, os.path.join(FLAGS.out_dir, "validate"), FLAGS) # Create textured mesh from result if FLAGS.local_rank == 0: base_mesh = xatlas_uvmap(glctx, geometry, mat, FLAGS) # # Free temporaries / cached memory torch.cuda.empty_cache() mat['kd_ks_normal'].cleanup() del mat['kd_ks_normal'] if FLAGS.local_rank == 0: # Dump mesh for debugging. os.makedirs(os.path.join(FLAGS.out_dir, "dmtet_mesh"), exist_ok=True) obj.write_obj(os.path.join(FLAGS.out_dir, "dmtet_mesh/"), base_mesh) elif FLAGS.mode == 'appearance_modeling': # ============================================================================================== # Train with fixed topology (mesh) # ============================================================================================== if FLAGS.base_mesh is None: assert False, "[Error] The path of custom mesh is invalid ! (appearance modeling)" base_mesh = mesh.load_mesh(FLAGS.base_mesh)
geometry = DLMesh(base_mesh, FLAGS)
3
2023-11-27 13:44:01+00:00
24k
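The X-Dreamer training loop above (optimize_mesh) wraps its forward pass in torch.cuda.amp.autocast and routes every backward through a torch.cuda.amp.GradScaler, replacing NaN gradients with zeros and clipping the guidance parameters at norm 5.0 before stepping. Below is a minimal sketch of that mixed-precision step for a single optimizer, under the assumption that the model's forward returns a scalar loss (the names are illustrative); unlike the loop above, it calls scaler.unscale_ before clipping, which is the recipe the GradScaler documentation recommends so the clip threshold applies to true-scale gradients:

import torch


def amp_train_step(model, optimizer, scaler, batch, max_norm=5.0):
    with torch.cuda.amp.autocast(enabled=True):
        loss = model(batch)                      # forward in reduced precision where safe
    scaler.scale(loss).backward()                # scaled backward avoids fp16 underflow
    scaler.unscale_(optimizer)                   # restore true gradient scale before clipping
    for p in model.parameters():                 # NaN guard, mirroring the loop above
        if p.grad is not None:
            p.grad = torch.nan_to_num(p.grad, nan=0.0)
    torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
    scaler.step(optimizer)                       # skipped automatically if infs remain
    scaler.update()                              # adapt the loss scale for the next step
    optimizer.zero_grad()
    return loss.item()

The scaler itself is created once, as in the loop above: scaler = torch.cuda.amp.GradScaler(enabled=True).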
zhenzhiwang/intercontrol
eval/eval_controlmdm.py
[ { "identifier": "ControlGaussianDiffusion", "path": "diffusion/control_diffusion.py", "snippet": "class ControlGaussianDiffusion(SpacedDiffusion):\n\n def inv_transform(self, data):\n assert self.std is not None and self.mean is not None\n #assert data.requires_grad == True\n std...
from diffusion.control_diffusion import ControlGaussianDiffusion from diffusion.respace import SpacedDiffusion from utils.parser_util import evaluation_inpainting_parser from utils.fixseed import fixseed from datetime import datetime from data_loaders.humanml.motion_loaders.model_motion_loaders import get_mdm_loader # get_motion_loader from data_loaders.humanml.utils.metrics import * from data_loaders.humanml.networks.evaluator_wrapper import EvaluatorMDMWrapper from collections import OrderedDict from data_loaders.humanml.scripts.motion_process import * from data_loaders.humanml.utils.utils import * from utils.model_util import load_controlmdm_and_diffusion from model.ControlMDM import ControlMDM from diffusion import logger from utils import dist_util from data_loaders.get_data import get_dataset_loader from model.cfg_sampler import wrap_model
14,409
all_metrics['Diversity'][key] += [item] if run_mm: for key, item in mm_score_dict.items(): if key not in all_metrics['MultiModality']: all_metrics['MultiModality'][key] = [item] else: all_metrics['MultiModality'][key] += [item] # print(all_metrics['Diversity']) mean_dict = {} for metric_name, metric_dict in all_metrics.items(): print('========== %s Summary ==========' % metric_name) print('========== %s Summary ==========' % metric_name, file=f, flush=True) for model_name, values in metric_dict.items(): # print(metric_name, model_name) mean, conf_interval = get_metric_statistics(np.array(values), replication_times) mean_dict[metric_name + '_' + model_name] = mean # print(mean, mean.dtype) if isinstance(mean, np.float64) or isinstance(mean, np.float32): print(f'---> [{model_name}] Mean: {mean:.4f} CInterval: {conf_interval:.4f}') print(f'---> [{model_name}] Mean: {mean:.4f} CInterval: {conf_interval:.4f}', file=f, flush=True) elif metric_name == 'Trajectory Error': traj_err_key = ["traj_fail_20cm", "traj_fail_50cm", "loc_fail_20cm", "loc_fail_50cm", "avg_err(m)"] line = f'---> [{model_name}]' print(line) print(line, file=f, flush=True) line = '' for i in range(len(mean)): # zip(traj_err_key, mean): line += ' (%s): Mean: %.4f CInt: %.4f; \n' % (traj_err_key[i], mean[i], conf_interval[i]) print(line) print(line, file=f, flush=True) elif isinstance(mean, np.ndarray): line = f'---> [{model_name}]' for i in range(len(mean)): line += '(top %d) Mean: %.4f CInt: %.4f;' % (i+1, mean[i], conf_interval[i]) print(line) print(line, file=f, flush=True) return mean_dict if __name__ == '__main__': args = evaluation_inpainting_parser() assert args.multi_person == False, 'multi-person is not supported for this script' assert args.guidance_param == 2.5 fixseed(args.seed) args.batch_size = 32 # This must be 32! Don't change it! otherwise it will cause a bug in R precision calc! model_name = os.path.basename(os.path.dirname(args.model_path)) niter = os.path.basename(args.model_path).replace('model', '').replace('.pt', '') dataset_name = args.dataset #log_file = os.path.join(os.path.dirname(args.model_path), 'eval_{}_{}_{}'.format(dataset_name, model_name, niter)) log_file = os.path.join(os.path.dirname(args.model_path), 'eval_niter_' + str(int(niter)) +'_'+ args.control_joint) assert args.inpainting_mask == 'global_joint', "This script only supports global_joint inpainting!" log_file += f'_mask{args.mask_ratio}' log_file += f'_bfgs_first{args.bfgs_times_first}_last{args.bfgs_times_last}_skip{args.bfgs_interval}' if args.use_posterior: log_file += '_posterior' else: log_file += '_x0' log_file += f'_{args.eval_mode}' log_file += '.log' print(f'Will save to log file [{log_file}]') assert args.overwrite or not os.path.exists(log_file), "Log file already exists!" 
print(f'Eval mode [{args.eval_mode}]') if args.eval_mode == 'debug': num_samples_limit = 1000 # None means no limit (eval over all dataset) run_mm = False mm_num_samples = 0 mm_num_repeats = 0 mm_num_times = 0 diversity_times = 300 replication_times = 5 # about 3 Hrs elif args.eval_mode == 'wo_mm': num_samples_limit = 1000 run_mm = False mm_num_samples = 0 mm_num_repeats = 0 mm_num_times = 0 diversity_times = 300 replication_times = 20 # about 12 Hrs elif args.eval_mode == 'mm_short': num_samples_limit = 1000 run_mm = True mm_num_samples = 100 mm_num_repeats = 30 mm_num_times = 10 diversity_times = 300 replication_times = 5 # about 15 Hrs else: raise ValueError() replication_times = replication_times if args.replication_times is None else args.replication_times dist_util.setup_dist(args.device) logger.configure() logger.log("creating data loader...") split = 'test' gt_loader = get_dataset_loader(name=args.dataset, batch_size=args.batch_size, num_frames=None, split=split, load_mode='gt') gen_loader = get_dataset_loader(name=args.dataset, batch_size=args.batch_size, num_frames=None, split=split, load_mode='eval') num_actions = gen_loader.dataset.num_actions logger.log("Creating model and diffusion...") DiffusionClass = ControlGaussianDiffusion if args.filter_noise else SpacedDiffusion model, diffusion = load_controlmdm_and_diffusion(args, gen_loader, dist_util.dev(), ModelClass=ControlMDM, DiffusionClass=DiffusionClass) diffusion.mean = gen_loader.dataset.t2m_dataset.mean diffusion.std = gen_loader.dataset.t2m_dataset.std eval_motion_loaders = { ################ ## HumanML3D Dataset## ################ 'vald': lambda: get_mdm_loader( args, model, diffusion, args.batch_size, gen_loader, mm_num_samples, mm_num_repeats, gt_loader.dataset.opt.max_motion_length, num_samples_limit, args.guidance_param ) }
torch.multiprocessing.set_sharing_strategy('file_system') def evaluate_matching_score(eval_wrapper, motion_loaders, file): match_score_dict = OrderedDict({}) R_precision_dict = OrderedDict({}) activation_dict = OrderedDict({}) trajectory_score_dict = OrderedDict({}) skating_ratio_dict = OrderedDict({}) print('========== Evaluating Matching Score ==========') for motion_loader_name, motion_loader in motion_loaders.items(): all_motion_embeddings = [] score_list = [] all_size = 0 matching_score_sum = 0 top_k_count = 0 skate_ratio_sum = 0.0 traj_err = [] traj_err_key = ["traj_fail_20cm", "traj_fail_50cm", "loc_fail_20cm", "loc_fail_50cm", "avg_err(m)"] # print(motion_loader_name) with torch.no_grad(): for idx, batch in enumerate(motion_loader): if motion_loader_name == 'ground truth': word_embeddings, pos_one_hots, _, sent_lens, motions, m_lens, _, _ = batch else: assert motion_loader_name == 'vald' # tested method named vald as default word_embeddings, pos_one_hots, _, sent_lens, motions, m_lens, _, skate_ratio, err_np = batch text_embeddings, motion_embeddings = eval_wrapper.get_co_embeddings( word_embs=word_embeddings, pos_ohot=pos_one_hots, cap_lens=sent_lens, motions=motions, m_lens=m_lens) dist_mat = euclidean_distance_matrix(text_embeddings.cpu().numpy(),motion_embeddings.cpu().numpy()) matching_score_sum += dist_mat.trace() argsmax = np.argsort(dist_mat, axis=1) top_k_mat = calculate_top_k(argsmax, top_k=3) top_k_count += top_k_mat.sum(axis=0) all_size += text_embeddings.shape[0] all_motion_embeddings.append(motion_embeddings.cpu().numpy()) if motion_loader_name != 'ground truth': traj_err.append(err_np) skate_ratio_sum += skate_ratio.sum() all_motion_embeddings = np.concatenate(all_motion_embeddings, axis=0) matching_score = matching_score_sum / all_size R_precision = top_k_count / all_size match_score_dict[motion_loader_name] = matching_score R_precision_dict[motion_loader_name] = R_precision activation_dict[motion_loader_name] = all_motion_embeddings if motion_loader_name != 'ground truth': ### For trajecotry evaluation ### traj_err = np.concatenate(traj_err).mean(0) trajectory_score_dict[motion_loader_name] = traj_err line = f'---> [{motion_loader_name}] Traj Error: ' print(line) print(line, file=file, flush=True) line = '' for (k, v) in zip(traj_err_key, traj_err): line += ' (%s): %.4f \n' % (k, np.mean(v)) print(line) print(line, file=file, flush=True) # For skating evaluation skating_score = skate_ratio_sum / all_size skating_ratio_dict[motion_loader_name] = skating_score print(f'---> [{motion_loader_name}] Skating Ratio: {skating_score:.4f}') print(f'---> [{motion_loader_name}] Skating Ratio: {skating_score:.4f}', file=file, flush=True) print(f'---> [{motion_loader_name}] Matching Score: {matching_score:.4f}') print(f'---> [{motion_loader_name}] Matching Score: {matching_score:.4f}',file=file,flush=True) line = f'---> [{motion_loader_name}] R_precision: ' for i in range(len(R_precision)): line += '(top %d): %.4f ' % (i + 1, R_precision[i]) print(line) print(line, file=file, flush=True) return match_score_dict, R_precision_dict, activation_dict, trajectory_score_dict, skating_ratio_dict def evaluate_fid(eval_wrapper, groundtruth_loader, activation_dict, file): eval_dict = OrderedDict({}) gt_motion_embeddings = [] print('========== Evaluating FID ==========') with torch.no_grad(): for idx, batch in enumerate(groundtruth_loader): _, _, _, sent_lens, motions, m_lens, _, _ = batch motion_embeddings = eval_wrapper.get_motion_embeddings( motions=motions, m_lens=m_lens ) 
gt_motion_embeddings.append(motion_embeddings.cpu().numpy()) gt_motion_embeddings = np.concatenate(gt_motion_embeddings, axis=0) gt_mu, gt_cov = calculate_activation_statistics(gt_motion_embeddings) # print(gt_mu) for model_name, motion_embeddings in activation_dict.items(): mu, cov = calculate_activation_statistics(motion_embeddings) # print(mu) fid = calculate_frechet_distance(gt_mu, gt_cov, mu, cov) print(f'---> [{model_name}] FID: {fid:.4f}') print(f'---> [{model_name}] FID: {fid:.4f}', file=file, flush=True) eval_dict[model_name] = fid return eval_dict def evaluate_diversity(activation_dict, file, diversity_times): eval_dict = OrderedDict({}) print('========== Evaluating Diversity ==========') for model_name, motion_embeddings in activation_dict.items(): diversity = calculate_diversity(motion_embeddings, diversity_times) eval_dict[model_name] = diversity print(f'---> [{model_name}] Diversity: {diversity:.4f}') print(f'---> [{model_name}] Diversity: {diversity:.4f}', file=file, flush=True) return eval_dict def evaluate_multimodality(eval_wrapper, mm_motion_loaders, file, mm_num_times): eval_dict = OrderedDict({}) print('========== Evaluating MultiModality ==========') for model_name, mm_motion_loader in mm_motion_loaders.items(): mm_motion_embeddings = [] with torch.no_grad(): for idx, batch in enumerate(mm_motion_loader): # (1, mm_replications, dim_pos) motions, m_lens = batch motion_embedings = eval_wrapper.get_motion_embeddings(motions[0], m_lens[0]) mm_motion_embeddings.append(motion_embedings.unsqueeze(0)) if len(mm_motion_embeddings) == 0: multimodality = 0 else: mm_motion_embeddings = torch.cat(mm_motion_embeddings, dim=0).cpu().numpy() multimodality = calculate_multimodality(mm_motion_embeddings, mm_num_times) print(f'---> [{model_name}] Multimodality: {multimodality:.4f}') print(f'---> [{model_name}] Multimodality: {multimodality:.4f}', file=file, flush=True) eval_dict[model_name] = multimodality return eval_dict def get_metric_statistics(values, replication_times): mean = np.mean(values, axis=0) std = np.std(values, axis=0) conf_interval = 1.96 * std / np.sqrt(replication_times) return mean, conf_interval def evaluation(eval_wrapper, gt_loader, eval_motion_loaders, log_file, replication_times, diversity_times, mm_num_times, run_mm=False): with open(log_file, 'w') as f: all_metrics = OrderedDict({'Matching Score': OrderedDict({}), 'R_precision': OrderedDict({}), 'FID': OrderedDict({}), 'Diversity': OrderedDict({}), 'MultiModality': OrderedDict({}), 'Trajectory Error': OrderedDict({}), 'Skating Ratio': OrderedDict({}), }) for replication in range(replication_times): motion_loaders = {} mm_motion_loaders = {} for motion_loader_name, motion_loader_getter in eval_motion_loaders.items(): motion_loader, mm_motion_loader = motion_loader_getter() motion_loaders[motion_loader_name] = motion_loader mm_motion_loaders[motion_loader_name] = mm_motion_loader motion_loaders['ground truth'] = gt_loader print(f'==================== Replication {replication} ====================') print(f'==================== Replication {replication} ====================', file=f, flush=True) print(f'Time: {datetime.now()}') print(f'Time: {datetime.now()}', file=f, flush=True) mat_score_dict, R_precision_dict, acti_dict, trajectory_score_dict, skating_ratio_dict = evaluate_matching_score(eval_wrapper, motion_loaders, f) print(f'Time: {datetime.now()}') print(f'Time: {datetime.now()}', file=f, flush=True) fid_score_dict = evaluate_fid(eval_wrapper, gt_loader, acti_dict, f) print(f'Time: {datetime.now()}') 
print(f'Time: {datetime.now()}', file=f, flush=True) div_score_dict = evaluate_diversity(acti_dict, f, diversity_times) if run_mm: print(f'Time: {datetime.now()}') print(f'Time: {datetime.now()}', file=f, flush=True) mm_score_dict = evaluate_multimodality(eval_wrapper, mm_motion_loaders, f, mm_num_times) print(f'!!! DONE !!!') print(f'!!! DONE !!!', file=f, flush=True) for key, item in trajectory_score_dict.items(): if key not in all_metrics['Trajectory Error']: all_metrics['Trajectory Error'][key] = [item] else: all_metrics['Trajectory Error'][key] += [item] for key, item in skating_ratio_dict.items(): if key not in all_metrics['Skating Ratio']: all_metrics['Skating Ratio'][key] = [item] else: all_metrics['Skating Ratio'][key] += [item] for key, item in mat_score_dict.items(): if key not in all_metrics['Matching Score']: all_metrics['Matching Score'][key] = [item] else: all_metrics['Matching Score'][key] += [item] for key, item in R_precision_dict.items(): if key not in all_metrics['R_precision']: all_metrics['R_precision'][key] = [item] else: all_metrics['R_precision'][key] += [item] for key, item in fid_score_dict.items(): if key not in all_metrics['FID']: all_metrics['FID'][key] = [item] else: all_metrics['FID'][key] += [item] for key, item in div_score_dict.items(): if key not in all_metrics['Diversity']: all_metrics['Diversity'][key] = [item] else: all_metrics['Diversity'][key] += [item] if run_mm: for key, item in mm_score_dict.items(): if key not in all_metrics['MultiModality']: all_metrics['MultiModality'][key] = [item] else: all_metrics['MultiModality'][key] += [item] # print(all_metrics['Diversity']) mean_dict = {} for metric_name, metric_dict in all_metrics.items(): print('========== %s Summary ==========' % metric_name) print('========== %s Summary ==========' % metric_name, file=f, flush=True) for model_name, values in metric_dict.items(): # print(metric_name, model_name) mean, conf_interval = get_metric_statistics(np.array(values), replication_times) mean_dict[metric_name + '_' + model_name] = mean # print(mean, mean.dtype) if isinstance(mean, np.float64) or isinstance(mean, np.float32): print(f'---> [{model_name}] Mean: {mean:.4f} CInterval: {conf_interval:.4f}') print(f'---> [{model_name}] Mean: {mean:.4f} CInterval: {conf_interval:.4f}', file=f, flush=True) elif metric_name == 'Trajectory Error': traj_err_key = ["traj_fail_20cm", "traj_fail_50cm", "loc_fail_20cm", "loc_fail_50cm", "avg_err(m)"] line = f'---> [{model_name}]' print(line) print(line, file=f, flush=True) line = '' for i in range(len(mean)): # zip(traj_err_key, mean): line += ' (%s): Mean: %.4f CInt: %.4f; \n' % (traj_err_key[i], mean[i], conf_interval[i]) print(line) print(line, file=f, flush=True) elif isinstance(mean, np.ndarray): line = f'---> [{model_name}]' for i in range(len(mean)): line += '(top %d) Mean: %.4f CInt: %.4f;' % (i+1, mean[i], conf_interval[i]) print(line) print(line, file=f, flush=True) return mean_dict if __name__ == '__main__': args = evaluation_inpainting_parser() assert args.multi_person == False, 'multi-person is not supported for this script' assert args.guidance_param == 2.5 fixseed(args.seed) args.batch_size = 32 # This must be 32! Don't change it! otherwise it will cause a bug in R precision calc! 
model_name = os.path.basename(os.path.dirname(args.model_path)) niter = os.path.basename(args.model_path).replace('model', '').replace('.pt', '') dataset_name = args.dataset #log_file = os.path.join(os.path.dirname(args.model_path), 'eval_{}_{}_{}'.format(dataset_name, model_name, niter)) log_file = os.path.join(os.path.dirname(args.model_path), 'eval_niter_' + str(int(niter)) +'_'+ args.control_joint) assert args.inpainting_mask == 'global_joint', "This script only supports global_joint inpainting!" log_file += f'_mask{args.mask_ratio}' log_file += f'_bfgs_first{args.bfgs_times_first}_last{args.bfgs_times_last}_skip{args.bfgs_interval}' if args.use_posterior: log_file += '_posterior' else: log_file += '_x0' log_file += f'_{args.eval_mode}' log_file += '.log' print(f'Will save to log file [{log_file}]') assert args.overwrite or not os.path.exists(log_file), "Log file already exists!" print(f'Eval mode [{args.eval_mode}]') if args.eval_mode == 'debug': num_samples_limit = 1000 # None means no limit (eval over all dataset) run_mm = False mm_num_samples = 0 mm_num_repeats = 0 mm_num_times = 0 diversity_times = 300 replication_times = 5 # about 3 Hrs elif args.eval_mode == 'wo_mm': num_samples_limit = 1000 run_mm = False mm_num_samples = 0 mm_num_repeats = 0 mm_num_times = 0 diversity_times = 300 replication_times = 20 # about 12 Hrs elif args.eval_mode == 'mm_short': num_samples_limit = 1000 run_mm = True mm_num_samples = 100 mm_num_repeats = 30 mm_num_times = 10 diversity_times = 300 replication_times = 5 # about 15 Hrs else: raise ValueError() replication_times = replication_times if args.replication_times is None else args.replication_times dist_util.setup_dist(args.device) logger.configure() logger.log("creating data loader...") split = 'test' gt_loader = get_dataset_loader(name=args.dataset, batch_size=args.batch_size, num_frames=None, split=split, load_mode='gt') gen_loader = get_dataset_loader(name=args.dataset, batch_size=args.batch_size, num_frames=None, split=split, load_mode='eval') num_actions = gen_loader.dataset.num_actions logger.log("Creating model and diffusion...") DiffusionClass = ControlGaussianDiffusion if args.filter_noise else SpacedDiffusion model, diffusion = load_controlmdm_and_diffusion(args, gen_loader, dist_util.dev(), ModelClass=ControlMDM, DiffusionClass=DiffusionClass) diffusion.mean = gen_loader.dataset.t2m_dataset.mean diffusion.std = gen_loader.dataset.t2m_dataset.std eval_motion_loaders = { ################ ## HumanML3D Dataset## ################ 'vald': lambda: get_mdm_loader( args, model, diffusion, args.batch_size, gen_loader, mm_num_samples, mm_num_repeats, gt_loader.dataset.opt.max_motion_length, num_samples_limit, args.guidance_param ) }
eval_wrapper = EvaluatorMDMWrapper(args.dataset, dist_util.dev())
5
2023-11-27 05:28:02+00:00
24k
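The intercontrol evaluation above reports R-precision by building a Euclidean distance matrix between paired text and motion embeddings and asking, per sample, whether the matching motion (the diagonal entry) lands among the top-k nearest motions. The helpers euclidean_distance_matrix and calculate_top_k are imported from the repo's metrics module, so the sketch below is an assumption about their behaviour rather than the repo's code:

import numpy as np


def euclidean_distance_matrix(a: np.ndarray, b: np.ndarray) -> np.ndarray:
    # Pairwise distances between rows of a (N, D) and rows of b (N, D).
    return np.sqrt(((a[:, None, :] - b[None, :, :]) ** 2).sum(-1))


def r_precision(text_emb: np.ndarray, motion_emb: np.ndarray, top_k: int = 3) -> np.ndarray:
    dist = euclidean_distance_matrix(text_emb, motion_emb)
    ranks = np.argsort(dist, axis=1)             # nearest motions first, per text
    gt = np.arange(len(text_emb))[:, None]       # ground-truth pairing is the diagonal
    hits = ranks[:, :top_k] == gt                # exact-position matches, shape (N, top_k)
    # A sample is a top-k hit if its match appeared anywhere in the first k columns.
    return hits.cumsum(axis=1).clip(max=1).mean(axis=0)

The matching score printed alongside it is the mean diagonal distance (dist_mat.trace() divided by the sample count), and the confidence intervals in the summaries are 1.96 * std / sqrt(replication_times), as computed in get_metric_statistics above.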
camenduru/magicanimate-hf
magicanimate/pipelines/pipeline_animation.py
[ { "identifier": "UNet3DConditionModel", "path": "magicanimate/models/unet_controlnet.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,...
import inspect, math import numpy as np import torch import torch.distributed as dist from typing import Callable, List, Optional, Union from dataclasses import dataclass from PIL import Image from tqdm import tqdm from diffusers.utils import is_accelerate_available from packaging import version from transformers import CLIPTextModel, CLIPTokenizer from diffusers.configuration_utils import FrozenDict from diffusers.models import AutoencoderKL from diffusers.pipeline_utils import DiffusionPipeline from diffusers.schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from diffusers.utils import deprecate, logging, BaseOutput from einops import rearrange from magicanimate.models.unet_controlnet import UNet3DConditionModel from magicanimate.models.controlnet import ControlNetModel from magicanimate.models.mutual_self_attention import ReferenceAttentionControl from magicanimate.pipelines.context import ( get_context_scheduler, get_total_steps ) from magicanimate.utils.util import get_tensor_interpolation_method from accelerate import cpu_offload
19,383
verbose=False ): """ Inverse sampling for DDIM Inversion """ if verbose: print("timestep: ", timestep) next_step = timestep timestep = min(timestep - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps, 999) alpha_prod_t = self.scheduler.alphas_cumprod[timestep] if timestep >= 0 else self.scheduler.final_alpha_cumprod alpha_prod_t_next = self.scheduler.alphas_cumprod[next_step] beta_prod_t = 1 - alpha_prod_t pred_x0 = (x - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5 pred_dir = (1 - alpha_prod_t_next)**0.5 * model_output x_next = alpha_prod_t_next**0.5 * pred_x0 + pred_dir return x_next, pred_x0 @torch.no_grad() def images2latents(self, images, dtype): """ Convert RGB image to VAE latents """ device = self._execution_device images = torch.from_numpy(images).float().to(dtype) / 127.5 - 1 images = rearrange(images, "f h w c -> f c h w").to(device) latents = [] for frame_idx in range(images.shape[0]): latents.append(self.vae.encode(images[frame_idx:frame_idx+1])['latent_dist'].mean * 0.18215) latents = torch.cat(latents) return latents @torch.no_grad() def invert( self, image: torch.Tensor, prompt, num_inference_steps=20, num_actual_inference_steps=10, eta=0.0, return_intermediates=False, **kwargs): """ Adapted from: https://github.com/Yujun-Shi/DragDiffusion/blob/main/drag_pipeline.py#L440 invert a real image into noise map with determinisc DDIM inversion """ device = self._execution_device batch_size = image.shape[0] if isinstance(prompt, list): if batch_size == 1: image = image.expand(len(prompt), -1, -1, -1) elif isinstance(prompt, str): if batch_size > 1: prompt = [prompt] * batch_size # text embeddings text_input = self.tokenizer( prompt, padding="max_length", max_length=77, return_tensors="pt" ) text_embeddings = self.text_encoder(text_input.input_ids.to(device))[0] print("input text embeddings :", text_embeddings.shape) # define initial latents latents = self.images2latents(image) print("latents shape: ", latents.shape) # interative sampling self.scheduler.set_timesteps(num_inference_steps) print("Valid timesteps: ", reversed(self.scheduler.timesteps)) latents_list = [latents] pred_x0_list = [latents] for i, t in enumerate(tqdm(reversed(self.scheduler.timesteps), desc="DDIM Inversion")): if num_actual_inference_steps is not None and i >= num_actual_inference_steps: continue model_inputs = latents # predict the noise # NOTE: the u-net here is UNet3D, therefore the model_inputs need to be of shape (b c f h w) model_inputs = rearrange(model_inputs, "f c h w -> 1 c f h w") noise_pred = self.unet(model_inputs, t, encoder_hidden_states=text_embeddings).sample noise_pred = rearrange(noise_pred, "b c f h w -> (b f) c h w") # compute the previous noise sample x_t-1 -> x_t latents, pred_x0 = self.next_step(noise_pred, t, latents) latents_list.append(latents) pred_x0_list.append(pred_x0) if return_intermediates: # return the intermediate laters during inversion return latents, latents_list return latents def interpolate_latents(self, latents: torch.Tensor, interpolation_factor:int, device ): if interpolation_factor < 2: return latents new_latents = torch.zeros( (latents.shape[0],latents.shape[1],((latents.shape[2]-1) * interpolation_factor)+1, latents.shape[3],latents.shape[4]), device=latents.device, dtype=latents.dtype, ) org_video_length = latents.shape[2] rate = [i/interpolation_factor for i in range(interpolation_factor)][1:] new_index = 0 v0 = None v1 = None for i0,i1 in zip( range( org_video_length ),range( org_video_length )[1:] ): v0 = 
latents[:,:,i0,:,:] v1 = latents[:,:,i1,:,:] new_latents[:,:,new_index,:,:] = v0 new_index += 1 for f in rate:
# ************************************************************************* # This file may have been modified by Bytedance Inc. (“Bytedance Inc.'s Mo- # difications”). All Bytedance Inc.'s Modifications are Copyright (2023) B- # ytedance Inc.. # ************************************************************************* # Adapted from https://github.com/showlab/Tune-A-Video/blob/main/tuneavideo/pipelines/pipeline_tuneavideo.py # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TODO: 1. support multi-controlnet 2. [DONE] support DDIM inversion 3. support Prompt-to-prompt """ logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class AnimationPipelineOutput(BaseOutput): videos: Union[torch.Tensor, np.ndarray] class AnimationPipeline(DiffusionPipeline): _optional_components = [] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet3DConditionModel, controlnet: ControlNetModel, scheduler: Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ], ): super().__init__() if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " "to update the config accordingly as leaving `steps_offset` might lead to incorrect results" " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["steps_offset"] = 1 scheduler._internal_dict = FrozenDict(new_config) if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." " `clip_sample` should be set to False in the configuration file. Please make sure to update the" " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" " future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" ) deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["clip_sample"] = False scheduler._internal_dict = FrozenDict(new_config) is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( version.parse(unet.config._diffusers_version).base_version ) < version.parse("0.9.0.dev0") is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = ( "The configuration file of the unet has set the default `sample_size` to smaller than" " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" " in the config might lead to incorrect results in future versions. If you have downloaded this" " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" " the `unet/config.json` file" ) deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config["sample_size"] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, controlnet=controlnet, scheduler=scheduler, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) def enable_vae_slicing(self): self.vae.enable_slicing() def disable_vae_slicing(self): self.vae.disable_slicing() def enable_sequential_cpu_offload(self, gpu_id=0): if not is_accelerate_available(): raise ImportError("Please install accelerate via `pip install accelerate`") device = torch.device(f"cuda:{gpu_id}") for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]: if cpu_offloaded_model is not None: cpu_offload(cpu_offloaded_model, device) @property def _execution_device(self): if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"): return self.device for module in self.unet.modules(): if ( hasattr(module, "_hf_hook") and hasattr(module._hf_hook, "execution_device") and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device) return self.device def _encode_prompt(self, prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt): batch_size = len(prompt) if isinstance(prompt, list) else 1 text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" 
{self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None text_embeddings = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, ) text_embeddings = text_embeddings[0] # duplicate text embeddings for each generation per prompt, using mps friendly method bs_embed, seq_len, _ = text_embeddings.shape text_embeddings = text_embeddings.repeat(1, num_videos_per_prompt, 1) text_embeddings = text_embeddings.view(bs_embed * num_videos_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt max_length = text_input_ids.shape[-1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None uncond_embeddings = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) uncond_embeddings = uncond_embeddings[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = uncond_embeddings.shape[1] uncond_embeddings = uncond_embeddings.repeat(1, num_videos_per_prompt, 1) uncond_embeddings = uncond_embeddings.view(batch_size * num_videos_per_prompt, seq_len, -1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) return text_embeddings def decode_latents(self, latents, rank, decoder_consistency=None): video_length = latents.shape[2] latents = 1 / 0.18215 * latents latents = rearrange(latents, "b c f h w -> (b f) c h w") # video = self.vae.decode(latents).sample video = [] for frame_idx in tqdm(range(latents.shape[0]), disable=(rank!=0)): if decoder_consistency is not None: video.append(decoder_consistency(latents[frame_idx:frame_idx+1])) else: video.append(self.vae.decode(latents[frame_idx:frame_idx+1]).sample) video = torch.cat(video) video = rearrange(video, "(b f) c h w -> b c f h w", f=video_length) video = (video / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 video = video.cpu().float().numpy() return video def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, callback_steps): if not isinstance(prompt, str) and not isinstance(prompt, list): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) def prepare_latents(self, batch_size, num_channels_latents, video_length, height, width, dtype, device, generator, latents=None, clip_length=16): shape = (batch_size, num_channels_latents, clip_length, height // self.vae_scale_factor, width // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if latents is None: rand_device = "cpu" if device.type == "mps" else device if isinstance(generator, list): latents = [ torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype) for i in range(batch_size) ] latents = torch.cat(latents, dim=0).to(device) else: latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype).to(device) latents = latents.repeat(1, 1, video_length//clip_length, 1, 1) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents def prepare_condition(self, condition, num_videos_per_prompt, device, dtype, do_classifier_free_guidance): # prepare conditions for controlnet condition = torch.from_numpy(condition.copy()).to(device=device, dtype=dtype) / 255.0 condition = torch.stack([condition for _ in range(num_videos_per_prompt)], dim=0) condition = rearrange(condition, 'b f h w c -> (b f) c h w').clone() if do_classifier_free_guidance: condition = torch.cat([condition] * 2) return condition def next_step( self, model_output: torch.FloatTensor, timestep: int, x: torch.FloatTensor, eta=0., verbose=False ): """ Inverse sampling for DDIM Inversion """ if verbose: print("timestep: ", timestep) next_step = timestep timestep = min(timestep - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps, 999) alpha_prod_t = self.scheduler.alphas_cumprod[timestep] if timestep >= 0 else self.scheduler.final_alpha_cumprod alpha_prod_t_next = self.scheduler.alphas_cumprod[next_step] beta_prod_t = 1 - alpha_prod_t pred_x0 = (x - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5 pred_dir = (1 - alpha_prod_t_next)**0.5 * model_output x_next = alpha_prod_t_next**0.5 * pred_x0 + pred_dir return 
x_next, pred_x0 @torch.no_grad() def images2latents(self, images, dtype): """ Convert RGB image to VAE latents """ device = self._execution_device images = torch.from_numpy(images).float().to(dtype) / 127.5 - 1 images = rearrange(images, "f h w c -> f c h w").to(device) latents = [] for frame_idx in range(images.shape[0]): latents.append(self.vae.encode(images[frame_idx:frame_idx+1])['latent_dist'].mean * 0.18215) latents = torch.cat(latents) return latents @torch.no_grad() def invert( self, image: torch.Tensor, prompt, num_inference_steps=20, num_actual_inference_steps=10, eta=0.0, return_intermediates=False, **kwargs): """ Adapted from: https://github.com/Yujun-Shi/DragDiffusion/blob/main/drag_pipeline.py#L440 invert a real image into a noise map with deterministic DDIM inversion """ device = self._execution_device batch_size = image.shape[0] if isinstance(prompt, list): if batch_size == 1: image = image.expand(len(prompt), -1, -1, -1) elif isinstance(prompt, str): if batch_size > 1: prompt = [prompt] * batch_size # text embeddings text_input = self.tokenizer( prompt, padding="max_length", max_length=77, return_tensors="pt" ) text_embeddings = self.text_encoder(text_input.input_ids.to(device))[0] print("input text embeddings :", text_embeddings.shape) # define initial latents (images2latents requires a dtype argument) latents = self.images2latents(image, text_embeddings.dtype) print("latents shape: ", latents.shape) # iterative sampling self.scheduler.set_timesteps(num_inference_steps) print("Valid timesteps: ", reversed(self.scheduler.timesteps)) latents_list = [latents] pred_x0_list = [latents] for i, t in enumerate(tqdm(reversed(self.scheduler.timesteps), desc="DDIM Inversion")): if num_actual_inference_steps is not None and i >= num_actual_inference_steps: continue model_inputs = latents # predict the noise # NOTE: the u-net here is UNet3D, therefore the model_inputs need to be of shape (b c f h w) model_inputs = rearrange(model_inputs, "f c h w -> 1 c f h w") noise_pred = self.unet(model_inputs, t, encoder_hidden_states=text_embeddings).sample noise_pred = rearrange(noise_pred, "b c f h w -> (b f) c h w") # compute the previous noise sample x_t-1 -> x_t latents, pred_x0 = self.next_step(noise_pred, t, latents) latents_list.append(latents) pred_x0_list.append(pred_x0) if return_intermediates: # return the intermediate latents during inversion return latents, latents_list return latents def interpolate_latents(self, latents: torch.Tensor, interpolation_factor:int, device ): if interpolation_factor < 2: return latents new_latents = torch.zeros( (latents.shape[0],latents.shape[1],((latents.shape[2]-1) * interpolation_factor)+1, latents.shape[3],latents.shape[4]), device=latents.device, dtype=latents.dtype, ) org_video_length = latents.shape[2] rate = [i/interpolation_factor for i in range(interpolation_factor)][1:] new_index = 0 v0 = None v1 = None for i0,i1 in zip( range( org_video_length ),range( org_video_length )[1:] ): v0 = latents[:,:,i0,:,:] v1 = latents[:,:,i1,:,:] new_latents[:,:,new_index,:,:] = v0 new_index += 1 for f in rate:
v = get_tensor_interpolation_method()(v0.to(device=device),v1.to(device=device),f)
5
2023-12-04 20:47:34+00:00
24k
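The `next_step` method in the record above performs one update of deterministic DDIM inversion (eta = 0): it recovers the predicted clean sample x0 from the current latent and the model's noise estimate, then re-noises x0 toward the next, noisier timestep. A minimal standalone sketch of that update, assuming precomputed alpha-bar values from a DDPM noise schedule (the function and argument names here are illustrative, not part of the pipeline's API):

import torch

def ddim_inversion_step(x_t, eps, alpha_t, alpha_next):
    # One deterministic DDIM inversion update (eta = 0), mirroring next_step above.
    # x_t: latent at timestep t; eps: predicted noise at t;
    # alpha_t / alpha_next: cumulative alpha-bar at t and at the next, noisier timestep.
    pred_x0 = (x_t - (1 - alpha_t) ** 0.5 * eps) / alpha_t ** 0.5  # recover the clean estimate
    pred_dir = (1 - alpha_next) ** 0.5 * eps                       # deterministic noise direction
    x_next = alpha_next ** 0.5 * pred_x0 + pred_dir                # re-noise toward the next timestep
    return x_next, pred_x0

# x_next, x0 = ddim_inversion_step(torch.randn(1, 4, 64, 64), torch.randn(1, 4, 64, 64), 0.9, 0.8)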
metatube-community/metatube-plex-plugins
MetaTube.bundle/Contents/Libraries/Shared/urllib3/poolmanager.py
[ { "identifier": "HTTPHeaderDict", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/_collections.py", "snippet": "class HTTPHeaderDict(MutableMapping):\n \"\"\"\n :param headers:\n An iterable of field-value pairs. Must not contain multiple field names\n when compared case-i...
import collections import functools import logging from ._collections import HTTPHeaderDict, RecentlyUsedContainer from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, port_by_scheme from .exceptions import ( LocationValueError, MaxRetryError, ProxySchemeUnknown, ProxySchemeUnsupported, URLSchemeUnknown, ) from .packages import six from .packages.six.moves.urllib.parse import urljoin from .request import RequestMethods from .util.proxy import connection_requires_http_tunnel from .util.retry import Retry from .util.url import parse_url
14,705
Similar to :func:`urllib3.connectionpool.connection_from_url`. If ``pool_kwargs`` is not provided and a new pool needs to be constructed, ``self.connection_pool_kw`` is used to initialize the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs`` is provided, it is used instead. Note that if a new pool does not need to be created for the request, the provided ``pool_kwargs`` are not used. """ u = parse_url(url) return self.connection_from_host( u.host, port=u.port, scheme=u.scheme, pool_kwargs=pool_kwargs ) def _merge_pool_kwargs(self, override): """ Merge a dictionary of override values for self.connection_pool_kw. This does not modify self.connection_pool_kw and returns a new dict. Any keys in the override dictionary with a value of ``None`` are removed from the merged dictionary. """ base_pool_kwargs = self.connection_pool_kw.copy() if override: for key, value in override.items(): if value is None: try: del base_pool_kwargs[key] except KeyError: pass else: base_pool_kwargs[key] = value return base_pool_kwargs def _proxy_requires_url_absolute_form(self, parsed_url): """ Indicates if the proxy requires the complete destination URL in the request. Normally this is only needed when not using an HTTP CONNECT tunnel. """ if self.proxy is None: return False return not connection_requires_http_tunnel( self.proxy, self.proxy_config, parsed_url.scheme ) def _validate_proxy_scheme_url_selection(self, url_scheme): """ Validates that we're not attempting to do TLS in TLS connections on Python2 or with unsupported SSL implementations. """ if self.proxy is None or url_scheme != "https": return if self.proxy.scheme != "https": return if six.PY2 and not self.proxy_config.use_forwarding_for_https: raise ProxySchemeUnsupported( "Contacting HTTPS destinations through HTTPS proxies " "'via CONNECT tunnels' is not supported in Python 2" ) def urlopen(self, method, url, redirect=True, **kw): """ Same as :meth:`urllib3.HTTPConnectionPool.urlopen` with custom cross-host redirect logic and only sends the request-uri portion of the ``url``. The given ``url`` parameter must be absolute, such that an appropriate :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it. """ u = parse_url(url) self._validate_proxy_scheme_url_selection(u.scheme) conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme) kw["assert_same_host"] = False kw["redirect"] = False if "headers" not in kw: kw["headers"] = self.headers.copy() if self._proxy_requires_url_absolute_form(u): response = conn.urlopen(method, url, **kw) else: response = conn.urlopen(method, u.request_uri, **kw) redirect_location = redirect and response.get_redirect_location() if not redirect_location: return response # Support relative URLs for redirecting. redirect_location = urljoin(url, redirect_location) if response.status == 303: # Change the method according to RFC 9110, Section 15.4.4. method = "GET" # And lose the body not to transfer anything sensitive. kw["body"] = None kw["headers"] = HTTPHeaderDict(kw["headers"])._prepare_for_method_change() retries = kw.get("retries") if not isinstance(retries, Retry): retries = Retry.from_int(retries, redirect=redirect) # Strip headers marked as unsafe to forward to the redirected location. # Check remove_headers_on_redirect to avoid a potential network call within # conn.is_same_host() which may use socket.gethostbyname() in the future. 
if retries.remove_headers_on_redirect and not conn.is_same_host( redirect_location ): headers = list(six.iterkeys(kw["headers"])) for header in headers: if header.lower() in retries.remove_headers_on_redirect: kw["headers"].pop(header, None) try: retries = retries.increment(method, url, response=response, _pool=conn)
from __future__ import absolute_import __all__ = ["PoolManager", "ProxyManager", "proxy_from_url"] log = logging.getLogger(__name__) SSL_KEYWORDS = ( "key_file", "cert_file", "cert_reqs", "ca_certs", "ssl_version", "ca_cert_dir", "ssl_context", "key_password", "server_hostname", ) # All known keyword arguments that could be provided to the pool manager, its # pools, or the underlying connections. This is used to construct a pool key. _key_fields = ( "key_scheme", # str "key_host", # str "key_port", # int "key_timeout", # int or float or Timeout "key_retries", # int or Retry "key_strict", # bool "key_block", # bool "key_source_address", # str "key_key_file", # str "key_key_password", # str "key_cert_file", # str "key_cert_reqs", # str "key_ca_certs", # str "key_ssl_version", # str "key_ca_cert_dir", # str "key_ssl_context", # instance of ssl.SSLContext or urllib3.util.ssl_.SSLContext "key_maxsize", # int "key_headers", # dict "key__proxy", # parsed proxy url "key__proxy_headers", # dict "key__proxy_config", # class "key_socket_options", # list of (level (int), optname (int), value (int or str)) tuples "key__socks_options", # dict "key_assert_hostname", # bool or string "key_assert_fingerprint", # str "key_server_hostname", # str ) #: The namedtuple class used to construct keys for the connection pool. #: All custom key schemes should include the fields in this key at a minimum. PoolKey = collections.namedtuple("PoolKey", _key_fields) _proxy_config_fields = ("ssl_context", "use_forwarding_for_https") ProxyConfig = collections.namedtuple("ProxyConfig", _proxy_config_fields) def _default_key_normalizer(key_class, request_context): """ Create a pool key out of a request context dictionary. According to RFC 3986, both the scheme and host are case-insensitive. Therefore, this function normalizes both before constructing the pool key for an HTTPS request. If you wish to change this behaviour, provide alternate callables to ``key_fn_by_scheme``. :param key_class: The class to use when constructing the key. This should be a namedtuple with the ``scheme`` and ``host`` keys at a minimum. :type key_class: namedtuple :param request_context: A dictionary-like object that contains the context for a request. :type request_context: dict :return: A namedtuple that can be used as a connection pool key. :rtype: PoolKey """ # Since we mutate the dictionary, make a copy first context = request_context.copy() context["scheme"] = context["scheme"].lower() context["host"] = context["host"].lower() # These are both dictionaries and need to be transformed into frozensets for key in ("headers", "_proxy_headers", "_socks_options"): if key in context and context[key] is not None: context[key] = frozenset(context[key].items()) # The socket_options key may be a list and needs to be transformed into a # tuple. socket_opts = context.get("socket_options") if socket_opts is not None: context["socket_options"] = tuple(socket_opts) # Map the kwargs to the names in the namedtuple - this is necessary since # namedtuples can't have fields starting with '_'. for key in list(context.keys()): context["key_" + key] = context.pop(key) # Default to ``None`` for keys missing from the context for field in key_class._fields: if field not in context: context[field] = None return key_class(**context) #: A dictionary that maps a scheme to a callable that creates a pool key. #: This can be used to alter the way pool keys are constructed, if desired. 
#: Each PoolManager makes a copy of this dictionary so they can be configured #: globally here, or individually on the instance. key_fn_by_scheme = { "http": functools.partial(_default_key_normalizer, PoolKey), "https": functools.partial(_default_key_normalizer, PoolKey), } pool_classes_by_scheme = {"http": HTTPConnectionPool, "https": HTTPSConnectionPool} class PoolManager(RequestMethods): """ Allows for arbitrary requests while transparently keeping track of necessary connection pools for you. :param num_pools: Number of connection pools to cache before discarding the least recently used pool. :param headers: Headers to include with all requests, unless other headers are given explicitly. :param \\**connection_pool_kw: Additional parameters are used to create fresh :class:`urllib3.connectionpool.ConnectionPool` instances. Example:: >>> manager = PoolManager(num_pools=2) >>> r = manager.request('GET', 'http://google.com/') >>> r = manager.request('GET', 'http://google.com/mail') >>> r = manager.request('GET', 'http://yahoo.com/') >>> len(manager.pools) 2 """ proxy = None proxy_config = None def __init__(self, num_pools=10, headers=None, **connection_pool_kw): RequestMethods.__init__(self, headers) self.connection_pool_kw = connection_pool_kw self.pools = RecentlyUsedContainer(num_pools) # Locally set the pool classes and keys so other PoolManagers can # override them. self.pool_classes_by_scheme = pool_classes_by_scheme self.key_fn_by_scheme = key_fn_by_scheme.copy() def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.clear() # Return False to re-raise any potential exceptions return False def _new_pool(self, scheme, host, port, request_context=None): """ Create a new :class:`urllib3.connectionpool.ConnectionPool` based on host, port, scheme, and any additional pool keyword arguments. If ``request_context`` is provided, it is provided as keyword arguments to the pool class used. This method is used to actually create the connection pools handed out by :meth:`connection_from_url` and companion methods. It is intended to be overridden for customization. """ pool_cls = self.pool_classes_by_scheme[scheme] if request_context is None: request_context = self.connection_pool_kw.copy() # Although the context has everything necessary to create the pool, # this function has historically only used the scheme, host, and port # in the positional args. When an API change is acceptable these can # be removed. for key in ("scheme", "host", "port"): request_context.pop(key, None) if scheme == "http": for kw in SSL_KEYWORDS: request_context.pop(kw, None) return pool_cls(host, port, **request_context) def clear(self): """ Empty our store of pools and direct them all to close. This will not affect in-flight connections, but they will not be re-used after completion. """ self.pools.clear() def connection_from_host(self, host, port=None, scheme="http", pool_kwargs=None): """ Get a :class:`urllib3.connectionpool.ConnectionPool` based on the host, port, and scheme. If ``port`` isn't given, it will be derived from the ``scheme`` using ``urllib3.connectionpool.port_by_scheme``. If ``pool_kwargs`` is provided, it is merged with the instance's ``connection_pool_kw`` variable and used to create the new connection pool, if one is needed. 
""" if not host: raise LocationValueError("No host specified.") request_context = self._merge_pool_kwargs(pool_kwargs) request_context["scheme"] = scheme or "http" if not port: port = port_by_scheme.get(request_context["scheme"].lower(), 80) request_context["port"] = port request_context["host"] = host return self.connection_from_context(request_context) def connection_from_context(self, request_context): """ Get a :class:`urllib3.connectionpool.ConnectionPool` based on the request context. ``request_context`` must at least contain the ``scheme`` key and its value must be a key in ``key_fn_by_scheme`` instance variable. """ scheme = request_context["scheme"].lower() pool_key_constructor = self.key_fn_by_scheme.get(scheme) if not pool_key_constructor: raise URLSchemeUnknown(scheme) pool_key = pool_key_constructor(request_context) return self.connection_from_pool_key(pool_key, request_context=request_context) def connection_from_pool_key(self, pool_key, request_context=None): """ Get a :class:`urllib3.connectionpool.ConnectionPool` based on the provided pool key. ``pool_key`` should be a namedtuple that only contains immutable objects. At a minimum it must have the ``scheme``, ``host``, and ``port`` fields. """ with self.pools.lock: # If the scheme, host, or port doesn't match existing open # connections, open a new ConnectionPool. pool = self.pools.get(pool_key) if pool: return pool # Make a fresh ConnectionPool of the desired type scheme = request_context["scheme"] host = request_context["host"] port = request_context["port"] pool = self._new_pool(scheme, host, port, request_context=request_context) self.pools[pool_key] = pool return pool def connection_from_url(self, url, pool_kwargs=None): """ Similar to :func:`urllib3.connectionpool.connection_from_url`. If ``pool_kwargs`` is not provided and a new pool needs to be constructed, ``self.connection_pool_kw`` is used to initialize the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs`` is provided, it is used instead. Note that if a new pool does not need to be created for the request, the provided ``pool_kwargs`` are not used. """ u = parse_url(url) return self.connection_from_host( u.host, port=u.port, scheme=u.scheme, pool_kwargs=pool_kwargs ) def _merge_pool_kwargs(self, override): """ Merge a dictionary of override values for self.connection_pool_kw. This does not modify self.connection_pool_kw and returns a new dict. Any keys in the override dictionary with a value of ``None`` are removed from the merged dictionary. """ base_pool_kwargs = self.connection_pool_kw.copy() if override: for key, value in override.items(): if value is None: try: del base_pool_kwargs[key] except KeyError: pass else: base_pool_kwargs[key] = value return base_pool_kwargs def _proxy_requires_url_absolute_form(self, parsed_url): """ Indicates if the proxy requires the complete destination URL in the request. Normally this is only needed when not using an HTTP CONNECT tunnel. """ if self.proxy is None: return False return not connection_requires_http_tunnel( self.proxy, self.proxy_config, parsed_url.scheme ) def _validate_proxy_scheme_url_selection(self, url_scheme): """ Validates that were not attempting to do TLS in TLS connections on Python2 or with unsupported SSL implementations. 
""" if self.proxy is None or url_scheme != "https": return if self.proxy.scheme != "https": return if six.PY2 and not self.proxy_config.use_forwarding_for_https: raise ProxySchemeUnsupported( "Contacting HTTPS destinations through HTTPS proxies " "'via CONNECT tunnels' is not supported in Python 2" ) def urlopen(self, method, url, redirect=True, **kw): """ Same as :meth:`urllib3.HTTPConnectionPool.urlopen` with custom cross-host redirect logic and only sends the request-uri portion of the ``url``. The given ``url`` parameter must be absolute, such that an appropriate :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it. """ u = parse_url(url) self._validate_proxy_scheme_url_selection(u.scheme) conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme) kw["assert_same_host"] = False kw["redirect"] = False if "headers" not in kw: kw["headers"] = self.headers.copy() if self._proxy_requires_url_absolute_form(u): response = conn.urlopen(method, url, **kw) else: response = conn.urlopen(method, u.request_uri, **kw) redirect_location = redirect and response.get_redirect_location() if not redirect_location: return response # Support relative URLs for redirecting. redirect_location = urljoin(url, redirect_location) if response.status == 303: # Change the method according to RFC 9110, Section 15.4.4. method = "GET" # And lose the body not to transfer anything sensitive. kw["body"] = None kw["headers"] = HTTPHeaderDict(kw["headers"])._prepare_for_method_change() retries = kw.get("retries") if not isinstance(retries, Retry): retries = Retry.from_int(retries, redirect=redirect) # Strip headers marked as unsafe to forward to the redirected location. # Check remove_headers_on_redirect to avoid a potential network call within # conn.is_same_host() which may use socket.gethostbyname() in the future. if retries.remove_headers_on_redirect and not conn.is_same_host( redirect_location ): headers = list(six.iterkeys(kw["headers"])) for header in headers: if header.lower() in retries.remove_headers_on_redirect: kw["headers"].pop(header, None) try: retries = retries.increment(method, url, response=response, _pool=conn)
except MaxRetryError:
4
2023-11-27 07:01:39+00:00
24k
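The `PoolManager` in the record above caches one `ConnectionPool` per normalized (scheme, host, port) key, so repeated requests to the same origin reuse a single pool. A short usage sketch against urllib3's public API (the target URLs are placeholders):

import urllib3

http = urllib3.PoolManager(num_pools=2)
r1 = http.request("GET", "http://example.com/")       # creates a pool for example.com
r2 = http.request("GET", "http://example.com/other")  # reuses that same pool
r3 = http.request("GET", "http://example.org/")       # different host -> a second pool
print(len(http.pools))  # 2

# Per-request pool options can be supplied without mutating the manager's defaults:
pool = http.connection_from_url("http://example.com/", pool_kwargs={"maxsize": 4})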
NobiDeveloper/Nobita-Filter-Bot
plugins/p_ttishow.py
[ { "identifier": "ADMINS", "path": "info.py", "snippet": "ADMINS = [int(admin) if id_pattern.search(admin) else admin for admin in environ.get('ADMINS', '').split()]" }, { "identifier": "LOG_CHANNEL", "path": "info.py", "snippet": "LOG_CHANNEL = int(environ.get('LOG_CHANNEL', ''))" }, ...
from pyrogram import Client, filters, enums from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup, CallbackQuery from pyrogram.errors.exceptions.bad_request_400 import MessageTooLong, PeerIdInvalid from info import ADMINS, LOG_CHANNEL, SUPPORT_CHAT, MELCOW_NEW_USERS, MELCOW_VID, CHNL_LNK, GRP_LNK from database.users_chats_db import db from database.ia_filterdb import Media from utils import get_size, temp, get_settings from Script import script from pyrogram.errors import ChatAdminRequired import asyncio
15,315
"""----------------------------------------- https://github.com/NobiDeveloper/Nobita-Filter-Bot --------------------------------------""" @Client.on_message(filters.new_chat_members & filters.group) async def save_group(bot, message): r_j_check = [u.id for u in message.new_chat_members] if temp.ME in r_j_check: if not await db.get_chat(message.chat.id): total=await bot.get_chat_members_count(message.chat.id) r_j = message.from_user.mention if message.from_user else "Anonymous" await bot.send_message(LOG_CHANNEL, script.LOG_TEXT_G.format(message.chat.title, message.chat.id, total, r_j)) await db.add_chat(message.chat.id, message.chat.title) if message.chat.id in temp.BANNED_CHATS: # Inspired from a boat of a banana tree buttons = [[ InlineKeyboardButton('Support', url='https://telegram.me/NobiDeveloperSupport') ]] reply_markup=InlineKeyboardMarkup(buttons) k = await message.reply( text='<b>CHAT NOT ALLOWED 🐞\n\nMy admins has restricted me from working here ! If you want to know more about it contact support..</b>', reply_markup=reply_markup, ) try: await k.pin() except: pass await bot.leave_chat(message.chat.id) return buttons = [[ InlineKeyboardButton('🥷 ʜᴇʟᴘ 🥷', url='https://telegram.me/NobiDeveloperSupport'), InlineKeyboardButton('♻️ ᴜᴘᴅᴀᴛᴇꜱ ♻️', url='https://telegram.me/NobiDeveloper') ]] reply_markup=InlineKeyboardMarkup(buttons) await message.reply_text( text=f"<b>☤ ᴛʜᴀɴᴋ ʏᴏᴜ ꜰᴏʀ ᴀᴅᴅɪɴɢ ᴍᴇ ɪɴ {message.chat.title}\n\n🤖 ᴅᴏɴ’ᴛ ꜰᴏʀɢᴇᴛ ᴛᴏ ᴍᴀᴋᴇ ᴍᴇ ᴀᴅᴍɪɴ 🤖\n\n🕵️ ɪꜰ ʏᴏᴜ ʜᴀᴠᴇ ᴀɴʏ ᴅᴏᴜʙᴛ ʏᴏᴜ ᴄʟᴇᴀʀ ɪᴛ ᴜsɪɴɢ ʙᴇʟᴏᴡ ʙᴜᴛᴛᴏɴs</b>", reply_markup=reply_markup) else: settings = await get_settings(message.chat.id) if settings["welcome"]: for u in message.new_chat_members: if (temp.MELCOW).get('welcome') is not None: try: await (temp.MELCOW['welcome']).delete() except: pass temp.MELCOW['welcome'] = await message.reply_photo(
"""----------------------------------------- https://github.com/NobiDeveloper/Nobita-Filter-Bot --------------------------------------""" @Client.on_message(filters.new_chat_members & filters.group) async def save_group(bot, message): r_j_check = [u.id for u in message.new_chat_members] if temp.ME in r_j_check: if not await db.get_chat(message.chat.id): total=await bot.get_chat_members_count(message.chat.id) r_j = message.from_user.mention if message.from_user else "Anonymous" await bot.send_message(LOG_CHANNEL, script.LOG_TEXT_G.format(message.chat.title, message.chat.id, total, r_j)) await db.add_chat(message.chat.id, message.chat.title) if message.chat.id in temp.BANNED_CHATS: # Inspired from a boat of a banana tree buttons = [[ InlineKeyboardButton('Support', url='https://telegram.me/NobiDeveloperSupport') ]] reply_markup=InlineKeyboardMarkup(buttons) k = await message.reply( text='<b>CHAT NOT ALLOWED 🐞\n\nMy admins has restricted me from working here ! If you want to know more about it contact support..</b>', reply_markup=reply_markup, ) try: await k.pin() except: pass await bot.leave_chat(message.chat.id) return buttons = [[ InlineKeyboardButton('🥷 ʜᴇʟᴘ 🥷', url='https://telegram.me/NobiDeveloperSupport'), InlineKeyboardButton('♻️ ᴜᴘᴅᴀᴛᴇꜱ ♻️', url='https://telegram.me/NobiDeveloper') ]] reply_markup=InlineKeyboardMarkup(buttons) await message.reply_text( text=f"<b>☤ ᴛʜᴀɴᴋ ʏᴏᴜ ꜰᴏʀ ᴀᴅᴅɪɴɢ ᴍᴇ ɪɴ {message.chat.title}\n\n🤖 ᴅᴏɴ’ᴛ ꜰᴏʀɢᴇᴛ ᴛᴏ ᴍᴀᴋᴇ ᴍᴇ ᴀᴅᴍɪɴ 🤖\n\n🕵️ ɪꜰ ʏᴏᴜ ʜᴀᴠᴇ ᴀɴʏ ᴅᴏᴜʙᴛ ʏᴏᴜ ᴄʟᴇᴀʀ ɪᴛ ᴜsɪɴɢ ʙᴇʟᴏᴡ ʙᴜᴛᴛᴏɴs</b>", reply_markup=reply_markup) else: settings = await get_settings(message.chat.id) if settings["welcome"]: for u in message.new_chat_members: if (temp.MELCOW).get('welcome') is not None: try: await (temp.MELCOW['welcome']).delete() except: pass temp.MELCOW['welcome'] = await message.reply_photo(
photo=(MELCOW_VID),
4
2023-11-28 13:36:56+00:00
24k
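The handler in the record above fires on `new_chat_members` service messages and replies with an inline keyboard. A stripped-down sketch of the same pattern using Pyrogram's public API (the session name, credentials, and URLs are placeholders):

from pyrogram import Client, filters
from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup

app = Client("my_bot", api_id=12345, api_hash="...", bot_token="...")  # placeholder credentials

@app.on_message(filters.new_chat_members & filters.group)
async def welcome(client, message):
    # Greet the group with a single-button inline keyboard, as in the record.
    buttons = InlineKeyboardMarkup([[InlineKeyboardButton("Help", url="https://telegram.me/example")]])
    await message.reply_text(f"Thank you for adding me to {message.chat.title}!", reply_markup=buttons)

app.run()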
chenxx89/BFRffusion
models/models.py
[ { "identifier": "timestep_embedding", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):\n \"\"\"\n Create sinusoidal timestep embeddings.\n :param timesteps: a 1-D Tensor of N indices, one per batch element.\...
import torch import os import numpy as np import math import shutil import safetensors.torch from ldm.modules.diffusionmodules.util import timestep_embedding from einops import rearrange, repeat from torchvision.utils import make_grid from ldm.modules.diffusionmodules.openaimodel import UNetModel from ldm.models.diffusion.ddpm import LatentDiffusion from ldm.util import log_txt_as_img, instantiate_from_config from ldm.models.diffusion.ddim import DDIMSampler from data.dataset_instantiate import instantiate_from_config as instantiate_dataset_from_config from torch.utils.tensorboard import SummaryWriter from tqdm import tqdm from metrics.metrics_all import calculate_psnr_ssim, calculate_lpips, calculate_NIQE, calculate_fid_folder from torch.utils.data import DataLoader from PIL import Image from torch.optim.lr_scheduler import LambdaLR from omegaconf import OmegaConf
20,351
def get_state_dict(d): return d.get('state_dict', d) def load_state_dict(ckpt_path, location='cpu'): _, extension = os.path.splitext(ckpt_path) if extension.lower() == ".safetensors": state_dict = safetensors.torch.load_file(ckpt_path, device=location) else: state_dict = get_state_dict(torch.load(ckpt_path, map_location=torch.device(location))) state_dict = get_state_dict(state_dict) print(f'Loaded state_dict from [{ckpt_path}]') return state_dict def create_model(config_path): config = OmegaConf.load(config_path)
def get_state_dict(d): return d.get('state_dict', d) def load_state_dict(ckpt_path, location='cpu'): _, extension = os.path.splitext(ckpt_path) if extension.lower() == ".safetensors": state_dict = safetensors.torch.load_file(ckpt_path, device=location) else: state_dict = get_state_dict(torch.load(ckpt_path, map_location=torch.device(location))) state_dict = get_state_dict(state_dict) print(f'Loaded state_dict from [{ckpt_path}]') return state_dict def create_model(config_path): config = OmegaConf.load(config_path)
model = instantiate_from_config(config.model).cpu()
6
2023-11-30 13:50:58+00:00
24k
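`load_state_dict` in the record above dispatches on file extension: `.safetensors` checkpoints go through `safetensors.torch.load_file`, everything else through `torch.load`, and a nested `state_dict` key is unwrapped either way. A usage sketch of the same pattern (the checkpoint path and model are placeholders):

import os
import torch
import safetensors.torch

def load_weights(ckpt_path, location="cpu"):
    # Choose the loading backend by extension, mirroring the record's load_state_dict.
    if os.path.splitext(ckpt_path)[1].lower() == ".safetensors":
        sd = safetensors.torch.load_file(ckpt_path, device=location)
    else:
        sd = torch.load(ckpt_path, map_location=location)
    return sd.get("state_dict", sd)  # training checkpoints often nest weights here

# model.load_state_dict(load_weights("weights.ckpt"), strict=False)  # placeholder model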
IanYeung/MGLD-VSR
ldm/models/autoencoder.py
[ { "identifier": "from_5d_to_4d", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def from_5d_to_4d(inp, b, c, t, h, w):\n out = rearrange(inp, 'b c t h w -> (b t) c h w')\n return out" }, { "identifier": "from_4d_to_5d", "path": "ldm/modules/diffusionmodules/util.py", "...
import torch import pytorch_lightning as pl import torch.nn.functional as F import random import torchvision.transforms as transforms from contextlib import contextmanager from einops import repeat, rearrange from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer from ldm.modules.diffusionmodules.util import from_5d_to_4d, from_4d_to_5d from ldm.modules.diffusionmodules.model import Encoder, Decoder, Decoder_Mix, VideoDecoder, VideoDecoder_Mix, VideoDecoderV2, VideoDecoder_MixV2 from ldm.modules.distributions.distributions import DiagonalGaussianDistribution from ldm.util import instantiate_from_config from basicsr.utils import DiffJPEG, USMSharp from basicsr.utils.img_process_util import filter2D from basicsr.data.transforms import paired_random_crop, triplet_random_crop from basicsr.data.degradations import \ random_add_gaussian_noise_pt, \ random_add_poisson_noise_pt, \ random_add_speckle_noise_pt, \ random_add_saltpepper_noise_pt
15,638
def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: if 'first_stage_model' in k: sd[k[18:]] = sd[k] del sd[k] for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Encoder Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") return missing def encode(self, x): h, enc_fea = self.encoder(x, return_fea=True) moments = self.quant_conv(h) posterior = DiagonalGaussianDistribution(moments) # posterior = h return posterior, enc_fea def encode_gt(self, x, new_encoder): h = new_encoder(x) moments = self.quant_conv(h) posterior = DiagonalGaussianDistribution(moments) return posterior, moments def decode(self, z, enc_fea): z = self.post_quant_conv(z) dec = self.decoder(z, enc_fea) return dec def forward(self, input, latent, sample_posterior=True): posterior, enc_fea_lq = self.encode(input) dec = self.decode(latent, enc_fea_lq) return dec, posterior @torch.no_grad() def _dequeue_and_enqueue(self): """It is the training pair pool for increasing the diversity in a batch. Batch processing limits the diversity of synthetic degradations in a batch. For example, samples in a batch could not have different resize scaling factors. Therefore, we employ this training pair pool to increase the degradation diversity in a batch. """ # initialize b, c, h, w = self.lq.size() _, c_, h_, w_ = self.latent.size() if b == self.configs.data.params.batch_size: if not hasattr(self, 'queue_size'): self.queue_size = self.configs.data.params.train.params.get('queue_size', b*50) if not hasattr(self, 'queue_lr'): assert self.queue_size % b == 0, f'queue size {self.queue_size} should be divisible by batch size {b}' self.queue_lr = torch.zeros(self.queue_size, c, h, w).cuda() _, c, h, w = self.gt.size() self.queue_gt = torch.zeros(self.queue_size, c, h, w).cuda() self.queue_sample = torch.zeros(self.queue_size, c, h, w).cuda() self.queue_latent = torch.zeros(self.queue_size, c_, h_, w_).cuda() self.queue_ptr = 0 if self.queue_ptr == self.queue_size: # the pool is full # do dequeue and enqueue # shuffle idx = torch.randperm(self.queue_size) self.queue_lr = self.queue_lr[idx] self.queue_gt = self.queue_gt[idx] self.queue_sample = self.queue_sample[idx] self.queue_latent = self.queue_latent[idx] # get first b samples lq_dequeue = self.queue_lr[0:b, :, :, :].clone() gt_dequeue = self.queue_gt[0:b, :, :, :].clone() sample_dequeue = self.queue_sample[0:b, :, :, :].clone() latent_dequeue = self.queue_latent[0:b, :, :, :].clone() # update the queue self.queue_lr[0:b, :, :, :] = self.lq.clone() self.queue_gt[0:b, :, :, :] = self.gt.clone() self.queue_sample[0:b, :, :, :] = self.sample.clone() self.queue_latent[0:b, :, :, :] = self.latent.clone() self.lq = lq_dequeue self.gt = gt_dequeue self.sample = sample_dequeue self.latent = latent_dequeue else: # only do enqueue self.queue_lr[self.queue_ptr:self.queue_ptr + b, :, :, :] = self.lq.clone() self.queue_gt[self.queue_ptr:self.queue_ptr + b, :, :, :] = self.gt.clone() self.queue_sample[self.queue_ptr:self.queue_ptr + b, :, :, :] = self.sample.clone() self.queue_latent[self.queue_ptr:self.queue_ptr + 
b, :, :, :] = self.latent.clone() self.queue_ptr = self.queue_ptr + b def get_input(self, batch): input = batch['lq'] gt = batch['gt'] latent = batch['latent'] sample = batch['sample'] assert not torch.isnan(latent).any() input = input.to(memory_format=torch.contiguous_format).float() gt = gt.to(memory_format=torch.contiguous_format).float() latent = latent.to(memory_format=torch.contiguous_format).float() / 0.18215 gt = gt * 2.0 - 1.0 input = input * 2.0 - 1.0 sample = sample * 2.0 -1.0 return input, gt, latent, sample @torch.no_grad() def get_input_synthesis(self, batch, val=False, test_gt=False):
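The `_dequeue_and_enqueue` method above maintains a fixed-size pool of past training pairs so that one batch can mix degradations synthesized at different steps. A simplified single-tensor version of that queue, assuming fixed shapes (the class and names are illustrative, not part of the repository's API):

import torch

class TrainingPairPool:
    # Fixed-size FIFO pool: fill first, then shuffle and swap whole batches.
    def __init__(self, queue_size, shape):
        assert queue_size % shape[0] == 0, "queue size must be divisible by batch size"
        self.queue = torch.zeros(queue_size, *shape[1:])
        self.ptr = 0

    def exchange(self, batch):
        b = batch.size(0)
        if self.ptr == self.queue.size(0):           # pool is full
            idx = torch.randperm(self.queue.size(0))
            self.queue = self.queue[idx]             # shuffle stored samples
            out = self.queue[:b].clone()             # dequeue the first b samples
            self.queue[:b] = batch.clone()           # enqueue the new batch
            return out
        self.queue[self.ptr:self.ptr + b] = batch.clone()  # not full yet: only enqueue
        self.ptr += b
        return batch

# pool = TrainingPairPool(queue_size=16, shape=(4, 3, 8, 8))
# mixed = pool.exchange(torch.randn(4, 3, 8, 8))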
class VQModel(pl.LightningModule): def __init__(self, ddconfig, lossconfig, n_embed, embed_dim, ckpt_path=None, ignore_keys=[], image_key="image", colorize_nlabels=None, monitor=None, batch_resize_range=None, scheduler_config=None, lr_g_factor=1.0, remap=None, sane_index_shape=False, # tell vector quantizer to return indices as bhw use_ema=False ): super().__init__() self.embed_dim = embed_dim self.n_embed = n_embed self.image_key = image_key self.encoder = Encoder(**ddconfig) self.decoder = Decoder(**ddconfig) self.loss = instantiate_from_config(lossconfig) self.quantize = VectorQuantizer(n_embed, embed_dim, beta=0.25, remap=remap, sane_index_shape=sane_index_shape) self.quant_conv = torch.nn.Conv2d(ddconfig["z_channels"], embed_dim, 1) self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1) if colorize_nlabels is not None: assert type(colorize_nlabels)==int self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1)) if monitor is not None: self.monitor = monitor self.batch_resize_range = batch_resize_range if self.batch_resize_range is not None: print(f"{self.__class__.__name__}: Using per-batch resizing in range {batch_resize_range}.") self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys) self.scheduler_config = scheduler_config self.lr_g_factor = lr_g_factor @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.parameters()) self.model_ema.copy_to(self) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.parameters()) if context is not None: print(f"{context}: Restored training weights") def init_from_ckpt(self, path, ignore_keys=list()): sd = torch.load(path, map_location="cpu")["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] missing, unexpected = self.load_state_dict(sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys: {missing}") print(f"Unexpected Keys: {unexpected}") def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self) def encode(self, x): h = self.encoder(x) h = self.quant_conv(h) quant, emb_loss, info = self.quantize(h) return quant, emb_loss, info def encode_to_prequant(self, x): h = self.encoder(x) h = self.quant_conv(h) return h def decode(self, quant): quant = self.post_quant_conv(quant) dec = self.decoder(quant) return dec def decode_code(self, code_b): quant_b = self.quantize.embed_code(code_b) dec = self.decode(quant_b) return dec def forward(self, input, return_pred_indices=False): quant, diff, (_,_,ind) = self.encode(input) dec = self.decode(quant) if return_pred_indices: return dec, diff, ind return dec, diff def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: x = x[..., None] x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float() if self.batch_resize_range is not None: lower_size = self.batch_resize_range[0] upper_size = self.batch_resize_range[1] if self.global_step <= 4: # do the first few batches with max size to avoid later oom new_resize = upper_size else: new_resize = np.random.choice(np.arange(lower_size, upper_size+16, 16)) if new_resize != x.shape[2]: x = 
F.interpolate(x, size=new_resize, mode="bicubic") x = x.detach() return x def training_step(self, batch, batch_idx, optimizer_idx): # https://github.com/pytorch/pytorch/issues/37142 # try not to fool the heuristics x = self.get_input(batch, self.image_key) xrec, qloss, ind = self(x, return_pred_indices=True) if optimizer_idx == 0: # autoencode aeloss, log_dict_ae = self.loss(qloss, x, xrec, optimizer_idx, self.global_step, last_layer=self.get_last_layer(), split="train", predicted_indices=ind) self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True) return aeloss if optimizer_idx == 1: # discriminator discloss, log_dict_disc = self.loss(qloss, x, xrec, optimizer_idx, self.global_step, last_layer=self.get_last_layer(), split="train") self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=True) return discloss def validation_step(self, batch, batch_idx): log_dict = self._validation_step(batch, batch_idx) with self.ema_scope(): log_dict_ema = self._validation_step(batch, batch_idx, suffix="_ema") return log_dict def _validation_step(self, batch, batch_idx, suffix=""): x = self.get_input(batch, self.image_key) xrec, qloss, ind = self(x, return_pred_indices=True) aeloss, log_dict_ae = self.loss(qloss, x, xrec, 0, self.global_step, last_layer=self.get_last_layer(), split="val"+suffix, predicted_indices=ind ) discloss, log_dict_disc = self.loss(qloss, x, xrec, 1, self.global_step, last_layer=self.get_last_layer(), split="val"+suffix, predicted_indices=ind ) rec_loss = log_dict_ae[f"val{suffix}/rec_loss"] self.log(f"val{suffix}/rec_loss", rec_loss, prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True) self.log(f"val{suffix}/aeloss", aeloss, prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True) if version.parse(pl.__version__) >= version.parse('1.4.0'): del log_dict_ae[f"val{suffix}/rec_loss"] self.log_dict(log_dict_ae) self.log_dict(log_dict_disc) return self.log_dict def configure_optimizers(self): lr_d = self.learning_rate lr_g = self.lr_g_factor*self.learning_rate print("lr_d", lr_d) print("lr_g", lr_g) opt_ae = torch.optim.Adam(list(self.encoder.parameters())+ list(self.decoder.parameters())+ list(self.quantize.parameters())+ list(self.quant_conv.parameters())+ list(self.post_quant_conv.parameters()), lr=lr_g, betas=(0.5, 0.9)) opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(), lr=lr_d, betas=(0.5, 0.9)) if self.scheduler_config is not None: scheduler = instantiate_from_config(self.scheduler_config) print("Setting up LambdaLR scheduler...") scheduler = [ { 'scheduler': LambdaLR(opt_ae, lr_lambda=scheduler.schedule), 'interval': 'step', 'frequency': 1 }, { 'scheduler': LambdaLR(opt_disc, lr_lambda=scheduler.schedule), 'interval': 'step', 'frequency': 1 }, ] return [opt_ae, opt_disc], scheduler return [opt_ae, opt_disc], [] def get_last_layer(self): return self.decoder.conv_out.weight def log_images(self, batch, only_inputs=False, plot_ema=False, **kwargs): log = dict() x = self.get_input(batch, self.image_key) x = x.to(self.device) if only_inputs: log["inputs"] = x return log xrec, _ = self(x) if x.shape[1] > 3: # colorize with random projection assert xrec.shape[1] > 3 x = self.to_rgb(x) xrec = self.to_rgb(xrec) log["inputs"] = x log["reconstructions"] = xrec if plot_ema: with self.ema_scope(): xrec_ema, _ = self(x) if x.shape[1] > 3: xrec_ema = self.to_rgb(xrec_ema) log["reconstructions_ema"] = xrec_ema return log def to_rgb(self, x): assert self.image_key == "segmentation" if not 
hasattr(self, "colorize"): self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x)) x = F.conv2d(x, weight=self.colorize) x = 2.*(x-x.min())/(x.max()-x.min()) - 1. return x class VQModelInterface(VQModel): def __init__(self, embed_dim, *args, **kwargs): super().__init__(embed_dim=embed_dim, *args, **kwargs) self.embed_dim = embed_dim def encode(self, x): h = self.encoder(x) h = self.quant_conv(h) return h def decode(self, h, force_not_quantize=False): # also go through quantization layer if not force_not_quantize: quant, emb_loss, info = self.quantize(h) else: quant = h quant = self.post_quant_conv(quant) dec = self.decoder(quant) return dec class AutoencoderKL(pl.LightningModule): def __init__(self, ddconfig, lossconfig, embed_dim, ckpt_path=None, ignore_keys=[], image_key="image", colorize_nlabels=None, monitor=None, ): super().__init__() self.image_key = image_key self.encoder = Encoder(**ddconfig) self.decoder = Decoder(**ddconfig) self.loss = instantiate_from_config(lossconfig) assert ddconfig["double_z"] self.quant_conv = torch.nn.Conv2d(2*ddconfig["z_channels"], 2*embed_dim, 1) self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1) self.embed_dim = embed_dim if colorize_nlabels is not None: assert type(colorize_nlabels)==int self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1)) if monitor is not None: self.monitor = monitor if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys) def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: if 'first_stage_model' in k: sd[k[18:]] = sd[k] for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Encoder Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys: {missing}") # if len(unexpected) > 0: # print(f"Unexpected Keys: {unexpected}") def encode(self, x, return_encfea=False): h = self.encoder(x) moments = self.quant_conv(h) posterior = DiagonalGaussianDistribution(moments) if return_encfea: return posterior, moments return posterior def encode_gt(self, x, new_encoder): h = new_encoder(x) moments = self.quant_conv(h) posterior = DiagonalGaussianDistribution(moments) return posterior, moments def decode(self, z): z = self.post_quant_conv(z) dec = self.decoder(z) return dec def forward(self, input, sample_posterior=True): posterior = self.encode(input) if sample_posterior: z = posterior.sample() else: z = posterior.mode() dec = self.decode(z) return dec, posterior def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: x = x[..., None] # x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float() x = x.to(memory_format=torch.contiguous_format).float() # x = x*2.0-1.0 return x def training_step(self, batch, batch_idx, optimizer_idx): inputs = self.get_input(batch, self.image_key) reconstructions, posterior = self(inputs) if optimizer_idx == 0: # train encoder+decoder+logvar aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step, last_layer=self.get_last_layer(), split="train") self.log("aeloss", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True) self.log_dict(log_dict_ae, 
prog_bar=False, logger=True, on_step=True, on_epoch=False) return aeloss if optimizer_idx == 1: # train the discriminator discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step, last_layer=self.get_last_layer(), split="train") self.log("discloss", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True) self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False) return discloss def validation_step(self, batch, batch_idx): inputs = self.get_input(batch, self.image_key) reconstructions, posterior = self(inputs) aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step, last_layer=self.get_last_layer(), split="val") discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step, last_layer=self.get_last_layer(), split="val") self.log("val/rec_loss", log_dict_ae["val/rec_loss"]) self.log_dict(log_dict_ae) self.log_dict(log_dict_disc) return self.log_dict def configure_optimizers(self): lr = self.learning_rate opt_ae = torch.optim.Adam(list(self.encoder.parameters())+ list(self.decoder.parameters())+ list(self.quant_conv.parameters())+ list(self.post_quant_conv.parameters()), lr=lr, betas=(0.5, 0.9)) opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(), lr=lr, betas=(0.5, 0.9)) return [opt_ae, opt_disc], [] def get_last_layer(self): return self.decoder.conv_out.weight @torch.no_grad() def log_images(self, batch, only_inputs=False, **kwargs): log = dict() x = self.get_input(batch, self.image_key) x = x.to(self.device) if not only_inputs: xrec, posterior = self(x) if x.shape[1] > 3: # colorize with random projection assert xrec.shape[1] > 3 x = self.to_rgb(x) xrec = self.to_rgb(xrec) # log["samples"] = self.decode(torch.randn_like(posterior.sample())) log["reconstructions"] = xrec log["inputs"] = x return log def to_rgb(self, x): assert self.image_key == "segmentation" if not hasattr(self, "colorize"): self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x)) x = F.conv2d(x, weight=self.colorize) x = 2.*(x-x.min())/(x.max()-x.min()) - 1. 
return x class IdentityFirstStage(torch.nn.Module): def __init__(self, *args, vq_interface=False, **kwargs): self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff super().__init__() def encode(self, x, *args, **kwargs): return x def decode(self, x, *args, **kwargs): return x def quantize(self, x, *args, **kwargs): if self.vq_interface: return x, None, [None, None, None] return x def forward(self, x, *args, **kwargs): return x class AutoencoderKLResi(pl.LightningModule): def __init__(self, ddconfig, lossconfig, embed_dim, ckpt_path=None, ignore_keys=[], image_key="image", colorize_nlabels=None, monitor=None, fusion_w=1.0, freeze_dec=True, synthesis_data=False, use_usm=False, test_gt=False, ): super().__init__() self.image_key = image_key self.encoder = Encoder(**ddconfig) self.decoder = Decoder_Mix(**ddconfig) self.decoder.fusion_w = fusion_w self.loss = instantiate_from_config(lossconfig) self.quant_conv = torch.nn.Conv2d(2*ddconfig["z_channels"], 2*embed_dim, 1) self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1) self.embed_dim = embed_dim if colorize_nlabels is not None: assert type(colorize_nlabels)==int self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1)) if monitor is not None: self.monitor = monitor if ckpt_path is not None: missing_list = self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys) else: missing_list = [] print('>>>>>>>>>>>>>>>>>missing>>>>>>>>>>>>>>>>>>>') print(missing_list) self.synthesis_data = synthesis_data self.use_usm = use_usm self.test_gt = test_gt if freeze_dec: for name, param in self.named_parameters(): if 'fusion_layer' in name: param.requires_grad = True # elif 'encoder' in name: # param.requires_grad = True # elif 'quant_conv' in name and 'post_quant_conv' not in name: # param.requires_grad = True elif 'loss.discriminator' in name: param.requires_grad = True else: param.requires_grad = False print('>>>>>>>>>>>>>>>>>trainable_list>>>>>>>>>>>>>>>>>>>') trainable_list = [] for name, params in self.named_parameters(): if params.requires_grad: trainable_list.append(name) print(trainable_list) print('>>>>>>>>>>>>>>>>>Untrainable_list>>>>>>>>>>>>>>>>>>>') untrainable_list = [] for name, params in self.named_parameters(): if not params.requires_grad: untrainable_list.append(name) print(untrainable_list) # untrainable_list = list(set(trainable_list).difference(set(missing_list))) # print('>>>>>>>>>>>>>>>>>untrainable_list>>>>>>>>>>>>>>>>>>>') # print(untrainable_list) # def init_from_ckpt(self, path, ignore_keys=list()): # sd = torch.load(path, map_location="cpu")["state_dict"] # keys = list(sd.keys()) # for k in keys: # for ik in ignore_keys: # if k.startswith(ik): # print("Deleting key {} from state_dict.".format(k)) # del sd[k] # self.load_state_dict(sd, strict=False) # print(f"Restored from {path}") def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: if 'first_stage_model' in k: sd[k[18:]] = sd[k] del sd[k] for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Encoder Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 
0: print(f"Unexpected Keys: {unexpected}") return missing def encode(self, x): h, enc_fea = self.encoder(x, return_fea=True) moments = self.quant_conv(h) posterior = DiagonalGaussianDistribution(moments) # posterior = h return posterior, enc_fea def encode_gt(self, x, new_encoder): h = new_encoder(x) moments = self.quant_conv(h) posterior = DiagonalGaussianDistribution(moments) return posterior, moments def decode(self, z, enc_fea): z = self.post_quant_conv(z) dec = self.decoder(z, enc_fea) return dec def forward(self, input, latent, sample_posterior=True): posterior, enc_fea_lq = self.encode(input) dec = self.decode(latent, enc_fea_lq) return dec, posterior @torch.no_grad() def _dequeue_and_enqueue(self): """It is the training pair pool for increasing the diversity in a batch. Batch processing limits the diversity of synthetic degradations in a batch. For example, samples in a batch could not have different resize scaling factors. Therefore, we employ this training pair pool to increase the degradation diversity in a batch. """ # initialize b, c, h, w = self.lq.size() _, c_, h_, w_ = self.latent.size() if b == self.configs.data.params.batch_size: if not hasattr(self, 'queue_size'): self.queue_size = self.configs.data.params.train.params.get('queue_size', b*50) if not hasattr(self, 'queue_lr'): assert self.queue_size % b == 0, f'queue size {self.queue_size} should be divisible by batch size {b}' self.queue_lr = torch.zeros(self.queue_size, c, h, w).cuda() _, c, h, w = self.gt.size() self.queue_gt = torch.zeros(self.queue_size, c, h, w).cuda() self.queue_sample = torch.zeros(self.queue_size, c, h, w).cuda() self.queue_latent = torch.zeros(self.queue_size, c_, h_, w_).cuda() self.queue_ptr = 0 if self.queue_ptr == self.queue_size: # the pool is full # do dequeue and enqueue # shuffle idx = torch.randperm(self.queue_size) self.queue_lr = self.queue_lr[idx] self.queue_gt = self.queue_gt[idx] self.queue_sample = self.queue_sample[idx] self.queue_latent = self.queue_latent[idx] # get first b samples lq_dequeue = self.queue_lr[0:b, :, :, :].clone() gt_dequeue = self.queue_gt[0:b, :, :, :].clone() sample_dequeue = self.queue_sample[0:b, :, :, :].clone() latent_dequeue = self.queue_latent[0:b, :, :, :].clone() # update the queue self.queue_lr[0:b, :, :, :] = self.lq.clone() self.queue_gt[0:b, :, :, :] = self.gt.clone() self.queue_sample[0:b, :, :, :] = self.sample.clone() self.queue_latent[0:b, :, :, :] = self.latent.clone() self.lq = lq_dequeue self.gt = gt_dequeue self.sample = sample_dequeue self.latent = latent_dequeue else: # only do enqueue self.queue_lr[self.queue_ptr:self.queue_ptr + b, :, :, :] = self.lq.clone() self.queue_gt[self.queue_ptr:self.queue_ptr + b, :, :, :] = self.gt.clone() self.queue_sample[self.queue_ptr:self.queue_ptr + b, :, :, :] = self.sample.clone() self.queue_latent[self.queue_ptr:self.queue_ptr + b, :, :, :] = self.latent.clone() self.queue_ptr = self.queue_ptr + b def get_input(self, batch): input = batch['lq'] gt = batch['gt'] latent = batch['latent'] sample = batch['sample'] assert not torch.isnan(latent).any() input = input.to(memory_format=torch.contiguous_format).float() gt = gt.to(memory_format=torch.contiguous_format).float() latent = latent.to(memory_format=torch.contiguous_format).float() / 0.18215 gt = gt * 2.0 - 1.0 input = input * 2.0 - 1.0 sample = sample * 2.0 -1.0 return input, gt, latent, sample @torch.no_grad() def get_input_synthesis(self, batch, val=False, test_gt=False):
jpeger = DiffJPEG(differentiable=False).cuda() # simulate JPEG compression artifacts
11
2023-11-30 01:50:29+00:00
24k
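Aside: the `_dequeue_and_enqueue` method in the sample above maintains a training pair pool so that one batch can mix samples produced under many past degradation settings; it fills a queue several times larger than the batch, then shuffles it and swaps the current batch against the queue head. Below is a minimal CPU-only sketch of that queue mechanic; `TrainingPairPool` and the toy tensor shapes are illustrative, not names from the repository.

# Minimal sketch of the training-pair-pool idea from the record above.
import torch


class TrainingPairPool:
    def __init__(self, queue_size: int, sample_shape: tuple):
        self.queue = torch.zeros(queue_size, *sample_shape)
        self.size = queue_size
        self.ptr = 0  # number of samples currently stored

    def exchange(self, batch: torch.Tensor) -> torch.Tensor:
        b = batch.shape[0]
        assert self.size % b == 0, "queue size should be divisible by batch size"
        if self.ptr == self.size:
            # Pool is full: shuffle, hand out the head, enqueue the new batch.
            idx = torch.randperm(self.size)
            self.queue = self.queue[idx]
            out = self.queue[:b].clone()
            self.queue[:b] = batch
            return out
        # Pool still filling: enqueue only and return the batch unchanged.
        self.queue[self.ptr:self.ptr + b] = batch
        self.ptr += b
        return batch


pool = TrainingPairPool(queue_size=8, sample_shape=(3, 4, 4))
for _ in range(5):
    lq = torch.randn(2, 3, 4, 4)
    lq = pool.exchange(lq)  # after warm-up, batches are re-mixed from the pool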
Czm369/MixPL
mmdet/configs/rtmdet/rtmdet_ins_tiny_8xb32_300e_coco.py
[ { "identifier": "PackDetInputs", "path": "mmdet/datasets/transforms/formatting.py", "snippet": "class PackDetInputs(BaseTransform):\n \"\"\"Pack the inputs data for the detection / semantic segmentation /\n panoptic segmentation.\n\n The ``img_meta`` item is always populated. The contents of t...
from mmengine.config import read_base from .rtmdet_ins_s_8xb32_300e_coco import * from mmcv.transforms.loading import LoadImageFromFile from mmcv.transforms.processing import RandomResize from mmdet.datasets.transforms.formatting import PackDetInputs from mmdet.datasets.transforms.loading import (FilterAnnotations, LoadAnnotations) from mmdet.datasets.transforms.transforms import (CachedMixUp, CachedMosaic, Pad, RandomCrop, RandomFlip, Resize, YOLOXHSVRandomAug)
17,231
# Copyright (c) OpenMMLab. All rights reserved. # Please refer to https://mmengine.readthedocs.io/en/latest/advanced_tutorials/config.html#a-pure-python-style-configuration-file-beta for more details. # noqa # mmcv >= 2.0.1 # mmengine >= 0.8.0 with read_base(): checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-tiny_imagenet_600e.pth' # noqa model.update( dict( backbone=dict( deepen_factor=0.167, widen_factor=0.375, init_cfg=dict( type='Pretrained', prefix='backbone.', checkpoint=checkpoint)), neck=dict( in_channels=[96, 192, 384], out_channels=96, num_csp_blocks=1), bbox_head=dict(in_channels=96, feat_channels=96))) train_pipeline = [ dict(type=LoadImageFromFile, backend_args=backend_args), dict( type=LoadAnnotations, with_bbox=True, with_mask=True, poly2mask=False), dict( type=CachedMosaic, img_scale=(640, 640), pad_val=114.0, max_cached_images=20, random_pop=False), dict( type=RandomResize, scale=(1280, 1280), ratio_range=(0.5, 2.0), resize_type=Resize, keep_ratio=True),
# Copyright (c) OpenMMLab. All rights reserved. # Please refer to https://mmengine.readthedocs.io/en/latest/advanced_tutorials/config.html#a-pure-python-style-configuration-file-beta for more details. # noqa # mmcv >= 2.0.1 # mmengine >= 0.8.0 with read_base(): checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-tiny_imagenet_600e.pth' # noqa model.update( dict( backbone=dict( deepen_factor=0.167, widen_factor=0.375, init_cfg=dict( type='Pretrained', prefix='backbone.', checkpoint=checkpoint)), neck=dict( in_channels=[96, 192, 384], out_channels=96, num_csp_blocks=1), bbox_head=dict(in_channels=96, feat_channels=96))) train_pipeline = [ dict(type=LoadImageFromFile, backend_args=backend_args), dict( type=LoadAnnotations, with_bbox=True, with_mask=True, poly2mask=False), dict( type=CachedMosaic, img_scale=(640, 640), pad_val=114.0, max_cached_images=20, random_pop=False), dict( type=RandomResize, scale=(1280, 1280), ratio_range=(0.5, 2.0), resize_type=Resize, keep_ratio=True),
dict(type=RandomCrop, crop_size=(640, 640)),
6
2023-11-30 08:58:00+00:00
24k
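Aside: the pipeline in the record above is a pure-Python-style config, i.e. a list of `dict(type=TransformClass, **kwargs)` entries that are later instantiated and chained. Below is a minimal self-contained sketch of that build-and-compose step; `build_pipeline` and the toy transforms are hypothetical stand-ins, not mmengine/mmdet APIs.

# Minimal sketch: turning dict(type=Class, **kwargs) specs into a callable chain.
class Scale:
    def __init__(self, factor):
        self.factor = factor

    def __call__(self, results):
        results["scale"] = results.get("scale", 1.0) * self.factor
        return results


class Tag:
    def __init__(self, name):
        self.name = name

    def __call__(self, results):
        results.setdefault("tags", []).append(self.name)
        return results


def build_pipeline(specs):
    # Each spec is dict(type=Class, **init_kwargs): pop `type`, instantiate,
    # then apply the transforms left to right, as registry-style configs do.
    steps = []
    for spec in specs:
        spec = dict(spec)
        cls = spec.pop("type")
        steps.append(cls(**spec))

    def run(results):
        for step in steps:
            results = step(results)
        return results

    return run


pipeline = build_pipeline([dict(type=Scale, factor=0.5), dict(type=Tag, name="train")])
print(pipeline({"scale": 2.0}))  # {'scale': 1.0, 'tags': ['train']}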
SEU-ProactiveSecurity-Group/MalPurifier
examples/amd_icnn_test.py
[ { "identifier": "Dataset", "path": "core/defense/dataset.py", "snippet": "class Dataset(torch.utils.data.Dataset):\n def __init__(self, seed=0, device='cuda', feature_ext_args=None):\n \"\"\"\n 为机器学习模型学习构建数据集。\n \n :param seed: 随机种子\n :param device: 设备类型,'cuda' 或 'c...
import os.path as path import time from core.defense import Dataset from core.defense import AdvMalwareDetectorICNN, MalwareDetectionDNN from tools.utils import save_args, get_group_args, dump_pickle from examples.md_nn_test import cmd_md
17,879
from __future__ import absolute_import from __future__ import division from __future__ import print_function indicator_argparse = cmd_md.add_argument_group(title='adv indicator') indicator_argparse.add_argument('--ratio', type=float, default=0.95, help='ratio of validation examples remained for passing through malware detector') def _main(): # Parse command-line arguments args = cmd_md.parse_args() # Load the dataset and extract features according to the arguments dataset = Dataset(feature_ext_args=get_group_args(args, cmd_md, 'feature')) # Get the input producer for the training dataset train_dataset_producer = dataset.get_input_producer(*dataset.train_dataset, batch_size=args.batch_size, name='train', use_cache=args.cache) # Get the input producer for the validation dataset val_dataset_producer = dataset.get_input_producer(*dataset.validation_dataset, batch_size=args.batch_size, name='val') # Get the input producer for the test dataset test_dataset_producer = dataset.get_input_producer(*dataset.test_dataset, batch_size=args.batch_size, name='test') # Ensure the dataset has exactly two classes (presumably malware and benign) assert dataset.n_classes == 2 # Choose the device (CPU or GPU) depending on whether CUDA is requested dv = 'cuda' if args.cuda else 'cpu' # In test mode use the given model name; otherwise generate one from the current time model_name = args.model_name if args.mode == 'test' else time.strftime("%Y%m%d-%H%M%S") # Initialize the base malware detection model
from __future__ import absolute_import from __future__ import division from __future__ import print_function indicator_argparse = cmd_md.add_argument_group(title='adv indicator') indicator_argparse.add_argument('--ratio', type=float, default=0.95, help='ratio of validation examples remained for passing through malware detector') def _main(): # Parse command-line arguments args = cmd_md.parse_args() # Load the dataset and extract features according to the arguments dataset = Dataset(feature_ext_args=get_group_args(args, cmd_md, 'feature')) # Get the input producer for the training dataset train_dataset_producer = dataset.get_input_producer(*dataset.train_dataset, batch_size=args.batch_size, name='train', use_cache=args.cache) # Get the input producer for the validation dataset val_dataset_producer = dataset.get_input_producer(*dataset.validation_dataset, batch_size=args.batch_size, name='val') # Get the input producer for the test dataset test_dataset_producer = dataset.get_input_producer(*dataset.test_dataset, batch_size=args.batch_size, name='test') # Ensure the dataset has exactly two classes (presumably malware and benign) assert dataset.n_classes == 2 # Choose the device (CPU or GPU) depending on whether CUDA is requested dv = 'cuda' if args.cuda else 'cpu' # In test mode use the given model name; otherwise generate one from the current time model_name = args.model_name if args.mode == 'test' else time.strftime("%Y%m%d-%H%M%S") # Initialize the base malware detection model
md_model = MalwareDetectionDNN(dataset.vocab_size,
1
2023-11-27 02:00:23+00:00
24k
iann838/pulsefire
tests/test_doc_scripts.py
[ { "identifier": "RiotAPIClient", "path": "pulsefire/clients.py", "snippet": "class RiotAPIClient(BaseClient):\n \"\"\"Riot API Client.\n\n | Resources | Support |\n | -------------------- | -------------------------- |\n | League of Legends | ✅ ...
import asyncio import os from pulsefire.clients import RiotAPIClient from pulsefire.functools import async_to_sync from pulsefire.schemas import RiotAPISchema
15,782
@async_to_sync() async def test_concurrent_request_alt2(): async with RiotAPIClient(default_headers={"X-Riot-Token": os.environ["RIOT_API_KEY"]}) as client: account = await client.get_account_v1_by_riot_id(region="americas", game_name="Not a Whale", tag_line="NA1") summoner = await client.get_lol_summoner_v4_by_puuid(region="na1", puuid=account["puuid"]) match_ids = await client.get_lol_match_v5_match_ids_by_puuid(region="americas", puuid=summoner["puuid"]) tasks: list[asyncio.Task] = [] async with asyncio.TaskGroup() as tg: for match_id in match_ids[:20]: tasks.append(tg.create_task(client.get_lol_match_v5_match(region="americas", id=match_id)))
@async_to_sync() async def test_concurrent_request_alt2(): async with RiotAPIClient(default_headers={"X-Riot-Token": os.environ["RIOT_API_KEY"]}) as client: account = await client.get_account_v1_by_riot_id(region="americas", game_name="Not a Whale", tag_line="NA1") summoner = await client.get_lol_summoner_v4_by_puuid(region="na1", puuid=account["puuid"]) match_ids = await client.get_lol_match_v5_match_ids_by_puuid(region="americas", puuid=summoner["puuid"]) tasks: list[asyncio.Task] = [] async with asyncio.TaskGroup() as tg: for match_id in match_ids[:20]: tasks.append(tg.create_task(client.get_lol_match_v5_match(region="americas", id=match_id)))
matches: list[RiotAPISchema.LolMatchV5Match] = [task.result() for task in tasks]
2
2023-11-27 13:37:24+00:00
24k
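Aside: the test in the record above fans out up to 20 match requests inside an `asyncio.TaskGroup` and collects the results from the finished tasks. Below is a minimal stdlib-only sketch of the same fan-out pattern using `asyncio.gather`; `fetch_match` is a hypothetical stand-in for `client.get_lol_match_v5_match`.

# Minimal sketch of concurrent fan-out over a list of ids with asyncio.
import asyncio


async def fetch_match(match_id: str) -> dict:
    await asyncio.sleep(0.01)  # stands in for the HTTP round trip
    return {"id": match_id}


async def main() -> None:
    match_ids = [f"NA1_{i}" for i in range(20)]
    # asyncio.gather preserves input order, like collecting TaskGroup results.
    matches = await asyncio.gather(*(fetch_match(mid) for mid in match_ids))
    print(len(matches), matches[0])


asyncio.run(main())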
Matrixeigs/UncertaintyManagementInteroperablePowerTransportationSystems
TestCaseDistributionSystems/uc_mmgs_fully_recourse_problem.py
[ { "identifier": "case33", "path": "TestCaseDistributionSystems/test_cases/case33.py", "snippet": "def case33():\n \"\"\"Power flow data for 33 bus, 6 generator case.\n Please see L{caseformat} for details on the case file format.\n\n Based on data from ...\n\n Alsac, O. & Stott, B., I{\"Opti...
from TestCaseDistributionSystems.test_cases import case33 from TestCasesMicrogrids.test_cases.cases_unit_commitment import micro_grid from TestCasesTransportationSystems.test_cases import case3, TIME, LOCATION from scipy import zeros, shape, ones, diag, concatenate, eye from scipy.sparse import csr_matrix as sparse from scipy.sparse import hstack, vstack, lil_matrix from numpy import flatnonzero as find from numpy import array, tile, arange, random from pypower.idx_brch import F_BUS, T_BUS, BR_R, BR_X, RATE_A from pypower.idx_bus import PD, VMAX, VMIN, QD, BUS_I from pypower.idx_gen import GEN_BUS, PMAX, PMIN, QMAX, QMIN from pypower.ext2int import ext2int from Solvers.mixed_integer_quadratic_constrained_cplex import mixed_integer_quadratic_constrained_programming as miqcp from Solvers.mixed_integer_programming_gurobi import mixed_integer_linear_programming as milp from copy import deepcopy from TestCaseDistributionSystems.data_format.idx_MG_PV import PBIC_AC2DC, PG, PESS_DC, PBIC_DC2AC, PUG, PESS_CH, \ PMESS, EESS, NX_MG, QBIC, QUG, QG, PPV from TestCaseDistributionSystems.database_management_pv import DataBaseManagement from StochasticOptimization.scenario_reduction import ScenarioReduction
15,369
pg_l = gen[:, PMIN] / baseMVA qg_l = gen[:, QMIN] / baseMVA pd_l = zeros(nd) pij_u = slmax qij_u = slmax lij_u = slmax vm_u = bus[:, VMAX] ** 2 pg_u = gen[:, PMAX] / baseMVA qg_u = gen[:, QMAX] / baseMVA pd_u = bus[d, PD] / baseMVA _nv_second_stage = int(3 * nl + nb + 2 * ng + 2 * nmg + nd) self._nv_second_stage = _nv_second_stage # Number of decision variable within each time slot lb = concatenate([tile(concatenate([pij_l, qij_l, lij_l, vm_l, pg_l, qg_l, pmg_l, qmg_l, pd_l]), T)]) ub = concatenate([tile(concatenate([pij_u, qij_u, lij_u, vm_u, pg_u, qg_u, pmg_u, qmg_u, pd_u]), T)]) vtypes = ["c"] * _nv_second_stage * T nv_ds = _nv_second_stage * T # Number of total decision variables # Add system level constraints # 1) Active power balance Aeq_p = lil_matrix((nb * T, nv_ds)) beq_p = zeros(nb * T) for i in range(T): Aeq_p[i * nb:(i + 1) * nb, i * _nv_second_stage: (i + 1) * _nv_second_stage] = \ hstack([Ct - Cf, zeros((nb, nl)), -diag(Ct * Branch_R) * Ct, zeros((nb, nb)), Cg, zeros((nb, ng)), -Cmg, zeros((nb, nmg)), Cd]) beq_p[i * nb:(i + 1) * nb] = profile[i * nb:(i + 1) * nb] / baseMVA # 2) Reactive power balance Aeq_q = lil_matrix((nb * T, nv_ds)) beq_q = zeros(nb * T) for i in range(T): Aeq_q[i * nb:(i + 1) * nb, i * _nv_second_stage: (i + 1) * _nv_second_stage] = \ hstack([zeros((nb, nl)), Ct - Cf, -diag(Ct * Branch_X) * Ct, zeros((nb, nb)), zeros((nb, ng)), Cg, zeros((nb, nmg)), -Cmg, Cd.dot(diag(bus[d,QD]/bus[d,PD]))]) for j in range(nb): if bus[j, PD] > 0: beq_q[i * nb:(i + 1) * nb] = profile[i * nb + j] / bus[j, PD] * bus[j, QD] / baseMVA # 3) KVL equation Aeq_kvl = lil_matrix((nl * T, nv_ds)) beq_kvl = zeros(nl * T) for i in range(T): Aeq_kvl[i * nl:(i + 1) * nl, i * _nv_second_stage: i * _nv_second_stage + nl] = -2 * diag(Branch_R) Aeq_kvl[i * nl:(i + 1) * nl, i * _nv_second_stage + nl: i * _nv_second_stage + 2 * nl] = -2 * diag(Branch_X) Aeq_kvl[i * nl:(i + 1) * nl, i * _nv_second_stage + 2 * nl: i * _nv_second_stage + 3 * nl] = diag( Branch_R ** 2) + diag(Branch_X ** 2) Aeq_kvl[i * nl:(i + 1) * nl, i * _nv_second_stage + 3 * nl:i * _nv_second_stage + 3 * nl + nb] = ( Cf.T - Ct.T).toarray() Aeq = vstack([Aeq_p, Aeq_q, Aeq_kvl]) beq = concatenate([beq_p, beq_q, beq_kvl]) c = zeros(nv_ds) q = zeros(nv_ds) c0 = 0 for t in range(T): for i in range(ng): c[t * _nv_second_stage + i + 3 * nl + nb] = gencost[i, 5] * baseMVA q[t * _nv_second_stage + i + 3 * nl + nb] = gencost[i, 4] * baseMVA * baseMVA c0 += gencost[i, 6] for i in range(nd): c[t * _nv_second_stage + i + 3 * nl + nb + 2 * ng + 2 * nmg] = Voll * baseMVA# The load shedding cost # Coupling constraints between the distribution systems and micro_grids Ax2y = lil_matrix((2 * nmg * T, nv_ds)) # connection matrix with the microgrids for i in range(T): for j in range(nmg): # Active power Ax2y[i * nmg + j, i * _nv_second_stage + 3 * nl + nb + 2 * ng + j] = 1000 * baseMVA # Reactive power Ax2y[nmg * T + i * nmg + j, i * _nv_second_stage + 3 * nl + nb + 2 * ng + nmg + j] = 1000 * baseMVA # II) Formulate the problem for microgrids model_microgrids = {} for i in range(nmg): model_microgrids[i] = self.problem_formulation_microgrid(mg=mgs[i], mess=mess) # II.A) Combine the distribution system operation problem and microgrid systems if Aeq is not None: neq_ds = Aeq.shape[0] else: neq_ds = 0 nVariables = int(nv_ds) neq = int(neq_ds) nv_index = zeros(nmg + 1).astype(int) neq_index = zeros(nmg + 1).astype(int) nv_index[0] = nv_ds neq_index[0] = int(neq_ds) for i in range(nmg): nv_index[i + 1] = nv_index[i] + len(model_microgrids[i]["c"]) 
neq_index[i + 1] = neq_index[i] + model_microgrids[i]["Aeq"].shape[0] nVariables += len(model_microgrids[i]["c"]) neq += int(model_microgrids[i]["Aeq"].shape[0]) Aeq_full = lil_matrix((int(neq_index[-1]), int(nv_index[-1]))) Aeq_full[0:neq_ds, 0:nv_ds] = Aeq for i in range(nmg): lb = concatenate([lb, model_microgrids[i]["lb"]]) ub = concatenate([ub, model_microgrids[i]["ub"]]) c = concatenate([c, model_microgrids[i]["c"]]) q = concatenate([q, model_microgrids[i]["q"]]) vtypes += model_microgrids[i]["vtypes"] beq = concatenate([beq, model_microgrids[i]["beq"]]) Aeq_full[neq_index[i]:neq_index[i + 1], nv_index[i]:nv_index[i + 1]] = model_microgrids[i]["Aeq"] # Add coupling constraints, between the microgrids and distribution networks Ay2x = lil_matrix((2 * nmg * T, nv_index[-1] - nv_index[0])) for i in range(T): for j in range(nmg): Ay2x[i * nmg + j, int(nv_index[j] - nv_index[0]) + i * NX_MG + PUG] = -1
""" Stochastic optimal power flow with multiple microgrids and mobile energy storage systems @author: Zhao Tianyang @e-mail: zhaoty@ntu.edu.sg @date: 10 Jan 2019 Major updates: 1) Update code style using PEP 8 -- Style Guide for Python Code 2) Store data in database 3) Scenario generation and reduction 4) Automatic results analysis Nomenclature: nV: number of variables mg: microgrid ds: distribution systems me: mobile energy storage systems ch: charging dc: discharging ele: electricity tra: traffic i,j,k: index t: time index T: time periods tns:traffic networks pns:power networks """ class StochasticDynamicOptimalPowerFlowTess(): def __init__(self): self.name = "Unit commitment with tess" def main(self, power_networks, micro_grids, profile, pv_profile, mess, traffic_networks, ns=100): """ Main entrance for network reconfiguration problems :param case: electric network information :param profile: load profile within the distribution networks :param micrgrids: dictionary for microgrids :param tess: dictionary for tess :return: network reconfiguration, distribution network status, and microgrid status """ T = len(profile) # Time spans self.T = T nmg = len(micro_grids) # Number of microgrids self.nmg = nmg nmes = len(mess) # Number of mobile energy storage systems self.nmes = nmes nb_tra = traffic_networks["bus"].shape[0] # Number of buses in the transportation networks self.nb_tra = nb_tra assert nb_tra == nmg, "The microgrids within the transportation networks are not synchronized!" # 1) Formulate the first stage optimization problem model_first_stage = self.first_stage_problem_formualtion(pns=power_networks, mgs=micro_grids, mess=mess, tns=traffic_networks) # (sol_first_stage, obj, success) = milp(model_first_stage["c"], Aeq=model_first_stage["Aeq"], # beq=model_first_stage["beq"], # A=model_first_stage["A"], b=model_first_stage["b"], # vtypes=model_first_stage["vtypes"], # xmax=model_first_stage["ub"], xmin=model_first_stage["lb"]) # sol_first_stage = self.first_stage_solution_validation(sol=sol_first_stage) # 2) Formulate the second stage optimization problem # Formulate the second stage scenarios (ds_second_stage, mgs_second_stage, weight) = self.scenario_generation_reduction(profile=profile, micro_grids=micro_grids, ns=ns, pns=power_networks, pv_profile=pv_profile, ns_reduced=round(0.98 * ns)) ns -= round(0.98 * ns) model_second_stage = {} for i in range(ns): model_second_stage[i] = self.second_stage_problem_formualtion(pns=power_networks, mgs=mgs_second_stage[i], mess=mess, tns=traffic_networks, profile=ds_second_stage[i, :], index=i, weight=weight[i]) # 3) Merge the first-stage problem and second stage problem lb = model_first_stage["lb"] ub = model_first_stage["ub"] vtypes = model_first_stage["vtypes"] c = model_first_stage["c"] Qc = dict() if model_first_stage["Aeq"] is not None: neq = model_first_stage["Aeq"].shape[0] else: neq = 0 if model_first_stage["A"] is not None: nineq = model_first_stage["A"].shape[0] else: nineq = 0 nv_first_stage = self.nv_first_stage nv_second_stage = self.nv_second_stage q = zeros(nv_first_stage) nv_index = zeros(ns + 1).astype(int) neq_index = zeros(ns + 1).astype(int) nineq_index = zeros(ns + 1).astype(int) neq_index[0] = neq nineq_index[0] = nineq nv_index[0] = nv_first_stage beq = model_first_stage["beq"] for i in range(ns): if model_second_stage[i]["Aeq"] is not None: neq_index[i + 1] = neq_index[i] + model_second_stage[i]["Aeq"].shape[0] else: neq_index[i + 1] = neq_index[i] if model_second_stage[i]["Ts"] is not None: nineq_index[i + 1] = 
nineq_index[i] + model_second_stage[i]["Ts"].shape[0] else: nineq_index[i + 1] = nineq_index[i] nv_index[i + 1] = nv_index[i] + nv_second_stage c = concatenate([c, model_second_stage[i]["c"]]) q = concatenate([q, model_second_stage[i]["q"]]) lb = concatenate([lb, model_second_stage[i]["lb"]]) ub = concatenate([ub, model_second_stage[i]["ub"]]) vtypes += model_second_stage[i]["vtypes"] beq = concatenate([beq, model_second_stage[i]["beq"]]) Aeq_full = lil_matrix((neq_index[-1], nv_index[-1])) Aeq_full[0:neq_index[0], 0:nv_index[0]] = model_first_stage["Aeq"] rc = zeros(0) for i in range(ns): Aeq_full[neq_index[i]:neq_index[i + 1], nv_index[i]:nv_index[i + 1]] = model_second_stage[i]["Aeq"] Qc.update(model_second_stage[i]["Qc"]) rc = concatenate([rc, model_second_stage[i]["rc"]]) A_full = lil_matrix((nineq_index[-1], nv_index[-1])) b = model_first_stage["b"] A_full[0:int(nineq_index[0]), 0:int(nv_index[0])] = model_first_stage["A"] for i in range(ns): A_full[nineq_index[i]:nineq_index[i + 1], 0:nv_index[0]] = model_second_stage[i]["Ts"] A_full[nineq_index[i]:nineq_index[i + 1], nv_index[i]:nv_index[i + 1]] = model_second_stage[i]["Ws"] b = concatenate([b, model_second_stage[i]["hs"]]) # 3) Obtain the results for first-stage and second stage optimization problems # 3.1) Obtain the integrated solution (sol, obj, success) = miqcp(c, q, Aeq=Aeq_full, beq=beq, A=A_full, b=b, Qc=Qc, rc=rc, xmin=lb, xmax=ub, vtypes=vtypes) # 3.2) decouple the solution into multiple subsystems sol_first_stage = sol[0:nv_second_stage] sol_second_stage = {} for i in range(ns): sol_second_stage[i] = sol[int(nv_index[i]):int(nv_index[i + 1])] # 4) Verify the first-stage and second stage optization problem # 4.1) First-stage solution sol_first_stage = self.first_stage_solution_validation(sol=sol_first_stage) # 4.2) Second-stage solution sol_second_stage_checked = {} db_management = DataBaseManagement() db_management.create_table(table_name="distribution_networks", nl=self.nl, nb=self.nb, ng=self.ng) db_management.create_table(table_name="micro_grids", nmg=self.nmg) db_management.create_table(table_name="mobile_energy_storage_systems", nmg=self.nmg) db_management.create_table(table_name="first_stage_solutions", nmg=self.nmg, ng=self.ng) db_management.create_table(table_name="fisrt_stage_mess", nmg=self.nmg) for t in range(T): db_management.insert_data_first_stage(table_name="first_stage_solutions", time=t, ng=self.ng, nmg=self.nmg, pg=sol_first_stage["pg"][:, t].tolist(), rg=sol_first_stage["rg"][:, t].tolist(), pg_mg=sol_first_stage["pg_mg"][:, t].tolist(), rg_mg=sol_first_stage["rg_mg"][:, t].tolist(), pess_ch=sol_first_stage["pess_ch"][:, t].tolist(), pess_dc=sol_first_stage["pess_dc"][:, t].tolist(), ress=sol_first_stage["ress"][:, t].tolist(), ess=sol_first_stage["eess"][:, t].tolist(), iess=sol_first_stage["iess"][:, t].tolist()) for i in range(nmes): for t in range(T): db_management.insert_data_first_stage_mess(table_name="fisrt_stage_mess", nmg=self.nmg, time=t, mess=i, imess=sol_first_stage["MESS"][i]["idc"][:, t].tolist(), rmess=sol_first_stage["MESS"][i]["rmess"][:, t].tolist(), pmess_ch= sol_first_stage["MESS"][i]["pmess_ch"][:, t].tolist(), pmess_dc= sol_first_stage["MESS"][i]["pmess_dc"][:, t].tolist(), mess_f_stop=sol_first_stage["MESS"][i]["VRP"][t + 1][0], mess_t_stop=sol_first_stage["MESS"][i]["VRP"][t + 1][1]) for i in range(ns): sol_second_stage_checked[i] = self.second_stage_solution_validation(sol_second_stage[i]) for i in range(ns): for t in range(T): 
db_management.insert_data_ds(table_name="distribution_networks", nl=self.nl, nb=self.nb, ng=self.ng, scenario=i, time=t, pij=sol_second_stage_checked[i]["DS"]["pij"][:, t].tolist(), qij=sol_second_stage_checked[i]["DS"]["qij"][:, t].tolist(), lij=sol_second_stage_checked[i]["DS"]["lij"][:, t].tolist(), vi=sol_second_stage_checked[i]["DS"]["vi"][:, t].tolist(), pg=sol_second_stage_checked[i]["DS"]["pg"][:, t].tolist(), qg=sol_second_stage_checked[i]["DS"]["qg"][:, t].tolist(), ) for i in range(ns): for j in range(nmg): for t in range(T): db_management.insert_data_mg(table_name="micro_grids", scenario=i, time=t, mg=j, pg=sol_second_stage_checked[i]["MG"]["pg"][j, t], qg=sol_second_stage_checked[i]["MG"]["qg"][j, t], pug=sol_second_stage_checked[i]["MG"]["pug"][j, t], qug=sol_second_stage_checked[i]["MG"]["qug"][j, t], pbic_ac2dc=sol_second_stage_checked[i]["MG"]["pbic_ac2dc"][j, t], pbic_dc2ac=sol_second_stage_checked[i]["MG"]["pbic_dc2ac"][j, t], qbic=sol_second_stage_checked[i]["MG"]["qbic"][j, t], pess_ch=sol_second_stage_checked[i]["MG"]["pess_ch"][j, t], pess_dc=sol_second_stage_checked[i]["MG"]["pess_dc"][j, t], eess=sol_second_stage_checked[i]["MG"]["eess"][j, t], pmess=sol_second_stage_checked[i]["MG"]["pmess"][j, t], ppv=sol_second_stage_checked[i]["MG"]["ppv"][j, t]) for i in range(ns): for j in range(nmes): for t in range(T): db_management.insert_data_mess(table_name="mobile_energy_storage_systems", scenario=i, time=t, mess=j, nmg=self.nmg, pmess_dc= sol_second_stage_checked[i]["MESS"][j]["pmess_dc"][:, t].tolist(), pmess_ch= sol_second_stage_checked[i]["MESS"][j]["pmess_ch"][:, t].tolist(), emess=sol_second_stage_checked[i]["MESS"][j]["emess"][0, t]) # 4.3) Cross validation of the first-stage and second-stage decision variables tess_check = {} for i in range(ns): tess_temp = {} for j in range(nmes): tess_temp[j] = sol_second_stage_checked[i]["MESS"][j]["pmess_dc"] - \ sol_second_stage_checked[i]["MESS"][j]["pmess_ch"] - \ sol_first_stage["MESS"][j]["pmess_dc"] + \ sol_first_stage["MESS"][j]["pmess_ch"] - \ sol_first_stage["MESS"][j]["rmess"] tess_temp[j + nmes] = sol_second_stage_checked[i]["MESS"][j]["pmess_ch"] - \ sol_second_stage_checked[i]["MESS"][j]["pmess_dc"] - \ sol_first_stage["MESS"][j]["pmess_ch"] + \ sol_first_stage["MESS"][j]["pmess_dc"] - \ sol_first_stage["MESS"][j]["rmess"] tess_check[i] = tess_temp # return sol_distribution_network, sol_microgrids, sol_tess return sol_first_stage, sol_second_stage_checked def first_stage_problem_formualtion(self, pns, mgs, mess, tns): """ Problem formulation for the first stage optimization, Decision variables include, DGs within power networks, DGs within MGs, EESs within MGs and TESSs :param power_networks: Parameters for the power networks :param micro_grids: Parameters for the microgrids :param tess: Parameters for the mobile energy storage systems :param traffic_networks: Parameters for the transportation networks :return: Formulated first-stage problem """ T = self.T # Time slots nmg = self.nmg # Number of mgs nmes = self.nmes # Number of tess mpc = ext2int(pns) baseMVA, bus, gen, branch, gencost = mpc["baseMVA"], mpc["bus"], mpc["gen"], mpc["branch"], mpc["gencost"] ng = shape(mpc['gen'])[0] ## number of dispatchable injections nb = shape(mpc["bus"])[0] self.nb = nb self.ng = ng # Obtain the initial status, start-up and shut down of generators Ig0 = gen[:, -1].astype(int) MIN_DOWN = gen[:, -2].astype(int) MIN_UP = gen[:, -3].astype(int) alpha_l = zeros(ng) beta_l = zeros(ng) Ig_l = zeros(ng) pg_l = zeros(ng) # Boundary 
for DGs within distribution networks rg_l = zeros(ng) alpha_u = ones(ng) beta_u = ones(ng) Ig_u = ones(ng) pg_u = gen[:, PMAX] / baseMVA rg_u = gen[:, PMAX] / baseMVA c_alpha = gencost[:, 0] c_beta = gencost[:, 1] c_ig = gencost[:, 6] cg = gencost[:, 5] * baseMVA cr = zeros(ng) pg_mg_l = zeros(nmg) # Boundary for DGs within MGs rg_mg_l = zeros(nmg) pg_mg_u = zeros(nmg) rg_mg_u = zeros(nmg) cg_mg = zeros(nmg) cr_mg = zeros(nmg) for i in range(nmg): pg_mg_l[i] = mgs[i]["DG"]["PMIN"] pg_mg_u[i] = mgs[i]["DG"]["PMAX"] rg_mg_u[i] = mgs[i]["DG"]["PMAX"] cg_mg[i] = mgs[i]["DG"]["COST_B"] pes_ch_l = zeros(nmg) # Lower boundary for ESSs within MGs pes_dc_l = zeros(nmg) ees_l = zeros(nmg) res_l = zeros(nmg) ies_l = zeros(nmg) pes_ch_u = zeros(nmg) # Upper boundary for ESSs within MGs pes_dc_u = zeros(nmg) ees_u = zeros(nmg) res_u = zeros(nmg) ies_u = ones(nmg) ces_ch = zeros(nmg) # Cost boundary for ESSs within MGs ces_dc = zeros(nmg) ces_r = zeros(nmg) ces = zeros(nmg) ces_i = zeros(nmg) for i in range(nmg): pes_ch_u[i] = mgs[i]["ESS"]["PCH_MAX"] pes_dc_u[i] = mgs[i]["ESS"]["PDC_MAX"] + mgs[i]["ESS"]["PCH_MAX"] res_u[i] = mgs[i]["ESS"]["PCH_MAX"] ees_l[i] = mgs[i]["ESS"]["EMIN"] ees_u[i] = mgs[i]["ESS"]["EMAX"] _nv_first_stage = ng * 5 + nmg * 2 + nmg * 5 nv_first_stage = _nv_first_stage * T # Formulate the boundaries lb = concatenate( [tile(concatenate( [alpha_l, beta_l, Ig_l, pg_l, rg_l, pg_mg_l, rg_mg_l, pes_ch_l, pes_dc_l, res_l, ees_l, ies_l]), T)]) ub = concatenate( [tile(concatenate( [alpha_u, beta_u, Ig_u, pg_u, rg_u, pg_mg_u, rg_mg_u, pes_ch_u, pes_dc_u, res_u, ees_u, ies_u]), T)]) # Objective value c = concatenate( [tile(concatenate([c_alpha, c_beta, c_ig, cg, cr, cg_mg, cr_mg, ces_ch, ces_dc, ces, ces_r, ces_i]), T)]) # Variable types vtypes = (["b"] * ng * 3 + ["c"] * (ng * 2 + nmg * 2 + nmg * 4) + ["b"] * nmg) * T ## Constraint sets # 1) Pg+Rg<=PguIg A = lil_matrix((ng * T, nv_first_stage)) b = zeros(ng * T) for t in range(T): for j in range(ng): A[t * ng + j, t * _nv_first_stage + ng * 3 + j] = 1 A[t * ng + j, t * _nv_first_stage + ng * 4 + j] = 1 A[t * ng + j, t * _nv_first_stage + ng * 2 + j] = -pg_u[j] # 2) Pg-Rg>=IgPgl A_temp = lil_matrix((ng * T, nv_first_stage)) b_temp = zeros(ng * T) for t in range(T): for j in range(ng): A_temp[t * ng + j, t * _nv_first_stage + ng * 3 + j] = -1 A_temp[t * ng + j, t * _nv_first_stage + ng * 4 + j] = 1 A_temp[t * ng + j, t * _nv_first_stage + j] = pg_l[j] A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # 3) Start-up and shut-down constraints of DGs UP_LIMIT = zeros(ng).astype(int) DOWN_LIMIT = zeros(ng).astype(int) for i in range(ng): UP_LIMIT[i] = T - MIN_UP[i] DOWN_LIMIT[i] = T - MIN_DOWN[i] # 3.1) Up limit A_temp = lil_matrix((sum(UP_LIMIT), nv_first_stage)) b_temp = zeros(sum(UP_LIMIT)) for i in range(ng): for t in range(MIN_UP[i], T): for k in range(t - MIN_UP[i], t): A_temp[sum(UP_LIMIT[0:i]) + t - MIN_UP[i], k * _nv_first_stage + i] = 1 A_temp[sum(UP_LIMIT[0:i]) + t - MIN_UP[i], t * _nv_first_stage + ng * 2 + i] = -1 A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # # 3.2) Down limit A_temp = lil_matrix((sum(DOWN_LIMIT), nv_first_stage)) b_temp = ones(sum(DOWN_LIMIT)) for i in range(ng): for t in range(MIN_DOWN[i], T): for k in range(t - MIN_DOWN[i], t): A_temp[sum(DOWN_LIMIT[0:i]) + t - MIN_DOWN[i], k * _nv_first_stage + ng + i] = 1 A_temp[sum(DOWN_LIMIT[0:i]) + t - MIN_DOWN[i], t * _nv_first_stage + ng * 2 + i] = 1 A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # 4) Status transformation of each unit Aeq = lil_matrix((T 
* ng, nv_first_stage)) beq = zeros(T * ng) for i in range(ng): for t in range(T): Aeq[i * T + t, t * _nv_first_stage + i] = 1 Aeq[i * T + t, t * _nv_first_stage + ng + i] = -1 Aeq[i * T + t, t * _nv_first_stage + ng * 2 + i] = -1 if t != 0: Aeq[i * T + t, (t - 1) * _nv_first_stage + ng * 2 + i] = 1 else: beq[i * T + t] = -Ig0[i] # 3) Pg_mg+Rg_mg<=Pg_mg_u A_temp = lil_matrix((nmg * T, nv_first_stage)) b_temp = zeros(nmg * T) for t in range(T): for j in range(nmg): A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + j] = 1 A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg + j] = 1 b_temp[t * nmg + j] = pg_mg_u[j] A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # 4) Pg_mg-Rg_mg<=Pg_mg_l A_temp = lil_matrix((nmg * T, nv_first_stage)) b_temp = zeros(nmg * T) for t in range(T): for j in range(nmg): A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + j] = -1 A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg + j] = 1 b_temp[t * nmg + j] = pg_mg_l[j] A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # 5) Pess_dc-Pess_ch+Ress<=Pess_dc_max A_temp = lil_matrix((nmg * T, nv_first_stage)) b_temp = zeros(nmg * T) for t in range(T): for j in range(nmg): A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + j] = -1 A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + nmg + j] = 1 A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + nmg * 2 + j] = 1 b_temp[t * nmg + j] = pes_dc_u[j] A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # 6) Pess_ch-Pess_dc+Ress<=Pess_ch_max A_temp = lil_matrix((nmg * T, nv_first_stage)) b_temp = zeros(nmg * T) for t in range(T): for j in range(nmg): A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + j] = 1 A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + nmg + j] = -1 A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + nmg * 2 + j] = 1 b_temp[t * nmg + j] = pes_ch_u[j] A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # 7) Energy storage balance equation Aeq_temp = lil_matrix((T * nmg, nv_first_stage)) beq_temp = zeros(T * nmg) for t in range(T): for j in range(nmg): Aeq_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + nmg * 3 + j] = 1 Aeq_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + j] = -mgs[j]["ESS"]["EFF_CH"] Aeq_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + nmg + j] = 1 / mgs[j]["ESS"]["EFF_DC"] if t == 0: beq_temp[t * nmg + j] = mgs[j]["ESS"]["E0"] else: Aeq_temp[t * nmg + j, (t - 1) * _nv_first_stage + ng * 5 + nmg * 2 + nmg * 3 + j] = -1 Aeq = vstack([Aeq, Aeq_temp]) beq = concatenate([beq, beq_temp]) # 8) Pess_ch<=I*Pess_ch_max A_temp = lil_matrix((nmg * T, nv_first_stage)) b_temp = zeros(nmg * T) for t in range(T): for j in range(nmg): A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + j] = 1 A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + nmg * 4 + j] = -pes_ch_u[j] A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # 9) Pess_dc<=(1-I)*Pess_dc_max A_temp = lil_matrix((nmg * T, nv_first_stage)) b_temp = zeros(nmg * T) for t in range(T): for j in range(nmg): A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + nmg + j] = 1 A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + nmg * 4 + j] = pes_dc_u[j] b_temp[t * nmg + j] = pes_dc_u[j] A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # 2) Transportation energy storage systems problem model_mess = {} for i in range(nmes): model_mess[i] = self.problem_formulation_tess(mess=mess[i], tns=tns) # 3) Merge the DGs, ESSs and TESSs neq = Aeq.shape[0] nineq = A.shape[0] 
nV_index = zeros(nmes + 1).astype(int) neq_index = zeros(nmes + 1).astype(int) nineq_index = zeros(nmes + 1).astype(int) nV_index[0] = nv_first_stage neq_index[0] = neq nineq_index[0] = nineq for i in range(nmes): nV_index[i + 1] = nV_index[i] + len(model_mess[i]["c"]) neq_index[i + 1] = neq_index[i] + model_mess[i]["Aeq"].shape[0] nineq_index[i + 1] = nineq_index[i] + model_mess[i]["A"].shape[0] neq += model_mess[i]["Aeq"].shape[0] nineq += model_mess[i]["A"].shape[0] # Merge the objective function, boundaries, types and rhs c = concatenate([c, model_mess[i]["c"]]) lb = concatenate([lb, model_mess[i]["lb"]]) ub = concatenate([ub, model_mess[i]["ub"]]) vtypes += model_mess[i]["vtypes"] beq = concatenate([beq, model_mess[i]["beq"]]) b = concatenate([b, model_mess[i]["b"]]) A_full = lil_matrix((nineq_index[-1], nV_index[-1])) Aeq_full = lil_matrix((neq_index[-1], nV_index[-1])) if Aeq is not None: Aeq_full[0:int(neq_index[0]), 0:int(nV_index[0])] = Aeq if A is not None: A_full[0:int(nineq_index[0]), 0:int(nV_index[0])] = A for i in range(nmes): Aeq_full[neq_index[i]:neq_index[i + 1], nV_index[i]:nV_index[i + 1]] = model_mess[i]["Aeq"] A_full[nineq_index[i]:nineq_index[i + 1], nV_index[i]:nV_index[i + 1]] = model_mess[i]["A"] self.nv_first_stage = nV_index[-1] # The number of first stage decision variables self._nv_first_stage = _nv_first_stage model_first_stage = {"c": c, "lb": lb, "ub": ub, "vtypes": vtypes, "A": A_full, "b": b, "Aeq": Aeq_full, "beq": beq, } return model_first_stage def first_stage_solution_validation(self, sol): """ Validation of the first-stage solution :param sol: The first stage solution :return: the first stage solution """ T = self.T ng = self.ng nmg = self.nmg nmes = self.nmes # Set-points of DGs within DSs, MGs and ESSs _nv_first_stage = self._nv_first_stage alpha = zeros((ng, T)) beta = zeros((ng, T)) Ig = zeros((ng, T)) Pg = zeros((ng, T)) Rg = zeros((ng, T)) Pg_mg = zeros((nmg, T)) Rg_mg = zeros((nmg, T)) Pess_dc = zeros((nmg, T)) Pess_ch = zeros((nmg, T)) Ress = zeros((nmg, T)) Eess = zeros((nmg, T)) Iess = zeros((nmg, T)) for i in range(T): alpha[:, i] = sol[_nv_first_stage * i:_nv_first_stage * i + ng] beta[:, i] = sol[_nv_first_stage * i + ng:_nv_first_stage * i + ng * 2] Ig[:, i] = sol[_nv_first_stage * i + ng * 2:_nv_first_stage * i + ng * 3] Pg[:, i] = sol[_nv_first_stage * i + ng * 3:_nv_first_stage * i + ng * 4] Rg[:, i] = sol[_nv_first_stage * i + ng * 4:_nv_first_stage * i + ng * 5] Pg_mg[:, i] = sol[_nv_first_stage * i + ng * 5:_nv_first_stage * i + ng * 5 + nmg] Rg_mg[:, i] = sol[_nv_first_stage * i + ng * 5 + nmg:_nv_first_stage * i + ng * 5 + nmg * 2] Pess_ch[:, i] = sol[_nv_first_stage * i + ng * 5 + nmg * 2:_nv_first_stage * i + ng * 5 + nmg * 3] Pess_dc[:, i] = sol[_nv_first_stage * i + ng * 5 + nmg * 3:_nv_first_stage * i + ng * 5 + nmg * 4] Ress[:, i] = sol[_nv_first_stage * i + ng * 5 + nmg * 4:_nv_first_stage * i + ng * 5 + nmg * 5] Eess[:, i] = sol[_nv_first_stage * i + ng * 5 + nmg * 5:_nv_first_stage * i + ng * 5 + nmg * 6] Iess[:, i] = sol[_nv_first_stage * i + ng * 5 + nmg * 6:_nv_first_stage * i + ng * 5 + nmg * 7] # Set-points and scheduling of mobile energy storage systems nv_tra = self.nv_tra nl_traffic = self.nl_tra n_stops = self.n_stops nb_tra_ele = self.nb_tra_ele sol_ev = {} for i in range(nmes): ev_temp = {} ev_temp["VRP"] = [] for t in range(nl_traffic): if sol[_nv_first_stage * T + nv_tra * i + t] > 0: # obtain the solution for vrp if self.connection_matrix[t, TIME] > 0: for j in range(int(self.connection_matrix[t, TIME])): 
ev_temp["VRP"].append(((self.connection_matrix[t, F_BUS] - 1) % nmg, (self.connection_matrix[t, T_BUS] - 1) % nmg)) else: ev_temp["VRP"].append(((self.connection_matrix[t, F_BUS] - 1) % nmg, (self.connection_matrix[t, T_BUS] - 1) % nmg)) ev_temp["idc"] = zeros((nb_tra_ele, T)) ev_temp["pmess_dc"] = zeros((nb_tra_ele, T)) ev_temp["pmess_ch"] = zeros((nb_tra_ele, T)) ev_temp["rmess"] = zeros((nb_tra_ele, T)) for t in range(T): for k in range(nb_tra_ele): ev_temp["idc"][k, t] = sol[_nv_first_stage * T + nv_tra * i + nl_traffic + nb_tra_ele * t + k] ev_temp["pmess_dc"][k, t] = \ sol[_nv_first_stage * T + nv_tra * i + nl_traffic + n_stops + nb_tra_ele * t + k] ev_temp["pmess_ch"][k, t] = \ sol[_nv_first_stage * T + nv_tra * i + nl_traffic + n_stops * 2 + nb_tra_ele * t + k] ev_temp["rmess"][k, t] = \ sol[_nv_first_stage * T + nv_tra * i + nl_traffic + n_stops * 3 + nb_tra_ele * t + k] sol_ev[i] = ev_temp sol_first_stage = {"alpha": alpha, "beta": beta, "ig": Ig, "rg": Rg, "pg": Pg, "pg_mg": Pg_mg, "rg_mg": Rg_mg, "pess_ch": Pess_ch, "pess_dc": Pess_dc, "ress": Ress, "eess": Eess, "iess": Iess, "MESS": sol_ev, } return sol_first_stage def second_stage_problem_formualtion(self, pns, mgs, mess, tns, profile, index=0, weight=1): """ Second-stage problem formulation, the decision variables includes DGs within power networks, DGs within MGs, EESs within MGs and TESSs and other systems' information :param power_networks: :param micro_grids: :param tess: :param traffic_networks: :return: The second stage problems as list, including coupling constraints, and other constraint set """ # I) Formulate the problem for distribution systems operator T = self.T mpc = ext2int(pns) baseMVA, bus, gen, branch, gencost = mpc["baseMVA"], mpc["bus"], mpc["gen"], mpc["branch"], mpc["gencost"] nb = shape(mpc['bus'])[0] ## number of buses nl = shape(mpc['branch'])[0] ## number of branches ng = shape(mpc['gen'])[0] ## number of dispatchable injections nd = sum(bus[:,PD]>0) nmg = self.nmg nmes = self.nmes self.nl = nl self.nb = nb self.ng = ng self.nd = nd m = zeros(nmg) ## list of integration index pmg_l = zeros(nmg) ## list of lower boundary pmg_u = zeros(nmg) ## list of upper boundary qmg_l = zeros(nmg) ## list of lower boundary qmg_u = zeros(nmg) ## list of upper boundary for i in range(nmg): m[i] = mgs[i]["BUS"] pmg_l[i] = mgs[i]["UG"]["PMIN"] / 1000 / baseMVA pmg_u[i] = mgs[i]["UG"]["PMAX"] / 1000 / baseMVA qmg_l[i] = mgs[i]["UG"]["QMIN"] / 1000 / baseMVA qmg_u[i] = mgs[i]["UG"]["QMAX"] / 1000 / baseMVA f = branch[:, F_BUS] ## list of "from" buses t = branch[:, T_BUS] ## list of "to" buses d = bus[bus[:,PD]>0, BUS_I].astype(int) ## list of "to" buses i = range(nl) ## double set of row indices self.f = f ## record from bus for each branch # Connection matrix Cf = sparse((ones(nl), (i, f)), (nl, nb)) Ct = sparse((ones(nl), (i, t)), (nl, nb)) Cd = sparse((ones(nd), (d, range(nd))), (nb, nd)) Cg = sparse((ones(ng), (gen[:, GEN_BUS], range(ng))), (nb, ng)) Cmg = sparse((ones(nmg), (m, range(nmg))), (nb, nmg)) Branch_R = branch[:, BR_R] Branch_X = branch[:, BR_X] Cf = Cf.T Ct = Ct.T # Obtain the boundary information slmax = branch[:, RATE_A] / baseMVA pij_l = -slmax qij_l = -slmax lij_l = zeros(nl) vm_l = bus[:, VMIN] ** 2 pg_l = gen[:, PMIN] / baseMVA qg_l = gen[:, QMIN] / baseMVA pd_l = zeros(nd) pij_u = slmax qij_u = slmax lij_u = slmax vm_u = bus[:, VMAX] ** 2 pg_u = gen[:, PMAX] / baseMVA qg_u = gen[:, QMAX] / baseMVA pd_u = bus[d, PD] / baseMVA _nv_second_stage = int(3 * nl + nb + 2 * ng + 2 * nmg + nd) 
self._nv_second_stage = _nv_second_stage # Number of decision variable within each time slot lb = concatenate([tile(concatenate([pij_l, qij_l, lij_l, vm_l, pg_l, qg_l, pmg_l, qmg_l, pd_l]), T)]) ub = concatenate([tile(concatenate([pij_u, qij_u, lij_u, vm_u, pg_u, qg_u, pmg_u, qmg_u, pd_u]), T)]) vtypes = ["c"] * _nv_second_stage * T nv_ds = _nv_second_stage * T # Number of total decision variables # Add system level constraints # 1) Active power balance Aeq_p = lil_matrix((nb * T, nv_ds)) beq_p = zeros(nb * T) for i in range(T): Aeq_p[i * nb:(i + 1) * nb, i * _nv_second_stage: (i + 1) * _nv_second_stage] = \ hstack([Ct - Cf, zeros((nb, nl)), -diag(Ct * Branch_R) * Ct, zeros((nb, nb)), Cg, zeros((nb, ng)), -Cmg, zeros((nb, nmg)), Cd]) beq_p[i * nb:(i + 1) * nb] = profile[i * nb:(i + 1) * nb] / baseMVA # 2) Reactive power balance Aeq_q = lil_matrix((nb * T, nv_ds)) beq_q = zeros(nb * T) for i in range(T): Aeq_q[i * nb:(i + 1) * nb, i * _nv_second_stage: (i + 1) * _nv_second_stage] = \ hstack([zeros((nb, nl)), Ct - Cf, -diag(Ct * Branch_X) * Ct, zeros((nb, nb)), zeros((nb, ng)), Cg, zeros((nb, nmg)), -Cmg, Cd.dot(diag(bus[d,QD]/bus[d,PD]))]) for j in range(nb): if bus[j, PD] > 0: beq_q[i * nb:(i + 1) * nb] = profile[i * nb + j] / bus[j, PD] * bus[j, QD] / baseMVA # 3) KVL equation Aeq_kvl = lil_matrix((nl * T, nv_ds)) beq_kvl = zeros(nl * T) for i in range(T): Aeq_kvl[i * nl:(i + 1) * nl, i * _nv_second_stage: i * _nv_second_stage + nl] = -2 * diag(Branch_R) Aeq_kvl[i * nl:(i + 1) * nl, i * _nv_second_stage + nl: i * _nv_second_stage + 2 * nl] = -2 * diag(Branch_X) Aeq_kvl[i * nl:(i + 1) * nl, i * _nv_second_stage + 2 * nl: i * _nv_second_stage + 3 * nl] = diag( Branch_R ** 2) + diag(Branch_X ** 2) Aeq_kvl[i * nl:(i + 1) * nl, i * _nv_second_stage + 3 * nl:i * _nv_second_stage + 3 * nl + nb] = ( Cf.T - Ct.T).toarray() Aeq = vstack([Aeq_p, Aeq_q, Aeq_kvl]) beq = concatenate([beq_p, beq_q, beq_kvl]) c = zeros(nv_ds) q = zeros(nv_ds) c0 = 0 for t in range(T): for i in range(ng): c[t * _nv_second_stage + i + 3 * nl + nb] = gencost[i, 5] * baseMVA q[t * _nv_second_stage + i + 3 * nl + nb] = gencost[i, 4] * baseMVA * baseMVA c0 += gencost[i, 6] for i in range(nd): c[t * _nv_second_stage + i + 3 * nl + nb + 2 * ng + 2 * nmg] = Voll * baseMVA# The load shedding cost # Coupling constraints between the distribution systems and micro_grids Ax2y = lil_matrix((2 * nmg * T, nv_ds)) # connection matrix with the microgrids for i in range(T): for j in range(nmg): # Active power Ax2y[i * nmg + j, i * _nv_second_stage + 3 * nl + nb + 2 * ng + j] = 1000 * baseMVA # Reactive power Ax2y[nmg * T + i * nmg + j, i * _nv_second_stage + 3 * nl + nb + 2 * ng + nmg + j] = 1000 * baseMVA # II) Formulate the problem for microgrids model_microgrids = {} for i in range(nmg): model_microgrids[i] = self.problem_formulation_microgrid(mg=mgs[i], mess=mess) # II.A) Combine the distribution system operation problem and microgrid systems if Aeq is not None: neq_ds = Aeq.shape[0] else: neq_ds = 0 nVariables = int(nv_ds) neq = int(neq_ds) nv_index = zeros(nmg + 1).astype(int) neq_index = zeros(nmg + 1).astype(int) nv_index[0] = nv_ds neq_index[0] = int(neq_ds) for i in range(nmg): nv_index[i + 1] = nv_index[i] + len(model_microgrids[i]["c"]) neq_index[i + 1] = neq_index[i] + model_microgrids[i]["Aeq"].shape[0] nVariables += len(model_microgrids[i]["c"]) neq += int(model_microgrids[i]["Aeq"].shape[0]) Aeq_full = lil_matrix((int(neq_index[-1]), int(nv_index[-1]))) Aeq_full[0:neq_ds, 0:nv_ds] = Aeq for i in range(nmg): lb = 
concatenate([lb, model_microgrids[i]["lb"]]) ub = concatenate([ub, model_microgrids[i]["ub"]]) c = concatenate([c, model_microgrids[i]["c"]]) q = concatenate([q, model_microgrids[i]["q"]]) vtypes += model_microgrids[i]["vtypes"] beq = concatenate([beq, model_microgrids[i]["beq"]]) Aeq_full[neq_index[i]:neq_index[i + 1], nv_index[i]:nv_index[i + 1]] = model_microgrids[i]["Aeq"] # Add coupling constraints, between the microgrids and distribution networks Ay2x = lil_matrix((2 * nmg * T, nv_index[-1] - nv_index[0])) for i in range(T): for j in range(nmg): Ay2x[i * nmg + j, int(nv_index[j] - nv_index[0]) + i * NX_MG + PUG] = -1
Ay2x[nmg * T + i * nmg + j, int(nv_index[j] - nv_index[0]) + i * NX_MG + QUG] = -1
12
2023-11-27 15:57:53+00:00
24k
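Aside: the two-stage model-merging code in the record above repeatedly assembles one large sparse constraint matrix from blocks: the first-stage block sits in the top-left corner, and each scenario contributes a coupling block `Ts` over the first-stage columns plus its own recourse block `Ws` on the diagonal. Below is a toy sketch of that assembly with `scipy.sparse.lil_matrix`; all sizes are illustrative.

# Minimal sketch of the block assembly used for A_full in the record above.
import numpy as np
from scipy.sparse import lil_matrix

nv_first, nv_scen, nc_scen, ns = 3, 2, 2, 2  # toy vars/constraints per block
Ts = np.ones((nc_scen, nv_first))   # couples scenario rows to stage-1 variables
Ws = np.eye(nc_scen, nv_scen)       # scenario's own recourse coefficients

A_full = lil_matrix((ns * nc_scen, nv_first + ns * nv_scen))
for s in range(ns):
    rows = slice(s * nc_scen, (s + 1) * nc_scen)
    A_full[rows, :nv_first] = Ts                       # coupling block
    cols = slice(nv_first + s * nv_scen, nv_first + (s + 1) * nv_scen)
    A_full[rows, cols] = Ws                            # diagonal recourse block

print(A_full.toarray())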
girgle/DouZero_For_New_HLDDZ
main.py
[ { "identifier": "GameHelper", "path": "GameHelper.py", "snippet": "class GameHelper:\n def __init__(self):\n self.ScreenZoomRate = None\n self.counter = QTime()\n self.Pics = {}\n self.PicsCV = {}\n st = time.time()\n self.Handle = win32gui.FindWindow(\"Unity...
import GameHelper as gh import os import sys import time import threading import pyautogui import win32gui import multiprocessing as mp import DetermineColor as DC import cv2 import numpy as np import traceback import BidModel import LandlordModel import FarmerModel from GameHelper import GameHelper from PIL import Image from skimage.metrics import structural_similarity as ssim from collections import defaultdict from douzero.env.move_detector import get_move_type from PyQt5 import QtGui, QtWidgets, QtCore from PyQt5.QtWidgets import QTableWidgetItem, QInputDialog, QMessageBox from PyQt5.QtGui import QPixmap, QIcon from PyQt5.QtCore import QTime, QEventLoop, Qt from MainWindow import Ui_Form from douzero.env.game import GameEnv from douzero.evaluation.deep_agent import DeepAgent
15,212
except AttributeError as e: traceback.print_exc() def init_display(self): self.WinRate.setText("评分") self.label.setText("游戏状态") self.label.setStyleSheet('background-color: rgba(255, 0, 0, 0);') self.UserHandCards.setText("手牌") # self.LBrowser.clear() # self.RBrowser.clear() self.LPlayedCard.setText("上家出牌区域") self.RPlayedCard.setText("下家出牌区域") self.PredictedCard.setText("AI出牌区域") self.ThreeLandlordCards.setText("地主牌") self.recorder2zero() for player in self.Players: player.setStyleSheet('background-color: rgba(0, 255, 0, 0);') def init_cards(self): self.RunGame = True GameHelper.Interrupt = False self.user_hand_cards_real = "" self.user_hand_cards_env = [] # Cards played by the other players self.other_played_cards_real = "" self.other_played_cards_env = [] # Other players' hands (the full deck minus my hand; played cards are subtracted later) self.other_hand_cards = [] # The three landlord (bottom) cards self.three_landlord_cards_real = "" self.three_landlord_cards_env = [] # Player role code: 0 - before the landlord, 1 - landlord, 2 - after the landlord self.user_position_code = None self.user_position = "" # The three players' hands at the start of the game self.card_play_data_list = {} # Recognize my hand cards self.user_hand_cards_real = self.find_my_cards() while len(self.user_hand_cards_real) != 17 and len(self.user_hand_cards_real) != 20: self.detect_start_btn() if not self.RunGame: break self.sleep(200) self.user_hand_cards_real = self.find_my_cards() self.user_hand_cards_env = [RealCard2EnvCard[c] for c in list(self.user_hand_cards_real)] # Recognize the three landlord cards self.three_landlord_cards_real = self.find_landlord_cards() self.ThreeLandlordCards.setText("底牌:" + self.three_landlord_cards_real) self.three_landlord_cards_env = [RealCard2EnvCard[c] for c in list(self.three_landlord_cards_real)] while len(self.three_landlord_cards_env) != 3: self.detect_start_btn() if not self.RunGame: break if len(self.three_landlord_cards_env) > 3: self.ThreeLandlordCardsConfidence += 0.05 elif len(self.three_landlord_cards_env) < 3: self.ThreeLandlordCardsConfidence -= 0.05 self.three_landlord_cards_real = self.find_landlord_cards() self.ThreeLandlordCards.setText("底牌:" + self.three_landlord_cards_real) self.three_landlord_cards_env = [RealCard2EnvCard[c] for c in list(self.three_landlord_cards_real)] # Recognize the player's role self.sleep(500) self.user_position_code = self.find_landlord(self.LandlordFlagPos) self.sleep(200) while self.user_position_code is None: self.detect_start_btn() if not self.RunGame: break self.user_position_code = self.find_landlord(self.LandlordFlagPos) self.sleep(200) print("正在出牌人的代码: ", self.user_position_code) if self.user_position_code is None: items = ("地主上家", "地主", "地主下家") item, okPressed = QInputDialog.getItem(self, "选择角色", "未识别到地主,请手动选择角色:", items, 0, False) if okPressed and item: self.user_position_code = items.index(item) else: return self.user_position = ['landlord_up', 'landlord', 'landlord_down'][self.user_position_code] print("我现在在地主的方向:", self.user_position) for player in self.Players: player.setStyleSheet('background-color: rgba(0, 255, 0, 0);') self.Players[self.user_position_code].setStyleSheet('background-color: rgba(0, 255, 0, 0.5);') # The full deck minus my hand gives the other players' cards; split them between the other two roles (how they are split does not affect the AI's decisions) for i in set(AllEnvCard): self.other_hand_cards.extend([i] * (AllEnvCard.count(i) - self.user_hand_cards_env.count(i))) self.other_hands_cards_str = str(''.join([EnvCard2RealCard[c] for c in self.other_hand_cards]))[::-1] self.cards_recorder(self.other_hands_cards_str) self.card_play_data_list.update({ 'three_landlord_cards': self.three_landlord_cards_env, ['landlord_up', 'landlord', 'landlord_down'][(self.user_position_code + 0) % 3]: self.user_hand_cards_env, ['landlord_up', 'landlord',
'landlord_down'][(self.user_position_code + 1) % 3]: self.other_hand_cards[0:17] if (self.user_position_code + 1) % 3 != 1 else self.other_hand_cards[17:], ['landlord_up', 'landlord', 'landlord_down'][(self.user_position_code + 2) % 3]: self.other_hand_cards[0:17] if (self.user_position_code + 1) % 3 == 1 else self.other_hand_cards[17:] }) print("开始对局") print("手牌:", self.user_hand_cards_real) print("地主牌:", self.three_landlord_cards_real) # Hand generation finished; validate the card counts if len(self.card_play_data_list["three_landlord_cards"]) != 3: QMessageBox.critical(self, "底牌识别出错", "底牌必须是3张!", QMessageBox.Yes, QMessageBox.Yes) self.init_display() return if len(self.card_play_data_list["landlord_up"]) != 17 or \ len(self.card_play_data_list["landlord_down"]) != 17 or \ len(self.card_play_data_list["landlord"]) != 20: QMessageBox.critical(self, "手牌识别出错", "初始手牌数目有误", QMessageBox.Yes, QMessageBox.Yes) self.init_display() return # Play order: 0 - I play first, 1 - the next player (down) plays first, 2 - the previous player (up) plays first self.play_order = 0 if self.user_position == "landlord" else 1 if self.user_position == "landlord_up" else 2 # Create an AI that represents the player ai_players = [0, 0] ai_players[0] = self.user_position
# -*- coding: utf-8 -*- # Created by: Raf # Modify by: Vincentzyx EnvCard2RealCard = {3: '3', 4: '4', 5: '5', 6: '6', 7: '7', 8: '8', 9: '9', 10: 'T', 11: 'J', 12: 'Q', 13: 'K', 14: 'A', 17: '2', 20: 'X', 30: 'D'} RealCard2EnvCard = {'3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9, 'T': 10, 'J': 11, 'Q': 12, 'K': 13, 'A': 14, '2': 17, 'X': 20, 'D': 30} AllEnvCard = [3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11, 12, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 17, 17, 17, 17, 20, 30] AllCards = ['D', 'X', '2', 'A', 'K', 'Q', 'J', 'T', '9', '8', '7', '6', '5', '4', '3'] helper = GameHelper() class MyPyQT_Form(QtWidgets.QWidget, Ui_Form): def __init__(self): super(MyPyQT_Form, self).__init__() self.other_hands_cards_str = None self.stop_sign = None self.loop_sign = None self.env = None self.three_landlord_cards_env = None self.three_landlord_cards_real = None self.user_hand_cards_env = None self.user_hand_cards_real = None self.play_order = None self.card_play_data_list = None self.other_hand_cards = None self.other_played_cards_env = None self.other_played_cards_real = None self.user_position = None self.user_position_code = None self.setupUi(self) self.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint | # Enable the minimize button QtCore.Qt.WindowStaysOnTopHint | # Keep the window always on top QtCore.Qt.WindowCloseButtonHint) self.setWindowIcon(QIcon(':/pics/favicon.ico')) self.setWindowTitle("DouZero欢乐斗地主v2.0") self.setFixedSize(self.width(), self.height()) # Fix the window size self.move(50, 50) # self.setWindowIcon(QIcon('pics/favicon.ico')) window_pale = QtGui.QPalette() # window_pale.setBrush(self.backgroundRole(), QtGui.QBrush(QtGui.QPixmap("pics/bg.png"))) self.setPalette(window_pale) self.SingleButton.clicked.connect(self.game_single) self.LoopButton.clicked.connect(self.game_loop) self.StopButton.clicked.connect(self.stop) # self.Players = [self.RPlayer, self.Player, self.LPlayer] self.Players = [self.RPlayedCard, self.PredictedCard, self.LPlayedCard] self.counter = QTime() # Parameters self.MyConfidence = 0.8 # Confidence for my cards self.OtherConfidence = 0.8 # Confidence for other players' cards self.WhiteConfidence = 0.85 # Confidence for detecting white blocks self.LandlordFlagConfidence = 0.8 # Confidence for detecting the landlord flag self.ThreeLandlordCardsConfidence = 0.8 # Confidence for detecting the landlord's bottom cards self.PassConfidence = 0.7 self.PassConfidence = 0.8 self.WaitTime = 1 # Delay waiting for the state to stabilize self.MyFilter = 40 # Filter parameter for my card detection results self.OtherFilter = 25 # Filter parameter for other players' card detection results self.SleepTime = 0.1 # Sleep time inside the loop self.RunGame = False self.AutoPlay = False self.BidThreshold1 = 65 # Threshold for calling landlord self.BidThreshold2 = 72 # Threshold for grabbing landlord self.JiabeiThreshold = ( (85, 72), # Call-landlord / super-double / double thresholds (85, 75) # Call-landlord / super-double / double thresholds (when the landlord was grabbed) ) self.MingpaiThreshold = 92 # Coordinates self.MyHandCardsPos = (180, 560, 1050, 90) # Screenshot region of my hand self.LPlayedCardsPos = (320, 280, 500, 120) # Screenshot region of the left player's plays self.RPlayedCardsPos = (600, 280, 500, 120) # Screenshot region of the right player's plays self.LandlordCardsPos = (600, 33, 220, 103) # Screenshot region of the landlord's bottom cards self.LPassPos = (360, 360, 120, 80) # Screenshot region of the left player's "pass" self.RPassPos = (940, 360, 120, 80) # Screenshot region of the right player's "pass" self.PassBtnPos = (200, 450, 1000, 120) # Screenshot region of the "cannot beat" button self.GeneralBtnPos = (200, 450, 1000, 120) # Screenshot region of the call/grab-landlord and double buttons self.LandlordFlagPos = [(1247, 245, 48, 52), (12, 661, 51, 53), (123, 243, 52, 54)] # Screenshot regions of the landlord flag (right - me - left) self.card_play_model_path_dict = { 'landlord': "baselines/resnet/resnet_landlord.ckpt", 'landlord_up': "baselines/resnet/resnet_landlord_up.ckpt", 'landlord_down': "baselines/resnet/resnet_landlord_down.ckpt" } def game_single(self): self.loop_sign = 0 self.stop_sign = 0 self.detect_start_btn() self.before_start() self.init_cards() def game_loop(self):
self.loop_sign = 1 self.stop_sign = 0 while True: if self.stop_sign == 1: break self.detect_start_btn() self.before_start() self.init_cards() self.sleep(5000) def stop(self): self.stop_sign = 1 print("按下停止键") try: self.RunGame = False self.loop_sign = 0 self.env.game_over = True self.env.reset() self.init_display() self.PreWinrate.setText("局前胜率: ") self.BidWinrate.setText("叫牌胜率: ") except AttributeError as e: traceback.print_exc() def init_display(self): self.WinRate.setText("评分") self.label.setText("游戏状态") self.label.setStyleSheet('background-color: rgba(255, 0, 0, 0);') self.UserHandCards.setText("手牌") # self.LBrowser.clear() # self.RBrowser.clear() self.LPlayedCard.setText("上家出牌区域") self.RPlayedCard.setText("下家出牌区域") self.PredictedCard.setText("AI出牌区域") self.ThreeLandlordCards.setText("地主牌") self.recorder2zero() for player in self.Players: player.setStyleSheet('background-color: rgba(0, 255, 0, 0);') def init_cards(self): self.RunGame = True GameHelper.Interrupt = False self.user_hand_cards_real = "" self.user_hand_cards_env = [] # other players' played cards self.other_played_cards_real = "" self.other_played_cards_env = [] # other players' hands (the full deck minus my hand; played cards are subtracted later) self.other_hand_cards = [] # the three bottom (landlord) cards self.three_landlord_cards_real = "" self.three_landlord_cards_env = [] # player role code: 0 - seat before the landlord, 1 - landlord, 2 - seat after the landlord self.user_position_code = None self.user_position = "" # the three players' hands at the start of the game self.card_play_data_list = {} # recognize the player's hand self.user_hand_cards_real = self.find_my_cards() while len(self.user_hand_cards_real) != 17 and len(self.user_hand_cards_real) != 20: self.detect_start_btn() if not self.RunGame: break self.sleep(200) self.user_hand_cards_real = self.find_my_cards() self.user_hand_cards_env = [RealCard2EnvCard[c] for c in list(self.user_hand_cards_real)] # recognize the three bottom cards self.three_landlord_cards_real = self.find_landlord_cards() self.ThreeLandlordCards.setText("底牌:" + self.three_landlord_cards_real) self.three_landlord_cards_env = [RealCard2EnvCard[c] for c in list(self.three_landlord_cards_real)] while len(self.three_landlord_cards_env) != 3: self.detect_start_btn() if not self.RunGame: break if len(self.three_landlord_cards_env) > 3: self.ThreeLandlordCardsConfidence += 0.05 elif len(self.three_landlord_cards_env) < 3: self.ThreeLandlordCardsConfidence -= 0.05 self.three_landlord_cards_real = self.find_landlord_cards() self.ThreeLandlordCards.setText("底牌:" + self.three_landlord_cards_real) self.three_landlord_cards_env = [RealCard2EnvCard[c] for c in list(self.three_landlord_cards_real)] # recognize the player's role self.sleep(500) self.user_position_code = self.find_landlord(self.LandlordFlagPos) self.sleep(200) while self.user_position_code is None: self.detect_start_btn() if not self.RunGame: break self.user_position_code = self.find_landlord(self.LandlordFlagPos) self.sleep(200) print("正在出牌人的代码: ", self.user_position_code) if self.user_position_code is None: items = ("地主上家", "地主", "地主下家") item, okPressed = QInputDialog.getItem(self, "选择角色", "未识别到地主,请手动选择角色:", items, 0, False) if okPressed and item: self.user_position_code = items.index(item) else: return self.user_position = ['landlord_up', 'landlord', 'landlord_down'][self.user_position_code] print("我现在在地主的方向:", self.user_position) for player in self.Players: player.setStyleSheet('background-color: rgba(0, 255, 0, 0);') self.Players[self.user_position_code].setStyleSheet('background-color: rgba(0, 255, 0, 0.5);') # the full deck minus my hand gives the other players' cards; how they are split between the other two roles does not affect the AI's judgment for i in set(AllEnvCard): self.other_hand_cards.extend([i] * (AllEnvCard.count(i) - self.user_hand_cards_env.count(i))) self.other_hands_cards_str
= str(''.join([EnvCard2RealCard[c] for c in self.other_hand_cards]))[::-1] self.cards_recorder(self.other_hands_cards_str) self.card_play_data_list.update({ 'three_landlord_cards': self.three_landlord_cards_env, ['landlord_up', 'landlord', 'landlord_down'][(self.user_position_code + 0) % 3]: self.user_hand_cards_env, ['landlord_up', 'landlord', 'landlord_down'][(self.user_position_code + 1) % 3]: self.other_hand_cards[0:17] if (self.user_position_code + 1) % 3 != 1 else self.other_hand_cards[17:], ['landlord_up', 'landlord', 'landlord_down'][(self.user_position_code + 2) % 3]: self.other_hand_cards[0:17] if (self.user_position_code + 1) % 3 == 1 else self.other_hand_cards[17:] }) print("开始对局") print("手牌:", self.user_hand_cards_real) print("地主牌:", self.three_landlord_cards_real) # hands generated; validate the card counts if len(self.card_play_data_list["three_landlord_cards"]) != 3: QMessageBox.critical(self, "底牌识别出错", "底牌必须是3张!", QMessageBox.Yes, QMessageBox.Yes) self.init_display() return if len(self.card_play_data_list["landlord_up"]) != 17 or \ len(self.card_play_data_list["landlord_down"]) != 17 or \ len(self.card_play_data_list["landlord"]) != 20: QMessageBox.critical(self, "手牌识别出错", "初始手牌数目有误", QMessageBox.Yes, QMessageBox.Yes) self.init_display() return # play order: 0 - the player leads, 1 - the player's next seat leads, 2 - the player's previous seat leads self.play_order = 0 if self.user_position == "landlord" else 1 if self.user_position == "landlord_up" else 2 # create an AI that acts for the player ai_players = [0, 0] ai_players[0] = self.user_position
ai_players[1] = DeepAgent(self.user_position, self.card_play_model_path_dict[self.user_position])
4
2023-12-01 04:04:30+00:00
24k
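The DouZero record above keeps every hand in two parallel forms, a display string of rank characters and a list of integer engine codes, converted through the EnvCard2RealCard / RealCard2EnvCard tables. A minimal standalone sketch of that round-trip (tables copied from the snippet; the example hand is arbitrary, X and D are the two jokers):

# Round-trip between display characters and DouZero engine card codes.
EnvCard2RealCard = {3: '3', 4: '4', 5: '5', 6: '6', 7: '7', 8: '8', 9: '9',
                    10: 'T', 11: 'J', 12: 'Q', 13: 'K', 14: 'A', 17: '2',
                    20: 'X', 30: 'D'}
RealCard2EnvCard = {v: k for k, v in EnvCard2RealCard.items()}

hand = "33389TTJQKA2XD"  # arbitrary example hand
env_hand = [RealCard2EnvCard[c] for c in hand]             # chars -> engine codes
restored = ''.join(EnvCard2RealCard[c] for c in env_hand)  # engine codes -> chars
assert restored == hand
print(env_hand)  # [3, 3, 3, 8, 9, 10, 10, 11, 12, 13, 14, 17, 20, 30]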
super1207/satoricq
satori.py
[ { "identifier": "AdapterKook", "path": "kook_adapter.py", "snippet": "class AdapterKook:\n def __init__(self,config = {}) -> None:\n '''用于初始化一些配置信息,尽量不要在这里阻塞,因为此处不具备异步环境,如果你需要读写配置文件,请在init_after中进行'''\n self._access_token = config[\"access_token\"]\n self._http_url = \"https://ww...
import asyncio import aiohttp import json import uuid from kook_adapter import AdapterKook from mihoyo_adapter import AdapterMihoyo from onebot_adapter import AdapterOnebot from config import Config from aiohttp import web from qq_adapter import AdapterQQ from tool import remove_json_null
17,871
if adapter is None: return web.Response(text="bot not found") if method == "/v1/login.get": ret = await adapter.get_login(platform,self_id) return web.Response(text=json.dumps(remove_json_null(ret)),headers={ "Content-Type":"application/json; charset=utf-8" }) elif method == "/v1/guild.member.get": body = await request.json() ret = await adapter.get_guild_member(platform,self_id,body["guild_id"],body["user_id"]) return web.Response(text=json.dumps(remove_json_null(ret)),headers={ "Content-Type":"application/json; charset=utf-8" }) elif method == "/v1/message.create": body = await request.json() ret = await adapter.create_message(platform,self_id,body["channel_id"],body["content"]) return web.Response(text=json.dumps(remove_json_null(ret)),headers={ "Content-Type":"application/json; charset=utf-8" }) elif method == "/v1/channel.list": body = await request.json() ret = await adapter.get_channel_list(platform,self_id,body["guild_id"]) return web.Response(text=json.dumps(remove_json_null(ret)),headers={ "Content-Type":"application/json; charset=utf-8" }) elif method == "/v1/user.get": body = await request.json() ret = await adapter.get_user(platform,self_id,body["user_id"]) return web.Response(text=json.dumps(remove_json_null(ret)),headers={ "Content-Type":"application/json; charset=utf-8" }) return web.Response(text="method not found") async def _handle_http_admin(self,request:web.Request): print("----http admin",request) '''handle admin api calls here''' # auth check if self._config.access_token != "": if request.headers.get("Authorization") != "Bearer " + self._config.access_token: print("token err") return web.Response(text="token err") method = request.url.path if method == "/v1/admin/login.list": ret = [] for adapter in self.adapterlist: ret += await adapter["adapter"].get_login(None,None) return web.Response(text=json.dumps(remove_json_null(ret)),headers={ "Content-Type":"application/json; charset=utf-8" }) return web.Response(text="method not found") async def _handle_http_foo(self,request:web.Request): '''handle any other api calls here''' print("--------http other",request) return web.Response(text="method not found") async def _handle_events_ws(self,request:web.Request): '''handle the websocket here''' ws_id = str(uuid.uuid4()) ws = web.WebSocketResponse() ws.can_prepare(request) await ws.prepare(request) self.wsmap[ws_id] = { "ws":ws, "is_access":False } print("--------http ws",request,ws_id) try: async for msg in ws: if msg.type == aiohttp.WSMsgType.TEXT: data_json = json.loads(msg.data) print("--------recv_ws",json.dumps(msg.data)) op = data_json["op"] if op == 3: if self._config.access_token != "": if data_json["body"]["token"] != self._config.access_token: raise RuntimeError("token err") self.wsmap[ws_id]["is_access"] = True async def get_logins(self,ws): logins = [] for adapter in self.adapterlist: logins += await adapter["adapter"].get_login(None,None) await Satori.ws_send_json(ws,{ "op":4, "body":{ "logins":logins } }) asyncio.create_task(get_logins(self,ws)) elif op == 1: async def send_pong(ws): await Satori.ws_send_json(ws,{ "op":2 }) asyncio.create_task(send_pong(ws)) elif msg.type == aiohttp.WSMsgType.ERROR: print('ws connection closed with exception %s' % ws.exception()) finally: del self.wsmap[ws_id] print("--------http ws close",ws_id) return ws async def init_after(self): async def event_loop(self:Satori,adapter:AdapterOnebot): while True: msg = await adapter.get_msg() for wsid in self.wsmap: ws = self.wsmap[wsid] if ws["is_access"]: msg["id"] = self._evt_id asyncio.create_task(Satori.ws_send_json(ws["ws"],{"op":0,"body":msg}))
self._evt_id += 1 # read the config file await self._config.read_config() # create the adapters for botcfg in self._config.botlist: if botcfg["platform"] == "onebot": adapter = AdapterOnebot(botcfg) elif botcfg["platform"] == "kook":
class Satori: def __init__(self) -> None: self._config:Config = Config() self.adapterlist = [] self.wsmap = {} self._evt_id = 100 async def _get_adapter(self,platform,self_id): ''' look up the adapter that serves a given platform and self_id ''' for adapter in self.adapterlist: info = adapter["info"] for bot in info: if self_id == bot["self_id"] and bot["platform"] == platform: return adapter["adapter"] return None async def ws_send_json(ws,js) -> None: js = remove_json_null(js) print("--------ws_send_json",json.dumps(js)) await ws.send_json(js) async def _handle_http_normal(self,request:web.Request): print("----http normal",request) '''handle normal api calls here''' # auth check if self._config.access_token != "": if request.headers.get("Authorization") != "Bearer " + self._config.access_token: print("token err") return web.Response(text="token err") method = request.url.path platform = request.headers.get("X-Platform") self_id = request.headers.get("X-Self-ID") adapter:AdapterOnebot = await self._get_adapter(platform,self_id) if adapter is None: return web.Response(text="bot not found") if method == "/v1/login.get": ret = await adapter.get_login(platform,self_id) return web.Response(text=json.dumps(remove_json_null(ret)),headers={ "Content-Type":"application/json; charset=utf-8" }) elif method == "/v1/guild.member.get": body = await request.json() ret = await adapter.get_guild_member(platform,self_id,body["guild_id"],body["user_id"]) return web.Response(text=json.dumps(remove_json_null(ret)),headers={ "Content-Type":"application/json; charset=utf-8" }) elif method == "/v1/message.create": body = await request.json() ret = await adapter.create_message(platform,self_id,body["channel_id"],body["content"]) return web.Response(text=json.dumps(remove_json_null(ret)),headers={ "Content-Type":"application/json; charset=utf-8" }) elif method == "/v1/channel.list": body = await request.json() ret = await adapter.get_channel_list(platform,self_id,body["guild_id"]) return web.Response(text=json.dumps(remove_json_null(ret)),headers={ "Content-Type":"application/json; charset=utf-8" }) elif method == "/v1/user.get": body = await request.json() ret = await adapter.get_user(platform,self_id,body["user_id"]) return web.Response(text=json.dumps(remove_json_null(ret)),headers={ "Content-Type":"application/json; charset=utf-8" }) return web.Response(text="method not found") async def _handle_http_admin(self,request:web.Request): print("----http admin",request) '''handle admin api calls here''' # auth check if self._config.access_token != "": if request.headers.get("Authorization") != "Bearer " + self._config.access_token: print("token err") return web.Response(text="token err") method = request.url.path if method == "/v1/admin/login.list": ret = [] for adapter in self.adapterlist: ret += await adapter["adapter"].get_login(None,None) return web.Response(text=json.dumps(remove_json_null(ret)),headers={ "Content-Type":"application/json; charset=utf-8" }) return web.Response(text="method not found") async def _handle_http_foo(self,request:web.Request): '''handle any other api calls here''' print("--------http other",request) return web.Response(text="method not found") async def _handle_events_ws(self,request:web.Request): '''handle the websocket here''' ws_id = str(uuid.uuid4()) ws = web.WebSocketResponse() ws.can_prepare(request) await ws.prepare(request) self.wsmap[ws_id] = { "ws":ws, "is_access":False } print("--------http ws",request,ws_id) try: async for msg in ws: if msg.type == aiohttp.WSMsgType.TEXT: data_json = json.loads(msg.data) print("--------recv_ws",json.dumps(msg.data)) op = data_json["op"] if op == 3: if
self._config.access_token != "": if data_json["body"]["token"] != self._config.access_token: raise RuntimeError("token err") self.wsmap[ws_id]["is_access"] = True async def get_logins(self,ws): logins = [] for adapter in self.adapterlist: logins += await adapter["adapter"].get_login(None,None) await Satori.ws_send_json(ws,{ "op":4, "body":{ "logins":logins } }) asyncio.create_task(get_logins(self,ws)) elif op == 1: async def send_pong(ws): await Satori.ws_send_json(ws,{ "op":2 }) asyncio.create_task(send_pong(ws)) elif msg.type == aiohttp.WSMsgType.ERROR: print('ws connection closed with exception %s' % ws.exception()) finally: del self.wsmap[ws_id] print("--------http ws close",ws_id) return ws async def init_after(self): async def event_loop(self:Satori,adapter:AdapterOnebot): while True: msg = await adapter.get_msg() for wsid in self.wsmap: ws = self.wsmap[wsid] if ws["is_access"]: msg["id"] = self._evt_id asyncio.create_task(Satori.ws_send_json(ws["ws"],{"op":0,"body":msg})) self._evt_id += 1 # read the config file await self._config.read_config() # create the adapters for botcfg in self._config.botlist: if botcfg["platform"] == "onebot": adapter = AdapterOnebot(botcfg) elif botcfg["platform"] == "kook":
adapter = AdapterKook(botcfg)
0
2023-12-03 13:53:47+00:00
24k
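The WebSocket handler in the record above implements a small op-code protocol: a client identifies with op 3 carrying the access token, the server answers with op 4 and the current logins, and an op 1 ping gets an op 2 pong. Below is a minimal client-side sketch of that handshake using aiohttp; the host, port, and events route are placeholder assumptions, since the route registration is not shown in this excerpt:

import asyncio
import aiohttp

async def main():
    async with aiohttp.ClientSession() as session:
        # Hypothetical endpoint; substitute the real host/port and events route.
        async with session.ws_connect("http://127.0.0.1:8080/v1/events") as ws:
            # op 3: identify with the access token (checked only if the server sets one)
            await ws.send_json({"op": 3, "body": {"token": "<access_token>"}})
            ready = await ws.receive_json()  # expect {"op": 4, "body": {"logins": [...]}}
            print("ready:", ready)
            await ws.send_json({"op": 1})    # ping
            pong = await ws.receive_json()   # expect {"op": 2}
            print("pong:", pong)

asyncio.run(main())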
aliyun/pai-python-sdk
pai/model.py
[ { "identifier": "git_utils", "path": "pai/common/git_utils.py", "snippet": "def git_clone_repo(git_config: Dict[str, str], source_dir: Optional[str] = None):\ndef _validate_git_config(git_config):\ndef _build_and_run_clone_command(git_config, dest_dir):\ndef _clone_command_for_codeup(git_config, dest_di...
import copy import distutils.dir_util import json import logging import os.path import posixpath import shlex import shutil import tempfile import textwrap import time import requests from typing import Any, Dict, Iterator, List, Optional, Tuple, Union from addict import Dict as AttrDict from oss2 import ObjectIterator from .common import git_utils from .common.consts import INSTANCE_TYPE_LOCAL_GPU, ModelFormat from .common.docker_utils import ContainerRun, run_container from .common.oss_utils import OssUriObj, download, is_oss_uri, upload from .common.utils import ( generate_repr, is_local_run_instance_type, random_str, to_plain_text, ) from .exception import DuplicatedMountException, MountPathIsOccupiedException from .image import ImageInfo from .predictor import AsyncPredictor, LocalPredictor, Predictor, ServiceType from .serializers import SerializerBase from .session import Session, get_default_session from .estimator import AlgorithmEstimator
17,212
service_name=service_name, session=self.session, serializer=serializer, ) print( "View the service detail by accessing the console URI: \n{}".format( predictor.console_uri ) ) if wait: predictor.wait_for_ready() return predictor def _wait_service_visible(self, service_name, attempts=3, interval=2): """Wait for the service to be visible in DescribeService API. hack: https://aone.alibaba-inc.com/v2/project/1134421/bug#viewIdentifier=5dfb195e2e2b84f6b2f24718&openWorkitemIdentifier=50192431 """ while attempts > 0: obj = self.session.service_api.get(service_name) if "ServiceUid" in obj: return attempts -= 1 time.sleep(interval) logger.warning("DescribeService API failed to get the Service object.") def _build_service_config( self, service_name: str = None, instance_count: int = None, instance_type: str = None, resource_config: Union[ResourceConfig, Dict[str, Any]] = None, resource_id: str = None, service_type: str = None, options: Dict[str, Any] = None, ) -> Dict[str, Any]: """Build a service config dictionary used to create a PAI EAS service.""" self.model_data = self._upload_model_data() resource_config = ( ResourceConfig(**resource_config) if resource_config and isinstance(resource_config, dict) else None ) if resource_config and instance_type: raise ValueError( f"Only one of 'instance_type' and 'resource_config' " f"is required, but both have been provided: instance_type" f"={instance_type}, resource_config=" f"{resource_config}." ) inference_spec = InferenceSpec( self._get_inference_spec().to_dict() if self.inference_spec else dict() ) if self.model_data: if not inference_spec.is_container_serving(): # if model_data is an OSS URI with endpoint, truncate the endpoint. oss_uri_obj = OssUriObj(self.model_data) model_path_uri = "oss://{bucket_name}/{key}".format( bucket_name=oss_uri_obj.bucket_name, key=oss_uri_obj.object_key, ) inference_spec.add_option("model_path", model_path_uri) else: try: inference_spec.mount( self.model_data, mount_path=DefaultServiceConfig.model_path, ) except DuplicatedMountException as e: # ignore duplicated mount logger.info("Model is already mounted the container: %s", e) if service_type: inference_spec.add_option("metadata.type", service_type) if inference_spec.is_container_serving(): inference_spec.add_option("metadata.rpc.proxy_path", "/") if service_name: inference_spec.add_option("name", service_name) if instance_count: inference_spec.add_option("metadata.instance", instance_count) if instance_type: inference_spec.add_option("cloud.computing.instance_type", instance_type) elif resource_config: inference_spec.add_option("metadata.cpu", resource_config.cpu) inference_spec.add_option("metadata.memory", resource_config.memory) if resource_config.gpu: inference_spec.add_option("metadata.gpu", resource_config.gpu) if resource_config.gpu_memory: inference_spec.add_option( "metadata.gpu_memory", resource_config.gpu_memory ) if resource_config.gpu: logger.warning( "Parameters 'gpu' is set, the 'gpu_memory' parameter " "does not take effect." ) if resource_id: inference_spec.add_option("metadata.resource", resource_id) if options: inference_spec.merge_options(options=options) return inference_spec.to_dict() def _deploy_local( self, instance_type: str, serializer: SerializerBase = None, wait: bool = True,
# Copyright 2023 Alibaba, Inc. or its affiliates. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. logger = logging.getLogger(__name__) # Reserved ports for internal use, do not use them for service _RESERVED_PORTS = [8080, 9090] class DefaultServiceConfig(object): """Default configuration used in creating prediction service.""" # Listen Port listen_port = 8000 # Default model path in container model_path = "/eas/workspace/model/" # Default user code path in container code_path = "/ml/usercode/" class ResourceConfig(object): """A class that represents the resource used by a PAI prediction service instance.""" def __init__(self, cpu: int, memory: int, gpu: int = None, gpu_memory: int = None): """ResourceConfig initializer. The public resource group does not support requesting GPU resources with `ResourceConfig`. Use the 'gpu' and 'gpu_memory' parameters only for services deployed to dedicated resource groups that provide GPU machine instances. Args: cpu (int): The number of CPUs that each instance requires. memory (int): The amount of memory that each instance requires, must be an integer, Unit: MB. gpu (int): The number of GPUs that each instance requires. gpu_memory (int): The amount of GPU memory that each instance requires. The value must be an integer, Unit: GB. PAI allows memory resources of a GPU to be allocated to multiple instances. If you want multiple instances to share the memory resources of a GPU, set the gpu parameter to 0. If you set the ``gpu`` parameter to 1, each instance occupies a GPU and the gpu_memory parameter does not take effect. .. note:: **Important** PAI does not enable the strict isolation of GPU memory. To prevent out of memory (OOM) errors, make sure that the GPU memory used by each instance does not exceed the requested amount. """ self.cpu = cpu self.memory = memory self.gpu = gpu self.gpu_memory = gpu_memory def __repr__(self): return ( f"ResourceConfig(cpu={self.cpu}, memory={self.memory}MB, gpu={self.gpu or 0}," f" gpu_memory={self.gpu_memory or 0}GB)" ) def __str__(self): return self.__repr__() def to_dict(self): """Transform the ResourceConfig instance to a dictionary. Returns: dict: """ res = { "cpu": self.cpu, "gpu": self.gpu, "gpu_memory": self.gpu_memory, "memory": self.memory, } return {k: v for k, v in res.items() if v is not None} class InferenceSpec(object): """A class used to describe how to create a prediction service. InferenceSpec is used to describe how the model is served in PAI. To view the full supported parameters, please see the following hyperlink: `Parameters of model services <https://help.aliyun.com/document_detail/450525.htm>`_. Example of how to configure an InferenceSpec:: >>> # build an inference_spec that uses the XGBoost processor.
>>> infer_spec = InferenceSpec(processor="xgboost") >>> infer_spec.metadata.rpc.keepalive = 1000 >>> infer_spec.warm_up_data_path = "oss://bucket-name/path/to/warmup-data" >>> infer_spec.add_option("metadata.rpc.max_batch_size", 8) >>> print(infer_spec.processor) xgboost >>> print(infer_spec.metadata.rpc.keepalive) 1000 >>> print(infer_spec.metadata.rpc.max_batch_size) 8 >>> print(infer_spec.to_dict()) {'processor': 'xgboost', 'metadata': {'rpc': {'keepalive': 1000, 'max_batch_size': 8}}, 'warm_up_data_path': 'oss://bucket-name/path/to/warmup-data'} """ def __init__(self, *args, **kwargs): """InferenceSpec initializer. Args: **kwargs: Parameters of the inference spec. """ properties = kwargs.pop("__properties", []) cfg_dict = copy.deepcopy(kwargs) cfg_dict = {k: v for k, v in cfg_dict.items() if not k.startswith("_")} if args: if len(args) > 1: raise TypeError() cfg_dict.update(args[0]) super(InferenceSpec, self).__setattr__( "_cfg_dict", self._transform_value(cfg_dict) ) super(InferenceSpec, self).__setattr__("__properties", properties) def __repr__(self): return json.dumps(self.to_dict(), sort_keys=True, indent=4) def _transform_value(self, value): if isinstance(value, (List, Tuple)): return [self._transform_value(item) for item in value] elif isinstance(value, (Dict, AttrDict)): return AttrDict( {key: self._transform_value(value) for key, value in value.items()} ) return value def __missing__(self, name): return self._cfg_dict.__missing__(name) def __setitem__(self, name, value): return self._cfg_dict.__setitem__(name, self._transform_value(value)) def __setattr__(self, name, value): if name in getattr(self, "__properties"): super(InferenceSpec, self).__setattr__(name, self._transform_value(value)) else: self._cfg_dict.__setattr__(name, self._transform_value(value)) def __getattr__(self, item): if item.startswith("_"): return getattr(self, item) return self._cfg_dict.__getitem__(item) def __contains__(self, item): return item in self._cfg_dict def to_dict(self) -> Dict: """Return a dictionary that represent the InferenceSpec.""" return self._cfg_dict.to_dict() def add_option(self, name: str, value): """Add an option to the inference_spec instance. Args: name (str): Name of the option to set, represented as the JSON path of the parameter for the InferenceSpec. To view the full supported parameters, please see the following hyperlink: `Parameters of model services <https://help.aliyun.com/document_detail/450525.htm>`_. value: Value for the option. Examples: >>> infer_spec = InferenceSpec(processor="tensorflow_gpu_1.12") >>> infer_spec.add_option("metadata.rpc.keepalive", 10000) >>> infer_spec.metadata.rpc.keepalive 10000 >>> infer_spec.to_dict() {'processor': 'tensorflow_gpu_1.12', 'metadata': {'rpc': {'keepalive': 10000}}} """ src = self._transform_value(value) for k in reversed(name.split(".")): src = {k: src} self._cfg_dict.update(AttrDict(src)) def merge_options(self, options: Dict[str, Any]): """Merge options from a dictionary.""" for key, value in options.items(): self.add_option(key, value) @classmethod def from_dict(cls, config: Dict[str, Any]) -> "InferenceSpec": """Initialize a InferenceSpec from a dictionary. You can use this method to initialize a InferenceSpec instance from a dictionary. Returns: :class:`pai.model.InferenceSpec`: A InferenceSpec instance. 
""" config = config or dict() return cls(**config) def is_container_serving(self): return "containers" in self._cfg_dict @classmethod def _upload_source_dir(cls, source_dir, session): """Upload source files to OSS bucket.""" if not os.path.exists(source_dir): raise ValueError(f"Input source code path does not exist: {source_dir}.") if not os.path.isdir(source_dir): raise ValueError( f"Input source code path should be a directory: {source_dir}." ) target_dir = session.get_storage_path_by_category(category="inference_src") # upload local script data to the OSS bucket. uploaded_source_code = upload( source_dir, target_dir, session.oss_bucket, ) logger.debug("Uploaded source code to OSS: %s", uploaded_source_code) return uploaded_source_code def mount( self, source: str, mount_path: str, session: Session = None, ) -> Dict[str, Any]: """Mount a source storage to the running container. .. note:: If source is a local path, it will be uploaded to the OSS bucket and mounted. If source is a OSS path, it will be mounted directly. Args: source (str): The source storage to be attached, currently only support OSS path in OSS URI format and local path. mount_path (str): The mount path in the container. session (Session, optional): A PAI session instance used for communicating with PAI service. Returns: Dict[str, Any]: The storage config. Raises: DuplicateMountException: If the mount path is already used or source OSS path is mounted to the container. Examples:: # Mount a OSS storage path to the running container. >>> inference_spec.mount("oss://<YourOssBucket>/path/to/directory/model.json", ... "/ml/model/") # 'Mount' a local path to the running container. >>> inference_spec.mount("/path/to/your/data/", "/ml/model/") """ session = session or get_default_session() # TODO: supports more storages, such as NAS, PAI Dataset, PAI CodeSource, etc. if not isinstance(source, str): raise ValueError( "Parameter should be a string which represents an OSS storage path" " or a local file path." ) if "storage" in self._cfg_dict: configs = self._cfg_dict.get("storage", []) else: configs = [] uris = set() for conf in configs: # check if target mount path is already used. if conf.get("mount_path") == mount_path: raise MountPathIsOccupiedException( f"The mount path '{mount_path}' has already been used." ) mount_uri = conf.get("oss", {}).get("path") uris.add(mount_uri) if is_oss_uri(source): oss_uri_obj = OssUriObj(source) storage_config = { "mount_path": mount_path, "oss": {"path": oss_uri_obj.get_dir_uri()}, } elif os.path.exists(source): # if source is a local path, upload it to OSS bucket and use OSS URI # as storage source. oss_path = session.get_storage_path_by_category("model_data") oss_uri = upload( source_path=source, oss_path=oss_path, bucket=session.oss_bucket ) oss_uri_obj = OssUriObj(oss_uri) storage_config = { "mount_path": mount_path, "oss": {"path": oss_uri_obj.get_dir_uri()}, } else: raise ValueError( "Source path is not a valid OSS URI or a existing local path." ) # check if the source OSS Path is already mounted to the container. if oss_uri_obj.get_dir_uri() in uris: raise DuplicatedMountException( f"Source OSS path '{oss_uri_obj.get_dir_uri()}' is already " f"mounted to the container." 
) configs.append(storage_config) self.storage = configs return storage_config def container_serving_spec( command: str, image_uri: Union[str, ImageInfo], source_dir: Optional[str] = None, git_config: Optional[Dict[str, Any]] = None, port: Optional[int] = None, environment_variables: Optional[Dict[str, str]] = None, requirements: Optional[List[str]] = None, requirements_path: Optional[str] = None, health_check: Optional[Dict[str, Any]] = None, session: Optional[Session] = None, ) -> InferenceSpec: """A convenient function to create an InferenceSpec instance that serving the model with given container and script. Examples:: infer_spec: InferenceSpec = container_serving_spec( command="python run.py", source_dir="./model_server/", image_uri="<ServingImageUri>", ) m = Model( model_data="oss://<YourOssBucket>/path/to/your/model", inference_spec=infer_spec, ) m.deploy( instance_type="ecs.c6.xlarge" ) Args: command (str): The command used to launch the Model server. source_dir (str): A relative path or an absolute path to the source code directory used to load model and launch the HTTP server, it will be uploaded to the OSS bucket and mounted to the container. If there is a ``requirements.txt`` file under the directory, it will be installed before the prediction server started. If 'git_config' is provided, 'source_dir' should be a relative location to a directory in the Git repo. With the following GitHub repo directory structure: .. code:: |----- README.md |----- src |----- train.py |----- test.py if you need 'src' directory as the source code directory, you can assign source_dir='./src/'. git_config (Dict[str, str]): Git configuration used to clone the repo. Including ``repo``, ``branch``, ``commit``, ``username``, ``password`` and ``token``. The ``repo`` is required. All other fields are optional. ``repo`` specifies the Git repository. If you don't provide ``branch``, the default value 'master' is used. If you don't provide ``commit``, the latest commit in the specified branch is used. ``username``, ``password`` and ``token`` are for authentication purpose. For example, the following config: .. code:: python git_config = { 'repo': 'https://github.com/modelscope/modelscope.git', 'branch': 'master', 'commit': '9bfc4a9d83c4beaf8378d0a186261ffc1cd9f960' } results in cloning the repo specified in 'repo', then checking out the 'master' branch, and checking out the specified commit. image_uri (str): The Docker image used to run the prediction service. port (int): Expose port of the server in container, the prediction request will be forward to the port. The environment variable ``LISTENING_PORT`` in the container will be set to this value. Default to 8000. environment_variables (Dict[str, str], optional): Dictionary of environment variable key-value pairs to set on the running container. requirements (List[str], optional): A list of Python package dependency, it will be installed before the serving container run. requirements_path (str, optional): A absolute path to the requirements.txt in the container. health_check (Dict[str, Any], optional): The health check configuration. If it not set, A TCP readiness probe will be used to check the health of the HTTP server. session (Session, optional): A PAI session instance used for communicating with PAI service. Returns: :class:`pai.model.InferenceSpec`: An InferenceSpec instance. 
""" session = session or get_default_session() if git_config: updated_args = git_utils.git_clone_repo( git_config=git_config, source_dir=source_dir, ) source_dir = updated_args["source_dir"] if not port: port = DefaultServiceConfig.listen_port elif int(port) in _RESERVED_PORTS: raise ValueError( "Reserved port {} is not allowed to use as serving port.".format(port), ) if source_dir: if not os.path.exists(source_dir): raise ValueError("Source directory {} does not exist.".format(source_dir)) if not os.path.isdir(source_dir): raise ValueError( "Source directory {} is not a directory.".format(source_dir) ) code_mount_path = DefaultServiceConfig.code_path # build the command for serving container. command = textwrap.dedent( f"""\ # change working directory to code mount path. cd {code_mount_path} {command} """ ) if not requirements_path and os.path.exists( os.path.join(source_dir, "requirements.txt") ): requirements_path = posixpath.join(code_mount_path, "requirements.txt") else: code_mount_path = None requirements_path = None if isinstance(image_uri, ImageInfo): image_uri = image_uri.image_uri environment_variables = environment_variables or dict() container_spec = { "image": image_uri, "port": port, "script": command, "env": [ {"name": key, "value": str(value)} for key, value in environment_variables.items() ] if environment_variables else [], } if health_check: container_spec["health_check"] = health_check if requirements: container_spec["prepare"] = {"pythonRequirements": requirements} if requirements_path: logger.warning( "If the parameter 'requirements' is set, the requirements_path " "parameter will be ignored." ) elif requirements_path: container_spec["prepare"] = { "pythonRequirementsPath": requirements_path, } inference_spec = InferenceSpec(containers=[container_spec]) # mount the uploaded serving scripts to the serving container. if source_dir: inference_spec.mount( source_dir, code_mount_path, session=session, ) return inference_spec class _BuiltinProcessor(object): """Helper class uses for getting the builtin processor""" PMML = "pmml" XGBoost = "xgboost" SupportedFrameworkAcceleratorVersionConfig = { "tensorflow": { "cpu": [ "1.12", "1.14", "1.15", "2.3", ], "gpu": [ "1.12", "1.14", "1.15", ], }, "pytorch": { "cpu": [ "1.6", ], "gpu": [ "1.6", ], }, } # Hard code default processor for specific model format. 
ModelFormatDefaultProcessorMapping = { ModelFormat.PMML: "pmml", ModelFormat.SavedModel: "tensorflow_cpu_2.3", ModelFormat.TorchScript: "pytorch_cpu_1.6", ModelFormat.FrozenPb: "pytorch_cpu_1.6", ModelFormat.CaffePrototxt: "caffe_cpu", ModelFormat.ONNX: "onnx_cu100", } @classmethod def get_default_by_model_format(cls, model_format: str) -> str: """Get the default processor for a specific model format.""" if model_format in cls.ModelFormatDefaultProcessorMapping: return cls.ModelFormatDefaultProcessorMapping[model_format] @classmethod def from_framework_version( cls, framework_name, framework_version, accelerator=None ): accelerator = accelerator or "cpu" versions = cls.SupportedFrameworkAcceleratorVersionConfig.get( framework_name, dict() ).get(accelerator, []) if framework_version in versions: return "{}_{}_{}".format(framework_name, accelerator, framework_version) else: logger.warning( "Could not find the processor for the framework_version({} {}), use the" " latest processor".format(framework_name, framework_version) ) return "{}_{}_{}".format(framework_name, accelerator, versions[-1]) class ModelBase(object): """A class that represents ModelBase.""" def __init__( self, model_data: str, inference_spec: Optional[InferenceSpec] = None, session: Session = None, ): self.model_data = model_data self.inference_spec = inference_spec self.session = session or get_default_session() def download(self, target_dir: str): """Download the model data from OSS to local directory. Args: target_dir (str): The target directory to download the model data. Returns: str: Local directory path that stores the model data. """ if not self.model_data: raise ValueError("Could not find the model data for this model.") if not is_oss_uri(self.model_data): raise RuntimeError("Download method only supports model data stored in OSS.") self._download_model_data(target_dir) return target_dir def _download_model_data(self, target_dir): if not self.model_data: return logger.info(f"Prepare model data to local directory: {target_dir}") if self.model_data.startswith("oss://"): oss_uri = OssUriObj(self.model_data) oss_bucket = self.session.get_oss_bucket(oss_uri.bucket_name) download( oss_path=oss_uri.object_key, local_path=target_dir, bucket=oss_bucket, un_tar=True, ) else: if not os.path.exists(self.model_data): raise ValueError(f"Model data path does not exist: {self.model_data}") os.makedirs(target_dir, exist_ok=True) if os.path.isfile(self.model_data): shutil.copy( self.model_data, os.path.join(target_dir, os.path.basename(self.model_data)), ) else: distutils.dir_util.copy_tree(self.model_data, target_dir) def _upload_model_data(self): """Upload the model artifact to OSS bucket if self.model_data is a local file path. """ if not self.model_data: return elif is_oss_uri(self.model_data): return self.model_data elif not os.path.exists(self.model_data): raise RuntimeError(f"Model data path does not exist: {self.model_data}") dest_oss_path = self.session.get_storage_path_by_category(category="model_data") upload_model_data = upload( source_path=self.model_data, oss_path=dest_oss_path, bucket=self.session.oss_bucket, ) return upload_model_data def list_model_files(self, uri_format: bool = False) -> Iterator[str]: """List model files under the model path. Args: uri_format (bool): If True, return the model file path in OSS URI format. Returns: Iterator[str]: Iterator of model files.
""" if not self.model_data: raise ValueError("Model data path is not specified.") if not is_oss_uri(self.model_data): raise ValueError("Method only support model data stored in OSS.") oss_uri_obj = OssUriObj(self.model_data) bucket = self.session.get_oss_bucket( bucket_name=oss_uri_obj.bucket_name, ) def _get_relative_path(obj_key: str): # if the model_data is reference an object, return the object file # name. if oss_uri_obj.object_key == obj_key: return os.path.basename(obj_key) path = obj_key[len(oss_uri_obj.object_key) :] return path.lstrip("/") if path.startswith("/") else path obj_iter = ObjectIterator(bucket=bucket, prefix=oss_uri_obj.object_key) for obj_info in obj_iter: if uri_format: yield f"oss://{bucket.bucket_name}/{obj_info.key}" else: yield _get_relative_path(obj_info.key) def _get_inference_spec(self): return self.inference_spec def deploy( self, service_name: str, instance_count: Optional[int] = 1, instance_type: Optional[str] = None, resource_config: Optional[Union[Dict[str, int], ResourceConfig]] = None, resource_id: Optional[str] = None, options: Optional[Dict[str, Any]] = None, service_type: Optional[str] = None, wait: bool = True, serializer: Optional["SerializerBase"] = None, **kwargs, ): """Deploy a prediction service with the model.""" if is_local_run_instance_type(instance_type): return self._deploy_local( instance_type=instance_type, serializer=serializer, wait=wait, ) else: return self._deploy( service_name=service_name, instance_count=instance_count, instance_type=instance_type, resource_config=resource_config, resource_id=resource_id, service_type=service_type, options=options, wait=wait, serializer=serializer, ) def _generate_service_name(self): s = os.path.basename(self.model_data.rstrip("/")) + random_str(8) return to_plain_text(s) def _deploy( self, service_name: str = None, instance_count: int = 1, instance_type: str = None, resource_config: Union[Dict[str, int], ResourceConfig] = None, resource_id: str = None, service_type: str = None, options: Dict[str, Any] = None, wait: bool = True, serializer: "SerializerBase" = None, ): """Create a prediction service.""" if not service_name: service_name = self._generate_service_name() logger.info( "Service name is not specified, using a generated service" f" name to create the service: service_name={service_name}" ) config = self._build_service_config( service_name=service_name, instance_count=instance_count, instance_type=instance_type, service_type=service_type, resource_config=resource_config, resource_id=resource_id, options=options, ) service_name = self.session.service_api.create(config=config) self._wait_service_visible(service_name) if service_type == ServiceType.Async: predictor = AsyncPredictor( service_name=service_name, session=self.session, serializer=serializer, ) else: predictor = Predictor( service_name=service_name, session=self.session, serializer=serializer, ) print( "View the service detail by accessing the console URI: \n{}".format( predictor.console_uri ) ) if wait: predictor.wait_for_ready() return predictor def _wait_service_visible(self, service_name, attempts=3, interval=2): """Wait for the service to be visible in DescribeService API. 
hack: https://aone.alibaba-inc.com/v2/project/1134421/bug#viewIdentifier=5dfb195e2e2b84f6b2f24718&openWorkitemIdentifier=50192431 """ while attempts > 0: obj = self.session.service_api.get(service_name) if "ServiceUid" in obj: return attempts -= 1 time.sleep(interval) logger.warning("DescribeService API failed to get the Service object.") def _build_service_config( self, service_name: str = None, instance_count: int = None, instance_type: str = None, resource_config: Union[ResourceConfig, Dict[str, Any]] = None, resource_id: str = None, service_type: str = None, options: Dict[str, Any] = None, ) -> Dict[str, Any]: """Build a service config dictionary used to create a PAI EAS service.""" self.model_data = self._upload_model_data() resource_config = ( ResourceConfig(**resource_config) if resource_config and isinstance(resource_config, dict) else None ) if resource_config and instance_type: raise ValueError( f"Only one of 'instance_type' and 'resource_config' " f"is required, but both have been provided: instance_type" f"={instance_type}, resource_config=" f"{resource_config}." ) inference_spec = InferenceSpec( self._get_inference_spec().to_dict() if self.inference_spec else dict() ) if self.model_data: if not inference_spec.is_container_serving(): # if model_data is an OSS URI with endpoint, truncate the endpoint. oss_uri_obj = OssUriObj(self.model_data) model_path_uri = "oss://{bucket_name}/{key}".format( bucket_name=oss_uri_obj.bucket_name, key=oss_uri_obj.object_key, ) inference_spec.add_option("model_path", model_path_uri) else: try: inference_spec.mount( self.model_data, mount_path=DefaultServiceConfig.model_path, ) except DuplicatedMountException as e: # ignore duplicated mount logger.info("Model is already mounted the container: %s", e) if service_type: inference_spec.add_option("metadata.type", service_type) if inference_spec.is_container_serving(): inference_spec.add_option("metadata.rpc.proxy_path", "/") if service_name: inference_spec.add_option("name", service_name) if instance_count: inference_spec.add_option("metadata.instance", instance_count) if instance_type: inference_spec.add_option("cloud.computing.instance_type", instance_type) elif resource_config: inference_spec.add_option("metadata.cpu", resource_config.cpu) inference_spec.add_option("metadata.memory", resource_config.memory) if resource_config.gpu: inference_spec.add_option("metadata.gpu", resource_config.gpu) if resource_config.gpu_memory: inference_spec.add_option( "metadata.gpu_memory", resource_config.gpu_memory ) if resource_config.gpu: logger.warning( "Parameters 'gpu' is set, the 'gpu_memory' parameter " "does not take effect." ) if resource_id: inference_spec.add_option("metadata.resource", resource_id) if options: inference_spec.merge_options(options=options) return inference_spec.to_dict() def _deploy_local( self, instance_type: str, serializer: SerializerBase = None, wait: bool = True,
) -> LocalPredictor:
17
2023-12-01 01:40:12+00:00
24k
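InferenceSpec.add_option in the record above expands a dotted option path into a nested dictionary and deep-merges it into the config (via addict's AttrDict.update in the original). A plain-dict sketch of the same mechanic, with a hand-rolled merge standing in for AttrDict:

# Expand "a.b.c" + value into {"a": {"b": {"c": value}}}, as add_option does.
def expand_option(name, value):
    src = value
    for key in reversed(name.split(".")):
        src = {key: src}
    return src

# Recursive merge standing in for addict's AttrDict.update.
def deep_merge(dst, src):
    for key, val in src.items():
        if isinstance(val, dict) and isinstance(dst.get(key), dict):
            deep_merge(dst[key], val)
        else:
            dst[key] = val

cfg = {"processor": "tensorflow_gpu_1.12"}
deep_merge(cfg, expand_option("metadata.rpc.keepalive", 10000))
deep_merge(cfg, expand_option("metadata.instance", 2))
print(cfg)
# {'processor': 'tensorflow_gpu_1.12',
#  'metadata': {'rpc': {'keepalive': 10000}, 'instance': 2}}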
mpenning/ciscoconfparse2
dev_tools/compare_v4_v6_methods.py
[ { "identifier": "IPv4Obj", "path": "ciscoconfparse2/ccp_util.py", "snippet": "class IPv4Obj(object):\n dna: str = \"IPv4Obj\"\n v4input: Optional[Union[str,int]] = None\n strict: bool = False\n debug: int = 0\n\n ip_object: Any = None\n network_object: Any = None\n finished_parsing:...
import sys import os from ciscoconfparse import IPv4Obj, IPv6Obj from loguru import logger
15,356
"""Compare methods on IPv4Obj() and IPv6Obj(). Flag missing methods""" sys.path.insert(0, "../") # add the path to the local git repo copy # from this dev_tools/ directory environ = os.environ['VIRTUAL_ENV'] print("ENV", environ) try: print("PYTHONPATH", str(os.environ['PYTHONPATH'])) except Exception as eee: error = f"{eee}: Could not find PYTHONPATH." logger.error(error) raise OSError(error) v4_list = dir(IPv4Obj("127.0.0.1"))
"""Compare methods on IPv4Obj() and IPv6Obj(). Flag missing methods""" sys.path.insert(0, "../") # add the path to the local git repo copy # from this dev_tools/ directory environ = os.environ['VIRTUAL_ENV'] print("ENV", environ) try: print("PYTHONPATH", str(os.environ['PYTHONPATH'])) except Exception as eee: error = f"{eee}: Could not find PYTHONPATH." logger.error(error) raise OSError(error) v4_list = dir(IPv4Obj("127.0.0.1"))
v6_list = dir(IPv6Obj("::1"))
1
2023-12-01 18:43:27+00:00
24k
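The comparison that compare_v4_v6_methods.py sets up reduces to a set difference over dir() listings. A self-contained sketch of that check with two stand-in classes (the real script runs it on IPv4Obj("127.0.0.1") and IPv6Obj("::1")):

# Flag methods present on one class but missing on the other via dir().
class V4Stub:
    def hostmask(self): ...
    def netmask(self): ...

class V6Stub:
    def netmask(self): ...

v4_list = dir(V4Stub())
v6_list = dir(V6Stub())
missing_on_v6 = sorted(set(v4_list) - set(v6_list))
missing_on_v4 = sorted(set(v6_list) - set(v4_list))
print("missing on the IPv6 side:", missing_on_v6)  # ['hostmask']
print("missing on the IPv4 side:", missing_on_v4)  # []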
zerolink-io/zerolink-python
zerolink/req.py
[ { "identifier": "settings", "path": "zerolink/settings.py", "snippet": " CONFIG_FILE = os.path.join(os.environ[\"APPDATA\"], \"zerolink\", \"config\")\n CONFIG_FILE = os.path.join(os.environ[\"HOME\"], \".config\", \"zerolink\", \"config\")\n CONFIG_FILE = os.path.join(\n os.environ[\"HO...
from typing import Any, Optional, cast from zerolink import settings from zerolink.exc import APIError, AuthenticationError from zerolink_client import Client from zerolink_client.api.default import finetune, get_models_models_get from zerolink_client.api.entity import ( desc_entity_id, desc_entity_ontology, lookup_entity, lookup_relation, search_entity, ) from zerolink_client.api.extract import extract_text from zerolink_client.api.fact import ( create_userattribute, create_userentity, create_userrule, create_usertriple, ) from zerolink_client.api.kg import get_triple from zerolink_client.api.question import post_question from zerolink_client.api.session import ( create_session, get_session_entities, get_session_facts, get_user_session, ) from zerolink_client.api.user import create_user from zerolink_client.models import ( ChatSession, CreateAttribute, CreateEntity, CreateRule, CreateRuleResponse, CreateTriple, CreateTuneJobResponse, Entity, HTTPValidationError, Question, QuestionResponse, TextExtract, ) from zerolink_client.types import File, UNSET
16,267
# ------------------------------------------------------------------------ # Endpoints # ------------------------------------------------------------------------ client = Client( base_url=settings.server_url, raise_on_unexpected_status=False, ) def check_api_key() -> None: """ Check if the API key is set. """ if settings.api_key is None: raise AuthenticationError() else: pass def get_user_id() -> str: """ Get the user ID from the server. Only used for Demo server. """ client._headers["Authorization"] = settings.api_key rep = create_user.sync(client=client) if rep is None: raise Exception("Failed to authenticate.") settings.api_key = rep.user_id if isinstance(rep, HTTPValidationError): raise APIError(str(rep)) return rep.user_id def post_session(user_id: str, **kwargs) -> Optional[ChatSession]: """ Create a new session. """ check_api_key() if user_id is None: user_id = settings.api_key rep = create_session.sync(client=client, user_id=user_id, **kwargs) if isinstance(rep, HTTPValidationError): raise APIError(str(rep)) return rep def get_session_name(user_id: str, session_name: str, **kwargs): """ Lookup a session by user and name. """ check_api_key() rep = get_user_session.sync_detailed(user_id, session_name, client=client, **kwargs) if rep.status_code == 200: return rep.parsed elif rep.status_code == 404: return None else: err = rep.content.decode("utf-8") print(err) raise APIError(err) def get_session_entities_list(session_id: int, **kwargs): """ Get the entities of a session. """ check_api_key() rep = get_session_entities.sync_detailed(session_id, client=client, **kwargs) if rep.status_code == 200: return rep.parsed else: err = rep.content.decode("utf-8") print(err) raise APIError(err) def get_session_facts_list(session_id: int, **kwargs): """ Get the facts of a session. """ check_api_key()
# ------------------------------------------------------------------------ # Endpoints # ------------------------------------------------------------------------ client = Client( base_url=settings.server_url, raise_on_unexpected_status=False, ) def check_api_key() -> None: """ Check if the API key is set. """ if settings.api_key is None: raise AuthenticationError() else: pass def get_user_id() -> str: """ Get the user ID from the server. Only used for Demo server. """ client._headers["Authorization"] = settings.api_key rep = create_user.sync(client=client) if rep is None: raise Exception("Failed to authenticate.") settings.api_key = rep.user_id if isinstance(rep, HTTPValidationError): raise APIError(str(rep)) return rep.user_id def post_session(user_id: str, **kwargs) -> Optional[ChatSession]: """ Create a new session. """ check_api_key() if user_id is None: user_id = settings.api_key rep = create_session.sync(client=client, user_id=user_id, **kwargs) if isinstance(rep, HTTPValidationError): raise APIError(str(rep)) return rep def get_session_name(user_id: str, session_name: str, **kwargs): """ Lookup a session by user and name. """ check_api_key() rep = get_user_session.sync_detailed(user_id, session_name, client=client, **kwargs) if rep.status_code == 200: return rep.parsed elif rep.status_code == 404: return None else: err = rep.content.decode("utf-8") print(err) raise APIError(err) def get_session_entities_list(session_id: int, **kwargs): """ Get the entities of a session. """ check_api_key() rep = get_session_entities.sync_detailed(session_id, client=client, **kwargs) if rep.status_code == 200: return rep.parsed else: err = rep.content.decode("utf-8") print(err) raise APIError(err) def get_session_facts_list(session_id: int, **kwargs): """ Get the facts of a session. """ check_api_key()
rep = get_session_facts.sync_detailed(session_id, client=client, **kwargs)
20
2023-12-03 07:50:04+00:00
24k
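The zerolink wrappers above repeat one response-dispatch pattern: HTTP 200 returns the parsed body, 404 maps to None, and anything else decodes the body and raises APIError. A sketch of that pattern factored into a helper; the FakeResponse stand-in is hypothetical and only mimics the three attributes the wrappers touch:

from dataclasses import dataclass, field

class APIError(Exception):
    pass

@dataclass
class FakeResponse:  # stand-in for the generated client's detailed response
    status_code: int
    parsed: object = None
    content: bytes = b""

def dispatch(rep):
    if rep.status_code == 200:
        return rep.parsed
    if rep.status_code == 404:
        return None
    err = rep.content.decode("utf-8")
    raise APIError(err)

print(dispatch(FakeResponse(200, parsed={"ok": True})))  # {'ok': True}
print(dispatch(FakeResponse(404)))                       # None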
JunMa11/UHNSeg-Quiz
nnunetv2/training/nnUNetTrainer/variants/network_architecture/nnUNetTrainerNoDeepSupervision.py
[ { "identifier": "DC_and_BCE_loss", "path": "nnunetv2/training/loss/compound_losses.py", "snippet": "class DC_and_BCE_loss(nn.Module):\n def __init__(self, bce_kwargs, soft_dice_kwargs, weight_ce=1, weight_dice=1, use_ignore_label: bool = False,\n dice_class=MemoryEfficientSoftDiceLoss...
import torch from torch import autocast from nnunetv2.training.loss.compound_losses import DC_and_BCE_loss, DC_and_CE_loss from nnunetv2.training.loss.dice import get_tp_fp_fn_tn, MemoryEfficientSoftDiceLoss from nnunetv2.training.nnUNetTrainer.nnUNetTrainer import nnUNetTrainer from nnunetv2.utilities.helpers import dummy_context from nnunetv2.utilities.label_handling.label_handling import determine_num_input_channels from torch.nn.parallel import DistributedDataParallel as DDP
17,939
class nnUNetTrainerNoDeepSupervision(nnUNetTrainer): def _build_loss(self): if self.label_manager.has_regions: loss = DC_and_BCE_loss({}, {'batch_dice': self.configuration_manager.batch_dice, 'do_bg': True, 'smooth': 1e-5, 'ddp': self.is_ddp}, use_ignore_label=self.label_manager.ignore_label is not None, dice_class=MemoryEfficientSoftDiceLoss) else: loss = DC_and_CE_loss({'batch_dice': self.configuration_manager.batch_dice, 'smooth': 1e-5, 'do_bg': False, 'ddp': self.is_ddp}, {}, weight_ce=1, weight_dice=1, ignore_label=self.label_manager.ignore_label, dice_class=MemoryEfficientSoftDiceLoss) return loss def _get_deep_supervision_scales(self): return None def initialize(self): if not self.was_initialized: self.num_input_channels = determine_num_input_channels(self.plans_manager, self.configuration_manager, self.dataset_json) self.network = self.build_network_architecture(self.plans_manager, self.dataset_json, self.configuration_manager, self.num_input_channels, enable_deep_supervision=False).to(self.device) self.optimizer, self.lr_scheduler = self.configure_optimizers() # if ddp, wrap in DDP wrapper if self.is_ddp: self.network = torch.nn.SyncBatchNorm.convert_sync_batchnorm(self.network) self.network = DDP(self.network, device_ids=[self.local_rank]) self.loss = self._build_loss() self.was_initialized = True else: raise RuntimeError("You have called self.initialize even though the trainer was already initialized. " "That should not happen.") def set_deep_supervision_enabled(self, enabled: bool): pass def validation_step(self, batch: dict) -> dict: data = batch['data'] target = batch['target'] data = data.to(self.device, non_blocking=True) if isinstance(target, list): target = [i.to(self.device, non_blocking=True) for i in target] else: target = target.to(self.device, non_blocking=True) self.optimizer.zero_grad(set_to_none=True) # Autocast is a little bitch. # If the device_type is 'cpu' then it's slow as heck and needs to be disabled. # If the device_type is 'mps' then it will complain that mps is not implemented, even if enabled=False is set. Whyyyyyyy. (this is why we don't make use of enabled=False) # So autocast will only be active if we have a cuda device. with autocast(self.device.type, enabled=True) if self.device.type == 'cuda' else dummy_context(): output = self.network(data) del data l = self.loss(output, target) # the following is needed for online evaluation. Fake dice (green line) axes = [0] + list(range(2, output.ndim)) if self.label_manager.has_regions: predicted_segmentation_onehot = (torch.sigmoid(output) > 0.5).long() else: # no need for softmax output_seg = output.argmax(1)[:, None] predicted_segmentation_onehot = torch.zeros(output.shape, device=output.device, dtype=torch.float32) predicted_segmentation_onehot.scatter_(1, output_seg, 1) del output_seg if self.label_manager.has_ignore_label: if not self.label_manager.has_regions: mask = (target != self.label_manager.ignore_label).float() # CAREFUL that you don't rely on target after this line! target[target == self.label_manager.ignore_label] = 0 else: mask = 1 - target[:, -1:] # CAREFUL that you don't rely on target after this line! target = target[:, :-1] else: mask = None
class nnUNetTrainerNoDeepSupervision(nnUNetTrainer): def _build_loss(self): if self.label_manager.has_regions: loss = DC_and_BCE_loss({}, {'batch_dice': self.configuration_manager.batch_dice, 'do_bg': True, 'smooth': 1e-5, 'ddp': self.is_ddp}, use_ignore_label=self.label_manager.ignore_label is not None, dice_class=MemoryEfficientSoftDiceLoss) else: loss = DC_and_CE_loss({'batch_dice': self.configuration_manager.batch_dice, 'smooth': 1e-5, 'do_bg': False, 'ddp': self.is_ddp}, {}, weight_ce=1, weight_dice=1, ignore_label=self.label_manager.ignore_label, dice_class=MemoryEfficientSoftDiceLoss) return loss def _get_deep_supervision_scales(self): return None def initialize(self): if not self.was_initialized: self.num_input_channels = determine_num_input_channels(self.plans_manager, self.configuration_manager, self.dataset_json) self.network = self.build_network_architecture(self.plans_manager, self.dataset_json, self.configuration_manager, self.num_input_channels, enable_deep_supervision=False).to(self.device) self.optimizer, self.lr_scheduler = self.configure_optimizers() # if ddp, wrap in DDP wrapper if self.is_ddp: self.network = torch.nn.SyncBatchNorm.convert_sync_batchnorm(self.network) self.network = DDP(self.network, device_ids=[self.local_rank]) self.loss = self._build_loss() self.was_initialized = True else: raise RuntimeError("You have called self.initialize even though the trainer was already initialized. " "That should not happen.") def set_deep_supervision_enabled(self, enabled: bool): pass def validation_step(self, batch: dict) -> dict: data = batch['data'] target = batch['target'] data = data.to(self.device, non_blocking=True) if isinstance(target, list): target = [i.to(self.device, non_blocking=True) for i in target] else: target = target.to(self.device, non_blocking=True) self.optimizer.zero_grad(set_to_none=True) # Autocast is a little bitch. # If the device_type is 'cpu' then it's slow as heck and needs to be disabled. # If the device_type is 'mps' then it will complain that mps is not implemented, even if enabled=False is set. Whyyyyyyy. (this is why we don't make use of enabled=False) # So autocast will only be active if we have a cuda device. with autocast(self.device.type, enabled=True) if self.device.type == 'cuda' else dummy_context(): output = self.network(data) del data l = self.loss(output, target) # the following is needed for online evaluation. Fake dice (green line) axes = [0] + list(range(2, output.ndim)) if self.label_manager.has_regions: predicted_segmentation_onehot = (torch.sigmoid(output) > 0.5).long() else: # no need for softmax output_seg = output.argmax(1)[:, None] predicted_segmentation_onehot = torch.zeros(output.shape, device=output.device, dtype=torch.float32) predicted_segmentation_onehot.scatter_(1, output_seg, 1) del output_seg if self.label_manager.has_ignore_label: if not self.label_manager.has_regions: mask = (target != self.label_manager.ignore_label).float() # CAREFUL that you don't rely on target after this line! target[target == self.label_manager.ignore_label] = 0 else: mask = 1 - target[:, -1:] # CAREFUL that you don't rely on target after this line! target = target[:, :-1] else: mask = None
tp, fp, fn, _ = get_tp_fp_fn_tn(predicted_segmentation_onehot, target, axes=axes, mask=mask)
2
2023-12-04 19:43:14+00:00
24k
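Side note on the validation step quoted above: the pseudo-Dice bookkeeping hinges on turning raw logits into a one-hot segmentation via argmax plus scatter_. A minimal standalone sketch of that conversion (the tensor shapes are illustrative assumptions, not values from the record):

```python
import torch

# Toy shapes: batch of 2, 3 classes, 4x4 images.
logits = torch.randn(2, 3, 4, 4)      # raw network output; no softmax needed before argmax
seg = logits.argmax(1)[:, None]       # (B, 1, H, W) hard class indices
onehot = torch.zeros_like(logits)     # (B, C, H, W)
onehot.scatter_(1, seg, 1)            # write a 1 at each predicted class
assert onehot.sum(1).eq(1).all()      # exactly one active class per pixel
```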
opisaac9001/TTS-With-ooba-and-voice
TTS/tts/models/tortoise.py
[ { "identifier": "TorchMelSpectrogram", "path": "TTS/tts/layers/tortoise/arch_utils.py", "snippet": "class TorchMelSpectrogram(nn.Module):\n def __init__(\n self,\n filter_length=1024,\n hop_length=256,\n win_length=1024,\n n_mel_channels=80,\n mel_fmin=0,\n ...
import os import random import torch import torch.nn.functional as F import torchaudio from contextlib import contextmanager from dataclasses import dataclass from time import time from coqpit import Coqpit from tqdm import tqdm from TTS.tts.layers.tortoise.arch_utils import TorchMelSpectrogram from TTS.tts.layers.tortoise.audio_utils import denormalize_tacotron_mel, load_voice, wav_to_univnet_mel from TTS.tts.layers.tortoise.autoregressive import UnifiedVoice from TTS.tts.layers.tortoise.classifier import AudioMiniEncoderWithClassifierHead from TTS.tts.layers.tortoise.clvp import CLVP from TTS.tts.layers.tortoise.diffusion import SpacedDiffusion, get_named_beta_schedule, space_timesteps from TTS.tts.layers.tortoise.diffusion_decoder import DiffusionTts from TTS.tts.layers.tortoise.random_latent_generator import RandomLatentConverter from TTS.tts.layers.tortoise.tokenizer import VoiceBpeTokenizer from TTS.tts.layers.tortoise.vocoder import VocConf, VocType from TTS.tts.layers.tortoise.wav2vec_alignment import Wav2VecAlignment from TTS.tts.models.base_tts import BaseTTS from math import ceil
19,773
else: m = model.to(self.device) yield m m = model.cpu() def get_conditioning_latents( self, voice_samples, return_mels=False, latent_averaging_mode=0, original_tortoise=False, ): """ Transforms one or more voice_samples into a tuple (autoregressive_conditioning_latent, diffusion_conditioning_latent). These are expressive learned latents that encode aspects of the provided clips like voice, intonation, and acoustic properties. :param voice_samples: List of arbitrary reference clips, which should be *pairs* of torch tensors containing arbitrary kHz waveform data. :param latent_averaging_mode: 0/1/2 for following modes: 0 - latents will be generated as in original tortoise, using ~4.27s from each voice sample, averaging latent across all samples 1 - latents will be generated using (almost) entire voice samples, averaged across all the ~4.27s chunks 2 - latents will be generated using (almost) entire voice samples, averaged per voice sample """ assert latent_averaging_mode in [ 0, 1, 2, ], "latent_averaging mode has to be one of (0, 1, 2)" with torch.no_grad(): voice_samples = [[v.to(self.device) for v in ls] for ls in voice_samples] auto_conds = [] for ls in voice_samples: auto_conds.append(format_conditioning(ls[0], device=self.device, mel_norm_file=self.mel_norm_path)) auto_conds = torch.stack(auto_conds, dim=1) with self.temporary_cuda(self.autoregressive) as ar: auto_latent = ar.get_conditioning(auto_conds) diffusion_conds = [] DURS_CONST = self.args.duration_const for ls in voice_samples: # The diffuser operates at a sample rate of 24000 (except for the latent inputs) sample = torchaudio.functional.resample(ls[0], 22050, 24000) if original_tortoise else ls[1] if latent_averaging_mode == 0: sample = pad_or_truncate(sample, DURS_CONST) cond_mel = wav_to_univnet_mel( sample.to(self.device), do_normalization=False, device=self.device, ) diffusion_conds.append(cond_mel) else: if latent_averaging_mode == 2: temp_diffusion_conds = [] for chunk in range(ceil(sample.shape[1] / DURS_CONST)): current_sample = sample[:, chunk * DURS_CONST : (chunk + 1) * DURS_CONST] current_sample = pad_or_truncate(current_sample, DURS_CONST) cond_mel = wav_to_univnet_mel( current_sample.to(self.device), do_normalization=False, device=self.device, ) if latent_averaging_mode == 1: diffusion_conds.append(cond_mel) elif latent_averaging_mode == 2: temp_diffusion_conds.append(cond_mel) if latent_averaging_mode == 2: diffusion_conds.append(torch.stack(temp_diffusion_conds).mean(0)) diffusion_conds = torch.stack(diffusion_conds, dim=1) with self.temporary_cuda(self.diffusion) as diffusion: diffusion_latent = diffusion.get_conditioning(diffusion_conds) if return_mels: return auto_latent, diffusion_latent, auto_conds, diffusion_conds return auto_latent, diffusion_latent def get_random_conditioning_latents(self): # Lazy-load the RLG models. if self.rlg_auto is None: self.rlg_auto = RandomLatentConverter(1024).eval() self.rlg_auto.load_state_dict( torch.load( os.path.join(self.models_dir, "rlg_auto.pth"), map_location=torch.device("cpu"), ) ) self.rlg_diffusion = RandomLatentConverter(2048).eval() self.rlg_diffusion.load_state_dict( torch.load( os.path.join(self.models_dir, "rlg_diffuser.pth"), map_location=torch.device("cpu"), ) ) with torch.no_grad(): return self.rlg_auto(torch.tensor([0.0])), self.rlg_diffusion(torch.tensor([0.0])) def synthesize(self, text, config, speaker_id="random", voice_dirs=None, **kwargs): """Synthesize speech with the given input text. Args: text (str): Input text. 
config (TortoiseConfig): Config with inference parameters. speaker_id (str): One of the available speaker names. If `random`, it generates a random speaker. voice_dirs (List[str]): List of paths that host reference audio files for speakers. Defaults to None. **kwargs: Inference settings. See `inference()`. Returns: A dictionary of the output values with `wav` as output waveform, `deterministic_seed` as seed used at inference, `text_input` as text token IDs after tokenizer, `voice_samples` as samples used for cloning, `conditioning_latents` as latents used at inference. """ speaker_id = "random" if speaker_id is None else speaker_id if voice_dirs is not None: voice_dirs = [voice_dirs]
def pad_or_truncate(t, length): """ Utility function for forcing <t> to have the specified sequence length, whether by clipping it or padding it with 0s. """ tp = t[..., :length] if t.shape[-1] == length: tp = t elif t.shape[-1] < length: tp = F.pad(t, (0, length - t.shape[-1])) return tp def deterministic_state(seed=None): """ Sets the random seeds that tortoise uses to the current time() and returns that seed so results can be reproduced. """ seed = int(time()) if seed is None else seed torch.manual_seed(seed) random.seed(seed) # Can't currently set this because of CUBLAS. TODO: potentially enable it if necessary. # torch.use_deterministic_algorithms(True) return seed def load_discrete_vocoder_diffuser( trained_diffusion_steps=4000, desired_diffusion_steps=200, cond_free=True, cond_free_k=1, sampler="ddim", ): """ Helper function to load a GaussianDiffusion instance configured for use as a vocoder. """ return SpacedDiffusion( use_timesteps=space_timesteps(trained_diffusion_steps, [desired_diffusion_steps]), model_mean_type="epsilon", model_var_type="learned_range", loss_type="mse", betas=get_named_beta_schedule("linear", trained_diffusion_steps), conditioning_free=cond_free, conditioning_free_k=cond_free_k, sampler=sampler, ) def format_conditioning(clip, cond_length=132300, device="cuda", **kwargs): """ Converts the given conditioning signal to a MEL spectrogram and clips it as expected by the models. """ gap = clip.shape[-1] - cond_length if gap < 0: clip = F.pad(clip, pad=(0, abs(gap))) elif gap > 0: rand_start = random.randint(0, gap) clip = clip[:, rand_start : rand_start + cond_length] mel_clip = TorchMelSpectrogram(**kwargs)(clip.unsqueeze(0)).squeeze(0) return mel_clip.unsqueeze(0).to(device) def fix_autoregressive_output(codes, stop_token, complain=True): """ This function performs some padding on coded audio that fixes a mismatch issue between what the diffusion model was trained on and what the autoregressive code generator creates (which has no padding or end). This is highly specific to the DVAE being used, so this particular coding will not necessarily work if used with a different DVAE. This can be inferred by feeding a audio clip padded with lots of zeros on the end through the DVAE and copying out the last few codes. Failing to do this padding will produce speech with a harsh end that sounds like "BLAH" or similar. """ # Strip off the autoregressive stop token and add padding. stop_token_indices = (codes == stop_token).nonzero() if len(stop_token_indices) == 0: if complain: print( "No stop tokens found in one of the generated voice clips. This typically means the spoken audio is " "too long. In some cases, the output will still be good, though. Listen to it and if it is missing words, " "try breaking up your input text." ) return codes codes[stop_token_indices] = 83 stm = stop_token_indices.min().item() codes[stm:] = 83 if stm - 3 < codes.shape[0]: codes[-3] = 45 codes[-2] = 45 codes[-1] = 248 return codes def do_spectrogram_diffusion( diffusion_model, diffuser, latents, conditioning_latents, temperature=1, verbose=True, ): """ Uses the specified diffusion model to convert discrete codes into a spectrogram. """ with torch.no_grad(): output_seq_len = ( latents.shape[1] * 4 * 24000 // 22050 ) # This diffusion model converts from 22kHz spectrogram codes to a 24kHz spectrogram signal. 
output_shape = (latents.shape[0], 100, output_seq_len) precomputed_embeddings = diffusion_model.timestep_independent( latents, conditioning_latents, output_seq_len, False ) noise = torch.randn(output_shape, device=latents.device) * temperature mel = diffuser.sample_loop( diffusion_model, output_shape, noise=noise, model_kwargs={"precomputed_aligned_embeddings": precomputed_embeddings}, progress=verbose, ) return denormalize_tacotron_mel(mel)[:, :, :output_seq_len] def classify_audio_clip(clip, model_dir): """ Returns whether or not Tortoises' classifier thinks the given clip came from Tortoise. :param clip: torch tensor containing audio waveform data (get it from load_audio) :return: True if the clip was classified as coming from Tortoise and false if it was classified as real. """ classifier = AudioMiniEncoderWithClassifierHead( 2, spec_dim=1, embedding_dim=512, depth=5, downsample_factor=4, resnet_blocks=2, attn_blocks=4, num_attn_heads=4, base_channels=32, dropout=0, kernel_size=5, distribute_zero_label=False, ) classifier.load_state_dict(torch.load(os.path.join(model_dir, "classifier.pth"), map_location=torch.device("cpu"))) clip = clip.cpu().unsqueeze(0) results = F.softmax(classifier(clip), dim=-1) return results[0][0] def pick_best_batch_size_for_gpu(): """ Tries to pick a batch size that will fit in your GPU. These sizes aren't guaranteed to work, but they should give you a good shot. """ if torch.cuda.is_available(): _, available = torch.cuda.mem_get_info() availableGb = available / (1024**3) batch_size = 1 if availableGb > 14: batch_size = 16 elif availableGb > 10: batch_size = 8 elif availableGb > 7: batch_size = 4 return batch_size @dataclass class TortoiseAudioConfig(Coqpit): sample_rate: int = 22050 diffusion_sample_rate: int = 24000 output_sample_rate: int = 24000 @dataclass class TortoiseArgs(Coqpit): """A dataclass to represent Tortoise model arguments that define the model structure. Args: autoregressive_batch_size (int): The size of the auto-regressive batch. enable_redaction (bool, optional): Whether to enable redaction. Defaults to True. high_vram (bool, optional): Whether to use high VRAM. Defaults to False. kv_cache (bool, optional): Whether to use the kv_cache. Defaults to True. ar_checkpoint (str, optional): The checkpoint for the autoregressive model. Defaults to None. clvp_checkpoint (str, optional): The checkpoint for the ConditionalLatentVariablePerseq model. Defaults to None. diff_checkpoint (str, optional): The checkpoint for the DiffTTS model. Defaults to None. num_chars (int, optional): The maximum number of characters to generate. Defaults to 255. vocoder (VocType, optional): The vocoder to use for synthesis. Defaults to VocConf.Univnet. For UnifiedVoice model: ar_max_mel_tokens (int, optional): The maximum mel tokens for the autoregressive model. Defaults to 604. ar_max_text_tokens (int, optional): The maximum text tokens for the autoregressive model. Defaults to 402. ar_max_conditioning_inputs (int, optional): The maximum conditioning inputs for the autoregressive model. Defaults to 2. ar_layers (int, optional): The number of layers for the autoregressive model. Defaults to 30. ar_model_dim (int, optional): The model dimension for the autoregressive model. Defaults to 1024. ar_heads (int, optional): The number of heads for the autoregressive model. Defaults to 16. ar_number_text_tokens (int, optional): The number of text tokens for the autoregressive model. Defaults to 255. 
ar_start_text_token (int, optional): The start text token for the autoregressive model. Defaults to 255. ar_checkpointing (bool, optional): Whether to use checkpointing for the autoregressive model. Defaults to False. ar_train_solo_embeddings (bool, optional): Whether to train embeddings for the autoregressive model. Defaults to False. For DiffTTS model: diff_model_channels (int, optional): The number of channels for the DiffTTS model. Defaults to 1024. diff_num_layers (int, optional): The number of layers for the DiffTTS model. Defaults to 10. diff_in_channels (int, optional): The input channels for the DiffTTS model. Defaults to 100. diff_out_channels (int, optional): The output channels for the DiffTTS model. Defaults to 200. diff_in_latent_channels (int, optional): The input latent channels for the DiffTTS model. Defaults to 1024. diff_in_tokens (int, optional): The input tokens for the DiffTTS model. Defaults to 8193. diff_dropout (int, optional): The dropout percentage for the DiffTTS model. Defaults to 0. diff_use_fp16 (bool, optional): Whether to use fp16 for the DiffTTS model. Defaults to False. diff_num_heads (int, optional): The number of heads for the DiffTTS model. Defaults to 16. diff_layer_drop (int, optional): The layer dropout percentage for the DiffTTS model. Defaults to 0. diff_unconditioned_percentage (int, optional): The percentage of unconditioned inputs for the DiffTTS model. Defaults to 0. For ConditionalLatentVariablePerseq model: clvp_dim_text (int): The dimension of the text input for the CLVP module. Defaults to 768. clvp_dim_speech (int): The dimension of the speech input for the CLVP module. Defaults to 768. clvp_dim_latent (int): The dimension of the latent representation for the CLVP module. Defaults to 768. clvp_num_text_tokens (int): The number of text tokens used by the CLVP module. Defaults to 256. clvp_text_enc_depth (int): The depth of the text encoder in the CLVP module. Defaults to 20. clvp_text_seq_len (int): The maximum sequence length of the text input for the CLVP module. Defaults to 350. clvp_text_heads (int): The number of attention heads used by the text encoder in the CLVP module. Defaults to 12. clvp_num_speech_tokens (int): The number of speech tokens used by the CLVP module. Defaults to 8192. clvp_speech_enc_depth (int): The depth of the speech encoder in the CLVP module. Defaults to 20. clvp_speech_heads (int): The number of attention heads used by the speech encoder in the CLVP module. Defaults to 12. clvp_speech_seq_len (int): The maximum sequence length of the speech input for the CLVP module. Defaults to 430. clvp_use_xformers (bool): A flag indicating whether the model uses transformers in the CLVP module. Defaults to True. duration_const (int): A constant value used in the model. Defaults to 102400. 
""" autoregressive_batch_size: int = 1 enable_redaction: bool = False high_vram: bool = False kv_cache: bool = True ar_checkpoint: str = None clvp_checkpoint: str = None diff_checkpoint: str = None num_chars: int = 255 vocoder: VocType = VocConf.Univnet # UnifiedVoice params ar_max_mel_tokens: int = 604 ar_max_text_tokens: int = 402 ar_max_conditioning_inputs: int = 2 ar_layers: int = 30 ar_model_dim: int = 1024 ar_heads: int = 16 ar_number_text_tokens: int = 255 ar_start_text_token: int = 255 ar_checkpointing: bool = False ar_train_solo_embeddings: bool = False # DiffTTS params diff_model_channels: int = 1024 diff_num_layers: int = 10 diff_in_channels: int = 100 diff_out_channels: int = 200 diff_in_latent_channels: int = 1024 diff_in_tokens: int = 8193 diff_dropout: int = 0 diff_use_fp16: bool = False diff_num_heads: int = 16 diff_layer_drop: int = 0 diff_unconditioned_percentage: int = 0 # clvp params clvp_dim_text: int = 768 clvp_dim_speech: int = 768 clvp_dim_latent: int = 768 clvp_num_text_tokens: int = 256 clvp_text_enc_depth: int = 20 clvp_text_seq_len: int = 350 clvp_text_heads: int = 12 clvp_num_speech_tokens: int = 8192 clvp_speech_enc_depth: int = 20 clvp_speech_heads: int = 12 clvp_speech_seq_len: int = 430 clvp_use_xformers: bool = True # constants duration_const: int = 102400 class Tortoise(BaseTTS): """Tortoise model class. Currently only supports inference. Examples: >>> from TTS.tts.configs.tortoise_config import TortoiseConfig >>> from TTS.tts.models.tortoise import Tortoise >>> config = TortoiseConfig() >>> model = Tortoise.inif_from_config(config) >>> model.load_checkpoint(config, checkpoint_dir="paths/to/models_dir/", eval=True) """ def __init__(self, config: Coqpit): super().__init__(config, ap=None, tokenizer=None) self.mel_norm_path = None self.config = config self.ar_checkpoint = self.args.ar_checkpoint self.diff_checkpoint = self.args.diff_checkpoint # TODO: check if this is even needed self.models_dir = config.model_dir self.autoregressive_batch_size = ( pick_best_batch_size_for_gpu() if self.args.autoregressive_batch_size is None else self.args.autoregressive_batch_size ) self.enable_redaction = self.args.enable_redaction self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") if self.enable_redaction: self.aligner = Wav2VecAlignment() self.tokenizer = VoiceBpeTokenizer() self.autoregressive = UnifiedVoice( max_mel_tokens=self.args.ar_max_mel_tokens, max_text_tokens=self.args.ar_max_text_tokens, max_conditioning_inputs=self.args.ar_max_conditioning_inputs, layers=self.args.ar_layers, model_dim=self.args.ar_model_dim, heads=self.args.ar_heads, number_text_tokens=self.args.ar_number_text_tokens, start_text_token=self.args.ar_start_text_token, checkpointing=self.args.ar_checkpointing, train_solo_embeddings=self.args.ar_train_solo_embeddings, ).cpu() self.diffusion = DiffusionTts( model_channels=self.args.diff_model_channels, num_layers=self.args.diff_num_layers, in_channels=self.args.diff_in_channels, out_channels=self.args.diff_out_channels, in_latent_channels=self.args.diff_in_latent_channels, in_tokens=self.args.diff_in_tokens, dropout=self.args.diff_dropout, use_fp16=self.args.diff_use_fp16, num_heads=self.args.diff_num_heads, layer_drop=self.args.diff_layer_drop, unconditioned_percentage=self.args.diff_unconditioned_percentage, ).cpu() self.clvp = CLVP( dim_text=self.args.clvp_dim_text, dim_speech=self.args.clvp_dim_speech, dim_latent=self.args.clvp_dim_latent, num_text_tokens=self.args.clvp_num_text_tokens, 
text_enc_depth=self.args.clvp_text_enc_depth, text_seq_len=self.args.clvp_text_seq_len, text_heads=self.args.clvp_text_heads, num_speech_tokens=self.args.clvp_num_speech_tokens, speech_enc_depth=self.args.clvp_speech_enc_depth, speech_heads=self.args.clvp_speech_heads, speech_seq_len=self.args.clvp_speech_seq_len, use_xformers=self.args.clvp_use_xformers, ).cpu() self.vocoder = self.args.vocoder.value.constructor().cpu() # Random latent generators (RLGs) are loaded lazily. self.rlg_auto = None self.rlg_diffusion = None if self.args.high_vram: self.autoregressive = self.autoregressive.to(self.device) self.diffusion = self.diffusion.to(self.device) self.clvp = self.clvp.to(self.device) self.vocoder = self.vocoder.to(self.device) self.high_vram = self.args.high_vram @contextmanager def temporary_cuda(self, model): if self.high_vram: yield model else: m = model.to(self.device) yield m m = model.cpu() def get_conditioning_latents( self, voice_samples, return_mels=False, latent_averaging_mode=0, original_tortoise=False, ): """ Transforms one or more voice_samples into a tuple (autoregressive_conditioning_latent, diffusion_conditioning_latent). These are expressive learned latents that encode aspects of the provided clips like voice, intonation, and acoustic properties. :param voice_samples: List of arbitrary reference clips, which should be *pairs* of torch tensors containing arbitrary kHz waveform data. :param latent_averaging_mode: 0/1/2 for following modes: 0 - latents will be generated as in original tortoise, using ~4.27s from each voice sample, averaging latent across all samples 1 - latents will be generated using (almost) entire voice samples, averaged across all the ~4.27s chunks 2 - latents will be generated using (almost) entire voice samples, averaged per voice sample """ assert latent_averaging_mode in [ 0, 1, 2, ], "latent_averaging mode has to be one of (0, 1, 2)" with torch.no_grad(): voice_samples = [[v.to(self.device) for v in ls] for ls in voice_samples] auto_conds = [] for ls in voice_samples: auto_conds.append(format_conditioning(ls[0], device=self.device, mel_norm_file=self.mel_norm_path)) auto_conds = torch.stack(auto_conds, dim=1) with self.temporary_cuda(self.autoregressive) as ar: auto_latent = ar.get_conditioning(auto_conds) diffusion_conds = [] DURS_CONST = self.args.duration_const for ls in voice_samples: # The diffuser operates at a sample rate of 24000 (except for the latent inputs) sample = torchaudio.functional.resample(ls[0], 22050, 24000) if original_tortoise else ls[1] if latent_averaging_mode == 0: sample = pad_or_truncate(sample, DURS_CONST) cond_mel = wav_to_univnet_mel( sample.to(self.device), do_normalization=False, device=self.device, ) diffusion_conds.append(cond_mel) else: if latent_averaging_mode == 2: temp_diffusion_conds = [] for chunk in range(ceil(sample.shape[1] / DURS_CONST)): current_sample = sample[:, chunk * DURS_CONST : (chunk + 1) * DURS_CONST] current_sample = pad_or_truncate(current_sample, DURS_CONST) cond_mel = wav_to_univnet_mel( current_sample.to(self.device), do_normalization=False, device=self.device, ) if latent_averaging_mode == 1: diffusion_conds.append(cond_mel) elif latent_averaging_mode == 2: temp_diffusion_conds.append(cond_mel) if latent_averaging_mode == 2: diffusion_conds.append(torch.stack(temp_diffusion_conds).mean(0)) diffusion_conds = torch.stack(diffusion_conds, dim=1) with self.temporary_cuda(self.diffusion) as diffusion: diffusion_latent = diffusion.get_conditioning(diffusion_conds) if return_mels: return 
auto_latent, diffusion_latent, auto_conds, diffusion_conds return auto_latent, diffusion_latent def get_random_conditioning_latents(self): # Lazy-load the RLG models. if self.rlg_auto is None: self.rlg_auto = RandomLatentConverter(1024).eval() self.rlg_auto.load_state_dict( torch.load( os.path.join(self.models_dir, "rlg_auto.pth"), map_location=torch.device("cpu"), ) ) self.rlg_diffusion = RandomLatentConverter(2048).eval() self.rlg_diffusion.load_state_dict( torch.load( os.path.join(self.models_dir, "rlg_diffuser.pth"), map_location=torch.device("cpu"), ) ) with torch.no_grad(): return self.rlg_auto(torch.tensor([0.0])), self.rlg_diffusion(torch.tensor([0.0])) def synthesize(self, text, config, speaker_id="random", voice_dirs=None, **kwargs): """Synthesize speech with the given input text. Args: text (str): Input text. config (TortoiseConfig): Config with inference parameters. speaker_id (str): One of the available speaker names. If `random`, it generates a random speaker. voice_dirs (List[str]): List of paths that host reference audio files for speakers. Defaults to None. **kwargs: Inference settings. See `inference()`. Returns: A dictionary of the output values with `wav` as output waveform, `deterministic_seed` as seed used at inference, `text_input` as text token IDs after tokenizer, `voice_samples` as samples used for cloning, `conditioning_latents` as latents used at inference. """ speaker_id = "random" if speaker_id is None else speaker_id if voice_dirs is not None: voice_dirs = [voice_dirs]
voice_samples, conditioning_latents = load_voice(speaker_id, voice_dirs)
2
2023-11-29 08:15:06+00:00
24k
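The pad_or_truncate helper quoted in this record has a simple contract: clip or zero-pad the last dimension to a fixed length. A compact restatement with a usage check (the waveform sizes below are made up for illustration):

```python
import torch
import torch.nn.functional as F

def pad_or_truncate(t: torch.Tensor, length: int) -> torch.Tensor:
    """Force the last dimension of t to `length` by clipping or zero-padding."""
    if t.shape[-1] >= length:
        return t[..., :length]
    return F.pad(t, (0, length - t.shape[-1]))

wav = torch.randn(1, 90000)
assert pad_or_truncate(wav, 102400).shape == (1, 102400)  # zero-padded up
assert pad_or_truncate(wav, 4096).shape == (1, 4096)      # clipped down
```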
magic-research/magic-animate
magicanimate/pipelines/pipeline_animation.py
[ { "identifier": "UNet3DConditionModel", "path": "magicanimate/models/unet_controlnet.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,...
import inspect, math import numpy as np import torch import torch.distributed as dist from typing import Callable, List, Optional, Union from dataclasses import dataclass from PIL import Image from tqdm import tqdm from diffusers.utils import is_accelerate_available from packaging import version from transformers import CLIPTextModel, CLIPTokenizer from diffusers.configuration_utils import FrozenDict from diffusers.models import AutoencoderKL from diffusers.pipeline_utils import DiffusionPipeline from diffusers.schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from diffusers.utils import deprecate, logging, BaseOutput from einops import rearrange from magicanimate.models.unet_controlnet import UNet3DConditionModel from magicanimate.models.controlnet import ControlNetModel from magicanimate.models.mutual_self_attention import ReferenceAttentionControl from magicanimate.pipelines.context import ( get_context_scheduler, get_total_steps ) from magicanimate.utils.util import get_tensor_interpolation_method from accelerate import cpu_offload
16,750
# ************************************************************************* # This file may have been modified by Bytedance Inc. (“Bytedance Inc.'s Mo- # difications”). All Bytedance Inc.'s Modifications are Copyright (2023) B- # ytedance Inc.. # ************************************************************************* # Adapted from https://github.com/showlab/Tune-A-Video/blob/main/tuneavideo/pipelines/pipeline_tuneavideo.py # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TODO: 1. support multi-controlnet 2. [DONE] support DDIM inversion 3. support Prompt-to-prompt """ logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class AnimationPipelineOutput(BaseOutput): videos: Union[torch.Tensor, np.ndarray] class AnimationPipeline(DiffusionPipeline): _optional_components = [] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet3DConditionModel,
controlnet: ControlNetModel,
1
2023-11-21 08:33:54+00:00
24k
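This pipeline's import list pulls in is_accelerate_available and cpu_offload from accelerate. For context, here is a minimal sketch of the sequential CPU-offload pattern those imports usually serve in diffusers-style pipelines; the helper name and the exact module list are assumptions for illustration, not the repository's implementation:

```python
import torch
from accelerate import cpu_offload

def enable_sequential_cpu_offload(pipe, gpu_id: int = 0):
    # Hypothetical helper: keep each sub-model on CPU and hook it so it is
    # moved to the GPU only for its own forward pass (less VRAM, slower).
    device = torch.device(f"cuda:{gpu_id}")
    for module in (pipe.unet, pipe.text_encoder, pipe.vae):
        if module is not None:
            cpu_offload(module, execution_device=device)
```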
wenquanlu/HandRefiner
cldm/cldm.py
[ { "identifier": "conv_nd", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def conv_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D convolution module.\n \"\"\"\n if dims == 1:\n return nn.Conv1d(*args, **kwargs)\n elif dims == 2:\n return nn.Conv2d(...
import einops import torch import torch as th import torch.nn as nn from ldm.modules.diffusionmodules.util import ( conv_nd, linear, zero_module, timestep_embedding, ) from einops import rearrange, repeat from torchvision.utils import make_grid from ldm.modules.attention import SpatialTransformer from ldm.modules.diffusionmodules.openaimodel import UNetModel, TimestepEmbedSequential, ResBlock, Downsample, AttentionBlock from ldm.models.diffusion.ddpm import LatentDiffusion, LatentInpaintDiffusion from ldm.util import log_txt_as_img, exists, instantiate_from_config from ldm.models.diffusion.ddim import DDIMSampler from omegaconf.listconfig import ListConfig
21,272
class ControlledUnetModel(UNetModel): def forward(self, x, timesteps=None, context=None, control=None, c_concat=None, only_mid_control=False, **kwargs): hs = [] with torch.no_grad():
t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)
3
2023-11-24 10:19:23+00:00
24k
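The forward pass in this record starts from timestep_embedding, imported from ldm.modules.diffusionmodules.util. A sketch of the standard sinusoidal formulation that helper implements; the real function also handles odd embedding widths and a repeat_only branch, which are omitted here:

```python
import math
import torch

def timestep_embedding(timesteps: torch.Tensor, dim: int, max_period: int = 10000) -> torch.Tensor:
    # Geometrically spaced frequencies; cosine half then sine half.
    half = dim // 2
    freqs = torch.exp(
        -math.log(max_period)
        * torch.arange(half, dtype=torch.float32, device=timesteps.device)
        / half
    )
    args = timesteps[:, None].float() * freqs[None]
    return torch.cat([torch.cos(args), torch.sin(args)], dim=-1)

emb = timestep_embedding(torch.tensor([0, 10, 999]), 320)  # -> shape (3, 320)
```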
eth-sri/language-model-arithmetic
src/model_arithmetic/evaluation.py
[ { "identifier": "BaseClass", "path": "src/model_arithmetic/base.py", "snippet": "class BaseClass:\n \"\"\"\n Base class for providing a serialization and deserialization mechanism.\n \"\"\"\n def __init__(self, **kwargs):\n \"\"\"\n Instantiates the base class with keyword argu...
from .base import BaseClass from loguru import logger from transformers import set_seed, Trainer, TrainingArguments, DataCollatorWithPadding from .dataset import CustomDataset from sklearn.model_selection import train_test_split from .basic_model_loader import load_model, load_tokenizer from .model_arithmetic import ModelArithmetic from googleapiclient import discovery from dotenv import load_dotenv from torch.utils.data import DataLoader from .utils import ENABLE_LOGGING, log from lm_eval import evaluator import pandas as pd import numpy as np import torch import os import json import time
16,094
self.dataset.at[index, "perspective"] = value scores.append(value) time.sleep(0.1) result = np.mean(scores) log(logger.info, f"Perspective is {result:.6f}") self.output["perspective"] = { "mean": result, "std": np.std(scores), } return result def generate_samples(self, max_tokens, batch_size=1, temperature=1.0, top_p=1.0, top_k=0, stop_texts=None, speculation=False, **kwargs): """ Generates samples from the model. Args: max_tokens (int): The maximum number of tokens to be used for evaluation. batch_size (int, optional): The batch size to be used for evaluation. temperature (float, optional): The temperature to be used for sampling. top_p (float, optional): The top-p value to be used for sampling. top_k (int, optional): The top-k value to be used for sampling. stop_texts (list, optional): The list of texts at which sampling should be stopped speculation (bool, optional): Whether to use speculation or not. **kwargs: Additional keyword arguments. """ start_time = time.time() if "generated" not in self.dataset.columns: texts = self.generator.generate_text(self.dataset["input"].tolist(), max_length=max_tokens, batch_size=batch_size, temperature=temperature, top_p=top_p, top_k=top_k, stop_texts=stop_texts, do_speculation=speculation) self.dataset["generated"] = texts end_time = time.time() self.output["time"] = { "total_time": end_time - start_time, "time_per_sample": (end_time - start_time) / len(self.dataset), "dataset_size": len(self.dataset), "max_tokens": max_tokens, "batch_size": batch_size } def save_generated(self, output_location): """ Saves the generated samples to the specified location. Args: output_location (string): The location to save the generated samples. """ log(logger.debug, f"Saving generated samples to {output_location}") self.dataset.to_csv(output_location) def get_perplexity(self, dataset, model, tokenizer, **kwargs): """ Calculates the perplexity of the generated sentences. Args: dataset (pd.DataFrame): The dataset to be used for evaluation. Has columns "input" (for input text), "generated" (for generated text). model (PreTrainedModel): The model to be evaluated. tokenizer (Tokenizer): The tokenizer to be used for tokenizing the sentences. **kwargs: Additional keyword arguments. 
""" perplexities = [] sum_nllos = 0 n_tokens = 0 for index, sample in dataset.iterrows(): input_sentence = sample['input'] sentence = sample['generated'] if len(sentence) == 0: continue combined_sentence = input_sentence + sentence encodings = tokenizer(combined_sentence, return_tensors='pt') input_ids = encodings['input_ids'].to(model.device) attention_mask = encodings['attention_mask'].to(model.device) input_encodings = tokenizer(input_sentence, return_tensors='pt') input_ids_inputs = input_encodings['input_ids'] input_length = input_ids_inputs.size(1) with torch.no_grad(): output = model(input_ids, labels=input_ids, attention_mask=attention_mask) logprobs = output.logits[0, :].log_softmax(dim=-1) loss_func = torch.nn.NLLLoss(ignore_index=-100, reduction='sum') loss = loss_func(logprobs[..., input_length:-1, :].contiguous(), input_ids[0, :][..., input_length+1:].contiguous()) loss = loss.to(torch.float32).detach().cpu().numpy() n_tokens_here = input_ids.shape[-1] - input_length - 1 if n_tokens_here > 0: perplexity = np.exp(loss / n_tokens_here) sum_nllos += loss n_tokens += n_tokens_here if not np.isnan(perplexity): perplexities.append(perplexity) average = np.mean(perplexities) median = np.median(perplexities) real = np.exp(sum_nllos / n_tokens) return { "average": average, "median": median, "correct_perplexity": real } def perplexity(self, dataset, model_name_fluency="gpt2-xl", dtype=torch.float16, **kwargs): """ Calculates the perplexity of the generated sentences. Args: dataset (pd.DataFrame): The dataset to be used for evaluation. Has columns "input" (for input text), "generated" (for generated text). model_name_fluency (string, optional): The name of the model to be used for calculating fluency. dtype (torch.dtype, optional): The data type to be used for the model. **kwargs: Additional keyword arguments. """ log(logger.info, "Calculating fluency") if "perplexity" in self.output: log(logger.info, f"Reloading perplexity. Perplexity is {self.output['perplexity']}") return self.output["perplexity"]
load_dotenv() class Evaluation(BaseClass): """ This class is used for evaluating a model's performance on a given dataset. It includes methods for preparing the dataset, evaluating the model, generating samples, calculating perplexity and faithfulness of the model. """ def __init__(self, generator=None, dataset_location=None, dataset=None, train_dataset=None, train_dataset_location=None, n_input_words=5, bleurt_checkpoint="../models/BLEURT-20", **kwargs): """ Initialize the Evaluation class with the given parameters. Args: generator (ModelArithmetic, optional): The model to be evaluated. dataset_location (string, optional): The location of the dataset to be used for evaluation. Either this or dataset should be provided. Dataset should contain column "text", "input", "output and "label" ("label", "input", "output" optional) dataset (pd.DataFrame, optional): The dataset to be used for evaluation. Either this or dataset_location should be provided. Dataset should contain column "text", "input", "output and "label" ("label", "input", "output" optional) train_dataset (pd.DataFrame, optional): The dataset to be used for training the model. Only used when calculating the faithfulness of the model and when the downstream model still needs to be finetuned. train_dataset_location (string, optional): The location of the dataset to be used for training the model. n_input_words (int, optional): The number of input words to be used in the generator. Only used if the dataset does not contain the column "input". bleurt_checkpoint (string, optional): The location of the BLEURT model checkpoint. **kwargs: Additional keyword arguments. """ self.has_input_task = True self.dataset = None if dataset is not None: self.dataset = dataset.copy() elif dataset_location is not None: self.dataset = pd.read_csv(dataset_location, escapechar='\\', lineterminator="\n") if train_dataset is not None: self.train_dataset = train_dataset elif train_dataset_location is not None: self.train_dataset = pd.read_csv(train_dataset_location, escapechar='\\', lineterminator="\n") else: self.train_dataset = None if self.dataset is not None: self.prepare_dataset(n_input_words) super().__init__(**kwargs, dataset_location=dataset_location, generator=generator, has_input_task=self.has_input_task, output=dict(), extra_kwargs=None, bleurt_checkpoint=bleurt_checkpoint, train_dataset_location=None) if isinstance(generator, ModelArithmetic): # If we don't do it this way, we can't store the evaluator because ModelArithmetic is not serializable del self.kwargs["generator"] self.kwargs["formula"] = generator.formula self.formula = generator.formula def prepare_dataset(self, n_input_words=5): """ Prepares the dataset for evaluation. If the dataset does not have an input column, it assumes the input is the first n_input_words words of the output. If the dataset does not have a label column, it assumes all labels are 1. Args: n_input_words (int): The number of input words to be used. 
""" log(logger.debug, "Preparing dataset") if "input" not in self.dataset.columns: log(logger.debug, f"No input column found, assuming input is the first {n_input_words} words of the output") self.dataset["input"] = self.dataset["text"].apply(lambda x: " ".join(x.split()[:n_input_words])) self.dataset["output"] = self.dataset["text"].apply(lambda x: " " + " ".join(x.split()[n_input_words:])) self.has_input_task = False if "label" not in self.dataset.columns: log(logger.debug, "No label column found, assuming all labels are 1") self.dataset["label"] = 1 def evaluate_lm_eval(self, model, task_name, batch_size, num_fewshot, model_args, no_cache=False, limit=None, write_out=False, output_folder=None, **kwargs): """ Evaluates the model using the lm_eval package. Args: model (PreTrainedModel): The model to be evaluated. task_name (string): The name of the task for evaluation. batch_size (int): The batch size to be used for evaluation. num_fewshot (int): The number of fewshot examples to be used for evaluation. model_args (dict): The arguments to be passed to the model. no_cache (bool, optional): Whether to use cached results or not. limit (int, optional): The maximum number of examples to be used for evaluation. write_out (bool, optional): Whether to write out the results or not. output_folder (string, optional): The folder to write out the results. **kwargs: Additional keyword arguments. """ try: except ImportError: raise ImportError("Please install lm_eval to run this function") results = evaluator.simple_evaluate( model=model, model_args=model_args, tasks=[task_name], num_fewshot=num_fewshot, batch_size=batch_size, device="cuda" if torch.cuda.is_available() else "cpu", no_cache=no_cache, limit=limit, write_out=write_out, output_base_path=output_folder ) if "lm_eval" in self.output: self.output["lm_eval"][task_name] = results else: self.output["lm_eval"] = {task_name: results} def evaluate(self, max_tokens=128, store_file=None, reload=True, dataset_file=None, reload_data=True, preserve_memory=False, batch_size=1, do_perspective=True, speculation=False, only_faithfulness=False, **kwargs): """ Evaluates the model on the dataset and calculates the perplexity and faithfulness Args: max_tokens (int, optional): The maximum number of tokens to be used for evaluation. store_file (string, optional): The file to store the evaluation results. reload (bool, optional): Whether to reload the dataset or not if it was stored before. dataset_file (string, optional): The file containing the dataset. If path exists, dataset is loaded from path. If path does not exist, dataset is saved to path. reload_data (bool, optional): Whether to reload the data or not if it was stored before. preserve_memory (bool, optional): Whether to preserve memory or not. batch_size (int, optional): The batch size to be used for evaluation. do_perspective (bool, optional): Whether to calculate the perspective score or not. speculation (bool, optional): Whether to use speculation or not. **kwargs: Additional keyword arguments. 
""" if store_file is not None: os.makedirs(os.path.dirname(store_file), exist_ok=True) if dataset_file is not None: os.makedirs(os.path.dirname(dataset_file), exist_ok=True) if (reload_data or reload) and dataset_file is not None and os.path.isfile(dataset_file): log(logger.debug, f"Reloading dataset from {dataset_file}") self.dataset = pd.read_csv(dataset_file, escapechar='\\', lineterminator="\n") self.dataset.fillna("", inplace=True) else: log(logger.debug,"Generating samples") self.generate_samples(max_tokens, batch_size=batch_size, speculation=speculation, **kwargs) if dataset_file is not None: log(logger.info, f"Saving dataset to {dataset_file}") self.dataset.to_csv(dataset_file, index=False, escapechar='\\', lineterminator="\n") if reload and store_file is not None and os.path.isfile(store_file): self.output = json.load(open(store_file))["output"] log(logger.info, f"Reloading output. Output is {self.output}") if not reload: self.output = dict() self.dataset.dropna(inplace=True) if preserve_memory and hasattr(self.generator, "clear_memory"): # can have weird side effects! self.generator.clear_memory() if not only_faithfulness: self.perplexity(self.dataset, **kwargs) else: del self.output['faithfulness'] self.faithfulness_multiple(self.dataset, **kwargs) if do_perspective and not only_faithfulness: self.perspective(**kwargs) if dataset_file is not None: log(logger.info, f"Saving dataset to {dataset_file}") self.dataset.to_csv(dataset_file, index=False, escapechar='\\', lineterminator="\n") self.extra_kwargs = {kwarg: str(kwargs[kwarg]) for kwarg in kwargs} self.extra_kwargs["max_tokens"] = max_tokens if store_file is not None: log(logger.info, f"Saving output to {store_file}") self.save(store_file) return self.output def perspective(self, classification_with_input=True, **kwargs): """ Calculates the perspective score of the generated sentences. Args: classification_with_input (bool, optional): Whether to use the input of the sentence for calculating the perspective score or not. **kwargs: Additional keyword arguments. """ retries = 0 if "perspective" in self.output: log(logger.info, f"Reloading perspective. 
Perspective is {self.output['perspective']}") return self.output["perspective"] log(logger.info, "Calculating perspective") service = discovery.build('commentanalyzer', 'v1alpha1', developerKey=os.environ["PERSPECTIVE_API_KEY"], discoveryServiceUrl="https://commentanalyzer.googleapis.com/$discovery/rest?version=v1alpha1", static_discovery=False) scores = [] self.dataset["perspective"] = [None] * len(self.dataset) for index, sample in self.dataset.iterrows(): sentence = sample['generated'] if classification_with_input: sentence = sample['input'] + sentence analyze_request = { 'comment': { 'text': sentence }, 'languages': ["en"], 'requestedAttributes': {'TOXICITY': {}} } while retries < 10 ** 5: try: response = service.comments().analyze(body=analyze_request).execute() break except Exception as e: if "LANGUAGE_NOT_SUPPORTED_BY_ATTRIBUTE" in str(e): continue log(logger.warning, f"Exception {e} occurred, retrying...") retries += 1 time.sleep(10) if retries == 10 ** 5: log(logger.error, "Could not get all perspective scores") break value = response["attributeScores"]["TOXICITY"]["summaryScore"]["value"] self.dataset.at[index, "perspective"] = value scores.append(value) time.sleep(0.1) result = np.mean(scores) log(logger.info, f"Perspective is {result:.6f}") self.output["perspective"] = { "mean": result, "std": np.std(scores), } return result def generate_samples(self, max_tokens, batch_size=1, temperature=1.0, top_p=1.0, top_k=0, stop_texts=None, speculation=False, **kwargs): """ Generates samples from the model. Args: max_tokens (int): The maximum number of tokens to be used for evaluation. batch_size (int, optional): The batch size to be used for evaluation. temperature (float, optional): The temperature to be used for sampling. top_p (float, optional): The top-p value to be used for sampling. top_k (int, optional): The top-k value to be used for sampling. stop_texts (list, optional): The list of texts at which sampling should be stopped speculation (bool, optional): Whether to use speculation or not. **kwargs: Additional keyword arguments. """ start_time = time.time() if "generated" not in self.dataset.columns: texts = self.generator.generate_text(self.dataset["input"].tolist(), max_length=max_tokens, batch_size=batch_size, temperature=temperature, top_p=top_p, top_k=top_k, stop_texts=stop_texts, do_speculation=speculation) self.dataset["generated"] = texts end_time = time.time() self.output["time"] = { "total_time": end_time - start_time, "time_per_sample": (end_time - start_time) / len(self.dataset), "dataset_size": len(self.dataset), "max_tokens": max_tokens, "batch_size": batch_size } def save_generated(self, output_location): """ Saves the generated samples to the specified location. Args: output_location (string): The location to save the generated samples. """ log(logger.debug, f"Saving generated samples to {output_location}") self.dataset.to_csv(output_location) def get_perplexity(self, dataset, model, tokenizer, **kwargs): """ Calculates the perplexity of the generated sentences. Args: dataset (pd.DataFrame): The dataset to be used for evaluation. Has columns "input" (for input text), "generated" (for generated text). model (PreTrainedModel): The model to be evaluated. tokenizer (Tokenizer): The tokenizer to be used for tokenizing the sentences. **kwargs: Additional keyword arguments. 
""" perplexities = [] sum_nllos = 0 n_tokens = 0 for index, sample in dataset.iterrows(): input_sentence = sample['input'] sentence = sample['generated'] if len(sentence) == 0: continue combined_sentence = input_sentence + sentence encodings = tokenizer(combined_sentence, return_tensors='pt') input_ids = encodings['input_ids'].to(model.device) attention_mask = encodings['attention_mask'].to(model.device) input_encodings = tokenizer(input_sentence, return_tensors='pt') input_ids_inputs = input_encodings['input_ids'] input_length = input_ids_inputs.size(1) with torch.no_grad(): output = model(input_ids, labels=input_ids, attention_mask=attention_mask) logprobs = output.logits[0, :].log_softmax(dim=-1) loss_func = torch.nn.NLLLoss(ignore_index=-100, reduction='sum') loss = loss_func(logprobs[..., input_length:-1, :].contiguous(), input_ids[0, :][..., input_length+1:].contiguous()) loss = loss.to(torch.float32).detach().cpu().numpy() n_tokens_here = input_ids.shape[-1] - input_length - 1 if n_tokens_here > 0: perplexity = np.exp(loss / n_tokens_here) sum_nllos += loss n_tokens += n_tokens_here if not np.isnan(perplexity): perplexities.append(perplexity) average = np.mean(perplexities) median = np.median(perplexities) real = np.exp(sum_nllos / n_tokens) return { "average": average, "median": median, "correct_perplexity": real } def perplexity(self, dataset, model_name_fluency="gpt2-xl", dtype=torch.float16, **kwargs): """ Calculates the perplexity of the generated sentences. Args: dataset (pd.DataFrame): The dataset to be used for evaluation. Has columns "input" (for input text), "generated" (for generated text). model_name_fluency (string, optional): The name of the model to be used for calculating fluency. dtype (torch.dtype, optional): The data type to be used for the model. **kwargs: Additional keyword arguments. """ log(logger.info, "Calculating fluency") if "perplexity" in self.output: log(logger.info, f"Reloading perplexity. Perplexity is {self.output['perplexity']}") return self.output["perplexity"]
tokenizer = load_tokenizer(model_name_fluency)
3
2023-11-21 20:01:08+00:00
24k
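The get_perplexity method in this record reports two aggregates: the mean of per-sample perplexities and a corpus-level "correct_perplexity", the exponential of total negative log-likelihood over total token count. A toy comparison with invented numbers shows why the two generally differ:

```python
import numpy as np

# (summed negative log-likelihood, token count) per generated sample; values invented.
per_sample = [(5.2, 12), (30.1, 40), (8.7, 9)]

mean_of_ppls = np.mean([np.exp(nll / n) for nll, n in per_sample])
corpus_ppl = np.exp(sum(nll for nll, _ in per_sample) /
                    sum(n for _, n in per_sample))
print(mean_of_ppls, corpus_ppl)  # ~2.10 vs ~2.06 here; weighting by tokens matters
```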
HeliosZhao/Animate124
dnerf/utils.py
[ { "identifier": "save_tensor2image", "path": "nerf/utils.py", "snippet": "def save_tensor2image(x: torch.Tensor, path, channel_last=False, quality=75, **kwargs):\n # assume the input x is channel last\n # ipdb.set_trace()\n # if x.ndim == 4:\n # if channel_last:\n # x = x.perm...
import os import glob import tqdm import random import logging import gc import numpy as np import imageio, imageio_ffmpeg import time import cv2 import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F import torch.distributed as dist import torchvision.transforms.functional as TF import ipdb import copy from torch import Tensor from torch.utils.tensorboard import SummaryWriter from torchvision.utils import make_grid from torchmetrics.functional import pearson_corrcoef from nerf.utils import save_tensor2image, nonzero_normalize_depth, Trainer from einops import rearrange from nerf.utils import custom_meshgrid, safe_normalize from dnerf.network_4dgrid import NeRFNetwork
21,277
save_guidance_zero123_path = os.path.join(self.opt.workspace, 'guidance_zero123', f'train_step{self.global_step}_guidance.jpg') if self.opt.save_guidance_every > 0 and self.global_step % self.opt.save_guidance_every ==0 else None polar = data['polar'] azimuth = data['azimuth'] radius = data['radius'] # input_3dprior B,3,H,W # ipdb.set_trace() input_3dprior = pred_rgb[:,0] loss_zero123 = self.guidance['zero123'].train_step(self.embeddings['zero123']['default'], input_3dprior, polar, azimuth, radius, guidance_scale=self.opt.guidance_scale['zero123'], as_latent=as_latent, grad_scale=self.opt.lambda_guidance['zero123'], save_guidance_path=save_guidance_zero123_path) if 'clip' in self.guidance: # empirical, far view should apply smaller CLIP loss lambda_guidance = 10 * (1 - abs(azimuth) / 180) * self.opt.lambda_guidance['clip'] loss_clip = self.guidance['clip'].train_step(self.embeddings['clip'], pred_rgb, grad_scale=lambda_guidance) loss += loss_sds + loss_if + loss_zero123 + loss_clip + loss_sr + loss_cn # regularizations if not self.opt.dmtet: if self.opt.lambda_opacity > 0: # 0 loss_opacity = self.opt.lambda_opacity * (outputs['weights_sum'] ** 2).mean() reg_losses_dict['loss_opacity'] = loss_opacity.item() if self.opt.lambda_entropy > 0: # 1e-3 lambda_entropy = self.opt.lambda_entropy * \ min(1, 2 * self.global_step / self.opt.iters) alphas = outputs['weights'].clamp(1e-5, 1 - 1e-5) # alphas = alphas ** 2 # skewed entropy, favors 0 over 1 loss_entropy = lambda_entropy * (- alphas * torch.log2(alphas) - (1 - alphas) * torch.log2(1 - alphas)).mean() reg_losses_dict['loss_entropy'] = loss_entropy.item() if self.opt.lambda_normal_smooth > 0 and 'normal_image' in outputs: # 0.5 # no image in sd-dreamfusion should be 0 pred_vals = outputs['normal_image'].reshape(-1, H, W, 3) # BF,H,W,3 # total-variation loss_smooth = (pred_vals[:, 1:, :, :] - pred_vals[:, :-1, :, :]).square().mean() + \ (pred_vals[:, :, 1:, :] - pred_vals[:, :, :-1, :]).square().mean() loss_smooth = self.opt.lambda_normal_smooth * loss_smooth reg_losses_dict['loss_smooth'] = loss_smooth.item() if self.opt.lambda_normal_smooth2d > 0 and 'normal_image' in outputs: # 0.5 # no image in sd-dreamfusion should be 0 pred_vals = outputs['normal_image'].reshape( -1, H, W, 3).permute(0,3,1,2).contiguous() # BF,3,H,W smoothed_vals = TF.gaussian_blur(pred_vals, kernel_size=9) loss_smooth2d = self.opt.lambda_normal_smooth2d * F.mse_loss(pred_vals, smoothed_vals) reg_losses_dict['loss_smooth2d'] = loss_smooth2d.item() if self.opt.lambda_orient > 0 and 'loss_orient' in outputs: # 1e-2 loss_orient = self.opt.lambda_orient * outputs['loss_orient'].mean() reg_losses_dict['loss_orient'] = loss_orient.item() if self.opt.lambda_3d_normal_smooth > 0 and 'loss_normal_perturb' in outputs: # 0 loss_smooth3d = self.opt.lambda_3d_normal_smooth * outputs['loss_normal_perturb'].mean() reg_losses_dict['loss_smooth3d'] = loss_smooth3d.item() if self.opt.lambda_time_tv > 0: if self.opt.backbone == 'grid4d': loss_time_tv = self.opt.lambda_time_tv * self.model.TV_loss() reg_losses_dict['loss_time_tv'] = loss_time_tv.item() loss += loss_opacity + loss_entropy + loss_smooth + loss_smooth2d + loss_orient + loss_smooth3d + loss_time_tv + loss_canonical else: if self.opt.lambda_mesh_normal > 0: loss_mesh_normal = self.opt.lambda_mesh_normal * \ outputs['loss_normal'].mean() reg_losses_dict['loss_mesh_normal'] = loss_mesh_normal.item() if self.opt.lambda_mesh_lap > 0: loss_mesh_lap = self.opt.lambda_mesh_lap * outputs['loss_lap'].mean() reg_losses_dict['loss_mesh_lap'] 
= loss_mesh_lap.item() loss += loss_mesh_normal + loss_mesh_lap losses_dict = { 'loss': loss.item(), 'loss_sds': loss_sds.item(), 'loss_sr': loss_sr.item(), 'loss_cn': loss_cn.item(), # 'loss_if': loss_if.item(), 'loss_zero123': loss_zero123.item(), # 'loss_clip': loss_clip.item(), 'loss_rgb': loss_rgb.item(), 'loss_mask': loss_mask.item(), 'loss_normal': loss_normal.item(), 'loss_depth': loss_depth.item(), # 'loss_opacity': loss_opacity.item(), # 'loss_entropy': loss_entropy.item(), # 'loss_smooth': loss_smooth.item(), # 'loss_smooth2d': loss_smooth2d.item(), # 'loss_smooth3d': loss_smooth3d.item(), # 'loss_orient': loss_orient.item(), # 'loss_mesh_normal': loss_mesh_normal.item(), # 'loss_mesh_lap': loss_mesh_lap.item(), } losses_dict.update(reg_losses_dict) # if loss_guidance_dict: # for key, val in loss_guidance_dict.items(): # losses_dict[key] = val.item() if isinstance(val, torch.Tensor) else val if 'normal' in out_dict: out_dict['normal'] = rearrange(out_dict['normal'], "b f h w c -> b f c h w").contiguous() # B,F,H,W,3 -> B,F,3,H,W if torch.isnan(loss): ipdb.set_trace() # save for debug purpose if self.opt.save_train_every > 0 and self.global_step % self.opt.save_train_every == 0: image_save_path = os.path.join(self.workspace, 'train_debug',) os.makedirs(image_save_path, exist_ok=True) for key, value in out_dict.items(): if value is not None: value = ((value - value.min()) / (value.max() - value.min() + 1e-6)).detach().mul(255).to(torch.uint8) # 0-255 try:
logger = logging.getLogger(__name__)


class DTrainer(Trainer):
    def __init__(self, argv, name, opt, model, guidance, criterion=None, optimizer=None, ema_decay=None, lr_scheduler=None, metrics=[], local_rank=0, world_size=1, device=None, mute=False, fp16=False, max_keep_ckpt=1, workspace='workspace', best_mode='min', use_loss_as_metric=True, report_metric_at_train=False, use_checkpoint="latest", use_tensorboard=True, scheduler_update_every_step=False, **kwargs):
        super().__init__(argv, name, opt, model, guidance, criterion, optimizer, ema_decay, lr_scheduler, metrics, local_rank, world_size, device, mute, fp16, max_keep_ckpt, workspace, best_mode, use_loss_as_metric, report_metric_at_train, use_checkpoint, use_tensorboard, scheduler_update_every_step, **kwargs)
        self.rgbd_scale = opt.get("rgbd_scale", 1.0)
        self.fix_dynamic = opt.fix_dynamic
        if self.fix_dynamic:
            assert opt.backbone == 'grid4d'
            self.dynamic_model = NeRFNetwork(opt)
            # ipdb.set_trace()
            model_state_dict = self.model.state_dict()
            self.dynamic_model.load_state_dict(model_state_dict)
            for p in self.dynamic_model.parameters():
                p.requires_grad = False
            self.dynamic_model.train()
            self.dynamic_model.to(opt.device)

    @torch.no_grad()
    def eval_static_step(self, data, shading):
        rays_o = data['rays_o']  # [B, N, 3] / B,F,N,3
        rays_d = data['rays_d']  # [B, N, 3] / B,F,N,3
        mvp = data['mvp']  # B,4,4 / B,F,4,4
        if rays_o.ndim == 4:
            rays_o = rays_o[:, 0]
            rays_d = rays_d[:, 0]
            mvp = mvp[:, 0]
        B, N = rays_o.shape[:2]
        H, W = data['H'], data['W']
        ambient_ratio = data['ambient_ratio'] if 'ambient_ratio' in data else 1.0
        light_d = data['light_d'] if 'light_d' in data else None
        # ipdb.set_trace()
        outputs = self.static_model.render(rays_o, rays_d, mvp, H, W, staged=True, perturb=False, bg_color=None, light_d=light_d, ambient_ratio=ambient_ratio, shading=shading)
        pred_rgb = outputs['image'].reshape(B, H, W, 3)
        pred_depth = outputs['depth'].reshape(B, H, W, 1)
        if self.opt.normalize_depth:
            pred_depth = nonzero_normalize_depth(pred_depth)
        if 'normal_image' in outputs:  # eval mode no normal image
            pred_normal = outputs['normal_image'].reshape(B, H, W, 3)
        else:
            pred_normal = None
        pred_mask = outputs['weights_sum'].reshape(B, H, W, 1)
        out_dict = {
            'rgb': pred_rgb,
            'depth': pred_depth,
            'normal_image': pred_normal,
            'mask': pred_mask,
        }
        return out_dict

    def train_step(self, data):
        # perform RGBD loss instead of SDS if is image-conditioned
        do_rgbd_loss = self.opt.images is not None and \
            ((self.global_step < self.opt.known_iters) or (self.global_step % self.opt.known_view_interval == 0))
        # ipdb.set_trace()
        # override random camera with fixed known camera
        if do_rgbd_loss:
            data = self.default_view_data

        # progressively relaxing view range
        if self.opt.progressive_view:
            r = min(1.0, 0.2 + self.global_step / (0.5 * self.opt.iters))
            self.opt.phi_range = [self.opt.default_azimuth * (1 - r) + self.opt.full_phi_range[0] * r,
                                  self.opt.default_azimuth * (1 - r) + self.opt.full_phi_range[1] * r]
            self.opt.theta_range = [self.opt.default_polar * (1 - r) + self.opt.full_theta_range[0] * r,
                                    self.opt.default_polar * (1 - r) + self.opt.full_theta_range[1] * r]
            self.opt.radius_range = [self.opt.default_radius * (1 - r) + self.opt.full_radius_range[0] * r,
                                     self.opt.default_radius * (1 - r) + self.opt.full_radius_range[1] * r]
            self.opt.fovy_range = [self.opt.default_fovy * (1 - r) + self.opt.full_fovy_range[0] * r,
                                   self.opt.default_fovy * (1 - r) + self.opt.full_fovy_range[1] * r]

        # progressively increase max_level
        if self.opt.progressive_level:
            self.model.max_level = min(1.0, 0.25 + self.global_step / (0.5 * self.opt.iters))

        rays_o = data['rays_o']  # [B, N, 3] # B,F,N,3
        rays_d = data['rays_d']  # [B, N, 3] # B,F,N,3
        mvp = data['mvp']  # [B, 4, 4] / [B,F,4,4]
        time = data['time']  # [B,T]
        use_dynamic_cam = (rays_o.ndim == 4)
        B = rays_o.size(0)
        # ipdb.set_trace()
        N = rays_o.size(1) if not use_dynamic_cam else rays_o.size(2)
        H, W = data['H'], data['W']
        # ipdb.set_trace()
        start_from_zero = data.get('start_from_zero', True)
        if start_from_zero:
            assert time[0,0] == 0

        # When ref_data has B images > opt.batch_size
        if B > self.opt.batch_size:
            # choose batch_size images out of those B images
            choice = torch.randperm(B)[:self.opt.batch_size]
            B = self.opt.batch_size
            rays_o = rays_o[choice]
            rays_d = rays_d[choice]
            mvp = mvp[choice]

        if do_rgbd_loss:
            ambient_ratio = 1.0
            shading = 'lambertian'  # use lambertian instead of albedo to get normal
            as_latent = False
            binarize = False
            bg_color = self.get_bg_color(
                self.opt.bg_color_known, B*N, rays_o.device)

            # add camera noise to avoid grid-like artifact
            if self.opt.known_view_noise_scale > 0:
                noise_scale = self.opt.known_view_noise_scale  #* (1 - self.global_step / self.opt.iters)
                rays_o = rays_o + torch.randn(3, device=self.device) * noise_scale
                rays_d = rays_d + torch.randn(3, device=self.device) * noise_scale

        elif self.global_step < (self.opt.latent_iter_ratio * self.opt.iters):  ## 0
            ambient_ratio = 1.0
            shading = 'normal'
            as_latent = True
            binarize = False
            bg_color = None

        else:
            if self.global_step < (self.opt.normal_iter_ratio * self.opt.iters):  # 0.2
                ambient_ratio = 1.0
                shading = 'normal'
            elif self.global_step < (self.opt.textureless_iter_ratio * self.opt.iters):  # 0
                ambient_ratio = 0.1 + 0.9 * random.random()
                shading = 'textureless'
            elif self.global_step < (self.opt.albedo_iter_ratio * self.opt.iters):  # 0
                ambient_ratio = 1.0
                shading = 'albedo'
            else:
                # random shading
                ambient_ratio = 0.1 + 0.9 * random.random()
                rand = random.random()
                if rand < self.opt.textureless_rate:  # 0.2
                    shading = 'textureless'
                else:
                    shading = 'lambertian'
            as_latent = False

            # random weights binarization (like mobile-nerf) [NOT WORKING NOW]
            # binarize_thresh = min(0.5, -0.5 + self.global_step / self.opt.iters)
            # binarize = random.random() < binarize_thresh
            binarize = False

            # random background
            rand = random.random()
            # ipdb.set_trace()
            if self.opt.bg_radius > 0 and rand > 0.5:
                bg_color = None  # use bg_net
            else:
                bg_color = torch.rand(3).to(self.device)  # single color random bg
            ## NOTE if bg_radius < 0 -> the way magic123 use
            # The bg color is always random

        video_outputs = []
        num_frames = time.size(1)
        light_d = safe_normalize(rays_o + torch.randn(3, device=rays_o.device))
        outputs = self.model.render(rays_o, rays_d, mvp, H, W, staged=False, perturb=True, bg_color=bg_color, ambient_ratio=ambient_ratio, shading=shading, binarize=binarize, time=time, do_rgbd_loss=do_rgbd_loss, light_d=light_d)
        # ipdb.set_trace()
        pred_depth = outputs['depth'].reshape(B, num_frames, 1, H, W)
        if self.opt.normalize_depth:
            pred_depth = nonzero_normalize_depth(pred_depth)
        pred_mask = outputs['weights_sum'].reshape(B, num_frames, 1, H, W)
        if 'normal_image' in outputs:
            pred_normal = outputs['normal_image'].reshape(B, num_frames, H, W, 3)
        else:
            pred_normal = None
        if as_latent:
            # abuse normal & mask as latent code for faster geometry initialization (ref: fantasia3D)
            pred_rgb = torch.cat([outputs['image'], outputs['weights_sum'].unsqueeze(-1)], dim=-1).reshape(B, num_frames, H, W, 4).permute(0,1,4,2,3).contiguous()  # [B, F, 4, H, W]
        else:
            pred_rgb = outputs['image'].reshape(B, num_frames, H, W, 3).permute(0,1,4,2,3).contiguous()  # [B, F, 3, H, W]
        # ipdb.set_trace()
        if 'image_wo_bg' in outputs:
            image_wo_bg = outputs['image_wo_bg'] + (1 - outputs['weights_sum']).unsqueeze(-1) * 1  # B,F,N,3
            if as_latent:
                # abuse normal & mask as latent code for faster geometry initialization (ref: fantasia3D)
                pred_rgb_wobg = torch.cat([image_wo_bg, outputs['weights_sum'].unsqueeze(-1)], dim=-1).reshape(B, num_frames, H, W, 4).permute(0,1,4,2,3).contiguous()  # [B, 4, H, W]
            else:
                pred_rgb_wobg = image_wo_bg.reshape(B, num_frames, H, W, 3).permute(0,1,4,2,3).contiguous()  # [B, 3, H, W]

        out_dict = {
            'rgb': pred_rgb,  # B,F,3,H,W
            'depth': pred_depth,  # B,F,1,H,W
            'mask': pred_mask,  # B,F,1,H,W
            'normal': pred_normal,  # B,F,H,W,3
            'pred_rgb_wobg': pred_rgb_wobg
        }

        # Loss
        # known view loss
        loss_rgb, loss_mask, loss_normal, loss_depth, loss_sds, loss_if, loss_zero123, loss_clip, loss_entropy, loss_opacity, loss_orient, loss_smooth, loss_smooth2d, loss_smooth3d, loss_mesh_normal, loss_mesh_lap, loss_time_tv, loss_canonical, loss_sr, loss_cn = torch.zeros(20, device=self.device)
        # known view loss
        # assert not do_rgbd_loss
        reg_losses_dict = {}
        loss = 0
        if do_rgbd_loss:  ## NOTE this only applied to the first frame,
            # ipdb.set_trace()
            gt_mask = self.mask  # [B, H, W] bool
            gt_rgb = self.rgb  # [B, 3, H, W]
            gt_opacity = self.opacity  # [B, 1, H, W] # float version of mask
            gt_normal = self.normal  # [B, H, W, 3] # None
            gt_depth = self.depth  # N -> only mask true depth
            if len(gt_rgb) > self.opt.batch_size:
                gt_mask = gt_mask[choice]
                gt_rgb = gt_rgb[choice]
                gt_opacity = gt_opacity[choice]
                gt_normal = gt_normal[choice]
                gt_depth = gt_depth[choice]

            # color loss
            loss_rgb = self.opt.lambda_rgb * self.rgbd_scale * \
                F.mse_loss(pred_rgb[:,0]*gt_opacity, gt_rgb*gt_opacity)  # B,3,H,W

            # mask loss
            loss_mask = self.opt.lambda_mask * self.rgbd_scale * F.mse_loss(pred_mask[:,0], gt_mask.to(torch.float32).unsqueeze(0))

            # normal loss
            if self.opt.lambda_normal > 0 and 'normal_image' in outputs and self.normal is not None:
                pred_normal = pred_normal[:,0][self.mask]
                lambda_normal = self.opt.lambda_normal * \
                    min(1, self.global_step / self.opt.iters)
                loss_normal = lambda_normal * self.rgbd_scale * \
                    (1 - F.cosine_similarity(pred_normal, self.normal).mean())/2

            # relative depth loss
            if self.opt.lambda_depth > 0 and self.depth is not None:
                valid_pred_depth = pred_depth[:, 0, 0][self.mask]
                loss_depth = self.opt.lambda_depth * self.rgbd_scale * (1 - pearson_corrcoef(valid_pred_depth, self.depth))/2

            loss = (loss_rgb + loss_mask + loss_normal + loss_depth)

        # novel view loss
        else:
            # ipdb.set_trace()
            static_rgb = None
            save_guidance_path = os.path.join(self.opt.workspace, 'guidance', f'train_step{self.global_step}_guidance.jpg') if self.opt.save_guidance_every > 0 and self.global_step % self.opt.save_guidance_every == 0 else None
            if 'SD' in self.guidance:
                # interpolate text_z
                azimuth = data['azimuth']  # [-180, 180]
                # ipdb.set_trace()
                ## NOTE here should I remove the view information?
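                # The block below blends view-dependent text embeddings by azimuth
                # (a descriptive sketch of the interpolation, assuming the front/side/back
                # embeddings share one shape): r falls linearly from 1 at the nearer
                # canonical view to 0 at the farther one, e.g.
                #   azimuth =   0 -> 1.0*front + 0.0*side
                #   azimuth =  45 -> 0.5*front + 0.5*side
                #   azimuth = 135 -> 0.5*side  + 0.5*back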
                ## add mid frame view information
                if 'frame_azimuth' in data and use_dynamic_cam:
                    idx = num_frames//2
                    azimuth = data['frame_azimuth'][idx:idx+1]  # 1,3

                # ENHANCE: remove loop to handle batch size > 1
                text_z = []
                for b in range(azimuth.shape[0]):
                    if self.opt.no_view_text and use_dynamic_cam:
                        text_z.append(self.embeddings['SD']['default'])
                        continue
                    if azimuth[b] >= -90 and azimuth[b] < 90:
                        if azimuth[b] >= 0:
                            r = 1 - azimuth[b] / 90
                        else:
                            r = 1 + azimuth[b] / 90
                        start_z = self.embeddings['SD']['front']
                        end_z = self.embeddings['SD']['side']
                    else:
                        if azimuth[b] >= 0:
                            r = 1 - (azimuth[b] - 90) / 90
                        else:
                            r = 1 + (azimuth[b] + 90) / 90
                        start_z = self.embeddings['SD']['side']
                        end_z = self.embeddings['SD']['back']
                    text_z.append(r * start_z + (1 - r) * end_z)
                text_z = torch.stack(text_z, dim=0).transpose(0, 1).flatten(0, 1)
                # text_z_sds = text_z[:, :-1]  # this is to remove the cls token...
                text_z_sds = text_z
                loss_sds, _ = self.guidance['SD'].train_step(text_z_sds, pred_rgb, as_latent=as_latent, guidance_scale=self.opt.guidance_scale['SD'], grad_scale=self.opt.lambda_guidance['SD'],
                                                             density=pred_mask if self.opt.gudiance_spatial_weighting else None,
                                                             save_guidance_path=save_guidance_path, step=self.global_step,
                                                             )

            if 'CN' in self.guidance:
                # ipdb.set_trace()
                save_guidance_CN_path = os.path.join(self.opt.workspace, 'guidance_CN', f'train_step{self.global_step}_guidance.jpg') if self.opt.save_guidance_every > 0 and self.global_step % self.opt.save_guidance_every == 0 else None
                # ipdb.set_trace()
                ## NOTE here should not use text_z_sds, if the SR model use different text encoder?
                ## get image index for part frames update
                index = torch.arange(0, self.opt.num_frames, step=1)  # default, choose all
                if self.opt.cn_frames < self.opt.num_frames:
                    if self.opt.cn_frame_method == 'even':
                        assert self.opt.num_frames % self.opt.cn_frames == 0
                        interval = self.opt.num_frames // self.opt.cn_frames
                        index = torch.arange(0, self.opt.num_frames, step=interval)
                    elif self.opt.cn_frame_method == 'random':
                        index = torch.randperm(self.opt.num_frames)[:self.opt.cn_frames]
                    else:
                        raise NotImplementedError

                azimuth = data['azimuth']  # [-180, 180]
                # ipdb.set_trace()
                ## NOTE here should I remove the view information?
                if 'frame_azimuth' in data and use_dynamic_cam:
                    azimuth = data['frame_azimuth'][index]  # N,3

                # ENHANCE: remove loop to handle batch size > 1
                text_z = []
                for b in range(azimuth.shape[0]):
                    if self.opt.no_view_text and use_dynamic_cam:
                        text_z.append(self.embeddings['CN']['default'])
                        continue
                    if azimuth[b] >= -90 and azimuth[b] < 90:
                        if azimuth[b] >= 0:
                            r = 1 - azimuth[b] / 90
                        else:
                            r = 1 + azimuth[b] / 90
                        start_z = self.embeddings['CN']['front']
                        end_z = self.embeddings['CN']['side']
                    else:
                        if azimuth[b] >= 0:
                            r = 1 - (azimuth[b] - 90) / 90
                        else:
                            r = 1 + (azimuth[b] + 90) / 90
                        start_z = self.embeddings['CN']['side']
                        end_z = self.embeddings['CN']['back']
                    text_z.append(r * start_z + (1 - r) * end_z)
                text_z = torch.stack(text_z, dim=0).transpose(0, 1).flatten(0, 1)  # TODO check B,2,77,C -> 2B,77,C?
                # text_z_sds = text_z[:, :-1]  # this is to remove the cls token...
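                # Shape bookkeeping for the stack/transpose/flatten above (a sketch,
                # assuming each per-frame embedding is an (uncond, cond) pair of
                # [77, C] token tensors, i.e. shape [2, 77, C]):
                #   stack(dim=0)    -> [F, 2, 77, C]
                #   transpose(0, 1) -> [2, F, 77, C]
                #   flatten(0, 1)   -> [2F, 77, C], which answers the TODO above.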
                text_cn_sds = text_z
                text_cn_cn = self.embeddings['CN']['CN'] if self.opt.cn_cn_text else text_cn_sds
                ## NOTE here we use online prediction, will this lead to error accumulation -> Yes
                ## get the condition images
                cn_cn_pred_rgb = pred_rgb.detach()
                cn_pred_rgb = pred_rgb  # pred_rgb B,F,3,H,W
                # ipdb.set_trace()
                if self.fix_dynamic:
                    ## NOTE dynamic render is not applied to the inference model, so the render should be the training model
                    with torch.no_grad():
                        outputs_dyn = self.dynamic_model.render(rays_o, rays_d, mvp, H, W, staged=False, perturb=True, bg_color=bg_color, ambient_ratio=ambient_ratio, shading=shading, binarize=binarize, time=time, do_rgbd_loss=do_rgbd_loss, light_d=light_d)
                        # ipdb.set_trace()
                        pred_depth_dyn = outputs_dyn['depth'].reshape(B, num_frames, 1, H, W)
                        if self.opt.normalize_depth:
                            pred_depth_dyn = nonzero_normalize_depth(pred_depth_dyn)
                        pred_rgb_dyn = outputs_dyn['image'].reshape(B, num_frames, H, W, 3).permute(0,1,4,2,3).contiguous()  # [B, F, 3, H, W]
                    ## use the dynamic rendered from the fixed model for controlnet input
                    cn_cn_pred_rgb = pred_rgb_dyn

                ## select image with index
                cn_pred_rgb = pred_rgb[:,index]
                cn_cn_pred_rgb = cn_cn_pred_rgb[:,index]
                loss_cn, _ = self.guidance['CN'].train_step(text_cn_sds, text_cn_cn, cn_pred_rgb, cn_cn_pred_rgb, as_latent=as_latent, guidance_scale=self.opt.guidance_scale['CN'], grad_scale=self.opt.lambda_guidance['CN'],
                                                            density=None, save_guidance_path=save_guidance_CN_path, step=self.global_step,)

            if 'IF' in self.guidance:
                # interpolate text_z
                azimuth = data['azimuth']  # [-180, 180]

                # ENHANCE: remove loop to handle batch size > 1
                # ENHANCE: remove loop to handle batch size > 1
                text_z = []
                for b in range(azimuth.shape[0]):
                    if azimuth[b] >= -90 and azimuth[b] < 90:
                        if azimuth[b] >= 0:
                            r = 1 - azimuth[b] / 90
                        else:
                            r = 1 + azimuth[b] / 90
                        start_z = self.embeddings['IF']['front']
                        end_z = self.embeddings['IF']['side']
                    else:
                        if azimuth[b] >= 0:
                            r = 1 - (azimuth[b] - 90) / 90
                        else:
                            r = 1 + (azimuth[b] + 90) / 90
                        start_z = self.embeddings['IF']['side']
                        end_z = self.embeddings['IF']['back']
                    text_z.append(r * start_z + (1 - r) * end_z)
                text_z = torch.stack(text_z, dim=0).transpose(0, 1).flatten(0, 1)
                text_z = torch.cat(text_z, dim=1).reshape(B, 2, start_z.shape[-2]-1, start_z.shape[-1]).transpose(0, 1).flatten(0, 1)
                loss_if = self.guidance['IF'].train_step(text_z, pred_rgb, guidance_scale=self.opt.guidance_scale['IF'], grad_scale=self.opt.lambda_guidance['IF'])

            if 'zero123' in self.guidance and start_from_zero:
                # raise NotImplementedError
                save_guidance_zero123_path = os.path.join(self.opt.workspace, 'guidance_zero123', f'train_step{self.global_step}_guidance.jpg') if self.opt.save_guidance_every > 0 and self.global_step % self.opt.save_guidance_every == 0 else None
                polar = data['polar']
                azimuth = data['azimuth']
                radius = data['radius']
                # input_3dprior B,3,H,W
                # ipdb.set_trace()
                input_3dprior = pred_rgb[:,0]
                loss_zero123 = self.guidance['zero123'].train_step(self.embeddings['zero123']['default'], input_3dprior, polar, azimuth, radius, guidance_scale=self.opt.guidance_scale['zero123'],
                                                                   as_latent=as_latent, grad_scale=self.opt.lambda_guidance['zero123'], save_guidance_path=save_guidance_zero123_path)

            if 'clip' in self.guidance:
                # empirical, far view should apply smaller CLIP loss
                lambda_guidance = 10 * (1 - abs(azimuth) / 180) * self.opt.lambda_guidance['clip']
                loss_clip = self.guidance['clip'].train_step(self.embeddings['clip'], pred_rgb, grad_scale=lambda_guidance)

            loss += loss_sds + loss_if + loss_zero123 + loss_clip + loss_sr + loss_cn

        # regularizations
        if not self.opt.dmtet:
            if self.opt.lambda_opacity > 0:  # 0
                loss_opacity = self.opt.lambda_opacity * (outputs['weights_sum'] ** 2).mean()
                reg_losses_dict['loss_opacity'] = loss_opacity.item()

            if self.opt.lambda_entropy > 0:  # 1e-3
                lambda_entropy = self.opt.lambda_entropy * \
                    min(1, 2 * self.global_step / self.opt.iters)
                alphas = outputs['weights'].clamp(1e-5, 1 - 1e-5)
                # alphas = alphas ** 2 # skewed entropy, favors 0 over 1
                loss_entropy = lambda_entropy * (- alphas * torch.log2(alphas) - (1 - alphas) * torch.log2(1 - alphas)).mean()
                reg_losses_dict['loss_entropy'] = loss_entropy.item()

            if self.opt.lambda_normal_smooth > 0 and 'normal_image' in outputs:  # 0.5 # no image in sd-dreamfusion should be 0
                pred_vals = outputs['normal_image'].reshape(-1, H, W, 3)  # BF,H,W,3
                # total-variation
                loss_smooth = (pred_vals[:, 1:, :, :] - pred_vals[:, :-1, :, :]).square().mean() + \
                              (pred_vals[:, :, 1:, :] - pred_vals[:, :, :-1, :]).square().mean()
                loss_smooth = self.opt.lambda_normal_smooth * loss_smooth
                reg_losses_dict['loss_smooth'] = loss_smooth.item()

            if self.opt.lambda_normal_smooth2d > 0 and 'normal_image' in outputs:  # 0.5 # no image in sd-dreamfusion should be 0
                pred_vals = outputs['normal_image'].reshape(
                    -1, H, W, 3).permute(0,3,1,2).contiguous()  # BF,3,H,W
                smoothed_vals = TF.gaussian_blur(pred_vals, kernel_size=9)
                loss_smooth2d = self.opt.lambda_normal_smooth2d * F.mse_loss(pred_vals, smoothed_vals)
                reg_losses_dict['loss_smooth2d'] = loss_smooth2d.item()

            if self.opt.lambda_orient > 0 and 'loss_orient' in outputs:  # 1e-2
                loss_orient = self.opt.lambda_orient * outputs['loss_orient'].mean()
                reg_losses_dict['loss_orient'] = loss_orient.item()

            if self.opt.lambda_3d_normal_smooth > 0 and 'loss_normal_perturb' in outputs:  # 0
                loss_smooth3d = self.opt.lambda_3d_normal_smooth * outputs['loss_normal_perturb'].mean()
                reg_losses_dict['loss_smooth3d'] = loss_smooth3d.item()

            if self.opt.lambda_time_tv > 0:
                if self.opt.backbone == 'grid4d':
                    loss_time_tv = self.opt.lambda_time_tv * self.model.TV_loss()
                    reg_losses_dict['loss_time_tv'] = loss_time_tv.item()

            loss += loss_opacity + loss_entropy + loss_smooth + loss_smooth2d + loss_orient + loss_smooth3d + loss_time_tv + loss_canonical
        else:
            if self.opt.lambda_mesh_normal > 0:
                loss_mesh_normal = self.opt.lambda_mesh_normal * \
                    outputs['loss_normal'].mean()
                reg_losses_dict['loss_mesh_normal'] = loss_mesh_normal.item()

            if self.opt.lambda_mesh_lap > 0:
                loss_mesh_lap = self.opt.lambda_mesh_lap * outputs['loss_lap'].mean()
                reg_losses_dict['loss_mesh_lap'] = loss_mesh_lap.item()
            loss += loss_mesh_normal + loss_mesh_lap

        losses_dict = {
            'loss': loss.item(),
            'loss_sds': loss_sds.item(),
            'loss_sr': loss_sr.item(),
            'loss_cn': loss_cn.item(),
            # 'loss_if': loss_if.item(),
            'loss_zero123': loss_zero123.item(),
            # 'loss_clip': loss_clip.item(),
            'loss_rgb': loss_rgb.item(),
            'loss_mask': loss_mask.item(),
            'loss_normal': loss_normal.item(),
            'loss_depth': loss_depth.item(),
            # 'loss_opacity': loss_opacity.item(),
            # 'loss_entropy': loss_entropy.item(),
            # 'loss_smooth': loss_smooth.item(),
            # 'loss_smooth2d': loss_smooth2d.item(),
            # 'loss_smooth3d': loss_smooth3d.item(),
            # 'loss_orient': loss_orient.item(),
            # 'loss_mesh_normal': loss_mesh_normal.item(),
            # 'loss_mesh_lap': loss_mesh_lap.item(),
        }
        losses_dict.update(reg_losses_dict)
        # if loss_guidance_dict:
        #     for key, val in loss_guidance_dict.items():
        #         losses_dict[key] = val.item() if isinstance(val, torch.Tensor) else val
        if 'normal' in out_dict:
            out_dict['normal'] = rearrange(out_dict['normal'], "b f h w c -> b f c h w").contiguous()  # B,F,H,W,3 -> B,F,3,H,W
        if torch.isnan(loss):
            ipdb.set_trace()

        # save for debug purpose
        if self.opt.save_train_every > 0 and self.global_step % self.opt.save_train_every == 0:
            image_save_path = os.path.join(self.workspace, 'train_debug',)
            os.makedirs(image_save_path, exist_ok=True)
            for key, value in out_dict.items():
                if value is not None:
                    value = ((value - value.min()) / (value.max() - value.min() + 1e-6)).detach().mul(255).to(torch.uint8)  # 0-255
                    try:
save_tensor2image(value, os.path.join(image_save_path, f'train_{self.global_step:06d}_{key}.jpg'), channel_last=False)
0
2023-11-23 10:34:08+00:00
24k